1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2022 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "basic-block.h"
25 #include "bitmap.h"
26 #include "sbitmap.h"
27 #include "options.h"
28 #include "dominance.h"
29 #include "function.h"
30 #include "cfg.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "tree-pass.h"
34 #include "ssa.h"
35 #include "gimple-pretty-print.h"
36 #include "fold-const.h"
37 #include "cfganal.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "tree-ssa-loop-manip.h"
41 #include "tree-ssa-loop-niter.h"
42 #include "tree-into-ssa.h"
43 #include "cfgloop.h"
44 #include "tree-scalar-evolution.h"
45 #include "tree-ssa-propagate.h"
46 #include "domwalk.h"
47 #include "vr-values.h"
48 #include "gimple-array-bounds.h"
49 #include "gimple-range.h"
50 #include "gimple-range-path.h"
51 #include "value-pointer-equiv.h"
52 #include "gimple-fold.h"
53 #include "tree-dfa.h"
54
55 /* Set of SSA names found live during the RPO traversal of the function
56 for still active basic-blocks. */
57 class live_names
58 {
59 public:
60 live_names ();
61 ~live_names ();
62 void set (tree, basic_block);
63 void clear (tree, basic_block);
64 void merge (basic_block dest, basic_block src);
65 bool live_on_block_p (tree, basic_block);
66 bool live_on_edge_p (tree, edge);
67 bool block_has_live_names_p (basic_block);
68 void clear_block (basic_block);
69
70 private:
71 sbitmap *live;
72 unsigned num_blocks;
73 void init_bitmap_if_needed (basic_block);
74 };
75
76 void
77 live_names::init_bitmap_if_needed (basic_block bb)
78 {
79 unsigned i = bb->index;
80 if (!live[i])
81 {
82 live[i] = sbitmap_alloc (num_ssa_names);
83 bitmap_clear (live[i]);
84 }
85 }
86
87 bool
88 live_names::block_has_live_names_p (basic_block bb)
89 {
90 unsigned i = bb->index;
91 return live[i] && !bitmap_empty_p (live[i]);
92 }
93
94 void
95 live_names::clear_block (basic_block bb)
96 {
97 unsigned i = bb->index;
98 if (live[i])
99 {
100 sbitmap_free (live[i]);
101 live[i] = NULL;
102 }
103 }
104
105 void
106 live_names::merge (basic_block dest, basic_block src)
107 {
108 init_bitmap_if_needed (dest);
109 init_bitmap_if_needed (src);
110 bitmap_ior (live[dest->index], live[dest->index], live[src->index]);
111 }
112
113 void
114 live_names::set (tree name, basic_block bb)
115 {
116 init_bitmap_if_needed (bb);
117 bitmap_set_bit (live[bb->index], SSA_NAME_VERSION (name));
118 }
119
120 void
121 live_names::clear (tree name, basic_block bb)
122 {
123 unsigned i = bb->index;
124 if (live[i])
125 bitmap_clear_bit (live[i], SSA_NAME_VERSION (name));
126 }
127
128 live_names::live_names ()
129 {
130 num_blocks = last_basic_block_for_fn (cfun);
131 live = XCNEWVEC (sbitmap, num_blocks);
132 }
133
134 live_names::~live_names ()
135 {
136 for (unsigned i = 0; i < num_blocks; ++i)
137 if (live[i])
138 sbitmap_free (live[i]);
139 XDELETEVEC (live);
140 }
141
142 bool
143 live_names::live_on_block_p (tree name, basic_block bb)
144 {
145 return (live[bb->index]
146 && bitmap_bit_p (live[bb->index], SSA_NAME_VERSION (name)));
147 }
148
149 /* Return true if the SSA name NAME is live on the edge E. */
150
151 bool
152 live_names::live_on_edge_p (tree name, edge e)
153 {
154 return live_on_block_p (name, e->dest);
155 }
156
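/* Illustrative sketch of how the live_names tracker above can be used
   during the RPO walk; BB, E and NAME here are hypothetical placeholders,
   not variables from this file:

     live_names live;
     live.set (name, bb);                  // NAME seen live in BB
     if (live.live_on_edge_p (name, e))    // i.e. live on entry to E->dest
       ...                                 // queue an assertion for NAME
     live.merge (e->src, bb);              // propagate liveness to a pred
     live.clear (name, bb);                // NAME's definition kills it

   Each block lazily gets an sbitmap indexed by SSA_NAME_VERSION, so the
   set/clear/query operations are constant-time bit tests.  */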
157
158 /* VR_TYPE describes a range with minimum value *MIN and maximum
159 value *MAX. Restrict the range to the set of values that have
160 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
161 return the new range type.
162
163 SGN gives the sign of the values described by the range. */
164
165 enum value_range_kind
166 intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
167 wide_int *min, wide_int *max,
168 const wide_int &nonzero_bits,
169 signop sgn)
170 {
171 if (vr_type == VR_ANTI_RANGE)
172 {
173 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
174 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
175 to create an inclusive upper bound for A and an inclusive lower
176 bound for B. */
177 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
178 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
179
180 /* If the calculation of A_MAX wrapped, A is effectively empty
181 and A_MAX is the highest value that satisfies NONZERO_BITS.
182 Likewise if the calculation of B_MIN wrapped, B is effectively
183 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
184 bool a_empty = wi::ge_p (a_max, *min, sgn);
185 bool b_empty = wi::le_p (b_min, *max, sgn);
186
187 /* If both A and B are empty, there are no valid values. */
188 if (a_empty && b_empty)
189 return VR_UNDEFINED;
190
191 /* If exactly one of A or B is empty, return a VR_RANGE for the
192 other one. */
193 if (a_empty || b_empty)
194 {
195 *min = b_min;
196 *max = a_max;
197 gcc_checking_assert (wi::le_p (*min, *max, sgn));
198 return VR_RANGE;
199 }
200
201 /* Update the VR_ANTI_RANGE bounds. */
202 *min = a_max + 1;
203 *max = b_min - 1;
204 gcc_checking_assert (wi::le_p (*min, *max, sgn));
205
206 /* Now check whether the excluded range includes any values that
207 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
208 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
209 {
210 unsigned int precision = min->get_precision ();
211 *min = wi::min_value (precision, sgn);
212 *max = wi::max_value (precision, sgn);
213 vr_type = VR_RANGE;
214 }
215 }
216 if (vr_type == VR_RANGE || vr_type == VR_VARYING)
217 {
218 *max = wi::round_down_for_mask (*max, nonzero_bits);
219
220 /* Check that the range contains at least one valid value. */
221 if (wi::gt_p (*min, *max, sgn))
222 return VR_UNDEFINED;
223
224 *min = wi::round_up_for_mask (*min, nonzero_bits);
225 gcc_checking_assert (wi::le_p (*min, *max, sgn));
226 }
227 return vr_type;
228 }
229
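/* A small worked example of the routine above (illustrative values only):
   take the unsigned range [3, 17] with NONZERO_BITS == 0xC (binary 1100),
   so only the values {0, 4, 8, 12} satisfy the mask.

     *max = round_down_for_mask (17, 0xC) = 12
     *min = round_up_for_mask   (3,  0xC) = 4

   and the result is VR_RANGE [4, 12].  Had the rounded bounds crossed,
   e.g. for the input range [13, 15], the routine would instead return
   VR_UNDEFINED.  */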
230 /* Return true if max and min of VR are INTEGER_CST. It's not necessarily
231 a singleton.
232
233 bool
234 range_int_cst_p (const value_range *vr)
235 {
236 return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
237 }
238
239 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
240 otherwise. We only handle additive operations and set NEG to true if the
241 symbol is negated and INV to the invariant part, if any. */
242
243 tree
244 get_single_symbol (tree t, bool *neg, tree *inv)
245 {
246 bool neg_;
247 tree inv_;
248
249 *inv = NULL_TREE;
250 *neg = false;
251
252 if (TREE_CODE (t) == PLUS_EXPR
253 || TREE_CODE (t) == POINTER_PLUS_EXPR
254 || TREE_CODE (t) == MINUS_EXPR)
255 {
256 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
257 {
258 neg_ = (TREE_CODE (t) == MINUS_EXPR);
259 inv_ = TREE_OPERAND (t, 0);
260 t = TREE_OPERAND (t, 1);
261 }
262 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
263 {
264 neg_ = false;
265 inv_ = TREE_OPERAND (t, 1);
266 t = TREE_OPERAND (t, 0);
267 }
268 else
269 return NULL_TREE;
270 }
271 else
272 {
273 neg_ = false;
274 inv_ = NULL_TREE;
275 }
276
277 if (TREE_CODE (t) == NEGATE_EXPR)
278 {
279 t = TREE_OPERAND (t, 0);
280 neg_ = !neg_;
281 }
282
283 if (TREE_CODE (t) != SSA_NAME)
284 return NULL_TREE;
285
286 if (inv_ && TREE_OVERFLOW_P (inv_))
287 inv_ = drop_tree_overflow (inv_);
288
289 *neg = neg_;
290 *inv = inv_;
291 return t;
292 }
293
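/* For illustration, a few hypothetical decompositions performed by
   get_single_symbol above (x_1 and y_2 are SSA_NAMEs, 16 an INTEGER_CST):

     t = x_1         =>  returns x_1, *neg = false, *inv = NULL_TREE
     t = x_1 + 16    =>  returns x_1, *neg = false, *inv = 16
     t = 16 - x_1    =>  returns x_1, *neg = true,  *inv = 16
     t = -x_1        =>  returns x_1, *neg = true,  *inv = NULL_TREE
     t = x_1 + y_2   =>  returns NULL_TREE (two symbols, not handled)  */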
294 /* The reverse operation: build a symbolic expression with TYPE
295 from symbol SYM, negated according to NEG, and invariant INV. */
296
297 static tree
298 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
299 {
300 const bool pointer_p = POINTER_TYPE_P (type);
301 tree t = sym;
302
303 if (neg)
304 t = build1 (NEGATE_EXPR, type, t);
305
306 if (integer_zerop (inv))
307 return t;
308
309 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
310 }
311
312 /* Return
313 1 if VAL < VAL2
314 0 if !(VAL < VAL2)
315 -2 if those are incomparable. */
316 int
317 operand_less_p (tree val, tree val2)
318 {
319 /* LT is folded faster than GE and others. Inline the common case. */
320 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
321 return tree_int_cst_lt (val, val2);
322 else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
323 return val == val2 ? 0 : -2;
324 else
325 {
326 int cmp = compare_values (val, val2);
327 if (cmp == -1)
328 return 1;
329 else if (cmp == 0 || cmp == 1)
330 return 0;
331 else
332 return -2;
333 }
334 }
335
336 /* Compare two values VAL1 and VAL2. Return
337
338 -2 if VAL1 and VAL2 cannot be compared at compile-time,
339 -1 if VAL1 < VAL2,
340 0 if VAL1 == VAL2,
341 +1 if VAL1 > VAL2, and
342 +2 if VAL1 != VAL2
343
344 This is similar to tree_int_cst_compare but supports pointer values
345 and values that cannot be compared at compile time.
346
347 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
348 true if the return value is only valid if we assume that signed
349 overflow is undefined. */
350
351 int
352 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
353 {
354 if (val1 == val2)
355 return 0;
356
357 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
358 both integers. */
359 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
360 == POINTER_TYPE_P (TREE_TYPE (val2)));
361
362 /* Convert the two values into the same type. This is needed because
363 sizetype causes sign extension even for unsigned types. */
364 if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
365 val2 = fold_convert (TREE_TYPE (val1), val2);
366
367 const bool overflow_undefined
368 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
369 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
370 tree inv1, inv2;
371 bool neg1, neg2;
372 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
373 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
374
375 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
376 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
377 if (sym1 && sym2)
378 {
379 /* Both values must use the same name with the same sign. */
380 if (sym1 != sym2 || neg1 != neg2)
381 return -2;
382
383 /* [-]NAME + CST == [-]NAME + CST. */
384 if (inv1 == inv2)
385 return 0;
386
387 /* If overflow is defined we cannot simplify more. */
388 if (!overflow_undefined)
389 return -2;
390
391 if (strict_overflow_p != NULL
392 /* Symbolic range building sets the no-warning bit to declare
393 that overflow doesn't happen. */
394 && (!inv1 || !warning_suppressed_p (val1, OPT_Woverflow))
395 && (!inv2 || !warning_suppressed_p (val2, OPT_Woverflow)))
396 *strict_overflow_p = true;
397
398 if (!inv1)
399 inv1 = build_int_cst (TREE_TYPE (val1), 0);
400 if (!inv2)
401 inv2 = build_int_cst (TREE_TYPE (val2), 0);
402
403 return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
404 TYPE_SIGN (TREE_TYPE (val1)));
405 }
406
407 const bool cst1 = is_gimple_min_invariant (val1);
408 const bool cst2 = is_gimple_min_invariant (val2);
409
410 /* If one is of the form '[-]NAME + CST' and the other is constant, then
411 it might be possible to say something depending on the constants. */
412 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
413 {
414 if (!overflow_undefined)
415 return -2;
416
417 if (strict_overflow_p != NULL
418 /* Symbolic range building sets the no-warning bit to declare
419 that overflow doesn't happen. */
420 && (!sym1 || !warning_suppressed_p (val1, OPT_Woverflow))
421 && (!sym2 || !warning_suppressed_p (val2, OPT_Woverflow)))
422 *strict_overflow_p = true;
423
424 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
425 tree cst = cst1 ? val1 : val2;
426 tree inv = cst1 ? inv2 : inv1;
427
428 /* Compute the difference between the constants. If it overflows or
429 underflows, this means that we can trivially compare the NAME with
430 it and, consequently, the two values with each other. */
431 wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
432 if (wi::cmp (0, wi::to_wide (inv), sgn)
433 != wi::cmp (diff, wi::to_wide (cst), sgn))
434 {
435 const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
436 return cst1 ? res : -res;
437 }
438
439 return -2;
440 }
441
442 /* We cannot say anything more for non-constants. */
443 if (!cst1 || !cst2)
444 return -2;
445
446 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
447 {
448 /* We cannot compare overflowed values. */
449 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
450 return -2;
451
452 if (TREE_CODE (val1) == INTEGER_CST
453 && TREE_CODE (val2) == INTEGER_CST)
454 return tree_int_cst_compare (val1, val2);
455
456 if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
457 {
458 if (known_eq (wi::to_poly_widest (val1),
459 wi::to_poly_widest (val2)))
460 return 0;
461 if (known_lt (wi::to_poly_widest (val1),
462 wi::to_poly_widest (val2)))
463 return -1;
464 if (known_gt (wi::to_poly_widest (val1),
465 wi::to_poly_widest (val2)))
466 return 1;
467 }
468
469 return -2;
470 }
471 else
472 {
473 if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
474 {
475 /* We cannot compare overflowed values. */
476 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
477 return -2;
478
479 return tree_int_cst_compare (val1, val2);
480 }
481
482 /* First see if VAL1 and VAL2 are not the same. */
483 if (operand_equal_p (val1, val2, 0))
484 return 0;
485
486 fold_defer_overflow_warnings ();
487
488 /* If VAL1 is a lower address than VAL2, return -1. */
489 tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
490 if (t && integer_onep (t))
491 {
492 fold_undefer_and_ignore_overflow_warnings ();
493 return -1;
494 }
495
496 /* If VAL1 is a higher address than VAL2, return +1. */
497 t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
498 if (t && integer_onep (t))
499 {
500 fold_undefer_and_ignore_overflow_warnings ();
501 return 1;
502 }
503
504 /* If VAL1 is different than VAL2, return +2. */
505 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
506 fold_undefer_and_ignore_overflow_warnings ();
507 if (t && integer_onep (t))
508 return 2;
509
510 return -2;
511 }
512 }
513
514 /* Compare values like compare_values_warnv. */
515
516 int
517 compare_values (tree val1, tree val2)
518 {
519 bool sop;
520 return compare_values_warnv (val1, val2, &sop);
521 }
522
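/* Illustrative examples of the comparison routines above, writing
   INTEGER_CSTs as plain numbers for brevity and assuming a signed type
   with undefined overflow (the SSA names are hypothetical):

     compare_values (3, 5)              == -1
     compare_values (x_1 + 8, x_1 + 8)  ==  0  (same symbol, same invariant)
     compare_values (x_1 + 8, x_1 + 16) == -1  (same symbol, 8 < 16)
     compare_values (x_1, y_2)          == -2  (different symbols)

   operand_less_p (a, b) consistently returns 1 where compare_values
   would return -1, 0 for 0 or +1, and -2 when incomparable.  */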
523 /* If BOUND will include a symbolic bound, adjust it accordingly,
524 otherwise leave it as is.
525
526 CODE is the original operation that combined the bounds (PLUS_EXPR
527 or MINUS_EXPR).
528
529 TYPE is the type of the original operation.
530
531 SYM_OPn is the symbolic part of OPn, if any.
532
533 NEG_OPn is TRUE if the OPn was negated. */
534
535 static void
536 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
537 tree sym_op0, tree sym_op1,
538 bool neg_op0, bool neg_op1)
539 {
540 bool minus_p = (code == MINUS_EXPR);
541 /* If the result bound is constant, we're done; otherwise, build the
542 symbolic bound. */
543 if (sym_op0 == sym_op1)
544 ;
545 else if (sym_op0)
546 bound = build_symbolic_expr (type, sym_op0,
547 neg_op0, bound);
548 else if (sym_op1)
549 {
550 /* We may not negate if that might introduce
551 undefined overflow. */
552 if (!minus_p
553 || neg_op1
554 || TYPE_OVERFLOW_WRAPS (type))
555 bound = build_symbolic_expr (type, sym_op1,
556 neg_op1 ^ minus_p, bound);
557 else
558 bound = NULL_TREE;
559 }
560 }
561
562 /* Combine OP0 and OP1, which are two parts of a bound, into one wide
563 int bound according to CODE. CODE is the operation combining the
564 bound (either a PLUS_EXPR or a MINUS_EXPR).
565
566 TYPE is the type of the combine operation.
567
568 WI is the wide int to store the result.
569
570 OVF is -1 if an underflow occurred, +1 if an overflow occurred, or 0
571 if neither occurred. */
572
573 static void
574 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
575 tree type, tree op0, tree op1)
576 {
577 bool minus_p = (code == MINUS_EXPR);
578 const signop sgn = TYPE_SIGN (type);
579 const unsigned int prec = TYPE_PRECISION (type);
580
581 /* Combine the bounds, if any. */
582 if (op0 && op1)
583 {
584 if (minus_p)
585 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
586 else
587 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
588 }
589 else if (op0)
590 wi = wi::to_wide (op0);
591 else if (op1)
592 {
593 if (minus_p)
594 wi = wi::neg (wi::to_wide (op1), &ovf);
595 else
596 wi = wi::to_wide (op1);
597 }
598 else
599 wi = wi::shwi (0, prec);
600 }
601
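/* Illustrative behaviour of combine_bound above for an unsigned 8-bit
   TYPE (hypothetical operands):

     code = PLUS_EXPR,  op0 = 200, op1 = 100
       => wi = 44 after wrapping,  ovf = wi::OVF_OVERFLOW
     code = MINUS_EXPR, op0 = 3,   op1 = 10
       => wi = 249 after wrapping, ovf = wi::OVF_UNDERFLOW

   The caller (set_value_range_with_overflow below) then decides whether
   to wrap, saturate, or give up based on OVF.  */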
602 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
603 put the result in VR.
604
605 TYPE is the type of the range.
606
607 MIN_OVF and MAX_OVF indicate what type of overflow, if any,
608 occurred while originally calculating WMIN or WMAX. -1 indicates
609 underflow. +1 indicates overflow. 0 indicates neither. */
610
611 static void
612 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
613 tree type,
614 const wide_int &wmin, const wide_int &wmax,
615 wi::overflow_type min_ovf,
616 wi::overflow_type max_ovf)
617 {
618 const signop sgn = TYPE_SIGN (type);
619 const unsigned int prec = TYPE_PRECISION (type);
620
621 /* For one bit precision if max < min, then the swapped
622 range covers all values. */
623 if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
624 {
625 kind = VR_VARYING;
626 return;
627 }
628
629 if (TYPE_OVERFLOW_WRAPS (type))
630 {
631 /* If overflow wraps, truncate the values and adjust the
632 range kind and bounds appropriately. */
633 wide_int tmin = wide_int::from (wmin, prec, sgn);
634 wide_int tmax = wide_int::from (wmax, prec, sgn);
635 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
636 {
637 /* If the limits are swapped, we wrapped around and cover
638 the entire range. */
639 if (wi::gt_p (tmin, tmax, sgn))
640 kind = VR_VARYING;
641 else
642 {
643 kind = VR_RANGE;
644 /* No overflow or both overflow or underflow. The
645 range kind stays VR_RANGE. */
646 min = wide_int_to_tree (type, tmin);
647 max = wide_int_to_tree (type, tmax);
648 }
649 return;
650 }
651 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
652 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
653 {
654 /* Min underflow or max overflow. The range kind
655 changes to VR_ANTI_RANGE. */
656 bool covers = false;
657 wide_int tem = tmin;
658 tmin = tmax + 1;
659 if (wi::cmp (tmin, tmax, sgn) < 0)
660 covers = true;
661 tmax = tem - 1;
662 if (wi::cmp (tmax, tem, sgn) > 0)
663 covers = true;
664 /* If the anti-range would cover nothing, drop to varying.
665 Likewise if the anti-range bounds are outside of the
666 type's values. */
667 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
668 {
669 kind = VR_VARYING;
670 return;
671 }
672 kind = VR_ANTI_RANGE;
673 min = wide_int_to_tree (type, tmin);
674 max = wide_int_to_tree (type, tmax);
675 return;
676 }
677 else
678 {
679 /* Other underflow and/or overflow, drop to VR_VARYING. */
680 kind = VR_VARYING;
681 return;
682 }
683 }
684 else
685 {
686 /* If overflow does not wrap, saturate to the types min/max
687 value. */
688 wide_int type_min = wi::min_value (prec, sgn);
689 wide_int type_max = wi::max_value (prec, sgn);
690 kind = VR_RANGE;
691 if (min_ovf == wi::OVF_UNDERFLOW)
692 min = wide_int_to_tree (type, type_min);
693 else if (min_ovf == wi::OVF_OVERFLOW)
694 min = wide_int_to_tree (type, type_max);
695 else
696 min = wide_int_to_tree (type, wmin);
697
698 if (max_ovf == wi::OVF_UNDERFLOW)
699 max = wide_int_to_tree (type, type_min);
700 else if (max_ovf == wi::OVF_OVERFLOW)
701 max = wide_int_to_tree (type, type_max);
702 else
703 max = wide_int_to_tree (type, wmax);
704 }
705 }
706
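/* Worked example for the wrapping branch above (illustrative): with an
   unsigned 8-bit TYPE and the raw bounds WMIN = 250, WMAX = 260
   (MIN_OVF = wi::OVF_NONE, MAX_OVF = wi::OVF_OVERFLOW), truncation gives
   tmin = 250, tmax = 4, and the "min underflow or max overflow" branch
   produces the anti-range

     kind = VR_ANTI_RANGE, min = 5, max = 249

   i.e. ~[5, 249], which is exactly [250, 255] U [0, 4] -- the set of
   values [250, 260] can wrap to in 8 bits.  If neither or both bounds
   had overflowed, the result would have stayed a plain VR_RANGE, or
   dropped to VR_VARYING if the truncated limits crossed.  */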
707 /* Fold two value ranges of a POINTER_PLUS_EXPR into VR. */
708
709 static void
710 extract_range_from_pointer_plus_expr (value_range *vr,
711 enum tree_code code,
712 tree expr_type,
713 const value_range *vr0,
714 const value_range *vr1)
715 {
716 gcc_checking_assert (POINTER_TYPE_P (expr_type)
717 && code == POINTER_PLUS_EXPR);
718 /* For pointer types, we are really only interested in asserting
719 whether the expression evaluates to non-NULL.
720 With -fno-delete-null-pointer-checks we need to be more
721 conservative. As some object might reside at address 0,
722 then some offset could be added to it and the same offset
723 subtracted again and the result would be NULL.
724 E.g.
725 static int a[12]; where &a[0] is NULL and
726 ptr = &a[6];
727 ptr -= 6;
728 ptr will be NULL here, even when there is POINTER_PLUS_EXPR
729 where the first range doesn't include zero and the second one
730 doesn't either. As the second operand is sizetype (unsigned),
731 consider all ranges where the MSB could be set as possible
732 subtractions where the result might be NULL. */
733 if ((!range_includes_zero_p (vr0)
734 || !range_includes_zero_p (vr1))
735 && !TYPE_OVERFLOW_WRAPS (expr_type)
736 && (flag_delete_null_pointer_checks
737 || (range_int_cst_p (vr1)
738 && !tree_int_cst_sign_bit (vr1->max ()))))
739 vr->set_nonzero (expr_type);
740 else if (vr0->zero_p () && vr1->zero_p ())
741 vr->set_zero (expr_type);
742 else
743 vr->set_varying (expr_type);
744 }
745
746 /* Extract range information from a PLUS/MINUS_EXPR and store the
747 result in *VR. */
748
749 static void
750 extract_range_from_plus_minus_expr (value_range *vr,
751 enum tree_code code,
752 tree expr_type,
753 const value_range *vr0_,
754 const value_range *vr1_)
755 {
756 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
757
758 value_range vr0 = *vr0_, vr1 = *vr1_;
759 value_range vrtem0, vrtem1;
760
761 /* Now canonicalize anti-ranges to ranges when they are not symbolic
762 and express ~[] op X as ([]' op X) U ([]'' op X). */
763 if (vr0.kind () == VR_ANTI_RANGE
764 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
765 {
766 extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
767 if (!vrtem1.undefined_p ())
768 {
769 value_range vrres;
770 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
771 &vrtem1, vr1_);
772 vr->union_ (vrres);
773 }
774 return;
775 }
776 /* Likewise for X op ~[]. */
777 if (vr1.kind () == VR_ANTI_RANGE
778 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
779 {
780 extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
781 if (!vrtem1.undefined_p ())
782 {
783 value_range vrres;
784 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
785 vr0_, &vrtem1);
786 vr->union_ (vrres);
787 }
788 return;
789 }
790
791 value_range_kind kind;
792 value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
793 tree vr0_min = vr0.min (), vr0_max = vr0.max ();
794 tree vr1_min = vr1.min (), vr1_max = vr1.max ();
795 tree min = NULL_TREE, max = NULL_TREE;
796
797 /* This will normalize things such that calculating
798 [0,0] - VR_VARYING is not dropped to varying, but is
799 calculated as [MIN+1, MAX]. */
800 if (vr0.varying_p ())
801 {
802 vr0_kind = VR_RANGE;
803 vr0_min = vrp_val_min (expr_type);
804 vr0_max = vrp_val_max (expr_type);
805 }
806 if (vr1.varying_p ())
807 {
808 vr1_kind = VR_RANGE;
809 vr1_min = vrp_val_min (expr_type);
810 vr1_max = vrp_val_max (expr_type);
811 }
812
813 const bool minus_p = (code == MINUS_EXPR);
814 tree min_op0 = vr0_min;
815 tree min_op1 = minus_p ? vr1_max : vr1_min;
816 tree max_op0 = vr0_max;
817 tree max_op1 = minus_p ? vr1_min : vr1_max;
818 tree sym_min_op0 = NULL_TREE;
819 tree sym_min_op1 = NULL_TREE;
820 tree sym_max_op0 = NULL_TREE;
821 tree sym_max_op1 = NULL_TREE;
822 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
823
824 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
825
826 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
827 single-symbolic ranges, try to compute the precise resulting range,
828 but only if we know that this resulting range will also be constant
829 or single-symbolic. */
830 if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
831 && (TREE_CODE (min_op0) == INTEGER_CST
832 || (sym_min_op0
833 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
834 && (TREE_CODE (min_op1) == INTEGER_CST
835 || (sym_min_op1
836 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
837 && (!(sym_min_op0 && sym_min_op1)
838 || (sym_min_op0 == sym_min_op1
839 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
840 && (TREE_CODE (max_op0) == INTEGER_CST
841 || (sym_max_op0
842 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
843 && (TREE_CODE (max_op1) == INTEGER_CST
844 || (sym_max_op1
845 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
846 && (!(sym_max_op0 && sym_max_op1)
847 || (sym_max_op0 == sym_max_op1
848 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
849 {
850 wide_int wmin, wmax;
851 wi::overflow_type min_ovf = wi::OVF_NONE;
852 wi::overflow_type max_ovf = wi::OVF_NONE;
853
854 /* Build the bounds. */
855 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
856 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
857
858 /* If the resulting range will be symbolic, we need to eliminate any
859 explicit or implicit overflow introduced in the above computation
860 because compare_values could make an incorrect use of it. That's
861 why we require one of the ranges to be a singleton. */
862 if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
863 && ((bool)min_ovf || (bool)max_ovf
864 || (min_op0 != max_op0 && min_op1 != max_op1)))
865 {
866 vr->set_varying (expr_type);
867 return;
868 }
869
870 /* Adjust the range for possible overflow. */
871 set_value_range_with_overflow (kind, min, max, expr_type,
872 wmin, wmax, min_ovf, max_ovf);
873 if (kind == VR_VARYING)
874 {
875 vr->set_varying (expr_type);
876 return;
877 }
878
879 /* Build the symbolic bounds if needed. */
880 adjust_symbolic_bound (min, code, expr_type,
881 sym_min_op0, sym_min_op1,
882 neg_min_op0, neg_min_op1);
883 adjust_symbolic_bound (max, code, expr_type,
884 sym_max_op0, sym_max_op1,
885 neg_max_op0, neg_max_op1);
886 }
887 else
888 {
889 /* For other cases, for example if we have a PLUS_EXPR with two
890 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
891 to compute a precise range for such a case.
892 ??? General even mixed range kind operations can be expressed
893 by for example transforming ~[3, 5] + [1, 2] to range-only
894 operations and a union primitive:
895 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
896 [-INF+1, 4] U [6, +INF(OVF)]
897 though usually the union is not exactly representable with
898 a single range or anti-range as the above is
899 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
900 but one could use a scheme similar to equivalences for this. */
901 vr->set_varying (expr_type);
902 return;
903 }
904
905 /* If either MIN or MAX overflowed, then set the resulting range to
906 VARYING. */
907 if (min == NULL_TREE
908 || TREE_OVERFLOW_P (min)
909 || max == NULL_TREE
910 || TREE_OVERFLOW_P (max))
911 {
912 vr->set_varying (expr_type);
913 return;
914 }
915
916 int cmp = compare_values (min, max);
917 if (cmp == -2 || cmp == 1)
918 {
919 /* If the new range has its limits swapped around (MIN > MAX),
920 then the operation caused one of them to wrap around, mark
921 the new range VARYING. */
922 vr->set_varying (expr_type);
923 }
924 else
925 vr->set (min, max, kind);
926 }
927
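/* A few illustrative results of the PLUS/MINUS folder above
   (hypothetical signed 32-bit operands):

     [1, 5]  +  [10, 20]  =>  [11, 25]
     [1, 5]  -  [10, 20]  =>  [-19, -5]   (min uses vr1_max, max uses vr1_min)
     [x_1, x_1] + [3, 3]  =>  [x_1 + 3, x_1 + 3]   (single symbol kept)
     ~[0, 0] + [1, 2]     =>  handled by splitting the anti-range first

   Anything that would mix several symbols, or overflow in a way that
   cannot be represented, falls back to VR_VARYING.  */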
928 /* If the types passed are supported, return TRUE, otherwise set VR to
929 VARYING and return FALSE. */
930
931 static bool
932 supported_types_p (value_range *vr,
933 tree type0,
934 tree type1 = NULL)
935 {
936 if (!value_range_equiv::supports_p (type0)
937 || (type1 && !value_range_equiv::supports_p (type1)))
938 {
939 vr->set_varying (type0);
940 return false;
941 }
942 return true;
943 }
944
945 /* If any of the ranges passed are defined, return TRUE, otherwise set
946 VR to UNDEFINED and return FALSE. */
947
948 static bool
949 defined_ranges_p (value_range *vr,
950 const value_range *vr0, const value_range *vr1 = NULL)
951 {
952 if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
953 {
954 vr->set_undefined ();
955 return false;
956 }
957 return true;
958 }
959
960 static value_range
961 drop_undefines_to_varying (const value_range *vr, tree expr_type)
962 {
963 if (vr->undefined_p ())
964 return value_range (expr_type);
965 else
966 return *vr;
967 }
968
969 /* If any operand is symbolic, perform a binary operation on them and
970 return TRUE, otherwise return FALSE. */
971
972 static bool
973 range_fold_binary_symbolics_p (value_range *vr,
974 tree_code code,
975 tree expr_type,
976 const value_range *vr0_,
977 const value_range *vr1_)
978 {
979 if (vr0_->symbolic_p () || vr1_->symbolic_p ())
980 {
981 value_range vr0 = drop_undefines_to_varying (vr0_, expr_type);
982 value_range vr1 = drop_undefines_to_varying (vr1_, expr_type);
983 if ((code == PLUS_EXPR || code == MINUS_EXPR))
984 {
985 extract_range_from_plus_minus_expr (vr, code, expr_type,
986 &vr0, &vr1);
987 return true;
988 }
989 if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
990 {
991 extract_range_from_pointer_plus_expr (vr, code, expr_type,
992 &vr0, &vr1);
993 return true;
994 }
995 range_op_handler op (code, expr_type);
996 if (!op)
997 vr->set_varying (expr_type);
998 vr0.normalize_symbolics ();
999 vr1.normalize_symbolics ();
1000 return op.fold_range (*vr, expr_type, vr0, vr1);
1001 }
1002 return false;
1003 }
1004
1005 /* If operand is symbolic, perform a unary operation on it and return
1006 TRUE, otherwise return FALSE. */
1007
1008 static bool
1009 range_fold_unary_symbolics_p (value_range *vr,
1010 tree_code code,
1011 tree expr_type,
1012 const value_range *vr0)
1013 {
1014 if (vr0->symbolic_p ())
1015 {
1016 if (code == NEGATE_EXPR)
1017 {
1018 /* -X is simply 0 - X. */
1019 value_range zero;
1020 zero.set_zero (vr0->type ());
1021 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
1022 return true;
1023 }
1024 if (code == BIT_NOT_EXPR)
1025 {
1026 /* ~X is simply -1 - X. */
1027 value_range minusone;
1028 tree t = build_int_cst (vr0->type (), -1);
1029 minusone.set (t, t);
1030 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
1031 return true;
1032 }
1033 range_op_handler op (code, expr_type);
1034 if (!op)
1035 vr->set_varying (expr_type);
1036 value_range vr0_cst (*vr0);
1037 vr0_cst.normalize_symbolics ();
1038 return op.fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1039 }
1040 return false;
1041 }
1042
1043 /* Perform a binary operation on a pair of ranges. */
1044
1045 void
1046 range_fold_binary_expr (value_range *vr,
1047 enum tree_code code,
1048 tree expr_type,
1049 const value_range *vr0_,
1050 const value_range *vr1_)
1051 {
1052 if (!supported_types_p (vr, expr_type)
1053 || !defined_ranges_p (vr, vr0_, vr1_))
1054 return;
1055 range_op_handler op (code, expr_type);
1056 if (!op)
1057 {
1058 vr->set_varying (expr_type);
1059 return;
1060 }
1061
1062 if (range_fold_binary_symbolics_p (vr, code, expr_type, vr0_, vr1_))
1063 return;
1064
1065 value_range vr0 (*vr0_);
1066 value_range vr1 (*vr1_);
1067 if (vr0.undefined_p ())
1068 vr0.set_varying (expr_type);
1069 if (vr1.undefined_p ())
1070 vr1.set_varying (expr_type);
1071 vr0.normalize_addresses ();
1072 vr1.normalize_addresses ();
1073 if (!op.fold_range (*vr, expr_type, vr0, vr1))
1074 vr->set_varying (expr_type);
1075 }
1076
1077 /* Perform a unary operation on a range. */
1078
1079 void
1080 range_fold_unary_expr (value_range *vr,
1081 enum tree_code code, tree expr_type,
1082 const value_range *vr0,
1083 tree vr0_type)
1084 {
1085 if (!supported_types_p (vr, expr_type, vr0_type)
1086 || !defined_ranges_p (vr, vr0))
1087 return;
1088 range_op_handler op (code, expr_type);
1089 if (!op)
1090 {
1091 vr->set_varying (expr_type);
1092 return;
1093 }
1094
1095 if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
1096 return;
1097
1098 value_range vr0_cst (*vr0);
1099 vr0_cst.normalize_addresses ();
1100 if (!op.fold_range (*vr, expr_type, vr0_cst, value_range (expr_type)))
1101 vr->set_varying (expr_type);
1102 }
1103
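/* Minimal usage sketch for the two entry points above.  TYPE and the
   constants are hypothetical; the calls follow the signatures defined in
   this file:

     value_range op0, op1, res;
     op0.set (build_int_cst (type, 1), build_int_cst (type, 5));
     op1.set (build_int_cst (type, 10), build_int_cst (type, 20));
     range_fold_binary_expr (&res, PLUS_EXPR, type, &op0, &op1);
     // res is now [11, 25] for a sufficiently wide integer TYPE

     range_fold_unary_expr (&res, NEGATE_EXPR, type, &op0, type);
     // res is now [-5, -1] for a signed TYPE

   Undefined operands are treated as VARYING, and unsupported tree codes
   make the result VARYING as well.  */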
1104 /* If the range of values taken by OP can be inferred after STMT executes,
1105 return the comparison code (COMP_CODE_P) and value (VAL_P) that
1106 describe the inferred range. Return true if a range could be
1107 inferred. */
1108
1109 bool
1110 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
1111 {
1112 *val_p = NULL_TREE;
1113 *comp_code_p = ERROR_MARK;
1114
1115 /* Do not attempt to infer anything in names that flow through
1116 abnormal edges. */
1117 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
1118 return false;
1119
1120 /* If STMT is the last statement of a basic block with no normal
1121 successors, there is no point inferring anything about any of its
1122 operands. We would not be able to find a proper insertion point
1123 for the assertion, anyway. */
1124 if (stmt_ends_bb_p (stmt))
1125 {
1126 edge_iterator ei;
1127 edge e;
1128
1129 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1130 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
1131 break;
1132 if (e == NULL)
1133 return false;
1134 }
1135
1136 if (infer_nonnull_range (stmt, op))
1137 {
1138 *val_p = build_int_cst (TREE_TYPE (op), 0);
1139 *comp_code_p = NE_EXPR;
1140 return true;
1141 }
1142
1143 return false;
1144 }
1145
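/* For example (hypothetical GIMPLE): after the statement

     *p_4 = 1;

   executes, p_4 must be non-NULL, so infer_value_range (stmt, p_4,
   &code, &val) returns true with code == NE_EXPR and val == 0, courtesy
   of infer_nonnull_range.  For operands occurring in abnormal PHIs, or
   for statements that end their block with no normal successor, nothing
   is inferred.  */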
1146 /* Dump assert_info structure. */
1147
1148 void
1149 dump_assert_info (FILE *file, const assert_info &assert)
1150 {
1151 fprintf (file, "Assert for: ");
1152 print_generic_expr (file, assert.name);
1153 fprintf (file, "\n\tPREDICATE: expr=[");
1154 print_generic_expr (file, assert.expr);
1155 fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
1156 fprintf (file, "val=[");
1157 print_generic_expr (file, assert.val);
1158 fprintf (file, "]\n\n");
1159 }
1160
1161 DEBUG_FUNCTION void
1162 debug (const assert_info &assert)
1163 {
1164 dump_assert_info (stderr, assert);
1165 }
1166
1167 /* Dump a vector of assert_info's. */
1168
1169 void
1170 dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
1171 {
1172 for (unsigned i = 0; i < asserts.length (); ++i)
1173 {
1174 dump_assert_info (file, asserts[i]);
1175 fprintf (file, "\n");
1176 }
1177 }
1178
1179 DEBUG_FUNCTION void
1180 debug (const vec<assert_info> &asserts)
1181 {
1182 dump_asserts_info (stderr, asserts);
1183 }
1184
1185 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
1186
1187 static void
1188 add_assert_info (vec<assert_info> &asserts,
1189 tree name, tree expr, enum tree_code comp_code, tree val)
1190 {
1191 assert_info info;
1192 info.comp_code = comp_code;
1193 info.name = name;
1194 if (TREE_OVERFLOW_P (val))
1195 val = drop_tree_overflow (val);
1196 info.val = val;
1197 info.expr = expr;
1198 asserts.safe_push (info);
1199 if (dump_enabled_p ())
1200 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
1201 "Adding assert for %T from %T %s %T\n",
1202 name, expr, op_symbol_code (comp_code), val);
1203 }
1204
1205 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
1206 Extract a suitable test code and value and store them into *CODE_P and
1207 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
1208
1209 If no extraction was possible, return FALSE, otherwise return TRUE.
1210
1211 If INVERT is true, then we invert the result stored into *CODE_P. */
1212
1213 static bool
1214 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
1215 tree cond_op0, tree cond_op1,
1216 bool invert, enum tree_code *code_p,
1217 tree *val_p)
1218 {
1219 enum tree_code comp_code;
1220 tree val;
1221
1222 /* Otherwise, we have a comparison of the form NAME COMP VAL
1223 or VAL COMP NAME. */
1224 if (name == cond_op1)
1225 {
1226 /* If the predicate is of the form VAL COMP NAME, flip
1227 COMP around because we need to register NAME as the
1228 first operand in the predicate. */
1229 comp_code = swap_tree_comparison (cond_code);
1230 val = cond_op0;
1231 }
1232 else if (name == cond_op0)
1233 {
1234 /* The comparison is of the form NAME COMP VAL, so the
1235 comparison code remains unchanged. */
1236 comp_code = cond_code;
1237 val = cond_op1;
1238 }
1239 else
1240 gcc_unreachable ();
1241
1242 /* Invert the comparison code as necessary. */
1243 if (invert)
1244 comp_code = invert_tree_comparison (comp_code, 0);
1245
1246 /* VRP only handles integral and pointer types. */
1247 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
1248 && ! POINTER_TYPE_P (TREE_TYPE (val)))
1249 return false;
1250
1251 /* Do not register always-false predicates.
1252 FIXME: this works around a limitation in fold() when dealing with
1253 enumerations. Given 'enum { N1, N2 } x;', fold will not
1254 fold 'if (x > N2)' to 'if (0)'. */
1255 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
1256 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
1257 {
1258 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
1259 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
1260
1261 if (comp_code == GT_EXPR
1262 && (!max
1263 || compare_values (val, max) == 0))
1264 return false;
1265
1266 if (comp_code == LT_EXPR
1267 && (!min
1268 || compare_values (val, min) == 0))
1269 return false;
1270 }
1271 *code_p = comp_code;
1272 *val_p = val;
1273 return true;
1274 }
1275
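/* For example (hypothetical operands): with NAME == x_3 and the
   condition 10 < x_3, the predicate is normalized by swapping the
   comparison, giving *CODE_P == GT_EXPR and *VAL_P == 10, i.e.
   x_3 > 10.  With INVERT true the result would instead be
   x_3 <= 10 (LE_EXPR).  */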
1276 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
1277 (otherwise return VAL). VAL and MASK must be zero-extended for
1278 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
1279 (to transform signed values into unsigned) and at the end xor
1280 SGNBIT back. */
1281
1282 wide_int
1283 masked_increment (const wide_int &val_in, const wide_int &mask,
1284 const wide_int &sgnbit, unsigned int prec)
1285 {
1286 wide_int bit = wi::one (prec), res;
1287 unsigned int i;
1288
1289 wide_int val = val_in ^ sgnbit;
1290 for (i = 0; i < prec; i++, bit += bit)
1291 {
1292 res = mask;
1293 if ((res & bit) == 0)
1294 continue;
1295 res = bit - 1;
1296 res = wi::bit_and_not (val + bit, res);
1297 res &= mask;
1298 if (wi::gtu_p (res, val))
1299 return res ^ sgnbit;
1300 }
1301 return val ^ sgnbit;
1302 }
1303
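/* Worked example (illustrative): masked_increment (5, 0xC, 0, 8) looks
   for the smallest RES > 5 with (RES & 0xC) == RES.  The candidates are
   {0, 4, 8, 12}; at the bit with value 4 the loop computes
   RES = ((5 + 4) & ~3) & 0xC = 8 > 5, so 8 is returned.  If no such
   value existed, VAL itself (xor'ed back with SGNBIT) would be
   returned.  */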
1304 /* Helper for overflow_comparison_p
1305
1306 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1307 OP1's defining statement to see if it ultimately has the form
1308 OP0 CODE (OP0 PLUS INTEGER_CST)
1309
1310 If so, return TRUE indicating this is an overflow test and store into
1311 *NEW_CST an updated constant that can be used in a narrowed range test.
1312
1313 REVERSED indicates if the comparison was originally:
1314
1315 OP1 CODE' OP0.
1316
1317 This affects how we build the updated constant. */
1318
1319 static bool
1320 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
1321 bool follow_assert_exprs, bool reversed, tree *new_cst)
1322 {
1323 /* See if this is a relational operation between two SSA_NAMES with
1324 unsigned, overflow wrapping values. If so, check it more deeply. */
1325 if ((code == LT_EXPR || code == LE_EXPR
1326 || code == GE_EXPR || code == GT_EXPR)
1327 && TREE_CODE (op0) == SSA_NAME
1328 && TREE_CODE (op1) == SSA_NAME
1329 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
1330 && TYPE_UNSIGNED (TREE_TYPE (op0))
1331 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
1332 {
1333 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
1334
1335 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
1336 if (follow_assert_exprs)
1337 {
1338 while (gimple_assign_single_p (op1_def)
1339 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
1340 {
1341 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
1342 if (TREE_CODE (op1) != SSA_NAME)
1343 break;
1344 op1_def = SSA_NAME_DEF_STMT (op1);
1345 }
1346 }
1347
1348 /* Now look at the defining statement of OP1 to see if it adds
1349 or subtracts a nonzero constant from another operand. */
1350 if (op1_def
1351 && is_gimple_assign (op1_def)
1352 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
1353 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
1354 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
1355 {
1356 tree target = gimple_assign_rhs1 (op1_def);
1357
1358 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
1359 for one where TARGET appears on the RHS. */
1360 if (follow_assert_exprs)
1361 {
1362 /* Now see if that "other operand" is op0, following the chain
1363 of ASSERT_EXPRs if necessary. */
1364 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
1365 while (op0 != target
1366 && gimple_assign_single_p (op0_def)
1367 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
1368 {
1369 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
1370 if (TREE_CODE (op0) != SSA_NAME)
1371 break;
1372 op0_def = SSA_NAME_DEF_STMT (op0);
1373 }
1374 }
1375
1376 /* If we did not find our target SSA_NAME, then this is not
1377 an overflow test. */
1378 if (op0 != target)
1379 return false;
1380
1381 tree type = TREE_TYPE (op0);
1382 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
1383 tree inc = gimple_assign_rhs2 (op1_def);
1384 if (reversed)
1385 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
1386 else
1387 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
1388 return true;
1389 }
1390 }
1391 return false;
1392 }
1393
1394 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1395 OP1's defining statement to see if it ultimately has the form
1396 OP0 CODE (OP0 PLUS INTEGER_CST)
1397
1398 If so, return TRUE indicating this is an overflow test and store into
1399 *NEW_CST an updated constant that can be used in a narrowed range test.
1400
1401 These statements are left as-is in the IL to facilitate discovery of
1402 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
1403 the alternate range representation is often useful within VRP. */
1404
1405 bool
1406 overflow_comparison_p (tree_code code, tree name, tree val,
1407 bool use_equiv_p, tree *new_cst)
1408 {
1409 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
1410 return true;
1411 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
1412 use_equiv_p, true, new_cst);
1413 }
1414
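/* For example (hypothetical unsigned SSA names): given

     y_2 = x_1 + 7;
     if (x_1 > y_2) ...

   the comparison only holds when the addition wrapped, so
   overflow_comparison_p (GT_EXPR, x_1, y_2, false, &new_cst) returns
   true with *NEW_CST = UINT_MAX - 7; the test is equivalent to the
   range test x_1 > UINT_MAX - 7.  */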
1415
1416 /* Try to register an edge assertion for SSA name NAME on edge E for
1417 the condition (COND_OP0 COND_CODE COND_OP1) guarding the conditional jump
1418 at the end of E->src. Invert the condition if INVERT is true. */
1419
1420 static void
1421 register_edge_assert_for_2 (tree name, edge e,
1422 enum tree_code cond_code,
1423 tree cond_op0, tree cond_op1, bool invert,
1424 vec<assert_info> &asserts)
1425 {
1426 tree val;
1427 enum tree_code comp_code;
1428
1429 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
1430 cond_op0,
1431 cond_op1,
1432 invert, &comp_code, &val))
1433 return;
1434
1435 /* Queue the assert. */
1436 tree x;
1437 if (overflow_comparison_p (comp_code, name, val, false, &x))
1438 {
1439 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
1440 ? GT_EXPR : LE_EXPR);
1441 add_assert_info (asserts, name, name, new_code, x);
1442 }
1443 add_assert_info (asserts, name, name, comp_code, val);
1444
1445 /* In the case of NAME <= CST and NAME being defined as
1446 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
1447 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
1448 This catches range and anti-range tests. */
1449 if ((comp_code == LE_EXPR
1450 || comp_code == GT_EXPR)
1451 && TREE_CODE (val) == INTEGER_CST
1452 && TYPE_UNSIGNED (TREE_TYPE (val)))
1453 {
1454 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1455 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
1456
1457 /* Extract CST2 from the (optional) addition. */
1458 if (is_gimple_assign (def_stmt)
1459 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
1460 {
1461 name2 = gimple_assign_rhs1 (def_stmt);
1462 cst2 = gimple_assign_rhs2 (def_stmt);
1463 if (TREE_CODE (name2) == SSA_NAME
1464 && TREE_CODE (cst2) == INTEGER_CST)
1465 def_stmt = SSA_NAME_DEF_STMT (name2);
1466 }
1467
1468 /* Extract NAME2 from the (optional) sign-changing cast. */
1469 if (gassign *ass = dyn_cast <gassign *> (def_stmt))
1470 {
1471 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass))
1472 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (ass)))
1473 && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (ass)))
1474 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (ass)))))
1475 name3 = gimple_assign_rhs1 (ass);
1476 }
1477
1478 /* If name3 is used later, create an ASSERT_EXPR for it. */
1479 if (name3 != NULL_TREE
1480 && TREE_CODE (name3) == SSA_NAME
1481 && (cst2 == NULL_TREE
1482 || TREE_CODE (cst2) == INTEGER_CST)
1483 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
1484 {
1485 tree tmp;
1486
1487 /* Build an expression for the range test. */
1488 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
1489 if (cst2 != NULL_TREE)
1490 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1491 add_assert_info (asserts, name3, tmp, comp_code, val);
1492 }
1493
1494 /* If name2 is used later, create an ASSERT_EXPR for it. */
1495 if (name2 != NULL_TREE
1496 && TREE_CODE (name2) == SSA_NAME
1497 && TREE_CODE (cst2) == INTEGER_CST
1498 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
1499 {
1500 tree tmp;
1501
1502 /* Build an expression for the range test. */
1503 tmp = name2;
1504 if (TREE_TYPE (name) != TREE_TYPE (name2))
1505 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
1506 if (cst2 != NULL_TREE)
1507 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1508 add_assert_info (asserts, name2, tmp, comp_code, val);
1509 }
1510 }
1511
1512 /* In the case of post-in/decrement tests like if (i++) ... and uses
1513 of the in/decremented value on the edge the extra name we want to
1514 assert for is not on the def chain of the name compared. Instead
1515 it is in the set of use stmts.
1516 Similar cases happen for conversions that were simplified through
1517 fold_{sign_changed,widened}_comparison. */
1518 if ((comp_code == NE_EXPR
1519 || comp_code == EQ_EXPR)
1520 && TREE_CODE (val) == INTEGER_CST)
1521 {
1522 imm_use_iterator ui;
1523 gimple *use_stmt;
1524 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
1525 {
1526 if (!is_gimple_assign (use_stmt))
1527 continue;
1528
1529 /* Restrict to use statements whose block dominates E->src. */
1530 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
1531 continue;
1532
1533 tree name2 = gimple_assign_lhs (use_stmt);
1534 if (TREE_CODE (name2) != SSA_NAME)
1535 continue;
1536
1537 enum tree_code code = gimple_assign_rhs_code (use_stmt);
1538 tree cst;
1539 if (code == PLUS_EXPR
1540 || code == MINUS_EXPR)
1541 {
1542 cst = gimple_assign_rhs2 (use_stmt);
1543 if (TREE_CODE (cst) != INTEGER_CST)
1544 continue;
1545 cst = int_const_binop (code, val, cst);
1546 }
1547 else if (CONVERT_EXPR_CODE_P (code))
1548 {
1549 /* For truncating conversions we cannot record
1550 an inequality. */
1551 if (comp_code == NE_EXPR
1552 && (TYPE_PRECISION (TREE_TYPE (name2))
1553 < TYPE_PRECISION (TREE_TYPE (name))))
1554 continue;
1555 cst = fold_convert (TREE_TYPE (name2), val);
1556 }
1557 else
1558 continue;
1559
1560 if (TREE_OVERFLOW_P (cst))
1561 cst = drop_tree_overflow (cst);
1562 add_assert_info (asserts, name2, name2, comp_code, cst);
1563 }
1564 }
1565
1566 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
1567 && TREE_CODE (val) == INTEGER_CST)
1568 {
1569 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1570 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
1571 tree val2 = NULL_TREE;
1572 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
1573 wide_int mask = wi::zero (prec);
1574 unsigned int nprec = prec;
1575 enum tree_code rhs_code = ERROR_MARK;
1576
1577 if (is_gimple_assign (def_stmt))
1578 rhs_code = gimple_assign_rhs_code (def_stmt);
1579
1580 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
1581 assert that A != CST1 -+ CST2. */
1582 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
1583 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
1584 {
1585 tree op0 = gimple_assign_rhs1 (def_stmt);
1586 tree op1 = gimple_assign_rhs2 (def_stmt);
1587 if (TREE_CODE (op0) == SSA_NAME
1588 && TREE_CODE (op1) == INTEGER_CST)
1589 {
1590 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
1591 ? MINUS_EXPR : PLUS_EXPR);
1592 op1 = int_const_binop (reverse_op, val, op1);
1593 if (TREE_OVERFLOW (op1))
1594 op1 = drop_tree_overflow (op1);
1595 add_assert_info (asserts, op0, op0, comp_code, op1);
1596 }
1597 }
1598
1599 /* Add asserts for NAME cmp CST and NAME being defined
1600 as NAME = (int) NAME2. */
1601 if (!TYPE_UNSIGNED (TREE_TYPE (val))
1602 && (comp_code == LE_EXPR || comp_code == LT_EXPR
1603 || comp_code == GT_EXPR || comp_code == GE_EXPR)
1604 && gimple_assign_cast_p (def_stmt))
1605 {
1606 name2 = gimple_assign_rhs1 (def_stmt);
1607 if (CONVERT_EXPR_CODE_P (rhs_code)
1608 && TREE_CODE (name2) == SSA_NAME
1609 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1610 && TYPE_UNSIGNED (TREE_TYPE (name2))
1611 && prec == TYPE_PRECISION (TREE_TYPE (name2))
1612 && (comp_code == LE_EXPR || comp_code == GT_EXPR
1613 || !tree_int_cst_equal (val,
1614 TYPE_MIN_VALUE (TREE_TYPE (val)))))
1615 {
1616 tree tmp, cst;
1617 enum tree_code new_comp_code = comp_code;
1618
1619 cst = fold_convert (TREE_TYPE (name2),
1620 TYPE_MIN_VALUE (TREE_TYPE (val)));
1621 /* Build an expression for the range test. */
1622 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
1623 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
1624 fold_convert (TREE_TYPE (name2), val));
1625 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1626 {
1627 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
1628 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
1629 build_int_cst (TREE_TYPE (name2), 1));
1630 }
1631 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
1632 }
1633 }
1634
1635 /* Add asserts for NAME cmp CST and NAME being defined as
1636 NAME = NAME2 >> CST2.
1637
1638 Extract CST2 from the right shift. */
1639 if (rhs_code == RSHIFT_EXPR)
1640 {
1641 name2 = gimple_assign_rhs1 (def_stmt);
1642 cst2 = gimple_assign_rhs2 (def_stmt);
1643 if (TREE_CODE (name2) == SSA_NAME
1644 && tree_fits_uhwi_p (cst2)
1645 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1646 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
1647 && type_has_mode_precision_p (TREE_TYPE (val)))
1648 {
1649 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
1650 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
1651 }
1652 }
1653 if (val2 != NULL_TREE
1654 && TREE_CODE (val2) == INTEGER_CST
1655 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
1656 TREE_TYPE (val),
1657 val2, cst2), val))
1658 {
1659 enum tree_code new_comp_code = comp_code;
1660 tree tmp, new_val;
1661
1662 tmp = name2;
1663 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
1664 {
1665 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
1666 {
1667 tree type = build_nonstandard_integer_type (prec, 1);
1668 tmp = build1 (NOP_EXPR, type, name2);
1669 val2 = fold_convert (type, val2);
1670 }
1671 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
1672 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
1673 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
1674 }
1675 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1676 {
1677 wide_int minval
1678 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1679 new_val = val2;
1680 if (minval == wi::to_wide (new_val))
1681 new_val = NULL_TREE;
1682 }
1683 else
1684 {
1685 wide_int maxval
1686 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1687 mask |= wi::to_wide (val2);
1688 if (wi::eq_p (mask, maxval))
1689 new_val = NULL_TREE;
1690 else
1691 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
1692 }
1693
1694 if (new_val)
1695 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
1696 }
1697
1698 /* If we have a conversion that doesn't change the value of the source
1699 simply register the same assert for it. */
1700 if (CONVERT_EXPR_CODE_P (rhs_code))
1701 {
1702 value_range vr;
1703 tree rhs1 = gimple_assign_rhs1 (def_stmt);
1704 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1705 && TREE_CODE (rhs1) == SSA_NAME
1706 /* Make sure the relation preserves the upper/lower boundary of
1707 the range conservatively. */
1708 && (comp_code == NE_EXPR
1709 || comp_code == EQ_EXPR
1710 || (TYPE_SIGN (TREE_TYPE (name))
1711 == TYPE_SIGN (TREE_TYPE (rhs1)))
1712 || ((comp_code == LE_EXPR
1713 || comp_code == LT_EXPR)
1714 && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
1715 || ((comp_code == GE_EXPR
1716 || comp_code == GT_EXPR)
1717 && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
1718 /* And the conversion does not alter the value we compare
1719 against and all values in rhs1 can be represented in
1720 the converted to type. */
1721 && int_fits_type_p (val, TREE_TYPE (rhs1))
1722 && ((TYPE_PRECISION (TREE_TYPE (name))
1723 > TYPE_PRECISION (TREE_TYPE (rhs1)))
1724 || ((get_range_query (cfun)->range_of_expr (vr, rhs1)
1725 && vr.kind () == VR_RANGE)
1726 && wi::fits_to_tree_p
1727 (widest_int::from (vr.lower_bound (),
1728 TYPE_SIGN (TREE_TYPE (rhs1))),
1729 TREE_TYPE (name))
1730 && wi::fits_to_tree_p
1731 (widest_int::from (vr.upper_bound (),
1732 TYPE_SIGN (TREE_TYPE (rhs1))),
1733 TREE_TYPE (name)))))
1734 add_assert_info (asserts, rhs1, rhs1,
1735 comp_code, fold_convert (TREE_TYPE (rhs1), val));
1736 }
1737
1738 /* Add asserts for NAME cmp CST and NAME being defined as
1739 NAME = NAME2 & CST2.
1740
1741 Extract CST2 from the and.
1742
1743 Also handle
1744 NAME = (unsigned) NAME2;
1745 casts where NAME's type is unsigned and has smaller precision
1746 than NAME2's type as if it was NAME = NAME2 & MASK. */
1747 names[0] = NULL_TREE;
1748 names[1] = NULL_TREE;
1749 cst2 = NULL_TREE;
1750 if (rhs_code == BIT_AND_EXPR
1751 || (CONVERT_EXPR_CODE_P (rhs_code)
1752 && INTEGRAL_TYPE_P (TREE_TYPE (val))
1753 && TYPE_UNSIGNED (TREE_TYPE (val))
1754 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1755 > prec))
1756 {
1757 name2 = gimple_assign_rhs1 (def_stmt);
1758 if (rhs_code == BIT_AND_EXPR)
1759 cst2 = gimple_assign_rhs2 (def_stmt);
1760 else
1761 {
1762 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
1763 nprec = TYPE_PRECISION (TREE_TYPE (name2));
1764 }
1765 if (TREE_CODE (name2) == SSA_NAME
1766 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1767 && TREE_CODE (cst2) == INTEGER_CST
1768 && !integer_zerop (cst2)
1769 && (nprec > 1
1770 || TYPE_UNSIGNED (TREE_TYPE (val))))
1771 {
1772 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
1773 if (gimple_assign_cast_p (def_stmt2))
1774 {
1775 names[1] = gimple_assign_rhs1 (def_stmt2);
1776 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
1777 || TREE_CODE (names[1]) != SSA_NAME
1778 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
1779 || (TYPE_PRECISION (TREE_TYPE (name2))
1780 != TYPE_PRECISION (TREE_TYPE (names[1]))))
1781 names[1] = NULL_TREE;
1782 }
1783 names[0] = name2;
1784 }
1785 }
1786 if (names[0] || names[1])
1787 {
1788 wide_int minv, maxv, valv, cst2v;
1789 wide_int tem, sgnbit;
1790 bool valid_p = false, valn, cst2n;
1791 enum tree_code ccode = comp_code;
1792
1793 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
1794 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
1795 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
1796 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
1797 /* If CST2 doesn't have most significant bit set,
1798 but VAL is negative, we have comparison like
1799 if ((x & 0x123) > -4) (always true). Just give up. */
1800 if (!cst2n && valn)
1801 ccode = ERROR_MARK;
1802 if (cst2n)
1803 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1804 else
1805 sgnbit = wi::zero (nprec);
1806 minv = valv & cst2v;
1807 switch (ccode)
1808 {
1809 case EQ_EXPR:
1810 /* Minimum unsigned value for equality is VAL & CST2
1811 (should be equal to VAL, otherwise we probably should
1812 have folded the comparison into false) and
1813 maximum unsigned value is VAL | ~CST2. */
1814 maxv = valv | ~cst2v;
1815 valid_p = true;
1816 break;
1817
1818 case NE_EXPR:
1819 tem = valv | ~cst2v;
1820 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
1821 if (valv == 0)
1822 {
1823 cst2n = false;
1824 sgnbit = wi::zero (nprec);
1825 goto gt_expr;
1826 }
1827 /* If (VAL | ~CST2) is all ones, handle it as
1828 (X & CST2) < VAL. */
1829 if (tem == -1)
1830 {
1831 cst2n = false;
1832 valn = false;
1833 sgnbit = wi::zero (nprec);
1834 goto lt_expr;
1835 }
1836 if (!cst2n && wi::neg_p (cst2v))
1837 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1838 if (sgnbit != 0)
1839 {
1840 if (valv == sgnbit)
1841 {
1842 cst2n = true;
1843 valn = true;
1844 goto gt_expr;
1845 }
1846 if (tem == wi::mask (nprec - 1, false, nprec))
1847 {
1848 cst2n = true;
1849 goto lt_expr;
1850 }
1851 if (!cst2n)
1852 sgnbit = wi::zero (nprec);
1853 }
1854 break;
1855
1856 case GE_EXPR:
1857 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
1858 is VAL and maximum unsigned value is ~0. For signed
1859 comparison, if CST2 doesn't have the most significant bit
1860 set, handle it similarly. If CST2 has the MSB set,
1861 the minimum is the same and the maximum is ~0U/2. */
1862 if (minv != valv)
1863 {
1864 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
1865 VAL. */
1866 minv = masked_increment (valv, cst2v, sgnbit, nprec);
1867 if (minv == valv)
1868 break;
1869 }
1870 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1871 valid_p = true;
1872 break;
1873
1874 case GT_EXPR:
1875 gt_expr:
1876 /* Find out smallest MINV where MINV > VAL
1877 && (MINV & CST2) == MINV, if any. If VAL is signed and
1878 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
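	  /* E.g. for VAL 0x40, CST2 0xffffff00 and SGNBIT zero, the
	     smallest such MINV is 0x100.  */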
1879 minv = masked_increment (valv, cst2v, sgnbit, nprec);
1880 if (minv == valv)
1881 break;
1882 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1883 valid_p = true;
1884 break;
1885
1886 case LE_EXPR:
1887 /* Minimum unsigned value for <= is 0 and maximum
1888 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
1889 Otherwise, find smallest VAL2 where VAL2 > VAL
1890 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1891 as maximum.
1892 For a signed comparison, if CST2 doesn't have the most
1893 significant bit set, handle it similarly. If CST2 has the
1894 MSB set, the maximum is the same and the minimum is INT_MIN. */
1895 if (minv == valv)
1896 maxv = valv;
1897 else
1898 {
1899 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1900 if (maxv == valv)
1901 break;
1902 maxv -= 1;
1903 }
1904 maxv |= ~cst2v;
1905 minv = sgnbit;
1906 valid_p = true;
1907 break;
1908
1909 case LT_EXPR:
1910 lt_expr:
1911 /* Minimum unsigned value for < is 0 and maximum
1912 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
1913 Otherwise, find smallest VAL2 where VAL2 > VAL
1914 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1915 as maximum.
1916 For a signed comparison, if CST2 doesn't have the most
1917 significant bit set, handle it similarly. If CST2 has the
1918 MSB set, the maximum is the same and the minimum is INT_MIN. */
1919 if (minv == valv)
1920 {
1921 if (valv == sgnbit)
1922 break;
1923 maxv = valv;
1924 }
1925 else
1926 {
1927 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1928 if (maxv == valv)
1929 break;
1930 }
1931 maxv -= 1;
1932 maxv |= ~cst2v;
1933 minv = sgnbit;
1934 valid_p = true;
1935 break;
1936
1937 default:
1938 break;
1939 }
1940 if (valid_p
1941 && (maxv - minv) != -1)
1942 {
1943 tree tmp, new_val, type;
1944 int i;
1945
1946 for (i = 0; i < 2; i++)
1947 if (names[i])
1948 {
1949 wide_int maxv2 = maxv;
1950 tmp = names[i];
1951 type = TREE_TYPE (names[i]);
1952 if (!TYPE_UNSIGNED (type))
1953 {
1954 type = build_nonstandard_integer_type (nprec, 1);
1955 tmp = build1 (NOP_EXPR, type, names[i]);
1956 }
1957 if (minv != 0)
1958 {
1959 tmp = build2 (PLUS_EXPR, type, tmp,
1960 wide_int_to_tree (type, -minv));
1961 maxv2 = maxv - minv;
1962 }
1963 new_val = wide_int_to_tree (type, maxv2);
1964 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
1965 }
1966 }
1967 }
1968 }
1969 }
1970
1971 /* OP is an operand of a truth value expression which is known to have
1972 a particular value. Register any asserts for OP and for any
1973 operands in OP's defining statement.
1974
1975 If CODE is EQ_EXPR, then we want to register OP is zero (false),
1976 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
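   /* E.g. if OP is p_5 defined as p_5 = x_1 < y_2 and CODE is NE_EXPR
      (p_5 is known to be true), then besides the p_5 != 0 assert we also
      register asserts for x_1 and y_2 derived from x_1 < y_2 via
      register_edge_assert_for_2.  */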
1977
1978 static void
1979 register_edge_assert_for_1 (tree op, enum tree_code code,
1980 edge e, vec<assert_info> &asserts)
1981 {
1982 gimple *op_def;
1983 tree val;
1984 enum tree_code rhs_code;
1985
1986 /* We only care about SSA_NAMEs. */
1987 if (TREE_CODE (op) != SSA_NAME)
1988 return;
1989
1990 /* We know that OP will have a zero or nonzero value. */
1991 val = build_int_cst (TREE_TYPE (op), 0);
1992 add_assert_info (asserts, op, op, code, val);
1993
1994 /* Now look at how OP is set. If it's set from a comparison,
1995 a truth operation or some bit operations, then we may be able
1996 to register information about the operands of that assignment. */
1997 op_def = SSA_NAME_DEF_STMT (op);
1998 if (gimple_code (op_def) != GIMPLE_ASSIGN)
1999 return;
2000
2001 rhs_code = gimple_assign_rhs_code (op_def);
2002
2003 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
2004 {
2005 bool invert = (code == EQ_EXPR);
2006 tree op0 = gimple_assign_rhs1 (op_def);
2007 tree op1 = gimple_assign_rhs2 (op_def);
2008
2009 if (TREE_CODE (op0) == SSA_NAME)
2010 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
2011 if (TREE_CODE (op1) == SSA_NAME)
2012 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
2013 }
2014 else if ((code == NE_EXPR
2015 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
2016 || (code == EQ_EXPR
2017 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
2018 {
2019 /* Recurse on each operand. */
2020 tree op0 = gimple_assign_rhs1 (op_def);
2021 tree op1 = gimple_assign_rhs2 (op_def);
2022 if (TREE_CODE (op0) == SSA_NAME
2023 && has_single_use (op0))
2024 register_edge_assert_for_1 (op0, code, e, asserts);
2025 if (TREE_CODE (op1) == SSA_NAME
2026 && has_single_use (op1))
2027 register_edge_assert_for_1 (op1, code, e, asserts);
2028 }
2029 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
2030 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
2031 {
2032 /* Recurse, flipping CODE. */
2033 code = invert_tree_comparison (code, false);
2034 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2035 }
2036 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
2037 {
2038 /* Recurse through the copy. */
2039 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2040 }
2041 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
2042 {
2043 /* Recurse through the type conversion, unless it is a narrowing
2044 conversion or a conversion from a non-integral type. */
2045 tree rhs = gimple_assign_rhs1 (op_def);
2046 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2047 && (TYPE_PRECISION (TREE_TYPE (rhs))
2048 <= TYPE_PRECISION (TREE_TYPE (op))))
2049 register_edge_assert_for_1 (rhs, code, e, asserts);
2050 }
2051 }
2052
2053 /* Check whether the comparison
2054 NAME COND_OP INTEGER_CST
2055 has the form
2056 (X & 11...100..0) COND_OP XX...X00...0
2057 Such a comparison can yield assertions like
2058 X >= XX...X00...0
2059 X <= XX...X11...1
2060 when COND_OP is EQ_EXPR, or
2061 X < XX...X00...0
2062 X > XX...X11...1
2063 when COND_OP is NE_EXPR. */
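   /* E.g. for an unsigned short x_1, (x_1 & 0xff00) == 0x1200 yields
      x_1 >= 0x1200 and x_1 <= 0x12ff, while (x_1 & 0xff00) != 0 yields
      only x_1 > 0xff; the other alternative, x_1 < 0, can never be true
      for an unsigned value and is dropped.  */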
2064
2065 static bool
2066 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
2067 tree *new_name, tree *low, enum tree_code *low_code,
2068 tree *high, enum tree_code *high_code)
2069 {
2070 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2071
2072 if (!is_gimple_assign (def_stmt)
2073 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2074 return false;
2075
2076 tree t = gimple_assign_rhs1 (def_stmt);
2077 tree maskt = gimple_assign_rhs2 (def_stmt);
2078 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
2079 return false;
2080
2081 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
2082 wide_int inv_mask = ~mask;
2083 /* An all-zeros or all-ones mask should have been folded away by now, so don't bother optimizing. */
2084 if (mask == 0 || inv_mask == 0)
2085 return false;
2086
2087 /* Assume VALT is INTEGER_CST. */
2088 wi::tree_to_wide_ref val = wi::to_wide (valt);
2089
2090 if ((inv_mask & (inv_mask + 1)) != 0
2091 || (val & mask) != val)
2092 return false;
2093
2094 bool is_range = cond_code == EQ_EXPR;
2095
2096 tree type = TREE_TYPE (t);
2097 wide_int min = wi::min_value (type),
2098 max = wi::max_value (type);
2099
2100 if (is_range)
2101 {
2102 *low_code = val == min ? ERROR_MARK : GE_EXPR;
2103 *high_code = val == max ? ERROR_MARK : LE_EXPR;
2104 }
2105 else
2106 {
2107 /* We can still generate an assertion if one of the alternatives
2108 is known to always be false. */
2109 if (val == min)
2110 {
2111 *low_code = (enum tree_code) 0;
2112 *high_code = GT_EXPR;
2113 }
2114 else if ((val | inv_mask) == max)
2115 {
2116 *low_code = LT_EXPR;
2117 *high_code = (enum tree_code) 0;
2118 }
2119 else
2120 return false;
2121 }
2122
2123 *new_name = t;
2124 *low = wide_int_to_tree (type, val);
2125 *high = wide_int_to_tree (type, val | inv_mask);
2126
2127 return true;
2128 }
2129
2130 /* Try to register an edge assertion for SSA name NAME on edge E for
2131 the condition COND_OP0 COND_CODE COND_OP1 that guards E. Any
2132 assertions found are added to ASSERTS. */
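   /* E.g. for if (x_1 > 4) this registers x_1 > 4 on the true edge and
      x_1 <= 4 on the false edge.  If instead the condition is x_1 != 0
      and x_1 is defined as x_1 = y_2 & z_3, asserts y_2 != 0 and z_3 != 0
      are also registered on the x_1 != 0 edge below.  */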
2133
2134 void
2135 register_edge_assert_for (tree name, edge e,
2136 enum tree_code cond_code, tree cond_op0,
2137 tree cond_op1, vec<assert_info> &asserts)
2138 {
2139 tree val;
2140 enum tree_code comp_code;
2141 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
2142
2143 /* Do not attempt to infer anything about names that flow through
2144 abnormal edges. */
2145 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
2146 return;
2147
2148 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2149 cond_op0, cond_op1,
2150 is_else_edge,
2151 &comp_code, &val))
2152 return;
2153
2154 /* Register ASSERT_EXPRs for name. */
2155 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
2156 cond_op1, is_else_edge, asserts);
2157
2158
2159 /* If COND is effectively an equality test of an SSA_NAME against
2160 the value zero or one, then we may be able to assert values
2161 for SSA_NAMEs which flow into COND. */
2162
2163 /* In the case of NAME == 1 or NAME != 0, if the defining statement
2164 of NAME is a BIT_AND_EXPR, we can assert that both operands of the
2165 BIT_AND_EXPR have a nonzero value. */
2166 if ((comp_code == EQ_EXPR && integer_onep (val))
2167 || (comp_code == NE_EXPR && integer_zerop (val)))
2168 {
2169 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2170
2171 if (is_gimple_assign (def_stmt)
2172 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
2173 {
2174 tree op0 = gimple_assign_rhs1 (def_stmt);
2175 tree op1 = gimple_assign_rhs2 (def_stmt);
2176 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
2177 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
2178 }
2179 else if (is_gimple_assign (def_stmt)
2180 && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2181 == tcc_comparison))
2182 register_edge_assert_for_1 (name, NE_EXPR, e, asserts);
2183 }
2184
2185 /* In the case of NAME == 0 or NAME != 1, if the defining statement
2186 of NAME is a BIT_IOR_EXPR, we can assert that both operands of the
2187 BIT_IOR_EXPR have a zero value. */
2188 if ((comp_code == EQ_EXPR && integer_zerop (val))
2189 || (comp_code == NE_EXPR
2190 && integer_onep (val)
2191 && TYPE_PRECISION (TREE_TYPE (name)) == 1))
2192 {
2193 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2194
2195 /* For a BIT_IOR_EXPR, both operands necessarily have a zero value
2196 only if NAME == 0, or if the type precision is one. */
2197 if (is_gimple_assign (def_stmt)
2198 && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
2199 {
2200 tree op0 = gimple_assign_rhs1 (def_stmt);
2201 tree op1 = gimple_assign_rhs2 (def_stmt);
2202 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
2203 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
2204 }
2205 else if (is_gimple_assign (def_stmt)
2206 && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2207 == tcc_comparison))
2208 register_edge_assert_for_1 (name, EQ_EXPR, e, asserts);
2209 }
2210
2211 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
2212 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2213 && TREE_CODE (val) == INTEGER_CST)
2214 {
2215 enum tree_code low_code, high_code;
2216 tree low, high;
2217 if (is_masked_range_test (name, val, comp_code, &name, &low,
2218 &low_code, &high, &high_code))
2219 {
2220 if (low_code != ERROR_MARK)
2221 register_edge_assert_for_2 (name, e, low_code, name,
2222 low, /*invert*/false, asserts);
2223 if (high_code != ERROR_MARK)
2224 register_edge_assert_for_2 (name, e, high_code, name,
2225 high, /*invert*/false, asserts);
2226 }
2227 }
2228 }
2229
2230 /* Handle
2231 _4 = x_3 & 31;
2232 if (_4 != 0)
2233 goto <bb 6>;
2234 else
2235 goto <bb 7>;
2236 <bb 6>:
2237 __builtin_unreachable ();
2238 <bb 7>:
2239 x_5 = ASSERT_EXPR <x_3, ...>;
2240 If x_3 has no other immediate uses (checked by the caller) and
2241 VAR is the x_3 var from the ASSERT_EXPR, we can clear the low 5 bits
2242 from the non-zero bitmask. */
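   /* Since the other arm of the test above ends in __builtin_unreachable,
      x_3 & 31 is in fact zero on every path that matters, so clearing the
      low 5 bits in the global nonzero-bits mask of x_3 (making it a known
      multiple of 32 for later passes) is safe.  */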
2243
2244 void
2245 maybe_set_nonzero_bits (edge e, tree var)
2246 {
2247 basic_block cond_bb = e->src;
2248 gimple *stmt = last_stmt (cond_bb);
2249 tree cst;
2250
2251 if (stmt == NULL
2252 || gimple_code (stmt) != GIMPLE_COND
2253 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
2254 ? EQ_EXPR : NE_EXPR)
2255 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
2256 || !integer_zerop (gimple_cond_rhs (stmt)))
2257 return;
2258
2259 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2260 if (!is_gimple_assign (stmt)
2261 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
2262 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
2263 return;
2264 if (gimple_assign_rhs1 (stmt) != var)
2265 {
2266 gimple *stmt2;
2267
2268 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
2269 return;
2270 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2271 if (!gimple_assign_cast_p (stmt2)
2272 || gimple_assign_rhs1 (stmt2) != var
2273 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
2274 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
2275 != TYPE_PRECISION (TREE_TYPE (var))))
2276 return;
2277 }
2278 cst = gimple_assign_rhs2 (stmt);
2279 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
2280 wi::to_wide (cst)));
2281 }
2282
2283 /* Return true if STMT is interesting for VRP. */
2284
2285 bool
2286 stmt_interesting_for_vrp (gimple *stmt)
2287 {
2288 if (gimple_code (stmt) == GIMPLE_PHI)
2289 {
2290 tree res = gimple_phi_result (stmt);
2291 return (!virtual_operand_p (res)
2292 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
2293 || POINTER_TYPE_P (TREE_TYPE (res))));
2294 }
2295 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
2296 {
2297 tree lhs = gimple_get_lhs (stmt);
2298
2299 /* In general, assignments with virtual operands are not useful
2300 for deriving ranges, with the obvious exception of calls to
2301 builtin functions. */
2302 if (lhs && TREE_CODE (lhs) == SSA_NAME
2303 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
2304 || POINTER_TYPE_P (TREE_TYPE (lhs)))
2305 && (is_gimple_call (stmt)
2306 || !gimple_vuse (stmt)))
2307 return true;
2308 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
2309 switch (gimple_call_internal_fn (stmt))
2310 {
2311 case IFN_ADD_OVERFLOW:
2312 case IFN_SUB_OVERFLOW:
2313 case IFN_MUL_OVERFLOW:
2314 case IFN_ATOMIC_COMPARE_EXCHANGE:
2315 /* These internal calls return a _Complex integer type,
2316 but are interesting to VRP nevertheless. */
2317 if (lhs && TREE_CODE (lhs) == SSA_NAME)
2318 return true;
2319 break;
2320 default:
2321 break;
2322 }
2323 }
2324 else if (gimple_code (stmt) == GIMPLE_COND
2325 || gimple_code (stmt) == GIMPLE_SWITCH)
2326 return true;
2327
2328 return false;
2329 }
2330
2331 /* Search the case labels of switch statement STMT for the index *IDX of
2332 the CASE_LABEL that includes the value VAL. The search is restricted
2333 to the range [START_IDX, n - 1] where n is the number of labels.
2334
2335 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
2336 returned.
2337
2338 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
2339 it is placed in IDX and false is returned.
2340
2341 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
2342 returned. */
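   /* E.g. for a switch with labels <default>, case 3:, case 5 ... 7: and
      case 10: (n == 4), looking up VAL 6 from START_IDX 1 stores the index
      of the 5 ... 7 label and returns true; looking up 8 stores the index
      of the case 10 label and returns false; looking up 11 stores 4 and
      returns false.  */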
2343
2344 bool
2345 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
2346 {
2347 size_t n = gimple_switch_num_labels (stmt);
2348 size_t low, high;
2349
2350 /* Find case label for minimum of the value range or the next one.
2351 At each iteration we are searching in [low, high - 1]. */
2352
2353 for (low = start_idx, high = n; high != low; )
2354 {
2355 tree t;
2356 int cmp;
2357 /* Note that i != high, so we never ask for n. */
2358 size_t i = (high + low) / 2;
2359 t = gimple_switch_label (stmt, i);
2360
2361 /* Cache the result of comparing CASE_LOW and val. */
2362 cmp = tree_int_cst_compare (CASE_LOW (t), val);
2363
2364 if (cmp == 0)
2365 {
2366 /* Ranges cannot be empty. */
2367 *idx = i;
2368 return true;
2369 }
2370 else if (cmp > 0)
2371 high = i;
2372 else
2373 {
2374 low = i + 1;
2375 if (CASE_HIGH (t) != NULL
2376 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
2377 {
2378 *idx = i;
2379 return true;
2380 }
2381 }
2382 }
2383
2384 *idx = high;
2385 return false;
2386 }
2387
2388 /* Search the case labels of switch statement STMT for the range of
2389 CASE_LABELs used for values between MIN and MAX. The first index is
2390 placed in MIN_IDX and the last index in MAX_IDX. If the range of
2391 CASE_LABELs is empty then MAX_IDX < MIN_IDX.
2392 Returns true if the default label is not needed. */
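   /* E.g. for labels case 3:, case 4 ... 6: and case 7: with MIN 3 and
      MAX 7 this sets *MIN_IDX to 1 and *MAX_IDX to 3 and returns true,
      because the labels cover [3, 7] without gaps; with MIN 3 and MAX 9
      the values 8 and 9 still need the default label, so false is
      returned.  */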
2393
2394 bool
2395 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
2396 size_t *max_idx)
2397 {
2398 size_t i, j;
2399 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
2400 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
2401
2402 if (i == j
2403 && min_take_default
2404 && max_take_default)
2405 {
2406 /* Only the default case label is reached.
2407 Return an empty range. */
2408 *min_idx = 1;
2409 *max_idx = 0;
2410 return false;
2411 }
2412 else
2413 {
2414 bool take_default = min_take_default || max_take_default;
2415 tree low, high;
2416 size_t k;
2417
2418 if (max_take_default)
2419 j--;
2420
2421 /* If the case label range is contiguous, we do not need
2422 the default case label. Verify that. */
2423 high = CASE_LOW (gimple_switch_label (stmt, i));
2424 if (CASE_HIGH (gimple_switch_label (stmt, i)))
2425 high = CASE_HIGH (gimple_switch_label (stmt, i));
2426 for (k = i + 1; k <= j; ++k)
2427 {
2428 low = CASE_LOW (gimple_switch_label (stmt, k));
2429 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
2430 {
2431 take_default = true;
2432 break;
2433 }
2434 high = low;
2435 if (CASE_HIGH (gimple_switch_label (stmt, k)))
2436 high = CASE_HIGH (gimple_switch_label (stmt, k));
2437 }
2438
2439 *min_idx = i;
2440 *max_idx = j;
2441 return !take_default;
2442 }
2443 }
2444
2445 /* Given a SWITCH_STMT, return the case label that encompasses the
2446 known possible values for the switch operand. RANGE_OF_OP is a
2447 range for the known values of the switch operand. */
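   /* E.g. if RANGE_OF_OP is [5, 6] and the switch has a label
      case 4 ... 7:, that label is returned; if RANGE_OF_OP is [100, 200]
      and every label is below 50, the default label is returned; in the
      remaining cases NULL_TREE is returned.  */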
2448
2449 tree
2450 find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
2451 {
2452 if (range_of_op->undefined_p ()
2453 || range_of_op->varying_p ()
2454 || range_of_op->symbolic_p ())
2455 return NULL_TREE;
2456
2457 size_t i, j;
2458 tree op = gimple_switch_index (switch_stmt);
2459 tree type = TREE_TYPE (op);
2460 tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
2461 tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
2462 find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
2463 if (i == j)
2464 {
2465 /* Look for exactly one label that encompasses the range of
2466 the operand. */
2467 tree label = gimple_switch_label (switch_stmt, i);
2468 tree case_high
2469 = CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
2470 int_range_max label_range (CASE_LOW (label), case_high);
2471 if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2472 range_cast (label_range, range_of_op->type ());
2473 label_range.intersect (*range_of_op);
2474 if (label_range == *range_of_op)
2475 return label;
2476 }
2477 else if (i > j)
2478 {
2479 /* If no case labels cover the operand's range, take the default. */
2480 return gimple_switch_label (switch_stmt, 0);
2481 }
2482 else
2483 {
2484 /* Otherwise, there are various labels that can encompass
2485 the range of the operand. In that case, see if the range of
2486 the operand is entirely *outside* the bounds of all the
2487 (non-default) case labels. If so, take the default. */
2488 unsigned n = gimple_switch_num_labels (switch_stmt);
2489 tree min_label = gimple_switch_label (switch_stmt, 1);
2490 tree max_label = gimple_switch_label (switch_stmt, n - 1);
2491 tree case_high = CASE_HIGH (max_label);
2492 if (!case_high)
2493 case_high = CASE_LOW (max_label);
2494 int_range_max label_range (CASE_LOW (min_label), case_high);
2495 if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2496 range_cast (label_range, range_of_op->type ());
2497 label_range.intersect (*range_of_op);
2498 if (label_range.undefined_p ())
2499 return gimple_switch_label (switch_stmt, 0);
2500 }
2501 return NULL_TREE;
2502 }
2503
2504 struct case_info
2505 {
2506 tree expr;
2507 basic_block bb;
2508 };
2509
2510 /* Location information for ASSERT_EXPRs. Each instance of this
2511 structure describes an ASSERT_EXPR for an SSA name. Since a single
2512 SSA name may have more than one assertion associated with it, these
2513 locations are kept in a linked list attached to the corresponding
2514 SSA name. */
2515 struct assert_locus
2516 {
2517 /* Basic block where the assertion would be inserted. */
2518 basic_block bb;
2519
2520 /* Some assertions need to be inserted on an edge (e.g., assertions
2521 generated by COND_EXPRs). In those cases, BB will be NULL. */
2522 edge e;
2523
2524 /* Pointer to the statement that generated this assertion. */
2525 gimple_stmt_iterator si;
2526
2527 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
2528 enum tree_code comp_code;
2529
2530 /* Value being compared against. */
2531 tree val;
2532
2533 /* Expression to compare. */
2534 tree expr;
2535
2536 /* Next node in the linked list. */
2537 assert_locus *next;
2538 };
2539
2540 /* Class to traverse the flowgraph looking for conditional jumps to
2541 insert ASSERT_EXPR range expressions. These range expressions are
2542 meant to provide information to optimizations that need to reason
2543 in terms of value ranges. They will not be expanded into RTL. */
2544
2545 class vrp_asserts
2546 {
2547 public:
2548 vrp_asserts (struct function *fn) : fun (fn) { }
2549
2550 void insert_range_assertions ();
2551
2552 /* Convert range assertion expressions into the implied copies and
2553 copy propagate away the copies. */
2554 void remove_range_assertions ();
2555
2556 /* Dump all the registered assertions for all the names to FILE. */
2557 void dump (FILE *);
2558
2559 /* Dump all the registered assertions for NAME to FILE. */
2560 void dump (FILE *file, tree name);
2561
2562 /* Dump all the registered assertions for NAME to stderr. */
2563 void debug (tree name)
2564 {
2565 dump (stderr, name);
2566 }
2567
2568 /* Dump all the registered assertions for all the names to stderr. */
2569 void debug ()
2570 {
2571 dump (stderr);
2572 }
2573
2574 private:
2575 /* Set of SSA names found live during the RPO traversal of the function
2576 for still active basic-blocks. */
2577 live_names live;
2578
2579 /* Function to work on. */
2580 struct function *fun;
2581
2582 /* If bit I is set, SSA name N_i has a list of assertions that
2583 should be inserted in the IL. */
2584 bitmap need_assert_for;
2585
2586 /* Array of lists of assertion locations. ASSERTS_FOR[I] holds a
2587 list of assert_locus nodes that describe where ASSERT_EXPRs for
2588 SSA name N_I should be inserted. */
2589 assert_locus **asserts_for;
2590
2591 /* Finish found ASSERTS for E and register them at GSI. */
2592 void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
2593 vec<assert_info> &asserts);
2594
2595 /* Determine whether the outgoing edges of BB should receive an
2596 ASSERT_EXPR for each of the operands of BB's LAST statement. The
2597 last statement of BB must be a SWITCH_EXPR.
2598
2599 If any of the sub-graphs rooted at BB have an interesting use of
2600 the predicate operands, an assert location node is added to the
2601 list of assertions for the corresponding operands. */
2602 void find_switch_asserts (basic_block bb, gswitch *last);
2603
2604 /* Do an RPO walk over the function computing SSA name liveness
2605 on-the-fly and deciding on assert expressions to insert. */
2606 void find_assert_locations ();
2607
2608 /* Traverse all the statements in block BB looking for statements that
2609 may generate useful assertions for the SSA names in their operand.
2610 See the method implementation commentary for more information. */
2611 void find_assert_locations_in_bb (basic_block bb);
2612
2613 /* Determine whether the outgoing edges of BB should receive an
2614 ASSERT_EXPR for each of the operands of BB's LAST statement.
2615 The last statement of BB must be a COND_EXPR.
2616
2617 If any of the sub-graphs rooted at BB have an interesting use of
2618 the predicate operands, an assert location node is added to the
2619 list of assertions for the corresponding operands. */
2620 void find_conditional_asserts (basic_block bb, gcond *last);
2621
2622 /* Process all the insertions registered for every name N_i registered
2623 in NEED_ASSERT_FOR. The list of assertions to be inserted are
2624 found in ASSERTS_FOR[i]. */
2625 void process_assert_insertions ();
2626
2627 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2628 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2629 E->DEST, then register this location as a possible insertion point
2630 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2631
2632 BB, E and SI provide the exact insertion point for the new
2633 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2634 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2635 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2636 must not be NULL. */
2637 void register_new_assert_for (tree name, tree expr,
2638 enum tree_code comp_code,
2639 tree val, basic_block bb,
2640 edge e, gimple_stmt_iterator si);
2641
2642 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2643 create a new SSA name N and return the assertion assignment
2644 'N = ASSERT_EXPR <V, V OP W>'. */
2645 gimple *build_assert_expr_for (tree cond, tree v);
2646
2647 /* Create an ASSERT_EXPR for NAME and insert it in the location
2648 indicated by LOC. Return true if we made any edge insertions. */
2649 bool process_assert_insertions_for (tree name, assert_locus *loc);
2650
2651 /* Qsort callback for sorting assert locations. */
2652 template <bool stable> static int compare_assert_loc (const void *,
2653 const void *);
2654
2655 /* Return true if STMT is a predicate expression involving floating
2656 point values. */
2657 bool fp_predicate (gimple *stmt)
2658 {
2659 GIMPLE_CHECK (stmt, GIMPLE_COND);
2660 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2661 }
2662
2663 bool all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt,
2664 basic_block cond_bb);
2665
2666 static int compare_case_labels (const void *, const void *);
2667 };
2668
2669 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2670 create a new SSA name N and return the assertion assignment
2671 'N = ASSERT_EXPR <V, V OP W>'. */
2672
2673 gimple *
2674 vrp_asserts::build_assert_expr_for (tree cond, tree v)
2675 {
2676 tree a;
2677 gassign *assertion;
2678
2679 gcc_assert (TREE_CODE (v) == SSA_NAME
2680 && COMPARISON_CLASS_P (cond));
2681
2682 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2683 assertion = gimple_build_assign (NULL_TREE, a);
2684
2685 /* The new ASSERT_EXPR creates a new SSA name that replaces the
2686 operand of the ASSERT_EXPR. Create it so the new name and the old one
2687 are registered in the replacement table so that we can fix the SSA web
2688 after adding all the ASSERT_EXPRs. */
2689 tree new_def = create_new_def_for (v, assertion, NULL);
2690 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2691 given we have to be able to fully propagate those out to re-create
2692 valid SSA when removing the asserts. */
2693 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2694 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2695
2696 return assertion;
2697 }
2698
2699 /* Dump all the registered assertions for NAME to FILE. */
2700
2701 void
2702 vrp_asserts::dump (FILE *file, tree name)
2703 {
2704 assert_locus *loc;
2705
2706 fprintf (file, "Assertions to be inserted for ");
2707 print_generic_expr (file, name);
2708 fprintf (file, "\n");
2709
2710 loc = asserts_for[SSA_NAME_VERSION (name)];
2711 while (loc)
2712 {
2713 fprintf (file, "\t");
2714 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2715 fprintf (file, "\n\tBB #%d", loc->bb->index);
2716 if (loc->e)
2717 {
2718 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2719 loc->e->dest->index);
2720 dump_edge_info (file, loc->e, dump_flags, 0);
2721 }
2722 fprintf (file, "\n\tPREDICATE: ");
2723 print_generic_expr (file, loc->expr);
2724 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2725 print_generic_expr (file, loc->val);
2726 fprintf (file, "\n\n");
2727 loc = loc->next;
2728 }
2729
2730 fprintf (file, "\n");
2731 }
2732
2733 /* Dump all the registered assertions for all the names to FILE. */
2734
2735 void
2736 vrp_asserts::dump (FILE *file)
2737 {
2738 unsigned i;
2739 bitmap_iterator bi;
2740
2741 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2742 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2743 dump (file, ssa_name (i));
2744 fprintf (file, "\n");
2745 }
2746
2747 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2748 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2749 E->DEST, then register this location as a possible insertion point
2750 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2751
2752 BB, E and SI provide the exact insertion point for the new
2753 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2754 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2755 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2756 must not be NULL. */
2757
2758 void
2759 vrp_asserts::register_new_assert_for (tree name, tree expr,
2760 enum tree_code comp_code,
2761 tree val,
2762 basic_block bb,
2763 edge e,
2764 gimple_stmt_iterator si)
2765 {
2766 assert_locus *n, *loc, *last_loc;
2767 basic_block dest_bb;
2768
2769 gcc_checking_assert (bb == NULL || e == NULL);
2770
2771 if (e == NULL)
2772 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2773 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2774
2775 /* Never build an assert comparing against an integer constant with
2776 TREE_OVERFLOW set. This confuses our undefined overflow warning
2777 machinery. */
2778 if (TREE_OVERFLOW_P (val))
2779 val = drop_tree_overflow (val);
2780
2781 /* The new assertion A will be inserted at BB or E. We need to
2782 determine if the new location is dominated by a previously
2783 registered location for A. If we are doing an edge insertion,
2784 assume that A will be inserted at E->DEST. Note that this is not
2785 necessarily true.
2786
2787 If E is a critical edge, it will be split. But even if E is
2788 split, the new block will dominate the same set of blocks that
2789 E->DEST dominates.
2790
2791 The reverse, however, is not true: blocks dominated by E->DEST
2792 will not be dominated by the new block created to split E. So,
2793 if the insertion location is on a critical edge, we will not use
2794 the new location to move another assertion previously registered
2795 at a block dominated by E->DEST. */
2796 dest_bb = (bb) ? bb : e->dest;
2797
2798 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2799 VAL at a block dominating DEST_BB, then we don't need to insert a new
2800 one. Similarly, if the same assertion already exists at a block
2801 dominated by DEST_BB and the new location is not on a critical
2802 edge, then update the existing location for the assertion (i.e.,
2803 move the assertion up in the dominance tree).
2804
2805 Note, this is implemented as a simple linked list because there
2806 should not be more than a handful of assertions registered per
2807 name. If this becomes a performance problem, a table hashed by
2808 COMP_CODE and VAL could be implemented. */
2809 loc = asserts_for[SSA_NAME_VERSION (name)];
2810 last_loc = loc;
2811 while (loc)
2812 {
2813 if (loc->comp_code == comp_code
2814 && (loc->val == val
2815 || operand_equal_p (loc->val, val, 0))
2816 && (loc->expr == expr
2817 || operand_equal_p (loc->expr, expr, 0)))
2818 {
2819 /* If E is not a critical edge and DEST_BB
2820 dominates the existing location for the assertion, move
2821 the assertion up in the dominance tree by updating its
2822 location information. */
2823 if ((e == NULL || !EDGE_CRITICAL_P (e))
2824 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2825 {
2826 loc->bb = dest_bb;
2827 loc->e = e;
2828 loc->si = si;
2829 return;
2830 }
2831 }
2832
2833 /* Update the last node of the list and move to the next one. */
2834 last_loc = loc;
2835 loc = loc->next;
2836 }
2837
2838 /* If we didn't find an assertion already registered for
2839 NAME COMP_CODE VAL, add a new one at the end of the list of
2840 assertions associated with NAME. */
2841 n = XNEW (struct assert_locus);
2842 n->bb = dest_bb;
2843 n->e = e;
2844 n->si = si;
2845 n->comp_code = comp_code;
2846 n->val = val;
2847 n->expr = expr;
2848 n->next = NULL;
2849
2850 if (last_loc)
2851 last_loc->next = n;
2852 else
2853 asserts_for[SSA_NAME_VERSION (name)] = n;
2854
2855 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2856 }
2857
2858 /* Finish found ASSERTS for E and register them at GSI. */
2859
2860 void
2861 vrp_asserts::finish_register_edge_assert_for (edge e,
2862 gimple_stmt_iterator gsi,
2863 vec<assert_info> &asserts)
2864 {
2865 for (unsigned i = 0; i < asserts.length (); ++i)
2866 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
2867 reachable from E. */
2868 if (live.live_on_edge_p (asserts[i].name, e))
2869 register_new_assert_for (asserts[i].name, asserts[i].expr,
2870 asserts[i].comp_code, asserts[i].val,
2871 NULL, e, gsi);
2872 }
2873
2874 /* Determine whether the outgoing edges of BB should receive an
2875 ASSERT_EXPR for each of the operands of BB's LAST statement.
2876 The last statement of BB must be a COND_EXPR.
2877
2878 If any of the sub-graphs rooted at BB have an interesting use of
2879 the predicate operands, an assert location node is added to the
2880 list of assertions for the corresponding operands. */
2881
2882 void
2883 vrp_asserts::find_conditional_asserts (basic_block bb, gcond *last)
2884 {
2885 gimple_stmt_iterator bsi;
2886 tree op;
2887 edge_iterator ei;
2888 edge e;
2889 ssa_op_iter iter;
2890
2891 bsi = gsi_for_stmt (last);
2892
2893 /* Look for uses of the operands in each of the sub-graphs
2894 rooted at BB. We need to check each of the outgoing edges
2895 separately, so that we know what kind of ASSERT_EXPR to
2896 insert. */
2897 FOR_EACH_EDGE (e, ei, bb->succs)
2898 {
2899 if (e->dest == bb)
2900 continue;
2901
2902 /* Register the necessary assertions for each operand in the
2903 conditional predicate. */
2904 auto_vec<assert_info, 8> asserts;
2905 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
2906 register_edge_assert_for (op, e,
2907 gimple_cond_code (last),
2908 gimple_cond_lhs (last),
2909 gimple_cond_rhs (last), asserts);
2910 finish_register_edge_assert_for (e, bsi, asserts);
2911 }
2912 }
2913
2914 /* Compare two case labels sorting first by the destination bb index
2915 and then by the case value. */
2916
2917 int
2918 vrp_asserts::compare_case_labels (const void *p1, const void *p2)
2919 {
2920 const struct case_info *ci1 = (const struct case_info *) p1;
2921 const struct case_info *ci2 = (const struct case_info *) p2;
2922 int idx1 = ci1->bb->index;
2923 int idx2 = ci2->bb->index;
2924
2925 if (idx1 < idx2)
2926 return -1;
2927 else if (idx1 == idx2)
2928 {
2929 /* Make sure the default label is first in a group. */
2930 if (!CASE_LOW (ci1->expr))
2931 return -1;
2932 else if (!CASE_LOW (ci2->expr))
2933 return 1;
2934 else
2935 return tree_int_cst_compare (CASE_LOW (ci1->expr),
2936 CASE_LOW (ci2->expr));
2937 }
2938 else
2939 return 1;
2940 }
2941
2942 /* Determine whether the outgoing edges of BB should receive an
2943 ASSERT_EXPR for each of the operands of BB's LAST statement.
2944 The last statement of BB must be a SWITCH_EXPR.
2945
2946 If any of the sub-graphs rooted at BB have an interesting use of
2947 the predicate operands, an assert location node is added to the
2948 list of assertions for the corresponding operands. */
2949
2950 void
2951 vrp_asserts::find_switch_asserts (basic_block bb, gswitch *last)
2952 {
2953 gimple_stmt_iterator bsi;
2954 tree op;
2955 edge e;
2956 struct case_info *ci;
2957 size_t n = gimple_switch_num_labels (last);
2958 #if GCC_VERSION >= 4000
2959 unsigned int idx;
2960 #else
2961 /* Work around GCC 3.4 bug (PR 37086). */
2962 volatile unsigned int idx;
2963 #endif
2964
2965 bsi = gsi_for_stmt (last);
2966 op = gimple_switch_index (last);
2967 if (TREE_CODE (op) != SSA_NAME)
2968 return;
2969
2970 /* Build a vector of case labels sorted by destination label. */
2971 ci = XNEWVEC (struct case_info, n);
2972 for (idx = 0; idx < n; ++idx)
2973 {
2974 ci[idx].expr = gimple_switch_label (last, idx);
2975 ci[idx].bb = label_to_block (fun, CASE_LABEL (ci[idx].expr));
2976 }
2977 edge default_edge = find_edge (bb, ci[0].bb);
2978 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
2979
2980 for (idx = 0; idx < n; ++idx)
2981 {
2982 tree min, max;
2983 tree cl = ci[idx].expr;
2984 basic_block cbb = ci[idx].bb;
2985
2986 min = CASE_LOW (cl);
2987 max = CASE_HIGH (cl);
2988
2989 /* If there are multiple case labels with the same destination,
2990 we need to combine them into a single value range for the edge. */
2991 if (idx + 1 < n && cbb == ci[idx + 1].bb)
2992 {
2993 /* Skip labels until the last of the group. */
2994 do {
2995 ++idx;
2996 } while (idx < n && cbb == ci[idx].bb);
2997 --idx;
2998
2999 /* Pick up the maximum of the case label range. */
3000 if (CASE_HIGH (ci[idx].expr))
3001 max = CASE_HIGH (ci[idx].expr);
3002 else
3003 max = CASE_LOW (ci[idx].expr);
3004 }
3005
3006 /* Can't extract a useful assertion out of a range that includes the
3007 default label. */
3008 if (min == NULL_TREE)
3009 continue;
3010
3011 /* Find the edge to register the assert expr on. */
3012 e = find_edge (bb, cbb);
3013
3014 /* Register the necessary assertions for the operand in the
3015 SWITCH_EXPR. */
3016 auto_vec<assert_info, 8> asserts;
3017 register_edge_assert_for (op, e,
3018 max ? GE_EXPR : EQ_EXPR,
3019 op, fold_convert (TREE_TYPE (op), min),
3020 asserts);
3021 if (max)
3022 register_edge_assert_for (op, e, LE_EXPR, op,
3023 fold_convert (TREE_TYPE (op), max),
3024 asserts);
3025 finish_register_edge_assert_for (e, bsi, asserts);
3026 }
3027
3028 XDELETEVEC (ci);
3029
3030 if (!live.live_on_edge_p (op, default_edge))
3031 return;
3032
3033 /* Now register, along the default edge, assertions that correspond to the
3034 anti-range of each label. */
3035 int insertion_limit = param_max_vrp_switch_assertions;
3036 if (insertion_limit == 0)
3037 return;
3038
3039 /* We can't do this if the default case shares a label with another case. */
3040 tree default_cl = gimple_switch_default_label (last);
3041 for (idx = 1; idx < n; idx++)
3042 {
3043 tree min, max;
3044 tree cl = gimple_switch_label (last, idx);
3045 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3046 continue;
3047
3048 min = CASE_LOW (cl);
3049 max = CASE_HIGH (cl);
3050
3051 /* Combine contiguous case ranges to reduce the number of assertions
3052 to insert. */
3053 for (idx = idx + 1; idx < n; idx++)
3054 {
3055 tree next_min, next_max;
3056 tree next_cl = gimple_switch_label (last, idx);
3057 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3058 break;
3059
3060 next_min = CASE_LOW (next_cl);
3061 next_max = CASE_HIGH (next_cl);
3062
3063 wide_int difference = (wi::to_wide (next_min)
3064 - wi::to_wide (max ? max : min));
3065 if (wi::eq_p (difference, 1))
3066 max = next_max ? next_max : next_min;
3067 else
3068 break;
3069 }
3070 idx--;
3071
3072 if (max == NULL_TREE)
3073 {
3074 /* Register the assertion OP != MIN. */
3075 auto_vec<assert_info, 8> asserts;
3076 min = fold_convert (TREE_TYPE (op), min);
3077 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3078 asserts);
3079 finish_register_edge_assert_for (default_edge, bsi, asserts);
3080 }
3081 else
3082 {
3083 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3084 which will give OP the anti-range ~[MIN,MAX]. */
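	  /* E.g. for MIN 3 and MAX 7 this registers (unsigned) OP - 3 > 4,
	     which holds exactly when OP < 3 or OP > 7.  */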
3085 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3086 min = fold_convert (TREE_TYPE (uop), min);
3087 max = fold_convert (TREE_TYPE (uop), max);
3088
3089 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3090 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3091 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3092 NULL, default_edge, bsi);
3093 }
3094
3095 if (--insertion_limit == 0)
3096 break;
3097 }
3098 }
3099
3100 /* Traverse all the statements in block BB looking for statements that
3101 may generate useful assertions for the SSA names in their operand.
3102 If a statement produces a useful assertion A for name N_i, then the
3103 list of assertions already generated for N_i is scanned to
3104 determine if A is actually needed.
3105
3106 If N_i already had the assertion A at a location dominating the
3107 current location, then nothing needs to be done. Otherwise, the
3108 new location for A is recorded instead.
3109
3110 1- For every statement S in BB, all the variables used by S are
3111 added to bitmap FOUND_IN_SUBGRAPH.
3112
3113 2- If statement S uses an operand N in a way that exposes a known
3114 value range for N, then if N was not already generated by an
3115 ASSERT_EXPR, create a new assert location for N. For instance,
3116 if N is a pointer and the statement dereferences it, we can
3117 assume that N is not NULL.
3118
3119 3- COND_EXPRs are a special case of #2. We can derive range
3120 information from the predicate but need to insert different
3121 ASSERT_EXPRs for each of the sub-graphs rooted at the
3122 conditional block. If the last statement of BB is a conditional
3123 expression of the form 'X op Y', then
3124
3125 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3126
3127 b) If the conditional is the only entry point to the sub-graph
3128 corresponding to the THEN_CLAUSE, recurse into it. On
3129 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3130 an ASSERT_EXPR is added for the corresponding variable.
3131
3132 c) Repeat step (b) on the ELSE_CLAUSE.
3133
3134 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3135
3136 For instance,
3137
3138 if (a == 9)
3139 b = a;
3140 else
3141 b = c + 1;
3142
3143 In this case, an assertion on the THEN clause is useful to
3144 determine that 'a' is always 9 on that edge. However, an assertion
3145 on the ELSE clause would be unnecessary.
3146
3147 4- If BB does not end in a conditional expression, then we recurse
3148 into BB's dominator children.
3149
3150 At the end of the recursive traversal, every SSA name will have a
3151 list of locations where ASSERT_EXPRs should be added. When a new
3152 location for name N is found, it is registered by calling
3153 register_new_assert_for. That function keeps track of all the
3154 registered assertions to prevent adding unnecessary assertions.
3155 For instance, if a pointer P_4 is dereferenced more than once in a
3156 dominator tree, only the location dominating all the dereferences of
3157 P_4 will receive an ASSERT_EXPR. */
3158
3159 void
3160 vrp_asserts::find_assert_locations_in_bb (basic_block bb)
3161 {
3162 gimple *last;
3163
3164 last = last_stmt (bb);
3165
3166 /* If BB's last statement is a conditional statement involving integer
3167 operands, determine if we need to add ASSERT_EXPRs. */
3168 if (last
3169 && gimple_code (last) == GIMPLE_COND
3170 && !fp_predicate (last)
3171 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3172 find_conditional_asserts (bb, as_a <gcond *> (last));
3173
3174 /* If BB's last statement is a switch statement involving integer
3175 operands, determine if we need to add ASSERT_EXPRs. */
3176 if (last
3177 && gimple_code (last) == GIMPLE_SWITCH
3178 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3179 find_switch_asserts (bb, as_a <gswitch *> (last));
3180
3181 /* Traverse all the statements in BB marking used names and looking
3182 for statements that may infer assertions for their used operands. */
3183 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3184 gsi_prev (&si))
3185 {
3186 gimple *stmt;
3187 tree op;
3188 ssa_op_iter i;
3189
3190 stmt = gsi_stmt (si);
3191
3192 if (is_gimple_debug (stmt))
3193 continue;
3194
3195 /* See if we can derive an assertion for any of STMT's operands. */
3196 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3197 {
3198 tree value;
3199 enum tree_code comp_code;
3200
3201 /* If op is not live beyond this stmt, do not bother to insert
3202 asserts for it. */
3203 if (!live.live_on_block_p (op, bb))
3204 continue;
3205
3206 /* If OP is used in such a way that we can infer a value
3207 range for it, and we don't find a previous assertion for
3208 it, create a new assertion location node for OP. */
3209 if (infer_value_range (stmt, op, &comp_code, &value))
3210 {
3211 /* If we are able to infer a nonzero value range for OP,
3212 then walk backwards through the use-def chain to see if OP
3213 was set via a typecast.
3214
3215 If so, then we can also infer a nonzero value range
3216 for the operand of the NOP_EXPR. */
3217 if (comp_code == NE_EXPR && integer_zerop (value))
3218 {
3219 tree t = op;
3220 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3221
3222 while (is_gimple_assign (def_stmt)
3223 && CONVERT_EXPR_CODE_P
3224 (gimple_assign_rhs_code (def_stmt))
3225 && TREE_CODE
3226 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3227 && POINTER_TYPE_P
3228 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3229 {
3230 t = gimple_assign_rhs1 (def_stmt);
3231 def_stmt = SSA_NAME_DEF_STMT (t);
3232
3233 /* Note we want to register the assert for the
3234 operand of the NOP_EXPR after SI, not after the
3235 conversion. */
3236 if (live.live_on_block_p (t, bb))
3237 register_new_assert_for (t, t, comp_code, value,
3238 bb, NULL, si);
3239 }
3240 }
3241
3242 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3243 }
3244 }
3245
3246 /* Update live. */
3247 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3248 live.set (op, bb);
3249 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3250 live.clear (op, bb);
3251 }
3252
3253 /* Traverse all PHI nodes in BB, updating live. */
3254 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3255 gsi_next (&si))
3256 {
3257 use_operand_p arg_p;
3258 ssa_op_iter i;
3259 gphi *phi = si.phi ();
3260 tree res = gimple_phi_result (phi);
3261
3262 if (virtual_operand_p (res))
3263 continue;
3264
3265 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3266 {
3267 tree arg = USE_FROM_PTR (arg_p);
3268 if (TREE_CODE (arg) == SSA_NAME)
3269 live.set (arg, bb);
3270 }
3271
3272 live.clear (res, bb);
3273 }
3274 }
3275
3276 /* Do an RPO walk over the function computing SSA name liveness
3277 on-the-fly and deciding on assert expressions to insert. */
3278
3279 void
3280 vrp_asserts::find_assert_locations (void)
3281 {
3282 int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3283 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3284 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (fun));
3285 int rpo_cnt, i;
3286
3287 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3288 for (i = 0; i < rpo_cnt; ++i)
3289 bb_rpo[rpo[i]] = i;
3290
3291 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3292 the order in which we compute liveness and insert asserts, we would
3293 otherwise fail to insert asserts into the loop latch. */
3294 for (auto loop : loops_list (cfun, 0))
3295 {
3296 i = loop->latch->index;
3297 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3298 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3299 !gsi_end_p (gsi); gsi_next (&gsi))
3300 {
3301 gphi *phi = gsi.phi ();
3302 if (virtual_operand_p (gimple_phi_result (phi)))
3303 continue;
3304 tree arg = gimple_phi_arg_def (phi, j);
3305 if (TREE_CODE (arg) == SSA_NAME)
3306 live.set (arg, loop->latch);
3307 }
3308 }
3309
3310 for (i = rpo_cnt - 1; i >= 0; --i)
3311 {
3312 basic_block bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3313 edge e;
3314 edge_iterator ei;
3315
3316 /* Process BB and update the live information with uses in
3317 this block. */
3318 find_assert_locations_in_bb (bb);
3319
3320 /* Merge liveness into the predecessor blocks and free it. */
3321 if (!live.block_has_live_names_p (bb))
3322 {
3323 int pred_rpo = i;
3324 FOR_EACH_EDGE (e, ei, bb->preds)
3325 {
3326 int pred = e->src->index;
3327 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3328 continue;
3329
3330 live.merge (e->src, bb);
3331
3332 if (bb_rpo[pred] < pred_rpo)
3333 pred_rpo = bb_rpo[pred];
3334 }
3335
3336 /* Record the RPO number of the last visited block that needs
3337 live information from this block. */
3338 last_rpo[rpo[i]] = pred_rpo;
3339 }
3340 else
3341 live.clear_block (bb);
3342
3343 /* We can free all successors' live bitmaps if all of their
3344 predecessors have been visited already. */
3345 FOR_EACH_EDGE (e, ei, bb->succs)
3346 if (last_rpo[e->dest->index] == i)
3347 live.clear_block (e->dest);
3348 }
3349
3350 XDELETEVEC (rpo);
3351 XDELETEVEC (bb_rpo);
3352 XDELETEVEC (last_rpo);
3353 }
3354
3355 /* Create an ASSERT_EXPR for NAME and insert it in the location
3356 indicated by LOC. Return true if we made any edge insertions. */
3357
3358 bool
3359 vrp_asserts::process_assert_insertions_for (tree name, assert_locus *loc)
3360 {
3361 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3362 gimple *stmt;
3363 tree cond;
3364 gimple *assert_stmt;
3365 edge_iterator ei;
3366 edge e;
3367
3368 /* If we have X <=> X do not insert an assert expr for that. */
3369 if (loc->expr == loc->val)
3370 return false;
3371
3372 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3373 assert_stmt = build_assert_expr_for (cond, name);
3374 if (loc->e)
3375 {
3376 /* We have been asked to insert the assertion on an edge. This
3377 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3378 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3379 || (gimple_code (gsi_stmt (loc->si))
3380 == GIMPLE_SWITCH));
3381
3382 gsi_insert_on_edge (loc->e, assert_stmt);
3383 return true;
3384 }
3385
3386 /* If the stmt iterator points at the end then this is an insertion
3387 at the beginning of a block. */
3388 if (gsi_end_p (loc->si))
3389 {
3390 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3391 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3392 return false;
3393
3394 }
3395 /* Otherwise, we can insert right after LOC->SI, provided the
3396 statement is not the last statement in the block. */
3397 stmt = gsi_stmt (loc->si);
3398 if (!stmt_ends_bb_p (stmt))
3399 {
3400 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3401 return false;
3402 }
3403
3404 /* If STMT must be the last statement in BB, we can only insert new
3405 assertions on the non-abnormal edge out of BB. Note that since
3406 STMT is not control flow, there may only be one non-abnormal/eh edge
3407 out of BB. */
3408 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3409 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3410 {
3411 gsi_insert_on_edge (e, assert_stmt);
3412 return true;
3413 }
3414
3415 gcc_unreachable ();
3416 }
3417
3418 /* Qsort helper for sorting assert locations. If stable is true, don't
3419 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3420 on the other hand, some pointers might be NULL. */
3421
3422 template <bool stable>
3423 int
3424 vrp_asserts::compare_assert_loc (const void *pa, const void *pb)
3425 {
3426 assert_locus * const a = *(assert_locus * const *)pa;
3427 assert_locus * const b = *(assert_locus * const *)pb;
3428
3429 /* If stable, some asserts might already have been optimized away;
3430 sort them last. */
3431 if (stable)
3432 {
3433 if (a == NULL)
3434 return b != NULL;
3435 else if (b == NULL)
3436 return -1;
3437 }
3438
3439 if (a->e == NULL && b->e != NULL)
3440 return 1;
3441 else if (a->e != NULL && b->e == NULL)
3442 return -1;
3443
3444 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3445 no need to test both a->e and b->e. */
3446
3447 /* Sort by destination index. */
3448 if (a->e == NULL)
3449 ;
3450 else if (a->e->dest->index > b->e->dest->index)
3451 return 1;
3452 else if (a->e->dest->index < b->e->dest->index)
3453 return -1;
3454
3455 /* Sort by comp_code. */
3456 if (a->comp_code > b->comp_code)
3457 return 1;
3458 else if (a->comp_code < b->comp_code)
3459 return -1;
3460
3461 hashval_t ha, hb;
3462
3463 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3464 uses DECL_UID of the VAR_DECL, so sorting might differ between
3465 -g and -g0. When doing the removal of redundant assert exprs
3466 and commonization to successors, this does not matter, but
3467 the final sort needs to be stable. */
3468 if (stable)
3469 {
3470 ha = 0;
3471 hb = 0;
3472 }
3473 else
3474 {
3475 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3476 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3477 }
3478
3479 /* Break the tie using hashing and source/bb index. */
3480 if (ha == hb)
3481 return (a->e != NULL
3482 ? a->e->src->index - b->e->src->index
3483 : a->bb->index - b->bb->index);
3484 return ha > hb ? 1 : -1;
3485 }
3486
3487 /* Process all the insertions registered for every name N_i registered
3488 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3489 found in ASSERTS_FOR[i]. */
3490
3491 void
3492 vrp_asserts::process_assert_insertions ()
3493 {
3494 unsigned i;
3495 bitmap_iterator bi;
3496 bool update_edges_p = false;
3497 int num_asserts = 0;
3498
3499 if (dump_file && (dump_flags & TDF_DETAILS))
3500 dump (dump_file);
3501
3502 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3503 {
3504 assert_locus *loc = asserts_for[i];
3505 gcc_assert (loc);
3506
3507 auto_vec<assert_locus *, 16> asserts;
3508 for (; loc; loc = loc->next)
3509 asserts.safe_push (loc);
3510 asserts.qsort (compare_assert_loc<false>);
3511
3512 /* Push down common asserts to successors and remove redundant ones. */
3513 unsigned ecnt = 0;
3514 assert_locus *common = NULL;
3515 unsigned commonj = 0;
3516 for (unsigned j = 0; j < asserts.length (); ++j)
3517 {
3518 loc = asserts[j];
3519 if (! loc->e)
3520 common = NULL;
3521 else if (! common
3522 || loc->e->dest != common->e->dest
3523 || loc->comp_code != common->comp_code
3524 || ! operand_equal_p (loc->val, common->val, 0)
3525 || ! operand_equal_p (loc->expr, common->expr, 0))
3526 {
3527 commonj = j;
3528 common = loc;
3529 ecnt = 1;
3530 }
3531 else if (loc->e == asserts[j-1]->e)
3532 {
3533 /* Remove duplicate asserts. */
3534 if (commonj == j - 1)
3535 {
3536 commonj = j;
3537 common = loc;
3538 }
3539 free (asserts[j-1]);
3540 asserts[j-1] = NULL;
3541 }
3542 else
3543 {
3544 ecnt++;
3545 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3546 {
3547 /* We have the same assertion on all incoming edges of a BB.
3548 Insert it at the beginning of that block. */
3549 loc->bb = loc->e->dest;
3550 loc->e = NULL;
3551 loc->si = gsi_none ();
3552 common = NULL;
3553 /* Clear asserts commoned. */
3554 for (; commonj != j; ++commonj)
3555 if (asserts[commonj])
3556 {
3557 free (asserts[commonj]);
3558 asserts[commonj] = NULL;
3559 }
3560 }
3561 }
3562 }
3563
3564 /* The asserts vector sorting above might be unstable for
3565 -fcompare-debug; sort again to ensure a stable order. */
3566 asserts.qsort (compare_assert_loc<true>);
3567 for (unsigned j = 0; j < asserts.length (); ++j)
3568 {
3569 loc = asserts[j];
3570 if (! loc)
3571 break;
3572 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
3573 num_asserts++;
3574 free (loc);
3575 }
3576 }
3577
3578 if (update_edges_p)
3579 gsi_commit_edge_inserts ();
3580
3581 statistics_counter_event (fun, "Number of ASSERT_EXPR expressions inserted",
3582 num_asserts);
3583 }
3584
3585 /* Traverse the flowgraph looking for conditional jumps to insert range
3586 expressions. These range expressions are meant to provide information
3587 to optimizations that need to reason in terms of value ranges. They
3588 will not be expanded into RTL. For instance, given:
3589
3590 x = ...
3591 y = ...
3592 if (x < y)
3593 y = x - 2;
3594 else
3595 x = y + 3;
3596
3597 this pass will transform the code into:
3598
3599 x = ...
3600 y = ...
3601 if (x < y)
3602 {
3603 x = ASSERT_EXPR <x, x < y>
3604 y = x - 2
3605 }
3606 else
3607 {
3608 y = ASSERT_EXPR <y, x >= y>
3609 x = y + 3
3610 }
3611
3612 The idea is that once copy and constant propagation have run, other
3613 optimizations will be able to determine what ranges of values can 'x'
3614 take in different paths of the code, simply by checking the reaching
3615 definition of 'x'. */
3616
3617 void
3618 vrp_asserts::insert_range_assertions (void)
3619 {
3620 need_assert_for = BITMAP_ALLOC (NULL);
3621 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
3622
3623 calculate_dominance_info (CDI_DOMINATORS);
3624
3625 find_assert_locations ();
3626 if (!bitmap_empty_p (need_assert_for))
3627 {
3628 process_assert_insertions ();
3629 update_ssa (TODO_update_ssa_no_phi);
3630 }
3631
3632 if (dump_file && (dump_flags & TDF_DETAILS))
3633 {
3634 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
3635 dump_function_to_file (current_function_decl, dump_file, dump_flags);
3636 }
3637
3638 free (asserts_for);
3639 BITMAP_FREE (need_assert_for);
3640 }
3641
3642 /* Return true if all imm uses of VAR are either in STMT, or
3643 feed (optionally through a chain of single imm uses) a GIMPLE_COND
3644 in basic block COND_BB. */
3645
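/* For instance (an illustrative sketch, not taken from an actual dump),
   with VAR = x_7, STMT the ASSERT_EXPR using x_7 and COND_BB the block
   ending in the condition:

     _1 = x_7 > 9;
     _2 = x_7 <= 19;
     _3 = _1 & _2;
     if (_3 != 0)
       ...

   every use of x_7 outside STMT feeds the GIMPLE_COND through a chain
   of single immediate uses, so the function returns true.  */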
3646 bool
3647 vrp_asserts::all_imm_uses_in_stmt_or_feed_cond (tree var,
3648 gimple *stmt,
3649 basic_block cond_bb)
3650 {
3651 use_operand_p use_p, use2_p;
3652 imm_use_iterator iter;
3653
3654 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
3655 if (USE_STMT (use_p) != stmt)
3656 {
3657 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
3658 if (is_gimple_debug (use_stmt))
3659 continue;
3660 while (is_gimple_assign (use_stmt)
3661 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
3662 && single_imm_use (gimple_assign_lhs (use_stmt),
3663 &use2_p, &use_stmt2))
3664 use_stmt = use_stmt2;
3665 if (gimple_code (use_stmt) != GIMPLE_COND
3666 || gimple_bb (use_stmt) != cond_bb)
3667 return false;
3668 }
3669 return true;
3670 }
3671
3672 /* Convert range assertion expressions into the implied copies and
3673 copy propagate away the copies. Doing the trivial copy propagation
3674 here avoids the need to run the full copy propagation pass after
3675 VRP.
3676
3677 FIXME, this will eventually lead to copy propagation removing the
3678 names that had useful range information attached to them. For
3679 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
3680 then N_i will have the range [3, +INF].
3681
3682 However, by converting the assertion into the implied copy
3683 operation N_i = N_j, we will then copy-propagate N_j into the uses
3684 of N_i and lose the range information.
3685
3686 The problem with keeping ASSERT_EXPRs around is that passes after
3687 VRP need to handle them appropriately.
3688
3689 Another approach would be to make the range information a first
3690 class property of the SSA_NAME so that it can be queried from
3691 any pass. This is made somewhat more complex by the need for
3692 multiple ranges to be associated with one SSA_NAME. */
3693
3694 void
3695 vrp_asserts::remove_range_assertions ()
3696 {
3697 basic_block bb;
3698 gimple_stmt_iterator si;
3699 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
3700 a basic block preceded by a GIMPLE_COND branching to it and
3701 __builtin_trap, -1 if not yet checked, 0 otherwise. */
3702 int is_unreachable;
3703
3704 /* Note that the GSI iterator bump happens at the bottom of the
3705 loop and no bump is necessary if we're removing the statement
3706 referenced by the current GSI. */
3707 FOR_EACH_BB_FN (bb, fun)
3708 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
3709 {
3710 gimple *stmt = gsi_stmt (si);
3711
3712 if (is_gimple_assign (stmt)
3713 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
3714 {
3715 tree lhs = gimple_assign_lhs (stmt);
3716 tree rhs = gimple_assign_rhs1 (stmt);
3717 tree var;
3718
3719 var = ASSERT_EXPR_VAR (rhs);
3720
3721 if (TREE_CODE (var) == SSA_NAME
3722 && !POINTER_TYPE_P (TREE_TYPE (lhs))
3723 && SSA_NAME_RANGE_INFO (lhs))
3724 {
3725 if (is_unreachable == -1)
3726 {
3727 is_unreachable = 0;
3728 if (single_pred_p (bb)
3729 && assert_unreachable_fallthru_edge_p
3730 (single_pred_edge (bb)))
3731 is_unreachable = 1;
3732 }
3733 /* Handle
3734 if (x_7 >= 10 && x_7 < 20)
3735 __builtin_unreachable ();
3736 x_8 = ASSERT_EXPR <x_7, ...>;
3737 if the only uses of x_7 are in the ASSERT_EXPR and
3738 in the condition. In that case, we can copy the
3739 range info from x_8 computed in this pass also
3740 for x_7. */
3741 if (is_unreachable
3742 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
3743 single_pred (bb)))
3744 {
3745 if (SSA_NAME_RANGE_INFO (var))
3746 {
3747 /* ?? This is a minor wart exposing the
3748 internals of SSA_NAME_RANGE_INFO in order
3749 to maintain existing behavior. This is
3750 because duplicate_ssa_name_range_info below
3751 needs a NULL destination range. This is
3752 all slated for removal... */
3753 ggc_free (SSA_NAME_RANGE_INFO (var));
3754 SSA_NAME_RANGE_INFO (var) = NULL;
3755 }
3756 duplicate_ssa_name_range_info (var, lhs);
3757 maybe_set_nonzero_bits (single_pred_edge (bb), var);
3758 }
3759 }
3760
3761 /* Propagate the RHS into every use of the LHS. For SSA names
3762 also propagate abnormals as it merely restores the original
3763 IL in this case (a replace_uses_by would assert). */
3764 if (TREE_CODE (var) == SSA_NAME)
3765 {
3766 imm_use_iterator iter;
3767 use_operand_p use_p;
3768 gimple *use_stmt;
3769 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3770 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3771 SET_USE (use_p, var);
3772 }
3773 else
3774 replace_uses_by (lhs, var);
3775
3776 /* And finally, remove the copy, it is not needed. */
3777 gsi_remove (&si, true);
3778 release_defs (stmt);
3779 }
3780 else
3781 {
3782 if (!is_gimple_debug (gsi_stmt (si)))
3783 is_unreachable = 0;
3784 gsi_next (&si);
3785 }
3786 }
3787 }
3788
3789 class vrp_prop : public ssa_propagation_engine
3790 {
3791 public:
3792 vrp_prop (vr_values *v)
3793 : ssa_propagation_engine (),
3794 m_vr_values (v) { }
3795
3796 void initialize (struct function *);
3797 void finalize ();
3798
3799 private:
3800 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) final override;
3801 enum ssa_prop_result visit_phi (gphi *) final override;
3802
3803 struct function *fun;
3804 vr_values *m_vr_values;
3805 };
3806
3807 /* Initialization required by ssa_propagate engine. */
3808
3809 void
3810 vrp_prop::initialize (struct function *fn)
3811 {
3812 basic_block bb;
3813 fun = fn;
3814
3815 FOR_EACH_BB_FN (bb, fun)
3816 {
3817 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3818 gsi_next (&si))
3819 {
3820 gphi *phi = si.phi ();
3821 if (!stmt_interesting_for_vrp (phi))
3822 {
3823 tree lhs = PHI_RESULT (phi);
3824 m_vr_values->set_def_to_varying (lhs);
3825 prop_set_simulate_again (phi, false);
3826 }
3827 else
3828 prop_set_simulate_again (phi, true);
3829 }
3830
3831 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
3832 gsi_next (&si))
3833 {
3834 gimple *stmt = gsi_stmt (si);
3835
3836 /* If the statement is a control insn, make sure it is
3837 simulated at least once. Failing to do so means its
3838 outgoing edges will never get added. */
3839 if (stmt_ends_bb_p (stmt))
3840 prop_set_simulate_again (stmt, true);
3841 else if (!stmt_interesting_for_vrp (stmt))
3842 {
3843 m_vr_values->set_defs_to_varying (stmt);
3844 prop_set_simulate_again (stmt, false);
3845 }
3846 else
3847 prop_set_simulate_again (stmt, true);
3848 }
3849 }
3850 }
3851
3852 /* Evaluate statement STMT. If the statement produces a useful range,
3853 return SSA_PROP_INTERESTING and record the SSA name with the
3854 interesting range into *OUTPUT_P.
3855
3856 If STMT is a conditional branch and we can determine its truth
3857 value, the taken edge is recorded in *TAKEN_EDGE_P.
3858
3859 If STMT produces a varying value, return SSA_PROP_VARYING. */
3860
3861 enum ssa_prop_result
3862 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
3863 {
3864 tree lhs = gimple_get_lhs (stmt);
3865 value_range_equiv vr;
3866 m_vr_values->extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
3867
3868 if (*output_p)
3869 {
3870 if (m_vr_values->update_value_range (*output_p, &vr))
3871 {
3872 if (dump_file && (dump_flags & TDF_DETAILS))
3873 {
3874 fprintf (dump_file, "Found new range for ");
3875 print_generic_expr (dump_file, *output_p);
3876 fprintf (dump_file, ": ");
3877 dump_value_range (dump_file, &vr);
3878 fprintf (dump_file, "\n");
3879 }
3880
3881 if (vr.varying_p ())
3882 return SSA_PROP_VARYING;
3883
3884 return SSA_PROP_INTERESTING;
3885 }
3886 return SSA_PROP_NOT_INTERESTING;
3887 }
3888
3889 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
3890 switch (gimple_call_internal_fn (stmt))
3891 {
3892 case IFN_ADD_OVERFLOW:
3893 case IFN_SUB_OVERFLOW:
3894 case IFN_MUL_OVERFLOW:
3895 case IFN_ATOMIC_COMPARE_EXCHANGE:
3896 /* These internal calls return a _Complex integer type,
3897 which VRP does not track, but the immediate uses
3898 thereof might be interesting. */
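/* An illustrative sketch (not from an actual dump):
     _5 = .ADD_OVERFLOW (a_2, b_3);
     sum_6 = REALPART_EXPR <_5>;
     ovf_7 = IMAGPART_EXPR <_5>;
   _5 itself is set to varying below, but the ranges of sum_6 and
   ovf_7 may still be refined from the operand ranges.  */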
3899 if (lhs && TREE_CODE (lhs) == SSA_NAME)
3900 {
3901 imm_use_iterator iter;
3902 use_operand_p use_p;
3903 enum ssa_prop_result res = SSA_PROP_VARYING;
3904
3905 m_vr_values->set_def_to_varying (lhs);
3906
3907 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3908 {
3909 gimple *use_stmt = USE_STMT (use_p);
3910 if (!is_gimple_assign (use_stmt))
3911 continue;
3912 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
3913 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
3914 continue;
3915 tree rhs1 = gimple_assign_rhs1 (use_stmt);
3916 tree use_lhs = gimple_assign_lhs (use_stmt);
3917 if (TREE_CODE (rhs1) != rhs_code
3918 || TREE_OPERAND (rhs1, 0) != lhs
3919 || TREE_CODE (use_lhs) != SSA_NAME
3920 || !stmt_interesting_for_vrp (use_stmt)
3921 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
3922 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
3923 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
3924 continue;
3925
3926 /* If there is a change in the value range for any of the
3927 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
3928 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
3929 or IMAGPART_EXPR immediate uses, but none of them have
3930 a change in their value ranges, return
3931 SSA_PROP_NOT_INTERESTING. If there are no
3932 {REAL,IMAG}PART_EXPR uses at all,
3933 return SSA_PROP_VARYING. */
3934 value_range_equiv new_vr;
3935 m_vr_values->extract_range_basic (&new_vr, use_stmt);
3936 const value_range_equiv *old_vr
3937 = m_vr_values->get_value_range (use_lhs);
3938 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
3939 res = SSA_PROP_INTERESTING;
3940 else
3941 res = SSA_PROP_NOT_INTERESTING;
3942 new_vr.equiv_clear ();
3943 if (res == SSA_PROP_INTERESTING)
3944 {
3945 *output_p = lhs;
3946 return res;
3947 }
3948 }
3949
3950 return res;
3951 }
3952 break;
3953 default:
3954 break;
3955 }
3956
3957 /* All other statements produce nothing of interest for VRP, so mark
3958 their outputs varying and prevent further simulation. */
3959 m_vr_values->set_defs_to_varying (stmt);
3960
3961 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
3962 }
3963
3964 /* Visit all arguments for PHI node PHI that flow through executable
3965 edges. If a valid value range can be derived from all the incoming
3966 value ranges, set a new range for the LHS of PHI. */
3967
3968 enum ssa_prop_result
3969 vrp_prop::visit_phi (gphi *phi)
3970 {
3971 tree lhs = PHI_RESULT (phi);
3972 value_range_equiv vr_result;
3973 m_vr_values->extract_range_from_phi_node (phi, &vr_result);
3974 if (m_vr_values->update_value_range (lhs, &vr_result))
3975 {
3976 if (dump_file && (dump_flags & TDF_DETAILS))
3977 {
3978 fprintf (dump_file, "Found new range for ");
3979 print_generic_expr (dump_file, lhs);
3980 fprintf (dump_file, ": ");
3981 dump_value_range (dump_file, &vr_result);
3982 fprintf (dump_file, "\n");
3983 }
3984
3985 if (vr_result.varying_p ())
3986 return SSA_PROP_VARYING;
3987
3988 return SSA_PROP_INTERESTING;
3989 }
3990
3991 /* Nothing changed, don't add outgoing edges. */
3992 return SSA_PROP_NOT_INTERESTING;
3993 }
3994
3995 /* Traverse all the blocks folding conditionals with known ranges. */
3996
3997 void
3998 vrp_prop::finalize ()
3999 {
4000 size_t i;
4001
4002 /* We have completed propagating through the lattice. */
4003 m_vr_values->set_lattice_propagation_complete ();
4004
4005 if (dump_file)
4006 {
4007 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
4008 m_vr_values->dump (dump_file);
4009 fprintf (dump_file, "\n");
4010 }
4011
4012 /* Record global range info: nonnull for pointer SSA_NAMEs, value ranges for the rest. */
4013 for (i = 0; i < num_ssa_names; i++)
4014 {
4015 tree name = ssa_name (i);
4016 if (!name)
4017 continue;
4018
4019 const value_range_equiv *vr = m_vr_values->get_value_range (name);
4020 if (vr->varying_p () || !vr->constant_p ())
4021 continue;
4022
4023 if (POINTER_TYPE_P (TREE_TYPE (name))
4024 && range_includes_zero_p (vr) == 0)
4025 set_ptr_nonnull (name);
4026 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
4027 set_range_info (name, *vr);
4028 }
4029 }
4030
4031 class vrp_folder : public substitute_and_fold_engine
4032 {
4033 public:
4034 vrp_folder (vr_values *v)
4035 : substitute_and_fold_engine (/* Fold all stmts. */ true),
4036 m_vr_values (v), simplifier (v)
4037 { }
4038 void simplify_casted_conds (function *fun);
4039
4040 private:
4041 tree value_of_expr (tree name, gimple *stmt) override
4042 {
4043 return m_vr_values->value_of_expr (name, stmt);
4044 }
4045 bool fold_stmt (gimple_stmt_iterator *) final override;
4046 bool fold_predicate_in (gimple_stmt_iterator *);
4047
4048 vr_values *m_vr_values;
4049 simplify_using_ranges simplifier;
4050 };
4051
4052 /* If the statement pointed by SI has a predicate whose value can be
4053 computed using the value range information computed by VRP, compute
4054 its value and return true. Otherwise, return false. */
4055
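/* A minimal illustration (not from an actual dump): if x_3 is known to
   have the range [10, 20], then

     if (x_3 > 5)

   is made unconditionally true, and the assignment

     _7 = x_3 > 5;

   is rewritten as _7 = 1.  */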
4056 bool
4057 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
4058 {
4059 bool assignment_p = false;
4060 tree val;
4061 gimple *stmt = gsi_stmt (*si);
4062
4063 if (is_gimple_assign (stmt)
4064 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
4065 {
4066 assignment_p = true;
4067 val = simplifier.vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
4068 gimple_assign_rhs1 (stmt),
4069 gimple_assign_rhs2 (stmt),
4070 stmt);
4071 }
4072 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4073 val = simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4074 gimple_cond_lhs (cond_stmt),
4075 gimple_cond_rhs (cond_stmt),
4076 stmt);
4077 else
4078 return false;
4079
4080 if (val)
4081 {
4082 if (assignment_p)
4083 val = fold_convert (TREE_TYPE (gimple_assign_lhs (stmt)), val);
4084
4085 if (dump_file)
4086 {
4087 fprintf (dump_file, "Folding predicate ");
4088 print_gimple_expr (dump_file, stmt, 0);
4089 fprintf (dump_file, " to ");
4090 print_generic_expr (dump_file, val);
4091 fprintf (dump_file, "\n");
4092 }
4093
4094 if (is_gimple_assign (stmt))
4095 gimple_assign_set_rhs_from_tree (si, val);
4096 else
4097 {
4098 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
4099 gcond *cond_stmt = as_a <gcond *> (stmt);
4100 if (integer_zerop (val))
4101 gimple_cond_make_false (cond_stmt);
4102 else if (integer_onep (val))
4103 gimple_cond_make_true (cond_stmt);
4104 else
4105 gcc_unreachable ();
4106 }
4107
4108 return true;
4109 }
4110
4111 return false;
4112 }
4113
4114 /* Callback for substitute_and_fold folding the stmt at *SI. */
4115
4116 bool
4117 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
4118 {
4119 if (fold_predicate_in (si))
4120 return true;
4121
4122 return simplifier.simplify (si);
4123 }
4124
4125 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
4126 was set by a type conversion can often be rewritten to use the RHS
4127 of the type conversion. Do this optimization for all conditionals
4128 in FUN. */
4129
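/* For instance (an illustrative sketch), given a comparison of a
   widened value against a constant:

     _1 = (int) s_2;
     if (_1 == 5)

   the condition can often be rewritten to test s_2 directly against a
   constant of its own (narrower) type:

     if (s_2 == 5)  */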
4130 void
4131 vrp_folder::simplify_casted_conds (function *fun)
4132 {
4133 basic_block bb;
4134 FOR_EACH_BB_FN (bb, fun)
4135 {
4136 gimple *last = last_stmt (bb);
4137 if (last && gimple_code (last) == GIMPLE_COND)
4138 {
4139 if (simplifier.simplify_casted_cond (as_a <gcond *> (last)))
4140 {
4141 if (dump_file && (dump_flags & TDF_DETAILS))
4142 {
4143 fprintf (dump_file, "Folded into: ");
4144 print_gimple_stmt (dump_file, last, 0, TDF_SLIM);
4145 fprintf (dump_file, "\n");
4146 }
4147 }
4148 }
4149 }
4150 }
4151
4152 /* Main entry point to VRP (Value Range Propagation). This pass is
4153 loosely based on J. R. C. Patterson, ``Accurate Static Branch
4154 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
4155 Programming Language Design and Implementation, pp. 67-78, 1995.
4156 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
4157
4158 This is essentially an SSA-CCP pass modified to deal with ranges
4159 instead of constants.
4160
4161 While propagating ranges, we may find that two or more SSA names
4162 have equivalent, though distinct ranges. For instance,
4163
4164 1 x_9 = p_3->a;
4165 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
4166 3 if (p_4 == q_2)
4167 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
4168 5 endif
4169 6 if (q_2)
4170
4171 In the code above, pointer p_5 has range [q_2, q_2], but from the
4172 code we can also determine that p_5 cannot be NULL and, if q_2 had
4173 a non-varying range, p_5's range should also be compatible with it.
4174
4175 These equivalences are created by two expressions: ASSERT_EXPR and
4176 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
4177 result of another assertion, then we can use the fact that p_5 and
4178 p_4 are equivalent when evaluating p_5's range.
4179
4180 Together with value ranges, we also propagate these equivalences
4181 between names so that we can take advantage of information from
4182 multiple ranges when doing final replacement. Note that this
4183 equivalency relation is transitive but not symmetric.
4184
4185 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
4186 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
4187 in contexts where that assertion does not hold (e.g., in line 6).
4188
4189 TODO, the main difference between this pass and Patterson's is that
4190 we do not propagate edge probabilities. We only compute whether
4191 edges can be taken or not. That is, instead of having a spectrum
4192 of jump probabilities between 0 and 1, we only deal with 0, 1 and
4193 DON'T KNOW. In the future, it may be worthwhile to propagate
4194 probabilities to aid branch prediction. */
4195
4196 static unsigned int
4197 execute_vrp (struct function *fun, bool warn_array_bounds_p)
4198 {
4199 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4200 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4201 scev_initialize ();
4202
4203 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
4204 Inserting assertions may split edges which will invalidate
4205 EDGE_DFS_BACK. */
4206 vrp_asserts assert_engine (fun);
4207 assert_engine.insert_range_assertions ();
4208
4209 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
4210 mark_dfs_back_edges ();
4211
4212 vr_values vrp_vr_values;
4213
4214 class vrp_prop vrp_prop (&vrp_vr_values);
4215 vrp_prop.initialize (fun);
4216 vrp_prop.ssa_propagate ();
4217
4218 /* Instantiate the folder here, so that edge cleanups happen at the
4219 end of this function. */
4220 vrp_folder folder (&vrp_vr_values);
4221 vrp_prop.finalize ();
4222
4223 /* If we're checking array refs, we want to merge information on
4224 the executability of each edge between vrp_folder and the
4225 check_array_bounds_dom_walker: each can clear the
4226 EDGE_EXECUTABLE flag on edges, in different ways.
4227
4228 Hence, if we're going to call check_all_array_refs, set
4229 the flag on every edge now, rather than in
4230 check_array_bounds_dom_walker's ctor; vrp_folder may clear
4231 it from some edges. */
4232 if (warn_array_bounds && warn_array_bounds_p)
4233 set_all_edges_as_executable (fun);
4234
4235 folder.substitute_and_fold ();
4236
4237 if (warn_array_bounds && warn_array_bounds_p)
4238 {
4239 array_bounds_checker array_checker (fun, &vrp_vr_values);
4240 array_checker.check ();
4241 }
4242
4243 folder.simplify_casted_conds (fun);
4244
4245 free_numbers_of_iterations_estimates (fun);
4246
4247 assert_engine.remove_range_assertions ();
4248
4249 scev_finalize ();
4250 loop_optimizer_finalize ();
4251 return 0;
4252 }
4253
4254 // This is a ranger based folder which continues to use the dominator
4255 // walk to access the substitute and fold machinery. Ranges are calculated
4256 // on demand.
4257
4258 class rvrp_folder : public substitute_and_fold_engine
4259 {
4260 public:
4261
4262 rvrp_folder (gimple_ranger *r) : substitute_and_fold_engine (),
4263 m_simplifier (r, r->non_executable_edge_flag)
4264 {
4265 m_ranger = r;
4266 m_pta = new pointer_equiv_analyzer (m_ranger);
4267 }
4268
4269 ~rvrp_folder ()
4270 {
4271 delete m_pta;
4272 }
4273
4274 tree value_of_expr (tree name, gimple *s = NULL) override
4275 {
4276 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
4277 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4278 return NULL;
4279 tree ret = m_ranger->value_of_expr (name, s);
4280 if (!ret && supported_pointer_equiv_p (name))
4281 ret = m_pta->get_equiv (name);
4282 return ret;
4283 }
4284
4285 tree value_on_edge (edge e, tree name) override
4286 {
4287 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
4288 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4289 return NULL;
4290 tree ret = m_ranger->value_on_edge (e, name);
4291 if (!ret && supported_pointer_equiv_p (name))
4292 ret = m_pta->get_equiv (name);
4293 return ret;
4294 }
4295
4296 tree value_of_stmt (gimple *s, tree name = NULL) override
4297 {
4298 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
4299 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4300 return NULL;
4301 return m_ranger->value_of_stmt (s, name);
4302 }
4303
4304 void pre_fold_bb (basic_block bb) override
4305 {
4306 m_pta->enter (bb);
4307 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
4308 gsi_next (&gsi))
4309 m_ranger->register_inferred_ranges (gsi.phi ());
4310 }
4311
4312 void post_fold_bb (basic_block bb) override
4313 {
4314 m_pta->leave (bb);
4315 }
4316
4317 void pre_fold_stmt (gimple *stmt) override
4318 {
4319 m_pta->visit_stmt (stmt);
4320 }
4321
4322 bool fold_stmt (gimple_stmt_iterator *gsi) override
4323 {
4324 bool ret = m_simplifier.simplify (gsi);
4325 if (!ret)
4326 ret = m_ranger->fold_stmt (gsi, follow_single_use_edges);
4327 m_ranger->register_inferred_ranges (gsi_stmt (*gsi));
4328 return ret;
4329 }
4330
4331 private:
4332 DISABLE_COPY_AND_ASSIGN (rvrp_folder);
4333 gimple_ranger *m_ranger;
4334 simplify_using_ranges m_simplifier;
4335 pointer_equiv_analyzer *m_pta;
4336 };
4337
4338 /* Main entry point for a VRP pass using just ranger. This can be called
4339 from anywhere to perform a VRP pass, including from EVRP. */
4340
4341 unsigned int
4342 execute_ranger_vrp (struct function *fun, bool warn_array_bounds_p)
4343 {
4344 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4345 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4346 scev_initialize ();
4347 calculate_dominance_info (CDI_DOMINATORS);
4348
4349 set_all_edges_as_executable (fun);
4350 gimple_ranger *ranger = enable_ranger (fun, false);
4351 rvrp_folder folder (ranger);
4352 folder.substitute_and_fold ();
4353 if (dump_file && (dump_flags & TDF_DETAILS))
4354 ranger->dump (dump_file);
4355
4356 if (warn_array_bounds && warn_array_bounds_p)
4357 {
4358 // Set all edges as executable, except those ranger says aren't.
4359 int non_exec_flag = ranger->non_executable_edge_flag;
4360 basic_block bb;
4361 FOR_ALL_BB_FN (bb, fun)
4362 {
4363 edge_iterator ei;
4364 edge e;
4365 FOR_EACH_EDGE (e, ei, bb->succs)
4366 if (e->flags & non_exec_flag)
4367 e->flags &= ~EDGE_EXECUTABLE;
4368 else
4369 e->flags |= EDGE_EXECUTABLE;
4370 }
4371 scev_reset ();
4372 array_bounds_checker array_checker (fun, ranger);
4373 array_checker.check ();
4374 }
4375
4376 disable_ranger (fun);
4377 scev_finalize ();
4378 loop_optimizer_finalize ();
4379 return 0;
4380 }
4381
4382 namespace {
4383
4384 const pass_data pass_data_vrp =
4385 {
4386 GIMPLE_PASS, /* type */
4387 "vrp", /* name */
4388 OPTGROUP_NONE, /* optinfo_flags */
4389 TV_TREE_VRP, /* tv_id */
4390 PROP_ssa, /* properties_required */
4391 0, /* properties_provided */
4392 0, /* properties_destroyed */
4393 0, /* todo_flags_start */
4394 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
4395 };
4396
4397 const pass_data pass_data_early_vrp =
4398 {
4399 GIMPLE_PASS, /* type */
4400 "evrp", /* name */
4401 OPTGROUP_NONE, /* optinfo_flags */
4402 TV_TREE_EARLY_VRP, /* tv_id */
4403 PROP_ssa, /* properties_required */
4404 0, /* properties_provided */
4405 0, /* properties_destroyed */
4406 0, /* todo_flags_start */
4407 ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ), /* todo_flags_finish */
4408 };
4409
4410 static int vrp_pass_num = 0;
4411 class pass_vrp : public gimple_opt_pass
4412 {
4413 public:
4414 pass_vrp (gcc::context *ctxt, const pass_data &data_)
4415 : gimple_opt_pass (data_, ctxt), data (data_), warn_array_bounds_p (false),
4416 my_pass (vrp_pass_num++)
4417 {}
4418
4419 /* opt_pass methods: */
4420 opt_pass * clone () final override { return new pass_vrp (m_ctxt, data); }
4421 void set_pass_param (unsigned int n, bool param) final override
4422 {
4423 gcc_assert (n == 0);
4424 warn_array_bounds_p = param;
4425 }
4426 bool gate (function *) final override { return flag_tree_vrp != 0; }
4427 unsigned int execute (function *fun) final override
4428 {
4429 // Early VRP pass.
4430 if (my_pass == 0)
4431 return execute_ranger_vrp (fun, /*warn_array_bounds_p=*/false);
4432
4433 if ((my_pass == 1 && param_vrp1_mode == VRP_MODE_RANGER)
4434 || (my_pass == 2 && param_vrp2_mode == VRP_MODE_RANGER))
4435 return execute_ranger_vrp (fun, warn_array_bounds_p);
4436 return execute_vrp (fun, warn_array_bounds_p);
4437 }
4438
4439 private:
4440 const pass_data &data;
4441 bool warn_array_bounds_p;
4442 int my_pass;
4443 }; // class pass_vrp
4444
4445 const pass_data pass_data_assumptions =
4446 {
4447 GIMPLE_PASS, /* type */
4448 "assumptions", /* name */
4449 OPTGROUP_NONE, /* optinfo_flags */
4450 TV_TREE_ASSUMPTIONS, /* tv_id */
4451 PROP_ssa, /* properties_required */
4452 PROP_assumptions_done, /* properties_provided */
4453 0, /* properties_destroyed */
4454 0, /* todo_flags_start */
4455 0, /* todo_flags_finish */
4456 };
4457
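/* An illustrative sketch of the intent (not an actual dump): for an
   assumption such as [[assume (x > 0 && x < 10)]] the condition is
   outlined by the compiler into an artificial "assume function" that
   takes x as a parameter.  This pass runs on that artificial function
   and, for each parameter the assumption constrains, records the
   deduced global range (here [1, 9] for a signed int x) on the
   parameter's default definition so later queries of the assumption
   can use it.  */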
4458 class pass_assumptions : public gimple_opt_pass
4459 {
4460 public:
4461 pass_assumptions (gcc::context *ctxt)
4462 : gimple_opt_pass (pass_data_assumptions, ctxt)
4463 {}
4464
4465 /* opt_pass methods: */
4466 bool gate (function *fun) final override { return fun->assume_function; }
4467 unsigned int execute (function *) final override
4468 {
4469 assume_query query;
4470 if (dump_file)
4471 fprintf (dump_file, "Assumptions :\n--------------\n");
4472
4473 for (tree arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
4474 {
4475 tree name = ssa_default_def (cfun, arg);
4476 if (!name || !gimple_range_ssa_p (name))
4477 continue;
4478 tree type = TREE_TYPE (name);
4479 if (!Value_Range::supports_type_p (type))
4480 continue;
4481 Value_Range assume_range (type);
4482 if (query.assume_range_p (assume_range, name))
4483 {
4484 // Set the global range of NAME to anything calculated.
4485 set_range_info (name, assume_range);
4486 if (dump_file)
4487 {
4488 print_generic_expr (dump_file, name, TDF_SLIM);
4489 fprintf (dump_file, " -> ");
4490 assume_range.dump (dump_file);
4491 fputc ('\n', dump_file);
4492 }
4493 }
4494 }
4495 if (dump_file)
4496 {
4497 fputc ('\n', dump_file);
4498 gimple_dump_cfg (dump_file, dump_flags & ~TDF_DETAILS);
4499 if (dump_flags & TDF_DETAILS)
4500 query.dump (dump_file);
4501 }
4502 return TODO_discard_function;
4503 }
4504
4505 }; // class pass_assumptions
4506
4507 } // anon namespace
4508
4509 gimple_opt_pass *
4510 make_pass_vrp (gcc::context *ctxt)
4511 {
4512 return new pass_vrp (ctxt, pass_data_vrp);
4513 }
4514
4515 gimple_opt_pass *
4516 make_pass_early_vrp (gcc::context *ctxt)
4517 {
4518 return new pass_vrp (ctxt, pass_data_early_vrp);
4519 }
4520
4521 gimple_opt_pass *
4522 make_pass_assumptions (gcc::context *ctx)
4523 {
4524 return new pass_assumptions (ctx);
4525 }