/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "internal-fn.h"
#include "gimple-range.h"
/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

class bounds
{
public:
  mpz_t below, up;
};
/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
  tree type = TREE_TYPE (expr);

  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)

      /* Always sign extend the offset.  */
      wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
      mpz_neg (offset, offset);

      *var = build_int_cst_type (type, 0);
      wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
/* From condition C0 CMP C1 derives information regarding the value range
   of VAR, which is of TYPE.  Results are stored in to BELOW and UP.  */

refine_value_range_using_guard (tree type, tree var,
                                tree c0, enum tree_code cmp, tree c1,
                                mpz_t below, mpz_t up)
  tree varc0, varc1, ctype;
  mpz_t mint, maxt, minc1, maxc1;
  bool no_wrap = nowrap_type_p (type);
  signop sgn = TYPE_SIGN (type);

  STRIP_SIGN_NOPS (c0);
  STRIP_SIGN_NOPS (c1);
  ctype = TREE_TYPE (c0);
  if (!useless_type_conversion_p (ctype, type))
  /* We could derive quite precise information from EQ_EXPR, however,
     such a guard is unlikely to appear, so we do not bother with
     handling it.  */

  /* NE_EXPR comparisons do not contain much of useful information,
     except for cases of comparing with bounds.  */
  if (TREE_CODE (c1) != INTEGER_CST
      || !INTEGRAL_TYPE_P (type))
  /* Ensure that the condition speaks about an expression in the same
     type.  */
  ctype = TREE_TYPE (c0);
  if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
  c0 = fold_convert (type, c0);
  c1 = fold_convert (type, c1);

  if (operand_equal_p (var, c0, 0))
      /* Case of comparing VAR with its below/up bounds.  */
      wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
      if (mpz_cmp (valc1, below) == 0)
      if (mpz_cmp (valc1, up) == 0)

      /* Case of comparing with the bounds of the type.  */
      wide_int min = wi::min_value (type);
      wide_int max = wi::max_value (type);

      if (wi::to_wide (c1) == min)
      if (wi::to_wide (c1) == max)

  /* Quick return if no useful information.  */
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VAR.  */
  if (operand_equal_p (var, varc1, 0))
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
  else if (!operand_equal_p (var, varc0, 0))

  get_type_static_bounds (type, mint, maxt);

  Value_Range r (TREE_TYPE (varc1));
  /* Setup range information for varc1.  */
  if (integer_zerop (varc1))
      wi::to_mpz (0, minc1, TYPE_SIGN (type));
      wi::to_mpz (0, maxc1, TYPE_SIGN (type));
  else if (TREE_CODE (varc1) == SSA_NAME
           && INTEGRAL_TYPE_P (type)
           && get_range_query (cfun)->range_of_expr (r, varc1)
      gcc_assert (wi::le_p (r.lower_bound (), r.upper_bound (), sgn));
      wi::to_mpz (r.lower_bound (), minc1, sgn);
      wi::to_mpz (r.upper_bound (), maxc1, sgn);
      mpz_set (minc1, mint);
      mpz_set (maxc1, maxt);

  /* Compute valid range information for varc1 + offc1.  Note nothing
     useful can be derived if it overflows or underflows.  Overflow or
     underflow could happen when:

       offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
       offc1 < 0 && varc1 + offc1 < MIN_VAL (type).  */
  mpz_add (minc1, minc1, offc1);
  mpz_add (maxc1, maxc1, offc1);
              || mpz_sgn (offc1) == 0
              || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
              || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
  if (mpz_cmp (minc1, mint) < 0)
    mpz_set (minc1, mint);
  if (mpz_cmp (maxc1, maxt) > 0)
    mpz_set (maxc1, maxt);
      mpz_sub_ui (maxc1, maxc1, 1);
      mpz_add_ui (minc1, minc1, 1);
  /* Compute range information for varc0.  If there is no overflow,
     the condition implied that

       (varc0) cmp (varc1 + offc1 - offc0)

     We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
     or the below bound if cmp is GE_EXPR.

     To prove there is no overflow/underflow, we need to check below
     four cases specially:

       1) cmp == LE_EXPR && offc0 > 0

          (varc0 + offc0) doesn't overflow
          && (varc1 + offc1 - offc0) doesn't underflow

       2) cmp == LE_EXPR && offc0 < 0

          (varc0 + offc0) doesn't underflow
          && (varc1 + offc1 - offc0) doesn't overflow

          In this case, (varc0 + offc0) will never underflow if we can
          prove (varc1 + offc1 - offc0) doesn't overflow.

       3) cmp == GE_EXPR && offc0 < 0

          (varc0 + offc0) doesn't underflow
          && (varc1 + offc1 - offc0) doesn't overflow

       4) cmp == GE_EXPR && offc0 > 0

          (varc0 + offc0) doesn't overflow
          && (varc1 + offc1 - offc0) doesn't underflow

          In this case, (varc0 + offc0) will never overflow if we can
          prove (varc1 + offc1 - offc0) doesn't underflow.

     Note we only handle case 2 and 4 in below code.  */
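  /* A hypothetical example of case 2 above: for a guard "x - 3 <= y + 1"
     we have offc0 == -3, offc1 == 1 and cmp == LE_EXPR, so the guard gives
     x <= y + 1 - (-3) == y + 4, i.e. UP for x can be tightened towards
     maxc1 + 4, provided the computation of y + 4 does not overflow.  */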
  mpz_sub (minc1, minc1, offc0);
  mpz_sub (maxc1, maxc1, offc0);
              || mpz_sgn (offc0) == 0
               && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
               && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));

      if (mpz_cmp (up, maxc1) > 0)
      if (mpz_cmp (below, minc1) < 0)
        mpz_set (below, minc1);
/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

determine_value_range (class loop *loop, tree type, tree var, mpz_t off,
                       mpz_t min, mpz_t max)
  enum value_range_kind rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
  get_type_static_bounds (type, min, max);
  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);

      /* Either for VAR itself...  */
      Value_Range var_range (TREE_TYPE (var));
      get_range_query (cfun)->range_of_expr (var_range, var);
      if (var_range.varying_p () || var_range.undefined_p ())
      if (!var_range.undefined_p ())
          minv = var_range.lower_bound ();
          maxv = var_range.upper_bound ();

      /* Or for PHI results in loop->header where VAR is used as
         PHI argument from the loop preheader edge.  */
      Value_Range phi_range (TREE_TYPE (var));
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
          gphi *phi = gsi.phi ();
          if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
              && get_range_query (cfun)->range_of_expr (phi_range,
                                                        gimple_phi_result (phi))
              && !phi_range.varying_p ()
              && !phi_range.undefined_p ())
              if (rtype != VR_RANGE)
                  minv = phi_range.lower_bound ();
                  maxv = phi_range.upper_bound ();
                  minv = wi::max (minv, phi_range.lower_bound (), sgn);
                  maxv = wi::min (maxv, phi_range.upper_bound (), sgn);
                  /* If the PHI result range is inconsistent with
                     the VAR range, give up on looking at the PHI
                     results.  This can happen if VR_UNDEFINED is
                     involved.  */
                  if (wi::gt_p (minv, maxv, sgn))
                      Value_Range vr (TREE_TYPE (var));
                      get_range_query (cfun)->range_of_expr (vr, var);
                      if (vr.varying_p () || vr.undefined_p ())
                      if (!vr.undefined_p ())
                          minv = vr.lower_bound ();
                          maxv = vr.upper_bound ();

      if (rtype != VR_RANGE)
          gcc_assert (wi::le_p (minv, maxv, sgn));
          wi::to_mpz (minv, minm, sgn);
          wi::to_mpz (maxv, maxm, sgn);
      /* Now walk the dominators of the loop header and use the entry
         guards to refine the estimates.  */
      for (bb = loop->header;
           bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
           bb = get_immediate_dominator (CDI_DOMINATORS, bb))
          if (!single_pred_p (bb))
          e = single_pred_edge (bb);
          if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))

          gcond *cond = as_a <gcond *> (*gsi_last_bb (e->src));
          c0 = gimple_cond_lhs (cond);
          cmp = gimple_cond_code (cond);
          c1 = gimple_cond_rhs (cond);

          if (e->flags & EDGE_FALSE_VALUE)
            cmp = invert_tree_comparison (cmp, false);

          refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
      mpz_add (minm, minm, off);
      mpz_add (maxm, maxm, off);
      /* If the computation may not wrap or off is zero, then this
         is always fine.  If off is negative and minv + off isn't
         smaller than type's minimum, or off is positive and
         maxv + off isn't bigger than type's maximum, use the more
         precise range too.  */
      if (nowrap_type_p (type)
          || mpz_sgn (off) == 0
          || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
          || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
    mpz_add (min, min, off);
/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
                                    bounds *bnds)
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
          them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
          expressions are var + X - M and var + Y, where M is the range of
          the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Their difference again
          is X - Y - M.
     Therefore, if the arithmetic in type does not overflow, then the
     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */
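  /* A hypothetical illustration: in a wrapping unsigned char type (M == 256)
     with offsets X == 10 and Y == 3, the difference of var + 10 and var + 3
     is either 7 (neither or both additions wrap) or 7 - 256 == -249 (only
     var + 10 wraps), so the bounds recorded here would be (-249, 7).  */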
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);

      wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
      mpz_add_ui (m, m, 1);
      mpz_sub (bnds->up, x, y);
      mpz_set (bnds->below, bnds->up);
            mpz_sub (bnds->below, bnds->below, m);
            mpz_add (bnds->up, bnds->up, m);
/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
                           tree vary, mpz_t offy,
                           tree c0, enum tree_code cmp, tree c1,
                           bounds *bnds)
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool no_wrap = nowrap_type_p (type);

  STRIP_SIGN_NOPS (c0);
  STRIP_SIGN_NOPS (c1);
  ctype = TREE_TYPE (c0);
  if (!useless_type_conversion_p (ctype, type))
  /* We could derive quite precise information from EQ_EXPR, however, such
     a guard is unlikely to appear, so we do not bother with handling
     it.  */

  /* NE_EXPR comparisons do not contain much of useful information, except for
     special case of comparing with the bounds of the type.  */
  if (TREE_CODE (c1) != INTEGER_CST
      || !INTEGRAL_TYPE_P (type))

  /* Ensure that the condition speaks about an expression in the same type
     as VARX and VARY.  */
  ctype = TREE_TYPE (c0);
  if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
  c0 = fold_convert (type, c0);
  c1 = fold_convert (type, c1);

  if (TYPE_MIN_VALUE (type)
      && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
  if (TYPE_MAX_VALUE (type)
      && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))

  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
      std::swap (varx, vary);
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
        VARX + OFFC0 overflows, but VARX + OFFX does not.
        This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
        VARY + OFFC1 underflows and VARY + OFFY does not.
        This may only happen if OFFY > OFFC1.  */
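  /* A hypothetical illustration: from a guard "x <= y - 2" (OFFC0 == 0,
     OFFC1 == -2) and current offsets OFFX == 1, OFFY == 0, the condition
     implies x + 1 <= y + (1 - 0 + (-2) - 0), i.e. (x + 1) - y <= -1,
     which lets the code below tighten BNDS->up for the difference
     (VARX + OFFX) - (VARY + OFFY).  */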
  x_ok = (integer_zerop (varx)
          || mpz_cmp (loffx, offc0) >= 0);
  y_ok = (integer_zerop (vary)
          || mpz_cmp (loffy, offc1) <= 0);

      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);
        mpz_sub_ui (bnd, bnd, 1);

      if (mpz_cmp (bnds->below, bnd) < 0)
        mpz_set (bnds->below, bnd);

      if (mpz_cmp (bnd, bnds->up) < 0)
        mpz_set (bnds->up, bnd);
/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

bound_difference (class loop *loop, tree x, tree y, bounds *bnds)
  tree type = TREE_TYPE (x);

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */

  mpz_init (bnds->below);

  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);
  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
      /* Special case VARX == VARY -- we just need to compare the
         offsets.  The matters are a bit more complicated in the
         case addition of offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);

      /* Otherwise, use the value ranges to determine the initial
         estimates on below and up.  */
      auto_mpz minx, maxx, miny, maxy;
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
      if (!single_pred_p (bb))
      e = single_pred_edge (bb);
      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))

      gcond *cond = as_a <gcond *> (*gsi_last_bb (e->src));
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
        cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
                                 c0, cmp, c1, bnds);
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

bounds_add (bounds *bnds, const widest_int &delta, tree type)
  wi::to_mpz (delta, mdelta, SIGNED);

  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

bounds_negate (bounds *bnds)
  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);

/* Returns inverse of X modulo 2^s, where MASK = 2^s-1.  */
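/* For illustration: with MASK == 0xf (so s == 4), inverse (3, 0xf)
   yields 11, since 3 * 11 == 33 == 2 * 16 + 1, i.e. 1 modulo 16.  */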
inverse (tree x, tree mask)
  tree type = TREE_TYPE (x);
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      rslt = build_int_cst_type (type, irslt);

      rslt = build_int_cst (type, 1);

            rslt = int_const_binop (MULT_EXPR, rslt, x);
          x = int_const_binop (MULT_EXPR, x, x);

      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable ever
   reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */
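/* A hypothetical example: for an exit condition 4 * i != 100 on a 32-bit
   unsigned iv with NO_OVERFLOW and EXIT_MUST_BE_TAKEN, C == 100 is a
   multiple of S == 4, so the bound computed below is C / S == 25.  */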
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
                             bounds *bnds, bool exit_must_be_taken)
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
                       || mpz_sgn (bnds->below) >= 0);
      || (TREE_CODE (c) == INTEGER_CST
          && TREE_CODE (s) == INTEGER_CST
          && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
                            TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
          && multiple_of_p (type, c, s)))
      /* If C is an exact multiple of S, then its value will be reached before
         the induction variable overflows (unless the loop is exited in some
         other way before).  Note that the actual induction variable in the
         loop (which ranges from base to final instead of from 0 to C) may
         overflow, in which case BNDS.up will not be giving a correct upper
         bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      exit_must_be_taken = true;

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
      max = wi::mask <widest_int> (TYPE_PRECISION (type)
                                   - wi::ctz (wi::to_wide (s)), false);
      wi::to_mpz (max, bnd, UNSIGNED);

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ...  */
  if (exit_must_be_taken)
      /* ... then we can strengthen this to C / S, and possibly we can use
         the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
        wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
      else if (bnds_u_valid)
        mpz_set (bnd, bnds->up);

  wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   ever reaches the value FINAL (we derived this earlier, and possibly set
   NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

number_of_iterations_ne (class loop *loop, tree type, affine_iv *iv,
                         tree final, class tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
      s = fold_convert (niter_type,
                        fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv->base),
                       fold_convert (niter_type, final));
      bounds_negate (bnds);

      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, final),
                       fold_convert (niter_type, iv->base));

  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
                               exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
                                 TYPE_SIGN (niter_type));
  /* Compute no-overflow information for the control iv.  This can be
     proven when below two conditions are satisfied:

       1) IV evaluates toward FINAL at beginning, i.e:
            base <= FINAL ; step > 0
            base >= FINAL ; step < 0

       2) |FINAL - base| is an exact multiple of step.

     Unfortunately, it's hard to prove above conditions after pass loop-ch
     because loop with exit condition (IV != FINAL) usually will be guarded
     by initial-condition (IV.base - IV.step != FINAL).  In this case, we
     can alternatively try to prove below conditions:

       1') IV evaluates toward FINAL at beginning, i.e:
            new_base = base - step < FINAL ; step > 0
                       && base - step doesn't underflow
            new_base = base - step > FINAL ; step < 0
                       && base - step doesn't overflow

     Please refer to PR34114 as an example of loop-ch's impact.

     Note, for NE_EXPR, base equal to FINAL is a special case, in
     which the loop exits immediately, and the iv does not overflow.

     Also note, we prove condition 2) by checking base and final separately
     along with condition 1) or 1').  Since we ensure the difference
     computation of c does not wrap with cond below and the adjusted s
     will fit a signed type as well as an unsigned we can safely do
     this using the type of the IV if it is not pointer typed.  */
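  /* A hypothetical example: for "for (i = 0; i != n; i += 4)" with unsigned n,
     loop header copying leaves the guard "0 != n" and the analyzed iv has
     base == 4.  Condition 1) (4 <= n) may not be provable directly, but the
     relaxed condition 1') uses new_base == 4 - 4 == 0 < n, which does follow
     from the guard, so the control iv can still be marked as not
     overflowing.  */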
  if (POINTER_TYPE_P (type))
  if (!niter->control.no_overflow
      && (integer_onep (s)
          || (multiple_of_p (mtype, fold_convert (mtype, iv->base),
                             fold_convert (mtype, s), false)
              && multiple_of_p (mtype, fold_convert (mtype, final),
                                fold_convert (mtype, s), false))))
      tree t, cond, relaxed_cond = boolean_false_node;

      if (tree_int_cst_sign_bit (iv->step))
          cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
          if (TREE_CODE (type) == INTEGER_TYPE)
              /* Only when base - step doesn't overflow.  */
              t = TYPE_MAX_VALUE (type);
              t = fold_build2 (PLUS_EXPR, type, t, iv->step);
              t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
              if (integer_nonzerop (t))
                  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
                  relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node, t,

          cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
          if (TREE_CODE (type) == INTEGER_TYPE)
              /* Only when base - step doesn't underflow.  */
              t = TYPE_MIN_VALUE (type);
              t = fold_build2 (PLUS_EXPR, type, t, iv->step);
              t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
              if (integer_nonzerop (t))
                  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
                  relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node, t,

      t = simplify_using_initial_conditions (loop, cond);
      if (!t || !integer_onep (t))
        t = simplify_using_initial_conditions (loop, relaxed_cond);

      if (t && integer_onep (t))
          niter->control.no_overflow = true;
          niter->niter = fold_build2 (EXACT_DIV_EXPR, niter_type, c, s);
  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
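  /* For illustration, in an 8-bit mode with s == 6 and c == 10: d == 2
     divides c, c/d == 5, s/d == 3 and size of mode / d == 128, so the
     count is (inverse (3) * 5) mod 128 == (43 * 5) mod 128 == 87; indeed
     6 * 87 == 522 == 10 modulo 256.  With c == 9 instead, d == 2 does not
     divide c and the exit 6 * i != 9 is never taken.  */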
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
                               (TYPE_PRECISION (niter_type)
                                - tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
                               build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
      /* If we cannot assume that the exit is taken eventually, record the
         assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
                                assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  if (integer_onep (s))
      tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
      niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */

number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
                               class tree_niter_desc *niter,
                               tree *delta, tree step,
                               bool exit_must_be_taken, bounds *bnds)
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree assumption = boolean_true_node, bound, noloop;
  bool fv_comp_no_overflow;

  if (POINTER_TYPE_P (type))

  if (TREE_CODE (mod) != INTEGER_CST)
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to which the pointer points
     being placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
    fv_comp_no_overflow =
      (iv0->no_overflow && integer_nonzerop (iv0->step))
      || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
      /* The final value of the iv is iv1->base + MOD, assuming that this
         computation does not overflow, and that
         iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
          bound = fold_build2 (MINUS_EXPR, type1,
                               TYPE_MAX_VALUE (type1), tmod);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
          if (integer_zerop (assumption))

      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build_pointer_plus (iv1->base, tmod));
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build2 (PLUS_EXPR, type1,

      /* The final value of the iv is iv0->base - MOD, assuming that this
         computation does not overflow, and that
         iv0->base - MOD <= iv1->base.  */
      if (!fv_comp_no_overflow)
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type1), tmod);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
          if (integer_zerop (assumption))

      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build_pointer_plus (iv0->base,
                                                       fold_build1 (NEGATE_EXPR,
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build2 (MINUS_EXPR, type1,

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,

  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                       class tree_niter_desc *niter, tree step)
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)

      /* If iv0->base is a constant, we can determine the last value before
         overflow precisely; otherwise we conservatively assume
         MAX - STEP + 1.  */
      if (TREE_CODE (iv0->base) == INTEGER_CST)
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, TYPE_MAX_VALUE (type)),
                           fold_convert (niter_type, iv0->base));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
                           TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,

      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)

      if (TREE_CODE (iv1->base) == INTEGER_CST)
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, iv1->base),
                           fold_convert (niter_type, TYPE_MIN_VALUE (type)));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
                           TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,

  if (integer_zerop (assumption))
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                      class tree_niter_desc *niter, bounds *bnds)
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

     -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the
     loop, and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables
     us to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */
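  /* A hypothetical example: for an 8-bit unsigned iv with step == 4,
     iv0->base == 250 and iv1->base == 2, the full-precision difference
     iv1->base - iv0->base is -248, which is below -step + 1 == -3, so the
     formula above is not usable directly and the may_be_zero condition
     built below has to guard this case.  */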
  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));

  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
                   /* For pointers, only values lying inside a single object
                      can be compared or manipulated by pointer arithmetics.
                      Gcc in general does not allow or handle objects larger
                      than half of the address space, hence the upper bound
                      is satisfied for pointers.  */
                   || POINTER_TYPE_P (type));

  if (rolls_p && no_overflow_p)

  if (POINTER_TYPE_P (type))

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
      diff = fold_build2 (MINUS_EXPR, type1,
                          iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
         0 address never belongs to any object, we can assume this for
         pointers.  */
      if (!POINTER_TYPE_P (type))
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type), diff);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,

      /* And then we can compute iv0->base - diff, and compare it with
         iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);

      diff = fold_build2 (PLUS_EXPR, type1,
                          iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MAX_VALUE (type), diff);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv1->base), diff);

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
  niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                    niter->may_be_zero, mbz);
/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1 which looks like: {base, -C} < n, or n < {base, C}.
   The number of iterations is stored to NITER.  */

number_of_iterations_until_wrap (class loop *loop, tree type, affine_iv *iv0,
                                 affine_iv *iv1, class tree_niter_desc *niter)
  tree niter_type = unsigned_type_for (type);
  tree step, num, assumptions, may_be_zero, span;
  wide_int high, low, max, min;

  may_be_zero = fold_build2 (LE_EXPR, boolean_type_node, iv1->base, iv0->base);
  if (integer_onep (may_be_zero))

  int prec = TYPE_PRECISION (type);
  signop sgn = TYPE_SIGN (type);
  min = wi::min_value (prec, sgn);
  max = wi::max_value (prec, sgn);

  /* n < {base, C}.  */
  if (integer_zerop (iv0->step) && !tree_int_cst_sign_bit (iv1->step))
      /* MIN + C - 1 <= n.  */
      tree last = wide_int_to_tree (type, min + wi::to_wide (step) - 1);
      assumptions = fold_build2 (LE_EXPR, boolean_type_node, last, iv0->base);
      if (integer_zerop (assumptions))

      num = fold_build2 (MINUS_EXPR, niter_type,
                         wide_int_to_tree (niter_type, max),
                         fold_convert (niter_type, iv1->base));

      /* When base has the form iv + 1, if we know iv >= n, then iv + 1 < n
         only when iv + 1 overflows, i.e. when iv == TYPE_VALUE_MAX.  */
          && integer_onep (step)
          && TREE_CODE (iv1->base) == PLUS_EXPR
          && integer_onep (TREE_OPERAND (iv1->base, 1)))
          tree cond = fold_build2 (GE_EXPR, boolean_type_node,
                                   TREE_OPERAND (iv1->base, 0), iv0->base);
          cond = simplify_using_initial_conditions (loop, cond);
          if (integer_onep (cond))
            may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node,
                                       TREE_OPERAND (iv1->base, 0),
                                       TYPE_MAX_VALUE (type));

      if (TREE_CODE (iv1->base) == INTEGER_CST)
        low = wi::to_wide (iv1->base) - 1;
      else if (TREE_CODE (iv0->base) == INTEGER_CST)
        low = wi::to_wide (iv0->base);

  /* {base, -C} < n.  */
  else if (tree_int_cst_sign_bit (iv0->step) && integer_zerop (iv1->step))
      step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv0->step), iv0->step);
      /* MAX - C + 1 >= n.  */
      tree last = wide_int_to_tree (type, max - wi::to_wide (step) + 1);
      assumptions = fold_build2 (GE_EXPR, boolean_type_node, last, iv1->base);
      if (integer_zerop (assumptions))

      num = fold_build2 (MINUS_EXPR, niter_type,
                         fold_convert (niter_type, iv0->base),
                         wide_int_to_tree (niter_type, min));

      if (TREE_CODE (iv0->base) == INTEGER_CST)
        high = wi::to_wide (iv0->base) + 1;
      else if (TREE_CODE (iv1->base) == INTEGER_CST)
        high = wi::to_wide (iv1->base);

  /* (delta + step - 1) / step */
  step = fold_convert (niter_type, step);
  num = fold_build2 (PLUS_EXPR, niter_type, num, step);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, num, step);

  widest_int delta, s;
  delta = widest_int::from (high, sgn) - widest_int::from (low, sgn);
  s = wi::to_widest (step);
  delta = delta + s - 1;
  niter->max = wi::udiv_floor (delta, s);

  niter->may_be_zero = may_be_zero;

  if (!integer_nonzerop (assumptions))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumptions);

  niter->control.no_overflow = false;

  /* Update bound and exit condition as:
     bound = niter * STEP + (IVbase - STEP).
     { IVbase - STEP, +, STEP } != bound
     Here, biasing IVbase by 1 step makes 'bound' be the value before
     wrap.  */
  tree base_type = TREE_TYPE (niter->control.base);
  if (POINTER_TYPE_P (base_type))
      tree utype = unsigned_type_for (base_type);
        = fold_build2 (MINUS_EXPR, utype,
                       fold_convert (utype, niter->control.base),
                       fold_convert (utype, niter->control.step));
      niter->control.base = fold_convert (base_type, niter->control.base);
        = fold_build2 (MINUS_EXPR, base_type, niter->control.base,
                       niter->control.step);

  span = fold_build2 (MULT_EXPR, niter_type, niter->niter,
                      fold_convert (niter_type, niter->control.step));
  niter->bound = fold_build2 (PLUS_EXPR, niter_type, span,
                              fold_convert (niter_type, niter->control.base));
  niter->bound = fold_convert (type, niter->bound);
  niter->cmp = NE_EXPR;
/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */

number_of_iterations_lt (class loop *loop, tree type, affine_iv *iv0,
                         affine_iv *iv1, class tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;

  if (integer_nonzerop (iv0->step))
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;

      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;

  /* {base, -C} < n, or n < {base, C} */
  if (tree_int_cst_sign_bit (iv0->step)
      || (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step)))
    return number_of_iterations_until_wrap (loop, type, iv0, iv1, niter);

  delta = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv1->base),
                       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
      /* for (i = iv0->base; i < iv1->base; i++)

         for (i = iv1->base; i > iv0->base; i--).

         In both cases # of iterations is iv1->base - iv0->base, assuming that
         iv1->base >= iv0->base.

         First try to derive a lower bound on the value of
         iv1->base - iv0->base, computed in full precision.  If the difference
         is nonnegative, we are done, otherwise we must record the
         assumption that the loop may not roll in NITER->may_be_zero.  */
      if (mpz_sgn (bnds->below) < 0)
        niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
                                          iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
                                     TYPE_SIGN (niter_type));
      niter->control.no_overflow = true;

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
    step = fold_convert (niter_type,
                         fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
                                     exit_must_be_taken, bnds))
      zps.base = build_int_cst (niter_type, 0);
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
         zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (loop, type, &zps,
                                      delta, niter, true, bnds);

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
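  /* A hypothetical example: counting "for (i = 3; i < 10; i += 4)" gives
     delta == 7 and step == 4, so (delta + step - 1) / step
     == (7 + 3) / 4 == 2, matching the two executed iterations
     (i == 3 and i == 7).  */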
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
                   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
                                 TYPE_SIGN (niter_type));
/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

number_of_iterations_le (class loop *loop, tree type, affine_iv *iv0,
                         affine_iv *iv1, class tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
  if (POINTER_TYPE_P (type))

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to which the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
      if (integer_nonzerop (iv0->step))
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv1->base, TYPE_MAX_VALUE (type));
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);

  if (integer_nonzerop (iv0->step))
      if (POINTER_TYPE_P (type))
        iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
        iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
                                 build_int_cst (type1, 1));
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
    iv0->base = fold_build2 (MINUS_EXPR, type1,
                             iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, 1, type1);

  return number_of_iterations_lt (loop, type, iv0, iv1, niter,
                                  exit_must_be_taken,
/* Dumps description of affine induction variable IV to FILE.  */

dump_affine_iv (FILE *file, affine_iv *iv)
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (dump_file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
      fprintf (file, ", + , ");
      print_generic_expr (dump_file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on left side of the comparison
   is IV0, the right-hand side is IV1.  Both induction variables must have
   type TYPE, which must be an integer or pointer type.  The steps of the
   ivs must be constants (or NULL_TREE, which is interpreted as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   if EVERY_ITERATION is true, we know the test is executed on every iteration.

   The results (number of iterations and assumptions as described in
   comments at class tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */
number_of_iterations_cond (class loop *loop,
                           tree type, affine_iv *iv0, enum tree_code code,
                           affine_iv *iv1, class tree_niter_desc *niter,
                           bool only_exit, bool every_iteration)
  bool exit_must_be_taken = false, ret;

  /* If the test is not executed every iteration, wrapping may make the test
     to pass again.
     TODO: the overflow case can be still used as unreliable estimate of upper
     bound.  But we have no API to pass it down to number of iterations code
     and, at present, it will not use it anyway.  */
  if (!every_iteration
      && (!iv0->no_overflow || !iv1->no_overflow
          || code == NE_EXPR || code == EQ_EXPR))

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0.  */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs.  */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && integer_zerop (iv0->step)))
      std::swap (iv0, iv1);
      code = swap_tree_comparison (code);

  if (POINTER_TYPE_P (type))
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
         to the same object.  If they do, the control variable cannot wrap
         (as wrap around the bounds of memory will never return a pointer
         that would be guaranteed to point to the same object, even if we
         avoid undefined behavior by casting to size_t and back).  */
      iv0->no_overflow = true;
      iv1->no_overflow = true;

  /* If the control induction variable does not overflow and the only exit
     from the loop is the one that we analyze, we know it must be taken
     eventually.  */
      if (!integer_zerop (iv0->step) && iv0->no_overflow)
        exit_must_be_taken = true;
      else if (!integer_zerop (iv1->step) && iv1->no_overflow)
        exit_must_be_taken = true;
  /* We can handle cases which neither of the sides of the comparison is
     invariant:

       {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
     as if:
       {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}

     provided that either below condition is satisfied:

       a) the test is NE_EXPR;
       b) iv0 and iv1 do not overflow and iv0.step - iv1.step is of
          the same sign and of less or equal magnitude than iv0.step

     This rarely occurs in practice, but it is simple enough to manage.  */
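  /* A hypothetical example of b): with iv0 == {0, +, 4} and iv1 == {100, +, 1},
     the difference of steps is +3, which has the same sign as and smaller
     magnitude than iv0.step, so "iv0 < iv1" can be analyzed as
     {0, +, 3} < {100, +, 0}.  */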
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
      tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
      tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
                                           iv0->step, iv1->step);

      /* For code other than NE_EXPR we have to ensure moving the evolution
         of IV1 to that of IV0 does not introduce overflow.  */
      if (TREE_CODE (step) != INTEGER_CST
          || !iv0->no_overflow || !iv1->no_overflow)
          if (code != NE_EXPR)
          iv0->no_overflow = false;
      /* If the new step of IV0 has changed sign or is of greater
         magnitude then we do not know whether IV0 does overflow
         and thus the transform is not valid for code other than NE_EXPR.  */
      else if (tree_int_cst_sign_bit (step) != tree_int_cst_sign_bit (iv0->step)
               || wi::gtu_p (wi::abs (wi::to_widest (step)),
                             wi::abs (wi::to_widest (iv0->step))))
          if (POINTER_TYPE_P (type) && code != NE_EXPR)
            /* For relational pointer compares we have further guarantees
               that the pointers always point to the same object (or one
               after it) and that objects do not cross the zero page.  So
               not only is the transform always valid for relational
               pointer compares, we also know the resulting IV does not
               overflow.  */
          else if (code != NE_EXPR)
            iv0->no_overflow = false;

      iv1->step = build_int_cst (step_type, 0);
      iv1->no_overflow = true;
  /* If the result of the comparison is a constant, the loop is weird.  More
     precise handling would be possible, but the situation is not common enough
     to waste time on it.  */
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))

  /* If the loop exits immediately, there is nothing to do.  */
  tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
  if (tem && integer_zerop (tem))
      if (!every_iteration)
      niter->niter = build_int_cst (unsigned_type_for (type), 0);

  /* OK, now we know we have a senseful loop.  Handle several cases, depending
     on what comparison operator is used.  */
  bound_difference (loop, iv1->base, iv0->base, &bnds);

  if (dump_file && (dump_flags & TDF_DETAILS))
      fprintf (dump_file,
               "Analyzing # of iterations of loop %d\n", loop->num);

      fprintf (dump_file, " exit condition ");
      dump_affine_iv (dump_file, iv0);
      fprintf (dump_file, " %s ",
               code == NE_EXPR ? "!="
               : code == LT_EXPR ? "<"
      dump_affine_iv (dump_file, iv1);
      fprintf (dump_file, "\n");

      fprintf (dump_file, " bounds on difference of bases: ");
      mpz_out_str (dump_file, 10, bnds.below);
      fprintf (dump_file, " ... ");
      mpz_out_str (dump_file, 10, bnds.up);
      fprintf (dump_file, "\n");
      gcc_assert (integer_zerop (iv1->step));
      ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
                                     exit_must_be_taken, &bnds);

      ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
                                     exit_must_be_taken, &bnds);

      ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
                                     exit_must_be_taken, &bnds);

  mpz_clear (bnds.up);
  mpz_clear (bnds.below);

  if (dump_file && (dump_flags & TDF_DETAILS))
      fprintf (dump_file, " result:\n");
      if (!integer_nonzerop (niter->assumptions))
          fprintf (dump_file, " under assumptions ");
          print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
          fprintf (dump_file, "\n");

      if (!integer_zerop (niter->may_be_zero))
          fprintf (dump_file, " zero if ");
          print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
          fprintf (dump_file, "\n");

      fprintf (dump_file, " # of iterations ");
      print_generic_expr (dump_file, niter->niter, TDF_SLIM);
      fprintf (dump_file, ", bounded by ");
      print_decu (niter->max, dump_file);
      fprintf (dump_file, "\n");

      fprintf (dump_file, " failed\n\n");
/* Return an expression that computes the popcount of src.  */

static tree
build_popcount_expr (tree src)
{
  tree fn;
  bool use_ifn = false;
  int prec = TYPE_PRECISION (TREE_TYPE (src));
  int i_prec = TYPE_PRECISION (integer_type_node);
  int li_prec = TYPE_PRECISION (long_integer_type_node);
  int lli_prec = TYPE_PRECISION (long_long_integer_type_node);

  tree utype = unsigned_type_for (TREE_TYPE (src));
  src = fold_convert (utype, src);

  if (direct_internal_fn_supported_p (IFN_POPCOUNT, utype, OPTIMIZE_FOR_BOTH))
    use_ifn = true;
  else if (prec <= i_prec)
    fn = builtin_decl_implicit (BUILT_IN_POPCOUNT);
  else if (prec == li_prec)
    fn = builtin_decl_implicit (BUILT_IN_POPCOUNTL);
  else if (prec == lli_prec || prec == 2 * lli_prec)
    fn = builtin_decl_implicit (BUILT_IN_POPCOUNTLL);
  else
    return NULL_TREE;

  tree call;
  if (use_ifn)
    call = build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_POPCOUNT,
					 integer_type_node, 1, src);
  else if (prec == 2 * lli_prec)
    {
      tree src1 = fold_convert (long_long_unsigned_type_node,
				fold_build2 (RSHIFT_EXPR, TREE_TYPE (src),
					     unshare_expr (src),
					     build_int_cst (integer_type_node,
							    lli_prec)));
      tree src2 = fold_convert (long_long_unsigned_type_node, src);
      tree call1 = build_call_expr (fn, 1, src1);
      tree call2 = build_call_expr (fn, 1, src2);
      call = fold_build2 (PLUS_EXPR, integer_type_node, call1, call2);
    }
  else
    {
      if (prec < i_prec)
	src = fold_convert (unsigned_type_node, src);

      call = build_call_expr (fn, 1, src);
    }

  return call;
}
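/* Illustration (a sketch, not part of GCC): for a 128-bit SRC on a target
   where long long is 64 bits and IFN_POPCOUNT is not available, the
   expression built above corresponds roughly to the C source

     __builtin_popcountll ((unsigned long long) (src >> 64))
       + __builtin_popcountll ((unsigned long long) src);

   i.e. the popcounts of the high and low halves summed in int.  */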
/* Utility function to check if OP is defined by a stmt
   that is a val - 1.  */

static bool
ssa_defined_by_minus_one_stmt_p (tree op, tree val)
{
  gimple *stmt;
  return (TREE_CODE (op) == SSA_NAME
	  && (stmt = SSA_NAME_DEF_STMT (op))
	  && is_gimple_assign (stmt)
	  && (gimple_assign_rhs_code (stmt) == PLUS_EXPR)
	  && val == gimple_assign_rhs1 (stmt)
	  && integer_minus_onep (gimple_assign_rhs2 (stmt)));
}
/* See comment below for number_of_iterations_bitcount.
   For popcount, we have:

   modify:
   _1 = iv_1 + -1
   iv_2 = iv_1 & _1

   test:
   if (iv != 0)

   modification count:
   popcount (src)

 */

static bool
number_of_iterations_popcount (loop_p loop, edge exit,
			       enum tree_code code,
			       class tree_niter_desc *niter)
{
  bool modify_before_test = true;
  HOST_WIDE_INT max;

  /* Check that condition for staying inside the loop is like
     if (iv != 0).  */
  gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (exit->src));
  if (!cond_stmt
      || code != NE_EXPR
      || !integer_zerop (gimple_cond_rhs (cond_stmt))
      || TREE_CODE (gimple_cond_lhs (cond_stmt)) != SSA_NAME)
    return false;

  tree iv_2 = gimple_cond_lhs (cond_stmt);
  gimple *iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);

  /* If the test comes before the iv modification, then these will actually be
     iv_1 and a phi node.  */
  if (gimple_code (iv_2_stmt) == GIMPLE_PHI
      && gimple_bb (iv_2_stmt) == loop->header
      && gimple_phi_num_args (iv_2_stmt) == 2
      && (TREE_CODE (gimple_phi_arg_def (iv_2_stmt,
					 loop_latch_edge (loop)->dest_idx))
	  == SSA_NAME))
    {
      /* iv_2 is actually one of the inputs to the phi.  */
      iv_2 = gimple_phi_arg_def (iv_2_stmt, loop_latch_edge (loop)->dest_idx);
      iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);
      modify_before_test = false;
    }

  /* Make sure iv_2_stmt is an and stmt (iv_2 = _1 & iv_1).  */
  if (!is_gimple_assign (iv_2_stmt)
      || gimple_assign_rhs_code (iv_2_stmt) != BIT_AND_EXPR)
    return false;

  tree iv_1 = gimple_assign_rhs1 (iv_2_stmt);
  tree _1 = gimple_assign_rhs2 (iv_2_stmt);

  /* Check that _1 is defined by (_1 = iv_1 + -1).
     Also make sure that _1 is the same in and_stmt and _1 defining stmt.
     Also canonicalize if _1 and iv_1 are reversed.  */
  if (ssa_defined_by_minus_one_stmt_p (iv_1, _1))
    std::swap (iv_1, _1);
  else if (ssa_defined_by_minus_one_stmt_p (_1, iv_1))
    ;
  else
    return false;

  /* Check the recurrence.  */
  gimple *phi = SSA_NAME_DEF_STMT (iv_1);
  if (gimple_code (phi) != GIMPLE_PHI
      || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
      || (iv_2 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
    return false;

  /* We found a match.  */
  tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
  int src_precision = TYPE_PRECISION (TREE_TYPE (src));

  /* Get the corresponding popcount builtin.  */
  tree expr = build_popcount_expr (src);

  if (!expr)
    return false;

  max = src_precision;

  tree may_be_zero = boolean_false_node;

  if (modify_before_test)
    {
      expr = fold_build2 (MINUS_EXPR, integer_type_node, expr,
			  integer_one_node);
      max = max - 1;
      may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
				 build_zero_cst (TREE_TYPE (src)));
    }

  expr = fold_convert (unsigned_type_node, expr);

  niter->assumptions = boolean_true_node;
  niter->may_be_zero = simplify_using_initial_conditions (loop, may_be_zero);
  niter->niter = simplify_using_initial_conditions (loop, expr);

  if (TREE_CODE (niter->niter) == INTEGER_CST)
    niter->max = tree_to_uhwi (niter->niter);
  else
    niter->max = max;

  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;
  return true;
}
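/* For reference, a source-level loop matching the popcount idiom above
   (a sketch, not part of GCC):

     int count_bits (unsigned int src)
     {
       int c = 0;
       while (src)
	 {
	   src = src & (src - 1);   // iv_2 = iv_1 & (iv_1 + -1)
	   c++;
	 }
       return c;
     }

   The latch runs __builtin_popcount (src) times, one less when the
   modification precedes the exit test (modify_before_test), which is what
   gets recorded in NITER above.  */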
/* Return an expression that counts the leading/trailing zeroes of src.

   If define_at_zero is true, then the built expression will be defined to
   return the precision of src when src == 0 (using either a conditional
   expression or a suitable internal function).
   Otherwise, we can elide the conditional expression and let src = 0 invoke
   undefined behaviour.  */

static tree
build_cltz_expr (tree src, bool leading, bool define_at_zero)
{
  tree fn;
  internal_fn ifn = leading ? IFN_CLZ : IFN_CTZ;
  bool use_ifn = false;
  int prec = TYPE_PRECISION (TREE_TYPE (src));
  int i_prec = TYPE_PRECISION (integer_type_node);
  int li_prec = TYPE_PRECISION (long_integer_type_node);
  int lli_prec = TYPE_PRECISION (long_long_integer_type_node);

  tree utype = unsigned_type_for (TREE_TYPE (src));
  src = fold_convert (utype, src);

  if (direct_internal_fn_supported_p (ifn, utype, OPTIMIZE_FOR_BOTH))
    use_ifn = true;
  else if (prec <= i_prec)
    fn = leading ? builtin_decl_implicit (BUILT_IN_CLZ)
		 : builtin_decl_implicit (BUILT_IN_CTZ);
  else if (prec == li_prec)
    fn = leading ? builtin_decl_implicit (BUILT_IN_CLZL)
		 : builtin_decl_implicit (BUILT_IN_CTZL);
  else if (prec == lli_prec || prec == 2 * lli_prec)
    fn = leading ? builtin_decl_implicit (BUILT_IN_CLZLL)
		 : builtin_decl_implicit (BUILT_IN_CTZLL);
  else
    return NULL_TREE;

  tree call;
  if (use_ifn)
    {
      call = build_call_expr_internal_loc (UNKNOWN_LOCATION, ifn,
					   integer_type_node, 1, src);
      int val;
      int optab_defined_at_zero
	= (leading
	   ? CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (utype), val)
	   : CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (utype), val));
      if (define_at_zero && !(optab_defined_at_zero == 2 && val == prec))
	{
	  tree is_zero = fold_build2 (NE_EXPR, boolean_type_node, src,
				      build_zero_cst (TREE_TYPE (src)));
	  call = fold_build3 (COND_EXPR, integer_type_node, is_zero, call,
			      build_int_cst (integer_type_node, prec));
	}
    }
  else if (prec == 2 * lli_prec)
    {
      tree src1 = fold_convert (long_long_unsigned_type_node,
				fold_build2 (RSHIFT_EXPR, TREE_TYPE (src),
					     unshare_expr (src),
					     build_int_cst (integer_type_node,
							    lli_prec)));
      tree src2 = fold_convert (long_long_unsigned_type_node, src);
      /* We count the zeroes in src1, and add the number in src2 when src1
	 is zero.  */
      if (!leading)
	std::swap (src1, src2);
      tree call1 = build_call_expr (fn, 1, src1);
      tree call2 = build_call_expr (fn, 1, src2);
      if (define_at_zero)
	{
	  tree is_zero2 = fold_build2 (NE_EXPR, boolean_type_node, src2,
				       build_zero_cst (TREE_TYPE (src2)));
	  call2 = fold_build3 (COND_EXPR, integer_type_node, is_zero2, call2,
			       build_int_cst (integer_type_node, lli_prec));
	}
      tree is_zero1 = fold_build2 (NE_EXPR, boolean_type_node, src1,
				   build_zero_cst (TREE_TYPE (src1)));
      call = fold_build3 (COND_EXPR, integer_type_node, is_zero1, call1,
			  fold_build2 (PLUS_EXPR, integer_type_node, call2,
				       build_int_cst (integer_type_node,
						      lli_prec)));
    }
  else
    {
      if (prec < i_prec)
	src = fold_convert (unsigned_type_node, src);

      call = build_call_expr (fn, 1, src);
      if (define_at_zero)
	{
	  tree is_zero = fold_build2 (NE_EXPR, boolean_type_node, src,
				      build_zero_cst (TREE_TYPE (src)));
	  call = fold_build3 (COND_EXPR, integer_type_node, is_zero, call,
			      build_int_cst (integer_type_node, prec));
	}
    }

  if (leading && prec < i_prec)
    call = fold_build2 (MINUS_EXPR, integer_type_node, call,
			build_int_cst (integer_type_node, i_prec - prec));

  return call;
}
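/* Illustration (a sketch, not part of GCC): with define_at_zero set and a
   32-bit SRC whose target ctz is undefined at zero, the expression built
   above corresponds to

     src != 0 ? __builtin_ctz (src) : 32;

   and for clz on a narrow type the final MINUS_EXPR rebases the result of
   the int-wide builtin, e.g. for a 16-bit SRC

     __builtin_clz ((unsigned int) src) - 16;  */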
/* See comment below for number_of_iterations_bitcount.
   For c[lt]z, we have:

   modify:
   iv_2 = iv_1 << 1 OR iv_1 >> 1

   test:
   if (iv & 1 << (prec-1)) OR (iv & 1)

   modification count:
   src precision - c[lt]z (src)

 */

static bool
number_of_iterations_cltz (loop_p loop, edge exit,
			   enum tree_code code,
			   class tree_niter_desc *niter)
{
  bool modify_before_test = true;
  HOST_WIDE_INT max;
  int checked_bit;
  tree iv_2;

  /* Check that condition for staying inside the loop is like
     if (iv == 0).  */
  gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (exit->src));
  if (!cond_stmt
      || (code != EQ_EXPR && code != GE_EXPR)
      || !integer_zerop (gimple_cond_rhs (cond_stmt))
      || TREE_CODE (gimple_cond_lhs (cond_stmt)) != SSA_NAME)
    return false;

  if (code == EQ_EXPR)
    {
      /* Make sure we check a bitwise and with a suitable constant.  */
      gimple *and_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond_stmt));
      if (!is_gimple_assign (and_stmt)
	  || gimple_assign_rhs_code (and_stmt) != BIT_AND_EXPR
	  || !integer_pow2p (gimple_assign_rhs2 (and_stmt))
	  || TREE_CODE (gimple_assign_rhs1 (and_stmt)) != SSA_NAME)
	return false;

      checked_bit = tree_log2 (gimple_assign_rhs2 (and_stmt));

      iv_2 = gimple_assign_rhs1 (and_stmt);
    }
  else
    {
      /* We have a GE_EXPR - a signed comparison with zero is equivalent to
	 testing the leading bit, so check for this pattern too.  */

      iv_2 = gimple_cond_lhs (cond_stmt);
      tree test_value_type = TREE_TYPE (iv_2);

      if (TYPE_UNSIGNED (test_value_type))
	return false;

      gimple *test_value_stmt = SSA_NAME_DEF_STMT (iv_2);

      if (is_gimple_assign (test_value_stmt)
	  && gimple_assign_rhs_code (test_value_stmt) == NOP_EXPR)
	{
	  /* If the test value comes from a NOP_EXPR, then we need to unwrap
	     this.  We conservatively require that both types have the same
	     precision.  */
	  iv_2 = gimple_assign_rhs1 (test_value_stmt);
	  tree rhs_type = TREE_TYPE (iv_2);
	  if (TREE_CODE (iv_2) != SSA_NAME
	      || TREE_CODE (rhs_type) != INTEGER_TYPE
	      || (TYPE_PRECISION (rhs_type)
		  != TYPE_PRECISION (test_value_type)))
	    return false;
	}

      checked_bit = TYPE_PRECISION (test_value_type) - 1;
    }

  gimple *iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);

  /* If the test comes before the iv modification, then these will actually be
     iv_1 and a phi node.  */
  if (gimple_code (iv_2_stmt) == GIMPLE_PHI
      && gimple_bb (iv_2_stmt) == loop->header
      && gimple_phi_num_args (iv_2_stmt) == 2
      && (TREE_CODE (gimple_phi_arg_def (iv_2_stmt,
					 loop_latch_edge (loop)->dest_idx))
	  == SSA_NAME))
    {
      /* iv_2 is actually one of the inputs to the phi.  */
      iv_2 = gimple_phi_arg_def (iv_2_stmt, loop_latch_edge (loop)->dest_idx);
      iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);
      modify_before_test = false;
    }

  /* Make sure iv_2_stmt is a logical shift by one stmt:
     iv_2 = iv_1 {<<|>>} 1  */
  if (!is_gimple_assign (iv_2_stmt)
      || (gimple_assign_rhs_code (iv_2_stmt) != LSHIFT_EXPR
	  && (gimple_assign_rhs_code (iv_2_stmt) != RSHIFT_EXPR
	      || !TYPE_UNSIGNED (TREE_TYPE (gimple_assign_lhs (iv_2_stmt)))))
      || !integer_onep (gimple_assign_rhs2 (iv_2_stmt)))
    return false;

  bool left_shift = (gimple_assign_rhs_code (iv_2_stmt) == LSHIFT_EXPR);

  tree iv_1 = gimple_assign_rhs1 (iv_2_stmt);

  /* Check the recurrence.  */
  gimple *phi = SSA_NAME_DEF_STMT (iv_1);
  if (gimple_code (phi) != GIMPLE_PHI
      || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
      || (iv_2 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
    return false;

  /* We found a match.  */
  tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
  int src_precision = TYPE_PRECISION (TREE_TYPE (src));

  /* Apply any needed preprocessing to src.  */
  int num_ignored_bits;
  if (left_shift)
    num_ignored_bits = src_precision - checked_bit - 1;
  else
    num_ignored_bits = checked_bit;

  if (modify_before_test)
    num_ignored_bits++;

  if (num_ignored_bits != 0)
    src = fold_build2 (left_shift ? LSHIFT_EXPR : RSHIFT_EXPR,
		       TREE_TYPE (src), src,
		       build_int_cst (integer_type_node, num_ignored_bits));

  /* Get the corresponding c[lt]z builtin.  */
  tree expr = build_cltz_expr (src, left_shift, false);

  if (!expr)
    return false;

  max = src_precision - num_ignored_bits - 1;

  expr = fold_convert (unsigned_type_node, expr);

  tree assumptions = fold_build2 (NE_EXPR, boolean_type_node, src,
				  build_zero_cst (TREE_TYPE (src)));

  niter->assumptions = simplify_using_initial_conditions (loop, assumptions);
  niter->may_be_zero = boolean_false_node;
  niter->niter = simplify_using_initial_conditions (loop, expr);

  if (TREE_CODE (niter->niter) == INTEGER_CST)
    niter->max = tree_to_uhwi (niter->niter);
  else
    niter->max = max;

  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;
  return true;
}
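/* A source-level loop matching the c[lt]z idiom above (a sketch, not part
   of GCC):

     int high_zeros (unsigned int src)
     {
       int n = 0;
       while (!(src & 0x80000000u))   // stay while the leading bit is clear
	 {
	   src <<= 1;                 // iv_2 = iv_1 << 1
	   n++;
	 }
       return n;
     }

   With src != 0 (the recorded assumption) the modification count is
   __builtin_clz (src), adjusted as described above when the modification
   precedes the exit test.  */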
/* See comment below for number_of_iterations_bitcount.
   For c[lt]z complement, we have:

   modify:
   iv_2 = iv_1 >> 1 OR iv_1 << 1

   test:
   if (iv != 0)

   modification count:
   src precision - c[lt]z (src)

 */

static bool
number_of_iterations_cltz_complement (loop_p loop, edge exit,
				      enum tree_code code,
				      class tree_niter_desc *niter)
{
  bool modify_before_test = true;
  HOST_WIDE_INT max;

  /* Check that condition for staying inside the loop is like
     if (iv != 0).  */
  gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (exit->src));
  if (!cond_stmt
      || code != NE_EXPR
      || !integer_zerop (gimple_cond_rhs (cond_stmt))
      || TREE_CODE (gimple_cond_lhs (cond_stmt)) != SSA_NAME)
    return false;

  tree iv_2 = gimple_cond_lhs (cond_stmt);
  gimple *iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);

  /* If the test comes before the iv modification, then these will actually be
     iv_1 and a phi node.  */
  if (gimple_code (iv_2_stmt) == GIMPLE_PHI
      && gimple_bb (iv_2_stmt) == loop->header
      && gimple_phi_num_args (iv_2_stmt) == 2
      && (TREE_CODE (gimple_phi_arg_def (iv_2_stmt,
					 loop_latch_edge (loop)->dest_idx))
	  == SSA_NAME))
    {
      /* iv_2 is actually one of the inputs to the phi.  */
      iv_2 = gimple_phi_arg_def (iv_2_stmt, loop_latch_edge (loop)->dest_idx);
      iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);
      modify_before_test = false;
    }

  /* Make sure iv_2_stmt is a logical shift by one stmt:
     iv_2 = iv_1 {>>|<<} 1  */
  if (!is_gimple_assign (iv_2_stmt)
      || (gimple_assign_rhs_code (iv_2_stmt) != LSHIFT_EXPR
	  && (gimple_assign_rhs_code (iv_2_stmt) != RSHIFT_EXPR
	      || !TYPE_UNSIGNED (TREE_TYPE (gimple_assign_lhs (iv_2_stmt)))))
      || !integer_onep (gimple_assign_rhs2 (iv_2_stmt)))
    return false;

  bool left_shift = (gimple_assign_rhs_code (iv_2_stmt) == LSHIFT_EXPR);

  tree iv_1 = gimple_assign_rhs1 (iv_2_stmt);

  /* Check the recurrence.  */
  gimple *phi = SSA_NAME_DEF_STMT (iv_1);
  if (gimple_code (phi) != GIMPLE_PHI
      || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
      || (iv_2 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
    return false;

  /* We found a match.  */
  tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
  int src_precision = TYPE_PRECISION (TREE_TYPE (src));

  /* Get the corresponding c[lt]z builtin.  */
  tree expr = build_cltz_expr (src, !left_shift, true);

  if (!expr)
    return false;

  expr = fold_build2 (MINUS_EXPR, integer_type_node,
		      build_int_cst (integer_type_node, src_precision),
		      expr);

  max = src_precision;

  tree may_be_zero = boolean_false_node;

  if (modify_before_test)
    {
      expr = fold_build2 (MINUS_EXPR, integer_type_node, expr,
			  integer_one_node);
      max = max - 1;
      may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
				 build_zero_cst (TREE_TYPE (src)));
    }

  expr = fold_convert (unsigned_type_node, expr);

  niter->assumptions = boolean_true_node;
  niter->may_be_zero = simplify_using_initial_conditions (loop, may_be_zero);
  niter->niter = simplify_using_initial_conditions (loop, expr);

  if (TREE_CODE (niter->niter) == INTEGER_CST)
    niter->max = tree_to_uhwi (niter->niter);
  else
    niter->max = max;

  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;
  return true;
}
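/* A source-level loop matching the c[lt]z complement idiom above (a sketch,
   not part of GCC):

     int bits_used (unsigned int src)
     {
       int n = 0;
       while (src)
	 {
	   src >>= 1;   // iv_2 = iv_1 >> 1
	   n++;
	 }
       return n;
     }

   The modification count is precision - clz (src), i.e. the number of
   significant bits of src, with zero iterations (may_be_zero) when src
   starts out as 0.  */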
/* See if LOOP contains a bit counting idiom.  The idiom consists of two parts:
   1. A modification to the induction variable.
   2. A test to determine whether or not to exit the loop.

   These can come in either order - i.e.:

   <bb 3>:
   iv_1 = PHI <src(2), iv_2(4)>
   if (test (iv_1))

   <bb 4>:
   iv_2 = modify (iv_1)
   goto <bb 3>

   OR

   <bb 3>:
   iv_1 = PHI <src(2), iv_2(4)>
   iv_2 = modify (iv_1)
   if (test (iv_2))

   <bb 4>:
   goto <bb 3>

   The second form can be generated by copying the loop header out of the loop.

   In the first case, the number of latch executions will be equal to the
   number of induction variable modifications required before the test fails.

   In the second case (modify_before_test), if we assume that the number of
   modifications required before the test fails is nonzero, then the number of
   latch executions will be one less than this number.

   If we recognise the pattern, then we update niter accordingly, and return
   true.  */

static bool
number_of_iterations_bitcount (loop_p loop, edge exit,
			       enum tree_code code,
			       class tree_niter_desc *niter)
{
  return (number_of_iterations_popcount (loop, exit, code, niter)
	  || number_of_iterations_cltz (loop, exit, code, niter)
	  || number_of_iterations_cltz_complement (loop, exit, code, niter));
}
/* Substitute NEW_TREE for OLD in EXPR and fold the result.
   If VALUEIZE is non-NULL then OLD and NEW_TREE are ignored and instead
   all SSA names are replaced with the result of calling the VALUEIZE
   function with the SSA name as argument.  */

tree
simplify_replace_tree (tree expr, tree old, tree new_tree,
		       tree (*valueize) (tree, void*), void *context,
		       bool do_fold)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, se;

  if (!expr)
    return NULL_TREE;

  /* Do not bother to replace constants.  */
  if (CONSTANT_CLASS_P (expr))
    return expr;

  if (valueize)
    {
      if (TREE_CODE (expr) == SSA_NAME)
	{
	  new_tree = valueize (expr, context);
	  if (new_tree != expr)
	    return new_tree;
	}
    }
  else if (expr == old
	   || operand_equal_p (expr, old, 0))
    return unshare_expr (new_tree);

  if (!EXPR_P (expr))
    return expr;

  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    {
      e = TREE_OPERAND (expr, i);
      se = simplify_replace_tree (e, old, new_tree, valueize, context, do_fold);
      if (e == se)
	continue;

      if (!ret)
	ret = copy_node (expr);

      TREE_OPERAND (ret, i) = se;
    }

  return (ret ? (do_fold ? fold (ret) : ret) : expr);
}
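/* Typical use (a sketch, not a quote of any particular caller): given an
   equality guard a_1 == b_2, the simplifiers below call

     simplify_replace_tree (expr, a_1, b_2);

   to rewrite every occurrence of a_1 in EXPR with b_2 and refold, which is
   how a guard can collapse EXPR to a constant.  */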
/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression.  If STOP is specified, stop
   expanding if EXPR equals to it.  */

static tree
expand_simple_operations (tree expr, tree stop, hash_map<tree, tree> &cache)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, ee, e1;
  enum tree_code code;
  gimple *stmt;

  if (expr == NULL_TREE)
    return expr;

  if (is_gimple_min_invariant (expr))
    return expr;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
	{
	  e = TREE_OPERAND (expr, i);
	  if (!e)
	    continue;
	  /* SCEV analysis feeds us with a proper expression
	     graph matching the SSA graph.  Avoid turning it
	     into a tree here, thus handle tree sharing
	     properly.
	     ???  The SSA walk below still turns the SSA graph
	     into a tree but until we find a testcase do not
	     introduce additional tree sharing here.  */
	  bool existed_p;
	  tree &cee = cache.get_or_insert (e, &existed_p);
	  if (existed_p)
	    ee = cee;
	  else
	    {
	      cee = e;
	      ee = expand_simple_operations (e, stop, cache);
	      *cache.get (e) = ee;
	    }
	  if (e == ee)
	    continue;

	  if (!ret)
	    ret = copy_node (expr);

	  TREE_OPERAND (ret, i) = ee;
	}

      if (!ret)
	return expr;

      fold_defer_overflow_warnings ();
      ret = fold (ret);
      fold_undefer_and_ignore_overflow_warnings ();
      return ret;
    }

  /* Stop if it's not ssa name or the one we don't want to expand.  */
  if (TREE_CODE (expr) != SSA_NAME || expr == stop)
    return expr;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      basic_block src, dest;

      if (gimple_phi_num_args (stmt) != 1)
	return expr;
      e = PHI_ARG_DEF (stmt, 0);

      /* Avoid propagating through loop exit phi nodes, which
	 could break loop-closed SSA form restrictions.  */
      dest = gimple_bb (stmt);
      src = single_pred (dest);
      if (TREE_CODE (e) == SSA_NAME
	  && src->loop_father != dest->loop_father)
	return expr;

      return expand_simple_operations (e, stop, cache);
    }
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return expr;

  /* Avoid expanding to expressions that contain SSA names that need
     to take part in abnormal coalescing.  */
  ssa_op_iter iter;
  FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
    if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
      return expr;

  e = gimple_assign_rhs1 (stmt);
  code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    {
      if (is_gimple_min_invariant (e))
	return e;

      if (code == SSA_NAME)
	return expand_simple_operations (e, stop, cache);
      else if (code == ADDR_EXPR)
	{
	  poly_int64 offset;
	  tree base = get_addr_base_and_unit_offset (TREE_OPERAND (e, 0),
						     &offset);
	  if (base
	      && TREE_CODE (base) == MEM_REF)
	    {
	      ee = expand_simple_operations (TREE_OPERAND (base, 0), stop,
					     cache);
	      return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (expr), ee,
				  wide_int_to_tree (sizetype,
						    mem_ref_offset (base)
						    + offset));
	    }
	}

      return expr;
    }

  switch (code)
    {
    CASE_CONVERT:
      /* Casts are simple.  */
      ee = expand_simple_operations (e, stop, cache);
      return fold_build1 (code, TREE_TYPE (expr), ee);

    case PLUS_EXPR:
    case MINUS_EXPR:
      if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
	  && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
	return expr;
      /* Fallthru.  */
    case POINTER_PLUS_EXPR:
      /* And increments and decrements by a constant are simple.  */
      e1 = gimple_assign_rhs2 (stmt);
      if (!is_gimple_min_invariant (e1))
	return expr;

      ee = expand_simple_operations (e, stop, cache);
      return fold_build2 (code, TREE_TYPE (expr), ee, e1);

    default:
      return expr;
    }
}

tree
expand_simple_operations (tree expr, tree stop)
{
  hash_map<tree, tree> cache;
  return expand_simple_operations (expr, stop, cache);
}
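/* Example of the effect (a sketch): if the IL contains

     _1 = (unsigned int) n_5;
     _2 = _1 + 1;

   then expand_simple_operations (_2) returns the GENERIC tree for
   (unsigned int) n_5 + 1, so later simplification against entry conditions
   can see through SSA copies, casts and constant increments.  */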
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).  */

static tree
tree_simplify_using_condition_1 (tree cond, tree expr)
{
  bool changed;
  tree e, e0, e1, e2, notcond;
  enum tree_code code = TREE_CODE (expr);

  if (code == INTEGER_CST)
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
	changed = true;

      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
	changed = true;

      if (code == COND_EXPR)
	{
	  e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
	  if (TREE_OPERAND (expr, 2) != e2)
	    changed = true;
	}
      else
	e2 = NULL_TREE;

      if (changed)
	{
	  if (code == COND_EXPR)
	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
	  else
	    expr = fold_build2 (code, boolean_type_node, e0, e1);
	}

      return expr;
    }

  /* In case COND is equality, we may be able to simplify EXPR by copy/constant
     propagation, and vice versa.  Fold does not handle this, since it is
     considered too expensive.  */
  if (TREE_CODE (cond) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (cond, 0);
      e1 = TREE_OPERAND (cond, 1);

      /* We know that e0 == e1.  Check whether we cannot simplify expr
	 using this fact.  */
      e = simplify_replace_tree (expr, e0, e1);
      if (integer_zerop (e) || integer_nonzerop (e))
	return e;

      e = simplify_replace_tree (expr, e1, e0);
      if (integer_zerop (e) || integer_nonzerop (e))
	return e;
    }
  if (TREE_CODE (expr) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
	return e;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
	return e;
    }
  if (TREE_CODE (expr) == NE_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
	return boolean_true_node;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
	return boolean_true_node;
    }

  /* Check whether COND ==> EXPR.  */
  notcond = invert_truthvalue (cond);
  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
  if (e && integer_nonzerop (e))
    return e;

  /* Check whether COND ==> not EXPR.  */
  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
  if (e && integer_zerop (e))
    return e;

  return expr;
}
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).
   Wrapper around tree_simplify_using_condition_1 that ensures that chains
   of simple operations in definitions of ssa names in COND are expanded,
   so that things like casts or incrementing the value of the bound before
   the loop do not cause us to fail.  */

static tree
tree_simplify_using_condition (tree cond, tree expr)
{
  cond = expand_simple_operations (cond);

  return tree_simplify_using_condition_1 (cond, expr);
}
/* Tries to simplify EXPR using the conditions on entry to LOOP.
   Returns the simplified expression (or EXPR unchanged, if no
   simplification was possible).  */

tree
simplify_using_initial_conditions (class loop *loop, tree expr)
{
  edge e;
  basic_block bb;
  tree cond, expanded, backup;
  int cnt = 0;

  if (TREE_CODE (expr) == INTEGER_CST)
    return expr;

  backup = expanded = expand_simple_operations (expr);

  /* Limit walking the dominators to avoid quadraticness in
     the number of BBs times the number of loops in degenerate
     cases.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      gcond *stmt = as_a <gcond *> (*gsi_last_bb (e->src));
      cond = fold_build2 (gimple_cond_code (stmt),
			  boolean_type_node,
			  gimple_cond_lhs (stmt),
			  gimple_cond_rhs (stmt));
      if (e->flags & EDGE_FALSE_VALUE)
	cond = invert_truthvalue (cond);
      expanded = tree_simplify_using_condition (cond, expanded);
      /* Break if EXPR is simplified to const values.  */
      if (expanded
	  && (integer_zerop (expanded) || integer_nonzerop (expanded)))
	return expanded;

      ++cnt;
    }

  /* Return the original expression if no simplification is done.  */
  return operand_equal_p (backup, expanded, 0) ? expr : expanded;
}
/* Tries to simplify EXPR using the evolutions of the loop invariants
   in the superloops of LOOP.  Returns the simplified expression
   (or EXPR unchanged, if no simplification was possible).  */

static tree
simplify_using_outer_evolutions (class loop *loop, tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  bool changed;
  tree e, e0, e1, e2;

  if (is_gimple_min_invariant (expr))
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
	changed = true;

      e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
	changed = true;

      if (code == COND_EXPR)
	{
	  e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
	  if (TREE_OPERAND (expr, 2) != e2)
	    changed = true;
	}
      else
	e2 = NULL_TREE;

      if (changed)
	{
	  if (code == COND_EXPR)
	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
	  else
	    expr = fold_build2 (code, boolean_type_node, e0, e1);
	}

      return expr;
    }

  e = instantiate_parameters (loop, expr);
  if (is_gimple_min_invariant (e))
    return e;

  return expr;
}
/* Returns true if EXIT is the only possible exit from LOOP.  */

bool
loop_only_exit_p (const class loop *loop, basic_block *body, const_edge exit)
{
  gimple_stmt_iterator bsi;
  unsigned i;

  if (exit != single_exit (loop))
    return false;

  for (i = 0; i < loop->num_nodes; i++)
    for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
      if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
	return false;

  return true;
}
/* Stores description of number of iterations of LOOP derived from
   EXIT (an exit edge of the LOOP) in NITER.  Returns true if some useful
   information could be derived (and fields of NITER have meaning described
   in comments at class tree_niter_desc declaration), false otherwise.
   When EVERY_ITERATION is true, only tests that are known to be executed
   every iteration are considered (i.e. only test that alone bounds the loop).
   If AT_STMT is not NULL, this function stores LOOP's condition statement in
   it when returning true.  */

static bool
number_of_iterations_exit_assumptions (class loop *loop, edge exit,
				       class tree_niter_desc *niter,
				       gcond **at_stmt, bool every_iteration,
				       basic_block *body)
{
  tree type;
  tree op0, op1;
  enum tree_code code;
  affine_iv iv0, iv1;
  bool safe;

  /* The condition at a fake exit (if it exists) does not control its
     execution.  */
  if (exit->flags & EDGE_FAKE)
    return false;

  /* Nothing to analyze if the loop is known to be infinite.  */
  if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
    return false;

  safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);

  if (every_iteration && !safe)
    return false;

  niter->assumptions = boolean_false_node;
  niter->control.base = NULL_TREE;
  niter->control.step = NULL_TREE;
  niter->control.no_overflow = false;
  gcond *stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (exit->src));
  if (!stmt)
    return false;

  if (at_stmt)
    *at_stmt = stmt;

  /* We want the condition for staying inside loop.  */
  code = gimple_cond_code (stmt);
  if (exit->flags & EDGE_TRUE_VALUE)
    code = invert_tree_comparison (code, false);

  switch (code)
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case NE_EXPR:
      break;

    case EQ_EXPR:
      return number_of_iterations_cltz (loop, exit, code, niter);

    default:
      return false;
    }

  op0 = gimple_cond_lhs (stmt);
  op1 = gimple_cond_rhs (stmt);
  type = TREE_TYPE (op0);

  if (TREE_CODE (type) != INTEGER_TYPE
      && !POINTER_TYPE_P (type))
    return false;

  tree iv0_niters = NULL_TREE;
  if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
			      op0, &iv0, safe ? &iv0_niters : NULL, false))
    return number_of_iterations_bitcount (loop, exit, code, niter);
  tree iv1_niters = NULL_TREE;
  if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
			      op1, &iv1, safe ? &iv1_niters : NULL, false))
    return false;
  /* Give up on complicated case.  */
  if (iv0_niters && iv1_niters)
    return false;

  /* We don't want to see undefined signed overflow warnings while
     computing the number of iterations.  */
  fold_defer_overflow_warnings ();

  iv0.base = expand_simple_operations (iv0.base);
  iv1.base = expand_simple_operations (iv1.base);
  bool body_from_caller = true;
  if (!body)
    {
      body = get_loop_body (loop);
      body_from_caller = false;
    }
  bool only_exit_p = loop_only_exit_p (loop, body, exit);
  if (!body_from_caller)
    free (body);
  if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
				  only_exit_p, safe))
    {
      fold_undefer_and_ignore_overflow_warnings ();
      return false;
    }

  /* Incorporate additional assumption implied by control iv.  */
  tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
  if (iv_niters)
    {
      tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
				     fold_convert (TREE_TYPE (niter->niter),
						   iv_niters));

      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);

      /* Refine upper bound if possible.  */
      if (TREE_CODE (iv_niters) == INTEGER_CST
	  && niter->max > wi::to_widest (iv_niters))
	niter->max = wi::to_widest (iv_niters);
    }

  /* There is no assumptions if the loop is known to be finite.  */
  if (!integer_zerop (niter->assumptions)
      && loop_constraint_set_p (loop, LOOP_C_FINITE))
    niter->assumptions = boolean_true_node;

  if (optimize >= 3)
    {
      niter->assumptions = simplify_using_outer_evolutions (loop,
							    niter->assumptions);
      niter->may_be_zero = simplify_using_outer_evolutions (loop,
							    niter->may_be_zero);
      niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
    }

  niter->assumptions
    = simplify_using_initial_conditions (loop,
					 niter->assumptions);
  niter->may_be_zero
    = simplify_using_initial_conditions (loop,
					 niter->may_be_zero);

  fold_undefer_and_ignore_overflow_warnings ();

  /* If NITER has simplified into a constant, update MAX.  */
  if (TREE_CODE (niter->niter) == INTEGER_CST)
    niter->max = wi::to_widest (niter->niter);

  return (!integer_zerop (niter->assumptions));
}
/* Like number_of_iterations_exit_assumptions, but return TRUE only if
   the niter information holds unconditionally.  */

bool
number_of_iterations_exit (class loop *loop, edge exit,
			   class tree_niter_desc *niter,
			   bool warn, bool every_iteration,
			   basic_block *body)
{
  gcond *stmt;
  if (!number_of_iterations_exit_assumptions (loop, exit, niter,
					      &stmt, every_iteration, body))
    return false;

  if (integer_nonzerop (niter->assumptions))
    return true;

  if (warn && dump_enabled_p ())
    dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmt,
		     "missed loop optimization: niters analysis ends up "
		     "with assumptions.\n");

  return false;
}
/* Try to determine the number of iterations of LOOP.  If we succeed,
   expression giving number of iterations is returned and *EXIT is
   set to the edge from that the information is obtained.  Otherwise
   chrec_dont_know is returned.  */

tree
find_loop_niter (class loop *loop, edge *exit)
{
  unsigned i;
  auto_vec<edge> exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;
  class tree_niter_desc desc;

  *exit = NULL;
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!number_of_iterations_exit (loop, ex, &desc, false))
	continue;

      if (integer_nonzerop (desc.may_be_zero))
	{
	  /* We exit in the first iteration through this exit.
	     We won't find anything better.  */
	  niter = build_int_cst (unsigned_type_node, 0);
	  *exit = ex;
	  break;
	}

      if (!integer_zerop (desc.may_be_zero))
	continue;

      aniter = desc.niter;

      if (!niter)
	{
	  /* Nothing recorded yet.  */
	  niter = aniter;
	  *exit = ex;
	  continue;
	}

      /* Prefer constants, the lower the better.  */
      if (TREE_CODE (aniter) != INTEGER_CST)
	continue;

      if (TREE_CODE (niter) != INTEGER_CST)
	{
	  niter = aniter;
	  *exit = ex;
	  continue;
	}

      if (tree_int_cst_lt (aniter, niter))
	{
	  niter = aniter;
	  *exit = ex;
	}
    }

  return niter ? niter : chrec_dont_know;
}
/* Return true if loop is known to have bounded number of iterations.  */

bool
finite_loop_p (class loop *loop)
{
  widest_int nit;
  int flags;

  flags = flags_from_decl_or_type (current_function_decl);
  if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Found loop %i to be finite: it is within "
		 "pure or const function.\n",
		 loop->num);
      return true;
    }

  if (loop->any_upper_bound
      || max_loop_iterations (loop, &nit))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
		 loop->num);
      return true;
    }

  if (loop->finite_p)
    {
      unsigned i;
      edge ex;
      auto_vec<edge> exits = get_loop_exit_edges (loop);

      /* If the loop has a normal exit, we can assume it will terminate.  */
      FOR_EACH_VEC_ELT (exits, i, ex)
	if (!(ex->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_FAKE)))
	  {
	    if (dump_file)
	      fprintf (dump_file, "Assume loop %i to be finite: it has an exit "
		       "and -ffinite-loops is on.\n", loop->num);
	    return true;
	  }
    }

  return false;
}
/*

   Analysis of a number of iterations of a loop by a brute-force evaluation.

*/

/* Bound on the number of iterations we try to evaluate.  */

#define MAX_ITERATIONS_TO_TRACK \
  ((unsigned) param_max_iterations_to_track)
/* Returns the loop phi node of LOOP such that ssa name X is derived from its
   result by a chain of operations such that all but exactly one of their
   operands are constants.  */

static gphi *
chain_of_csts_start (class loop *loop, tree x)
{
  gimple *stmt = SSA_NAME_DEF_STMT (x);
  tree use;
  basic_block bb = gimple_bb (stmt);
  enum tree_code code;

  if (!bb
      || !flow_bb_inside_loop_p (loop, bb))
    return NULL;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      if (bb == loop->header)
	return as_a <gphi *> (stmt);

      return NULL;
    }

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_references_memory_p (stmt)
      || TREE_CODE_CLASS (code) == tcc_reference
      || (code == ADDR_EXPR
	  && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
    return NULL;

  use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
  if (use == NULL_TREE)
    return NULL;

  return chain_of_csts_start (loop, use);
}
/* Determines whether the expression X is derived from a result of a phi node
   in header of LOOP such that

   * the derivation of X consists only from operations with constants
   * the initial value of the phi node is constant
   * the value of the phi node in the next iteration can be derived from the
     value in the current iteration by a chain of operations with constants,
     or is also a constant

   If such phi node exists, it is returned, otherwise NULL is returned.  */

static gphi *
get_base_for (class loop *loop, tree x)
{
  gphi *phi;
  tree init, next;

  if (is_gimple_min_invariant (x))
    return NULL;

  phi = chain_of_csts_start (loop, x);
  if (!phi)
    return NULL;

  init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));

  if (!is_gimple_min_invariant (init))
    return NULL;

  if (TREE_CODE (next) == SSA_NAME
      && chain_of_csts_start (loop, next) != phi)
    return NULL;

  return phi;
}
/* Given an expression X, then

   * if X is NULL_TREE, we return the constant BASE.
   * if X is a constant, we return the constant X.
   * otherwise X is a SSA name, whose value in the considered loop is derived
     by a chain of operations with constant from a result of a phi node in
     the header of the loop.  Then we return value of X when the value of the
     result of this phi node is given by the constant BASE.  */

static tree
get_val_for (tree x, tree base)
{
  gimple *stmt;

  gcc_checking_assert (is_gimple_min_invariant (base));

  if (!x)
    return base;
  else if (is_gimple_min_invariant (x))
    return x;

  stmt = SSA_NAME_DEF_STMT (x);
  if (gimple_code (stmt) == GIMPLE_PHI)
    return base;

  gcc_checking_assert (is_gimple_assign (stmt));

  /* STMT must be either an assignment of a single SSA name or an
     expression involving an SSA name and a constant.  Try to fold that
     expression using the value for the SSA name.  */
  if (gimple_assign_ssa_name_copy_p (stmt))
    return get_val_for (gimple_assign_rhs1 (stmt), base);
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
	   && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    return fold_build1 (gimple_assign_rhs_code (stmt),
			TREE_TYPE (gimple_assign_lhs (stmt)),
			get_val_for (gimple_assign_rhs1 (stmt), base));
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
	rhs1 = get_val_for (rhs1, base);
      else if (TREE_CODE (rhs2) == SSA_NAME)
	rhs2 = get_val_for (rhs2, base);
      else
	gcc_unreachable ();
      return fold_build2 (gimple_assign_rhs_code (stmt),
			  TREE_TYPE (gimple_assign_lhs (stmt)), rhs1, rhs2);
    }
  else
    gcc_unreachable ();
}
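/* Example (a sketch): for the chain

     i_1 = PHI <0(preheader), i_2(latch)>
     i_2 = i_1 + 3;

   get_val_for (i_2, build_int_cst (integer_type_node, 6)) folds i_1 + 3
   with i_1 taken to be 6 and returns 9, which is how loop_niter_by_eval
   below steps the condition operands from one iteration to the next.  */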
/* Tries to count the number of iterations of LOOP till it exits by EXIT
   by brute force -- i.e. by determining the value of the operands of the
   condition at EXIT in first few iterations of the loop (assuming that
   these values are constant) and determining the first one in that the
   condition is not satisfied.  Returns the constant giving the number
   of the iterations of LOOP if successful, chrec_dont_know otherwise.  */

tree
loop_niter_by_eval (class loop *loop, edge exit)
{
  tree acnd;
  tree op[2], val[2], next[2], aval[2];
  gphi *phi;
  unsigned i, j;
  enum tree_code cmp;

  gcond *cond = safe_dyn_cast <gcond *> (*gsi_last_bb (exit->src));
  if (!cond)
    return chrec_dont_know;

  cmp = gimple_cond_code (cond);
  if (exit->flags & EDGE_TRUE_VALUE)
    cmp = invert_tree_comparison (cmp, false);

  switch (cmp)
    {
    case EQ_EXPR:
    case NE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      op[0] = gimple_cond_lhs (cond);
      op[1] = gimple_cond_rhs (cond);
      break;

    default:
      return chrec_dont_know;
    }

  for (j = 0; j < 2; j++)
    {
      if (is_gimple_min_invariant (op[j]))
	{
	  val[j] = op[j];
	  next[j] = NULL_TREE;
	  op[j] = NULL_TREE;
	}
      else
	{
	  phi = get_base_for (loop, op[j]);
	  if (!phi)
	    return chrec_dont_know;
	  val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
	  next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
	}
    }

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
    {
      for (j = 0; j < 2; j++)
	aval[j] = get_val_for (op[j], val[j]);

      acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
      if (acnd && integer_zerop (acnd))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Proved that loop %d iterates %d times using brute force.\n",
		     loop->num, i);
	  return build_int_cst (unsigned_type_node, i);
	}

      for (j = 0; j < 2; j++)
	{
	  aval[j] = val[j];
	  val[j] = get_val_for (next[j], val[j]);
	  if (!is_gimple_min_invariant (val[j]))
	    {
	      fold_undefer_and_ignore_overflow_warnings ();
	      return chrec_dont_know;
	    }
	}

      /* If the next iteration would use the same base values
	 as the current one, there is no point looping further,
	 all following iterations will be the same as this one.  */
      if (val[0] == aval[0] && val[1] == aval[1])
	break;
    }

  fold_undefer_and_ignore_overflow_warnings ();

  return chrec_dont_know;
}
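/* A loop that only brute-force evaluation can bound (a sketch, not part of
   GCC):

     unsigned int i = 1;
     while (i != 13)
       i = (i * 5) % 17;   // not an affine IV, but a chain of constant ops

   Evaluating the exit condition for successive values of i proves a small
   constant iteration count, provided the exit is reached within
   MAX_ITERATIONS_TO_TRACK evaluations.  */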
/* Finds the exit of the LOOP by that the loop exits after a constant
   number of iterations and stores the exit edge to *EXIT.  The constant
   giving the number of iterations of LOOP is returned.  The number of
   iterations is determined using loop_niter_by_eval (i.e. by brute force
   evaluation).  If we are unable to find the exit for that loop_niter_by_eval
   determines the number of iterations, chrec_dont_know is returned.  */

tree
find_loop_niter_by_eval (class loop *loop, edge *exit)
{
  unsigned i;
  auto_vec<edge> exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;

  *exit = NULL;

  /* Loops with multiple exits are expensive to handle and less important.  */
  if (!flag_expensive_optimizations
      && exits.length () > 1)
    return chrec_dont_know;

  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!just_once_each_iteration_p (loop, ex->src))
	continue;

      aniter = loop_niter_by_eval (loop, ex);
      if (chrec_contains_undetermined (aniter))
	continue;

      if (niter
	  && !tree_int_cst_lt (aniter, niter))
	continue;

      niter = aniter;
      *exit = ex;
    }

  return niter ? niter : chrec_dont_know;
}
/*

   Analysis of upper bounds on number of iterations of a loop.

*/

static widest_int derive_constant_upper_bound_ops (tree, tree,
						   enum tree_code, tree);

/* Returns a constant upper bound on the value of the right-hand side of
   an assignment statement STMT.  */

static widest_int
derive_constant_upper_bound_assign (gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);

  return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
					  op0, code, op1);
}

/* Returns a constant upper bound on the value of expression VAL.  VAL
   is considered to be unsigned.  If its type is signed, its value must
   be nonnegative.  */

static widest_int
derive_constant_upper_bound (tree val)
{
  enum tree_code code;
  tree op0, op1, op2;

  extract_ops_from_tree (val, &code, &op0, &op1, &op2);
  return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
}

/* Returns a constant upper bound on the value of expression OP0 CODE OP1,
   whose type is TYPE.  The expression is considered to be unsigned.  If
   its type is signed, its value must be nonnegative.  */

static widest_int
derive_constant_upper_bound_ops (tree type, tree op0,
				 enum tree_code code, tree op1)
{
  tree subtype, maxt;
  widest_int bnd, max, cst;
  gimple *stmt;

  if (INTEGRAL_TYPE_P (type))
    maxt = TYPE_MAX_VALUE (type);
  else
    maxt = upper_bound_in_type (type, type);

  max = wi::to_widest (maxt);

  switch (code)
    {
    case INTEGER_CST:
      return wi::to_widest (op0);

    CASE_CONVERT:
      subtype = TREE_TYPE (op0);
      if (!TYPE_UNSIGNED (subtype)
	  /* If TYPE is also signed, the fact that VAL is nonnegative implies
	     that OP0 is nonnegative.  */
	  && TYPE_UNSIGNED (type)
	  && !tree_expr_nonnegative_p (op0))
	{
	  /* If we cannot prove that the casted expression is nonnegative,
	     we cannot establish more useful upper bound than the precision
	     of the type gives us.  */
	  return max;
	}

      /* We now know that op0 is an nonnegative value.  Try deriving an upper
	 bound for it.  */
      bnd = derive_constant_upper_bound (op0);

      /* If the bound does not fit in TYPE, max. value of TYPE could be
	 attained.  */
      if (wi::ltu_p (max, bnd))
	return max;

      return bnd;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
    case MINUS_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
	  || !tree_expr_nonnegative_p (op0))
	return max;

      /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
	 choose the most logical way how to treat this constant regardless
	 of the signedness of the type.  */
      cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
      if (code != MINUS_EXPR)
	cst = -cst;

      bnd = derive_constant_upper_bound (op0);

      if (wi::neg_p (cst))
	{
	  cst = -cst;
	  /* Avoid CST == 0x80000...  */
	  if (wi::neg_p (cst))
	    return max;

	  /* OP0 + CST.  We need to check that
	     BND <= MAX (type) - CST.  */

	  widest_int mmax = max - cst;
	  if (wi::leu_p (bnd, mmax))
	    return max;

	  return bnd + cst;
	}
      else
	{
	  /* OP0 - CST, where CST >= 0.

	     If TYPE is signed, we have already verified that OP0 >= 0, and we
	     know that the result is nonnegative.  This implies that
	     VAL <= BND - CST.

	     If TYPE is unsigned, we must additionally know that OP0 >= CST,
	     otherwise the operation underflows.
	   */

	  /* This should only happen if the type is unsigned; however, for
	     buggy programs that use overflowing signed arithmetics even with
	     -fno-wrapv, this condition may also be true for signed values.  */
	  if (wi::ltu_p (bnd, cst))
	    return max;

	  if (TYPE_UNSIGNED (type))
	    {
	      tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
				      wide_int_to_tree (type, cst));
	      if (!tem || integer_nonzerop (tem))
		return max;
	    }

	  bnd -= cst;
	}

      return bnd;

    case FLOOR_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
	  || tree_int_cst_sign_bit (op1))
	return max;

      bnd = derive_constant_upper_bound (op0);
      return wi::udiv_floor (bnd, wi::to_widest (op1));

    case BIT_AND_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
	  || tree_int_cst_sign_bit (op1))
	return max;
      return wi::to_widest (op1);

    case SSA_NAME:
      stmt = SSA_NAME_DEF_STMT (op0);
      if (gimple_code (stmt) != GIMPLE_ASSIGN
	  || gimple_assign_lhs (stmt) != op0)
	return max;
      return derive_constant_upper_bound_assign (stmt);

    default:
      return max;
    }
}
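/* Worked example (a sketch): for VAL = (unsigned int) c_3 where c_3 has
   unsigned char type, the CASE_CONVERT arm recurses on c_3; nothing better
   than its type maximum is known, so the derived bound is 255 rather than
   the maximum of unsigned int.  */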
/* Emit a -Waggressive-loop-optimizations warning if needed.  */

static void
do_warn_aggressive_loop_optimizations (class loop *loop,
				       widest_int i_bound, gimple *stmt)
{
  /* Don't warn if the loop doesn't have known constant bound.  */
  if (!loop->nb_iterations
      || TREE_CODE (loop->nb_iterations) != INTEGER_CST
      || !warn_aggressive_loop_optimizations
      /* To avoid warning multiple times for the same loop,
	 only start warning when we preserve loops.  */
      || (cfun->curr_properties & PROP_loops) == 0
      /* Only warn once per loop.  */
      || loop->warned_aggressive_loop_optimizations
      /* Only warn if undefined behavior gives us lower estimate than the
	 known constant bound.  */
      || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
      /* And undefined behavior happens unconditionally.  */
      || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
    return;

  edge e = single_exit (loop);
  if (e == NULL)
    return;

  gimple *estmt = last_nondebug_stmt (e->src);
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];
  print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
	     ? UNSIGNED : SIGNED);
  auto_diagnostic_group d;
  if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
		  "iteration %s invokes undefined behavior", buf))
    inform (gimple_location (estmt), "within this loop");
  loop->warned_aggressive_loop_optimizations = true;
}
/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP.  IS_EXIT
   is true if the loop is exited immediately after STMT, and this exit
   is taken at last when the STMT is executed BOUND + 1 times.
   REALISTIC is true if BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   BOUND times.  I_BOUND is a widest_int upper estimate on BOUND.  */

static void
record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
		 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
      print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
      fprintf (dump_file, " is %sexecuted at most ",
	       upper ? "" : "probably ");
      print_generic_expr (dump_file, bound, TDF_SLIM);
      fprintf (dump_file, " (bounded by ");
      print_decu (i_bound, dump_file);
      fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
    }

  /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
     real number of iterations.  */
  if (TREE_CODE (bound) != INTEGER_CST)
    realistic = false;
  else
    gcc_checking_assert (i_bound == wi::to_widest (bound));

  /* If we have a guaranteed upper bound, record it in the appropriate
     list, unless this is an !is_exit bound (i.e. undefined behavior in
     at_stmt) in a loop with known constant number of iterations.  */
  if (upper
      && (is_exit
	  || loop->nb_iterations == NULL_TREE
	  || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
    {
      class nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();

      elt->bound = i_bound;
      elt->stmt = at_stmt;
      elt->is_exit = is_exit;
      elt->next = loop->bounds;
      loop->bounds = elt;
    }

  /* If statement is executed on every path to the loop latch, we can directly
     infer the upper bound on the # of iterations of the loop.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
    upper = false;

  /* Update the number of iteration estimates according to the bound.
     If at_stmt is an exit then the loop latch is executed at most BOUND times,
     otherwise it can be executed BOUND + 1 times.  We will lower the estimate
     later if such statement must be executed on last iteration  */
  widest_int delta;
  if (is_exit)
    delta = 0;
  else
    delta = 1;
  widest_int new_i_bound = i_bound + delta;

  /* If an overflow occurred, ignore the result.  */
  if (wi::ltu_p (new_i_bound, delta))
    return;

  if (upper && !is_exit)
    do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
  record_niter_bound (loop, new_i_bound, realistic, upper);
}
/* Records the control iv analyzed in NITER for LOOP if the iv is valid
   and doesn't overflow.  */

static void
record_control_iv (class loop *loop, class tree_niter_desc *niter)
{
  struct control_iv *iv;

  if (!niter->control.base || !niter->control.step)
    return;

  if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
    return;

  iv = ggc_alloc<control_iv> ();
  iv->base = niter->control.base;
  iv->step = niter->control.step;
  iv->next = loop->control_ivs;
  loop->control_ivs = iv;

  return;
}
/* This function returns TRUE if below conditions are satisfied:
     1) VAR is SSA variable.
     2) VAR is an IV:{base, step} in its defining loop.
     3) IV doesn't overflow.
     4) Both base and step are integer constants.
     5) Base is the MIN/MAX value depends on IS_MIN.
   Store value of base to INIT correspondingly.  */

static bool
get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
{
  if (TREE_CODE (var) != SSA_NAME)
    return false;

  gimple *def_stmt = SSA_NAME_DEF_STMT (var);
  class loop *loop = loop_containing_stmt (def_stmt);

  if (loop == NULL)
    return false;

  affine_iv iv;
  if (!simple_iv (loop, loop, var, &iv, false))
    return false;

  if (!iv.no_overflow)
    return false;

  if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
    return false;

  if (is_min == tree_int_cst_sign_bit (iv.step))
    return false;

  *init = wi::to_wide (iv.base);
  return true;
}
/* Record the estimate on number of iterations of LOOP based on the fact that
   the induction variable BASE + STEP * i evaluated in STMT does not wrap and
   its values belong to the range <LOW, HIGH>.  REALISTIC is true if the
   estimated number of iterations is expected to be close to the real one.
   UPPER is true if we are sure the induction variable does not wrap.  */

static void
record_nonwrapping_iv (class loop *loop, tree base, tree step, gimple *stmt,
		       tree low, tree high, bool realistic, bool upper)
{
  tree niter_bound, extreme, delta;
  tree type = TREE_TYPE (base), unsigned_type;
  tree orig_base = base;

  if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Induction variable (");
      print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
      fprintf (dump_file, ") ");
      print_generic_expr (dump_file, base, TDF_SLIM);
      fprintf (dump_file, " + ");
      print_generic_expr (dump_file, step, TDF_SLIM);
      fprintf (dump_file, " * iteration does not wrap in statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, " in loop %d.\n", loop->num);
    }

  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);
  step = fold_convert (unsigned_type, step);

  if (tree_int_cst_sign_bit (step))
    {
      wide_int max;
      Value_Range base_range (TREE_TYPE (orig_base));
      if (get_range_query (cfun)->range_of_expr (base_range, orig_base)
	  && !base_range.undefined_p ())
	max = base_range.upper_bound ();
      extreme = fold_convert (unsigned_type, low);
      if (TREE_CODE (orig_base) == SSA_NAME
	  && TREE_CODE (high) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
	  && ((!base_range.varying_p ()
	       && !base_range.undefined_p ())
	      || get_cst_init_from_scev (orig_base, &max, false))
	  && wi::gts_p (wi::to_wide (high), max))
	base = wide_int_to_tree (unsigned_type, max);
      else if (TREE_CODE (base) != INTEGER_CST
	       && dominated_by_p (CDI_DOMINATORS,
				  loop->latch, gimple_bb (stmt)))
	base = fold_convert (unsigned_type, high);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
    }
  else
    {
      wide_int min;
      Value_Range base_range (TREE_TYPE (orig_base));
      if (get_range_query (cfun)->range_of_expr (base_range, orig_base)
	  && !base_range.undefined_p ())
	min = base_range.lower_bound ();
      extreme = fold_convert (unsigned_type, high);
      if (TREE_CODE (orig_base) == SSA_NAME
	  && TREE_CODE (low) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
	  && ((!base_range.varying_p ()
	       && !base_range.undefined_p ())
	      || get_cst_init_from_scev (orig_base, &min, true))
	  && wi::gts_p (min, wi::to_wide (low)))
	base = wide_int_to_tree (unsigned_type, min);
      else if (TREE_CODE (base) != INTEGER_CST
	       && dominated_by_p (CDI_DOMINATORS,
				  loop->latch, gimple_bb (stmt)))
	base = fold_convert (unsigned_type, low);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
    }

  /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
     would get out of the range.  */
  niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
  widest_int max = derive_constant_upper_bound (niter_bound);
  record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}
/* Determine information about number of iterations a LOOP from the index
   IDX of a data reference accessed in STMT.  RELIABLE is true if STMT is
   guaranteed to be executed in every iteration of LOOP.  Callback for
   for_each_index.  */

struct ilb_data
{
  class loop *loop;
  gimple *stmt;
};

static bool
idx_infer_loop_bounds (tree base, tree *idx, void *dta)
{
  struct ilb_data *data = (struct ilb_data *) dta;
  tree ev, init, step;
  tree low, high, type, next;
  bool sign, upper = true, has_flexible_size = false;
  class loop *loop = data->loop;

  if (TREE_CODE (base) != ARRAY_REF)
    return true;

  /* For arrays that might have flexible sizes, it is not guaranteed that they
     do not really extend over their declared size.  */
  if (array_ref_flexible_size_p (base))
    {
      has_flexible_size = true;
      upper = false;
    }

  class loop *dloop = loop_containing_stmt (data->stmt);
  if (!dloop)
    return true;

  ev = analyze_scalar_evolution (dloop, *idx);
  ev = instantiate_parameters (loop, ev);
  init = initial_condition (ev);
  step = evolution_part_in_loop_num (ev, loop->num);

  if (!init
      || !step
      || TREE_CODE (step) != INTEGER_CST
      || integer_zerop (step)
      || tree_contains_chrecs (init, NULL)
      || chrec_contains_symbols_defined_in_loop (init, loop->num))
    return true;

  low = array_ref_low_bound (base);
  high = array_ref_up_bound (base);

  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
  if (TREE_CODE (low) != INTEGER_CST
      || !high
      || TREE_CODE (high) != INTEGER_CST)
    return true;
  sign = tree_int_cst_sign_bit (step);
  type = TREE_TYPE (step);

  /* The array that might have flexible size most likely extends
     beyond its bounds.  */
  if (has_flexible_size
      && operand_equal_p (low, high, 0))
    return true;

  /* In case the relevant bound of the array does not fit in type, or
     it does, but bound + step (in type) still belongs into the range of the
     array, the index may wrap and still stay within the range of the array
     (consider e.g. if the array is indexed by the full range of
     unsigned char).

     To make things simpler, we require both bounds to fit into type, although
     there are cases where this would not be strictly necessary.  */
  if (!int_fits_type_p (high, type)
      || !int_fits_type_p (low, type))
    return true;
  low = fold_convert (type, low);
  high = fold_convert (type, high);

  if (sign)
    next = fold_binary (PLUS_EXPR, type, low, step);
  else
    next = fold_binary (PLUS_EXPR, type, high, step);

  if (tree_int_cst_compare (low, next) <= 0
      && tree_int_cst_compare (next, high) <= 0)
    return true;

  /* If access is not executed on every iteration, we must ensure that overflow
     may not make the access valid later.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
      && scev_probably_wraps_p (NULL_TREE,
				initial_condition_in_loop_num (ev, loop->num),
				step, data->stmt, loop, true))
    upper = false;

  record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
  return true;
}
/* Determine information about number of iterations of a LOOP from the bounds
   of arrays in the data reference REF accessed in STMT.  RELIABLE is true if
   STMT is guaranteed to be executed in every iteration of LOOP.  */

static void
infer_loop_bounds_from_ref (class loop *loop, gimple *stmt, tree ref)
{
  struct ilb_data data;

  data.loop = loop;
  data.stmt = stmt;
  for_each_index (&ref, idx_infer_loop_bounds, &data);
}
/* Determine information about number of iterations of a LOOP from the way
   arrays are used in STMT.  RELIABLE is true if STMT is guaranteed to be
   executed in every iteration of LOOP.  */

static void
infer_loop_bounds_from_array (class loop *loop, gimple *stmt)
{
  if (is_gimple_assign (stmt))
    {
      tree op0 = gimple_assign_lhs (stmt);
      tree op1 = gimple_assign_rhs1 (stmt);

      /* For each memory access, analyze its access function
	 and record a bound on the loop iteration domain.  */
      if (REFERENCE_CLASS_P (op0))
	infer_loop_bounds_from_ref (loop, stmt, op0);

      if (REFERENCE_CLASS_P (op1))
	infer_loop_bounds_from_ref (loop, stmt, op1);
    }
  else if (is_gimple_call (stmt))
    {
      tree arg, lhs;
      unsigned i, n = gimple_call_num_args (stmt);

      lhs = gimple_call_lhs (stmt);
      if (lhs
	  && REFERENCE_CLASS_P (lhs))
	infer_loop_bounds_from_ref (loop, stmt, lhs);

      for (i = 0; i < n; i++)
	{
	  arg = gimple_call_arg (stmt, i);
	  if (REFERENCE_CLASS_P (arg))
	    infer_loop_bounds_from_ref (loop, stmt, arg);
	}
    }
}
/* Determine information about number of iterations of a LOOP from the fact
   that pointer arithmetic in STMT does not overflow.  */
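/* For illustration only: in

     for (p = buf; foo (); p++)
       use (p);

   the pointer increment may neither wrap around the end of the address
   space nor (with -fdelete-null-pointer-checks) pass through NULL, so the
   evolution of P yields a bound on the number of iterations in which the
   increment can be executed.  */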
static void
infer_loop_bounds_from_pointer_arith (class loop *loop, gimple *stmt)
{
  tree def, base, step, scev, type, low, high;
  tree var, ptr;

  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return;

  def = gimple_assign_lhs (stmt);
  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!nowrap_type_p (type))
    return;

  ptr = gimple_assign_rhs1 (stmt);
  if (!expr_invariant_in_loop_p (loop, ptr))
    return;

  var = gimple_assign_rhs2 (stmt);
  if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
    return;

  class loop *uloop = loop_containing_stmt (stmt);
  scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
     produce a NULL pointer.  The contrary would mean NULL points to an object,
     while NULL is supposed to compare unequal with the address of all objects.
     Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
     NULL pointer since that would mean wrapping, which we assume here not to
     happen.  So, we can exclude NULL from the valid range of pointer
     arithmetic.  */
  if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
    low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
/* Determine information about number of iterations of a LOOP from the fact
   that signed arithmetic in STMT does not overflow.  */
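/* For illustration only: when overflow of signed arithmetic is undefined
   (no -fwrapv), a statement such as

     int i;
     ...
     i = i + 1;

   executed in every iteration cannot push I past INT_MAX, so the number of
   iterations in which it executes is bounded by the distance from I's
   initial value (or its known value range) to INT_MAX.  */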
static void
infer_loop_bounds_from_signedness (class loop *loop, gimple *stmt)
{
  tree def, base, step, scev, type, low, high;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  def = gimple_assign_lhs (stmt);

  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_OVERFLOW_UNDEFINED (type))
    return;

  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);
  Value_Range r (TREE_TYPE (def));
  get_range_query (cfun)->range_of_expr (r, def);
  if (!r.varying_p () && !r.undefined_p ())
    {
      low = wide_int_to_tree (type, r.lower_bound ());
      high = wide_int_to_tree (type, r.upper_bound ());
    }

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
/* The following analyzers extract information on the bounds
   of LOOP from the following undefined behaviors:

   - data references should not access elements over the statically
     allocated size,

   - signed variables should not overflow when flag_wrapv is not set.  */
static void
infer_loop_bounds_from_undefined (class loop *loop, basic_block *bbs)
{
  unsigned i;
  gimple_stmt_iterator bsi;
  basic_block bb;
  bool reliable;

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      /* If BB is not executed in each iteration of the loop, we cannot
	 use the operations in it to infer reliable upper bound on the
	 # of iterations of the loop.  However, we can use it as a guess.
	 Reliable guesses come only from array bounds.  */
      reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple *stmt = gsi_stmt (bsi);

	  infer_loop_bounds_from_array (loop, stmt);

	  if (reliable)
	    {
	      infer_loop_bounds_from_signedness (loop, stmt);
	      infer_loop_bounds_from_pointer_arith (loop, stmt);
	    }
	}
    }
}
/* Compare wide ints, callback for qsort.  */

static int
wide_int_cmp (const void *p1, const void *p2)
{
  const widest_int *d1 = (const widest_int *) p1;
  const widest_int *d2 = (const widest_int *) p2;
  return wi::cmpu (*d1, *d2);
}
/* Return index of BOUND in BOUNDS array sorted in increasing order.
   Lookup by binary search.  */
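/* For example (illustration only): with BOUNDS = {3, 7, 12},
   bound_index (bounds, 7) returns 1 and bound_index (bounds, 12) returns 2.
   The bound that is looked up is expected to be present in the vector.  */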
bound_index (const vec<widest_int> &bounds, const widest_int &bound)
{
  unsigned int end = bounds.length ();
  unsigned int begin = 0;

  /* Find a matching index by means of a binary search.  */
  while (begin != end)
    {
      unsigned int middle = (begin + end) / 2;
      widest_int index = bounds[middle];

      if (index == bound)
	return middle;
      else if (wi::ltu_p (index, bound))
	begin = middle + 1;
      else
	end = middle;
    }
  gcc_unreachable ();
}
/* We recorded loop bounds only for statements dominating loop latch (and thus
   executed each loop iteration).  If there are any bounds on statements not
   dominating the loop latch we can improve the estimate by walking the loop
   body and seeing if every path from loop header to loop latch contains
   some bounded statement.  */
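/* For illustration only: in

     for (i = 0; i < n; i++)
       {
	 if (flag)
	   a[i] = 0;
	 else
	   b[i] = 0;
       }

   neither store dominates the latch, so neither array bound alone was
   recorded as an upper bound for the whole loop; yet every path from the
   header to the latch executes one of the stores, so the number of
   iterations cannot exceed the larger of the two array bounds.  The walk
   below discovers exactly this kind of bound.  */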
static void
discover_iteration_bound_by_body_walk (class loop *loop)
{
  class nb_iter_bound *elt;
  auto_vec<widest_int> bounds;
  vec<vec<basic_block> > queues = vNULL;
  vec<basic_block> queue = vNULL;
  ptrdiff_t queue_index;
  ptrdiff_t latch_index = 0;

  /* Discover what bounds may interest us.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      widest_int bound = elt->bound;

      /* Exit terminates loop at given iteration, while non-exits produce
	 undefined effect on the next iteration.  */
      if (!elt->is_exit)
	{
	  bound += 1;
	  /* If an overflow occurred, ignore the result.  */
	  if (bound == 0)
	    continue;
	}

      if (!loop->any_upper_bound
	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
	bounds.safe_push (bound);
    }

  /* Exit early if there is nothing to do.  */
  if (!bounds.exists ())
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");

  /* Sort the bounds in increasing order.  */
  bounds.qsort (wide_int_cmp);

  /* For every basic block record the lowest bound that is guaranteed to
     terminate the loop.  */
  hash_map<basic_block, ptrdiff_t> bb_bounds;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      widest_int bound = elt->bound;
      if (!elt->is_exit)
	{
	  bound += 1;
	  /* If an overflow occurred, ignore the result.  */
	  if (bound == 0)
	    continue;
	}

      if (!loop->any_upper_bound
	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
	{
	  ptrdiff_t index = bound_index (bounds, bound);
	  ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
	  if (!entry)
	    bb_bounds.put (gimple_bb (elt->stmt), index);
	  else if ((ptrdiff_t)*entry > index)
	    *entry = index;
	}
    }

  hash_map<basic_block, ptrdiff_t> block_priority;
  /* Perform shortest path discovery loop->header ... loop->latch.

     The "distance" is given by the smallest loop bound of basic block
     present in the path and we look for path with largest smallest bound
     on it.

     To avoid the need for a Fibonacci heap on double ints we simply compress
     double ints into indexes to the BOUNDS array and then represent the queue
     as arrays of queues for every index.
     Index of BOUNDS.length () means that the execution of a given BB has
     no bounds determined.

     BLOCK_PRIORITY is a pointer map translating basic block into smallest index
     it was inserted into the priority queue with.  */
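  /* For example (illustration only): with BOUNDS = {5, 10}, a block known to
     terminate the loop after at most 5 iterations gets index 0, one bounded
     by 10 gets index 1, and a block with no known bound gets index 2, i.e.
     BOUNDS.length ().  The "priority queue" is then simply the array of
     worklists QUEUES[0..2], scanned from the highest index down.  */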
  /* Start walk in loop header with index set to infinite bound.  */
  queue_index = bounds.length ();
  queues.safe_grow_cleared (queue_index + 1, true);
  queue.safe_push (loop->header);
  queues[queue_index] = queue;
  block_priority.put (loop->header, queue_index);

  for (; queue_index >= 0; queue_index--)
    {
      if (latch_index < queue_index)
	{
	  while (queues[queue_index].length ())
	    {
	      basic_block bb;
	      ptrdiff_t bound_index = queue_index;
	      edge e;
	      edge_iterator ei;

	      queue = queues[queue_index];
	      bb = queue.pop ();

	      /* OK, we later inserted the BB with lower priority, skip it.  */
	      if (*block_priority.get (bb) > queue_index)
		continue;

	      /* See if we can improve the bound.  */
	      ptrdiff_t *entry = bb_bounds.get (bb);
	      if (entry && *entry < bound_index)
		bound_index = *entry;

	      /* Insert successors into the queue, watch for latch edge
		 and record greatest index we saw.  */
	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  bool insert = false;

		  if (loop_exit_edge_p (loop, e))
		    ;
		  else if (e == loop_latch_edge (loop)
			   && latch_index < bound_index)
		    latch_index = bound_index;
		  else if (!(entry = block_priority.get (e->dest)))
		    {
		      insert = true;
		      block_priority.put (e->dest, bound_index);
		    }
		  else if (*entry < bound_index)
		    {
		      insert = true;
		      *entry = bound_index;
		    }

		  if (insert)
		    queues[bound_index].safe_push (e->dest);
		}
	    }
	}
      queues[queue_index].release ();
    }

  gcc_assert (latch_index >= 0);
  if ((unsigned)latch_index < bounds.length ())
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found better loop bound ");
	  print_decu (bounds[latch_index], dump_file);
	  fprintf (dump_file, "\n");
	}
      record_niter_bound (loop, bounds[latch_index], false, true);
    }

  queues.release ();
}
/* See if every path across the loop goes through a statement that is known
   not to execute at the last iteration.  In that case we can decrease the
   iteration count by 1.  */
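/* For illustration only, assuming ARR is declared with 16 elements:

     for (i = 0; i < 17; i++)
       {
	 if (flag)
	   arr[i] = 0;
	 else
	   arr[i] = 1;
       }

   the exit test alone allows 16 latch executions, and because neither store
   dominates the latch that is the bound that gets recorded; but both stores
   become undefined once I reaches 16, and every path through the body
   executes one of them, so the latch can in fact run at most 15 times.  */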
static void
maybe_lower_iteration_bound (class loop *loop)
{
  hash_set<gimple *> *not_executed_last_iteration = NULL;
  class nb_iter_bound *elt;
  bool found_exit = false;
  auto_vec<basic_block> queue;
  bitmap visited;

  /* Collect all statements with interesting (i.e. lower than
     nb_iterations_upper_bound) bound on them.

     TODO: Due to the way record_estimate chooses estimates to store, the
     bounds will always be nb_iterations_upper_bound-1.  We can change this
     to also record statements not dominating the loop latch and update the
     walk below to the shortest path algorithm.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      if (!elt->is_exit
	  && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
	{
	  if (!not_executed_last_iteration)
	    not_executed_last_iteration = new hash_set<gimple *>;
	  not_executed_last_iteration->add (elt->stmt);
	}
    }
  if (!not_executed_last_iteration)
    return;

  /* Start DFS walk in the loop header and see if we can reach the
     loop latch or any of the exits (including statements with side
     effects that may terminate the loop otherwise) without visiting
     any of the statements known to have undefined effect on the last
     iteration.  */
  queue.safe_push (loop->header);
  visited = BITMAP_ALLOC (NULL);
  bitmap_set_bit (visited, loop->header->index);

  do
    {
      basic_block bb = queue.pop ();
      gimple_stmt_iterator gsi;
      bool stmt_found = false;

      /* Loop for possible exits and statements bounding the execution.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  if (not_executed_last_iteration->contains (stmt))
	    {
	      stmt_found = true;
	      break;
	    }
	  if (gimple_has_side_effects (stmt))
	    {
	      found_exit = true;
	      break;
	    }
	}
      if (found_exit)
	break;

      /* If no bounding statement is found, continue the walk.  */
      if (!stmt_found)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (loop_exit_edge_p (loop, e)
		  || e == loop_latch_edge (loop))
		{
		  found_exit = true;
		  break;
		}
	      if (bitmap_set_bit (visited, e->dest->index))
		queue.safe_push (e->dest);
	    }
	}
    }
  while (queue.length () && !found_exit);

  /* If every path through the loop reaches a bounding statement before exit,
     then we know the last iteration of the loop will have undefined effect
     and we can decrease number of iterations.  */

  if (!found_exit)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Reducing loop iteration estimate by 1; "
		 "undefined statement must be executed at the last iteration.\n");
      record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
			  false, true);
    }

  BITMAP_FREE (visited);
  delete not_executed_last_iteration;
}
/* Get expected upper bound for number of loop iterations for
   BUILT_IN_EXPECT_WITH_PROBABILITY for a condition COND.  */
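/* For illustration only: if the exit condition of a loop is annotated with
   __builtin_expect_with_probability so that the exit is taken with
   probability P in any given iteration, the loop is expected to run about
   1/P iterations; the value computed below is that ratio, rounded to an
   integer, to be used as an estimate rather than a guaranteed bound.  */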
static tree
get_upper_bound_based_on_builtin_expr_with_prob (gcond *cond)
{
  tree lhs = gimple_cond_lhs (cond);
  if (TREE_CODE (lhs) != SSA_NAME)
    return NULL_TREE;

  gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  gcall *def = dyn_cast<gcall *> (stmt);
  if (!def)
    return NULL_TREE;

  tree decl = gimple_call_fndecl (def);
  if (!decl
      || !fndecl_built_in_p (decl, BUILT_IN_EXPECT_WITH_PROBABILITY)
      || gimple_call_num_args (stmt) != 3)
    return NULL_TREE;

  tree c = gimple_call_arg (def, 1);
  tree condt = TREE_TYPE (lhs);
  tree res = fold_build2 (gimple_cond_code (cond),
			  boolean_type_node, c,
			  gimple_cond_rhs (cond));
  if (TREE_CODE (res) != INTEGER_CST)
    return NULL_TREE;

  tree prob = gimple_call_arg (def, 2);
  tree t = TREE_TYPE (prob);
  tree one
    = build_real_from_int_cst (t, integer_one_node);
  if (integer_zerop (res))
    prob = fold_build2 (MINUS_EXPR, t, one, prob);
  tree r = fold_build2 (RDIV_EXPR, t, one, prob);
  if (TREE_CODE (r) != REAL_CST)
    return NULL_TREE;

  HOST_WIDE_INT probi
    = real_to_integer (TREE_REAL_CST_PTR (r));
  return build_int_cst (condt, probi);
}
/* Records estimates on numbers of iterations of LOOP.  If USE_UNDEFINED_P
   is true also use estimates derived from undefined behavior.  */
void
estimate_numbers_of_iterations (class loop *loop)
{
  tree niter, type;
  unsigned i;
  class tree_niter_desc niter_desc;
  edge ex;
  widest_int bound;
  edge likely_exit;

  /* Give up if we already have tried to compute an estimation.  */
  if (loop->estimate_state != EST_NOT_COMPUTED)
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating # of iterations of loop %d\n", loop->num);

  loop->estimate_state = EST_AVAILABLE;

  /* If we have a measured profile, use it to estimate the number of
     iterations.  Normally this is recorded by branch_prob right after
     reading the profile.  In case we however found a new loop, record the
     information here.

     Explicitly check for profile status so we do not report
     wrong prediction hitrates for guessed loop iterations heuristics.
     Do not recompute already recorded bounds - we ought to be better on
     updating iteration bounds than updating profile in general and thus
     recomputing iteration bounds later in the compilation process will just
     introduce random roundoff errors.  */
  if (!loop->any_estimate
      && loop->header->count.reliable_p ())
    {
      gcov_type nit = expected_loop_iterations_unbounded (loop);
      bound = gcov_type_to_wide_int (nit);
      record_niter_bound (loop, bound, true, false);
    }

  /* Ensure that loop->nb_iterations is computed if possible.  If it turns out
     to be constant, we avoid undefined behavior implied bounds and instead
     diagnose those loops with -Waggressive-loop-optimizations.  */
  number_of_latch_executions (loop);

  basic_block *body = get_loop_body (loop);
  auto_vec<edge> exits = get_loop_exit_edges (loop, body);
  likely_exit = single_likely_exit (loop, exits);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (ex == likely_exit)
	{
	  gimple *stmt = *gsi_last_bb (ex->src);
	  gcond *cond = dyn_cast<gcond *> (stmt);
	  tree niter_bound
	    = get_upper_bound_based_on_builtin_expr_with_prob (cond);
	  if (niter_bound != NULL_TREE)
	    {
	      widest_int max = derive_constant_upper_bound (niter_bound);
	      record_estimate (loop, niter_bound, max, cond,
			       true, true, false);
	    }
	}
      if (!number_of_iterations_exit (loop, ex, &niter_desc,
				      false, false, body))
	continue;

      niter = niter_desc.niter;
      type = TREE_TYPE (niter);
      if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
	niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
			build_int_cst (type, 0),
			niter);
      record_estimate (loop, niter, niter_desc.max,
		       last_nondebug_stmt (ex->src),
		       true, ex == likely_exit, true);
      record_control_iv (loop, &niter_desc);
    }

  if (flag_aggressive_loop_optimizations)
    infer_loop_bounds_from_undefined (loop, body);
  free (body);

  discover_iteration_bound_by_body_walk (loop);

  maybe_lower_iteration_bound (loop);

  /* If we know the exact number of iterations of this loop, try to
     not break code with undefined behavior by not recording smaller
     maximum number of iterations.  */
  if (loop->nb_iterations
      && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
    }
}
/* Sets NIT to the estimated number of executions of the latch of the
   LOOP.  If CONSERVATIVE is true, we must be sure that NIT is at least as
   large as the number of iterations.  If we have no reliable estimate,
   the function returns false, otherwise returns true.  */
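/* A minimal usage sketch (hypothetical caller, not part of this file):

     widest_int nit;
     if (estimated_loop_iterations (loop, &nit)
	 && wi::leu_p (nit, 4))
       unroll_completely (loop);

   where unroll_completely stands for whatever transformation only pays off
   for short-running loops.  */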
bool
estimated_loop_iterations (class loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations (loop);

  return (get_estimated_loop_iterations (loop, nit));
}
/* Similar to estimated_loop_iterations, but returns the estimate only
   if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
estimated_loop_iterations_int (class loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!estimated_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}
/* Sets NIT to an upper bound for the maximum number of executions of the
   latch of the LOOP.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
max_loop_iterations (class loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations (loop);

  return get_max_loop_iterations (loop, nit);
}

/* Similar to max_loop_iterations, but returns the estimate only
   if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
max_loop_iterations_int (class loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!max_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}

/* Sets NIT to a likely upper bound for the maximum number of executions of
   the latch of the LOOP.  If we have no reliable estimate, the function
   returns false, otherwise returns true.  */

bool
likely_max_loop_iterations (class loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations (loop);

  return get_likely_max_loop_iterations (loop, nit);
}

/* Similar to likely_max_loop_iterations, but returns the estimate only
   if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
likely_max_loop_iterations_int (class loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!likely_max_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}
/* Returns an estimate for the number of executions of statements
   in the LOOP.  For statements before the loop exit, this exceeds
   the number of executions of the latch by one.  */
HOST_WIDE_INT
estimated_stmt_executions_int (class loop *loop)
{
  HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
  HOST_WIDE_INT snit;

  if (nit == -1)
    return -1;

  snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);

  /* If the computation overflows, return -1.  */
  return snit < 0 ? -1 : snit;
}

/* Sets NIT to the maximum number of executions of the latch of the
   LOOP, plus one.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
max_stmt_executions (class loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!max_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}

/* Sets NIT to the estimated maximum number of executions of the latch of the
   LOOP, plus one.  If we have no likely estimate, the function returns
   false, otherwise returns true.  */

bool
likely_max_stmt_executions (class loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!likely_max_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}

/* Sets NIT to the estimated number of executions of the latch of the
   LOOP, plus one.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
estimated_stmt_executions (class loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!estimated_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}
/* Records estimates on numbers of iterations of loops.  */

void
estimate_numbers_of_iterations (function *fn)
{
  /* We don't want to issue signed overflow warnings while getting
     loop iteration estimates.  */
  fold_defer_overflow_warnings ();

  for (auto loop : loops_list (fn, 0))
    estimate_numbers_of_iterations (loop);

  fold_undefer_and_ignore_overflow_warnings ();
}
/* Returns true if statement S1 dominates statement S2.  */

bool
stmt_dominates_stmt_p (gimple *s1, gimple *s2)
{
  basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);

  if (bb1 == bb2)
    {
      gimple_stmt_iterator bsi;

      if (gimple_code (s2) == GIMPLE_PHI)
	return false;

      if (gimple_code (s1) == GIMPLE_PHI)
	return true;

      for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
	if (gsi_stmt (bsi) == s1)
	  return true;

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}
/* Returns true when we can prove that the number of executions of
   STMT in the loop is at most NITER, according to the bound on
   the number of executions of the statement NITER_BOUND->stmt recorded in
   NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.

   ??? This code can become quite a CPU hog - we can have many bounds,
   and large basic blocks force stmt_dominates_stmt_p to be queried
   many times, so the whole thing is O(n^2) for each
   scev_probably_wraps_p invocation (and that can be done n times).

   It would make more sense (and give better answers) to remember the BB
   bounds computed by discover_iteration_bound_by_body_walk.  */
static bool
n_of_executions_at_most (gimple *stmt,
			 class nb_iter_bound *niter_bound,
			 tree niter)
{
  widest_int bound = niter_bound->bound;
  tree nit_type = TREE_TYPE (niter), e;
  enum tree_code cmp;

  gcc_assert (TYPE_UNSIGNED (nit_type));

  /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
     the number of iterations is small.  */
  if (!wi::fits_to_tree_p (bound, nit_type))
    return false;

  /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
     times.  This means that:

     -- if NITER_BOUND->is_exit is true, then everything after
	it is executed at most NITER_BOUND->bound times.

     -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
	is executed, then NITER_BOUND->stmt is executed as well in the same
	iteration then STMT is executed at most NITER_BOUND->bound + 1 times.

	If we can determine that NITER_BOUND->stmt is always executed
	after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
	We can conclude this when both statements belong to the same
	basic block, STMT is before NITER_BOUND->stmt, and there are no
	statements with side effects in between.  */

  if (niter_bound->is_exit)
    {
      if (stmt == niter_bound->stmt
	  || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
	cmp = GE_EXPR;
      else
	cmp = GT_EXPR;
    }
  else
    {
      if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
	{
	  gimple_stmt_iterator bsi;
	  if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
	      || gimple_code (stmt) == GIMPLE_PHI
	      || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
	    return false;

	  /* By stmt_dominates_stmt_p we already know that STMT appears
	     before NITER_BOUND->STMT.  Still need to test that the loop
	     cannot be terminated by a side effect in between.  */
	  for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
	       gsi_next (&bsi))
	    if (gimple_has_side_effects (gsi_stmt (bsi)))
	      return false;

	  bound += 1;
	  if (bound == 0
	      || !wi::fits_to_tree_p (bound, nit_type))
	    return false;
	}
      cmp = GT_EXPR;
    }

  e = fold_binary (cmp, boolean_type_node,
		   niter, wide_int_to_tree (nit_type, bound));
  return e && integer_nonzerop (e);
}
/* Returns true if the arithmetic in TYPE can be assumed not to wrap.  */

bool
nowrap_type_p (tree type)
{
  if (ANY_INTEGRAL_TYPE_P (type)
      && TYPE_OVERFLOW_UNDEFINED (type))
    return true;

  if (POINTER_TYPE_P (type))
    return true;

  return false;
}
/* Return true if we can prove LOOP is exited before evolution of induction
   variable {BASE, STEP} overflows with respect to its type bound.  */
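/* For illustration only: for an IV such as

     signed char i;
     for (i = 0; foo (); i += 4)
       ...

   the evolution {0, +, 4} stays within the bounds of signed char for the
   first (127 - 0) / 4 = 31 steps, so if the recorded maximum number of
   iterations of the loop is smaller than 31, the IV provably never
   overflows.  */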
static bool
loop_exits_before_overflow (tree base, tree step,
			    gimple *at_stmt, class loop *loop)
{
  widest_int niter;
  struct control_iv *civ;
  class nb_iter_bound *bound;
  tree e, delta, step_abs, unsigned_base;
  tree type = TREE_TYPE (step);
  tree unsigned_type, valid_niter;

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  /* Compute the number of iterations before we reach the bound of the
     type, and verify that the loop is exited before this occurs.  */
  unsigned_type = unsigned_type_for (type);
  unsigned_base = fold_convert (unsigned_type, base);

  if (tree_int_cst_sign_bit (step))
    {
      tree extreme = fold_convert (unsigned_type,
				   lower_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
      step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
			      fold_convert (unsigned_type, step));
    }
  else
    {
      tree extreme = fold_convert (unsigned_type,
				   upper_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
      step_abs = fold_convert (unsigned_type, step);
    }

  valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);

  estimate_numbers_of_iterations (loop);

  if (max_loop_iterations (loop, &niter)
      && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
      && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
			   wide_int_to_tree (TREE_TYPE (valid_niter),
					     niter))) != NULL
      && integer_nonzerop (e))
    {
      fold_undefer_and_ignore_overflow_warnings ();
      return true;
    }

  for (bound = loop->bounds; bound; bound = bound->next)
    {
      if (n_of_executions_at_most (at_stmt, bound, valid_niter))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return true;
	}
    }

  fold_undefer_and_ignore_overflow_warnings ();

  /* Try to prove loop is exited before {base, step} overflows with the
     help of analyzed loop control IV.  This is done only for IVs with
     constant step because otherwise we don't have the information.  */
  if (TREE_CODE (step) == INTEGER_CST)
    {
      for (civ = loop->control_ivs; civ; civ = civ->next)
	{
	  enum tree_code code;
	  tree civ_type = TREE_TYPE (civ->step);

	  /* Have to consider type difference because operand_equal_p ignores
	     that for constants.  */
	  if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
	      || element_precision (type) != element_precision (civ_type))
	    continue;

	  /* Only consider control IV with same step.  */
	  if (!operand_equal_p (step, civ->step, 0))
	    continue;

	  /* Done proving if this is a no-overflow control IV.  */
	  if (operand_equal_p (base, civ->base, 0))
	    return true;

	  /* Control IV is recorded after expanding simple operations;
	     here we expand base and compare it too.  */
	  tree expanded_base = expand_simple_operations (base);
	  if (operand_equal_p (expanded_base, civ->base, 0))
	    return true;
	  /* If this is a before stepping control IV, in other words, we have

	       {civ_base, step} = {base + step, step}

	     Because civ {base + step, step} doesn't overflow during loop
	     iterations, {base, step} will not overflow if we can prove the
	     operation "base + step" does not overflow.  Specifically, we try
	     to prove below conditions are satisfied:

	       base <= UPPER_BOUND (type) - step  ;;step > 0
	       base >= LOWER_BOUND (type) - step  ;;step < 0

	     by proving the reverse conditions are false using loop's initial
	     conditions.  */
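	  /* Continuing the reasoning above with a concrete case (illustration
	     only): if the recorded control IV is {base + step, step} with
	     step > 0, then proving "base <= UPPER_BOUND (type) - step" under
	     the loop's initial conditions shows that "base + step" does not
	     overflow, and therefore {base, step} cannot overflow either,
	     because from the second iteration on it takes exactly the values
	     of the non-overflowing control IV.  */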
	  if (POINTER_TYPE_P (TREE_TYPE (base)))
	    code = POINTER_PLUS_EXPR;
	  else
	    code = PLUS_EXPR;

	  tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
	  tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
					       expanded_base, step);
	  if (operand_equal_p (stepped, civ->base, 0)
	      || operand_equal_p (expanded_stepped, civ->base, 0))
	    {
	      tree extreme;

	      if (tree_int_cst_sign_bit (step))
		{
		  code = LT_EXPR;
		  extreme = lower_bound_in_type (type, type);
		}
	      else
		{
		  code = GT_EXPR;
		  extreme = upper_bound_in_type (type, type);
		}
	      extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
	      e = fold_build2 (code, boolean_type_node, base, extreme);
	      e = simplify_using_initial_conditions (loop, e);
	      if (integer_zerop (e))
		return true;
	    }
	}
    }

  return false;
}
/* VAR is a scev variable whose evolution part is the constant STEP; this
   function proves that VAR can't overflow by using value range info.  If
   VAR's value range is [MIN, MAX], it can be proven by:

     MAX + step doesn't overflow    ; if step > 0
   or
     MIN + step doesn't underflow   ; if step < 0.

   We can only do this if VAR is computed in every loop iteration, i.e.,
   VAR's definition has to dominate the loop latch.  Consider the example
   below (abridged):

     # RANGE [0, 4294967294] NONZERO 65535
     # i_21 = PHI <0(3), i_18(9)>
     ...
     # RANGE [0, 65533] NONZERO 65535
     _6 = i_21 + 4294967295;
     # RANGE [0, 65533] NONZERO 65535
     _7 = (long unsigned int) _6;
     # RANGE [0, 524264] NONZERO 524280
     ...
     # PT = nonlocal escaped
     ...
     # RANGE [1, 65535] NONZERO 65535
     ...

   VAR _6 doesn't overflow only with the pre-condition (i_21 != 0), so here
   we can't use _6 to prove no-overflow for _7.  In fact, var _7 takes the
   value sequence (4294967295, 0, 1, ..., 65533) during the loop lifetime,
   rather than (4294967295, 4294967296, ...).  */
static bool
scev_var_range_cant_overflow (tree var, tree step, class loop *loop)
{
  tree type;
  wide_int minv, maxv, diff, step_wi;

  if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
    return false;

  /* Check if VAR evaluates in every loop iteration.  It's not the case
     if VAR is default definition or does not dominate loop's latch.  */
  basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
  if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
    return false;

  Value_Range r (TREE_TYPE (var));
  get_range_query (cfun)->range_of_expr (r, var);
  if (r.varying_p () || r.undefined_p ())
    return false;

  /* VAR is a scev whose evolution part is STEP and value range info
     is [MIN, MAX], we can prove its no-overflowness by conditions:

       type_MAX - MAX >= step   ; if step > 0
       MIN - type_MIN >= |step| ; if step < 0.

     Or VAR must take a value outside of its value range, which is not
     true.  */
  step_wi = wi::to_wide (step);
  type = TREE_TYPE (var);
  if (tree_int_cst_sign_bit (step))
    {
      diff = r.lower_bound () - wi::to_wide (lower_bound_in_type (type, type));
      step_wi = - step_wi;
    }
  else
    diff = wi::to_wide (upper_bound_in_type (type, type)) - r.upper_bound ();

  return (wi::geu_p (diff, step_wi));
}
/* Return false only when the induction variable BASE + STEP * I is
   known to not overflow: i.e. when the number of iterations is small
   enough with respect to the step and initial condition in order to
   keep the evolution confined in TYPE's bounds.  Return true when the
   iv is known to overflow or when the property is not computable.

   USE_OVERFLOW_SEMANTICS is true if this function should assume that
   the rules for overflow of the given language apply (e.g., that signed
   arithmetic in C does not overflow).

   If VAR is an SSA name, this function also returns false if VAR can
   be proven not to overflow with value range info.  */
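/* A minimal usage sketch (hypothetical caller, not part of this file):

     bool wraps = scev_probably_wraps_p (var, base, step, stmt, loop,
					 use_overflow_semantics);
     if (!wraps)
       treat_iv_as_non_wrapping (loop, var);

   where treat_iv_as_non_wrapping stands for whatever optimization needs the
   no-wrap guarantee; note that a true return value only means the property
   could not be proven, not that the IV certainly wraps.  */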
bool
scev_probably_wraps_p (tree var, tree base, tree step,
		       gimple *at_stmt, class loop *loop,
		       bool use_overflow_semantics)
{
  /* FIXME: We really need something like
     http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.

     We used to test for the following situation that frequently appears
     during address arithmetics:

       D.1621_13 = (long unsigned intD.4) D.1620_12;
       D.1622_14 = D.1621_13 * 8;
       D.1623_15 = (doubleD.29 *) D.1622_14;

     And derived that the sequence corresponding to D_14
     can be proved to not wrap because it is used for computing a
     memory access; however, this is not really the case -- for example,
     if D_12 = (unsigned char) [254,+,1], then D_14 has values
     2032, 2040, 0, 8, ..., but the code is still legal.  */

  if (chrec_contains_undetermined (base)
      || chrec_contains_undetermined (step))
    return true;

  if (integer_zerop (step))
    return false;

  /* If we can use the fact that signed and pointer arithmetic does not
     wrap, we are done.  */
  if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
    return false;

  /* To be able to use estimates on number of iterations of the loop,
     we must have an upper bound on the absolute value of the step.  */
  if (TREE_CODE (step) != INTEGER_CST)
    return true;

  /* Check if var can be proven not to overflow with value range info.  */
  if (var && TREE_CODE (var) == SSA_NAME
      && scev_var_range_cant_overflow (var, step, loop))
    return false;

  if (loop_exits_before_overflow (base, step, at_stmt, loop))
    return false;

  /* At this point we still don't have a proof that the iv does not
     overflow: give up.  */
  return true;
}
/* Frees the information on upper bounds on numbers of iterations of LOOP.  */

void
free_numbers_of_iterations_estimates (class loop *loop)
{
  struct control_iv *civ;
  class nb_iter_bound *bound;

  loop->nb_iterations = NULL;
  loop->estimate_state = EST_NOT_COMPUTED;
  for (bound = loop->bounds; bound;)
    {
      class nb_iter_bound *next = bound->next;
      ggc_free (bound);
      bound = next;
    }
  loop->bounds = NULL;

  for (civ = loop->control_ivs; civ;)
    {
      struct control_iv *next = civ->next;
      ggc_free (civ);
      civ = next;
    }
  loop->control_ivs = NULL;
}
/* Frees the information on upper bounds on numbers of iterations of loops.  */

void
free_numbers_of_iterations_estimates (function *fn)
{
  for (auto loop : loops_list (fn, 0))
    free_numbers_of_iterations_estimates (loop);
}
/* Substitute value VAL for ssa name NAME inside expressions held
   in LOOP.  */

void
substitute_in_loop_info (class loop *loop, tree name, tree val)
{
  loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
}