gcc/tree-ssa-loop-niter.c
1/* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004-2017 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it
7under the terms of the GNU General Public License as published by the
8Free Software Foundation; either version 3, or (at your option) any
9later version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT
12ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
c7131fb2 23#include "backend.h"
957060b5 24#include "rtl.h"
e9eb809d 25#include "tree.h"
c7131fb2 26#include "gimple.h"
957060b5 27#include "tree-pass.h"
c7131fb2 28#include "ssa.h"
957060b5
AM
29#include "gimple-pretty-print.h"
30#include "diagnostic-core.h"
2f07b722 31#include "stor-layout.h"
40e23961 32#include "fold-const.h"
d8a2d370 33#include "calls.h"
f9cc1a70 34#include "intl.h"
45b0be94 35#include "gimplify.h"
5be5c238 36#include "gimple-iterator.h"
442b4905 37#include "tree-cfg.h"
e28030cf
AM
38#include "tree-ssa-loop-ivopts.h"
39#include "tree-ssa-loop-niter.h"
442b4905 40#include "tree-ssa-loop.h"
e9eb809d 41#include "cfgloop.h"
e9eb809d
ZD
42#include "tree-chrec.h"
43#include "tree-scalar-evolution.h"
44#include "params.h"
e9eb809d 45
71343877 46
b3ce5b6e
ZD
47/* The maximum number of dominator BBs we search for conditions
48 of loop header copies we use for simplifying a conditional
49 expression. */
50#define MAX_DOMINATORS_TO_WALK 8
e9eb809d
ZD
51
52/*
53
54 Analysis of number of iterations of an affine exit test.
55
56*/
57
b3ce5b6e
ZD
58/* Bounds on some value, BELOW <= X <= UP. */
59
a79683d5 60struct bounds
b3ce5b6e
ZD
61{
62 mpz_t below, up;
a79683d5 63};
b3ce5b6e 64
b3ce5b6e
ZD
65
66/* Splits expression EXPR to a variable part VAR and constant OFFSET. */
67
68static void
69split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
70{
71 tree type = TREE_TYPE (expr);
72 tree op0, op1;
b3ce5b6e
ZD
73 bool negate = false;
74
75 *var = expr;
76 mpz_set_ui (offset, 0);
77
78 switch (TREE_CODE (expr))
79 {
80 case MINUS_EXPR:
81 negate = true;
82 /* Fallthru. */
83
84 case PLUS_EXPR:
5be014d5 85 case POINTER_PLUS_EXPR:
b3ce5b6e
ZD
86 op0 = TREE_OPERAND (expr, 0);
87 op1 = TREE_OPERAND (expr, 1);
88
89 if (TREE_CODE (op1) != INTEGER_CST)
90 break;
91
92 *var = op0;
93 /* Always sign extend the offset. */
807e902e 94 wi::to_mpz (op1, offset, SIGNED);
eab1da69
UB
95 if (negate)
96 mpz_neg (offset, offset);
b3ce5b6e
ZD
97 break;
98
99 case INTEGER_CST:
100 *var = build_int_cst_type (type, 0);
807e902e 101 wi::to_mpz (expr, offset, TYPE_SIGN (type));
b3ce5b6e
ZD
102 break;
103
104 default:
105 break;
106 }
107}
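/* As a small illustration (hypothetical operands, chosen here only as an
   example): splitting n_1 + 7 yields *VAR = n_1 and OFFSET = 7; splitting
   the MINUS_EXPR n_1 - 7 yields *VAR = n_1 and OFFSET = -7, because the
   constant operand is negated; and a plain INTEGER_CST such as 42 is split
   into *VAR = 0 and OFFSET = 42.  */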
108
7b008bbc
BC
109/* From the condition C0 CMP C1, derive information regarding the value
110 range of VAR, which is of TYPE. Results are stored to BELOW and UP. */
111
112static void
113refine_value_range_using_guard (tree type, tree var,
114 tree c0, enum tree_code cmp, tree c1,
115 mpz_t below, mpz_t up)
116{
117 tree varc0, varc1, ctype;
118 mpz_t offc0, offc1;
119 mpz_t mint, maxt, minc1, maxc1;
120 wide_int minv, maxv;
121 bool no_wrap = nowrap_type_p (type);
122 bool c0_ok, c1_ok;
123 signop sgn = TYPE_SIGN (type);
124
125 switch (cmp)
126 {
127 case LT_EXPR:
128 case LE_EXPR:
129 case GT_EXPR:
130 case GE_EXPR:
131 STRIP_SIGN_NOPS (c0);
132 STRIP_SIGN_NOPS (c1);
133 ctype = TREE_TYPE (c0);
134 if (!useless_type_conversion_p (ctype, type))
135 return;
136
137 break;
138
139 case EQ_EXPR:
140 /* We could derive quite precise information from EQ_EXPR, however,
141 such a guard is unlikely to appear, so we do not bother with
142 handling it. */
143 return;
144
145 case NE_EXPR:
146 /* NE_EXPR comparisons do not contain much useful information,
147 except for cases of comparing with bounds. */
148 if (TREE_CODE (c1) != INTEGER_CST
149 || !INTEGRAL_TYPE_P (type))
150 return;
151
152 /* Ensure that the condition speaks about an expression in the same
153 type as X and Y. */
154 ctype = TREE_TYPE (c0);
155 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
156 return;
157 c0 = fold_convert (type, c0);
158 c1 = fold_convert (type, c1);
159
160 if (operand_equal_p (var, c0, 0))
161 {
162 mpz_t valc1;
163
164 /* Case of comparing VAR with its below/up bounds. */
165 mpz_init (valc1);
166 wi::to_mpz (c1, valc1, TYPE_SIGN (type));
167 if (mpz_cmp (valc1, below) == 0)
168 cmp = GT_EXPR;
169 if (mpz_cmp (valc1, up) == 0)
170 cmp = LT_EXPR;
171
172 mpz_clear (valc1);
173 }
174 else
175 {
176 /* Case of comparing with the bounds of the type. */
177 wide_int min = wi::min_value (type);
178 wide_int max = wi::max_value (type);
179
180 if (wi::eq_p (c1, min))
181 cmp = GT_EXPR;
182 if (wi::eq_p (c1, max))
183 cmp = LT_EXPR;
184 }
185
186 /* Quick return if no useful information. */
187 if (cmp == NE_EXPR)
188 return;
189
190 break;
191
192 default:
193 return;
194 }
195
196 mpz_init (offc0);
197 mpz_init (offc1);
198 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
199 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
200
201 /* We are only interested in comparisons of expressions based on VAR. */
202 if (operand_equal_p (var, varc1, 0))
203 {
204 std::swap (varc0, varc1);
205 mpz_swap (offc0, offc1);
206 cmp = swap_tree_comparison (cmp);
207 }
208 else if (!operand_equal_p (var, varc0, 0))
209 {
210 mpz_clear (offc0);
211 mpz_clear (offc1);
212 return;
213 }
214
215 mpz_init (mint);
216 mpz_init (maxt);
217 get_type_static_bounds (type, mint, maxt);
218 mpz_init (minc1);
219 mpz_init (maxc1);
220 /* Setup range information for varc1. */
221 if (integer_zerop (varc1))
222 {
223 wi::to_mpz (integer_zero_node, minc1, TYPE_SIGN (type));
224 wi::to_mpz (integer_zero_node, maxc1, TYPE_SIGN (type));
225 }
226 else if (TREE_CODE (varc1) == SSA_NAME
227 && INTEGRAL_TYPE_P (type)
228 && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
229 {
230 gcc_assert (wi::le_p (minv, maxv, sgn));
231 wi::to_mpz (minv, minc1, sgn);
232 wi::to_mpz (maxv, maxc1, sgn);
233 }
234 else
235 {
236 mpz_set (minc1, mint);
237 mpz_set (maxc1, maxt);
238 }
239
240 /* Compute valid range information for varc1 + offc1. Note nothing
241 useful can be derived if it overflows or underflows. Overflow or
242 underflow could happen when:
243
244 offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
245 offc1 < 0 && varc1 + offc1 < MIN_VAL (type). */
246 mpz_add (minc1, minc1, offc1);
247 mpz_add (maxc1, maxc1, offc1);
248 c1_ok = (no_wrap
249 || mpz_sgn (offc1) == 0
250 || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
251 || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
252 if (!c1_ok)
253 goto end;
254
255 if (mpz_cmp (minc1, mint) < 0)
256 mpz_set (minc1, mint);
257 if (mpz_cmp (maxc1, maxt) > 0)
258 mpz_set (maxc1, maxt);
259
260 if (cmp == LT_EXPR)
261 {
262 cmp = LE_EXPR;
263 mpz_sub_ui (maxc1, maxc1, 1);
264 }
265 if (cmp == GT_EXPR)
266 {
267 cmp = GE_EXPR;
268 mpz_add_ui (minc1, minc1, 1);
269 }
270
271 /* Compute range information for varc0. If there is no overflow,
272 the condition implied that
273
274 (varc0) cmp (varc1 + offc1 - offc0)
275
276 We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
277 or the below bound if cmp is GE_EXPR.
278
279 To prove there is no overflow/underflow, we need to check below
280 four cases:
281 1) cmp == LE_EXPR && offc0 > 0
282
283 (varc0 + offc0) doesn't overflow
284 && (varc1 + offc1 - offc0) doesn't underflow
285
286 2) cmp == LE_EXPR && offc0 < 0
287
288 (varc0 + offc0) doesn't underflow
289 && (varc1 + offc1 - offc0) doesn't overflow
290
291 In this case, (varc0 + offc0) will never underflow if we can
292 prove (varc1 + offc1 - offc0) doesn't overflow.
293
294 3) cmp == GE_EXPR && offc0 < 0
295
296 (varc0 + offc0) doesn't underflow
297 && (varc1 + offc1 - offc0) doesn't overflow
298
299 4) cmp == GE_EXPR && offc0 > 0
300
301 (varc0 + offc0) doesn't overflow
302 && (varc1 + offc1 - offc0) doesn't underflow
303
304 In this case, (varc0 + offc0) will never overflow if we can
305 prove (varc1 + offc1 - offc0) doesn't underflow.
306
307 Note we only handle case 2 and 4 in below code. */
308
309 mpz_sub (minc1, minc1, offc0);
310 mpz_sub (maxc1, maxc1, offc0);
311 c0_ok = (no_wrap
312 || mpz_sgn (offc0) == 0
313 || (cmp == LE_EXPR
314 && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
315 || (cmp == GE_EXPR
316 && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
317 if (!c0_ok)
318 goto end;
319
320 if (cmp == LE_EXPR)
321 {
322 if (mpz_cmp (up, maxc1) > 0)
323 mpz_set (up, maxc1);
324 }
325 else
326 {
327 if (mpz_cmp (below, minc1) < 0)
328 mpz_set (below, minc1);
329 }
330
331end:
332 mpz_clear (mint);
333 mpz_clear (maxt);
334 mpz_clear (minc1);
335 mpz_clear (maxc1);
336 mpz_clear (offc0);
337 mpz_clear (offc1);
338}
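/* A worked example of the refinement above, with invented values: assume
   VAR currently has the bounds [0, 1000], TYPE has undefined overflow, and
   the dominating guard is VAR + 10 <= n_2 where VRP knows n_2 lies in
   [0, 100].  Then varc0 = VAR, offc0 = 10, varc1 = n_2, offc1 = 0 and
   cmp == LE_EXPR, so VAR <= n_2 - 10 <= 90 and UP is tightened from 1000
   to 90.  */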
339
b3ce5b6e
ZD
340/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
341 in TYPE to MIN and MAX. */
342
343static void
7190fdc1 344determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
b3ce5b6e
ZD
345 mpz_t min, mpz_t max)
346{
7b008bbc
BC
347 int cnt = 0;
348 mpz_t minm, maxm;
349 basic_block bb;
807e902e 350 wide_int minv, maxv;
7190fdc1
JJ
351 enum value_range_type rtype = VR_VARYING;
352
b3ce5b6e
ZD
353 /* If the expression is a constant, we know its value exactly. */
354 if (integer_zerop (var))
355 {
356 mpz_set (min, off);
357 mpz_set (max, off);
358 return;
359 }
360
7190fdc1
JJ
361 get_type_static_bounds (type, min, max);
362
363 /* See if we have some range info from VRP. */
364 if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
365 {
366 edge e = loop_preheader_edge (loop);
807e902e 367 signop sgn = TYPE_SIGN (type);
538dd0b7 368 gphi_iterator gsi;
7190fdc1
JJ
369
370 /* Either for VAR itself... */
371 rtype = get_range_info (var, &minv, &maxv);
372 /* Or for PHI results in loop->header where VAR is used as
373 PHI argument from the loop preheader edge. */
374 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
375 {
538dd0b7 376 gphi *phi = gsi.phi ();
807e902e 377 wide_int minc, maxc;
7190fdc1
JJ
378 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
379 && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
380 == VR_RANGE))
381 {
382 if (rtype != VR_RANGE)
383 {
384 rtype = VR_RANGE;
385 minv = minc;
386 maxv = maxc;
387 }
388 else
389 {
807e902e
KZ
390 minv = wi::max (minv, minc, sgn);
391 maxv = wi::min (maxv, maxc, sgn);
20adc5b1
JJ
392 /* If the PHI result range is inconsistent with
393 the VAR range, give up on looking at the PHI
394 results. This can happen if VR_UNDEFINED is
395 involved. */
807e902e 396 if (wi::gt_p (minv, maxv, sgn))
20adc5b1
JJ
397 {
398 rtype = get_range_info (var, &minv, &maxv);
399 break;
400 }
7190fdc1
JJ
401 }
402 }
403 }
7b008bbc
BC
404 mpz_init (minm);
405 mpz_init (maxm);
406 if (rtype != VR_RANGE)
407 {
408 mpz_set (minm, min);
409 mpz_set (maxm, max);
410 }
411 else
7190fdc1 412 {
807e902e 413 gcc_assert (wi::le_p (minv, maxv, sgn));
807e902e
KZ
414 wi::to_mpz (minv, minm, sgn);
415 wi::to_mpz (maxv, maxm, sgn);
7b008bbc
BC
416 }
417 /* Now walk the dominators of the loop header and use the entry
418 guards to refine the estimates. */
419 for (bb = loop->header;
420 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
421 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
422 {
423 edge e;
424 tree c0, c1;
355fe088 425 gimple *cond;
7b008bbc
BC
426 enum tree_code cmp;
427
428 if (!single_pred_p (bb))
429 continue;
430 e = single_pred_edge (bb);
431
432 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
433 continue;
434
435 cond = last_stmt (e->src);
436 c0 = gimple_cond_lhs (cond);
437 cmp = gimple_cond_code (cond);
438 c1 = gimple_cond_rhs (cond);
439
440 if (e->flags & EDGE_FALSE_VALUE)
441 cmp = invert_tree_comparison (cmp, false);
442
443 refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
444 ++cnt;
445 }
446
447 mpz_add (minm, minm, off);
448 mpz_add (maxm, maxm, off);
449 /* If the computation may not wrap or off is zero, then this
450 is always fine. If off is negative and minv + off isn't
451 smaller than type's minimum, or off is positive and
452 maxv + off isn't bigger than type's maximum, use the more
453 precise range too. */
454 if (nowrap_type_p (type)
455 || mpz_sgn (off) == 0
456 || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
457 || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
458 {
459 mpz_set (min, minm);
460 mpz_set (max, maxm);
7190fdc1
JJ
461 mpz_clear (minm);
462 mpz_clear (maxm);
7b008bbc 463 return;
7190fdc1 464 }
7b008bbc
BC
465 mpz_clear (minm);
466 mpz_clear (maxm);
7190fdc1
JJ
467 }
468
b3ce5b6e
ZD
469 /* If the computation may wrap, we know nothing about the value, except for
470 the range of the type. */
b3ce5b6e
ZD
471 if (!nowrap_type_p (type))
472 return;
473
474 /* Since the addition of OFF does not wrap, if OFF is positive, then we may
475 add it to MIN, otherwise to MAX. */
476 if (mpz_sgn (off) < 0)
477 mpz_add (max, max, off);
478 else
479 mpz_add (min, min, off);
480}
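/* For instance (made-up SSA names): if VAR is i_3 with VRP range [0, 64] on
   the preheader edge, no dominating guard refines it further, and OFF is 4,
   the result is MIN = 4 and MAX = 68; since OFF is positive and 68 does not
   exceed the maximum of TYPE, the refined range is usable even if TYPE may
   wrap.  */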
481
482/* Stores the bounds on the difference of the values of the expressions
483 (var + X) and (var + Y), computed in TYPE, to BNDS. */
484
485static void
486bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
487 bounds *bnds)
488{
489 int rel = mpz_cmp (x, y);
490 bool may_wrap = !nowrap_type_p (type);
491 mpz_t m;
492
493 /* If X == Y, then the expressions are always equal.
494 If X > Y, there are the following possibilities:
495 a) neither of var + X and var + Y overflow or underflow, or both of
496 them do. Then their difference is X - Y.
497 b) var + X overflows, and var + Y does not. Then the values of the
498 expressions are var + X - M and var + Y, where M is the range of
499 the type, and their difference is X - Y - M.
500 c) var + Y underflows and var + X does not. Their difference again
501 is M - X + Y.
502 Therefore, if the arithmetic in the type does not overflow, then the
503 bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
504 Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
505 (X - Y, X - Y + M). */
506
507 if (rel == 0)
508 {
509 mpz_set_ui (bnds->below, 0);
510 mpz_set_ui (bnds->up, 0);
511 return;
512 }
513
514 mpz_init (m);
807e902e 515 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
b3ce5b6e
ZD
516 mpz_add_ui (m, m, 1);
517 mpz_sub (bnds->up, x, y);
518 mpz_set (bnds->below, bnds->up);
519
520 if (may_wrap)
521 {
522 if (rel > 0)
523 mpz_sub (bnds->below, bnds->below, m);
524 else
525 mpz_add (bnds->up, bnds->up, m);
526 }
527
528 mpz_clear (m);
529}
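/* Example with invented numbers, in an 8-bit unsigned type (so M = 256):
   for X = 10 and Y = 2 the bounds on (var + 10) - (var + 2) become
   [8 - 256, 8] = [-248, 8]; the lower value corresponds to var + 10
   wrapping around while var + 2 does not.  If TYPE cannot wrap, the bounds
   collapse to [8, 8].  */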
530
531/* From condition C0 CMP C1 derives information regarding the
532 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
533 and stores it to BNDS. */
534
535static void
536refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
537 tree vary, mpz_t offy,
538 tree c0, enum tree_code cmp, tree c1,
539 bounds *bnds)
540{
6b4db501 541 tree varc0, varc1, ctype;
b3ce5b6e
ZD
542 mpz_t offc0, offc1, loffx, loffy, bnd;
543 bool lbound = false;
544 bool no_wrap = nowrap_type_p (type);
545 bool x_ok, y_ok;
546
547 switch (cmp)
548 {
549 case LT_EXPR:
550 case LE_EXPR:
551 case GT_EXPR:
552 case GE_EXPR:
17b236ed
ZD
553 STRIP_SIGN_NOPS (c0);
554 STRIP_SIGN_NOPS (c1);
555 ctype = TREE_TYPE (c0);
36618b93 556 if (!useless_type_conversion_p (ctype, type))
17b236ed
ZD
557 return;
558
b3ce5b6e
ZD
559 break;
560
561 case EQ_EXPR:
562 /* We could derive quite precise information from EQ_EXPR, however, such
17b236ed
ZD
563 a guard is unlikely to appear, so we do not bother with handling
564 it. */
b3ce5b6e
ZD
565 return;
566
567 case NE_EXPR:
17b236ed
ZD
568 /* NE_EXPR comparisons do not contain much useful information, except for
569 the special case of comparing with the bounds of the type. */
570 if (TREE_CODE (c1) != INTEGER_CST
571 || !INTEGRAL_TYPE_P (type))
572 return;
573
574 /* Ensure that the condition speaks about an expression in the same type
575 as X and Y. */
576 ctype = TREE_TYPE (c0);
577 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
578 return;
579 c0 = fold_convert (type, c0);
580 c1 = fold_convert (type, c1);
581
582 if (TYPE_MIN_VALUE (type)
583 && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
584 {
585 cmp = GT_EXPR;
586 break;
587 }
588 if (TYPE_MAX_VALUE (type)
589 && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
590 {
591 cmp = LT_EXPR;
592 break;
593 }
594
b3ce5b6e
ZD
595 return;
596 default:
597 return;
b8698a0f 598 }
b3ce5b6e
ZD
599
600 mpz_init (offc0);
601 mpz_init (offc1);
602 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
603 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
604
605 /* We are only interested in comparisons of expressions based on VARX and
606 VARY. TODO -- we might also be able to derive some bounds from
607 expressions containing just one of the variables. */
608
609 if (operand_equal_p (varx, varc1, 0))
610 {
6b4db501 611 std::swap (varc0, varc1);
b3ce5b6e
ZD
612 mpz_swap (offc0, offc1);
613 cmp = swap_tree_comparison (cmp);
614 }
615
616 if (!operand_equal_p (varx, varc0, 0)
617 || !operand_equal_p (vary, varc1, 0))
618 goto end;
619
620 mpz_init_set (loffx, offx);
621 mpz_init_set (loffy, offy);
622
623 if (cmp == GT_EXPR || cmp == GE_EXPR)
624 {
6b4db501 625 std::swap (varx, vary);
b3ce5b6e
ZD
626 mpz_swap (offc0, offc1);
627 mpz_swap (loffx, loffy);
628 cmp = swap_tree_comparison (cmp);
629 lbound = true;
630 }
631
632 /* If there is no overflow, the condition implies that
633
634 (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
635
636 The overflows and underflows may complicate things a bit; each
637 overflow decreases the appropriate offset by M, and underflow
638 increases it by M. The above inequality would not necessarily be
639 true if
b8698a0f 640
b3ce5b6e
ZD
641 -- VARX + OFFX underflows and VARX + OFFC0 does not, or
642 VARX + OFFC0 overflows, but VARX + OFFX does not.
643 This may only happen if OFFX < OFFC0.
644 -- VARY + OFFY overflows and VARY + OFFC1 does not, or
645 VARY + OFFC1 underflows and VARY + OFFY does not.
646 This may only happen if OFFY > OFFC1. */
647
648 if (no_wrap)
649 {
650 x_ok = true;
651 y_ok = true;
652 }
653 else
654 {
655 x_ok = (integer_zerop (varx)
656 || mpz_cmp (loffx, offc0) >= 0);
657 y_ok = (integer_zerop (vary)
658 || mpz_cmp (loffy, offc1) <= 0);
659 }
660
661 if (x_ok && y_ok)
662 {
663 mpz_init (bnd);
664 mpz_sub (bnd, loffx, loffy);
665 mpz_add (bnd, bnd, offc1);
666 mpz_sub (bnd, bnd, offc0);
667
668 if (cmp == LT_EXPR)
669 mpz_sub_ui (bnd, bnd, 1);
670
671 if (lbound)
672 {
673 mpz_neg (bnd, bnd);
674 if (mpz_cmp (bnds->below, bnd) < 0)
675 mpz_set (bnds->below, bnd);
676 }
677 else
678 {
679 if (mpz_cmp (bnd, bnds->up) < 0)
680 mpz_set (bnds->up, bnd);
681 }
682 mpz_clear (bnd);
683 }
684
685 mpz_clear (loffx);
686 mpz_clear (loffy);
687end:
688 mpz_clear (offc0);
689 mpz_clear (offc1);
690}
691
692/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
693 The subtraction is considered to be performed in arbitrary precision,
694 without overflows.
b8698a0f 695
b3ce5b6e
ZD
696 We do not attempt to be too clever regarding the value ranges of X and
697 Y; most of the time, they are just integers or ssa names offsetted by
698 integer. However, we try to use the information contained in the
699 comparisons before the loop (usually created by loop header copying). */
700
701static void
702bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
703{
704 tree type = TREE_TYPE (x);
705 tree varx, vary;
706 mpz_t offx, offy;
707 mpz_t minx, maxx, miny, maxy;
708 int cnt = 0;
709 edge e;
710 basic_block bb;
726a989a 711 tree c0, c1;
355fe088 712 gimple *cond;
b3ce5b6e
ZD
713 enum tree_code cmp;
714
17b236ed
ZD
715 /* Get rid of unnecessary casts, but preserve the value of
716 the expressions. */
717 STRIP_SIGN_NOPS (x);
718 STRIP_SIGN_NOPS (y);
719
b3ce5b6e
ZD
720 mpz_init (bnds->below);
721 mpz_init (bnds->up);
722 mpz_init (offx);
723 mpz_init (offy);
724 split_to_var_and_offset (x, &varx, offx);
725 split_to_var_and_offset (y, &vary, offy);
726
727 if (!integer_zerop (varx)
728 && operand_equal_p (varx, vary, 0))
729 {
730 /* Special case VARX == VARY -- we just need to compare the
731 offsets. Matters are a bit more complicated when the
732 addition of the offsets may wrap. */
733 bound_difference_of_offsetted_base (type, offx, offy, bnds);
734 }
735 else
736 {
737 /* Otherwise, use the value ranges to determine the initial
738 estimates on below and up. */
739 mpz_init (minx);
740 mpz_init (maxx);
741 mpz_init (miny);
742 mpz_init (maxy);
7190fdc1
JJ
743 determine_value_range (loop, type, varx, offx, minx, maxx);
744 determine_value_range (loop, type, vary, offy, miny, maxy);
b3ce5b6e
ZD
745
746 mpz_sub (bnds->below, minx, maxy);
747 mpz_sub (bnds->up, maxx, miny);
748 mpz_clear (minx);
749 mpz_clear (maxx);
750 mpz_clear (miny);
751 mpz_clear (maxy);
752 }
753
754 /* If both X and Y are constants, we cannot get any more precise. */
755 if (integer_zerop (varx) && integer_zerop (vary))
756 goto end;
757
758 /* Now walk the dominators of the loop header and use the entry
759 guards to refine the estimates. */
760 for (bb = loop->header;
fefa31b5 761 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
b3ce5b6e
ZD
762 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
763 {
764 if (!single_pred_p (bb))
765 continue;
766 e = single_pred_edge (bb);
767
768 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
769 continue;
770
726a989a
RB
771 cond = last_stmt (e->src);
772 c0 = gimple_cond_lhs (cond);
773 cmp = gimple_cond_code (cond);
774 c1 = gimple_cond_rhs (cond);
b3ce5b6e
ZD
775
776 if (e->flags & EDGE_FALSE_VALUE)
777 cmp = invert_tree_comparison (cmp, false);
778
779 refine_bounds_using_guard (type, varx, offx, vary, offy,
780 c0, cmp, c1, bnds);
781 ++cnt;
782 }
783
784end:
785 mpz_clear (offx);
786 mpz_clear (offy);
787}
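/* Illustration (invented ranges): bounding n_5 - i_7 where i_7 is known to
   be in [0, 10] and n_5 in [0, 100] gives the initial bounds [-10, 100];
   if the loop header is dominated by the guard i_7 < n_5, the call to
   refine_bounds_using_guard above then raises the lower bound to 1.  */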
788
789/* Update the bounds in BNDS that restrict the value of X to the bounds
790 that restrict the value of X + DELTA. X can be obtained as a
791 difference of two values in TYPE. */
792
793static void
807e902e 794bounds_add (bounds *bnds, const widest_int &delta, tree type)
b3ce5b6e
ZD
795{
796 mpz_t mdelta, max;
797
798 mpz_init (mdelta);
807e902e 799 wi::to_mpz (delta, mdelta, SIGNED);
b3ce5b6e
ZD
800
801 mpz_init (max);
807e902e 802 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
b3ce5b6e
ZD
803
804 mpz_add (bnds->up, bnds->up, mdelta);
805 mpz_add (bnds->below, bnds->below, mdelta);
806
807 if (mpz_cmp (bnds->up, max) > 0)
808 mpz_set (bnds->up, max);
809
810 mpz_neg (max, max);
811 if (mpz_cmp (bnds->below, max) < 0)
812 mpz_set (bnds->below, max);
813
814 mpz_clear (mdelta);
815 mpz_clear (max);
816}
817
818/* Update the bounds in BNDS that restrict the value of X to the bounds
819 that restrict the value of -X. */
820
821static void
822bounds_negate (bounds *bnds)
823{
824 mpz_t tmp;
825
826 mpz_init_set (tmp, bnds->up);
827 mpz_neg (bnds->up, bnds->below);
828 mpz_neg (bnds->below, tmp);
829 mpz_clear (tmp);
830}
831
e9eb809d
ZD
832/* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
833
834static tree
835inverse (tree x, tree mask)
836{
837 tree type = TREE_TYPE (x);
26630a99
ZD
838 tree rslt;
839 unsigned ctr = tree_floor_log2 (mask);
840
841 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
842 {
843 unsigned HOST_WIDE_INT ix;
844 unsigned HOST_WIDE_INT imask;
845 unsigned HOST_WIDE_INT irslt = 1;
846
847 gcc_assert (cst_and_fits_in_hwi (x));
848 gcc_assert (cst_and_fits_in_hwi (mask));
849
850 ix = int_cst_value (x);
851 imask = int_cst_value (mask);
852
853 for (; ctr; ctr--)
854 {
855 irslt *= ix;
856 ix *= ix;
857 }
858 irslt &= imask;
e9eb809d 859
26630a99
ZD
860 rslt = build_int_cst_type (type, irslt);
861 }
862 else
e9eb809d 863 {
ff5e9a94 864 rslt = build_int_cst (type, 1);
26630a99
ZD
865 for (; ctr; ctr--)
866 {
d35936ab
RG
867 rslt = int_const_binop (MULT_EXPR, rslt, x);
868 x = int_const_binop (MULT_EXPR, x, x);
26630a99 869 }
d35936ab 870 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
e9eb809d
ZD
871 }
872
873 return rslt;
874}
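/* Example: for MASK = 15 (i.e. s = 4) and X = 3, CTR is 3 and the result is
   3^7 & 15 = 2187 & 15 = 11; indeed 3 * 11 = 33 == 1 (mod 16).  */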
875
b3ce5b6e 876/* Derives the upper bound BND on the number of executions of loop with exit
1987baa3
ZD
877 condition S * i <> C. If NO_OVERFLOW is true, then the control variable of
878 the loop does not overflow. EXIT_MUST_BE_TAKEN is true if we are guaranteed
879 that the loop ends through this exit, i.e., the induction variable ever
880 reaches the value of C.
881
882 The value C is equal to final - base, where final and base are the final and
883 initial value of the actual induction variable in the analysed loop. BNDS
884 bounds the value of this difference when computed in signed type with
885 unbounded range, while the computation of C is performed in an unsigned
886 type with the range matching the range of the type of the induction variable.
887 In particular, BNDS.up contains an upper bound on C in the following cases:
888 -- if the iv must reach its final value without overflow, i.e., if
889 NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
890 -- if final >= base, which we know to hold when BNDS.below >= 0. */
b3ce5b6e
ZD
891
892static void
893number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
1987baa3 894 bounds *bnds, bool exit_must_be_taken)
b3ce5b6e 895{
807e902e 896 widest_int max;
b3ce5b6e 897 mpz_t d;
5a892248 898 tree type = TREE_TYPE (c);
1987baa3
ZD
899 bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
900 || mpz_sgn (bnds->below) >= 0);
b3ce5b6e 901
5a892248
RB
902 if (integer_onep (s)
903 || (TREE_CODE (c) == INTEGER_CST
904 && TREE_CODE (s) == INTEGER_CST
807e902e
KZ
905 && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
906 || (TYPE_OVERFLOW_UNDEFINED (type)
5a892248 907 && multiple_of_p (type, c, s)))
1987baa3
ZD
908 {
909 /* If C is an exact multiple of S, then its value will be reached before
910 the induction variable overflows (unless the loop is exited in some
911 other way before). Note that the actual induction variable in the
912 loop (which ranges from base to final instead of from 0 to C) may
913 overflow, in which case BNDS.up will not be giving a correct upper
914 bound on C; thus, BNDS_U_VALID had to be computed in advance. */
915 no_overflow = true;
916 exit_must_be_taken = true;
917 }
918
919 /* If the induction variable can overflow, the number of iterations is at
920 most the period of the control variable (or infinite, but in that case
921 the whole # of iterations analysis will fail). */
922 if (!no_overflow)
b3ce5b6e 923 {
807e902e
KZ
924 max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
925 wi::to_mpz (max, bnd, UNSIGNED);
b3ce5b6e
ZD
926 return;
927 }
928
1987baa3
ZD
929 /* Now we know that the induction variable does not overflow, so the loop
930 iterates at most (range of type / S) times. */
807e902e 931 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);
1987baa3
ZD
932
933 /* If the induction variable is guaranteed to reach the value of C before
934 overflow, ... */
935 if (exit_must_be_taken)
936 {
073a8998 937 /* ... then we can strengthen this to C / S, and possibly we can use
1987baa3
ZD
938 the upper bound on C given by BNDS. */
939 if (TREE_CODE (c) == INTEGER_CST)
807e902e 940 wi::to_mpz (c, bnd, UNSIGNED);
1987baa3
ZD
941 else if (bnds_u_valid)
942 mpz_set (bnd, bnds->up);
943 }
b3ce5b6e
ZD
944
945 mpz_init (d);
807e902e 946 wi::to_mpz (s, d, UNSIGNED);
b3ce5b6e
ZD
947 mpz_fdiv_q (bnd, bnd, d);
948 mpz_clear (d);
949}
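/* As an example of the !NO_OVERFLOW case above: for an 8-bit unsigned
   control variable with step S = 4, the period of the variable is
   2^(8 - ctz (4)) = 64, so BND is set to 63.  */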
950
7f17528a
ZD
951/* Determines number of iterations of loop whose ending condition
952 is IV <> FINAL. TYPE is the type of the iv. The number of
e36dc339 953 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
f08ac361
ZD
954 we know that the exit must be taken eventually, i.e., that the IV
955 ever reaches the value FINAL (we derived this earlier, and possibly set
b3ce5b6e
ZD
956 NITER->assumptions to make sure this is the case). BNDS contains the
957 bounds on the difference FINAL - IV->base. */
e9eb809d 958
7f17528a 959static bool
cdf66caf
BC
960number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
961 tree final, struct tree_niter_desc *niter,
962 bool exit_must_be_taken, bounds *bnds)
e9eb809d 963{
7f17528a
ZD
964 tree niter_type = unsigned_type_for (type);
965 tree s, c, d, bits, assumption, tmp, bound;
b3ce5b6e 966 mpz_t max;
e9eb809d 967
17684618
ZD
968 niter->control = *iv;
969 niter->bound = final;
970 niter->cmp = NE_EXPR;
971
b3ce5b6e
ZD
972 /* Rearrange the terms so that we get inequality S * i <> C, with S
973 positive. Also cast everything to the unsigned type. If IV does
974 not overflow, BNDS bounds the value of C. Also, this is the
975 case if the computation |FINAL - IV->base| does not overflow, i.e.,
976 if BNDS->below in the result is nonnegative. */
7f17528a 977 if (tree_int_cst_sign_bit (iv->step))
e9eb809d 978 {
7f17528a
ZD
979 s = fold_convert (niter_type,
980 fold_build1 (NEGATE_EXPR, type, iv->step));
981 c = fold_build2 (MINUS_EXPR, niter_type,
982 fold_convert (niter_type, iv->base),
983 fold_convert (niter_type, final));
b3ce5b6e 984 bounds_negate (bnds);
e9eb809d 985 }
a6f778b2 986 else
e9eb809d 987 {
7f17528a
ZD
988 s = fold_convert (niter_type, iv->step);
989 c = fold_build2 (MINUS_EXPR, niter_type,
990 fold_convert (niter_type, final),
991 fold_convert (niter_type, iv->base));
992 }
e9eb809d 993
b3ce5b6e 994 mpz_init (max);
1987baa3
ZD
995 number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
996 exit_must_be_taken);
807e902e
KZ
997 niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
998 TYPE_SIGN (niter_type));
b3ce5b6e
ZD
999 mpz_clear (max);
1000
69b806f6 1001 /* Compute no-overflow information for the control iv. This can be
8f21990a 1002 proven when below two conditions are satisfied:
69b806f6 1003
8f21990a 1004 1) IV evaluates toward FINAL at beginning, i.e:
69b806f6
BC
1005 base <= FINAL ; step > 0
1006 base >= FINAL ; step < 0
1007
8f21990a
BC
1008 2) |FINAL - base| is an exact multiple of step.
1009
1010 Unfortunately, it's hard to prove above conditions after pass loop-ch
1011 because loop with exit condition (IV != FINAL) usually will be guarded
1012 by initial-condition (IV.base - IV.step != FINAL). In this case, we
1013 can alternatively try to prove below conditions:
1014
1015 1') IV evaluates toward FINAL at beginning, i.e:
1016 new_base = base - step < FINAL ; step > 0
1017 && base - step doesn't underflow
1018 new_base = base - step > FINAL ; step < 0
1019 && base - step doesn't overflow
69b806f6 1020
8f21990a 1021 2') |FINAL - new_base| is an exact multiple of step.
69b806f6 1022
8f21990a
BC
1023 Please refer to PR34114 as an example of loop-ch's impact, also refer
1024 to PR72817 as an example why condition 2') is necessary.
69b806f6 1025
8f21990a 1026 Note, for NE_EXPR, base equals to FINAL is a special case, in
69b806f6
BC
1027 which the loop exits immediately, and the iv does not overflow. */
1028 if (!niter->control.no_overflow
1029 && (integer_onep (s) || multiple_of_p (type, c, s)))
cdf66caf 1030 {
8f21990a 1031 tree t, cond, new_c, relaxed_cond = boolean_false_node;
69b806f6
BC
1032
1033 if (tree_int_cst_sign_bit (iv->step))
1034 {
1035 cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
1036 if (TREE_CODE (type) == INTEGER_TYPE)
1037 {
1038 /* Only when base - step doesn't overflow. */
1039 t = TYPE_MAX_VALUE (type);
1040 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1041 t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
1042 if (integer_nonzerop (t))
1043 {
1044 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
8f21990a
BC
1045 new_c = fold_build2 (MINUS_EXPR, niter_type,
1046 fold_convert (niter_type, t),
1047 fold_convert (niter_type, final));
1048 if (multiple_of_p (type, new_c, s))
1049 relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node,
1050 t, final);
69b806f6
BC
1051 }
1052 }
1053 }
1054 else
1055 {
1056 cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
1057 if (TREE_CODE (type) == INTEGER_TYPE)
1058 {
1059 /* Only when base - step doesn't underflow. */
1060 t = TYPE_MIN_VALUE (type);
1061 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1062 t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
1063 if (integer_nonzerop (t))
1064 {
1065 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
8f21990a
BC
1066 new_c = fold_build2 (MINUS_EXPR, niter_type,
1067 fold_convert (niter_type, final),
1068 fold_convert (niter_type, t));
1069 if (multiple_of_p (type, new_c, s))
1070 relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node,
1071 t, final);
69b806f6
BC
1072 }
1073 }
1074 }
1075
1076 t = simplify_using_initial_conditions (loop, cond);
1077 if (!t || !integer_onep (t))
1078 t = simplify_using_initial_conditions (loop, relaxed_cond);
1079
1080 if (t && integer_onep (t))
1081 niter->control.no_overflow = true;
cdf66caf
BC
1082 }
1083
7f17528a
ZD
1084 /* First the trivial cases -- when the step is 1. */
1085 if (integer_onep (s))
1086 {
1087 niter->niter = c;
1088 return true;
e9eb809d 1089 }
69b806f6
BC
1090 if (niter->control.no_overflow && multiple_of_p (type, c, s))
1091 {
1092 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, c, s);
1093 return true;
1094 }
e9eb809d 1095
7f17528a
ZD
1096 /* Let nsd (step, size of mode) = d. If d does not divide c, the loop
1097 is infinite. Otherwise, the number of iterations is
1098 (inverse(s/d) * (c/d)) mod (size of mode/d). */
1099 bits = num_ending_zeros (s);
1100 bound = build_low_bits_mask (niter_type,
1101 (TYPE_PRECISION (niter_type)
ae7e9ddd 1102 - tree_to_uhwi (bits)));
e9eb809d 1103
7f17528a 1104 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
ff5e9a94 1105 build_int_cst (niter_type, 1), bits);
7f17528a 1106 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
e9eb809d 1107
e36dc339 1108 if (!exit_must_be_taken)
7f17528a 1109 {
e36dc339 1110 /* If we cannot assume that the exit is taken eventually, record the
7f17528a
ZD
1111 assumptions for divisibility of c. */
1112 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
1113 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
1114 assumption, build_int_cst (niter_type, 0));
6e682d7e 1115 if (!integer_nonzerop (assumption))
7f17528a
ZD
1116 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1117 niter->assumptions, assumption);
e9eb809d 1118 }
b8698a0f 1119
7f17528a
ZD
1120 c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
1121 tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
1122 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
1123 return true;
1124}
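/* A worked instance of the final computation above (numbers invented for
   illustration): for an 8-bit unsigned iv with base 0, step 6 and exit
   condition i != 10 we get s = 6, c = 10, bits = 1, d = 2 and bound = 127.
   The divisibility assumption 10 % 2 == 0 holds, c becomes 10 / 2 = 5,
   s becomes 3, inverse (3, 127) is 43, and the number of iterations is
   (5 * 43) & 127 = 87; indeed 87 * 6 == 522 == 10 (mod 256).  */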
e9eb809d 1125
7f17528a
ZD
1126/* Checks whether we can determine the final value of the control variable
1127 of the loop with ending condition IV0 < IV1 (computed in TYPE).
1128 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
1129 of the step. The assumptions necessary to ensure that the computation
1130 of the final value does not overflow are recorded in NITER. If we
1131 find the final value, we adjust DELTA and return TRUE. Otherwise
b3ce5b6e 1132 we return false. BNDS bounds the value of IV1->base - IV0->base,
e36dc339
ZD
1133 and will be updated by the same amount as DELTA. EXIT_MUST_BE_TAKEN is
1134 true if we know that the exit must be taken eventually. */
7f17528a
ZD
1135
1136static bool
1137number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
1138 struct tree_niter_desc *niter,
b3ce5b6e 1139 tree *delta, tree step,
e36dc339 1140 bool exit_must_be_taken, bounds *bnds)
7f17528a
ZD
1141{
1142 tree niter_type = TREE_TYPE (step);
1143 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
1144 tree tmod;
106d07f8
BC
1145 tree assumption = boolean_true_node, bound;
1146 tree type1 = (POINTER_TYPE_P (type)) ? sizetype : type;
7f17528a
ZD
1147
1148 if (TREE_CODE (mod) != INTEGER_CST)
1149 return false;
6e682d7e 1150 if (integer_nonzerop (mod))
7f17528a 1151 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
5be014d5 1152 tmod = fold_convert (type1, mod);
7f17528a 1153
e36dc339 1154 /* If the induction variable does not overflow and the exit is taken,
106d07f8
BC
1155 then the computation of the final value does not overflow. There
1156 are three cases:
1157 1) The new final value is equal to the current one.
1158 2) The induction variable has pointer type, as the code cannot rely
1159 on the object to which the pointer points being placed at the
1160 end of the address space (and more pragmatically,
1161 TYPE_{MIN,MAX}_VALUE is not defined for pointers).
1162 3) EXIT_MUST_BE_TAKEN is true, note it implies that the induction
1163 variable does not overflow. */
1164 if (!integer_zerop (mod) && !POINTER_TYPE_P (type) && !exit_must_be_taken)
e9eb809d 1165 {
106d07f8 1166 if (integer_nonzerop (iv0->step))
7f17528a 1167 {
106d07f8
BC
1168 /* The final value of the iv is iv1->base + MOD, assuming
1169 that this computation does not overflow, and that
1170 iv0->base <= iv1->base + MOD. */
97b4ba9f 1171 bound = fold_build2 (MINUS_EXPR, type1,
5be014d5 1172 TYPE_MAX_VALUE (type1), tmod);
7f17528a
ZD
1173 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1174 iv1->base, bound);
7f17528a 1175 }
b3ce5b6e 1176 else
7f17528a 1177 {
106d07f8
BC
1178 /* The final value of the iv is iv0->base - MOD, assuming
1179 that this computation does not overflow, and that
1180 iv0->base - MOD <= iv1->base. */
5be014d5
AP
1181 bound = fold_build2 (PLUS_EXPR, type1,
1182 TYPE_MIN_VALUE (type1), tmod);
7f17528a
ZD
1183 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1184 iv0->base, bound);
7f17528a 1185 }
106d07f8
BC
1186 if (integer_zerop (assumption))
1187 return false;
1188 else if (!integer_nonzerop (assumption))
1189 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1190 niter->assumptions, assumption);
e9eb809d
ZD
1191 }
1192
106d07f8
BC
1193 /* Since we are transforming LT to NE and DELTA is constant, there
1194 is no need to compute may_be_zero because this loop must roll. */
1195
807e902e 1196 bounds_add (bnds, wi::to_widest (mod), type);
7f17528a 1197 *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
106d07f8 1198 return true;
7f17528a
ZD
1199}
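/* Example of the LT -> NE transformation (illustrative values): for
   i < 100 with base 0 and step 3, *DELTA = 100 and MOD = 100 % 3 = 1, so
   MOD becomes 3 - 1 = 2 and *DELTA is bumped to 102.  The exit test is
   then treated as i != 102, and the NE analysis yields 102 / 3 = 34
   iterations, matching i = 0, 3, ..., 99.  */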
1200
1201/* Add assertions to NITER that ensure that the control variable of the loop
1202 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
1203 are TYPE. Returns false if we can prove that there is an overflow, true
1204 otherwise. STEP is the absolute value of the step. */
e9eb809d 1205
7f17528a
ZD
1206static bool
1207assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1208 struct tree_niter_desc *niter, tree step)
1209{
1210 tree bound, d, assumption, diff;
1211 tree niter_type = TREE_TYPE (step);
1212
6e42ce54 1213 if (integer_nonzerop (iv0->step))
e9eb809d 1214 {
7f17528a
ZD
1215 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
1216 if (iv0->no_overflow)
1217 return true;
1218
1219 /* If iv0->base is a constant, we can determine the last value before
1220 overflow precisely; otherwise we conservatively assume
1221 MAX - STEP + 1. */
1222
1223 if (TREE_CODE (iv0->base) == INTEGER_CST)
e9eb809d 1224 {
7f17528a
ZD
1225 d = fold_build2 (MINUS_EXPR, niter_type,
1226 fold_convert (niter_type, TYPE_MAX_VALUE (type)),
1227 fold_convert (niter_type, iv0->base));
1228 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
e9eb809d
ZD
1229 }
1230 else
7f17528a 1231 diff = fold_build2 (MINUS_EXPR, niter_type, step,
ff5e9a94 1232 build_int_cst (niter_type, 1));
7f17528a
ZD
1233 bound = fold_build2 (MINUS_EXPR, type,
1234 TYPE_MAX_VALUE (type), fold_convert (type, diff));
1235 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1236 iv1->base, bound);
1237 }
1238 else
1239 {
1240 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
1241 if (iv1->no_overflow)
1242 return true;
1243
1244 if (TREE_CODE (iv1->base) == INTEGER_CST)
e9eb809d 1245 {
7f17528a
ZD
1246 d = fold_build2 (MINUS_EXPR, niter_type,
1247 fold_convert (niter_type, iv1->base),
1248 fold_convert (niter_type, TYPE_MIN_VALUE (type)));
1249 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
e9eb809d 1250 }
7f17528a
ZD
1251 else
1252 diff = fold_build2 (MINUS_EXPR, niter_type, step,
ff5e9a94 1253 build_int_cst (niter_type, 1));
7f17528a
ZD
1254 bound = fold_build2 (PLUS_EXPR, type,
1255 TYPE_MIN_VALUE (type), fold_convert (type, diff));
1256 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1257 iv0->base, bound);
e9eb809d
ZD
1258 }
1259
6e682d7e 1260 if (integer_zerop (assumption))
7f17528a 1261 return false;
6e682d7e 1262 if (!integer_nonzerop (assumption))
7f17528a
ZD
1263 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1264 niter->assumptions, assumption);
b8698a0f 1265
7f17528a
ZD
1266 iv0->no_overflow = true;
1267 iv1->no_overflow = true;
1268 return true;
1269}
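/* For instance (invented values): for a 32-bit unsigned IV0 with a
   non-constant base and step 4, DIFF is 3 and the recorded assumption is
   iv1->base <= 0xffffffff - 3, i.e. the final value must leave room for
   the last increment so that the control variable cannot wrap past it.  */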
e9eb809d 1270
7f17528a 1271/* Add an assumption to NITER that a loop whose ending condition
b3ce5b6e
ZD
1272 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
1273 bounds the value of IV1->base - IV0->base. */
7f17528a
ZD
1274
1275static void
1276assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
b3ce5b6e 1277 struct tree_niter_desc *niter, bounds *bnds)
7f17528a
ZD
1278{
1279 tree assumption = boolean_true_node, bound, diff;
5be014d5 1280 tree mbz, mbzl, mbzr, type1;
b3ce5b6e 1281 bool rolls_p, no_overflow_p;
807e902e 1282 widest_int dstep;
b3ce5b6e
ZD
1283 mpz_t mstep, max;
1284
1285 /* We are going to compute the number of iterations as
1286 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
b8698a0f
L
1287 variant of TYPE. This formula only works if
1288
b3ce5b6e 1289 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
b8698a0f 1290
b3ce5b6e 1291 (where MAX is the maximum value of the unsigned variant of TYPE, and
072edf07
SP
1292 the computations in this formula are performed in full precision,
1293 i.e., without overflows).
b3ce5b6e
ZD
1294
1295 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
072edf07 1296 we have a condition of the form iv0->base - step < iv1->base before the loop,
b3ce5b6e
ZD
1297 and for loops iv0->base < iv1->base - step * i the condition
1298 iv0->base < iv1->base + step, due to loop header copying, which enable us
1299 to prove the lower bound.
b8698a0f 1300
b3ce5b6e
ZD
1301 The upper bound is more complicated. Unless the expressions for initial
1302 and final value themselves contain enough information, we usually cannot
1303 derive it from the context. */
1304
1305 /* First check whether the answer does not follow from the bounds we gathered
1306 before. */
1307 if (integer_nonzerop (iv0->step))
807e902e 1308 dstep = wi::to_widest (iv0->step);
b3ce5b6e
ZD
1309 else
1310 {
807e902e 1311 dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
27bcd47c 1312 dstep = -dstep;
b3ce5b6e
ZD
1313 }
1314
1315 mpz_init (mstep);
807e902e 1316 wi::to_mpz (dstep, mstep, UNSIGNED);
b3ce5b6e
ZD
1317 mpz_neg (mstep, mstep);
1318 mpz_add_ui (mstep, mstep, 1);
1319
1320 rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
1321
1322 mpz_init (max);
807e902e 1323 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
b3ce5b6e
ZD
1324 mpz_add (max, max, mstep);
1325 no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
1326 /* For pointers, only values lying inside a single object
1327 can be compared or manipulated by pointer arithmetics.
1328 Gcc in general does not allow or handle objects larger
1329 than half of the address space, hence the upper bound
1330 is satisfied for pointers. */
1331 || POINTER_TYPE_P (type));
1332 mpz_clear (mstep);
1333 mpz_clear (max);
1334
1335 if (rolls_p && no_overflow_p)
1336 return;
b8698a0f 1337
5be014d5
AP
1338 type1 = type;
1339 if (POINTER_TYPE_P (type))
1340 type1 = sizetype;
b3ce5b6e
ZD
1341
1342 /* Now the hard part; we must formulate the assumption(s) as expressions, and
1343 we must be careful not to introduce overflow. */
7f17528a 1344
6e42ce54 1345 if (integer_nonzerop (iv0->step))
e9eb809d 1346 {
5be014d5
AP
1347 diff = fold_build2 (MINUS_EXPR, type1,
1348 iv0->step, build_int_cst (type1, 1));
e9eb809d 1349
7f17528a
ZD
1350 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
1351 0 address never belongs to any object, we can assume this for
1352 pointers. */
1353 if (!POINTER_TYPE_P (type))
e9eb809d 1354 {
5be014d5 1355 bound = fold_build2 (PLUS_EXPR, type1,
7f17528a
ZD
1356 TYPE_MIN_VALUE (type), diff);
1357 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1358 iv0->base, bound);
e9eb809d
ZD
1359 }
1360
7f17528a 1361 /* And then we can compute iv0->base - diff, and compare it with
b8698a0f
L
1362 iv1->base. */
1363 mbzl = fold_build2 (MINUS_EXPR, type1,
d24a32a1
ZD
1364 fold_convert (type1, iv0->base), diff);
1365 mbzr = fold_convert (type1, iv1->base);
e9eb809d 1366 }
7f17528a 1367 else
e9eb809d 1368 {
5be014d5
AP
1369 diff = fold_build2 (PLUS_EXPR, type1,
1370 iv1->step, build_int_cst (type1, 1));
7f17528a
ZD
1371
1372 if (!POINTER_TYPE_P (type))
e9eb809d 1373 {
5be014d5 1374 bound = fold_build2 (PLUS_EXPR, type1,
7f17528a
ZD
1375 TYPE_MAX_VALUE (type), diff);
1376 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1377 iv1->base, bound);
e9eb809d
ZD
1378 }
1379
d24a32a1
ZD
1380 mbzl = fold_convert (type1, iv0->base);
1381 mbzr = fold_build2 (MINUS_EXPR, type1,
1382 fold_convert (type1, iv1->base), diff);
7f17528a 1383 }
e9eb809d 1384
6e682d7e 1385 if (!integer_nonzerop (assumption))
7f17528a
ZD
1386 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1387 niter->assumptions, assumption);
b3ce5b6e
ZD
1388 if (!rolls_p)
1389 {
1390 mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
1391 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1392 niter->may_be_zero, mbz);
1393 }
7f17528a 1394}
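/* Illustration of the assumptions built above (made-up values): for the
   exit test i < n, with i starting from b and stepping by 4, the niter
   formula is valid only when b - 3 <= n.  When that cannot be derived from
   BNDS, the code records MBZL = b - 3, MBZR = n and adds the may_be_zero
   condition b - 3 > n, i.e. exactly the case in which the formula would be
   wrong and the loop must be treated as not rolling.  */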
e9eb809d 1395
7f17528a
ZD
1396/* Determines number of iterations of loop whose ending condition
1397 is IV0 < IV1. TYPE is the type of the iv. The number of
b3ce5b6e 1398 iterations is stored to NITER. BNDS bounds the difference
e36dc339
ZD
1399 IV1->base - IV0->base. EXIT_MUST_BE_TAKEN is true if we know
1400 that the exit must be taken eventually. */
7f17528a
ZD
1401
1402static bool
cdf66caf
BC
1403number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
1404 affine_iv *iv1, struct tree_niter_desc *niter,
e36dc339 1405 bool exit_must_be_taken, bounds *bnds)
7f17528a
ZD
1406{
1407 tree niter_type = unsigned_type_for (type);
1408 tree delta, step, s;
b3ce5b6e 1409 mpz_t mstep, tmp;
7f17528a 1410
6e42ce54 1411 if (integer_nonzerop (iv0->step))
17684618
ZD
1412 {
1413 niter->control = *iv0;
1414 niter->cmp = LT_EXPR;
1415 niter->bound = iv1->base;
1416 }
1417 else
1418 {
1419 niter->control = *iv1;
1420 niter->cmp = GT_EXPR;
1421 niter->bound = iv0->base;
1422 }
1423
7f17528a
ZD
1424 delta = fold_build2 (MINUS_EXPR, niter_type,
1425 fold_convert (niter_type, iv1->base),
1426 fold_convert (niter_type, iv0->base));
1427
1428 /* First handle the special case that the step is +-1. */
6e42ce54
ZD
1429 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
1430 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
7f17528a
ZD
1431 {
1432 /* for (i = iv0->base; i < iv1->base; i++)
1433
1434 or
82b85a85 1435
7f17528a 1436 for (i = iv1->base; i > iv0->base; i--).
b8698a0f 1437
7f17528a 1438 In both cases # of iterations is iv1->base - iv0->base, assuming that
b3ce5b6e
ZD
1439 iv1->base >= iv0->base.
1440
1441 First try to derive a lower bound on the value of
1442 iv1->base - iv0->base, computed in full precision. If the difference
1443 is nonnegative, we are done, otherwise we must record the
1444 condition. */
1445
1446 if (mpz_sgn (bnds->below) < 0)
1447 niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1448 iv1->base, iv0->base);
7f17528a 1449 niter->niter = delta;
807e902e
KZ
1450 niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
1451 TYPE_SIGN (niter_type));
2f07b722 1452 niter->control.no_overflow = true;
7f17528a 1453 return true;
e9eb809d 1454 }
7f17528a 1455
6e42ce54 1456 if (integer_nonzerop (iv0->step))
7f17528a 1457 step = fold_convert (niter_type, iv0->step);
e9eb809d 1458 else
7f17528a
ZD
1459 step = fold_convert (niter_type,
1460 fold_build1 (NEGATE_EXPR, type, iv1->step));
1461
1462 /* If we can determine the final value of the control iv exactly, we can
1463 transform the condition to != comparison. In particular, this will be
1464 the case if DELTA is constant. */
b3ce5b6e 1465 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
e36dc339 1466 exit_must_be_taken, bnds))
e9eb809d 1467 {
7f17528a
ZD
1468 affine_iv zps;
1469
ff5e9a94 1470 zps.base = build_int_cst (niter_type, 0);
7f17528a
ZD
1471 zps.step = step;
1472 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1473 zps does not overflow. */
1474 zps.no_overflow = true;
1475
cdf66caf
BC
1476 return number_of_iterations_ne (loop, type, &zps,
1477 delta, niter, true, bnds);
e9eb809d
ZD
1478 }
1479
7f17528a
ZD
1480 /* Make sure that the control iv does not overflow. */
1481 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1482 return false;
e9eb809d 1483
7f17528a
ZD
1484 /* We determine the number of iterations as (delta + step - 1) / step. For
1485 this to work, we must know that iv1->base >= iv0->base - step + 1,
1486 otherwise the loop does not roll. */
b3ce5b6e 1487 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
7f17528a
ZD
1488
1489 s = fold_build2 (MINUS_EXPR, niter_type,
ff5e9a94 1490 step, build_int_cst (niter_type, 1));
7f17528a
ZD
1491 delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1492 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
b3ce5b6e
ZD
1493
1494 mpz_init (mstep);
1495 mpz_init (tmp);
807e902e 1496 wi::to_mpz (step, mstep, UNSIGNED);
b3ce5b6e
ZD
1497 mpz_add (tmp, bnds->up, mstep);
1498 mpz_sub_ui (tmp, tmp, 1);
1499 mpz_fdiv_q (tmp, tmp, mstep);
807e902e
KZ
1500 niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
1501 TYPE_SIGN (niter_type));
b3ce5b6e
ZD
1502 mpz_clear (mstep);
1503 mpz_clear (tmp);
1504
7f17528a 1505 return true;
e9eb809d
ZD
1506}
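/* Worked example of the final formula (illustrative): for the loop
   for (i = 0; i < n; i += 3) we get DELTA = n, STEP = 3 and
   niter = (n + 3 - 1) / 3 computed in the unsigned type; for n == 10 at
   run time this gives (10 + 2) / 3 = 4, matching i = 0, 3, 6, 9.  */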
1507
7f17528a
ZD
1508/* Determines number of iterations of loop whose ending condition
1509 is IV0 <= IV1. TYPE is the type of the iv. The number of
e36dc339 1510 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
f08ac361 1511 we know that this condition must eventually become false (we derived this
7f17528a 1512 earlier, and possibly set NITER->assumptions to make sure this
b3ce5b6e 1513 is the case). BNDS bounds the difference IV1->base - IV0->base. */
7f17528a
ZD
1514
1515static bool
cdf66caf
BC
1516number_of_iterations_le (struct loop *loop, tree type, affine_iv *iv0,
1517 affine_iv *iv1, struct tree_niter_desc *niter,
1518 bool exit_must_be_taken, bounds *bnds)
7f17528a
ZD
1519{
1520 tree assumption;
5be014d5
AP
1521 tree type1 = type;
1522 if (POINTER_TYPE_P (type))
1523 type1 = sizetype;
7f17528a
ZD
1524
1525 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1526 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1527 value of the type. This we must know anyway, since if it is
e36dc339 1528 equal to this value, the loop rolls forever. We do not check
b8698a0f 1529 this condition for pointer type ivs, as the code cannot rely on
e36dc339
ZD
1530 the object to which the pointer points being placed at the end of
1531 the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
1532 not defined for pointers). */
7f17528a 1533
e36dc339 1534 if (!exit_must_be_taken && !POINTER_TYPE_P (type))
7f17528a 1535 {
6e42ce54 1536 if (integer_nonzerop (iv0->step))
7f17528a 1537 assumption = fold_build2 (NE_EXPR, boolean_type_node,
97b4ba9f 1538 iv1->base, TYPE_MAX_VALUE (type));
7f17528a
ZD
1539 else
1540 assumption = fold_build2 (NE_EXPR, boolean_type_node,
97b4ba9f 1541 iv0->base, TYPE_MIN_VALUE (type));
7f17528a 1542
6e682d7e 1543 if (integer_zerop (assumption))
7f17528a 1544 return false;
6e682d7e 1545 if (!integer_nonzerop (assumption))
7f17528a
ZD
1546 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1547 niter->assumptions, assumption);
1548 }
1549
6e42ce54 1550 if (integer_nonzerop (iv0->step))
97b4ba9f
JJ
1551 {
1552 if (POINTER_TYPE_P (type))
5d49b6a7 1553 iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
97b4ba9f
JJ
1554 else
1555 iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1556 build_int_cst (type1, 1));
1557 }
1558 else if (POINTER_TYPE_P (type))
5d49b6a7 1559 iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
7f17528a 1560 else
5be014d5
AP
1561 iv0->base = fold_build2 (MINUS_EXPR, type1,
1562 iv0->base, build_int_cst (type1, 1));
b3ce5b6e 1563
807e902e 1564 bounds_add (bnds, 1, type1);
b3ce5b6e 1565
cdf66caf 1566 return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
e36dc339 1567 bnds);
b3ce5b6e
ZD
1568}
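/* For example (illustrative): the exit test i <= n with a positive step is
   rewritten here as i < n + 1; unless the exit is already known to be
   taken, the assumption n != TYPE_MAX_VALUE is recorded first so that the
   incremented bound cannot wrap.  */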
1569
1570/* Dumps description of affine induction variable IV to FILE. */
1571
1572static void
1573dump_affine_iv (FILE *file, affine_iv *iv)
1574{
1575 if (!integer_zerop (iv->step))
1576 fprintf (file, "[");
1577
1578 print_generic_expr (dump_file, iv->base, TDF_SLIM);
1579
1580 if (!integer_zerop (iv->step))
1581 {
1582 fprintf (file, ", + , ");
1583 print_generic_expr (dump_file, iv->step, TDF_SLIM);
1584 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1585 }
7f17528a 1586}
e9eb809d 1587
7f17528a
ZD
1588/* Determine the number of iterations according to condition (for staying
1589 inside loop) which compares two induction variables using comparison
1590 operator CODE. The induction variable on left side of the comparison
1591 is IV0, the right-hand side is IV1. Both induction variables must have
1592 type TYPE, which must be an integer or pointer type. The steps of the
1593 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
f08ac361 1594
b3ce5b6e
ZD
1595 LOOP is the loop whose number of iterations we are determining.
1596
f08ac361
ZD
1597 ONLY_EXIT is true if we are sure this is the only way the loop could be
1598 exited (including possibly non-returning function calls, exceptions, etc.)
1599 -- in this case we can use the information whether the control induction
1600 variables can overflow or not in a more efficient way.
b8698a0f 1601
870ca331
JH
1602 if EVERY_ITERATION is true, we know the test is executed on every iteration.
1603
7f17528a 1604 The results (number of iterations and assumptions as described in
3fadf78a 1605 comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
7f17528a
ZD
1606 Returns false if it fails to determine number of iterations, true if it
1607 was determined (possibly with some assumptions). */
c33e657d
ZD
1608
1609static bool
b3ce5b6e
ZD
1610number_of_iterations_cond (struct loop *loop,
1611 tree type, affine_iv *iv0, enum tree_code code,
f08ac361 1612 affine_iv *iv1, struct tree_niter_desc *niter,
870ca331 1613 bool only_exit, bool every_iteration)
e9eb809d 1614{
e36dc339 1615 bool exit_must_be_taken = false, ret;
b3ce5b6e 1616 bounds bnds;
7f17528a 1617
870ca331
JH
1618 /* If the test is not executed on every iteration, wrapping may make the
1619 test pass again.
1620 TODO: the overflow case can still be used as an unreliable estimate of the
1621 upper bound, but we have no API to pass it down to the number of iterations
1622 code and, at present, it will not use it anyway. */
1623 if (!every_iteration
1624 && (!iv0->no_overflow || !iv1->no_overflow
1625 || code == NE_EXPR || code == EQ_EXPR))
1626 return false;
1627
7f17528a
ZD
1628 /* The meaning of these assumptions is this:
1629 if !assumptions
1630 then the rest of information does not have to be valid
1631 if may_be_zero then the loop does not roll, even if
1632 niter != 0. */
1633 niter->assumptions = boolean_true_node;
1634 niter->may_be_zero = boolean_false_node;
1635 niter->niter = NULL_TREE;
807e902e 1636 niter->max = 0;
17684618
ZD
1637 niter->bound = NULL_TREE;
1638 niter->cmp = ERROR_MARK;
1639
7f17528a
ZD
1640 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1641 the control variable is on lhs. */
1642 if (code == GE_EXPR || code == GT_EXPR
6e42ce54 1643 || (code == NE_EXPR && integer_zerop (iv0->step)))
c33e657d 1644 {
6b4db501 1645 std::swap (iv0, iv1);
c33e657d
ZD
1646 code = swap_tree_comparison (code);
1647 }
e9eb809d 1648
7f17528a 1649 if (POINTER_TYPE_P (type))
e9eb809d 1650 {
7f17528a
ZD
1651 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1652 to the same object. If they do, the control variable cannot wrap
1653 (as wrap around the bounds of memory will never return a pointer
1654 that would be guaranteed to point to the same object, even if we
e36dc339 1655 avoid undefined behavior by casting to size_t and back). */
7f17528a
ZD
1656 iv0->no_overflow = true;
1657 iv1->no_overflow = true;
1658 }
e9eb809d 1659
e36dc339
ZD
1660 /* If the control induction variable does not overflow and the only exit
1661 from the loop is the one that we analyze, we know it must be taken
1662 eventually. */
1663 if (only_exit)
1664 {
1665 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1666 exit_must_be_taken = true;
1667 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1668 exit_must_be_taken = true;
1669 }
e9eb809d 1670
7f17528a
ZD
1671 /* We can handle the case when neither of the sides of the comparison is
1672 invariant, provided that the test is NE_EXPR. This rarely occurs in
1673 practice, but it is simple enough to manage. */
6e42ce54 1674 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
7f17528a 1675 {
5ece9847 1676 tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
7f17528a
ZD
1677 if (code != NE_EXPR)
1678 return false;
e9eb809d 1679
5ece9847 1680 iv0->step = fold_binary_to_constant (MINUS_EXPR, step_type,
7f17528a
ZD
1681 iv0->step, iv1->step);
1682 iv0->no_overflow = false;
5ece9847 1683 iv1->step = build_int_cst (step_type, 0);
7f17528a
ZD
1684 iv1->no_overflow = true;
1685 }
c33e657d 1686
7f17528a
ZD
1687 /* If the result of the comparison is a constant, the loop is weird. More
1688 precise handling would be possible, but the situation is not common enough
1689 to waste time on it. */
6e42ce54 1690 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
7f17528a 1691 return false;
c33e657d 1692
7f17528a
ZD
1693 /* Ignore loops of while (i-- < 10) type. */
1694 if (code != NE_EXPR)
1695 {
1696 if (iv0->step && tree_int_cst_sign_bit (iv0->step))
c33e657d 1697 return false;
c33e657d 1698
6e42ce54 1699 if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
c33e657d 1700 return false;
7f17528a 1701 }
e9eb809d 1702
c0220ea4 1703 /* If the loop exits immediately, there is nothing to do. */
5a892248
RB
1704 tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1705 if (tem && integer_zerop (tem))
7f17528a 1706 {
ff5e9a94 1707 niter->niter = build_int_cst (unsigned_type_for (type), 0);
807e902e 1708 niter->max = 0;
7f17528a
ZD
1709 return true;
1710 }
b8698a0f 1711
7f17528a
ZD
 1712 /* OK, now we know we have a sensible loop. Handle several cases, depending
1713 on what comparison operator is used. */
b3ce5b6e
ZD
1714 bound_difference (loop, iv1->base, iv0->base, &bnds);
1715
1716 if (dump_file && (dump_flags & TDF_DETAILS))
1717 {
1718 fprintf (dump_file,
4dad0aca 1719 "Analyzing # of iterations of loop %d\n", loop->num);
b3ce5b6e
ZD
1720
1721 fprintf (dump_file, " exit condition ");
1722 dump_affine_iv (dump_file, iv0);
1723 fprintf (dump_file, " %s ",
1724 code == NE_EXPR ? "!="
1725 : code == LT_EXPR ? "<"
1726 : "<=");
1727 dump_affine_iv (dump_file, iv1);
1728 fprintf (dump_file, "\n");
1729
1730 fprintf (dump_file, " bounds on difference of bases: ");
1731 mpz_out_str (dump_file, 10, bnds.below);
1732 fprintf (dump_file, " ... ");
1733 mpz_out_str (dump_file, 10, bnds.up);
1734 fprintf (dump_file, "\n");
1735 }
1736
7f17528a
ZD
1737 switch (code)
1738 {
1739 case NE_EXPR:
6e42ce54 1740 gcc_assert (integer_zerop (iv1->step));
cdf66caf 1741 ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
e36dc339 1742 exit_must_be_taken, &bnds);
b3ce5b6e
ZD
1743 break;
1744
7f17528a 1745 case LT_EXPR:
cdf66caf
BC
1746 ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
1747 exit_must_be_taken, &bnds);
b3ce5b6e
ZD
1748 break;
1749
7f17528a 1750 case LE_EXPR:
cdf66caf
BC
1751 ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
1752 exit_must_be_taken, &bnds);
b3ce5b6e
ZD
1753 break;
1754
c33e657d
ZD
1755 default:
1756 gcc_unreachable ();
1757 }
b3ce5b6e
ZD
1758
1759 mpz_clear (bnds.up);
1760 mpz_clear (bnds.below);
1761
1762 if (dump_file && (dump_flags & TDF_DETAILS))
1763 {
1764 if (ret)
1765 {
1766 fprintf (dump_file, " result:\n");
1767 if (!integer_nonzerop (niter->assumptions))
1768 {
1769 fprintf (dump_file, " under assumptions ");
1770 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1771 fprintf (dump_file, "\n");
1772 }
1773
1774 if (!integer_zerop (niter->may_be_zero))
1775 {
1776 fprintf (dump_file, " zero if ");
1777 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1778 fprintf (dump_file, "\n");
1779 }
1780
1781 fprintf (dump_file, " # of iterations ");
1782 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1783 fprintf (dump_file, ", bounded by ");
807e902e 1784 print_decu (niter->max, dump_file);
b3ce5b6e
ZD
1785 fprintf (dump_file, "\n");
1786 }
1787 else
1788 fprintf (dump_file, " failed\n\n");
1789 }
1790 return ret;
e9eb809d
ZD
1791}
1792
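/* As an illustrative sketch (hypothetical loop, not taken from these sources):
   for

     unsigned int i;
     for (i = b; i < e; i++)
       body ();

   IV0 is {b, +, 1}, IV1 is the invariant E and CODE is LT_EXPR; one would
   expect a result along the lines of NITER = e - b, with MAY_BE_ZERO
   covering the case b >= e (the loop does not roll at all) and trivial
   ASSUMPTIONS, matching the meaning of the fields described above.  */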
1d481ba8
ZD
 1793/* Substitute NEW_TREE for OLD in EXPR and fold the result. */
1794
1795static tree
c22940cd 1796simplify_replace_tree (tree expr, tree old, tree new_tree)
1d481ba8
ZD
1797{
1798 unsigned i, n;
1799 tree ret = NULL_TREE, e, se;
1800
1801 if (!expr)
1802 return NULL_TREE;
1803
76c85743
RG
1804 /* Do not bother to replace constants. */
1805 if (CONSTANT_CLASS_P (old))
1806 return expr;
1807
1d481ba8
ZD
1808 if (expr == old
1809 || operand_equal_p (expr, old, 0))
c22940cd 1810 return unshare_expr (new_tree);
1d481ba8 1811
726a989a 1812 if (!EXPR_P (expr))
1d481ba8
ZD
1813 return expr;
1814
5039610b 1815 n = TREE_OPERAND_LENGTH (expr);
1d481ba8
ZD
1816 for (i = 0; i < n; i++)
1817 {
1818 e = TREE_OPERAND (expr, i);
c22940cd 1819 se = simplify_replace_tree (e, old, new_tree);
1d481ba8
ZD
1820 if (e == se)
1821 continue;
1822
1823 if (!ret)
1824 ret = copy_node (expr);
1825
1826 TREE_OPERAND (ret, i) = se;
1827 }
1828
1829 return (ret ? fold (ret) : expr);
1830}
1831
be1b5cba 1832/* Expand definitions of ssa names in EXPR as long as they are simple
fc06280e
BC
1833 enough, and return the new expression. If STOP is specified, stop
 1834 expanding if EXPR equals it. */
be1b5cba 1835
d7bf3bcf 1836tree
fc06280e 1837expand_simple_operations (tree expr, tree stop)
be1b5cba
ZD
1838{
1839 unsigned i, n;
726a989a 1840 tree ret = NULL_TREE, e, ee, e1;
6fff2603 1841 enum tree_code code;
355fe088 1842 gimple *stmt;
6fff2603
JJ
1843
1844 if (expr == NULL_TREE)
1845 return expr;
be1b5cba
ZD
1846
1847 if (is_gimple_min_invariant (expr))
1848 return expr;
1849
6fff2603 1850 code = TREE_CODE (expr);
be1b5cba
ZD
1851 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1852 {
5039610b 1853 n = TREE_OPERAND_LENGTH (expr);
be1b5cba
ZD
1854 for (i = 0; i < n; i++)
1855 {
1856 e = TREE_OPERAND (expr, i);
fc06280e 1857 ee = expand_simple_operations (e, stop);
be1b5cba
ZD
1858 if (e == ee)
1859 continue;
1860
1861 if (!ret)
1862 ret = copy_node (expr);
1863
1864 TREE_OPERAND (ret, i) = ee;
1865 }
1866
6ac01510
ILT
1867 if (!ret)
1868 return expr;
1869
1870 fold_defer_overflow_warnings ();
1871 ret = fold (ret);
1872 fold_undefer_and_ignore_overflow_warnings ();
1873 return ret;
be1b5cba
ZD
1874 }
1875
fc06280e
BC
 1876 /* Stop if EXPR is not an SSA name or is the one we don't want to expand. */
1877 if (TREE_CODE (expr) != SSA_NAME || expr == stop)
be1b5cba
ZD
1878 return expr;
1879
1880 stmt = SSA_NAME_DEF_STMT (expr);
726a989a 1881 if (gimple_code (stmt) == GIMPLE_PHI)
b3ce5b6e
ZD
1882 {
1883 basic_block src, dest;
1884
726a989a 1885 if (gimple_phi_num_args (stmt) != 1)
b3ce5b6e
ZD
1886 return expr;
1887 e = PHI_ARG_DEF (stmt, 0);
1888
1889 /* Avoid propagating through loop exit phi nodes, which
1890 could break loop-closed SSA form restrictions. */
726a989a 1891 dest = gimple_bb (stmt);
b3ce5b6e
ZD
1892 src = single_pred (dest);
1893 if (TREE_CODE (e) == SSA_NAME
1894 && src->loop_father != dest->loop_father)
1895 return expr;
1896
fc06280e 1897 return expand_simple_operations (e, stop);
b3ce5b6e 1898 }
726a989a 1899 if (gimple_code (stmt) != GIMPLE_ASSIGN)
be1b5cba
ZD
1900 return expr;
1901
c3a9b91b
RB
1902 /* Avoid expanding to expressions that contain SSA names that need
1903 to take part in abnormal coalescing. */
1904 ssa_op_iter iter;
1905 FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
1906 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
1907 return expr;
1908
726a989a
RB
1909 e = gimple_assign_rhs1 (stmt);
1910 code = gimple_assign_rhs_code (stmt);
1911 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1912 {
1913 if (is_gimple_min_invariant (e))
1914 return e;
1915
1916 if (code == SSA_NAME)
fc06280e 1917 return expand_simple_operations (e, stop);
726a989a
RB
1918
1919 return expr;
1920 }
1921
1922 switch (code)
1923 {
1a87cf0c 1924 CASE_CONVERT:
726a989a 1925 /* Casts are simple. */
fc06280e 1926 ee = expand_simple_operations (e, stop);
726a989a
RB
1927 return fold_build1 (code, TREE_TYPE (expr), ee);
1928
1929 case PLUS_EXPR:
1930 case MINUS_EXPR:
20bd649a
MP
1931 if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
1932 && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
8b228266
RB
1933 return expr;
1934 /* Fallthru. */
726a989a 1935 case POINTER_PLUS_EXPR:
be1b5cba 1936 /* And increments and decrements by a constant are simple. */
726a989a
RB
1937 e1 = gimple_assign_rhs2 (stmt);
1938 if (!is_gimple_min_invariant (e1))
1939 return expr;
1940
fc06280e 1941 ee = expand_simple_operations (e, stop);
726a989a 1942 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
be1b5cba 1943
726a989a
RB
1944 default:
1945 return expr;
1946 }
be1b5cba
ZD
1947}
1948
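/* A hypothetical GIMPLE fragment showing what the expansion above is meant
   to see through (the names are made up):

     n.1_2 = n_1 + 4;
     bound_3 = (unsigned int) n.1_2;

   Expanding BOUND_3 yields roughly (unsigned int) (n_1 + 4), since casts and
   additions of constants are considered simple; loads from memory, abnormal
   SSA names or non-constant additions stop the expansion.  */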
e9eb809d 1949/* Tries to simplify EXPR using the condition COND. Returns the simplified
be1b5cba 1950 expression (or EXPR unchanged, if no simplification was possible). */
e9eb809d
ZD
1951
1952static tree
8aa46dd2 1953tree_simplify_using_condition_1 (tree cond, tree expr)
e9eb809d
ZD
1954{
1955 bool changed;
8aa46dd2 1956 tree e, e0, e1, e2, notcond;
e9eb809d
ZD
1957 enum tree_code code = TREE_CODE (expr);
1958
1959 if (code == INTEGER_CST)
1960 return expr;
1961
1962 if (code == TRUTH_OR_EXPR
1963 || code == TRUTH_AND_EXPR
1964 || code == COND_EXPR)
1965 {
1966 changed = false;
1967
8aa46dd2 1968 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
e9eb809d
ZD
1969 if (TREE_OPERAND (expr, 0) != e0)
1970 changed = true;
1971
8aa46dd2 1972 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
e9eb809d
ZD
1973 if (TREE_OPERAND (expr, 1) != e1)
1974 changed = true;
1975
1976 if (code == COND_EXPR)
1977 {
8aa46dd2 1978 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
e9eb809d
ZD
1979 if (TREE_OPERAND (expr, 2) != e2)
1980 changed = true;
1981 }
1982 else
1983 e2 = NULL_TREE;
1984
1985 if (changed)
1986 {
1987 if (code == COND_EXPR)
c33e657d 1988 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
e9eb809d 1989 else
c33e657d 1990 expr = fold_build2 (code, boolean_type_node, e0, e1);
e9eb809d
ZD
1991 }
1992
1993 return expr;
1994 }
1995
1d481ba8
ZD
1996 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
1997 propagation, and vice versa. Fold does not handle this, since it is
1998 considered too expensive. */
1999 if (TREE_CODE (cond) == EQ_EXPR)
2000 {
2001 e0 = TREE_OPERAND (cond, 0);
2002 e1 = TREE_OPERAND (cond, 1);
2003
 2004 /* We know that e0 == e1. Check whether we can simplify EXPR
2005 using this fact. */
2006 e = simplify_replace_tree (expr, e0, e1);
6e682d7e 2007 if (integer_zerop (e) || integer_nonzerop (e))
1d481ba8
ZD
2008 return e;
2009
2010 e = simplify_replace_tree (expr, e1, e0);
6e682d7e 2011 if (integer_zerop (e) || integer_nonzerop (e))
1d481ba8
ZD
2012 return e;
2013 }
2014 if (TREE_CODE (expr) == EQ_EXPR)
2015 {
2016 e0 = TREE_OPERAND (expr, 0);
2017 e1 = TREE_OPERAND (expr, 1);
2018
2019 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
2020 e = simplify_replace_tree (cond, e0, e1);
6e682d7e 2021 if (integer_zerop (e))
1d481ba8
ZD
2022 return e;
2023 e = simplify_replace_tree (cond, e1, e0);
6e682d7e 2024 if (integer_zerop (e))
1d481ba8
ZD
2025 return e;
2026 }
2027 if (TREE_CODE (expr) == NE_EXPR)
2028 {
2029 e0 = TREE_OPERAND (expr, 0);
2030 e1 = TREE_OPERAND (expr, 1);
2031
2032 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
2033 e = simplify_replace_tree (cond, e0, e1);
6e682d7e 2034 if (integer_zerop (e))
1d481ba8
ZD
2035 return boolean_true_node;
2036 e = simplify_replace_tree (cond, e1, e0);
6e682d7e 2037 if (integer_zerop (e))
1d481ba8
ZD
2038 return boolean_true_node;
2039 }
2040
e9eb809d
ZD
2041 /* Check whether COND ==> EXPR. */
2042 notcond = invert_truthvalue (cond);
8aa46dd2 2043 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
6e682d7e 2044 if (e && integer_nonzerop (e))
e9eb809d
ZD
2045 return e;
2046
2047 /* Check whether COND ==> not EXPR. */
8aa46dd2 2048 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
6e682d7e 2049 if (e && integer_zerop (e))
e9eb809d
ZD
2050 return e;
2051
2052 return expr;
2053}
2054
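/* Hypothetical examples of the simplifications above: with COND n_1 == 16
   and EXPR n_1 < 20, substituting 16 for n_1 gives 16 < 20, which folds to
   true; with COND n_1 > 0 and EXPR n_1 != 0, substituting shows that
   n_1 == 0 would contradict COND, so EXPR simplifies to true as well.  */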
be1b5cba
ZD
2055/* Tries to simplify EXPR using the condition COND. Returns the simplified
2056 expression (or EXPR unchanged, if no simplification was possible).
2057 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2058 of simple operations in definitions of ssa names in COND are expanded,
2059 so that things like casts or incrementing the value of the bound before
2060 the loop do not cause us to fail. */
2061
2062static tree
8aa46dd2 2063tree_simplify_using_condition (tree cond, tree expr)
be1b5cba 2064{
8aa46dd2 2065 cond = expand_simple_operations (cond);
be1b5cba 2066
8aa46dd2 2067 return tree_simplify_using_condition_1 (cond, expr);
be1b5cba 2068}
b16fb82d 2069
e9eb809d 2070/* Tries to simplify EXPR using the conditions on entry to LOOP.
e9eb809d 2071 Returns the simplified expression (or EXPR unchanged, if no
f3c5f3a3 2072 simplification was possible). */
e9eb809d 2073
f3c5f3a3 2074tree
8aa46dd2 2075simplify_using_initial_conditions (struct loop *loop, tree expr)
e9eb809d
ZD
2076{
2077 edge e;
2078 basic_block bb;
355fe088 2079 gimple *stmt;
8aa46dd2 2080 tree cond, expanded, backup;
b16fb82d 2081 int cnt = 0;
e9eb809d
ZD
2082
2083 if (TREE_CODE (expr) == INTEGER_CST)
2084 return expr;
2085
8aa46dd2
BC
2086 backup = expanded = expand_simple_operations (expr);
2087
b16fb82d
RG
 2088 /* Limit walking the dominators to avoid quadratic behavior in
2089 the number of BBs times the number of loops in degenerate
2090 cases. */
e9eb809d 2091 for (bb = loop->header;
fefa31b5 2092 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
e9eb809d
ZD
2093 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2094 {
c5cbcccf 2095 if (!single_pred_p (bb))
e9eb809d 2096 continue;
c5cbcccf 2097 e = single_pred_edge (bb);
e9eb809d
ZD
2098
2099 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2100 continue;
2101
726a989a
RB
2102 stmt = last_stmt (e->src);
2103 cond = fold_build2 (gimple_cond_code (stmt),
2104 boolean_type_node,
2105 gimple_cond_lhs (stmt),
2106 gimple_cond_rhs (stmt));
e9eb809d
ZD
2107 if (e->flags & EDGE_FALSE_VALUE)
2108 cond = invert_truthvalue (cond);
8aa46dd2 2109 expanded = tree_simplify_using_condition (cond, expanded);
eff1e5af 2110 /* Stop if EXPR has been simplified to a constant. */
8aa46dd2
BC
2111 if (expanded
2112 && (integer_zerop (expanded) || integer_nonzerop (expanded)))
2113 return expanded;
eff1e5af 2114
b16fb82d 2115 ++cnt;
e9eb809d
ZD
2116 }
2117
8aa46dd2
BC
2118 /* Return the original expression if no simplification is done. */
2119 return operand_equal_p (backup, expanded, 0) ? expr : expanded;
e9eb809d
ZD
2120}
2121
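/* A sketch of the dominator walk above on a hypothetical guarded loop:

     if (n_1 > 0)
       for (i = 0; i < n_1; i++)
         body ();

   The chain of single predecessors from the loop header reaches the guard,
   so a MAY_BE_ZERO expression of the form n_1 <= 0 simplifies to false
   under the condition n_1 > 0.  */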
c33e657d
ZD
2122/* Tries to simplify EXPR using the evolutions of the loop invariants
2123 in the superloops of LOOP. Returns the simplified expression
2124 (or EXPR unchanged, if no simplification was possible). */
2125
2126static tree
2127simplify_using_outer_evolutions (struct loop *loop, tree expr)
2128{
2129 enum tree_code code = TREE_CODE (expr);
2130 bool changed;
2131 tree e, e0, e1, e2;
2132
2133 if (is_gimple_min_invariant (expr))
2134 return expr;
2135
2136 if (code == TRUTH_OR_EXPR
2137 || code == TRUTH_AND_EXPR
2138 || code == COND_EXPR)
2139 {
2140 changed = false;
2141
2142 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2143 if (TREE_OPERAND (expr, 0) != e0)
2144 changed = true;
2145
2146 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2147 if (TREE_OPERAND (expr, 1) != e1)
2148 changed = true;
2149
2150 if (code == COND_EXPR)
2151 {
2152 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2153 if (TREE_OPERAND (expr, 2) != e2)
2154 changed = true;
2155 }
2156 else
2157 e2 = NULL_TREE;
2158
2159 if (changed)
2160 {
2161 if (code == COND_EXPR)
2162 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2163 else
2164 expr = fold_build2 (code, boolean_type_node, e0, e1);
2165 }
2166
2167 return expr;
2168 }
2169
2170 e = instantiate_parameters (loop, expr);
2171 if (is_gimple_min_invariant (e))
2172 return e;
2173
2174 return expr;
2175}
2176
f08ac361
ZD
2177/* Returns true if EXIT is the only possible exit from LOOP. */
2178
52778e2a 2179bool
22ea9ec0 2180loop_only_exit_p (const struct loop *loop, const_edge exit)
f08ac361
ZD
2181{
2182 basic_block *body;
726a989a 2183 gimple_stmt_iterator bsi;
f08ac361 2184 unsigned i;
f08ac361 2185
ac8f6c69 2186 if (exit != single_exit (loop))
f08ac361
ZD
2187 return false;
2188
2189 body = get_loop_body (loop);
2190 for (i = 0; i < loop->num_nodes; i++)
2191 {
726a989a 2192 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
21bcd7be 2193 if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
ccae0c85
ML
2194 {
2195 free (body);
 2196 return false;
2197 }
f08ac361
ZD
2198 }
2199
2200 free (body);
2201 return true;
2202}
2203
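/* For instance, in the hypothetical loop

     for (i = 0; i != n; i++)
       f ();

   a call to F that may throw or not return lets control leave the loop
   without passing through the exit test, so the scan above must answer
   false; only when no statement can terminate its basic block prematurely
   may the single exit be assumed to be eventually taken (see the ONLY_EXIT
   handling in number_of_iterations_cond).  */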
e9eb809d 2204/* Stores description of number of iterations of LOOP derived from
43aabfcf
BC
2205 EXIT (an exit edge of the LOOP) in NITER. Returns true if some useful
2206 information could be derived (and fields of NITER have meaning described
2207 in comments at struct tree_niter_desc declaration), false otherwise.
cd0f6278 2208 When EVERY_ITERATION is true, only tests that are known to be executed
43aabfcf 2209 every iteration are considered (i.e. only a test that alone bounds the loop).
faa1612a
BC
2210 If AT_STMT is not NULL, this function stores LOOP's condition statement in
2211 it when returning true. */
e9eb809d
ZD
2212
2213bool
43aabfcf
BC
2214number_of_iterations_exit_assumptions (struct loop *loop, edge exit,
2215 struct tree_niter_desc *niter,
faa1612a 2216 gcond **at_stmt, bool every_iteration)
e9eb809d 2217{
355fe088 2218 gimple *last;
538dd0b7 2219 gcond *stmt;
726a989a 2220 tree type;
a6f778b2 2221 tree op0, op1;
e9eb809d 2222 enum tree_code code;
a6f778b2 2223 affine_iv iv0, iv1;
870ca331 2224 bool safe;
e9eb809d 2225
18767ebc
BC
2226 /* Nothing to analyze if the loop is known to be infinite. */
2227 if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
2228 return false;
2229
870ca331
JH
2230 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2231
2232 if (every_iteration && !safe)
e9eb809d
ZD
2233 return false;
2234
2235 niter->assumptions = boolean_false_node;
2f07b722
BC
2236 niter->control.base = NULL_TREE;
2237 niter->control.step = NULL_TREE;
2238 niter->control.no_overflow = false;
538dd0b7
DM
2239 last = last_stmt (exit->src);
2240 if (!last)
2241 return false;
2242 stmt = dyn_cast <gcond *> (last);
2243 if (!stmt)
e9eb809d
ZD
2244 return false;
2245
2246 /* We want the condition for staying inside loop. */
726a989a 2247 code = gimple_cond_code (stmt);
e9eb809d 2248 if (exit->flags & EDGE_TRUE_VALUE)
726a989a 2249 code = invert_tree_comparison (code, false);
e9eb809d 2250
e9eb809d
ZD
2251 switch (code)
2252 {
2253 case GT_EXPR:
2254 case GE_EXPR:
e9eb809d
ZD
2255 case LT_EXPR:
2256 case LE_EXPR:
870ca331 2257 case NE_EXPR:
e9eb809d
ZD
2258 break;
2259
2260 default:
2261 return false;
2262 }
b8698a0f 2263
726a989a
RB
2264 op0 = gimple_cond_lhs (stmt);
2265 op1 = gimple_cond_rhs (stmt);
e9eb809d
ZD
2266 type = TREE_TYPE (op0);
2267
2268 if (TREE_CODE (type) != INTEGER_TYPE
b3393f1f 2269 && !POINTER_TYPE_P (type))
e9eb809d 2270 return false;
b8698a0f 2271
43aabfcf
BC
2272 tree iv0_niters = NULL_TREE;
2273 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2274 op0, &iv0, &iv0_niters, false))
2275 return false;
2276 tree iv1_niters = NULL_TREE;
2277 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2278 op1, &iv1, &iv1_niters, false))
e9eb809d 2279 return false;
43aabfcf
BC
 2280 /* Give up on the complicated case. */
2281 if (iv0_niters && iv1_niters)
e9eb809d
ZD
2282 return false;
2283
6ac01510 2284 /* We don't want to see undefined signed overflow warnings while
ea2c620c 2285 computing the number of iterations. */
6ac01510
ILT
2286 fold_defer_overflow_warnings ();
2287
7f17528a
ZD
2288 iv0.base = expand_simple_operations (iv0.base);
2289 iv1.base = expand_simple_operations (iv1.base);
b3ce5b6e 2290 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
870ca331 2291 loop_only_exit_p (loop, exit), safe))
6ac01510
ILT
2292 {
2293 fold_undefer_and_ignore_overflow_warnings ();
2294 return false;
2295 }
c33e657d 2296
43aabfcf
BC
2297 /* Incorporate additional assumption implied by control iv. */
2298 tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
2299 if (iv_niters)
2300 {
2301 tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
2302 fold_convert (TREE_TYPE (niter->niter),
2303 iv_niters));
2304
2305 if (!integer_nonzerop (assumption))
2306 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2307 niter->assumptions, assumption);
2308
2309 /* Refine upper bound if possible. */
2310 if (TREE_CODE (iv_niters) == INTEGER_CST
2311 && niter->max > wi::to_widest (iv_niters))
2312 niter->max = wi::to_widest (iv_niters);
2313 }
2314
18767ebc
BC
 2315 /* There are no assumptions if the loop is known to be finite. */
2316 if (!integer_zerop (niter->assumptions)
2317 && loop_constraint_set_p (loop, LOOP_C_FINITE))
2318 niter->assumptions = boolean_true_node;
2319
c33e657d
ZD
2320 if (optimize >= 3)
2321 {
2322 niter->assumptions = simplify_using_outer_evolutions (loop,
2323 niter->assumptions);
2324 niter->may_be_zero = simplify_using_outer_evolutions (loop,
2325 niter->may_be_zero);
2326 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2327 }
e9eb809d 2328
e9eb809d
ZD
2329 niter->assumptions
2330 = simplify_using_initial_conditions (loop,
b3ce5b6e 2331 niter->assumptions);
e9eb809d
ZD
2332 niter->may_be_zero
2333 = simplify_using_initial_conditions (loop,
b3ce5b6e 2334 niter->may_be_zero);
f9cc1a70 2335
6ac01510
ILT
2336 fold_undefer_and_ignore_overflow_warnings ();
2337
f2a1b469
JH
2338 /* If NITER has simplified into a constant, update MAX. */
2339 if (TREE_CODE (niter->niter) == INTEGER_CST)
018b22f3 2340 niter->max = wi::to_widest (niter->niter);
f2a1b469 2341
faa1612a
BC
2342 if (at_stmt)
2343 *at_stmt = stmt;
2344
43aabfcf
BC
2345 return (!integer_zerop (niter->assumptions));
2346}
b8698a0f 2347
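/* A hypothetical example of a nontrivial assumption: for

     unsigned int i;
     for (i = b; i != e; i += 4)
       body ();

   the description would say, roughly, NITER = (e - b) / 4 under the
   ASSUMPTION that e - b is a multiple of 4; a caller that cannot discharge
   the assumption must not rely on the bound unconditionally, which is what
   the wrapper below checks.  */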
70e1d145
AH
2348/* Like number_of_iterations_exit_assumptions, but return TRUE only if
2349 the niter information holds unconditionally. */
f9cc1a70 2350
43aabfcf
BC
2351bool
2352number_of_iterations_exit (struct loop *loop, edge exit,
2353 struct tree_niter_desc *niter,
faa1612a 2354 bool warn, bool every_iteration)
43aabfcf 2355{
faa1612a 2356 gcond *stmt;
43aabfcf 2357 if (!number_of_iterations_exit_assumptions (loop, exit, niter,
faa1612a 2358 &stmt, every_iteration))
43aabfcf 2359 return false;
f9cc1a70 2360
faa1612a
BC
2361 if (integer_nonzerop (niter->assumptions))
2362 return true;
2363
2364 if (warn)
9d975cb6
JJ
2365 warning_at (gimple_location_safe (stmt),
2366 OPT_Wunsafe_loop_optimizations,
2367 "missed loop optimization, the loop counter may overflow");
faa1612a
BC
2368
2369 return false;
e9eb809d
ZD
2370}
2371
ca4c3169
ZD
2372/* Try to determine the number of iterations of LOOP. If we succeed,
2373 expression giving number of iterations is returned and *EXIT is
2374 set to the edge from that the information is obtained. Otherwise
2375 chrec_dont_know is returned. */
2376
2377tree
2378find_loop_niter (struct loop *loop, edge *exit)
2379{
ca83d385 2380 unsigned i;
9771b263 2381 vec<edge> exits = get_loop_exit_edges (loop);
ca4c3169
ZD
2382 edge ex;
2383 tree niter = NULL_TREE, aniter;
2384 struct tree_niter_desc desc;
2385
2386 *exit = NULL;
9771b263 2387 FOR_EACH_VEC_ELT (exits, i, ex)
ca4c3169 2388 {
f9cc1a70 2389 if (!number_of_iterations_exit (loop, ex, &desc, false))
ca4c3169
ZD
2390 continue;
2391
6e682d7e 2392 if (integer_nonzerop (desc.may_be_zero))
ca4c3169
ZD
2393 {
2394 /* We exit in the first iteration through this exit.
2395 We won't find anything better. */
ff5e9a94 2396 niter = build_int_cst (unsigned_type_node, 0);
ca4c3169
ZD
2397 *exit = ex;
2398 break;
2399 }
2400
6e682d7e 2401 if (!integer_zerop (desc.may_be_zero))
ca4c3169
ZD
2402 continue;
2403
2404 aniter = desc.niter;
2405
2406 if (!niter)
2407 {
2408 /* Nothing recorded yet. */
2409 niter = aniter;
2410 *exit = ex;
2411 continue;
2412 }
2413
2414 /* Prefer constants, the lower the better. */
2415 if (TREE_CODE (aniter) != INTEGER_CST)
2416 continue;
2417
2418 if (TREE_CODE (niter) != INTEGER_CST)
2419 {
2420 niter = aniter;
2421 *exit = ex;
2422 continue;
2423 }
2424
2425 if (tree_int_cst_lt (aniter, niter))
2426 {
2427 niter = aniter;
2428 *exit = ex;
2429 continue;
2430 }
2431 }
9771b263 2432 exits.release ();
ca4c3169
ZD
2433
2434 return niter ? niter : chrec_dont_know;
2435}
2436
f87c9042
JH
 2437/* Return true if the loop is known to have a bounded number of iterations. */
2438
2439bool
2440finite_loop_p (struct loop *loop)
2441{
807e902e 2442 widest_int nit;
9e3920e9 2443 int flags;
f87c9042 2444
9e3920e9
JJ
2445 flags = flags_from_decl_or_type (current_function_decl);
2446 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
f87c9042
JH
2447 {
2448 if (dump_file && (dump_flags & TDF_DETAILS))
2449 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2450 loop->num);
2451 return true;
2452 }
b8698a0f 2453
1bc60b18
JH
2454 if (loop->any_upper_bound
2455 || max_loop_iterations (loop, &nit))
f87c9042 2456 {
1bc60b18
JH
2457 if (dump_file && (dump_flags & TDF_DETAILS))
2458 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2459 loop->num);
2460 return true;
f87c9042 2461 }
1bc60b18 2462 return false;
f87c9042
JH
2463}
2464
e9eb809d
ZD
2465/*
2466
2467 Analysis of a number of iterations of a loop by a brute-force evaluation.
2468
2469*/
2470
2471/* Bound on the number of iterations we try to evaluate. */
2472
2473#define MAX_ITERATIONS_TO_TRACK \
2474 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2475
2476/* Returns the loop phi node of LOOP such that ssa name X is derived from its
2477 result by a chain of operations such that all but exactly one of their
2478 operands are constants. */
2479
538dd0b7 2480static gphi *
e9eb809d
ZD
2481chain_of_csts_start (struct loop *loop, tree x)
2482{
355fe088 2483 gimple *stmt = SSA_NAME_DEF_STMT (x);
f47c96aa 2484 tree use;
726a989a
RB
2485 basic_block bb = gimple_bb (stmt);
2486 enum tree_code code;
e9eb809d
ZD
2487
2488 if (!bb
2489 || !flow_bb_inside_loop_p (loop, bb))
726a989a 2490 return NULL;
b8698a0f 2491
726a989a 2492 if (gimple_code (stmt) == GIMPLE_PHI)
e9eb809d
ZD
2493 {
2494 if (bb == loop->header)
538dd0b7 2495 return as_a <gphi *> (stmt);
e9eb809d 2496
726a989a 2497 return NULL;
e9eb809d
ZD
2498 }
2499
100f09a5
RB
2500 if (gimple_code (stmt) != GIMPLE_ASSIGN
2501 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
726a989a 2502 return NULL;
e9eb809d 2503
726a989a
RB
2504 code = gimple_assign_rhs_code (stmt);
2505 if (gimple_references_memory_p (stmt)
726a989a 2506 || TREE_CODE_CLASS (code) == tcc_reference
5006671f
RG
2507 || (code == ADDR_EXPR
2508 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
726a989a 2509 return NULL;
f47c96aa
AM
2510
2511 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
5006671f 2512 if (use == NULL_TREE)
726a989a 2513 return NULL;
e9eb809d 2514
f47c96aa 2515 return chain_of_csts_start (loop, use);
e9eb809d
ZD
2516}
2517
2518/* Determines whether the expression X is derived from a result of a phi node
2519 in header of LOOP such that
2520
2521 * the derivation of X consists only from operations with constants
2522 * the initial value of the phi node is constant
2523 * the value of the phi node in the next iteration can be derived from the
5558f089
JJ
2524 value in the current iteration by a chain of operations with constants,
2525 or is also a constant
b8698a0f 2526
726a989a 2527 If such phi node exists, it is returned, otherwise NULL is returned. */
e9eb809d 2528
538dd0b7 2529static gphi *
e9eb809d
ZD
2530get_base_for (struct loop *loop, tree x)
2531{
538dd0b7 2532 gphi *phi;
726a989a 2533 tree init, next;
e9eb809d
ZD
2534
2535 if (is_gimple_min_invariant (x))
726a989a 2536 return NULL;
e9eb809d
ZD
2537
2538 phi = chain_of_csts_start (loop, x);
2539 if (!phi)
726a989a 2540 return NULL;
e9eb809d
ZD
2541
2542 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2543 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2544
e9eb809d 2545 if (!is_gimple_min_invariant (init))
726a989a 2546 return NULL;
e9eb809d 2547
5558f089
JJ
2548 if (TREE_CODE (next) == SSA_NAME
2549 && chain_of_csts_start (loop, next) != phi)
726a989a 2550 return NULL;
e9eb809d
ZD
2551
2552 return phi;
2553}
2554
b8698a0f
L
2555/* Given an expression X, then
2556
ed52affe 2557 * if X is NULL_TREE, we return the constant BASE.
5558f089 2558 * if X is a constant, we return the constant X.
e9eb809d
ZD
2559 * otherwise X is a SSA name, whose value in the considered loop is derived
2560 by a chain of operations with constant from a result of a phi node in
2561 the header of the loop. Then we return value of X when the value of the
2562 result of this phi node is given by the constant BASE. */
2563
2564static tree
2565get_val_for (tree x, tree base)
2566{
355fe088 2567 gimple *stmt;
e9eb809d 2568
100f09a5 2569 gcc_checking_assert (is_gimple_min_invariant (base));
ed52affe 2570
e9eb809d
ZD
2571 if (!x)
2572 return base;
5558f089
JJ
2573 else if (is_gimple_min_invariant (x))
2574 return x;
e9eb809d
ZD
2575
2576 stmt = SSA_NAME_DEF_STMT (x);
726a989a 2577 if (gimple_code (stmt) == GIMPLE_PHI)
e9eb809d
ZD
2578 return base;
2579
100f09a5 2580 gcc_checking_assert (is_gimple_assign (stmt));
726a989a
RB
2581
2582 /* STMT must be either an assignment of a single SSA name or an
2583 expression involving an SSA name and a constant. Try to fold that
2584 expression using the value for the SSA name. */
0f336c35
RG
2585 if (gimple_assign_ssa_name_copy_p (stmt))
2586 return get_val_for (gimple_assign_rhs1 (stmt), base);
2587 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2588 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
5558f089
JJ
2589 return fold_build1 (gimple_assign_rhs_code (stmt),
2590 gimple_expr_type (stmt),
2591 get_val_for (gimple_assign_rhs1 (stmt), base));
0f336c35 2592 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
726a989a 2593 {
0f336c35
RG
2594 tree rhs1 = gimple_assign_rhs1 (stmt);
2595 tree rhs2 = gimple_assign_rhs2 (stmt);
2596 if (TREE_CODE (rhs1) == SSA_NAME)
2597 rhs1 = get_val_for (rhs1, base);
2598 else if (TREE_CODE (rhs2) == SSA_NAME)
2599 rhs2 = get_val_for (rhs2, base);
2600 else
2601 gcc_unreachable ();
2602 return fold_build2 (gimple_assign_rhs_code (stmt),
2603 gimple_expr_type (stmt), rhs1, rhs2);
f47c96aa 2604 }
726a989a 2605 else
0f336c35 2606 gcc_unreachable ();
e9eb809d
ZD
2607}
2608
726a989a 2609
e9eb809d
ZD
2610/* Tries to count the number of iterations of LOOP till it exits by EXIT
2611 by brute force -- i.e. by determining the value of the operands of the
2612 condition at EXIT in first few iterations of the loop (assuming that
2613 these values are constant) and determining the first one in that the
2614 condition is not satisfied. Returns the constant giving the number
2615 of the iterations of LOOP if successful, chrec_dont_know otherwise. */
2616
2617tree
2618loop_niter_by_eval (struct loop *loop, edge exit)
2619{
726a989a
RB
2620 tree acnd;
2621 tree op[2], val[2], next[2], aval[2];
538dd0b7 2622 gphi *phi;
355fe088 2623 gimple *cond;
e9eb809d
ZD
2624 unsigned i, j;
2625 enum tree_code cmp;
2626
2627 cond = last_stmt (exit->src);
726a989a 2628 if (!cond || gimple_code (cond) != GIMPLE_COND)
e9eb809d
ZD
2629 return chrec_dont_know;
2630
726a989a 2631 cmp = gimple_cond_code (cond);
e9eb809d 2632 if (exit->flags & EDGE_TRUE_VALUE)
726a989a 2633 cmp = invert_tree_comparison (cmp, false);
e9eb809d 2634
e9eb809d
ZD
2635 switch (cmp)
2636 {
2637 case EQ_EXPR:
2638 case NE_EXPR:
2639 case GT_EXPR:
2640 case GE_EXPR:
2641 case LT_EXPR:
2642 case LE_EXPR:
726a989a
RB
2643 op[0] = gimple_cond_lhs (cond);
2644 op[1] = gimple_cond_rhs (cond);
e9eb809d
ZD
2645 break;
2646
2647 default:
2648 return chrec_dont_know;
2649 }
2650
2651 for (j = 0; j < 2; j++)
2652 {
726a989a 2653 if (is_gimple_min_invariant (op[j]))
e9eb809d 2654 {
726a989a
RB
2655 val[j] = op[j];
2656 next[j] = NULL_TREE;
2657 op[j] = NULL_TREE;
e9eb809d
ZD
2658 }
2659 else
2660 {
726a989a
RB
2661 phi = get_base_for (loop, op[j]);
2662 if (!phi)
2663 return chrec_dont_know;
2664 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2665 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
e9eb809d
ZD
2666 }
2667 }
2668
6ac01510
ILT
2669 /* Don't issue signed overflow warnings. */
2670 fold_defer_overflow_warnings ();
2671
e9eb809d
ZD
2672 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2673 {
2674 for (j = 0; j < 2; j++)
2675 aval[j] = get_val_for (op[j], val[j]);
2676
2f133f46 2677 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
6e682d7e 2678 if (acnd && integer_zerop (acnd))
e9eb809d 2679 {
6ac01510 2680 fold_undefer_and_ignore_overflow_warnings ();
e9eb809d
ZD
2681 if (dump_file && (dump_flags & TDF_DETAILS))
2682 fprintf (dump_file,
2683 "Proved that loop %d iterates %d times using brute force.\n",
2684 loop->num, i);
7d60be94 2685 return build_int_cst (unsigned_type_node, i);
e9eb809d
ZD
2686 }
2687
2688 for (j = 0; j < 2; j++)
ed52affe 2689 {
5558f089 2690 aval[j] = val[j];
ed52affe
RG
2691 val[j] = get_val_for (next[j], val[j]);
2692 if (!is_gimple_min_invariant (val[j]))
6ac01510
ILT
2693 {
2694 fold_undefer_and_ignore_overflow_warnings ();
2695 return chrec_dont_know;
2696 }
ed52affe 2697 }
5558f089
JJ
2698
2699 /* If the next iteration would use the same base values
2700 as the current one, there is no point looping further,
2701 all following iterations will be the same as this one. */
2702 if (val[0] == aval[0] && val[1] == aval[1])
2703 break;
e9eb809d
ZD
2704 }
2705
6ac01510
ILT
2706 fold_undefer_and_ignore_overflow_warnings ();
2707
e9eb809d
ZD
2708 return chrec_dont_know;
2709}
2710
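/* A hypothetical loop that the brute-force evaluation above handles even
   though it is not affine:

     for (i = 1; i < 100; i *= 3)
       body ();

   The value of I at the exit test follows the chain 1, 3, 9, 27, 81, 243;
   the test first fails for 243, so the loop is determined to iterate
   5 times.  */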
2711/* Finds the exit of the LOOP by that the loop exits after a constant
2712 number of iterations and stores the exit edge to *EXIT. The constant
2713 giving the number of iterations of LOOP is returned. The number of
2714 iterations is determined using loop_niter_by_eval (i.e. by brute force
 2715 evaluation). If we are unable to find the exit for which loop_niter_by_eval
2716 determines the number of iterations, chrec_dont_know is returned. */
2717
2718tree
2719find_loop_niter_by_eval (struct loop *loop, edge *exit)
2720{
ca83d385 2721 unsigned i;
9771b263 2722 vec<edge> exits = get_loop_exit_edges (loop);
e9eb809d
ZD
2723 edge ex;
2724 tree niter = NULL_TREE, aniter;
2725
2726 *exit = NULL;
2cee1509
RG
2727
2728 /* Loops with multiple exits are expensive to handle and less important. */
2729 if (!flag_expensive_optimizations
9771b263 2730 && exits.length () > 1)
f5843d08 2731 {
9771b263 2732 exits.release ();
f5843d08
RG
2733 return chrec_dont_know;
2734 }
2cee1509 2735
9771b263 2736 FOR_EACH_VEC_ELT (exits, i, ex)
e9eb809d 2737 {
e9eb809d
ZD
2738 if (!just_once_each_iteration_p (loop, ex->src))
2739 continue;
2740
2741 aniter = loop_niter_by_eval (loop, ex);
ca4c3169 2742 if (chrec_contains_undetermined (aniter))
e9eb809d
ZD
2743 continue;
2744
2745 if (niter
ca4c3169 2746 && !tree_int_cst_lt (aniter, niter))
e9eb809d
ZD
2747 continue;
2748
2749 niter = aniter;
2750 *exit = ex;
2751 }
9771b263 2752 exits.release ();
e9eb809d
ZD
2753
2754 return niter ? niter : chrec_dont_know;
2755}
2756
2757/*
2758
2759 Analysis of upper bounds on number of iterations of a loop.
2760
2761*/
2762
807e902e 2763static widest_int derive_constant_upper_bound_ops (tree, tree,
726a989a
RB
2764 enum tree_code, tree);
2765
2766/* Returns a constant upper bound on the value of the right-hand side of
2767 an assignment statement STMT. */
2768
807e902e 2769static widest_int
355fe088 2770derive_constant_upper_bound_assign (gimple *stmt)
726a989a
RB
2771{
2772 enum tree_code code = gimple_assign_rhs_code (stmt);
2773 tree op0 = gimple_assign_rhs1 (stmt);
2774 tree op1 = gimple_assign_rhs2 (stmt);
2775
2776 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
2777 op0, code, op1);
2778}
2779
0ad1d5a1
ZD
2780/* Returns a constant upper bound on the value of expression VAL. VAL
2781 is considered to be unsigned. If its type is signed, its value must
b3ce5b6e 2782 be nonnegative. */
b8698a0f 2783
807e902e 2784static widest_int
726a989a
RB
2785derive_constant_upper_bound (tree val)
2786{
2787 enum tree_code code;
d1e2bb2d 2788 tree op0, op1, op2;
726a989a 2789
d1e2bb2d 2790 extract_ops_from_tree (val, &code, &op0, &op1, &op2);
726a989a
RB
2791 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
2792}
2793
2794/* Returns a constant upper bound on the value of expression OP0 CODE OP1,
2795 whose type is TYPE. The expression is considered to be unsigned. If
2796 its type is signed, its value must be nonnegative. */
b8698a0f 2797
807e902e 2798static widest_int
726a989a
RB
2799derive_constant_upper_bound_ops (tree type, tree op0,
2800 enum tree_code code, tree op1)
763f4527 2801{
726a989a 2802 tree subtype, maxt;
e53d562a 2803 widest_int bnd, max, cst;
355fe088 2804 gimple *stmt;
0ad1d5a1
ZD
2805
2806 if (INTEGRAL_TYPE_P (type))
2807 maxt = TYPE_MAX_VALUE (type);
2808 else
2809 maxt = upper_bound_in_type (type, type);
2810
807e902e 2811 max = wi::to_widest (maxt);
0ad1d5a1 2812
726a989a 2813 switch (code)
0ad1d5a1
ZD
2814 {
2815 case INTEGER_CST:
807e902e 2816 return wi::to_widest (op0);
0ad1d5a1 2817
1043771b 2818 CASE_CONVERT:
0ad1d5a1
ZD
2819 subtype = TREE_TYPE (op0);
2820 if (!TYPE_UNSIGNED (subtype)
2821 /* If TYPE is also signed, the fact that VAL is nonnegative implies
2822 that OP0 is nonnegative. */
2823 && TYPE_UNSIGNED (type)
b3ce5b6e 2824 && !tree_expr_nonnegative_p (op0))
0ad1d5a1
ZD
2825 {
2826 /* If we cannot prove that the casted expression is nonnegative,
2827 we cannot establish more useful upper bound than the precision
2828 of the type gives us. */
2829 return max;
2830 }
763f4527 2831
0ad1d5a1
ZD
 2832 /* We now know that op0 is a nonnegative value. Try deriving an upper
2833 bound for it. */
b3ce5b6e 2834 bnd = derive_constant_upper_bound (op0);
0ad1d5a1
ZD
2835
2836 /* If the bound does not fit in TYPE, max. value of TYPE could be
2837 attained. */
807e902e 2838 if (wi::ltu_p (max, bnd))
0ad1d5a1
ZD
2839 return max;
2840
2841 return bnd;
2842
2843 case PLUS_EXPR:
5be014d5 2844 case POINTER_PLUS_EXPR:
0ad1d5a1 2845 case MINUS_EXPR:
0ad1d5a1 2846 if (TREE_CODE (op1) != INTEGER_CST
b3ce5b6e 2847 || !tree_expr_nonnegative_p (op0))
0ad1d5a1
ZD
2848 return max;
2849
20fb52af
ZD
2850 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
 2851 choose the most logical way to treat this constant regardless
2852 of the signedness of the type. */
807e902e 2853 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
726a989a 2854 if (code != MINUS_EXPR)
27bcd47c 2855 cst = -cst;
0ad1d5a1 2856
b3ce5b6e 2857 bnd = derive_constant_upper_bound (op0);
0ad1d5a1 2858
807e902e 2859 if (wi::neg_p (cst))
0ad1d5a1 2860 {
27bcd47c 2861 cst = -cst;
0ad1d5a1 2862 /* Avoid CST == 0x80000... */
807e902e 2863 if (wi::neg_p (cst))
6f3d1a5e 2864 return max;
0ad1d5a1 2865
20fb52af 2866 /* OP0 + CST. We need to check that
0ad1d5a1
ZD
2867 BND <= MAX (type) - CST. */
2868
e53d562a
RB
2869 widest_int mmax = max - cst;
 2870 if (wi::gtu_p (bnd, mmax))
0ad1d5a1
ZD
2871 return max;
2872
27bcd47c 2873 return bnd + cst;
0ad1d5a1
ZD
2874 }
2875 else
2876 {
20fb52af
ZD
2877 /* OP0 - CST, where CST >= 0.
2878
2879 If TYPE is signed, we have already verified that OP0 >= 0, and we
2880 know that the result is nonnegative. This implies that
2881 VAL <= BND - CST.
2882
2883 If TYPE is unsigned, we must additionally know that OP0 >= CST,
2884 otherwise the operation underflows.
2885 */
2886
2887 /* This should only happen if the type is unsigned; however, for
b3ce5b6e 2888 buggy programs that use overflowing signed arithmetic even with
20fb52af 2889 -fno-wrapv, this condition may also be true for signed values. */
807e902e 2890 if (wi::ltu_p (bnd, cst))
0ad1d5a1
ZD
2891 return max;
2892
b3ce5b6e
ZD
2893 if (TYPE_UNSIGNED (type))
2894 {
2895 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
807e902e 2896 wide_int_to_tree (type, cst));
b3ce5b6e
ZD
2897 if (!tem || integer_nonzerop (tem))
2898 return max;
2899 }
20fb52af 2900
27bcd47c 2901 bnd -= cst;
0ad1d5a1
ZD
2902 }
2903
2904 return bnd;
2905
2906 case FLOOR_DIV_EXPR:
2907 case EXACT_DIV_EXPR:
0ad1d5a1
ZD
2908 if (TREE_CODE (op1) != INTEGER_CST
2909 || tree_int_cst_sign_bit (op1))
2910 return max;
2911
b3ce5b6e 2912 bnd = derive_constant_upper_bound (op0);
807e902e 2913 return wi::udiv_floor (bnd, wi::to_widest (op1));
0ad1d5a1 2914
946e1bc7 2915 case BIT_AND_EXPR:
946e1bc7
ZD
2916 if (TREE_CODE (op1) != INTEGER_CST
2917 || tree_int_cst_sign_bit (op1))
2918 return max;
807e902e 2919 return wi::to_widest (op1);
946e1bc7
ZD
2920
2921 case SSA_NAME:
726a989a
RB
2922 stmt = SSA_NAME_DEF_STMT (op0);
2923 if (gimple_code (stmt) != GIMPLE_ASSIGN
2924 || gimple_assign_lhs (stmt) != op0)
946e1bc7 2925 return max;
726a989a 2926 return derive_constant_upper_bound_assign (stmt);
946e1bc7 2927
b8698a0f 2928 default:
0ad1d5a1
ZD
2929 return max;
2930 }
763f4527
ZD
2931}
2932
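/* A small worked example of the PLUS_EXPR case above, with hypothetical
   values: for VAL = OP0 + 3 in an 8-bit unsigned type and a derived bound
   of 200 on OP0, CST is canonicalized to -3 and negated back to 3; since
   200 <= 255 - 3, the returned bound is 203.  Had the bound on OP0 been
   254, the addition could wrap and the maximum of the type, 255, would be
   returned instead.  */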
fbd28bc3
JJ
2933/* Emit a -Waggressive-loop-optimizations warning if needed. */
2934
2935static void
2936do_warn_aggressive_loop_optimizations (struct loop *loop,
355fe088 2937 widest_int i_bound, gimple *stmt)
fbd28bc3
JJ
2938{
2939 /* Don't warn if the loop doesn't have known constant bound. */
2940 if (!loop->nb_iterations
2941 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
2942 || !warn_aggressive_loop_optimizations
2943 /* To avoid warning multiple times for the same loop,
2944 only start warning when we preserve loops. */
2945 || (cfun->curr_properties & PROP_loops) == 0
2946 /* Only warn once per loop. */
2947 || loop->warned_aggressive_loop_optimizations
2948 /* Only warn if undefined behavior gives us lower estimate than the
2949 known constant bound. */
807e902e 2950 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
fbd28bc3
JJ
2951 /* And undefined behavior happens unconditionally. */
2952 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
2953 return;
2954
2955 edge e = single_exit (loop);
2956 if (e == NULL)
2957 return;
2958
355fe088 2959 gimple *estmt = last_stmt (e->src);
973dabae
MLI
2960 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2961 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
2962 ? UNSIGNED : SIGNED);
44398cbe 2963 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
973dabae
MLI
2964 "iteration %s invokes undefined behavior", buf))
2965 inform (gimple_location (estmt), "within this loop");
fbd28bc3
JJ
2966 loop->warned_aggressive_loop_optimizations = true;
2967}
2968
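/* A typical (hypothetical) loop for which this warning fires:

     int a[4];
     for (int i = 0; i <= 4; i++)
       a[i] = 0;

   The exit test gives a constant bound of 5 iterations, but the array
   access is only valid for i <= 3, so undefined behavior on the iteration
   with i == 4 yields a smaller bound and the diagnostic reports that
   iteration 4 invokes undefined behavior.  */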
b3ce5b6e 2969/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
946e1bc7
ZD
2970 is true if the loop is exited immediately after STMT, and this exit
2971 is taken at last when the STMT is executed BOUND + 1 times.
fa10beec 2972 REALISTIC is true if BOUND is expected to be close to the real number
9bdb685e 2973 of iterations. UPPER is true if we are sure the loop iterates at most
807e902e 2974 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
e9eb809d 2975
946e1bc7 2976static void
807e902e 2977record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
355fe088 2978 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
e9eb809d 2979{
807e902e 2980 widest_int delta;
e9eb809d
ZD
2981
2982 if (dump_file && (dump_flags & TDF_DETAILS))
2983 {
946e1bc7 2984 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
726a989a 2985 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
9bdb685e
ZD
2986 fprintf (dump_file, " is %sexecuted at most ",
2987 upper ? "" : "probably ");
e9eb809d 2988 print_generic_expr (dump_file, bound, TDF_SLIM);
763f4527 2989 fprintf (dump_file, " (bounded by ");
807e902e 2990 print_decu (i_bound, dump_file);
946e1bc7 2991 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
e9eb809d
ZD
2992 }
2993
9bdb685e
ZD
 2994 /* If I_BOUND is just an estimate of BOUND, it is rarely close to the
2995 real number of iterations. */
2996 if (TREE_CODE (bound) != INTEGER_CST)
2997 realistic = false;
f2a1b469 2998 else
807e902e 2999 gcc_checking_assert (i_bound == wi::to_widest (bound));
9bdb685e
ZD
3000
3001 /* If we have a guaranteed upper bound, record it in the appropriate
fbd28bc3
JJ
3002 list, unless this is an !is_exit bound (i.e. undefined behavior in
3003 at_stmt) in a loop with known constant number of iterations. */
3004 if (upper
3005 && (is_exit
3006 || loop->nb_iterations == NULL_TREE
3007 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
9bdb685e 3008 {
766090c2 3009 struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
9bdb685e
ZD
3010
3011 elt->bound = i_bound;
3012 elt->stmt = at_stmt;
3013 elt->is_exit = is_exit;
3014 elt->next = loop->bounds;
3015 loop->bounds = elt;
3016 }
3017
cd0f6278
JH
 3018 /* If the statement is executed on every path to the loop latch, we can directly
3019 infer the upper bound on the # of iterations of the loop. */
3020 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
105e29c5 3021 upper = false;
cd0f6278 3022
9bdb685e 3023 /* Update the number of iteration estimates according to the bound.
05322355
JH
3024 If at_stmt is an exit then the loop latch is executed at most BOUND times,
3025 otherwise it can be executed BOUND + 1 times. We will lower the estimate
3026 later if such statement must be executed on last iteration */
3027 if (is_exit)
807e902e 3028 delta = 0;
9bdb685e 3029 else
807e902e
KZ
3030 delta = 1;
3031 widest_int new_i_bound = i_bound + delta;
9bdb685e 3032
7fa7289d 3033 /* If an overflow occurred, ignore the result. */
807e902e 3034 if (wi::ltu_p (new_i_bound, delta))
9bdb685e
ZD
3035 return;
3036
fbd28bc3 3037 if (upper && !is_exit)
807e902e
KZ
3038 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
3039 record_niter_bound (loop, new_i_bound, realistic, upper);
e9eb809d
ZD
3040}
3041
2f07b722
BC
3042/* Records the control iv analyzed in NITER for LOOP if the iv is valid
3043 and doesn't overflow. */
3044
3045static void
3046record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
3047{
3048 struct control_iv *iv;
3049
3050 if (!niter->control.base || !niter->control.step)
3051 return;
3052
3053 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3054 return;
3055
3056 iv = ggc_alloc<control_iv> ();
3057 iv->base = niter->control.base;
3058 iv->step = niter->control.step;
3059 iv->next = loop->control_ivs;
3060 loop->control_ivs = iv;
3061
3062 return;
3063}
3064
cf8d19de
BC
3065/* This function returns TRUE if below conditions are satisfied:
3066 1) VAR is SSA variable.
3067 2) VAR is an IV:{base, step} in its defining loop.
3068 3) IV doesn't overflow.
3069 4) Both base and step are integer constants.
3070 5) Base is the MIN/MAX value depends on IS_MIN.
3071 Store value of base to INIT correspondingly. */
3072
3073static bool
3074get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
3075{
3076 if (TREE_CODE (var) != SSA_NAME)
3077 return false;
3078
3079 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
3080 struct loop *loop = loop_containing_stmt (def_stmt);
3081
3082 if (loop == NULL)
3083 return false;
3084
3085 affine_iv iv;
3086 if (!simple_iv (loop, loop, var, &iv, false))
3087 return false;
3088
3089 if (!iv.no_overflow)
3090 return false;
3091
3092 if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
3093 return false;
3094
3095 if (is_min == tree_int_cst_sign_bit (iv.step))
3096 return false;
3097
3098 *init = iv.base;
3099 return true;
3100}
3101
946e1bc7
ZD
3102/* Record the estimate on number of iterations of LOOP based on the fact that
3103 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
9bdb685e
ZD
3104 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
3105 estimated number of iterations is expected to be close to the real one.
3106 UPPER is true if we are sure the induction variable does not wrap. */
946e1bc7
ZD
3107
3108static void
355fe088 3109record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
9bdb685e 3110 tree low, tree high, bool realistic, bool upper)
946e1bc7
ZD
3111{
3112 tree niter_bound, extreme, delta;
3113 tree type = TREE_TYPE (base), unsigned_type;
fa8e5051 3114 tree orig_base = base;
946e1bc7 3115
6e682d7e 3116 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
946e1bc7
ZD
3117 return;
3118
3119 if (dump_file && (dump_flags & TDF_DETAILS))
3120 {
3121 fprintf (dump_file, "Induction variable (");
3122 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3123 fprintf (dump_file, ") ");
3124 print_generic_expr (dump_file, base, TDF_SLIM);
3125 fprintf (dump_file, " + ");
3126 print_generic_expr (dump_file, step, TDF_SLIM);
3127 fprintf (dump_file, " * iteration does not wrap in statement ");
726a989a 3128 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
946e1bc7
ZD
3129 fprintf (dump_file, " in loop %d.\n", loop->num);
3130 }
3131
3132 unsigned_type = unsigned_type_for (type);
3133 base = fold_convert (unsigned_type, base);
3134 step = fold_convert (unsigned_type, step);
3135
3136 if (tree_int_cst_sign_bit (step))
3137 {
fa8e5051 3138 wide_int min, max;
946e1bc7 3139 extreme = fold_convert (unsigned_type, low);
fa8e5051
IE
3140 if (TREE_CODE (orig_base) == SSA_NAME
3141 && TREE_CODE (high) == INTEGER_CST
3142 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
cf8d19de
BC
3143 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3144 || get_cst_init_from_scev (orig_base, &max, false))
fa8e5051
IE
3145 && wi::gts_p (high, max))
3146 base = wide_int_to_tree (unsigned_type, max);
e53d562a
RB
3147 else if (TREE_CODE (base) != INTEGER_CST
3148 && dominated_by_p (CDI_DOMINATORS,
3149 loop->latch, gimple_bb (stmt)))
946e1bc7
ZD
3150 base = fold_convert (unsigned_type, high);
3151 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3152 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3153 }
3154 else
3155 {
fa8e5051 3156 wide_int min, max;
946e1bc7 3157 extreme = fold_convert (unsigned_type, high);
fa8e5051
IE
3158 if (TREE_CODE (orig_base) == SSA_NAME
3159 && TREE_CODE (low) == INTEGER_CST
3160 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
cf8d19de
BC
3161 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3162 || get_cst_init_from_scev (orig_base, &min, true))
fa8e5051
IE
3163 && wi::gts_p (min, low))
3164 base = wide_int_to_tree (unsigned_type, min);
e53d562a
RB
3165 else if (TREE_CODE (base) != INTEGER_CST
3166 && dominated_by_p (CDI_DOMINATORS,
3167 loop->latch, gimple_bb (stmt)))
946e1bc7
ZD
3168 base = fold_convert (unsigned_type, low);
3169 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3170 }
3171
3172 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3173 would get out of the range. */
3174 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
807e902e 3175 widest_int max = derive_constant_upper_bound (niter_bound);
9bdb685e 3176 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
4839cb59
ZD
3177}
3178
946e1bc7 3179/* Determine information about the number of iterations of LOOP from the index
ac84e05e
ZD
 3180 IDX of a data reference accessed in STMT. DTA points to the ilb_data
 3181 structure recording LOOP and STMT. Callback for
 3182 for_each_index. */
946e1bc7
ZD
3183
3184struct ilb_data
3185{
3186 struct loop *loop;
355fe088 3187 gimple *stmt;
946e1bc7
ZD
3188};
3189
3190static bool
3191idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3192{
c22940cd 3193 struct ilb_data *data = (struct ilb_data *) dta;
946e1bc7
ZD
3194 tree ev, init, step;
3195 tree low, high, type, next;
cd0f6278 3196 bool sign, upper = true, at_end = false;
946e1bc7
ZD
3197 struct loop *loop = data->loop;
3198
9bdb685e 3199 if (TREE_CODE (base) != ARRAY_REF)
946e1bc7
ZD
3200 return true;
3201
9bdb685e
ZD
3202 /* For arrays at the end of the structure, we are not guaranteed that they
3203 do not really extend over their declared size. However, for arrays of
3204 size greater than one, this is unlikely to be intended. */
3205 if (array_at_struct_end_p (base))
ac84e05e
ZD
3206 {
3207 at_end = true;
3208 upper = false;
3209 }
9bdb685e 3210
8b679c9b
RB
3211 struct loop *dloop = loop_containing_stmt (data->stmt);
3212 if (!dloop)
3213 return true;
3214
3215 ev = analyze_scalar_evolution (dloop, *idx);
3216 ev = instantiate_parameters (loop, ev);
946e1bc7
ZD
3217 init = initial_condition (ev);
3218 step = evolution_part_in_loop_num (ev, loop->num);
3219
3220 if (!init
3221 || !step
3222 || TREE_CODE (step) != INTEGER_CST
6e682d7e 3223 || integer_zerop (step)
946e1bc7
ZD
3224 || tree_contains_chrecs (init, NULL)
3225 || chrec_contains_symbols_defined_in_loop (init, loop->num))
3226 return true;
3227
3228 low = array_ref_low_bound (base);
3229 high = array_ref_up_bound (base);
b8698a0f 3230
946e1bc7
ZD
3231 /* The case of nonconstant bounds could be handled, but it would be
3232 complicated. */
3233 if (TREE_CODE (low) != INTEGER_CST
3234 || !high
3235 || TREE_CODE (high) != INTEGER_CST)
3236 return true;
3237 sign = tree_int_cst_sign_bit (step);
3238 type = TREE_TYPE (step);
9bdb685e
ZD
3239
3240 /* The array of length 1 at the end of a structure most likely extends
3241 beyond its bounds. */
ac84e05e 3242 if (at_end
9bdb685e
ZD
3243 && operand_equal_p (low, high, 0))
3244 return true;
3245
946e1bc7
ZD
3246 /* In case the relevant bound of the array does not fit in type, or
3247 it does, but bound + step (in type) still belongs into the range of the
3248 array, the index may wrap and still stay within the range of the array
3249 (consider e.g. if the array is indexed by the full range of
3250 unsigned char).
3251
3252 To make things simpler, we require both bounds to fit into type, although
2f8e468b 3253 there are cases where this would not be strictly necessary. */
946e1bc7
ZD
3254 if (!int_fits_type_p (high, type)
3255 || !int_fits_type_p (low, type))
3256 return true;
3257 low = fold_convert (type, low);
3258 high = fold_convert (type, high);
3259
3260 if (sign)
3261 next = fold_binary (PLUS_EXPR, type, low, step);
3262 else
3263 next = fold_binary (PLUS_EXPR, type, high, step);
b8698a0f 3264
946e1bc7
ZD
3265 if (tree_int_cst_compare (low, next) <= 0
3266 && tree_int_cst_compare (next, high) <= 0)
3267 return true;
3268
77c9d5b4
JH
 3269 /* If the access is not executed on every iteration, we must ensure that
 3270 overflow cannot make the access valid later. */
870ca331 3271 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
b24d9420
BC
3272 && scev_probably_wraps_p (NULL_TREE,
3273 initial_condition_in_loop_num (ev, loop->num),
870ca331 3274 step, data->stmt, loop, true))
77c9d5b4 3275 upper = false;
870ca331 3276
77c9d5b4 3277 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
946e1bc7
ZD
3278 return true;
3279}
3280
3281/* Determine information about the number of iterations of a LOOP from the bounds
ac84e05e
ZD
3282 of arrays in the data reference REF accessed in STMT. */
946e1bc7
ZD
3284
3285static void
355fe088 3286infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
946e1bc7
ZD
3287{
3288 struct ilb_data data;
3289
3290 data.loop = loop;
3291 data.stmt = stmt;
3292 for_each_index (&ref, idx_infer_loop_bounds, &data);
3293}
3294
3295/* Determine information about the number of iterations of a LOOP from the way
ac84e05e
ZD
3296 arrays are used in STMT. */
946e1bc7
ZD
3298
3299static void
355fe088 3300infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
946e1bc7 3301{
726a989a 3302 if (is_gimple_assign (stmt))
946e1bc7 3303 {
726a989a
RB
3304 tree op0 = gimple_assign_lhs (stmt);
3305 tree op1 = gimple_assign_rhs1 (stmt);
946e1bc7
ZD
3306
3307 /* For each memory access, analyze its access function
3308 and record a bound on the loop iteration domain. */
3309 if (REFERENCE_CLASS_P (op0))
cd0f6278 3310 infer_loop_bounds_from_ref (loop, stmt, op0);
946e1bc7
ZD
3311
3312 if (REFERENCE_CLASS_P (op1))
cd0f6278 3313 infer_loop_bounds_from_ref (loop, stmt, op1);
946e1bc7 3314 }
726a989a 3315 else if (is_gimple_call (stmt))
946e1bc7 3316 {
726a989a
RB
3317 tree arg, lhs;
3318 unsigned i, n = gimple_call_num_args (stmt);
946e1bc7 3319
726a989a
RB
3320 lhs = gimple_call_lhs (stmt);
3321 if (lhs && REFERENCE_CLASS_P (lhs))
cd0f6278 3322 infer_loop_bounds_from_ref (loop, stmt, lhs);
726a989a
RB
3323
3324 for (i = 0; i < n; i++)
3325 {
3326 arg = gimple_call_arg (stmt, i);
3327 if (REFERENCE_CLASS_P (arg))
cd0f6278 3328 infer_loop_bounds_from_ref (loop, stmt, arg);
726a989a 3329 }
946e1bc7
ZD
3330 }
3331}
3332
bc69f7ff
TV
3333/* Determine information about the number of iterations of a LOOP from the fact
3334 that pointer arithmetic in STMT does not overflow. */
3335
3336static void
355fe088 3337infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
bc69f7ff
TV
3338{
3339 tree def, base, step, scev, type, low, high;
3340 tree var, ptr;
3341
3342 if (!is_gimple_assign (stmt)
3343 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3344 return;
3345
3346 def = gimple_assign_lhs (stmt);
3347 if (TREE_CODE (def) != SSA_NAME)
3348 return;
3349
3350 type = TREE_TYPE (def);
3351 if (!nowrap_type_p (type))
3352 return;
3353
3354 ptr = gimple_assign_rhs1 (stmt);
3355 if (!expr_invariant_in_loop_p (loop, ptr))
3356 return;
3357
3358 var = gimple_assign_rhs2 (stmt);
3359 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3360 return;
3361
3362 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3363 if (chrec_contains_undetermined (scev))
3364 return;
3365
3366 base = initial_condition_in_loop_num (scev, loop->num);
3367 step = evolution_part_in_loop_num (scev, loop->num);
3368
3369 if (!base || !step
3370 || TREE_CODE (step) != INTEGER_CST
3371 || tree_contains_chrecs (base, NULL)
3372 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3373 return;
3374
3375 low = lower_bound_in_type (type, type);
3376 high = upper_bound_in_type (type, type);
3377
0703f020
TV
3378 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3379 produce a NULL pointer. The contrary would mean NULL points to an object,
3380 while NULL is supposed to compare unequal with the address of all objects.
3381 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3382 NULL pointer since that would mean wrapping, which we assume here not to
3383 happen. So, we can exclude NULL from the valid range of pointer
3384 arithmetic. */
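  /* E.g. for an induction over "int *" this raises the usable lower bound
     from 0 to TYPE_ALIGN_UNIT of the pointed-to type (typically 4), since
     under these assumptions no pointer produced by the arithmetic can be
     NULL.  */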
3385 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3386 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3387
bc69f7ff
TV
3388 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3389}
3390
946e1bc7
ZD
3391/* Determine information about the number of iterations of a LOOP from the fact
3392 that signed arithmetic in STMT does not overflow. */
3393
3394static void
355fe088 3395infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
946e1bc7
ZD
3396{
3397 tree def, base, step, scev, type, low, high;
3398
726a989a 3399 if (gimple_code (stmt) != GIMPLE_ASSIGN)
946e1bc7
ZD
3400 return;
3401
726a989a 3402 def = gimple_assign_lhs (stmt);
946e1bc7
ZD
3403
3404 if (TREE_CODE (def) != SSA_NAME)
3405 return;
3406
3407 type = TREE_TYPE (def);
3408 if (!INTEGRAL_TYPE_P (type)
eeef0e45 3409 || !TYPE_OVERFLOW_UNDEFINED (type))
946e1bc7
ZD
3410 return;
3411
3412 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3413 if (chrec_contains_undetermined (scev))
3414 return;
3415
3416 base = initial_condition_in_loop_num (scev, loop->num);
3417 step = evolution_part_in_loop_num (scev, loop->num);
3418
3419 if (!base || !step
3420 || TREE_CODE (step) != INTEGER_CST
3421 || tree_contains_chrecs (base, NULL)
3422 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3423 return;
3424
3425 low = lower_bound_in_type (type, type);
3426 high = upper_bound_in_type (type, type);
3427
9bdb685e 3428 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
946e1bc7
ZD
3429}
3430
d7770457
SP
3431/* The following analyzers extract information on the bounds
3432 of LOOP from the following undefined behaviors:
3433
3434 - data references should not access elements over the statically
3435 allocated size,
3436
3437 - signed variables should not overflow when flag_wrapv is not set.
3438*/
3439
3440static void
3441infer_loop_bounds_from_undefined (struct loop *loop)
3442{
3443 unsigned i;
946e1bc7 3444 basic_block *bbs;
726a989a 3445 gimple_stmt_iterator bsi;
946e1bc7 3446 basic_block bb;
ac84e05e 3447 bool reliable;
b8698a0f 3448
d7770457
SP
3449 bbs = get_loop_body (loop);
3450
3451 for (i = 0; i < loop->num_nodes; i++)
3452 {
3453 bb = bbs[i];
3454
946e1bc7 3455 /* If BB is not executed in each iteration of the loop, we cannot
ac84e05e 3456 use the operations in it to infer reliable upper bound on the
cd0f6278
JH
3457 # of iterations of the loop. However, we can use it as a guess.
3458 Reliable guesses come only from array bounds. */
ac84e05e 3459 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
946e1bc7 3460
726a989a 3461 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
946e1bc7 3462 {
355fe088 3463 gimple *stmt = gsi_stmt (bsi);
d7770457 3464
cd0f6278 3465 infer_loop_bounds_from_array (loop, stmt);
ac84e05e
ZD
3466
3467 if (reliable)
bc69f7ff
TV
3468 {
3469 infer_loop_bounds_from_signedness (loop, stmt);
3470 infer_loop_bounds_from_pointer_arith (loop, stmt);
3471 }
946e1bc7
ZD
3472 }
3473
d7770457
SP
3474 }
3475
3476 free (bbs);
3477}
3478
807e902e 3479/* Compare wide ints, callback for qsort. */
73ddf95b 3480
71343877 3481static int
807e902e 3482wide_int_cmp (const void *p1, const void *p2)
73ddf95b 3483{
807e902e
KZ
3484 const widest_int *d1 = (const widest_int *) p1;
3485 const widest_int *d2 = (const widest_int *) p2;
3486 return wi::cmpu (*d1, *d2);
73ddf95b
JH
3487}
3488
3489/* Return index of BOUND in BOUNDS array sorted in increasing order.
3490 Lookup by binary search. */
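/* For instance, with BOUNDS sorted as {2, 6, 9}, bound_index (bounds, 6)
   returns 1; the bound is required to be present, otherwise the
   gcc_unreachable below triggers.  */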
3491
71343877 3492static int
807e902e 3493bound_index (vec<widest_int> bounds, const widest_int &bound)
73ddf95b 3494{
9771b263 3495 unsigned int end = bounds.length ();
73ddf95b
JH
3496 unsigned int begin = 0;
3497
3498 /* Find a matching index by means of a binary search. */
3499 while (begin != end)
3500 {
3501 unsigned int middle = (begin + end) / 2;
807e902e 3502 widest_int index = bounds[middle];
73ddf95b
JH
3503
3504 if (index == bound)
3505 return middle;
807e902e 3506 else if (wi::ltu_p (index, bound))
73ddf95b
JH
3507 begin = middle + 1;
3508 else
3509 end = middle;
3510 }
3511 gcc_unreachable ();
3512}
3513
73ddf95b
JH
3514/* We recorded loop bounds only for statements dominating loop latch (and thus
3515 executed in each loop iteration). If there are any bounds on statements not
3516 dominating the loop latch we can improve the estimate by walking the loop
3517 body and seeing if every path from loop header to loop latch contains
3518 some bounded statement. */
3519
3520static void
3521discover_iteration_bound_by_body_walk (struct loop *loop)
3522{
73ddf95b 3523 struct nb_iter_bound *elt;
8c681247 3524 auto_vec<widest_int> bounds;
b4f9786b
JJ
3525 vec<vec<basic_block> > queues = vNULL;
3526 vec<basic_block> queue = vNULL;
73ddf95b
JH
3527 ptrdiff_t queue_index;
3528 ptrdiff_t latch_index = 0;
73ddf95b
JH
3529
3530 /* Discover what bounds may interest us. */
3531 for (elt = loop->bounds; elt; elt = elt->next)
3532 {
807e902e 3533 widest_int bound = elt->bound;
73ddf95b
JH
3534
3535 /* An exit terminates the loop at the given iteration, while a non-exit produces
3536 an undefined effect on the next iteration. */
3537 if (!elt->is_exit)
4c052539 3538 {
807e902e 3539 bound += 1;
4c052539 3540 /* If an overflow occurred, ignore the result. */
807e902e 3541 if (bound == 0)
4c052539
JJ
3542 continue;
3543 }
73ddf95b
JH
3544
3545 if (!loop->any_upper_bound
807e902e 3546 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
9771b263 3547 bounds.safe_push (bound);
73ddf95b
JH
3548 }
3549
3550 /* Exit early if there is nothing to do. */
9771b263 3551 if (!bounds.exists ())
73ddf95b
JH
3552 return;
3553
3554 if (dump_file && (dump_flags & TDF_DETAILS))
3555 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3556
3557 /* Sort the bounds in increasing order. */
75509ba2 3558 bounds.qsort (wide_int_cmp);
73ddf95b
JH
3559
3560 /* For every basic block record the lowest bound that is guaranteed to
3561 terminate the loop. */
3562
39c8aaa4 3563 hash_map<basic_block, ptrdiff_t> bb_bounds;
73ddf95b
JH
3564 for (elt = loop->bounds; elt; elt = elt->next)
3565 {
807e902e 3566 widest_int bound = elt->bound;
73ddf95b 3567 if (!elt->is_exit)
4c052539 3568 {
807e902e 3569 bound += 1;
4c052539 3570 /* If an overflow occurred, ignore the result. */
807e902e 3571 if (bound == 0)
4c052539
JJ
3572 continue;
3573 }
73ddf95b
JH
3574
3575 if (!loop->any_upper_bound
807e902e 3576 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
73ddf95b
JH
3577 {
3578 ptrdiff_t index = bound_index (bounds, bound);
39c8aaa4 3579 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
73ddf95b 3580 if (!entry)
39c8aaa4 3581 bb_bounds.put (gimple_bb (elt->stmt), index);
73ddf95b 3582 else if ((ptrdiff_t)*entry > index)
39c8aaa4 3583 *entry = index;
73ddf95b
JH
3584 }
3585 }
3586
39c8aaa4 3587 hash_map<basic_block, ptrdiff_t> block_priority;
73ddf95b
JH
3588
3589 /* Perform shortest path discovery loop->header ... loop->latch.
3590
3591 The "distance" is given by the smallest loop bound of basic block
3592 present in the path and we look for path with largest smallest bound
3593 on it.
3594
b4f9786b 3595 To avoid the need for a Fibonacci heap on widest_ints we simply compress
73ddf95b
JH
3596 the bounds into indices into the BOUNDS array and then represent the queue
3597 as an array of queues, one for every index.
9771b263 3598 An index of BOUNDS.length () means that the execution of the given BB has
73ddf95b
JH
3599 no bounds determined.
3600
3601 BLOCK_PRIORITY is a hash map translating each basic block into the smallest
3602 index it was inserted into the priority queue with. */
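  /* A small worked example (the numbers are made up): suppose the body
     splits into two branches that rejoin before the latch, one containing
     a statement whose recorded bound restricts it to the first 4
     iterations, the other to the first 7.  Every iteration follows one of
     the branches, so its number is at most the smallest bound on that
     path, and the loop as a whole iterates at most max (4, 7) = 7 times,
     which is the value the walk below converges to.  */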
3603 latch_index = -1;
3604
3605 /* Start walk in loop header with index set to infinite bound. */
9771b263
DN
3606 queue_index = bounds.length ();
3607 queues.safe_grow_cleared (queue_index + 1);
3608 queue.safe_push (loop->header);
3609 queues[queue_index] = queue;
39c8aaa4 3610 block_priority.put (loop->header, queue_index);
73ddf95b
JH
3611
3612 for (; queue_index >= 0; queue_index--)
3613 {
3614 if (latch_index < queue_index)
3615 {
9771b263 3616 while (queues[queue_index].length ())
73ddf95b
JH
3617 {
3618 basic_block bb;
3619 ptrdiff_t bound_index = queue_index;
73ddf95b
JH
3620 edge e;
3621 edge_iterator ei;
3622
9771b263
DN
3623 queue = queues[queue_index];
3624 bb = queue.pop ();
73ddf95b
JH
3625
3626 /* OK, we later inserted the BB with a lower priority, skip it. */
39c8aaa4 3627 if (*block_priority.get (bb) > queue_index)
73ddf95b
JH
3628 continue;
3629
3630 /* See if we can improve the bound. */
39c8aaa4
TS
3631 ptrdiff_t *entry = bb_bounds.get (bb);
3632 if (entry && *entry < bound_index)
3633 bound_index = *entry;
73ddf95b
JH
3634
3635 /* Insert successors into the queue, watch for the latch edge
3636 and record the greatest index we saw. */
3637 FOR_EACH_EDGE (e, ei, bb->succs)
3638 {
3639 bool insert = false;
73ddf95b
JH
3640
3641 if (loop_exit_edge_p (loop, e))
3642 continue;
3643
3644 if (e == loop_latch_edge (loop)
3645 && latch_index < bound_index)
3646 latch_index = bound_index;
39c8aaa4 3647 else if (!(entry = block_priority.get (e->dest)))
73ddf95b
JH
3648 {
3649 insert = true;
39c8aaa4 3650 block_priority.put (e->dest, bound_index);
73ddf95b 3651 }
39c8aaa4 3652 else if (*entry < bound_index)
73ddf95b
JH
3653 {
3654 insert = true;
39c8aaa4 3655 *entry = bound_index;
73ddf95b
JH
3656 }
3657
3658 if (insert)
b4f9786b 3659 queues[bound_index].safe_push (e->dest);
73ddf95b
JH
3660 }
3661 }
3662 }
b4f9786b 3663 queues[queue_index].release ();
73ddf95b
JH
3664 }
3665
3666 gcc_assert (latch_index >= 0);
9771b263 3667 if ((unsigned)latch_index < bounds.length ())
73ddf95b
JH
3668 {
3669 if (dump_file && (dump_flags & TDF_DETAILS))
3670 {
3671 fprintf (dump_file, "Found better loop bound ");
807e902e 3672 print_decu (bounds[latch_index], dump_file);
73ddf95b
JH
3673 fprintf (dump_file, "\n");
3674 }
9771b263 3675 record_niter_bound (loop, bounds[latch_index], false, true);
73ddf95b
JH
3676 }
3677
9771b263 3678 queues.release ();
73ddf95b
JH
3679}
3680
05322355
JH
3681/* See if every path across the loop goes through a statement that is known
3682 not to execute in the last iteration. In that case we can decrease the iteration
3683 count by 1. */
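/* An illustrative sketch (the numbers are made up): if
   nb_iterations_upper_bound is 16 but a statement lying on every path
   through the body is known, say from an array access, to be executable
   only in the first 15 iterations, then a 16th iteration would have to
   execute it; the bound can therefore be lowered to 15.  */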
3684
3685static void
3686maybe_lower_iteration_bound (struct loop *loop)
3687{
355fe088 3688 hash_set<gimple *> *not_executed_last_iteration = NULL;
05322355
JH
3689 struct nb_iter_bound *elt;
3690 bool found_exit = false;
8c681247 3691 auto_vec<basic_block> queue;
05322355
JH
3692 bitmap visited;
3693
3694 /* Collect all statements with interesting (i.e. lower than
3695 nb_iterations_upper_bound) bound on them.
3696
3697 TODO: Due to the way record_estimate chooses estimates to store, the bounds
3698 will always be nb_iterations_upper_bound-1. We can change this to also
3699 record statements not dominating the loop latch and update the walk below
5764ee3c 3700 to the shortest path algorithm. */
05322355
JH
3701 for (elt = loop->bounds; elt; elt = elt->next)
3702 {
3703 if (!elt->is_exit
807e902e 3704 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
05322355
JH
3705 {
3706 if (!not_executed_last_iteration)
355fe088 3707 not_executed_last_iteration = new hash_set<gimple *>;
6e2830c3 3708 not_executed_last_iteration->add (elt->stmt);
05322355
JH
3709 }
3710 }
3711 if (!not_executed_last_iteration)
3712 return;
3713
3714 /* Start DFS walk in the loop header and see if we can reach the
3715 loop latch or any of the exits (including statements with side
3716 effects that may terminate the loop otherwise) without visiting
3717 any of the statements known to have undefined effect on the last
3718 iteration. */
9771b263 3719 queue.safe_push (loop->header);
05322355
JH
3720 visited = BITMAP_ALLOC (NULL);
3721 bitmap_set_bit (visited, loop->header->index);
3722 found_exit = false;
3723
3724 do
3725 {
9771b263 3726 basic_block bb = queue.pop ();
05322355
JH
3727 gimple_stmt_iterator gsi;
3728 bool stmt_found = false;
3729
3730 /* Look for possible exits and statements bounding the execution. */
3731 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3732 {
355fe088 3733 gimple *stmt = gsi_stmt (gsi);
6e2830c3 3734 if (not_executed_last_iteration->contains (stmt))
05322355
JH
3735 {
3736 stmt_found = true;
3737 break;
3738 }
3739 if (gimple_has_side_effects (stmt))
3740 {
3741 found_exit = true;
3742 break;
3743 }
3744 }
3745 if (found_exit)
3746 break;
3747
3748 /* If no bounding statement is found, continue the walk. */
3749 if (!stmt_found)
3750 {
3751 edge e;
3752 edge_iterator ei;
3753
3754 FOR_EACH_EDGE (e, ei, bb->succs)
3755 {
3756 if (loop_exit_edge_p (loop, e)
3757 || e == loop_latch_edge (loop))
3758 {
3759 found_exit = true;
3760 break;
3761 }
3762 if (bitmap_set_bit (visited, e->dest->index))
9771b263 3763 queue.safe_push (e->dest);
05322355
JH
3764 }
3765 }
3766 }
9771b263 3767 while (queue.length () && !found_exit);
05322355
JH
3768
3769 /* If every path through the loop reaches a bounding statement before an exit,
3770 then we know the last iteration of the loop would have an undefined effect
3771 and we can decrease the number of iterations. */
3772
3773 if (!found_exit)
3774 {
3775 if (dump_file && (dump_flags & TDF_DETAILS))
3776 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
3777 "undefined statement must be executed at the last iteration.\n");
807e902e 3778 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
05322355
JH
3779 false, true);
3780 }
48067724 3781
05322355 3782 BITMAP_FREE (visited);
6e2830c3 3783 delete not_executed_last_iteration;
05322355
JH
3784}
3785
e3488283
RG
3786/* Records estimates on numbers of iterations of LOOP. Estimates derived from
3787 undefined behavior are used only when flag_aggressive_loop_optimizations is set. */
e9eb809d 3788
adb7eaa2
RB
3789void
3790estimate_numbers_of_iterations (struct loop *loop)
e9eb809d 3791{
9771b263 3792 vec<edge> exits;
e9eb809d 3793 tree niter, type;
ca83d385 3794 unsigned i;
e9eb809d 3795 struct tree_niter_desc niter_desc;
ca83d385 3796 edge ex;
807e902e 3797 widest_int bound;
f9bf4777 3798 edge likely_exit;
e9eb809d 3799
79ebd55c 3800 /* Give up if we have already tried to compute an estimate. */
946e1bc7 3801 if (loop->estimate_state != EST_NOT_COMPUTED)
79ebd55c 3802 return;
03fd03d5 3803
9bdb685e 3804 loop->estimate_state = EST_AVAILABLE;
aade5c72
JH
3805
3806 /* If we have a measured profile, use it to estimate the number of
3807 iterations. Normally this is recorded by branch_prob right after
3808 reading the profile. If, however, we found a new loop, record the
3809 information here.
3810
3811 Explicitly check for profile status so we do not report
3812 wrong prediction hitrates for guessed loop iterations heuristics.
3813 Do not recompute already recorded bounds - we ought to be better at
3814 updating iteration bounds than at updating the profile in general, and thus
3815 recomputing iteration bounds later in the compilation process would just
3816 introduce random roundoff errors. */
3817 if (!loop->any_estimate
3995f3a2 3818 && loop->header->count > 0)
aade5c72
JH
3819 {
3820 gcov_type nit = expected_loop_iterations_unbounded (loop);
3821 bound = gcov_type_to_wide_int (nit);
3822 record_niter_bound (loop, bound, true, false);
3823 }
79ebd55c 3824
fbd28bc3
JJ
3825 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
3826 to be constant, we avoid undefined behavior implied bounds and instead
3827 diagnose those loops with -Waggressive-loop-optimizations. */
3828 number_of_latch_executions (loop);
3829
ca83d385 3830 exits = get_loop_exit_edges (loop);
f9bf4777 3831 likely_exit = single_likely_exit (loop);
9771b263 3832 FOR_EACH_VEC_ELT (exits, i, ex)
e9eb809d 3833 {
cd0f6278 3834 if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
e9eb809d
ZD
3835 continue;
3836
3837 niter = niter_desc.niter;
3838 type = TREE_TYPE (niter);
946e1bc7 3839 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
e6845c23 3840 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
ff5e9a94 3841 build_int_cst (type, 0),
e6845c23 3842 niter);
b3ce5b6e 3843 record_estimate (loop, niter, niter_desc.max,
ca83d385 3844 last_stmt (ex->src),
f9bf4777 3845 true, ex == likely_exit, true);
2f07b722 3846 record_control_iv (loop, &niter_desc);
e9eb809d 3847 }
9771b263 3848 exits.release ();
b8698a0f 3849
6e616110
RB
3850 if (flag_aggressive_loop_optimizations)
3851 infer_loop_bounds_from_undefined (loop);
9bdb685e 3852
73ddf95b
JH
3853 discover_iteration_bound_by_body_walk (loop);
3854
05322355
JH
3855 maybe_lower_iteration_bound (loop);
3856
fbd28bc3
JJ
3857 /* If we know the exact number of iterations of this loop, try to
3858 not break code with undefined behavior by not recording a smaller
3859 maximum number of iterations. */
3860 if (loop->nb_iterations
3861 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
3862 {
3863 loop->any_upper_bound = true;
807e902e 3864 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
fbd28bc3 3865 }
e9eb809d
ZD
3866}
3867
b4a9343c
ZD
3868/* Sets NIT to the estimated number of executions of the latch of the
3869 LOOP. If we have no reliable estimate, the function returns false,
3870 otherwise it returns true. */
3872
3873bool
807e902e 3874estimated_loop_iterations (struct loop *loop, widest_int *nit)
b4a9343c 3875{
e3a8f1fa
JH
3876 /* When SCEV information is available, try to update loop iterations
3877 estimate. Otherwise just return whatever we recorded earlier. */
3878 if (scev_initialized_p ())
adb7eaa2 3879 estimate_numbers_of_iterations (loop);
e3a8f1fa 3880
71343877 3881 return (get_estimated_loop_iterations (loop, nit));
652c4c71 3882}
b4a9343c 3883
1ef88893
AM
3884/* Similar to estimated_loop_iterations, but returns the estimate only
3885 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3886 on the number of iterations of LOOP could not be derived, returns -1. */
3887
3888HOST_WIDE_INT
3889estimated_loop_iterations_int (struct loop *loop)
3890{
807e902e 3891 widest_int nit;
1ef88893
AM
3892 HOST_WIDE_INT hwi_nit;
3893
3894 if (!estimated_loop_iterations (loop, &nit))
3895 return -1;
3896
807e902e 3897 if (!wi::fits_shwi_p (nit))
1ef88893
AM
3898 return -1;
3899 hwi_nit = nit.to_shwi ();
3900
3901 return hwi_nit < 0 ? -1 : hwi_nit;
3902}
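/* A minimal, illustrative use of the estimate by a client (the threshold
   is made up):

     HOST_WIDE_INT est = estimated_loop_iterations_int (loop);
     if (est >= 0 && est < 8)
       ...   (the loop is expected to roll only a few times)

   A return value of -1 means that no estimate was derived or that it does
   not fit into a HOST_WIDE_INT.  */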
3903
3904
652c4c71
RG
3905/* Sets NIT to an upper bound for the maximum number of executions of the
3906 latch of the LOOP. If we have no reliable estimate, the function returns
3907 false, otherwise returns true. */
3908
3909bool
807e902e 3910max_loop_iterations (struct loop *loop, widest_int *nit)
652c4c71 3911{
e3a8f1fa
JH
3912 /* When SCEV information is available, try to update loop iterations
3913 estimate. Otherwise just return whatever we recorded earlier. */
3914 if (scev_initialized_p ())
adb7eaa2 3915 estimate_numbers_of_iterations (loop);
b4a9343c 3916
71343877 3917 return get_max_loop_iterations (loop, nit);
652c4c71
RG
3918}
3919
3920/* Similar to max_loop_iterations, but returns the estimate only
3921 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3922 on the number of iterations of LOOP could not be derived, returns -1. */
3923
3924HOST_WIDE_INT
3925max_loop_iterations_int (struct loop *loop)
b4a9343c 3926{
807e902e 3927 widest_int nit;
b4a9343c
ZD
3928 HOST_WIDE_INT hwi_nit;
3929
652c4c71 3930 if (!max_loop_iterations (loop, &nit))
b4a9343c
ZD
3931 return -1;
3932
807e902e 3933 if (!wi::fits_shwi_p (nit))
b4a9343c 3934 return -1;
27bcd47c 3935 hwi_nit = nit.to_shwi ();
b4a9343c
ZD
3936
3937 return hwi_nit < 0 ? -1 : hwi_nit;
3938}
3939
105e29c5
JH
3940/* Sets NIT to a likely upper bound for the maximum number of executions of the
3941 latch of the LOOP. If we have no reliable estimate, the function returns
3942 false, otherwise returns true. */
3943
3944bool
3945likely_max_loop_iterations (struct loop *loop, widest_int *nit)
3946{
3947 /* When SCEV information is available, try to update loop iterations
3948 estimate. Otherwise just return whatever we recorded earlier. */
3949 if (scev_initialized_p ())
adb7eaa2 3950 estimate_numbers_of_iterations (loop);
105e29c5
JH
3951
3952 return get_likely_max_loop_iterations (loop, nit);
3953}
3954
3955/* Similar to likely_max_loop_iterations, but returns the estimate only
3956 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3957 on the number of iterations of LOOP could not be derived, returns -1. */
3958
3959HOST_WIDE_INT
3960likely_max_loop_iterations_int (struct loop *loop)
3961{
3962 widest_int nit;
3963 HOST_WIDE_INT hwi_nit;
3964
3965 if (!likely_max_loop_iterations (loop, &nit))
3966 return -1;
3967
3968 if (!wi::fits_shwi_p (nit))
3969 return -1;
3970 hwi_nit = nit.to_shwi ();
3971
3972 return hwi_nit < 0 ? -1 : hwi_nit;
3973}
3974
652c4c71
RG
3975/* Returns an estimate for the number of executions of statements
3976 in the LOOP. For statements before the loop exit, this exceeds
3977 the number of executions of the latch by one. */
3978
3979HOST_WIDE_INT
3980estimated_stmt_executions_int (struct loop *loop)
3981{
3982 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
3983 HOST_WIDE_INT snit;
3984
3985 if (nit == -1)
3986 return -1;
3987
3988 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
3989
3990 /* If the computation overflows, return -1. */
3991 return snit < 0 ? -1 : snit;
3992}
3993
105e29c5 3994/* Sets NIT to the maximum number of executions of the latch of the
652c4c71
RG
3995 LOOP, plus one. If we have no reliable estimate, the function returns
3996 false, otherwise returns true. */
3997
3998bool
807e902e 3999max_stmt_executions (struct loop *loop, widest_int *nit)
652c4c71 4000{
807e902e 4001 widest_int nit_minus_one;
652c4c71
RG
4002
4003 if (!max_loop_iterations (loop, nit))
4004 return false;
4005
4006 nit_minus_one = *nit;
4007
807e902e 4008 *nit += 1;
652c4c71 4009
807e902e 4010 return wi::gtu_p (*nit, nit_minus_one);
652c4c71
RG
4011}
4012
105e29c5
JH
4013/* Sets NIT to the estimated maximum number of executions of the latch of the
4014 LOOP, plus one. If we have no likely estimate, the function returns
4015 false, otherwise returns true. */
4016
4017bool
4018likely_max_stmt_executions (struct loop *loop, widest_int *nit)
4019{
4020 widest_int nit_minus_one;
4021
4022 if (!likely_max_loop_iterations (loop, nit))
4023 return false;
4024
4025 nit_minus_one = *nit;
4026
4027 *nit += 1;
4028
4029 return wi::gtu_p (*nit, nit_minus_one);
4030}
4031
b4a9343c 4032/* Sets NIT to the estimated number of executions of the latch of the
652c4c71
RG
4033 LOOP, plus one. If we have no reliable estimate, the function returns
4034 false, otherwise returns true. */
b4a9343c
ZD
4035
4036bool
807e902e 4037estimated_stmt_executions (struct loop *loop, widest_int *nit)
b4a9343c 4038{
807e902e 4039 widest_int nit_minus_one;
b4a9343c 4040
652c4c71 4041 if (!estimated_loop_iterations (loop, nit))
b4a9343c
ZD
4042 return false;
4043
4044 nit_minus_one = *nit;
4045
807e902e 4046 *nit += 1;
b4a9343c 4047
807e902e 4048 return wi::gtu_p (*nit, nit_minus_one);
b4a9343c
ZD
4049}
4050
d73be268 4051/* Records estimates on numbers of iterations of loops. */
e9eb809d
ZD
4052
4053void
adb7eaa2 4054estimate_numbers_of_iterations (function *fn)
e9eb809d 4055{
e9eb809d
ZD
4056 struct loop *loop;
4057
6ac01510
ILT
4058 /* We don't want to issue signed overflow warnings while getting
4059 loop iteration estimates. */
4060 fold_defer_overflow_warnings ();
4061
adb7eaa2
RB
4062 FOR_EACH_LOOP_FN (fn, loop, 0)
4063 estimate_numbers_of_iterations (loop);
6ac01510
ILT
4064
4065 fold_undefer_and_ignore_overflow_warnings ();
e9eb809d
ZD
4066}
4067
e9eb809d
ZD
4068/* Returns true if statement S1 dominates statement S2. */
4069
bbc8a8dc 4070bool
355fe088 4071stmt_dominates_stmt_p (gimple *s1, gimple *s2)
e9eb809d 4072{
726a989a 4073 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
e9eb809d
ZD
4074
4075 if (!bb1
4076 || s1 == s2)
4077 return true;
4078
4079 if (bb1 == bb2)
4080 {
726a989a 4081 gimple_stmt_iterator bsi;
e9eb809d 4082
25c6036a
RG
4083 if (gimple_code (s2) == GIMPLE_PHI)
4084 return false;
4085
4086 if (gimple_code (s1) == GIMPLE_PHI)
4087 return true;
4088
726a989a
RB
4089 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
4090 if (gsi_stmt (bsi) == s1)
e9eb809d
ZD
4091 return true;
4092
4093 return false;
4094 }
4095
4096 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
4097}
4098
763f4527 4099/* Returns true when we can prove that the number of executions of
946e1bc7
ZD
4100 STMT in the loop is at most NITER, according to the bound on
4101 the number of executions of the statement NITER_BOUND->stmt recorded in
870ca331
JH
4102 NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
4103
4104 ??? This code can become quite a CPU hog - we can have many bounds,
4105 and a large basic block forces stmt_dominates_stmt_p to be queried
4106 many times, so the whole thing is O(n^2)
4107 per scev_probably_wraps_p invocation (which can happen n times).
4108
4109 It would make more sense (and give better answers) to remember BB
4110 bounds computed by discover_iteration_bound_by_body_walk. */
e9eb809d 4111
1e8552eb 4112static bool
355fe088 4113n_of_executions_at_most (gimple *stmt,
b8698a0f 4114 struct nb_iter_bound *niter_bound,
7aa20a86 4115 tree niter)
e9eb809d 4116{
807e902e 4117 widest_int bound = niter_bound->bound;
6e682d7e 4118 tree nit_type = TREE_TYPE (niter), e;
2f133f46 4119 enum tree_code cmp;
1e8552eb 4120
946e1bc7
ZD
4121 gcc_assert (TYPE_UNSIGNED (nit_type));
4122
4123 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
4124 the number of iterations is small. */
807e902e 4125 if (!wi::fits_to_tree_p (bound, nit_type))
946e1bc7
ZD
4126 return false;
4127
4128 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
4129 times. This means that:
b8698a0f 4130
870ca331
JH
4131 -- if NITER_BOUND->is_exit is true, then everything after
4132 it is executed at most NITER_BOUND->bound times.
946e1bc7
ZD
4133
4134 -- If NITER_BOUND->is_exit is false, and we can prove that whenever STMT
4135 is executed, NITER_BOUND->stmt is executed as well in the same
870ca331
JH
4136 iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
4137
4138 If we can determine that NITER_BOUND->stmt is always executed
4139 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4140 We conclude this when both statements belong to the same
4141 basic block, STMT comes before NITER_BOUND->stmt, and there are no
4142 statements with side effects in between. */
946e1bc7
ZD
4143
4144 if (niter_bound->is_exit)
4145 {
870ca331
JH
4146 if (stmt == niter_bound->stmt
4147 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4148 return false;
4149 cmp = GE_EXPR;
946e1bc7 4150 }
1e8552eb 4151 else
946e1bc7 4152 {
870ca331 4153 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
946e1bc7 4154 {
870ca331
JH
4155 gimple_stmt_iterator bsi;
4156 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4157 || gimple_code (stmt) == GIMPLE_PHI
4158 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4159 return false;
4160
4161 /* By stmt_dominates_stmt_p we already know that STMT appears
4162 before NITER_BOUND->STMT. We still need to test that the loop
4163 cannot be terminated by a side effect in between. */
4164 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4165 gsi_next (&bsi))
4166 if (gimple_has_side_effects (gsi_stmt (bsi)))
4167 return false;
807e902e
KZ
4168 bound += 1;
4169 if (bound == 0
4170 || !wi::fits_to_tree_p (bound, nit_type))
946e1bc7
ZD
4171 return false;
4172 }
4173 cmp = GT_EXPR;
4174 }
1e8552eb 4175
6e682d7e 4176 e = fold_binary (cmp, boolean_type_node,
807e902e 4177 niter, wide_int_to_tree (nit_type, bound));
6e682d7e 4178 return e && integer_nonzerop (e);
1e8552eb
SP
4179}
4180
d7f5de76 4181/* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
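/* E.g. pointer types and, under the default undefined-overflow semantics,
   signed integral types qualify; unsigned integral types wrap by
   definition and therefore do not.  */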
e9eb809d 4182
d7f5de76
ZD
4183bool
4184nowrap_type_p (tree type)
d7770457 4185{
341c5337 4186 if (ANY_INTEGRAL_TYPE_P (type)
eeef0e45 4187 && TYPE_OVERFLOW_UNDEFINED (type))
d7f5de76 4188 return true;
d7770457 4189
d7f5de76
ZD
4190 if (POINTER_TYPE_P (type))
4191 return true;
d7770457 4192
d7770457
SP
4193 return false;
4194}
4195
2f07b722 4196/* Return true if we can prove LOOP is exited before the evolution of the induction
70e1d145 4197 variable {BASE, STEP} overflows with respect to its type bound. */
2f07b722
BC
4198
4199static bool
4200loop_exits_before_overflow (tree base, tree step,
355fe088 4201 gimple *at_stmt, struct loop *loop)
2f07b722
BC
4202{
4203 widest_int niter;
4204 struct control_iv *civ;
4205 struct nb_iter_bound *bound;
4206 tree e, delta, step_abs, unsigned_base;
4207 tree type = TREE_TYPE (step);
4208 tree unsigned_type, valid_niter;
4209
4210 /* Don't issue signed overflow warnings. */
4211 fold_defer_overflow_warnings ();
4212
4213 /* Compute the number of iterations before we reach the bound of the
4214 type, and verify that the loop is exited before this occurs. */
4215 unsigned_type = unsigned_type_for (type);
4216 unsigned_base = fold_convert (unsigned_type, base);
4217
4218 if (tree_int_cst_sign_bit (step))
4219 {
4220 tree extreme = fold_convert (unsigned_type,
4221 lower_bound_in_type (type, type));
4222 delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4223 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4224 fold_convert (unsigned_type, step));
4225 }
4226 else
4227 {
4228 tree extreme = fold_convert (unsigned_type,
4229 upper_bound_in_type (type, type));
4230 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4231 step_abs = fold_convert (unsigned_type, step);
4232 }
4233
4234 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
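  /* A worked example (the numbers are for illustration only): for a signed
     char IV {100, +, 5}, DELTA = 127 - 100 = 27 and STEP_ABS = 5, so
     VALID_NITER = 5; if the latch is known to execute fewer than 5 times,
     the IV cannot reach the type bound.  */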
4235
adb7eaa2 4236 estimate_numbers_of_iterations (loop);
2f07b722
BC
4237
4238 if (max_loop_iterations (loop, &niter)
4239 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4240 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4241 wide_int_to_tree (TREE_TYPE (valid_niter),
4242 niter))) != NULL
4243 && integer_nonzerop (e))
4244 {
4245 fold_undefer_and_ignore_overflow_warnings ();
4246 return true;
4247 }
4248 if (at_stmt)
4249 for (bound = loop->bounds; bound; bound = bound->next)
4250 {
4251 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4252 {
4253 fold_undefer_and_ignore_overflow_warnings ();
4254 return true;
4255 }
4256 }
4257 fold_undefer_and_ignore_overflow_warnings ();
4258
4259 /* Try to prove loop is exited before {base, step} overflows with the
4260 help of analyzed loop control IV. This is done only for IVs with
4261 constant step because otherwise we don't have the information. */
4262 if (TREE_CODE (step) == INTEGER_CST)
f3c5f3a3 4263 {
f3c5f3a3
BC
4264 for (civ = loop->control_ivs; civ; civ = civ->next)
4265 {
4266 enum tree_code code;
42970a17 4267 tree civ_type = TREE_TYPE (civ->step);
2f07b722 4268
f3c5f3a3
BC
4269 /* Have to consider type difference because operand_equal_p ignores
4270 that for constants. */
4271 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4272 || element_precision (type) != element_precision (civ_type))
2f07b722 4273 continue;
2f07b722 4274
f3c5f3a3
BC
4275 /* Only consider control IV with same step. */
4276 if (!operand_equal_p (step, civ->step, 0))
4277 continue;
2f07b722 4278
f3c5f3a3 4279 /* Done proving if this is a no-overflow control IV. */
42970a17
BC
4280 if (operand_equal_p (base, civ->base, 0))
4281 return true;
4282
4283 /* Control IV is recorded after expanding simple operations,
4284 Here we expand base and compare it too. */
4285 tree expanded_base = expand_simple_operations (base);
4286 if (operand_equal_p (expanded_base, civ->base, 0))
f3c5f3a3 4287 return true;
2f07b722 4288
f3c5f3a3 4289 /* If this is a before stepping control IV, in other words, we have
2f07b722 4290
f3c5f3a3 4291 {civ_base, step} = {base + step, step}
8710e302 4292
f3c5f3a3
BC
4293 Because civ {base + step, step} doesn't overflow during loop
4294 iterations, {base, step} will not overflow if we can prove the
4295 operation "base + step" does not overflow. Specifically, we try
4296 to prove the conditions below are satisfied:
8710e302 4297
f3c5f3a3
BC
4298 base <= UPPER_BOUND (type) - step ;;step > 0
4299 base >= LOWER_BOUND (type) - step ;;step < 0
8710e302 4300
f3c5f3a3
BC
4301 by proving the reverse conditions are false using the loop's initial
4302 condition. */
4303 if (POINTER_TYPE_P (TREE_TYPE (base)))
4304 code = POINTER_PLUS_EXPR;
4305 else
4306 code = PLUS_EXPR;
8710e302 4307
42970a17
BC
4308 tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4309 tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
4310 expanded_base, step);
4311 if (operand_equal_p (stepped, civ->base, 0)
4312 || operand_equal_p (expanded_stepped, civ->base, 0))
f3c5f3a3 4313 {
42970a17
BC
4314 tree extreme;
4315
f3c5f3a3
BC
4316 if (tree_int_cst_sign_bit (step))
4317 {
4318 code = LT_EXPR;
4319 extreme = lower_bound_in_type (type, type);
4320 }
4321 else
4322 {
4323 code = GT_EXPR;
4324 extreme = upper_bound_in_type (type, type);
4325 }
4326 extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4327 e = fold_build2 (code, boolean_type_node, base, extreme);
8aa46dd2 4328 e = simplify_using_initial_conditions (loop, e);
f3c5f3a3
BC
4329 if (integer_zerop (e))
4330 return true;
4331 }
4332 }
4333 }
2f07b722
BC
4334
4335 return false;
4336}
4337
b24d9420
BC
4338/* VAR is a scev variable whose evolution part is the constant STEP. This function
4339 proves that VAR can't overflow by using value range info. If VAR's value
4340 range is [MIN, MAX], it can be proven by:
4341 MAX + step doesn't overflow ; if step > 0
4342 or
4343 MIN + step doesn't underflow ; if step < 0.
4344
4345 We can only do this if var is computed in every loop iteration, i.e, var's
4346 definition has to dominate loop latch. Consider below example:
4347
4348 {
4349 unsigned int i;
4350
4351 <bb 3>:
4352
4353 <bb 4>:
4354 # RANGE [0, 4294967294] NONZERO 65535
4355 # i_21 = PHI <0(3), i_18(9)>
4356 if (i_21 != 0)
4357 goto <bb 6>;
4358 else
4359 goto <bb 8>;
4360
4361 <bb 6>:
4362 # RANGE [0, 65533] NONZERO 65535
4363 _6 = i_21 + 4294967295;
4364 # RANGE [0, 65533] NONZERO 65535
4365 _7 = (long unsigned int) _6;
4366 # RANGE [0, 524264] NONZERO 524280
4367 _8 = _7 * 8;
4368 # PT = nonlocal escaped
4369 _9 = a_14 + _8;
4370 *_9 = 0;
4371
4372 <bb 8>:
4373 # RANGE [1, 65535] NONZERO 65535
4374 i_18 = i_21 + 1;
4375 if (i_18 >= 65535)
4376 goto <bb 10>;
4377 else
4378 goto <bb 9>;
4379
4380 <bb 9>:
4381 goto <bb 4>;
4382
4383 <bb 10>:
4384 return;
4385 }
4386
4387 VAR _6 doesn't overflow only under the pre-condition (i_21 != 0), so here we
4388 can't use _6 to prove no-overflow for _7. In fact, var _7 takes the value
4389 sequence (4294967295, 0, 1, ..., 65533) during the loop's lifetime, rather than
4390 (4294967295, 4294967296, ...). */
4391
4392static bool
4393scev_var_range_cant_overflow (tree var, tree step, struct loop *loop)
4394{
4395 tree type;
4396 wide_int minv, maxv, diff, step_wi;
4397 enum value_range_type rtype;
4398
4399 if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
4400 return false;
4401
4402 /* Check if VAR is evaluated in every loop iteration. That is not the case
4403 if VAR is a default definition or does not dominate the loop's latch. */
4404 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
4405 if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
4406 return false;
4407
4408 rtype = get_range_info (var, &minv, &maxv);
4409 if (rtype != VR_RANGE)
4410 return false;
4411
4412 /* VAR is a scev whose evolution part is STEP and whose value range info
4413 is [MIN, MAX]; we can prove that it does not overflow under the conditions:
4414
4415 type_MAX - MAX >= step ; if step > 0
4416 MIN - type_MIN >= |step| ; if step < 0.
4417
4418 Otherwise VAR would have to take a value outside its value range, which cannot happen. */
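  /* E.g. (numbers for illustration only) for an unsigned short VAR with
     range [0, 65533] and STEP 1: type_MAX - MAX = 65535 - 65533 = 2 >= 1,
     so stepping VAR once more cannot leave the type.  */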
4419 step_wi = step;
4420 type = TREE_TYPE (var);
4421 if (tree_int_cst_sign_bit (step))
4422 {
4423 diff = lower_bound_in_type (type, type);
4424 diff = minv - diff;
4425 step_wi = - step_wi;
4426 }
4427 else
4428 {
4429 diff = upper_bound_in_type (type, type);
4430 diff = diff - maxv;
4431 }
4432
4433 return (wi::geu_p (diff, step_wi));
4434}
4435
1e8552eb
SP
4436/* Return false only when the induction variable BASE + STEP * I is
4437 known to not overflow: i.e. when the number of iterations is small
4438 enough with respect to the step and initial condition in order to
4439 keep the evolution confined in TYPEs bounds. Return true when the
4440 iv is known to overflow or when the property is not computable.
b8698a0f 4441
d7f5de76
ZD
4442 USE_OVERFLOW_SEMANTICS is true if this function should assume that
4443 the rules for overflow of the given language apply (e.g., that signed
b24d9420
BC
4444 arithmetic in C does not overflow).
4445
4446 If VAR is an SSA name, this function also returns false if VAR can
4447 be proven not to overflow using value range info. */
1e8552eb
SP
4448
4449bool
b24d9420 4450scev_probably_wraps_p (tree var, tree base, tree step,
355fe088 4451 gimple *at_stmt, struct loop *loop,
525dc87d 4452 bool use_overflow_semantics)
1e8552eb 4453{
d7f5de76
ZD
4454 /* FIXME: We really need something like
4455 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4456
4457 We used to test for the following situation that frequently appears
4458 during address arithmetic:
b8698a0f 4459
d7770457
SP
4460 D.1621_13 = (long unsigned intD.4) D.1620_12;
4461 D.1622_14 = D.1621_13 * 8;
4462 D.1623_15 = (doubleD.29 *) D.1622_14;
d7770457 4463
d7f5de76
ZD
4464 And derived that the sequence corresponding to D_14
4465 can be proved to not wrap because it is used for computing a
4466 memory access; however, this is not really the case -- for example,
4467 if D_12 = (unsigned char) [254,+,1], then D_14 has values
4468 2032, 2040, 0, 8, ..., but the code is still legal. */
1e8552eb 4469
18aed06a 4470 if (chrec_contains_undetermined (base)
24938ce9 4471 || chrec_contains_undetermined (step))
d7f5de76 4472 return true;
d7770457 4473
6e682d7e 4474 if (integer_zerop (step))
d7f5de76 4475 return false;
ab02cc4e 4476
d7f5de76
ZD
4477 /* If we can use the fact that signed and pointer arithmetic does not
4478 wrap, we are done. */
dc5b3407 4479 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
d7f5de76 4480 return false;
ab02cc4e 4481
24938ce9
ZD
4482 /* To be able to use estimates on number of iterations of the loop,
4483 we must have an upper bound on the absolute value of the step. */
4484 if (TREE_CODE (step) != INTEGER_CST)
4485 return true;
4486
b24d9420
BC
4487 /* Check if VAR can be proven not to overflow using value range info. */
4488 if (var && TREE_CODE (var) == SSA_NAME
4489 && scev_var_range_cant_overflow (var, step, loop))
4490 return false;
4491
2f07b722
BC
4492 if (loop_exits_before_overflow (base, step, at_stmt, loop))
4493 return false;
1e8552eb
SP
4494
4495 /* At this point we still don't have a proof that the iv does not
4496 overflow: give up. */
4497 return true;
e9eb809d
ZD
4498}
4499
e9eb809d
ZD
4500/* Frees the information on upper bounds on numbers of iterations of LOOP. */
4501
c9639aae 4502void
adb7eaa2 4503free_numbers_of_iterations_estimates (struct loop *loop)
e9eb809d 4504{
2f07b722
BC
4505 struct control_iv *civ;
4506 struct nb_iter_bound *bound;
c9639aae
ZD
4507
4508 loop->nb_iterations = NULL;
946e1bc7 4509 loop->estimate_state = EST_NOT_COMPUTED;
2f07b722 4510 for (bound = loop->bounds; bound;)
e9eb809d 4511 {
2f07b722 4512 struct nb_iter_bound *next = bound->next;
9e2f83a5 4513 ggc_free (bound);
2f07b722 4514 bound = next;
e9eb809d 4515 }
e9eb809d 4516 loop->bounds = NULL;
2f07b722
BC
4517
4518 for (civ = loop->control_ivs; civ;)
4519 {
4520 struct control_iv *next = civ->next;
4521 ggc_free (civ);
4522 civ = next;
4523 }
4524 loop->control_ivs = NULL;
e9eb809d
ZD
4525}
4526
d73be268 4527/* Frees the information on upper bounds on numbers of iterations of loops. */
e9eb809d
ZD
4528
4529void
61183076 4530free_numbers_of_iterations_estimates (function *fn)
e9eb809d 4531{
e9eb809d
ZD
4532 struct loop *loop;
4533
61183076 4534 FOR_EACH_LOOP_FN (fn, loop, 0)
adb7eaa2 4535 free_numbers_of_iterations_estimates (loop);
e9eb809d 4536}
d5ab5675
ZD
4537
4538/* Substitute value VAL for ssa name NAME inside expressions held
4539 at LOOP. */
4540
4541void
4542substitute_in_loop_info (struct loop *loop, tree name, tree val)
4543{
d5ab5675 4544 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
d5ab5675 4545}