/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "fold-const.h"
#include "calls.h"
#include "intl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"


/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

struct bounds
{
  mpz_t below, up;
};


/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
        break;

      *var = op0;
      /* Always sign extend the offset.  */
      wi::to_mpz (op1, offset, SIGNED);
      if (negate)
        mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      wi::to_mpz (expr, offset, TYPE_SIGN (type));
      break;

    default:
      break;
    }
}
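
/* For instance, for EXPR = p + 7 this sets *VAR = p and OFFSET = 7;
   for EXPR = x - 3 (a MINUS_EXPR) it sets *VAR = x and OFFSET = -3;
   and for the constant EXPR = 13 it sets *VAR to zero and OFFSET = 13.  */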

/* From condition C0 CMP C1 derives information regarding the value range
   of VAR, which is of TYPE.  Results are stored to BELOW and UP.  */

static void
refine_value_range_using_guard (tree type, tree var,
                                tree c0, enum tree_code cmp, tree c1,
                                mpz_t below, mpz_t up)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1;
  mpz_t mint, maxt, minc1, maxc1;
  wide_int minv, maxv;
  bool no_wrap = nowrap_type_p (type);
  bool c0_ok, c1_ok;
  signop sgn = TYPE_SIGN (type);

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however,
         such a guard is unlikely to appear, so we do not bother with
         handling it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information,
         except for the case of comparing with the bounds.  */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same
         type as VAR.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (operand_equal_p (var, c0, 0))
        {
          mpz_t valc1;

          /* Case of comparing VAR with its below/up bounds.  */
          mpz_init (valc1);
          wi::to_mpz (c1, valc1, TYPE_SIGN (type));
          if (mpz_cmp (valc1, below) == 0)
            cmp = GT_EXPR;
          if (mpz_cmp (valc1, up) == 0)
            cmp = LT_EXPR;

          mpz_clear (valc1);
        }
      else
        {
          /* Case of comparing with the bounds of the type.  */
          wide_int min = wi::min_value (type);
          wide_int max = wi::max_value (type);

          if (wi::eq_p (c1, min))
            cmp = GT_EXPR;
          if (wi::eq_p (c1, max))
            cmp = LT_EXPR;
        }

      /* Quick return if no useful information.  */
      if (cmp == NE_EXPR)
        return;

      break;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VAR.  */
  if (operand_equal_p (var, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }
  else if (!operand_equal_p (var, varc0, 0))
    {
      mpz_clear (offc0);
      mpz_clear (offc1);
      return;
    }

  mpz_init (mint);
  mpz_init (maxt);
  get_type_static_bounds (type, mint, maxt);
  mpz_init (minc1);
  mpz_init (maxc1);
  /* Setup range information for varc1.  */
  if (integer_zerop (varc1))
    {
      wi::to_mpz (integer_zero_node, minc1, TYPE_SIGN (type));
      wi::to_mpz (integer_zero_node, maxc1, TYPE_SIGN (type));
    }
  else if (TREE_CODE (varc1) == SSA_NAME
           && INTEGRAL_TYPE_P (type)
           && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
    {
      gcc_assert (wi::le_p (minv, maxv, sgn));
      wi::to_mpz (minv, minc1, sgn);
      wi::to_mpz (maxv, maxc1, sgn);
    }
  else
    {
      mpz_set (minc1, mint);
      mpz_set (maxc1, maxt);
    }

  /* Compute valid range information for varc1 + offc1.  Note nothing
     useful can be derived if it overflows or underflows.  Overflow or
     underflow could happen when:

       offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
       offc1 < 0 && varc1 + offc1 < MIN_VAL (type).  */
  mpz_add (minc1, minc1, offc1);
  mpz_add (maxc1, maxc1, offc1);
  c1_ok = (no_wrap
           || mpz_sgn (offc1) == 0
           || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
           || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
  if (!c1_ok)
    goto end;

  if (mpz_cmp (minc1, mint) < 0)
    mpz_set (minc1, mint);
  if (mpz_cmp (maxc1, maxt) > 0)
    mpz_set (maxc1, maxt);

  if (cmp == LT_EXPR)
    {
      cmp = LE_EXPR;
      mpz_sub_ui (maxc1, maxc1, 1);
    }
  if (cmp == GT_EXPR)
    {
      cmp = GE_EXPR;
      mpz_add_ui (minc1, minc1, 1);
    }

  /* Compute range information for varc0.  If there is no overflow,
     the condition implies that

       (varc0) cmp (varc1 + offc1 - offc0)

     We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
     or the below bound if cmp is GE_EXPR.

     To prove there is no overflow/underflow, we need to check the
     four cases below:

       1) cmp == LE_EXPR && offc0 > 0

          (varc0 + offc0) doesn't overflow
          && (varc1 + offc1 - offc0) doesn't underflow

       2) cmp == LE_EXPR && offc0 < 0

          (varc0 + offc0) doesn't underflow
          && (varc1 + offc1 - offc0) doesn't overflow

          In this case, (varc0 + offc0) will never underflow if we can
          prove (varc1 + offc1 - offc0) doesn't overflow.

       3) cmp == GE_EXPR && offc0 < 0

          (varc0 + offc0) doesn't underflow
          && (varc1 + offc1 - offc0) doesn't overflow

       4) cmp == GE_EXPR && offc0 > 0

          (varc0 + offc0) doesn't overflow
          && (varc1 + offc1 - offc0) doesn't underflow

          In this case, (varc0 + offc0) will never overflow if we can
          prove (varc1 + offc1 - offc0) doesn't underflow.

     Note we only handle cases 2 and 4 in the code below.  */

  mpz_sub (minc1, minc1, offc0);
  mpz_sub (maxc1, maxc1, offc0);
  c0_ok = (no_wrap
           || mpz_sgn (offc0) == 0
           || (cmp == LE_EXPR
               && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
           || (cmp == GE_EXPR
               && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
  if (!c0_ok)
    goto end;

  if (cmp == LE_EXPR)
    {
      if (mpz_cmp (up, maxc1) > 0)
        mpz_set (up, maxc1);
    }
  else
    {
      if (mpz_cmp (below, minc1) < 0)
        mpz_set (below, minc1);
    }

 end:
  mpz_clear (mint);
  mpz_clear (maxt);
  mpz_clear (minc1);
  mpz_clear (maxc1);
  mpz_clear (offc0);
  mpz_clear (offc1);
}
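
/* For instance, if a guard i < n dominates the loop header and VRP knows
   that n is in [0, 100], the LT_EXPR handling above turns the guard into
   i <= n - 1 and tightens UP for i to 99, the largest possible value of
   n - 1.  */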

/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
                       mpz_t min, mpz_t max)
{
  int cnt = 0;
  mpz_t minm, maxm;
  basic_block bb;
  wide_int minv, maxv;
  enum value_range_type rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gphi_iterator gsi;

      /* Either for VAR itself...  */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
         PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          wide_int minc, maxc;
          if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
              && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
                  == VR_RANGE))
            {
              if (rtype != VR_RANGE)
                {
                  rtype = VR_RANGE;
                  minv = minc;
                  maxv = maxc;
                }
              else
                {
                  minv = wi::max (minv, minc, sgn);
                  maxv = wi::min (maxv, maxc, sgn);
                  /* If the PHI result range is inconsistent with
                     the VAR range, give up on looking at the PHI
                     results.  This can happen if VR_UNDEFINED is
                     involved.  */
                  if (wi::gt_p (minv, maxv, sgn))
                    {
                      rtype = get_range_info (var, &minv, &maxv);
                      break;
                    }
                }
            }
        }
      mpz_init (minm);
      mpz_init (maxm);
      if (rtype != VR_RANGE)
        {
          mpz_set (minm, min);
          mpz_set (maxm, max);
        }
      else
        {
          gcc_assert (wi::le_p (minv, maxv, sgn));
          wi::to_mpz (minv, minm, sgn);
          wi::to_mpz (maxv, maxm, sgn);
        }
      /* Now walk the dominators of the loop header and use the entry
         guards to refine the estimates.  */
      for (bb = loop->header;
           bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
           bb = get_immediate_dominator (CDI_DOMINATORS, bb))
        {
          edge e;
          tree c0, c1;
          gimple *cond;
          enum tree_code cmp;

          if (!single_pred_p (bb))
            continue;
          e = single_pred_edge (bb);

          if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
            continue;

          cond = last_stmt (e->src);
          c0 = gimple_cond_lhs (cond);
          cmp = gimple_cond_code (cond);
          c1 = gimple_cond_rhs (cond);

          if (e->flags & EDGE_FALSE_VALUE)
            cmp = invert_tree_comparison (cmp, false);

          refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
          ++cnt;
        }

      mpz_add (minm, minm, off);
      mpz_add (maxm, maxm, off);
      /* If the computation may not wrap or OFF is zero, then this
         is always fine.  If OFF is negative and MINM + OFF isn't
         smaller than type's minimum, or OFF is positive and
         MAXM + OFF isn't bigger than type's maximum, use the more
         precise range too.  */
      if (nowrap_type_p (type)
          || mpz_sgn (off) == 0
          || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
          || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
        {
          mpz_set (min, minm);
          mpz_set (max, maxm);
          mpz_clear (minm);
          mpz_clear (maxm);
          return;
        }
      mpz_clear (minm);
      mpz_clear (maxm);
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}

/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
                                    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
          them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
          expressions are var + X - M and var + Y, where M is the range of
          the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Their difference again
          is X - Y - M.
     Therefore, if the arithmetic in TYPE does not overflow, then the
     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
        mpz_sub (bnds->below, bnds->below, m);
      else
        mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
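
/* For instance, in an 8-bit type (M = 256) that may wrap, X = 200 and
   Y = 100 give bnds->up = 100 and bnds->below = 100 - 256 = -156: the
   difference is 100 unless exactly one of var + X and var + Y wraps,
   in which case it is -156.  */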

/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
                           tree vary, mpz_t offy,
                           tree c0, enum tree_code cmp, tree c1,
                           bounds *bnds)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
         a guard is unlikely to appear, so we do not bother with handling
         it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information, except
         for the special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same type
         as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
          && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
        {
          cmp = GT_EXPR;
          break;
        }
      if (TYPE_MAX_VALUE (type)
          && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
        {
          cmp = LT_EXPR;
          break;
        }

      return;
    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      std::swap (varx, vary);
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
        VARX + OFFC0 overflows, but VARX + OFFX does not.
        This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
        VARY + OFFC1 underflows and VARY + OFFY does not.
        This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
              || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
              || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
        mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
        {
          mpz_neg (bnd, bnd);
          if (mpz_cmp (bnds->below, bnd) < 0)
            mpz_set (bnds->below, bnd);
        }
      else
        {
          if (mpz_cmp (bnd, bnds->up) < 0)
            mpz_set (bnds->up, bnd);
        }
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
 end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}

/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or SSA names offsetted by
   an integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple *cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
         offsets.  Matters are a bit more complicated when the
         addition of the offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
         estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
        cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
                                 c0, cmp, c1, bnds);
      ++cnt;
    }

 end:
  mpz_clear (offx);
  mpz_clear (offy);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}

/* Returns inverse of X modulo 2^s, where MASK = 2^s - 1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
        {
          irslt *= ix;
          ix *= ix;
        }
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
        {
          rslt = int_const_binop (MULT_EXPR, rslt, x);
          x = int_const_binop (MULT_EXPR, x, x);
        }
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
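
/* Both branches above compute X^(2^(s-1) - 1) masked to s bits, which for
   odd X is the inverse, since then X^(2^(s-1)) == 1 (mod 2^s).  For
   instance, for X = 3 and MASK = 15 (s = 4) the result is
   3^7 & 15 = 2187 & 15 = 11, and indeed 3 * 11 = 33 == 1 (mod 16).  */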

/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable eventually
   reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
                             bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
                       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
          && TREE_CODE (s) == INTEGER_CST
          && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
          && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
         the induction variable overflows (unless the loop is exited in some
         other way before).  Note that the actual induction variable in the
         loop (which ranges from base to final instead of from 0 to C) may
         overflow, in which case BNDS.up will not be giving a correct upper
         bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
  if (!no_overflow)
    {
      max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ... */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
         the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
        wi::to_mpz (c, bnd, UNSIGNED);
      else if (bnds_u_valid)
        mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  wi::to_mpz (s, d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}
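
/* For instance, for an exit condition 4 * i != 100 in an 8-bit unsigned
   type, C = 100 is an exact multiple of S = 4 and BND becomes
   100 / 4 = 25.  Were C not known to be divisible by S (and the iv free
   to overflow), only the period-based bound 2^(8 - 2) - 1 = 63 would be
   derived.  */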

/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   eventually reaches the value FINAL (we derived this earlier, and possibly
   set NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
                         tree final, struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;
  tree e;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
                        fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv->base),
                       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, final),
                       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
                               exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* Compute no-overflow information for the control iv.  Note that since
     we are handling NE_EXPR, if the iv base equals the final value, the
     loop exits immediately and the iv does not overflow.  */
  if (tree_int_cst_sign_bit (iv->step))
    e = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
  else
    e = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
  e = simplify_using_initial_conditions (loop, e);
  if (integer_onep (e)
      && (integer_onep (s)
          || (TREE_CODE (c) == INTEGER_CST
              && TREE_CODE (s) == INTEGER_CST
              && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)))
    {
      niter->control.no_overflow = true;
    }

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }

  /* Let gcd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
                               (TYPE_PRECISION (niter_type)
                                - tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
                               build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
         assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
                                assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}
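
/* Continuing the 4 * i != 100 example in an 8-bit unsigned type: bits = 2,
   so d = 4, bound masks the low 6 bits, s becomes 1 and c becomes 25,
   giving niter = (25 * inverse (1, 63)) & 63 = 25 -- the first solution
   of 4 * i == 100 (mod 256).  */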

/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
                               struct tree_niter_desc *niter,
                               tree *delta, tree step,
                               bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  wi::to_mpz (mod, mmod, UNSIGNED);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to which the pointer points
     being placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
      (iv0->no_overflow && integer_nonzerop (iv0->step))
      || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
         computation does not overflow, and that
         iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
        {
          bound = fold_build2 (MINUS_EXPR, type1,
                               TYPE_MAX_VALUE (type1), tmod);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
          if (integer_zerop (assumption))
            goto end;
        }
      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              iv0->base,
                              fold_build_pointer_plus (iv1->base, tmod));
      else
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              iv0->base,
                              fold_build2 (PLUS_EXPR, type1,
                                           iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
         computation does not overflow, and that
         iv0->base - MOD <= iv1->base.  */
      if (!fv_comp_no_overflow)
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type1), tmod);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
          if (integer_zerop (assumption))
            goto end;
        }
      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build_pointer_plus (iv0->base,
                                                       fold_build1 (NEGATE_EXPR,
                                                                    type1, tmod)),
                              iv1->base);
      else
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build2 (MINUS_EXPR, type1,
                                           iv0->base, tmod),
                              iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions,
                                      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                      niter->may_be_zero,
                                      noloop);
  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
 end:
  mpz_clear (mmod);
  return ret;
}

/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
        return true;

      /* If iv0->base is a constant, we can determine the last value before
         overflow precisely; otherwise we conservatively assume
         MAX - STEP + 1.  */

      if (TREE_CODE (iv0->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, TYPE_MAX_VALUE (type)),
                           fold_convert (niter_type, iv0->base));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
                           TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
        return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, iv1->base),
                           fold_convert (niter_type, TYPE_MIN_VALUE (type)));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
                           TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}

/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  widest_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

       -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the
     loop, and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables
     us to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */

  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
  else
    {
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
                   /* For pointers, only values lying inside a single object
                      can be compared or manipulated by pointer arithmetic.
                      GCC in general does not allow or handle objects larger
                      than half of the address space, hence the upper bound
                      is satisfied for pointers.  */
                   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
                          iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
         the address 0 never belongs to any object, we can assume this for
         pointers.  */
      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type), diff);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
        }

      /* And then we can compute iv0->base - diff, and compare it with
         iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
                          iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MAX_VALUE (type), diff);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
        }

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);
  if (!rolls_p)
    {
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                        niter->may_be_zero, mbz);
    }
}

/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */

static bool
number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
                         affine_iv *iv1, struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv1->base),
                       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

         or

         for (i = iv1->base; i > iv0->base; i--).

         In both cases # of iterations is iv1->base - iv0->base, assuming that
         iv1->base >= iv0->base.

         First try to derive a lower bound on the value of
         iv1->base - iv0->base, computed in full precision.  If the difference
         is nonnegative, we are done, otherwise we must record the
         condition.  */

      if (mpz_sgn (bnds->below) < 0)
        niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
                                          iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
                                     TYPE_SIGN (niter_type));
      niter->control.no_overflow = true;
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
                         fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
                                     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
         zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (loop, type, &zps,
                                      delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
                   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  wi::to_mpz (step, mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}
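
/* For instance, for (i = 0; i < n; i += 4) with symbolic n reaches this
   point with delta = n and step = 4, so the result is
   niter = (n + 3) / 4 in the unsigned variant of the type, with
   assert_loop_rolls_lt recording the case where the loop does not roll
   at all.  */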

/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (struct loop *loop, tree type, affine_iv *iv0,
                         affine_iv *iv1, struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to which the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv1->base, TYPE_MAX_VALUE (type));
      else
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
        return false;
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
        iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
        iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
                                 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
                             iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, 1, type1);

  return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
                                  bnds);
}
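
/* For instance, for (i = 0; i <= n; i++) is handled here as i < n + 1,
   recording the assumption n != TYPE_MAX_VALUE when the exit is not
   already known to be taken; for n equal to the maximal value the
   original loop would roll forever.  */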

/* Dumps description of affine induction variable IV to FILE.  */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}

/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on the left-hand side of the
   comparison is IV0, the right-hand side is IV1.  Both induction variables
   must have type TYPE, which must be an integer or pointer type.  The steps
   of the ivs must be constants (or NULL_TREE, which is interpreted as
   constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   If EVERY_ITERATION is true, we know the test is executed on every iteration.

   The results (number of iterations and assumptions as described in
   comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */

static bool
number_of_iterations_cond (struct loop *loop,
                           tree type, affine_iv *iv0, enum tree_code code,
                           affine_iv *iv1, struct tree_niter_desc *niter,
                           bool only_exit, bool every_iteration)
{
  bool exit_must_be_taken = false, ret;
  bounds bnds;

  /* If the test is not executed every iteration, wrapping may make the test
     pass again.
     TODO: the overflow case can still be used as an unreliable estimate of
     the upper bound.  But we have no API to pass it down to number of
     iterations code and, at present, it will not use it anyway.  */
  if (!every_iteration
      && (!iv0->no_overflow || !iv1->no_overflow
          || code == NE_EXPR || code == EQ_EXPR))
    return false;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of the information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0.  */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->max = 0;
  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs.  */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && integer_zerop (iv0->step)))
    {
      std::swap (iv0, iv1);
      code = swap_tree_comparison (code);
    }

  if (POINTER_TYPE_P (type))
    {
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
         to the same object.  If they do, the control variable cannot wrap
         (as wrapping around the bounds of memory will never return a pointer
         that would be guaranteed to point to the same object, even if we
         avoid undefined behavior by casting to size_t and back).  */
      iv0->no_overflow = true;
      iv1->no_overflow = true;
    }

  /* If the control induction variable does not overflow and the only exit
     from the loop is the one that we analyze, we know it must be taken
     eventually.  */
  if (only_exit)
    {
      if (!integer_zerop (iv0->step) && iv0->no_overflow)
        exit_must_be_taken = true;
      else if (!integer_zerop (iv1->step) && iv1->no_overflow)
        exit_must_be_taken = true;
    }

  /* We can handle the case when neither of the sides of the comparison is
     invariant, provided that the test is NE_EXPR.  This rarely occurs in
     practice, but it is simple enough to manage.  */
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
    {
      tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
      if (code != NE_EXPR)
        return false;

      iv0->step = fold_binary_to_constant (MINUS_EXPR, step_type,
                                           iv0->step, iv1->step);
      iv0->no_overflow = false;
      iv1->step = build_int_cst (step_type, 0);
      iv1->no_overflow = true;
    }

  /* If the result of the comparison is a constant, the loop is weird.  More
     precise handling would be possible, but the situation is not common enough
     to waste time on it.  */
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
    return false;

  /* Ignore loops of while (i-- < 10) type.  */
  if (code != NE_EXPR)
    {
      if (iv0->step && tree_int_cst_sign_bit (iv0->step))
        return false;

      if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
        return false;
    }

  /* If the loop exits immediately, there is nothing to do.  */
  tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
  if (tem && integer_zerop (tem))
    {
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
      niter->max = 0;
      return true;
    }

  /* OK, now we know we have a sensible loop.  Handle several cases, depending
     on what comparison operator is used.  */
  bound_difference (loop, iv1->base, iv0->base, &bnds);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
               "Analyzing # of iterations of loop %d\n", loop->num);

      fprintf (dump_file, "  exit condition ");
      dump_affine_iv (dump_file, iv0);
      fprintf (dump_file, " %s ",
               code == NE_EXPR ? "!="
               : code == LT_EXPR ? "<"
               : "<=");
      dump_affine_iv (dump_file, iv1);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "  bounds on difference of bases: ");
      mpz_out_str (dump_file, 10, bnds.below);
      fprintf (dump_file, " ... ");
      mpz_out_str (dump_file, 10, bnds.up);
      fprintf (dump_file, "\n");
    }

  switch (code)
    {
    case NE_EXPR:
      gcc_assert (integer_zerop (iv1->step));
      ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
                                     exit_must_be_taken, &bnds);
      break;

    case LT_EXPR:
      ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
                                     exit_must_be_taken, &bnds);
      break;

    case LE_EXPR:
      ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
                                     exit_must_be_taken, &bnds);
      break;

    default:
      gcc_unreachable ();
    }

  mpz_clear (bnds.up);
  mpz_clear (bnds.below);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (ret)
        {
          fprintf (dump_file, "  result:\n");
          if (!integer_nonzerop (niter->assumptions))
            {
              fprintf (dump_file, "    under assumptions ");
              print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          if (!integer_zerop (niter->may_be_zero))
            {
              fprintf (dump_file, "    zero if ");
              print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          fprintf (dump_file, "    # of iterations ");
          print_generic_expr (dump_file, niter->niter, TDF_SLIM);
          fprintf (dump_file, ", bounded by ");
          print_decu (niter->max, dump_file);
          fprintf (dump_file, "\n");
        }
      else
        fprintf (dump_file, "  failed\n\n");
    }
  return ret;
}

/* Substitute NEW_TREE for OLD in EXPR and fold the result.  */

static tree
simplify_replace_tree (tree expr, tree old, tree new_tree)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, se;

  if (!expr)
    return NULL_TREE;

  /* Do not bother to replace constants.  */
  if (CONSTANT_CLASS_P (old))
    return expr;

  if (expr == old
      || operand_equal_p (expr, old, 0))
    return unshare_expr (new_tree);

  if (!EXPR_P (expr))
    return expr;

  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    {
      e = TREE_OPERAND (expr, i);
      se = simplify_replace_tree (e, old, new_tree);
      if (e == se)
        continue;

      if (!ret)
        ret = copy_node (expr);

      TREE_OPERAND (ret, i) = se;
    }

  return (ret ? fold (ret) : expr);
}
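
/* For instance, replacing OLD = a by NEW_TREE = 5 in the expression
   a + a * b rebuilds it as 5 + 5 * b and folds the result; expressions
   not containing OLD are returned unchanged.  */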

/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression.  If STOP is specified, stop
   expanding if EXPR equals it.  */

1815 tree
1816 expand_simple_operations (tree expr, tree stop)
1817 {
1818 unsigned i, n;
1819 tree ret = NULL_TREE, e, ee, e1;
1820 enum tree_code code;
1821 gimple *stmt;
1822
1823 if (expr == NULL_TREE)
1824 return expr;
1825
1826 if (is_gimple_min_invariant (expr))
1827 return expr;
1828
1829 code = TREE_CODE (expr);
1830 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1831 {
1832 n = TREE_OPERAND_LENGTH (expr);
1833 for (i = 0; i < n; i++)
1834 {
1835 e = TREE_OPERAND (expr, i);
1836 ee = expand_simple_operations (e, stop);
1837 if (e == ee)
1838 continue;
1839
1840 if (!ret)
1841 ret = copy_node (expr);
1842
1843 TREE_OPERAND (ret, i) = ee;
1844 }
1845
1846 if (!ret)
1847 return expr;
1848
1849 fold_defer_overflow_warnings ();
1850 ret = fold (ret);
1851 fold_undefer_and_ignore_overflow_warnings ();
1852 return ret;
1853 }
1854
1855 /* Stop if EXPR is not an SSA name or is the one we do not want to expand. */
1856 if (TREE_CODE (expr) != SSA_NAME || expr == stop)
1857 return expr;
1858
1859 stmt = SSA_NAME_DEF_STMT (expr);
1860 if (gimple_code (stmt) == GIMPLE_PHI)
1861 {
1862 basic_block src, dest;
1863
1864 if (gimple_phi_num_args (stmt) != 1)
1865 return expr;
1866 e = PHI_ARG_DEF (stmt, 0);
1867
1868 /* Avoid propagating through loop exit phi nodes, which
1869 could break loop-closed SSA form restrictions. */
1870 dest = gimple_bb (stmt);
1871 src = single_pred (dest);
1872 if (TREE_CODE (e) == SSA_NAME
1873 && src->loop_father != dest->loop_father)
1874 return expr;
1875
1876 return expand_simple_operations (e, stop);
1877 }
1878 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1879 return expr;
1880
1881 /* Avoid expanding to expressions that contain SSA names that need
1882 to take part in abnormal coalescing. */
1883 ssa_op_iter iter;
1884 FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
1885 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
1886 return expr;
1887
1888 e = gimple_assign_rhs1 (stmt);
1889 code = gimple_assign_rhs_code (stmt);
1890 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1891 {
1892 if (is_gimple_min_invariant (e))
1893 return e;
1894
1895 if (code == SSA_NAME)
1896 return expand_simple_operations (e, stop);
1897
1898 return expr;
1899 }
1900
1901 switch (code)
1902 {
1903 CASE_CONVERT:
1904 /* Casts are simple. */
1905 ee = expand_simple_operations (e, stop);
1906 return fold_build1 (code, TREE_TYPE (expr), ee);
1907
1908 case PLUS_EXPR:
1909 case MINUS_EXPR:
1910 if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
1911 && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
1912 return expr;
1913 /* Fallthru. */
1914 case POINTER_PLUS_EXPR:
1915 /* And increments and decrements by a constant are simple. */
1916 e1 = gimple_assign_rhs2 (stmt);
1917 if (!is_gimple_min_invariant (e1))
1918 return expr;
1919
1920 ee = expand_simple_operations (e, stop);
1921 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
1922
1923 default:
1924 return expr;
1925 }
1926 }
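
/* For instance (hypothetical GIMPLE), given the definitions

     b_2 = a_1 + 4;
     c_3 = (unsigned int) b_2;

   expand_simple_operations (c_3, NULL_TREE) returns the expression
   (unsigned int) (a_1 + 4): the cast and the increment by a constant are
   expanded, while a_1 is kept, assuming its definition is not one of the
   simple forms handled above.  */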
1927
1928 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1929 expression (or EXPR unchanged, if no simplification was possible). */
1930
1931 static tree
1932 tree_simplify_using_condition_1 (tree cond, tree expr, tree stop)
1933 {
1934 bool changed;
1935 tree e, te, e0, e1, e2, notcond;
1936 enum tree_code code = TREE_CODE (expr);
1937
1938 if (code == INTEGER_CST)
1939 return expr;
1940
1941 if (code == TRUTH_OR_EXPR
1942 || code == TRUTH_AND_EXPR
1943 || code == COND_EXPR)
1944 {
1945 changed = false;
1946
1947 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0), stop);
1948 if (TREE_OPERAND (expr, 0) != e0)
1949 changed = true;
1950
1951 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1), stop);
1952 if (TREE_OPERAND (expr, 1) != e1)
1953 changed = true;
1954
1955 if (code == COND_EXPR)
1956 {
1957 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2), stop);
1958 if (TREE_OPERAND (expr, 2) != e2)
1959 changed = true;
1960 }
1961 else
1962 e2 = NULL_TREE;
1963
1964 if (changed)
1965 {
1966 if (code == COND_EXPR)
1967 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1968 else
1969 expr = fold_build2 (code, boolean_type_node, e0, e1);
1970 }
1971
1972 return expr;
1973 }
1974
1975 /* In case COND is an equality, we may be able to simplify EXPR by copy/constant
1976 propagation, and vice versa. Fold does not handle this, since it is
1977 considered too expensive. */
1978 if (TREE_CODE (cond) == EQ_EXPR)
1979 {
1980 e0 = TREE_OPERAND (cond, 0);
1981 e1 = TREE_OPERAND (cond, 1);
1982
1983 /* We know that e0 == e1. Check whether we can simplify EXPR
1984 using this fact. */
1985 e = simplify_replace_tree (expr, e0, e1);
1986 if (integer_zerop (e) || integer_nonzerop (e))
1987 return e;
1988
1989 e = simplify_replace_tree (expr, e1, e0);
1990 if (integer_zerop (e) || integer_nonzerop (e))
1991 return e;
1992 }
1993 if (TREE_CODE (expr) == EQ_EXPR)
1994 {
1995 e0 = TREE_OPERAND (expr, 0);
1996 e1 = TREE_OPERAND (expr, 1);
1997
1998 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
1999 e = simplify_replace_tree (cond, e0, e1);
2000 if (integer_zerop (e))
2001 return e;
2002 e = simplify_replace_tree (cond, e1, e0);
2003 if (integer_zerop (e))
2004 return e;
2005 }
2006 if (TREE_CODE (expr) == NE_EXPR)
2007 {
2008 e0 = TREE_OPERAND (expr, 0);
2009 e1 = TREE_OPERAND (expr, 1);
2010
2011 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
2012 e = simplify_replace_tree (cond, e0, e1);
2013 if (integer_zerop (e))
2014 return boolean_true_node;
2015 e = simplify_replace_tree (cond, e1, e0);
2016 if (integer_zerop (e))
2017 return boolean_true_node;
2018 }
2019
2020 te = expand_simple_operations (expr, stop);
2021
2022 /* Check whether COND ==> EXPR. */
2023 notcond = invert_truthvalue (cond);
2024 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
2025 if (e && integer_nonzerop (e))
2026 return e;
2027
2028 /* Check whether COND ==> not EXPR. */
2029 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
2030 if (e && integer_zerop (e))
2031 return e;
2032
2033 return expr;
2034 }
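
/* A worked example of the equality case above (made-up names): with
   COND = (a_5 == 0) and EXPR = (a_5 < 3), substituting 0 for a_5 in EXPR
   folds to true, so EXPR simplifies to boolean_true_node.  The implication
   checks at the end cover non-equalities, e.g. COND = (n_7 > 16) simplifies
   EXPR = (n_7 > 0) to true when fold can prove !COND || EXPR.  */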
2035
2036 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2037 expression (or EXPR unchanged, if no simplification was possible).
2038 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2039 of simple operations in definitions of ssa names in COND are expanded,
2040 so that things like casts or incrementing the value of the bound before
2041 the loop do not cause us to fail. */
2042
2043 static tree
2044 tree_simplify_using_condition (tree cond, tree expr, tree stop)
2045 {
2046 cond = expand_simple_operations (cond, stop);
2047
2048 return tree_simplify_using_condition_1 (cond, expr, stop);
2049 }
2050
2051 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2052 Returns the simplified expression (or EXPR unchanged, if no
2053 simplification was possible). */
2054
2055 tree
2056 simplify_using_initial_conditions (struct loop *loop, tree expr, tree stop)
2057 {
2058 edge e;
2059 basic_block bb;
2060 gimple *stmt;
2061 tree cond;
2062 int cnt = 0;
2063
2064 if (TREE_CODE (expr) == INTEGER_CST)
2065 return expr;
2066
2067 /* Limit walking the dominators to avoid quadratic behavior in
2068 the number of BBs times the number of loops in degenerate
2069 cases. */
2070 for (bb = loop->header;
2071 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2072 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2073 {
2074 if (!single_pred_p (bb))
2075 continue;
2076 e = single_pred_edge (bb);
2077
2078 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2079 continue;
2080
2081 stmt = last_stmt (e->src);
2082 cond = fold_build2 (gimple_cond_code (stmt),
2083 boolean_type_node,
2084 gimple_cond_lhs (stmt),
2085 gimple_cond_rhs (stmt));
2086 if (e->flags & EDGE_FALSE_VALUE)
2087 cond = invert_truthvalue (cond);
2088 expr = tree_simplify_using_condition (cond, expr, stop);
2089 /* Stop once EXPR has simplified to a constant value. */
2090 if (expr && (integer_zerop (expr) || integer_nonzerop (expr)))
2091 break;
2092
2093 ++cnt;
2094 }
2095
2096 return expr;
2097 }
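
/* Sketch: for a loop whose header is reached only through the guard

     if (n_1 > 0)
       ...loop...

   the dominator walk above picks up the condition n_1 > 0, so an
   assumption such as n_1 >= 1 recorded by the niter analysis can be
   simplified to true before it is ever exposed to users of the niter
   information.  */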
2098
2099 /* Tries to simplify EXPR using the evolutions of the loop invariants
2100 in the superloops of LOOP. Returns the simplified expression
2101 (or EXPR unchanged, if no simplification was possible). */
2102
2103 static tree
2104 simplify_using_outer_evolutions (struct loop *loop, tree expr)
2105 {
2106 enum tree_code code = TREE_CODE (expr);
2107 bool changed;
2108 tree e, e0, e1, e2;
2109
2110 if (is_gimple_min_invariant (expr))
2111 return expr;
2112
2113 if (code == TRUTH_OR_EXPR
2114 || code == TRUTH_AND_EXPR
2115 || code == COND_EXPR)
2116 {
2117 changed = false;
2118
2119 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2120 if (TREE_OPERAND (expr, 0) != e0)
2121 changed = true;
2122
2123 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2124 if (TREE_OPERAND (expr, 1) != e1)
2125 changed = true;
2126
2127 if (code == COND_EXPR)
2128 {
2129 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2130 if (TREE_OPERAND (expr, 2) != e2)
2131 changed = true;
2132 }
2133 else
2134 e2 = NULL_TREE;
2135
2136 if (changed)
2137 {
2138 if (code == COND_EXPR)
2139 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2140 else
2141 expr = fold_build2 (code, boolean_type_node, e0, e1);
2142 }
2143
2144 return expr;
2145 }
2146
2147 e = instantiate_parameters (loop, expr);
2148 if (is_gimple_min_invariant (e))
2149 return e;
2150
2151 return expr;
2152 }
2153
2154 /* Returns true if EXIT is the only possible exit from LOOP. */
2155
2156 bool
2157 loop_only_exit_p (const struct loop *loop, const_edge exit)
2158 {
2159 basic_block *body;
2160 gimple_stmt_iterator bsi;
2161 unsigned i;
2162 gimple *call;
2163
2164 if (exit != single_exit (loop))
2165 return false;
2166
2167 body = get_loop_body (loop);
2168 for (i = 0; i < loop->num_nodes; i++)
2169 {
2170 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2171 {
2172 call = gsi_stmt (bsi);
2173 if (gimple_code (call) != GIMPLE_CALL)
2174 continue;
2175
2176 if (gimple_has_side_effects (call))
2177 {
2178 free (body);
2179 return false;
2180 }
2181 }
2182 }
2183
2184 free (body);
2185 return true;
2186 }
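
/* Note that a call with side effects in the loop body, e.g. one that might
   call exit () or longjmp (), is treated above as another way of leaving
   the loop, so loop_only_exit_p returns false for it even when the CFG
   contains a single exit edge.  */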
2187
2188 /* Stores description of number of iterations of LOOP derived from
2189 EXIT (an exit edge of the LOOP) in NITER. Returns true if some
2190 useful information could be derived (and the fields of NITER have the
2191 meaning described in comments at struct tree_niter_desc
2192 declaration), false otherwise. If WARN is true and
2193 -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
2194 potentially unsafe assumptions.
2195 When EVERY_ITERATION is true, only tests that are known to be executed
2196 every iteration are considered (i.e. only tests that alone bound the loop).
2197 */
2198
2199 bool
2200 number_of_iterations_exit (struct loop *loop, edge exit,
2201 struct tree_niter_desc *niter,
2202 bool warn, bool every_iteration)
2203 {
2204 gimple *last;
2205 gcond *stmt;
2206 tree type;
2207 tree op0, op1;
2208 enum tree_code code;
2209 affine_iv iv0, iv1;
2210 bool safe;
2211
2212 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2213
2214 if (every_iteration && !safe)
2215 return false;
2216
2217 niter->assumptions = boolean_false_node;
2218 niter->control.base = NULL_TREE;
2219 niter->control.step = NULL_TREE;
2220 niter->control.no_overflow = false;
2221 last = last_stmt (exit->src);
2222 if (!last)
2223 return false;
2224 stmt = dyn_cast <gcond *> (last);
2225 if (!stmt)
2226 return false;
2227
2228 /* We want the condition for staying inside the loop. */
2229 code = gimple_cond_code (stmt);
2230 if (exit->flags & EDGE_TRUE_VALUE)
2231 code = invert_tree_comparison (code, false);
2232
2233 switch (code)
2234 {
2235 case GT_EXPR:
2236 case GE_EXPR:
2237 case LT_EXPR:
2238 case LE_EXPR:
2239 case NE_EXPR:
2240 break;
2241
2242 default:
2243 return false;
2244 }
2245
2246 op0 = gimple_cond_lhs (stmt);
2247 op1 = gimple_cond_rhs (stmt);
2248 type = TREE_TYPE (op0);
2249
2250 if (TREE_CODE (type) != INTEGER_TYPE
2251 && !POINTER_TYPE_P (type))
2252 return false;
2253
2254 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
2255 return false;
2256 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
2257 return false;
2258
2259 /* We don't want to see undefined signed overflow warnings while
2260 computing the number of iterations. */
2261 fold_defer_overflow_warnings ();
2262
2263 iv0.base = expand_simple_operations (iv0.base);
2264 iv1.base = expand_simple_operations (iv1.base);
2265 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2266 loop_only_exit_p (loop, exit), safe))
2267 {
2268 fold_undefer_and_ignore_overflow_warnings ();
2269 return false;
2270 }
2271
2272 if (optimize >= 3)
2273 {
2274 niter->assumptions = simplify_using_outer_evolutions (loop,
2275 niter->assumptions);
2276 niter->may_be_zero = simplify_using_outer_evolutions (loop,
2277 niter->may_be_zero);
2278 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2279 }
2280
2281 niter->assumptions
2282 = simplify_using_initial_conditions (loop,
2283 niter->assumptions);
2284 niter->may_be_zero
2285 = simplify_using_initial_conditions (loop,
2286 niter->may_be_zero);
2287
2288 fold_undefer_and_ignore_overflow_warnings ();
2289
2290 /* If NITER has simplified into a constant, update MAX. */
2291 if (TREE_CODE (niter->niter) == INTEGER_CST)
2292 niter->max = wi::to_widest (niter->niter);
2293
2294 if (integer_onep (niter->assumptions))
2295 return true;
2296
2297 /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
2298 But if we can prove that there is overflow or some other source of weird
2299 behavior, ignore the loop even with -funsafe-loop-optimizations. */
2300 if (integer_zerop (niter->assumptions) || !single_exit (loop))
2301 return false;
2302
2303 if (flag_unsafe_loop_optimizations)
2304 niter->assumptions = boolean_true_node;
2305
2306 if (warn)
2307 {
2308 const char *wording;
2309 location_t loc = gimple_location (stmt);
2310
2311 /* We can provide a more specific warning if one of the operands is
2312 constant and the other advances by +1 or -1. */
2313 if (!integer_zerop (iv1.step)
2314 ? (integer_zerop (iv0.step)
2315 && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
2316 : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
2317 wording =
2318 flag_unsafe_loop_optimizations
2319 ? N_("assuming that the loop is not infinite")
2320 : N_("cannot optimize possibly infinite loops");
2321 else
2322 wording =
2323 flag_unsafe_loop_optimizations
2324 ? N_("assuming that the loop counter does not overflow")
2325 : N_("cannot optimize loop, the loop counter may overflow");
2326
2327 warning_at ((LOCATION_LINE (loc) > 0) ? loc : input_location,
2328 OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
2329 }
2330
2331 return flag_unsafe_loop_optimizations;
2332 }
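
/* As a sketch of the shape of the result (not taken from a real dump): for

     for (i = 0; i < n; i++)
       ...

   with int i and n, the analysis typically yields assumptions = true,
   may_be_zero = (n_1 <= 0) and niter = (unsigned int) n_1, i.e. the latch
   runs n times whenever the guard admits at least one iteration.  */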
2333
2334 /* Try to determine the number of iterations of LOOP. If we succeed,
2335 an expression giving the number of iterations is returned and *EXIT is
2336 set to the edge from which the information is obtained. Otherwise
2337 chrec_dont_know is returned. */
2338
2339 tree
2340 find_loop_niter (struct loop *loop, edge *exit)
2341 {
2342 unsigned i;
2343 vec<edge> exits = get_loop_exit_edges (loop);
2344 edge ex;
2345 tree niter = NULL_TREE, aniter;
2346 struct tree_niter_desc desc;
2347
2348 *exit = NULL;
2349 FOR_EACH_VEC_ELT (exits, i, ex)
2350 {
2351 if (!number_of_iterations_exit (loop, ex, &desc, false))
2352 continue;
2353
2354 if (integer_nonzerop (desc.may_be_zero))
2355 {
2356 /* We exit in the first iteration through this exit.
2357 We won't find anything better. */
2358 niter = build_int_cst (unsigned_type_node, 0);
2359 *exit = ex;
2360 break;
2361 }
2362
2363 if (!integer_zerop (desc.may_be_zero))
2364 continue;
2365
2366 aniter = desc.niter;
2367
2368 if (!niter)
2369 {
2370 /* Nothing recorded yet. */
2371 niter = aniter;
2372 *exit = ex;
2373 continue;
2374 }
2375
2376 /* Prefer constants, the lower the better. */
2377 if (TREE_CODE (aniter) != INTEGER_CST)
2378 continue;
2379
2380 if (TREE_CODE (niter) != INTEGER_CST)
2381 {
2382 niter = aniter;
2383 *exit = ex;
2384 continue;
2385 }
2386
2387 if (tree_int_cst_lt (aniter, niter))
2388 {
2389 niter = aniter;
2390 *exit = ex;
2391 continue;
2392 }
2393 }
2394 exits.release ();
2395
2396 return niter ? niter : chrec_dont_know;
2397 }
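
/* E.g. (sketch): if one exit yields niter = m_3 and another yields the
   constant 100, the loop above keeps 100, since constants are preferred;
   between two constants the smaller wins, as the loop cannot iterate more
   often than its most restrictive exit allows.  */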
2398
2399 /* Return true if loop is known to have bounded number of iterations. */
2400
2401 bool
2402 finite_loop_p (struct loop *loop)
2403 {
2404 widest_int nit;
2405 int flags;
2406
2407 if (flag_unsafe_loop_optimizations)
2408 return true;
2409 flags = flags_from_decl_or_type (current_function_decl);
2410 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2411 {
2412 if (dump_file && (dump_flags & TDF_DETAILS))
2413 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2414 loop->num);
2415 return true;
2416 }
2417
2418 if (loop->any_upper_bound
2419 || max_loop_iterations (loop, &nit))
2420 {
2421 if (dump_file && (dump_flags & TDF_DETAILS))
2422 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2423 loop->num);
2424 return true;
2425 }
2426 return false;
2427 }
2428
2429 /*
2430
2431 Analysis of a number of iterations of a loop by a brute-force evaluation.
2432
2433 */
2434
2435 /* Bound on the number of iterations we try to evaluate. */
2436
2437 #define MAX_ITERATIONS_TO_TRACK \
2438 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2439
2440 /* Returns the phi node of LOOP from whose result the SSA name X is derived
2441 by a chain of operations in which all but exactly one of the
2442 operands are constants. */
2443
2444 static gphi *
2445 chain_of_csts_start (struct loop *loop, tree x)
2446 {
2447 gimple *stmt = SSA_NAME_DEF_STMT (x);
2448 tree use;
2449 basic_block bb = gimple_bb (stmt);
2450 enum tree_code code;
2451
2452 if (!bb
2453 || !flow_bb_inside_loop_p (loop, bb))
2454 return NULL;
2455
2456 if (gimple_code (stmt) == GIMPLE_PHI)
2457 {
2458 if (bb == loop->header)
2459 return as_a <gphi *> (stmt);
2460
2461 return NULL;
2462 }
2463
2464 if (gimple_code (stmt) != GIMPLE_ASSIGN
2465 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2466 return NULL;
2467
2468 code = gimple_assign_rhs_code (stmt);
2469 if (gimple_references_memory_p (stmt)
2470 || TREE_CODE_CLASS (code) == tcc_reference
2471 || (code == ADDR_EXPR
2472 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2473 return NULL;
2474
2475 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2476 if (use == NULL_TREE)
2477 return NULL;
2478
2479 return chain_of_csts_start (loop, use);
2480 }
2481
2482 /* Determines whether the expression X is derived from a result of a phi node
2483 in header of LOOP such that
2484
2485 * the derivation of X consists only of operations with constants
2486 * the initial value of the phi node is constant
2487 * the value of the phi node in the next iteration can be derived from the
2488 value in the current iteration by a chain of operations with constants.
2489
2490 If such a phi node exists, it is returned, otherwise NULL is returned. */
2491
2492 static gphi *
2493 get_base_for (struct loop *loop, tree x)
2494 {
2495 gphi *phi;
2496 tree init, next;
2497
2498 if (is_gimple_min_invariant (x))
2499 return NULL;
2500
2501 phi = chain_of_csts_start (loop, x);
2502 if (!phi)
2503 return NULL;
2504
2505 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2506 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2507
2508 if (TREE_CODE (next) != SSA_NAME)
2509 return NULL;
2510
2511 if (!is_gimple_min_invariant (init))
2512 return NULL;
2513
2514 if (chain_of_csts_start (loop, next) != phi)
2515 return NULL;
2516
2517 return phi;
2518 }
2519
2520 /* Given an expression X, then
2521
2522 * if X is NULL_TREE, we return the constant BASE.
2523 * otherwise X is an SSA name, whose value in the considered loop is derived
2524 by a chain of operations with constants from the result of a phi node in
2525 the header of the loop. Then we return the value of X when the value of the
2526 result of this phi node is given by the constant BASE. */
2527
2528 static tree
2529 get_val_for (tree x, tree base)
2530 {
2531 gimple *stmt;
2532
2533 gcc_checking_assert (is_gimple_min_invariant (base));
2534
2535 if (!x)
2536 return base;
2537
2538 stmt = SSA_NAME_DEF_STMT (x);
2539 if (gimple_code (stmt) == GIMPLE_PHI)
2540 return base;
2541
2542 gcc_checking_assert (is_gimple_assign (stmt));
2543
2544 /* STMT must be either an assignment of a single SSA name or an
2545 expression involving an SSA name and a constant. Try to fold that
2546 expression using the value for the SSA name. */
2547 if (gimple_assign_ssa_name_copy_p (stmt))
2548 return get_val_for (gimple_assign_rhs1 (stmt), base);
2549 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2550 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2551 {
2552 return fold_build1 (gimple_assign_rhs_code (stmt),
2553 gimple_expr_type (stmt),
2554 get_val_for (gimple_assign_rhs1 (stmt), base));
2555 }
2556 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2557 {
2558 tree rhs1 = gimple_assign_rhs1 (stmt);
2559 tree rhs2 = gimple_assign_rhs2 (stmt);
2560 if (TREE_CODE (rhs1) == SSA_NAME)
2561 rhs1 = get_val_for (rhs1, base);
2562 else if (TREE_CODE (rhs2) == SSA_NAME)
2563 rhs2 = get_val_for (rhs2, base);
2564 else
2565 gcc_unreachable ();
2566 return fold_build2 (gimple_assign_rhs_code (stmt),
2567 gimple_expr_type (stmt), rhs1, rhs2);
2568 }
2569 else
2570 gcc_unreachable ();
2571 }
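
/* A sketch of how get_base_for and get_val_for cooperate (made-up names):
   for

     i_1 = PHI <1 (preheader), i_4 (latch)>
     i_4 = i_1 * 2;

   get_base_for returns the phi node, and get_val_for (i_4, 4) folds
   4 * 2 into 8, i.e. it evaluates one step of the chain with the phi
   result replaced by the constant 4.  */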
2572
2573
2574 /* Tries to count the number of iterations of LOOP until it exits through EXIT
2575 by brute force -- i.e. by determining the value of the operands of the
2576 condition at EXIT in the first few iterations of the loop (assuming that
2577 these values are constant) and finding the first iteration in which the
2578 condition is not satisfied. Returns the constant giving the number
2579 of iterations of LOOP if successful, chrec_dont_know otherwise. */
2580
2581 tree
2582 loop_niter_by_eval (struct loop *loop, edge exit)
2583 {
2584 tree acnd;
2585 tree op[2], val[2], next[2], aval[2];
2586 gphi *phi;
2587 gimple *cond;
2588 unsigned i, j;
2589 enum tree_code cmp;
2590
2591 cond = last_stmt (exit->src);
2592 if (!cond || gimple_code (cond) != GIMPLE_COND)
2593 return chrec_dont_know;
2594
2595 cmp = gimple_cond_code (cond);
2596 if (exit->flags & EDGE_TRUE_VALUE)
2597 cmp = invert_tree_comparison (cmp, false);
2598
2599 switch (cmp)
2600 {
2601 case EQ_EXPR:
2602 case NE_EXPR:
2603 case GT_EXPR:
2604 case GE_EXPR:
2605 case LT_EXPR:
2606 case LE_EXPR:
2607 op[0] = gimple_cond_lhs (cond);
2608 op[1] = gimple_cond_rhs (cond);
2609 break;
2610
2611 default:
2612 return chrec_dont_know;
2613 }
2614
2615 for (j = 0; j < 2; j++)
2616 {
2617 if (is_gimple_min_invariant (op[j]))
2618 {
2619 val[j] = op[j];
2620 next[j] = NULL_TREE;
2621 op[j] = NULL_TREE;
2622 }
2623 else
2624 {
2625 phi = get_base_for (loop, op[j]);
2626 if (!phi)
2627 return chrec_dont_know;
2628 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2629 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2630 }
2631 }
2632
2633 /* Don't issue signed overflow warnings. */
2634 fold_defer_overflow_warnings ();
2635
2636 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2637 {
2638 for (j = 0; j < 2; j++)
2639 aval[j] = get_val_for (op[j], val[j]);
2640
2641 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2642 if (acnd && integer_zerop (acnd))
2643 {
2644 fold_undefer_and_ignore_overflow_warnings ();
2645 if (dump_file && (dump_flags & TDF_DETAILS))
2646 fprintf (dump_file,
2647 "Proved that loop %d iterates %d times using brute force.\n",
2648 loop->num, i);
2649 return build_int_cst (unsigned_type_node, i);
2650 }
2651
2652 for (j = 0; j < 2; j++)
2653 {
2654 val[j] = get_val_for (next[j], val[j]);
2655 if (!is_gimple_min_invariant (val[j]))
2656 {
2657 fold_undefer_and_ignore_overflow_warnings ();
2658 return chrec_dont_know;
2659 }
2660 }
2661 }
2662
2663 fold_undefer_and_ignore_overflow_warnings ();
2664
2665 return chrec_dont_know;
2666 }
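
/* Worked sketch of the brute-force evaluation: for

     i = 1;
     do i *= 2; while (i != 16);

   the condition operand evaluates to 2, 4, 8, 16 in successive iterations,
   so the staying condition i != 16 first folds to false in iteration 3
   (counting from zero) and the function returns the constant 3; the exit
   is taken on the fourth test.  */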
2667
2668 /* Finds the exit of LOOP through which the loop exits after a constant
2669 number of iterations and stores the exit edge in *EXIT. The constant
2670 giving the number of iterations of LOOP is returned. The number of
2671 iterations is determined using loop_niter_by_eval (i.e. by brute force
2672 evaluation). If we are unable to find an exit for which loop_niter_by_eval
2673 determines the number of iterations, chrec_dont_know is returned. */
2674
2675 tree
2676 find_loop_niter_by_eval (struct loop *loop, edge *exit)
2677 {
2678 unsigned i;
2679 vec<edge> exits = get_loop_exit_edges (loop);
2680 edge ex;
2681 tree niter = NULL_TREE, aniter;
2682
2683 *exit = NULL;
2684
2685 /* Loops with multiple exits are expensive to handle and less important. */
2686 if (!flag_expensive_optimizations
2687 && exits.length () > 1)
2688 {
2689 exits.release ();
2690 return chrec_dont_know;
2691 }
2692
2693 FOR_EACH_VEC_ELT (exits, i, ex)
2694 {
2695 if (!just_once_each_iteration_p (loop, ex->src))
2696 continue;
2697
2698 aniter = loop_niter_by_eval (loop, ex);
2699 if (chrec_contains_undetermined (aniter))
2700 continue;
2701
2702 if (niter
2703 && !tree_int_cst_lt (aniter, niter))
2704 continue;
2705
2706 niter = aniter;
2707 *exit = ex;
2708 }
2709 exits.release ();
2710
2711 return niter ? niter : chrec_dont_know;
2712 }
2713
2714 /*
2715
2716 Analysis of upper bounds on number of iterations of a loop.
2717
2718 */
2719
2720 static widest_int derive_constant_upper_bound_ops (tree, tree,
2721 enum tree_code, tree);
2722
2723 /* Returns a constant upper bound on the value of the right-hand side of
2724 an assignment statement STMT. */
2725
2726 static widest_int
2727 derive_constant_upper_bound_assign (gimple *stmt)
2728 {
2729 enum tree_code code = gimple_assign_rhs_code (stmt);
2730 tree op0 = gimple_assign_rhs1 (stmt);
2731 tree op1 = gimple_assign_rhs2 (stmt);
2732
2733 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
2734 op0, code, op1);
2735 }
2736
2737 /* Returns a constant upper bound on the value of expression VAL. VAL
2738 is considered to be unsigned. If its type is signed, its value must
2739 be nonnegative. */
2740
2741 static widest_int
2742 derive_constant_upper_bound (tree val)
2743 {
2744 enum tree_code code;
2745 tree op0, op1, op2;
2746
2747 extract_ops_from_tree (val, &code, &op0, &op1, &op2);
2748 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
2749 }
2750
2751 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
2752 whose type is TYPE. The expression is considered to be unsigned. If
2753 its type is signed, its value must be nonnegative. */
2754
2755 static widest_int
2756 derive_constant_upper_bound_ops (tree type, tree op0,
2757 enum tree_code code, tree op1)
2758 {
2759 tree subtype, maxt;
2760 widest_int bnd, max, cst;
2761 gimple *stmt;
2762
2763 if (INTEGRAL_TYPE_P (type))
2764 maxt = TYPE_MAX_VALUE (type);
2765 else
2766 maxt = upper_bound_in_type (type, type);
2767
2768 max = wi::to_widest (maxt);
2769
2770 switch (code)
2771 {
2772 case INTEGER_CST:
2773 return wi::to_widest (op0);
2774
2775 CASE_CONVERT:
2776 subtype = TREE_TYPE (op0);
2777 if (!TYPE_UNSIGNED (subtype)
2778 /* If TYPE is also signed, the fact that VAL is nonnegative implies
2779 that OP0 is nonnegative. */
2780 && TYPE_UNSIGNED (type)
2781 && !tree_expr_nonnegative_p (op0))
2782 {
2783 /* If we cannot prove that the casted expression is nonnegative,
2784 we cannot establish more useful upper bound than the precision
2785 of the type gives us. */
2786 return max;
2787 }
2788
2789 /* We now know that op0 is a nonnegative value. Try deriving an upper
2790 bound for it. */
2791 bnd = derive_constant_upper_bound (op0);
2792
2793 /* If the bound does not fit in TYPE, max. value of TYPE could be
2794 attained. */
2795 if (wi::ltu_p (max, bnd))
2796 return max;
2797
2798 return bnd;
2799
2800 case PLUS_EXPR:
2801 case POINTER_PLUS_EXPR:
2802 case MINUS_EXPR:
2803 if (TREE_CODE (op1) != INTEGER_CST
2804 || !tree_expr_nonnegative_p (op0))
2805 return max;
2806
2807 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
2808 choose the most logical way to treat this constant regardless
2809 of the signedness of the type. */
2810 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
2811 if (code != MINUS_EXPR)
2812 cst = -cst;
2813
2814 bnd = derive_constant_upper_bound (op0);
2815
2816 if (wi::neg_p (cst))
2817 {
2818 cst = -cst;
2819 /* Avoid CST == 0x80000... */
2820 if (wi::neg_p (cst))
2821 return max;
2822
2823 /* OP0 + CST. We need to check that
2824 BND <= MAX (type) - CST. */
2825
2826 widest_int mmax = max - cst;
2827 if (!wi::leu_p (bnd, mmax))
2828 return max;
2829
2830 return bnd + cst;
2831 }
2832 else
2833 {
2834 /* OP0 - CST, where CST >= 0.
2835
2836 If TYPE is signed, we have already verified that OP0 >= 0, and we
2837 know that the result is nonnegative. This implies that
2838 VAL <= BND - CST.
2839
2840 If TYPE is unsigned, we must additionally know that OP0 >= CST,
2841 otherwise the operation underflows.
2842 */
2843
2844 /* This should only happen if the type is unsigned; however, for
2845 buggy programs that use overflowing signed arithmetic even with
2846 -fno-wrapv, this condition may also be true for signed values. */
2847 if (wi::ltu_p (bnd, cst))
2848 return max;
2849
2850 if (TYPE_UNSIGNED (type))
2851 {
2852 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2853 wide_int_to_tree (type, cst));
2854 if (!tem || !integer_nonzerop (tem))
2855 return max;
2856 }
2857
2858 bnd -= cst;
2859 }
2860
2861 return bnd;
2862
2863 case FLOOR_DIV_EXPR:
2864 case EXACT_DIV_EXPR:
2865 if (TREE_CODE (op1) != INTEGER_CST
2866 || tree_int_cst_sign_bit (op1))
2867 return max;
2868
2869 bnd = derive_constant_upper_bound (op0);
2870 return wi::udiv_floor (bnd, wi::to_widest (op1));
2871
2872 case BIT_AND_EXPR:
2873 if (TREE_CODE (op1) != INTEGER_CST
2874 || tree_int_cst_sign_bit (op1))
2875 return max;
2876 return wi::to_widest (op1);
2877
2878 case SSA_NAME:
2879 stmt = SSA_NAME_DEF_STMT (op0);
2880 if (gimple_code (stmt) != GIMPLE_ASSIGN
2881 || gimple_assign_lhs (stmt) != op0)
2882 return max;
2883 return derive_constant_upper_bound_assign (stmt);
2884
2885 default:
2886 return max;
2887 }
2888 }
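
/* Illustrative examples for the cases above, assuming unsigned int
   operands: x_1 & 0xff is bounded by 255 (BIT_AND_EXPR); (x_1 & 0xff) / 4
   divides the recursively derived bound, giving 63 (FLOOR_DIV_EXPR); and
   (x_1 & 0xff) + 1 gives 256 (PLUS_EXPR), after checking that adding the
   constant cannot wrap the derived bound.  */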
2889
2890 /* Emit a -Waggressive-loop-optimizations warning if needed. */
2891
2892 static void
2893 do_warn_aggressive_loop_optimizations (struct loop *loop,
2894 widest_int i_bound, gimple *stmt)
2895 {
2896 /* Don't warn if the loop doesn't have known constant bound. */
2897 if (!loop->nb_iterations
2898 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
2899 || !warn_aggressive_loop_optimizations
2900 /* To avoid warning multiple times for the same loop,
2901 only start warning when we preserve loops. */
2902 || (cfun->curr_properties & PROP_loops) == 0
2903 /* Only warn once per loop. */
2904 || loop->warned_aggressive_loop_optimizations
2905 /* Only warn if undefined behavior gives us lower estimate than the
2906 known constant bound. */
2907 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
2908 /* And undefined behavior happens unconditionally. */
2909 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
2910 return;
2911
2912 edge e = single_exit (loop);
2913 if (e == NULL)
2914 return;
2915
2916 gimple *estmt = last_stmt (e->src);
2917 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2918 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
2919 ? UNSIGNED : SIGNED);
2920 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
2921 "iteration %s invokes undefined behavior", buf))
2922 inform (gimple_location (estmt), "within this loop");
2923 loop->warned_aggressive_loop_optimizations = true;
2924 }
2925
2926 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
2927 is true if the loop is exited immediately after STMT, and this exit
2928 is taken at the latest when STMT has been executed BOUND + 1 times.
2929 REALISTIC is true if BOUND is expected to be close to the real number
2930 of iterations. UPPER is true if we are sure the loop iterates at most
2931 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
2932
2933 static void
2934 record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
2935 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
2936 {
2937 widest_int delta;
2938
2939 if (dump_file && (dump_flags & TDF_DETAILS))
2940 {
2941 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
2942 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
2943 fprintf (dump_file, " is %sexecuted at most ",
2944 upper ? "" : "probably ");
2945 print_generic_expr (dump_file, bound, TDF_SLIM);
2946 fprintf (dump_file, " (bounded by ");
2947 print_decu (i_bound, dump_file);
2948 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
2949 }
2950
2951 /* If I_BOUND is just an estimate of BOUND, it is rarely close to the
2952 real number of iterations. */
2953 if (TREE_CODE (bound) != INTEGER_CST)
2954 realistic = false;
2955 else
2956 gcc_checking_assert (i_bound == wi::to_widest (bound));
2957
2958 /* If we have a guaranteed upper bound, record it in the appropriate
2959 list, unless this is an !is_exit bound (i.e. undefined behavior in
2960 at_stmt) in a loop with known constant number of iterations. */
2961 if (upper
2962 && (is_exit
2963 || loop->nb_iterations == NULL_TREE
2964 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
2965 {
2966 struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
2967
2968 elt->bound = i_bound;
2969 elt->stmt = at_stmt;
2970 elt->is_exit = is_exit;
2971 elt->next = loop->bounds;
2972 loop->bounds = elt;
2973 }
2974
2975 /* If statement is executed on every path to the loop latch, we can directly
2976 infer the upper bound on the # of iterations of the loop. */
2977 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
2978 upper = false;
2979
2980 /* Update the number of iteration estimates according to the bound.
2981 If at_stmt is an exit then the loop latch is executed at most BOUND times,
2982 otherwise it can be executed BOUND + 1 times. We will lower the estimate
2983 later if such a statement must be executed on the last iteration. */
2984 if (is_exit)
2985 delta = 0;
2986 else
2987 delta = 1;
2988 widest_int new_i_bound = i_bound + delta;
2989
2990 /* If an overflow occurred, ignore the result. */
2991 if (wi::ltu_p (new_i_bound, delta))
2992 return;
2993
2994 if (upper && !is_exit)
2995 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
2996 record_niter_bound (loop, new_i_bound, realistic, upper);
2997 }
2998
2999 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
3000 and doesn't overflow. */
3001
3002 static void
3003 record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
3004 {
3005 struct control_iv *iv;
3006
3007 if (!niter->control.base || !niter->control.step)
3008 return;
3009
3010 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3011 return;
3012
3013 iv = ggc_alloc<control_iv> ();
3014 iv->base = niter->control.base;
3015 iv->step = niter->control.step;
3016 iv->next = loop->control_ivs;
3017 loop->control_ivs = iv;
3018
3019 return;
3020 }
3021
3022 /* Record the estimate on number of iterations of LOOP based on the fact that
3023 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3024 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
3025 estimated number of iterations is expected to be close to the real one.
3026 UPPER is true if we are sure the induction variable does not wrap. */
3027
3028 static void
3029 record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
3030 tree low, tree high, bool realistic, bool upper)
3031 {
3032 tree niter_bound, extreme, delta;
3033 tree type = TREE_TYPE (base), unsigned_type;
3034 tree orig_base = base;
3035
3036 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3037 return;
3038
3039 if (dump_file && (dump_flags & TDF_DETAILS))
3040 {
3041 fprintf (dump_file, "Induction variable (");
3042 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3043 fprintf (dump_file, ") ");
3044 print_generic_expr (dump_file, base, TDF_SLIM);
3045 fprintf (dump_file, " + ");
3046 print_generic_expr (dump_file, step, TDF_SLIM);
3047 fprintf (dump_file, " * iteration does not wrap in statement ");
3048 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3049 fprintf (dump_file, " in loop %d.\n", loop->num);
3050 }
3051
3052 unsigned_type = unsigned_type_for (type);
3053 base = fold_convert (unsigned_type, base);
3054 step = fold_convert (unsigned_type, step);
3055
3056 if (tree_int_cst_sign_bit (step))
3057 {
3058 wide_int min, max;
3059 extreme = fold_convert (unsigned_type, low);
3060 if (TREE_CODE (orig_base) == SSA_NAME
3061 && TREE_CODE (high) == INTEGER_CST
3062 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3063 && get_range_info (orig_base, &min, &max) == VR_RANGE
3064 && wi::gts_p (high, max))
3065 base = wide_int_to_tree (unsigned_type, max);
3066 else if (TREE_CODE (base) != INTEGER_CST
3067 && dominated_by_p (CDI_DOMINATORS,
3068 loop->latch, gimple_bb (stmt)))
3069 base = fold_convert (unsigned_type, high);
3070 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3071 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3072 }
3073 else
3074 {
3075 wide_int min, max;
3076 extreme = fold_convert (unsigned_type, high);
3077 if (TREE_CODE (orig_base) == SSA_NAME
3078 && TREE_CODE (low) == INTEGER_CST
3079 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3080 && get_range_info (orig_base, &min, &max) == VR_RANGE
3081 && wi::gts_p (min, low))
3082 base = wide_int_to_tree (unsigned_type, min);
3083 else if (TREE_CODE (base) != INTEGER_CST
3084 && dominated_by_p (CDI_DOMINATORS,
3085 loop->latch, gimple_bb (stmt)))
3086 base = fold_convert (unsigned_type, low);
3087 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3088 }
3089
3090 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3091 would get out of the range. */
3092 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3093 widest_int max = derive_constant_upper_bound (niter_bound);
3094 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
3095 }
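
/* Numeric sketch of the computation above: for a nonwrapping IV with
   BASE = 0 and STEP = 4 whose values are known to lie in <0, 100>, DELTA
   is 100 and NITER_BOUND is 100 / 4 = 25, so STMT is executed at most
   25 + 1 times.  For a negative step the roles of LOW and HIGH are swapped
   and the step is negated first.  */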
3096
3097 /* Determine information about the number of iterations of LOOP from the
3098 index IDX of a data reference accessed in STMT. STMT need not be
3099 executed in every iteration of LOOP. Callback for
3100 for_each_index. */
3101
3102 struct ilb_data
3103 {
3104 struct loop *loop;
3105 gimple *stmt;
3106 };
3107
3108 static bool
3109 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3110 {
3111 struct ilb_data *data = (struct ilb_data *) dta;
3112 tree ev, init, step;
3113 tree low, high, type, next;
3114 bool sign, upper = true, at_end = false;
3115 struct loop *loop = data->loop;
3116
3117 if (TREE_CODE (base) != ARRAY_REF)
3118 return true;
3119
3120 /* For arrays at the end of the structure, we are not guaranteed that they
3121 do not really extend over their declared size. However, for arrays of
3122 size greater than one, this is unlikely to be intended. */
3123 if (array_at_struct_end_p (base))
3124 {
3125 at_end = true;
3126 upper = false;
3127 }
3128
3129 struct loop *dloop = loop_containing_stmt (data->stmt);
3130 if (!dloop)
3131 return true;
3132
3133 ev = analyze_scalar_evolution (dloop, *idx);
3134 ev = instantiate_parameters (loop, ev);
3135 init = initial_condition (ev);
3136 step = evolution_part_in_loop_num (ev, loop->num);
3137
3138 if (!init
3139 || !step
3140 || TREE_CODE (step) != INTEGER_CST
3141 || integer_zerop (step)
3142 || tree_contains_chrecs (init, NULL)
3143 || chrec_contains_symbols_defined_in_loop (init, loop->num))
3144 return true;
3145
3146 low = array_ref_low_bound (base);
3147 high = array_ref_up_bound (base);
3148
3149 /* The case of nonconstant bounds could be handled, but it would be
3150 complicated. */
3151 if (TREE_CODE (low) != INTEGER_CST
3152 || !high
3153 || TREE_CODE (high) != INTEGER_CST)
3154 return true;
3155 sign = tree_int_cst_sign_bit (step);
3156 type = TREE_TYPE (step);
3157
3158 /* The array of length 1 at the end of a structure most likely extends
3159 beyond its bounds. */
3160 if (at_end
3161 && operand_equal_p (low, high, 0))
3162 return true;
3163
3164 /* In case the relevant bound of the array does not fit in type, or
3165 it does, but bound + step (in type) still belongs to the range of the
3166 array, the index may wrap and still stay within the range of the array
3167 (consider e.g. if the array is indexed by the full range of
3168 unsigned char).
3169
3170 To make things simpler, we require both bounds to fit into type, although
3171 there are cases where this would not be strictly necessary. */
3172 if (!int_fits_type_p (high, type)
3173 || !int_fits_type_p (low, type))
3174 return true;
3175 low = fold_convert (type, low);
3176 high = fold_convert (type, high);
3177
3178 if (sign)
3179 next = fold_binary (PLUS_EXPR, type, low, step);
3180 else
3181 next = fold_binary (PLUS_EXPR, type, high, step);
3182
3183 if (tree_int_cst_compare (low, next) <= 0
3184 && tree_int_cst_compare (next, high) <= 0)
3185 return true;
3186
3187 /* If the access is not executed on every iteration, we must ensure that
3188 overflow will not make the access valid later. */
3189 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3190 && scev_probably_wraps_p (initial_condition_in_loop_num (ev, loop->num),
3191 step, data->stmt, loop, true))
3192 upper = false;
3193
3194 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
3195 return true;
3196 }
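
/* E.g. (sketch): for

     int a[100];
     for (i = 0; ; i++)
       a[i] = 0;

   the access a[i] has evolution {0, +, 1} with LOW = 0 and HIGH = 99, so
   the record_nonwrapping_iv call above lets the niter machinery conclude
   that the loop iterates at most 100 times; iterating further would access
   the array out of bounds, which is undefined.  */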
3197
3198 /* Determine information about the number of iterations of LOOP from the
3199 bounds of arrays in the data reference REF accessed in STMT. STMT
3200 need not be executed in every iteration of LOOP. */
3201
3202 static void
3203 infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
3204 {
3205 struct ilb_data data;
3206
3207 data.loop = loop;
3208 data.stmt = stmt;
3209 for_each_index (&ref, idx_infer_loop_bounds, &data);
3210 }
3211
3212 /* Determine information about the number of iterations of LOOP from the way
3213 arrays are used in STMT. STMT need not be executed in every
3214 iteration of LOOP. */
3215
3216 static void
3217 infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
3218 {
3219 if (is_gimple_assign (stmt))
3220 {
3221 tree op0 = gimple_assign_lhs (stmt);
3222 tree op1 = gimple_assign_rhs1 (stmt);
3223
3224 /* For each memory access, analyze its access function
3225 and record a bound on the loop iteration domain. */
3226 if (REFERENCE_CLASS_P (op0))
3227 infer_loop_bounds_from_ref (loop, stmt, op0);
3228
3229 if (REFERENCE_CLASS_P (op1))
3230 infer_loop_bounds_from_ref (loop, stmt, op1);
3231 }
3232 else if (is_gimple_call (stmt))
3233 {
3234 tree arg, lhs;
3235 unsigned i, n = gimple_call_num_args (stmt);
3236
3237 lhs = gimple_call_lhs (stmt);
3238 if (lhs && REFERENCE_CLASS_P (lhs))
3239 infer_loop_bounds_from_ref (loop, stmt, lhs);
3240
3241 for (i = 0; i < n; i++)
3242 {
3243 arg = gimple_call_arg (stmt, i);
3244 if (REFERENCE_CLASS_P (arg))
3245 infer_loop_bounds_from_ref (loop, stmt, arg);
3246 }
3247 }
3248 }
3249
3250 /* Determine information about the number of iterations of LOOP from the fact
3251 that pointer arithmetic in STMT does not overflow. */
3252
3253 static void
3254 infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
3255 {
3256 tree def, base, step, scev, type, low, high;
3257 tree var, ptr;
3258
3259 if (!is_gimple_assign (stmt)
3260 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3261 return;
3262
3263 def = gimple_assign_lhs (stmt);
3264 if (TREE_CODE (def) != SSA_NAME)
3265 return;
3266
3267 type = TREE_TYPE (def);
3268 if (!nowrap_type_p (type))
3269 return;
3270
3271 ptr = gimple_assign_rhs1 (stmt);
3272 if (!expr_invariant_in_loop_p (loop, ptr))
3273 return;
3274
3275 var = gimple_assign_rhs2 (stmt);
3276 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3277 return;
3278
3279 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3280 if (chrec_contains_undetermined (scev))
3281 return;
3282
3283 base = initial_condition_in_loop_num (scev, loop->num);
3284 step = evolution_part_in_loop_num (scev, loop->num);
3285
3286 if (!base || !step
3287 || TREE_CODE (step) != INTEGER_CST
3288 || tree_contains_chrecs (base, NULL)
3289 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3290 return;
3291
3292 low = lower_bound_in_type (type, type);
3293 high = upper_bound_in_type (type, type);
3294
3295 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3296 produce a NULL pointer. The contrary would mean NULL points to an object,
3297 while NULL is supposed to compare unequal with the address of all objects.
3298 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3299 NULL pointer since that would mean wrapping, which we assume here not to
3300 happen. So, we can exclude NULL from the valid range of pointer
3301 arithmetic. */
3302 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3303 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3304
3305 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3306 }
3307
3308 /* Determine information about the number of iterations of LOOP from the fact
3309 that signed arithmetic in STMT does not overflow. */
3310
3311 static void
3312 infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
3313 {
3314 tree def, base, step, scev, type, low, high;
3315
3316 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3317 return;
3318
3319 def = gimple_assign_lhs (stmt);
3320
3321 if (TREE_CODE (def) != SSA_NAME)
3322 return;
3323
3324 type = TREE_TYPE (def);
3325 if (!INTEGRAL_TYPE_P (type)
3326 || !TYPE_OVERFLOW_UNDEFINED (type))
3327 return;
3328
3329 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3330 if (chrec_contains_undetermined (scev))
3331 return;
3332
3333 base = initial_condition_in_loop_num (scev, loop->num);
3334 step = evolution_part_in_loop_num (scev, loop->num);
3335
3336 if (!base || !step
3337 || TREE_CODE (step) != INTEGER_CST
3338 || tree_contains_chrecs (base, NULL)
3339 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3340 return;
3341
3342 low = lower_bound_in_type (type, type);
3343 high = upper_bound_in_type (type, type);
3344
3345 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3346 }
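
/* For instance (sketch): if STMT computes i_2 = i_1 + 1 in signed int with
   -fno-wrapv semantics, the evolution {base, +, 1} must stay within
   [INT_MIN, INT_MAX], so the record_nonwrapping_iv call above bounds the
   number of executions of STMT by the distance from the base to
   INT_MAX.  */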
3347
3348 /* The following analyzers extract information on the bounds
3349 of LOOP from the following undefined behaviors:
3350
3351 - data references should not access elements over the statically
3352 allocated size,
3353
3354 - signed variables should not overflow when flag_wrapv is not set.
3355 */
3356
3357 static void
3358 infer_loop_bounds_from_undefined (struct loop *loop)
3359 {
3360 unsigned i;
3361 basic_block *bbs;
3362 gimple_stmt_iterator bsi;
3363 basic_block bb;
3364 bool reliable;
3365
3366 bbs = get_loop_body (loop);
3367
3368 for (i = 0; i < loop->num_nodes; i++)
3369 {
3370 bb = bbs[i];
3371
3372 /* If BB is not executed in each iteration of the loop, we cannot
3373 use the operations in it to infer a reliable upper bound on the
3374 # of iterations of the loop. However, we can use it as a guess.
3375 Reliable guesses come only from array bounds. */
3376 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3377
3378 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3379 {
3380 gimple *stmt = gsi_stmt (bsi);
3381
3382 infer_loop_bounds_from_array (loop, stmt);
3383
3384 if (reliable)
3385 {
3386 infer_loop_bounds_from_signedness (loop, stmt);
3387 infer_loop_bounds_from_pointer_arith (loop, stmt);
3388 }
3389 }
3390
3391 }
3392
3393 free (bbs);
3394 }
3395
3396 /* Compare wide ints, callback for qsort. */
3397
3398 static int
3399 wide_int_cmp (const void *p1, const void *p2)
3400 {
3401 const widest_int *d1 = (const widest_int *) p1;
3402 const widest_int *d2 = (const widest_int *) p2;
3403 return wi::cmpu (*d1, *d2);
3404 }
3405
3406 /* Return index of BOUND in BOUNDS array sorted in increasing order.
3407 Lookup by binary search. */
3408
3409 static int
3410 bound_index (vec<widest_int> bounds, const widest_int &bound)
3411 {
3412 unsigned int end = bounds.length ();
3413 unsigned int begin = 0;
3414
3415 /* Find a matching index by means of a binary search. */
3416 while (begin != end)
3417 {
3418 unsigned int middle = (begin + end) / 2;
3419 widest_int index = bounds[middle];
3420
3421 if (index == bound)
3422 return middle;
3423 else if (wi::ltu_p (index, bound))
3424 begin = middle + 1;
3425 else
3426 end = middle;
3427 }
3428 gcc_unreachable ();
3429 }
3430
3431 /* We recorded loop bounds only for statements dominating the loop latch (and
3432 thus executed in each loop iteration). If there are any bounds on statements not
3433 dominating the loop latch we can improve the estimate by walking the loop
3434 body and seeing if every path from loop header to loop latch contains
3435 some bounded statement. */
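
/* For example (sketch): if every iteration takes either a then-branch
   containing a statement bounded by 5 or an else-branch containing one
   bounded by 7, every header-to-latch path is bounded and the walk below
   derives the overall bound 7, the largest over all paths of the smallest
   bound on the path.  */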
3436
3437 static void
3438 discover_iteration_bound_by_body_walk (struct loop *loop)
3439 {
3440 struct nb_iter_bound *elt;
3441 vec<widest_int> bounds = vNULL;
3442 vec<vec<basic_block> > queues = vNULL;
3443 vec<basic_block> queue = vNULL;
3444 ptrdiff_t queue_index;
3445 ptrdiff_t latch_index = 0;
3446
3447 /* Discover what bounds may interest us. */
3448 for (elt = loop->bounds; elt; elt = elt->next)
3449 {
3450 widest_int bound = elt->bound;
3451
3452 /* An exit terminates the loop at a given iteration, while a non-exit
3453 produces an undefined effect in the next iteration. */
3454 if (!elt->is_exit)
3455 {
3456 bound += 1;
3457 /* If an overflow occurred, ignore the result. */
3458 if (bound == 0)
3459 continue;
3460 }
3461
3462 if (!loop->any_upper_bound
3463 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3464 bounds.safe_push (bound);
3465 }
3466
3467 /* Exit early if there is nothing to do. */
3468 if (!bounds.exists ())
3469 return;
3470
3471 if (dump_file && (dump_flags & TDF_DETAILS))
3472 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3473
3474 /* Sort the bounds in increasing order. */
3475 bounds.qsort (wide_int_cmp);
3476
3477 /* For every basic block record the lowest bound that is guaranteed to
3478 terminate the loop. */
3479
3480 hash_map<basic_block, ptrdiff_t> bb_bounds;
3481 for (elt = loop->bounds; elt; elt = elt->next)
3482 {
3483 widest_int bound = elt->bound;
3484 if (!elt->is_exit)
3485 {
3486 bound += 1;
3487 /* If an overflow occurred, ignore the result. */
3488 if (bound == 0)
3489 continue;
3490 }
3491
3492 if (!loop->any_upper_bound
3493 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3494 {
3495 ptrdiff_t index = bound_index (bounds, bound);
3496 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
3497 if (!entry)
3498 bb_bounds.put (gimple_bb (elt->stmt), index);
3499 else if ((ptrdiff_t)*entry > index)
3500 *entry = index;
3501 }
3502 }
3503
3504 hash_map<basic_block, ptrdiff_t> block_priority;
3505
3506 /* Perform shortest path discovery loop->header ... loop->latch.
3507
3508 The "distance" is given by the smallest loop bound of basic block
3509 present in the path and we look for path with largest smallest bound
3510 on it.
3511
3512 To avoid the need for fibonacci heap on double ints we simply compress
3513 double ints into indexes to BOUNDS array and then represent the queue
3514 as arrays of queues for every index.
3515 Index of BOUNDS.length() means that the execution of given BB has
3516 no bounds determined.
3517
3518 VISITED is a pointer map translating basic block into smallest index
3519 it was inserted into the priority queue with. */
3520 latch_index = -1;
3521
3522 /* Start walk in loop header with index set to infinite bound. */
3523 queue_index = bounds.length ();
3524 queues.safe_grow_cleared (queue_index + 1);
3525 queue.safe_push (loop->header);
3526 queues[queue_index] = queue;
3527 block_priority.put (loop->header, queue_index);
3528
3529 for (; queue_index >= 0; queue_index--)
3530 {
3531 if (latch_index < queue_index)
3532 {
3533 while (queues[queue_index].length ())
3534 {
3535 basic_block bb;
3536 ptrdiff_t bound_index = queue_index;
3537 edge e;
3538 edge_iterator ei;
3539
3540 queue = queues[queue_index];
3541 bb = queue.pop ();
3542
3543 /* OK, we later inserted the BB with lower priority, skip it. */
3544 if (*block_priority.get (bb) > queue_index)
3545 continue;
3546
3547 /* See if we can improve the bound. */
3548 ptrdiff_t *entry = bb_bounds.get (bb);
3549 if (entry && *entry < bound_index)
3550 bound_index = *entry;
3551
3552 /* Insert successors into the queue, watch for the latch edge
3553 and record the greatest index we saw. */
3554 FOR_EACH_EDGE (e, ei, bb->succs)
3555 {
3556 bool insert = false;
3557
3558 if (loop_exit_edge_p (loop, e))
3559 continue;
3560
3561 if (e == loop_latch_edge (loop)
3562 && latch_index < bound_index)
3563 latch_index = bound_index;
3564 else if (!(entry = block_priority.get (e->dest)))
3565 {
3566 insert = true;
3567 block_priority.put (e->dest, bound_index);
3568 }
3569 else if (*entry < bound_index)
3570 {
3571 insert = true;
3572 *entry = bound_index;
3573 }
3574
3575 if (insert)
3576 queues[bound_index].safe_push (e->dest);
3577 }
3578 }
3579 }
3580 queues[queue_index].release ();
3581 }
3582
3583 gcc_assert (latch_index >= 0);
3584 if ((unsigned)latch_index < bounds.length ())
3585 {
3586 if (dump_file && (dump_flags & TDF_DETAILS))
3587 {
3588 fprintf (dump_file, "Found better loop bound ");
3589 print_decu (bounds[latch_index], dump_file);
3590 fprintf (dump_file, "\n");
3591 }
3592 record_niter_bound (loop, bounds[latch_index], false, true);
3593 }
3594
3595 queues.release ();
3596 bounds.release ();
3597 }
3598
3599 /* See if every path across the loop goes through a statement that is known
3600 not to execute in the last iteration. In that case we can decrease the
3601 iteration count by 1. */
3602
3603 static void
3604 maybe_lower_iteration_bound (struct loop *loop)
3605 {
3606 hash_set<gimple *> *not_executed_last_iteration = NULL;
3607 struct nb_iter_bound *elt;
3608 bool found_exit = false;
3609 vec<basic_block> queue = vNULL;
3610 bitmap visited;
3611
3612 /* Collect all statements with interesting (i.e. lower than
3613 nb_iterations_upper_bound) bound on them.
3614
3615 TODO: Due to the way record_estimate chooses estimates to store, the bounds
3616 will always be nb_iterations_upper_bound-1. We can change this to record
3617 also statements not dominating the loop latch and update the walk below
3618 to the shortest path algorithm. */
3619 for (elt = loop->bounds; elt; elt = elt->next)
3620 {
3621 if (!elt->is_exit
3622 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
3623 {
3624 if (!not_executed_last_iteration)
3625 not_executed_last_iteration = new hash_set<gimple *>;
3626 not_executed_last_iteration->add (elt->stmt);
3627 }
3628 }
3629 if (!not_executed_last_iteration)
3630 return;
3631
3632 /* Start DFS walk in the loop header and see if we can reach the
3633 loop latch or any of the exits (including statements with side
3634 effects that may terminate the loop otherwise) without visiting
3635 any of the statements known to have undefined effect on the last
3636 iteration. */
3637 queue.safe_push (loop->header);
3638 visited = BITMAP_ALLOC (NULL);
3639 bitmap_set_bit (visited, loop->header->index);
3640 found_exit = false;
3641
3642 do
3643 {
3644 basic_block bb = queue.pop ();
3645 gimple_stmt_iterator gsi;
3646 bool stmt_found = false;
3647
3648 /* Look for possible exits and statements bounding the execution. */
3649 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3650 {
3651 gimple *stmt = gsi_stmt (gsi);
3652 if (not_executed_last_iteration->contains (stmt))
3653 {
3654 stmt_found = true;
3655 break;
3656 }
3657 if (gimple_has_side_effects (stmt))
3658 {
3659 found_exit = true;
3660 break;
3661 }
3662 }
3663 if (found_exit)
3664 break;
3665
3666 /* If no bounding statement is found, continue the walk. */
3667 if (!stmt_found)
3668 {
3669 edge e;
3670 edge_iterator ei;
3671
3672 FOR_EACH_EDGE (e, ei, bb->succs)
3673 {
3674 if (loop_exit_edge_p (loop, e)
3675 || e == loop_latch_edge (loop))
3676 {
3677 found_exit = true;
3678 break;
3679 }
3680 if (bitmap_set_bit (visited, e->dest->index))
3681 queue.safe_push (e->dest);
3682 }
3683 }
3684 }
3685 while (queue.length () && !found_exit);
3686
3687 /* If every path through the loop reaches a bounding statement before an
3688 exit, then we know the last iteration of the loop would have undefined
3689 effect and we can decrease the number of iterations. */
3690
3691 if (!found_exit)
3692 {
3693 if (dump_file && (dump_flags & TDF_DETAILS))
3694 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
3695 "undefined statement must be executed at the last iteration.\n");
3696 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
3697 false, true);
3698 }
3699
3700 BITMAP_FREE (visited);
3701 queue.release ();
3702 delete not_executed_last_iteration;
3703 }
3704
3705 /* Records estimates on the number of iterations of LOOP. Estimates derived
3706 from undefined behavior are only used with -faggressive-loop-optimizations. */
3707
3708 static void
3709 estimate_numbers_of_iterations_loop (struct loop *loop)
3710 {
3711 vec<edge> exits;
3712 tree niter, type;
3713 unsigned i;
3714 struct tree_niter_desc niter_desc;
3715 edge ex;
3716 widest_int bound;
3717 edge likely_exit;
3718
3719 /* Give up if we have already tried to compute an estimate. */
3720 if (loop->estimate_state != EST_NOT_COMPUTED)
3721 return;
3722
3723 loop->estimate_state = EST_AVAILABLE;
3724
3725 /* If we have a measured profile, use it to estimate the number of
3726 iterations. Normally this is recorded by branch_prob right after
3727 reading the profile. If, however, we found a new loop, record the
3728 information here.
3729
3730 Explicitly check the profile status so that we do not report wrong
3731 prediction hit rates for the guessed loop iteration heuristics.
3732 Do not recompute already recorded bounds - we ought to be better at
3733 updating iteration bounds than at updating the profile in general, and
3734 thus recomputing iteration bounds later in the compilation process would
3735 just introduce random roundoff errors. */
3736 if (!loop->any_estimate
3737 && loop->header->count != 0
3738 && profile_status_for_fn (cfun) >= PROFILE_READ)
3739 {
3740 gcov_type nit = expected_loop_iterations_unbounded (loop);
3741 bound = gcov_type_to_wide_int (nit);
3742 record_niter_bound (loop, bound, true, false);
3743 }
3744
3745 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
3746 to be constant, we avoid bounds implied by undefined behavior and instead
3747 diagnose those loops with -Waggressive-loop-optimizations. */
3748 number_of_latch_executions (loop);
3749
3750 exits = get_loop_exit_edges (loop);
3751 likely_exit = single_likely_exit (loop);
3752 FOR_EACH_VEC_ELT (exits, i, ex)
3753 {
3754 if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
3755 continue;
3756
3757 niter = niter_desc.niter;
3758 type = TREE_TYPE (niter);
3759 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
3760 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
3761 build_int_cst (type, 0),
3762 niter);
3763 record_estimate (loop, niter, niter_desc.max,
3764 last_stmt (ex->src),
3765 true, ex == likely_exit, true);
3766 record_control_iv (loop, &niter_desc);
3767 }
3768 exits.release ();
3769
3770 if (flag_aggressive_loop_optimizations)
3771 infer_loop_bounds_from_undefined (loop);
3772
3773 discover_iteration_bound_by_body_walk (loop);
3774
3775 maybe_lower_iteration_bound (loop);
3776
3777 /* If we know the exact number of iterations of this loop, try not to
3778 break code with undefined behavior by not recording a smaller
3779 maximum number of iterations. */
3780 if (loop->nb_iterations
3781 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
3782 {
3783 loop->any_upper_bound = true;
3784 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
3785 }
3786 }
3787
3788 /* Sets NIT to the estimated number of executions of the latch of the
3789 LOOP. If CONSERVATIVE is true, we must be sure that NIT is at least as
3790 large as the number of iterations. If we have no reliable estimate,
3791 the function returns false, otherwise returns true. */
3792
3793 bool
3794 estimated_loop_iterations (struct loop *loop, widest_int *nit)
3795 {
3796 /* When SCEV information is available, try to update loop iterations
3797 estimate. Otherwise just return whatever we recorded earlier. */
3798 if (scev_initialized_p ())
3799 estimate_numbers_of_iterations_loop (loop);
3800
3801 return (get_estimated_loop_iterations (loop, nit));
3802 }
3803
3804 /* Similar to estimated_loop_iterations, but returns the estimate only
3805 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3806 on the number of iterations of LOOP could not be derived, returns -1. */
3807
3808 HOST_WIDE_INT
3809 estimated_loop_iterations_int (struct loop *loop)
3810 {
3811 widest_int nit;
3812 HOST_WIDE_INT hwi_nit;
3813
3814 if (!estimated_loop_iterations (loop, &nit))
3815 return -1;
3816
3817 if (!wi::fits_shwi_p (nit))
3818 return -1;
3819 hwi_nit = nit.to_shwi ();
3820
3821 return hwi_nit < 0 ? -1 : hwi_nit;
3822 }
3823
3824
3825 /* Sets NIT to an upper bound for the maximum number of executions of the
3826 latch of the LOOP. If we have no reliable estimate, the function returns
3827 false, otherwise returns true. */
3828
3829 bool
3830 max_loop_iterations (struct loop *loop, widest_int *nit)
3831 {
3832 /* When SCEV information is available, try to update loop iterations
3833 estimate. Otherwise just return whatever we recorded earlier. */
3834 if (scev_initialized_p ())
3835 estimate_numbers_of_iterations_loop (loop);
3836
3837 return get_max_loop_iterations (loop, nit);
3838 }
3839
3840 /* Similar to max_loop_iterations, but returns the estimate only
3841 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3842 on the number of iterations of LOOP could not be derived, returns -1. */
3843
3844 HOST_WIDE_INT
3845 max_loop_iterations_int (struct loop *loop)
3846 {
3847 widest_int nit;
3848 HOST_WIDE_INT hwi_nit;
3849
3850 if (!max_loop_iterations (loop, &nit))
3851 return -1;
3852
3853 if (!wi::fits_shwi_p (nit))
3854 return -1;
3855 hwi_nit = nit.to_shwi ();
3856
3857 return hwi_nit < 0 ? -1 : hwi_nit;
3858 }
3859
3860 /* Sets NIT to a likely upper bound for the maximum number of executions of
3861 the latch of the LOOP. If we have no reliable estimate, the function
3862 returns false, otherwise returns true. */
3863
3864 bool
3865 likely_max_loop_iterations (struct loop *loop, widest_int *nit)
3866 {
3867 /* When SCEV information is available, try to update loop iterations
3868 estimate. Otherwise just return whatever we recorded earlier. */
3869 if (scev_initialized_p ())
3870 estimate_numbers_of_iterations_loop (loop);
3871
3872 return get_likely_max_loop_iterations (loop, nit);
3873 }
3874
3875 /* Similar to likely_max_loop_iterations, but returns the estimate only
3876 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
3877 on the number of iterations of LOOP could not be derived, returns -1. */
3878
3879 HOST_WIDE_INT
3880 likely_max_loop_iterations_int (struct loop *loop)
3881 {
3882 widest_int nit;
3883 HOST_WIDE_INT hwi_nit;
3884
3885 if (!likely_max_loop_iterations (loop, &nit))
3886 return -1;
3887
3888 if (!wi::fits_shwi_p (nit))
3889 return -1;
3890 hwi_nit = nit.to_shwi ();
3891
3892 return hwi_nit < 0 ? -1 : hwi_nit;
3893 }
3894
3895 /* Returns an estimate for the number of executions of statements
3896 in the LOOP. For statements before the loop exit, this exceeds
3897 the number of executions of the latch by one. */
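
/* For instance (illustrative only): if the latch of a loop is estimated
   to execute 3 times, a statement at the start of its body executes
   4 times - once on entry plus once after each latch execution. */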
3898
3899 HOST_WIDE_INT
3900 estimated_stmt_executions_int (struct loop *loop)
3901 {
3902 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
3903 HOST_WIDE_INT snit;
3904
3905 if (nit == -1)
3906 return -1;
3907
3908 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
3909
3910 /* If the computation overflows, return -1. */
3911 return snit < 0 ? -1 : snit;
3912 }
3913
3914 /* Sets NIT to the maximum number of executions of the latch of the
3915 LOOP, plus one. If we have no reliable estimate, the function returns
3916 false, otherwise returns true. */
3917
3918 bool
3919 max_stmt_executions (struct loop *loop, widest_int *nit)
3920 {
3921 widest_int nit_minus_one;
3922
3923 if (!max_loop_iterations (loop, nit))
3924 return false;
3925
3926 nit_minus_one = *nit;
3927
3928 *nit += 1;
3929
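/* If the increment wrapped around, *nit is no longer greater than the
   saved value and we report that no usable bound exists. */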
3930 return wi::gtu_p (*nit, nit_minus_one);
3931 }
3932
3933 /* Sets NIT to the estimated maximum number of executions of the latch of the
3934 LOOP, plus one. If we have no likely estimate, the function returns
3935 false, otherwise returns true. */
3936
3937 bool
3938 likely_max_stmt_executions (struct loop *loop, widest_int *nit)
3939 {
3940 widest_int nit_minus_one;
3941
3942 if (!likely_max_loop_iterations (loop, nit))
3943 return false;
3944
3945 nit_minus_one = *nit;
3946
3947 *nit += 1;
3948
3949 return wi::gtu_p (*nit, nit_minus_one);
3950 }
3951
3952 /* Sets NIT to the estimated number of executions of the latch of the
3953 LOOP, plus one. If we have no reliable estimate, the function returns
3954 false, otherwise returns true. */
3955
3956 bool
3957 estimated_stmt_executions (struct loop *loop, widest_int *nit)
3958 {
3959 widest_int nit_minus_one;
3960
3961 if (!estimated_loop_iterations (loop, nit))
3962 return false;
3963
3964 nit_minus_one = *nit;
3965
3966 *nit += 1;
3967
3968 return wi::gtu_p (*nit, nit_minus_one);
3969 }
3970
3971 /* Records estimates on numbers of iterations of loops. */
3972
3973 void
3974 estimate_numbers_of_iterations (void)
3975 {
3976 struct loop *loop;
3977
3978 /* We don't want to issue signed overflow warnings while getting
3979 loop iteration estimates. */
3980 fold_defer_overflow_warnings ();
3981
3982 FOR_EACH_LOOP (loop, 0)
3983 {
3984 estimate_numbers_of_iterations_loop (loop);
3985 }
3986
3987 fold_undefer_and_ignore_overflow_warnings ();
3988 }
3989
3990 /* Returns true if statement S1 dominates statement S2. */
3991
3992 bool
3993 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
3994 {
3995 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
3996
3997 if (!bb1
3998 || s1 == s2)
3999 return true;
4000
4001 if (bb1 == bb2)
4002 {
4003 gimple_stmt_iterator bsi;
4004
4005 if (gimple_code (s2) == GIMPLE_PHI)
4006 return false;
4007
4008 if (gimple_code (s1) == GIMPLE_PHI)
4009 return true;
4010
4011 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
4012 if (gsi_stmt (bsi) == s1)
4013 return true;
4014
4015 return false;
4016 }
4017
4018 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
4019 }
4020
4021 /* Returns true when we can prove that the number of executions of
4022 STMT in the loop is at most NITER, according to the bound on
4023 the number of executions of the statement NITER_BOUND->stmt recorded in
4024 NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
4025
4026 ??? This code can become quite a CPU hog - we can have many bounds,
4027 and a large basic block forces stmt_dominates_stmt_p to be queried
4028 many times on it, so the whole thing is O(n^2) for each
4029 scev_probably_wraps_p invocation (and that can be done n times).
4030
4031 It would make more sense (and give better answers) to remember the BB
4032 bounds computed by discover_iteration_bound_by_body_walk. */
4033
4034 static bool
4035 n_of_executions_at_most (gimple *stmt,
4036 struct nb_iter_bound *niter_bound,
4037 tree niter)
4038 {
4039 widest_int bound = niter_bound->bound;
4040 tree nit_type = TREE_TYPE (niter), e;
4041 enum tree_code cmp;
4042
4043 gcc_assert (TYPE_UNSIGNED (nit_type));
4044
4045 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
4046 the number of iterations is small. */
4047 if (!wi::fits_to_tree_p (bound, nit_type))
4048 return false;
4049
4050 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
4051 times. This means that:
4052
4053 -- if NITER_BOUND->is_exit is true, then everything after
4054 it is executed at most NITER_BOUND->bound times.
4055
4056 -- if NITER_BOUND->is_exit is false and we can prove that whenever STMT
4057 is executed, NITER_BOUND->stmt is executed as well in the same
4058 iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
4059
4060 If we can determine that NITER_BOUND->stmt is always executed
4061 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4062 We conclude this is the case if both statements belong to the same
4063 basic block, STMT comes before NITER_BOUND->stmt, and there are no
4064 statements with side effects in between. */
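
/* A made-up instance of the above: with NITER_BOUND->bound == 9, an exit
   NITER_BOUND->stmt dominating STMT means STMT runs at most 9 times, so
   NITER >= 9 suffices (the GE_EXPR check below). A non-exit bound gives
   at most 10 executions and needs NITER > 9 (GT_EXPR), and when STMT
   precedes NITER_BOUND->stmt in the same block the bound is bumped once
   more before the same strict comparison. */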
4065
4066 if (niter_bound->is_exit)
4067 {
4068 if (stmt == niter_bound->stmt
4069 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4070 return false;
4071 cmp = GE_EXPR;
4072 }
4073 else
4074 {
4075 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4076 {
4077 gimple_stmt_iterator bsi;
4078 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4079 || gimple_code (stmt) == GIMPLE_PHI
4080 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4081 return false;
4082
4083 /* By stmt_dominates_stmt_p we already know that STMT appears
4084 before NITER_BOUND->STMT. We still need to test that the loop
4085 cannot be terminated by a side effect in between. */
4086 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4087 gsi_next (&bsi))
4088 if (gimple_has_side_effects (gsi_stmt (bsi)))
4089 return false;
4090 bound += 1;
4091 if (bound == 0
4092 || !wi::fits_to_tree_p (bound, nit_type))
4093 return false;
4094 }
4095 cmp = GT_EXPR;
4096 }
4097
4098 e = fold_binary (cmp, boolean_type_node,
4099 niter, wide_int_to_tree (nit_type, bound));
4100 return e && integer_nonzerop (e);
4101 }
4102
4103 /* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
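
/* For example: a signed int (whose overflow is undefined by default) and
   any pointer type qualify, while an unsigned int, whose arithmetic wraps
   modulo 2^precision, does not. */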
4104
4105 bool
4106 nowrap_type_p (tree type)
4107 {
4108 if (INTEGRAL_TYPE_P (type)
4109 && TYPE_OVERFLOW_UNDEFINED (type))
4110 return true;
4111
4112 if (POINTER_TYPE_P (type))
4113 return true;
4114
4115 return false;
4116 }
4117
4118 /* Return true if we can prove LOOP is exited before the evolution of the
4119 induction variable {BASE, STEP} overflows with respect to its type bound. */
4120
4121 static bool
4122 loop_exits_before_overflow (tree base, tree step,
4123 gimple *at_stmt, struct loop *loop)
4124 {
4125 widest_int niter;
4126 struct control_iv *civ;
4127 struct nb_iter_bound *bound;
4128 tree e, delta, step_abs, unsigned_base;
4129 tree type = TREE_TYPE (step);
4130 tree unsigned_type, valid_niter;
4131
4132 /* Don't issue signed overflow warnings. */
4133 fold_defer_overflow_warnings ();
4134
4135 /* Compute the number of iterations before we reach the bound of the
4136 type, and verify that the loop is exited before this occurs. */
4137 unsigned_type = unsigned_type_for (type);
4138 unsigned_base = fold_convert (unsigned_type, base);
4139
4140 if (tree_int_cst_sign_bit (step))
4141 {
4142 tree extreme = fold_convert (unsigned_type,
4143 lower_bound_in_type (type, type));
4144 delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4145 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4146 fold_convert (unsigned_type, step));
4147 }
4148 else
4149 {
4150 tree extreme = fold_convert (unsigned_type,
4151 upper_bound_in_type (type, type));
4152 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4153 step_abs = fold_convert (unsigned_type, step);
4154 }
4155
4156 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
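
/* Worked example with made-up numbers: for a signed char IV with base 100
   and step 10, delta = 127 - 100 = 27 and valid_niter = 27 / 10 = 2, so
   the checks below succeed only if the maximum number of latch iterations
   is known to be smaller than 2. */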
4157
4158 estimate_numbers_of_iterations_loop (loop);
4159
4160 if (max_loop_iterations (loop, &niter)
4161 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4162 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4163 wide_int_to_tree (TREE_TYPE (valid_niter),
4164 niter))) != NULL
4165 && integer_nonzerop (e))
4166 {
4167 fold_undefer_and_ignore_overflow_warnings ();
4168 return true;
4169 }
4170 if (at_stmt)
4171 for (bound = loop->bounds; bound; bound = bound->next)
4172 {
4173 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4174 {
4175 fold_undefer_and_ignore_overflow_warnings ();
4176 return true;
4177 }
4178 }
4179 fold_undefer_and_ignore_overflow_warnings ();
4180
4181 /* Try to prove the loop is exited before {base, step} overflows, with the
4182 help of the analyzed loop control IVs. This is done only for IVs with a
4183 constant step because otherwise we don't have the information. */
4184 if (TREE_CODE (step) == INTEGER_CST)
4185 {
4186 tree stop = (TREE_CODE (base) == SSA_NAME) ? base : NULL;
4187
4188 for (civ = loop->control_ivs; civ; civ = civ->next)
4189 {
4190 enum tree_code code;
4191 tree stepped, extreme, civ_type = TREE_TYPE (civ->step);
4192
4193 /* Have to consider type difference because operand_equal_p ignores
4194 that for constants. */
4195 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4196 || element_precision (type) != element_precision (civ_type))
4197 continue;
4198
4199 /* Only consider control IVs with the same step. */
4200 if (!operand_equal_p (step, civ->step, 0))
4201 continue;
4202
4203 /* Done proving if this is a no-overflow control IV. */
4204 if (operand_equal_p (base, civ->base, 0)
4205 /* The control IV is recorded after expanding simple operations,
4206 so here we compare it against the expanded base too. */
4207 || operand_equal_p (expand_simple_operations (base),
4208 civ->base, 0))
4209 return true;
4210
4211 /* If this is a before-stepping control IV, in other words, we have
4212
4213 {civ_base, step} = {base + step, step}
4214
4215 then because civ {base + step, step} doesn't overflow during the loop
4216 iterations, {base, step} will not overflow if we can prove that the
4217 operation "base + step" does not overflow. Specifically, we try
4218 to prove that the conditions below are satisfied:
4219
4220 base <= UPPER_BOUND (type) - step ;;step > 0
4221 base >= LOWER_BOUND (type) - step ;;step < 0
4222
4223 by proving the reverse conditions are false using the loop's initial
4224 condition. */
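/* Illustration with made-up values: for a signed char IV with step 1,
   if the loop's initial condition guarantees base <= 100, the reverse
   condition base > 127 - 1 simplifies to false under that guard, so
   base + 1 cannot overflow and neither can {base, 1}. */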
4225 if (POINTER_TYPE_P (TREE_TYPE (base)))
4226 code = POINTER_PLUS_EXPR;
4227 else
4228 code = PLUS_EXPR;
4229
4230 stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4231 if (operand_equal_p (stepped, civ->base, 0))
4232 {
4233 if (tree_int_cst_sign_bit (step))
4234 {
4235 code = LT_EXPR;
4236 extreme = lower_bound_in_type (type, type);
4237 }
4238 else
4239 {
4240 code = GT_EXPR;
4241 extreme = upper_bound_in_type (type, type);
4242 }
4243 extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4244 e = fold_build2 (code, boolean_type_node, base, extreme);
4245 e = simplify_using_initial_conditions (loop, e, stop);
4246 if (integer_zerop (e))
4247 return true;
4248 }
4249 }
4250 }
4251
4252 return false;
4253 }
4254
4255 /* Return false only when the induction variable BASE + STEP * I is
4256 known to not overflow: i.e. when the number of iterations is small
4257 enough with respect to the step and initial condition in order to
4258 keep the evolution confined in TYPE's bounds. Return true when the
4259 iv is known to overflow or when the property is not computable.
4260
4261 USE_OVERFLOW_SEMANTICS is true if this function should assume that
4262 the rules for overflow of the given language apply (e.g., that signed
4263 arithmetic in C does not overflow). */
4264
4265 bool
4266 scev_probably_wraps_p (tree base, tree step,
4267 gimple *at_stmt, struct loop *loop,
4268 bool use_overflow_semantics)
4269 {
4270 /* FIXME: We really need something like
4271 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4272
4273 We used to test for the following situation that frequently appears
4274 during address arithmetic:
4275
4276 D.1621_13 = (long unsigned intD.4) D.1620_12;
4277 D.1622_14 = D.1621_13 * 8;
4278 D.1623_15 = (doubleD.29 *) D.1622_14;
4279
4280 And derived that the sequence corresponding to D_14
4281 can be proved to not wrap because it is used for computing a
4282 memory access; however, this is not really the case -- for example,
4283 if D_12 = (unsigned char) [254,+,1], then D_14 has values
4284 2032, 2040, 0, 8, ..., but the code is still legal. */
4285
4286 if (chrec_contains_undetermined (base)
4287 || chrec_contains_undetermined (step))
4288 return true;
4289
4290 if (integer_zerop (step))
4291 return false;
4292
4293 /* If we can use the fact that signed and pointer arithmetic does not
4294 wrap, we are done. */
4295 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
4296 return false;
4297
4298 /* To be able to use estimates on number of iterations of the loop,
4299 we must have an upper bound on the absolute value of the step. */
4300 if (TREE_CODE (step) != INTEGER_CST)
4301 return true;
4302
4303 if (loop_exits_before_overflow (base, step, at_stmt, loop))
4304 return false;
4305
4306 /* At this point we still don't have a proof that the iv does not
4307 overflow: give up. */
4308 return true;
4309 }
4310
4311 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
4312
4313 void
4314 free_numbers_of_iterations_estimates_loop (struct loop *loop)
4315 {
4316 struct control_iv *civ;
4317 struct nb_iter_bound *bound;
4318
4319 loop->nb_iterations = NULL;
4320 loop->estimate_state = EST_NOT_COMPUTED;
4321 for (bound = loop->bounds; bound;)
4322 {
4323 struct nb_iter_bound *next = bound->next;
4324 ggc_free (bound);
4325 bound = next;
4326 }
4327 loop->bounds = NULL;
4328
4329 for (civ = loop->control_ivs; civ;)
4330 {
4331 struct control_iv *next = civ->next;
4332 ggc_free (civ);
4333 civ = next;
4334 }
4335 loop->control_ivs = NULL;
4336 }
4337
4338 /* Frees the information on upper bounds on numbers of iterations of loops. */
4339
4340 void
4341 free_numbers_of_iterations_estimates (function *fn)
4342 {
4343 struct loop *loop;
4344
4345 FOR_EACH_LOOP_FN (fn, loop, 0)
4346 {
4347 free_numbers_of_iterations_estimates_loop (loop);
4348 }
4349 }
4350
4351 /* Substitute value VAL for ssa name NAME inside expressions held
4352 at LOOP. */
4353
4354 void
4355 substitute_in_loop_info (struct loop *loop, tree name, tree val)
4356 {
4357 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
4358 }