1 /* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "diagnostic-core.h"
31 #include "stor-layout.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "intl.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-niter.h"
40 #include "tree-ssa-loop.h"
41 #include "cfgloop.h"
42 #include "tree-chrec.h"
43 #include "tree-scalar-evolution.h"
44 #include "params.h"
45 #include "tree-dfa.h"
46
47
48 /* The maximum number of dominator BBs we search for conditions
49 of loop header copies we use for simplifying a conditional
50 expression. */
51 #define MAX_DOMINATORS_TO_WALK 8
52
53 /*
54
55 Analysis of number of iterations of an affine exit test.
56
57 */
58
59 /* Bounds on some value, BELOW <= X <= UP. */
60
61 struct bounds
62 {
63 mpz_t below, up;
64 };
65
66 static bool number_of_iterations_popcount (loop_p loop, edge exit,
67 enum tree_code code,
68 struct tree_niter_desc *niter);
69
70
71 /* Splits expression EXPR to a variable part VAR and constant OFFSET. */
72
73 static void
74 split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
75 {
76 tree type = TREE_TYPE (expr);
77 tree op0, op1;
78 bool negate = false;
79
80 *var = expr;
81 mpz_set_ui (offset, 0);
82
83 switch (TREE_CODE (expr))
84 {
85 case MINUS_EXPR:
86 negate = true;
87 /* Fallthru. */
88
89 case PLUS_EXPR:
90 case POINTER_PLUS_EXPR:
91 op0 = TREE_OPERAND (expr, 0);
92 op1 = TREE_OPERAND (expr, 1);
93
94 if (TREE_CODE (op1) != INTEGER_CST)
95 break;
96
97 *var = op0;
98 /* Always sign extend the offset. */
99 wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
100 if (negate)
101 mpz_neg (offset, offset);
102 break;
103
104 case INTEGER_CST:
105 *var = build_int_cst_type (type, 0);
106 wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
107 break;
108
109 default:
110 break;
111 }
112 }
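
/* Illustrative examples (not from the original sources): for EXPR
   "i_5 + 7" this sets *VAR to i_5 and OFFSET to 7; for "i_5 - 3",
   OFFSET becomes -3; for the constant 42, *VAR becomes the zero
   constant of TYPE and OFFSET 42.  Any other shape of EXPR is left
   whole in *VAR with OFFSET 0.  */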
113
114 /* From condition C0 CMP C1 derives information regarding the value range
115 of VAR, which is of TYPE. Results are stored in to BELOW and UP. */
116
117 static void
118 refine_value_range_using_guard (tree type, tree var,
119 tree c0, enum tree_code cmp, tree c1,
120 mpz_t below, mpz_t up)
121 {
122 tree varc0, varc1, ctype;
123 mpz_t offc0, offc1;
124 mpz_t mint, maxt, minc1, maxc1;
125 wide_int minv, maxv;
126 bool no_wrap = nowrap_type_p (type);
127 bool c0_ok, c1_ok;
128 signop sgn = TYPE_SIGN (type);
129
130 switch (cmp)
131 {
132 case LT_EXPR:
133 case LE_EXPR:
134 case GT_EXPR:
135 case GE_EXPR:
136 STRIP_SIGN_NOPS (c0);
137 STRIP_SIGN_NOPS (c1);
138 ctype = TREE_TYPE (c0);
139 if (!useless_type_conversion_p (ctype, type))
140 return;
141
142 break;
143
144 case EQ_EXPR:
145 /* We could derive quite precise information from EQ_EXPR; however,
146 such a guard is unlikely to appear, so we do not bother with
147 handling it. */
148 return;
149
150 case NE_EXPR:
151 /* NE_EXPR comparisons do not contain much useful information,
152 except for the cases of comparing with the bounds. */
153 if (TREE_CODE (c1) != INTEGER_CST
154 || !INTEGRAL_TYPE_P (type))
155 return;
156
157 /* Ensure that the condition speaks about an expression in the same
158 type as VAR. */
159 ctype = TREE_TYPE (c0);
160 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
161 return;
162 c0 = fold_convert (type, c0);
163 c1 = fold_convert (type, c1);
164
165 if (operand_equal_p (var, c0, 0))
166 {
167 mpz_t valc1;
168
169 /* Case of comparing VAR with its below/up bounds. */
170 mpz_init (valc1);
171 wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
172 if (mpz_cmp (valc1, below) == 0)
173 cmp = GT_EXPR;
174 if (mpz_cmp (valc1, up) == 0)
175 cmp = LT_EXPR;
176
177 mpz_clear (valc1);
178 }
179 else
180 {
181 /* Case of comparing with the bounds of the type. */
182 wide_int min = wi::min_value (type);
183 wide_int max = wi::max_value (type);
184
185 if (wi::to_wide (c1) == min)
186 cmp = GT_EXPR;
187 if (wi::to_wide (c1) == max)
188 cmp = LT_EXPR;
189 }
190
191 /* Quick return if no useful information. */
192 if (cmp == NE_EXPR)
193 return;
194
195 break;
196
197 default:
198 return;
199 }
200
201 mpz_init (offc0);
202 mpz_init (offc1);
203 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
204 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
205
206 /* We are only interested in comparisons of expressions based on VAR. */
207 if (operand_equal_p (var, varc1, 0))
208 {
209 std::swap (varc0, varc1);
210 mpz_swap (offc0, offc1);
211 cmp = swap_tree_comparison (cmp);
212 }
213 else if (!operand_equal_p (var, varc0, 0))
214 {
215 mpz_clear (offc0);
216 mpz_clear (offc1);
217 return;
218 }
219
220 mpz_init (mint);
221 mpz_init (maxt);
222 get_type_static_bounds (type, mint, maxt);
223 mpz_init (minc1);
224 mpz_init (maxc1);
225 /* Set up range information for varc1. */
226 if (integer_zerop (varc1))
227 {
228 wi::to_mpz (0, minc1, TYPE_SIGN (type));
229 wi::to_mpz (0, maxc1, TYPE_SIGN (type));
230 }
231 else if (TREE_CODE (varc1) == SSA_NAME
232 && INTEGRAL_TYPE_P (type)
233 && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
234 {
235 gcc_assert (wi::le_p (minv, maxv, sgn));
236 wi::to_mpz (minv, minc1, sgn);
237 wi::to_mpz (maxv, maxc1, sgn);
238 }
239 else
240 {
241 mpz_set (minc1, mint);
242 mpz_set (maxc1, maxt);
243 }
244
245 /* Compute valid range information for varc1 + offc1. Note nothing
246 useful can be derived if it overflows or underflows. Overflow or
247 underflow could happen when:
248
249 offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
250 offc1 < 0 && varc1 + offc1 < MIN_VAL (type). */
251 mpz_add (minc1, minc1, offc1);
252 mpz_add (maxc1, maxc1, offc1);
253 c1_ok = (no_wrap
254 || mpz_sgn (offc1) == 0
255 || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
256 || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
257 if (!c1_ok)
258 goto end;
259
260 if (mpz_cmp (minc1, mint) < 0)
261 mpz_set (minc1, mint);
262 if (mpz_cmp (maxc1, maxt) > 0)
263 mpz_set (maxc1, maxt);
264
265 if (cmp == LT_EXPR)
266 {
267 cmp = LE_EXPR;
268 mpz_sub_ui (maxc1, maxc1, 1);
269 }
270 if (cmp == GT_EXPR)
271 {
272 cmp = GE_EXPR;
273 mpz_add_ui (minc1, minc1, 1);
274 }
275
276 /* Compute range information for varc0. If there is no overflow,
277 the condition implied that
278
279 (varc0) cmp (varc1 + offc1 - offc0)
280
281 We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
282 or the below bound if cmp is GE_EXPR.
283
284 To prove there is no overflow/underflow, we need to check the
285 four cases below:
286 1) cmp == LE_EXPR && offc0 > 0
287
288 (varc0 + offc0) doesn't overflow
289 && (varc1 + offc1 - offc0) doesn't underflow
290
291 2) cmp == LE_EXPR && offc0 < 0
292
293 (varc0 + offc0) doesn't underflow
294 && (varc1 + offc1 - offc0) doesn't overflow
295
296 In this case, (varc0 + offc0) will never underflow if we can
297 prove (varc1 + offc1 - offc0) doesn't overflow.
298
299 3) cmp == GE_EXPR && offc0 < 0
300
301 (varc0 + offc0) doesn't underflow
302 && (varc1 + offc1 - offc0) doesn't overflow
303
304 4) cmp == GE_EXPR && offc0 > 0
305
306 (varc0 + offc0) doesn't overflow
307 && (varc1 + offc1 - offc0) doesn't underflow
308
309 In this case, (varc0 + offc0) will never overflow if we can
310 prove (varc1 + offc1 - offc0) doesn't underflow.
311
312 Note we only handle cases 2 and 4 in the code below. */
313
314 mpz_sub (minc1, minc1, offc0);
315 mpz_sub (maxc1, maxc1, offc0);
316 c0_ok = (no_wrap
317 || mpz_sgn (offc0) == 0
318 || (cmp == LE_EXPR
319 && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
320 || (cmp == GE_EXPR
321 && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
322 if (!c0_ok)
323 goto end;
324
325 if (cmp == LE_EXPR)
326 {
327 if (mpz_cmp (up, maxc1) > 0)
328 mpz_set (up, maxc1);
329 }
330 else
331 {
332 if (mpz_cmp (below, minc1) < 0)
333 mpz_set (below, minc1);
334 }
335
336 end:
337 mpz_clear (mint);
338 mpz_clear (maxt);
339 mpz_clear (minc1);
340 mpz_clear (maxc1);
341 mpz_clear (offc0);
342 mpz_clear (offc1);
343 }
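
/* A hypothetical example of the refinement above: assume VAR has a signed
   type with undefined overflow and initial range [0, 1000], and the loop
   header is dominated by a taken guard "VAR + 1 <= 10".  Then varc0 = VAR,
   offc0 = 1, varc1 = 0, offc1 = 10, and the LE_EXPR path tightens UP to
   10 - 1 = 9, i.e., VAR <= 9.  */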
344
345 /* Stores estimate on the minimum/maximum value of the expression VAR + OFF
346 in TYPE to MIN and MAX. */
347
348 static void
349 determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
350 mpz_t min, mpz_t max)
351 {
352 int cnt = 0;
353 mpz_t minm, maxm;
354 basic_block bb;
355 wide_int minv, maxv;
356 enum value_range_kind rtype = VR_VARYING;
357
358 /* If the expression is a constant, we know its value exactly. */
359 if (integer_zerop (var))
360 {
361 mpz_set (min, off);
362 mpz_set (max, off);
363 return;
364 }
365
366 get_type_static_bounds (type, min, max);
367
368 /* See if we have some range info from VRP. */
369 if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
370 {
371 edge e = loop_preheader_edge (loop);
372 signop sgn = TYPE_SIGN (type);
373 gphi_iterator gsi;
374
375 /* Either for VAR itself... */
376 rtype = get_range_info (var, &minv, &maxv);
377 /* Or for PHI results in loop->header where VAR is used as
378 PHI argument from the loop preheader edge. */
379 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
380 {
381 gphi *phi = gsi.phi ();
382 wide_int minc, maxc;
383 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
384 && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
385 == VR_RANGE))
386 {
387 if (rtype != VR_RANGE)
388 {
389 rtype = VR_RANGE;
390 minv = minc;
391 maxv = maxc;
392 }
393 else
394 {
395 minv = wi::max (minv, minc, sgn);
396 maxv = wi::min (maxv, maxc, sgn);
397 /* If the PHI result range is inconsistent with
398 the VAR range, give up on looking at the PHI
399 results. This can happen if VR_UNDEFINED is
400 involved. */
401 if (wi::gt_p (minv, maxv, sgn))
402 {
403 rtype = get_range_info (var, &minv, &maxv);
404 break;
405 }
406 }
407 }
408 }
409 mpz_init (minm);
410 mpz_init (maxm);
411 if (rtype != VR_RANGE)
412 {
413 mpz_set (minm, min);
414 mpz_set (maxm, max);
415 }
416 else
417 {
418 gcc_assert (wi::le_p (minv, maxv, sgn));
419 wi::to_mpz (minv, minm, sgn);
420 wi::to_mpz (maxv, maxm, sgn);
421 }
422 /* Now walk the dominators of the loop header and use the entry
423 guards to refine the estimates. */
424 for (bb = loop->header;
425 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
426 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
427 {
428 edge e;
429 tree c0, c1;
430 gimple *cond;
431 enum tree_code cmp;
432
433 if (!single_pred_p (bb))
434 continue;
435 e = single_pred_edge (bb);
436
437 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
438 continue;
439
440 cond = last_stmt (e->src);
441 c0 = gimple_cond_lhs (cond);
442 cmp = gimple_cond_code (cond);
443 c1 = gimple_cond_rhs (cond);
444
445 if (e->flags & EDGE_FALSE_VALUE)
446 cmp = invert_tree_comparison (cmp, false);
447
448 refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
449 ++cnt;
450 }
451
452 mpz_add (minm, minm, off);
453 mpz_add (maxm, maxm, off);
454 /* If the computation cannot wrap or off is zero, then this
455 is always fine. If off is negative and minv + off isn't
456 smaller than type's minimum, or off is positive and
457 maxv + off isn't bigger than type's maximum, use the more
458 precise range too. */
459 if (nowrap_type_p (type)
460 || mpz_sgn (off) == 0
461 || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
462 || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
463 {
464 mpz_set (min, minm);
465 mpz_set (max, maxm);
466 mpz_clear (minm);
467 mpz_clear (maxm);
468 return;
469 }
470 mpz_clear (minm);
471 mpz_clear (maxm);
472 }
473
474 /* If the computation may wrap, we know nothing about the value, except for
475 the range of the type. */
476 if (!nowrap_type_p (type))
477 return;
478
479 /* Since the addition of OFF does not wrap, if OFF is positive, then we may
480 add it to MIN, otherwise to MAX. */
481 if (mpz_sgn (off) < 0)
482 mpz_add (max, max, off);
483 else
484 mpz_add (min, min, off);
485 }
486
487 /* Stores the bounds on the difference of the values of the expressions
488 (var + X) and (var + Y), computed in TYPE, to BNDS. */
489
490 static void
491 bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
492 bounds *bnds)
493 {
494 int rel = mpz_cmp (x, y);
495 bool may_wrap = !nowrap_type_p (type);
496 mpz_t m;
497
498 /* If X == Y, then the expressions are always equal.
499 If X > Y, there are the following possibilities:
500 a) neither of var + X and var + Y overflow or underflow, or both of
501 them do. Then their difference is X - Y.
502 b) var + X overflows, and var + Y does not. Then the values of the
503 expressions are var + X - M and var + Y, where M is the range of
504 the type, and their difference is X - Y - M.
505 c) var + Y underflows and var + X does not. Then var + Y is
506 represented as var + Y + M, and their difference again is X - Y - M.
507 Therefore, if the arithmetic in the type does not overflow, the
508 bounds are (X - Y, X - Y); otherwise they are (X - Y - M, X - Y).
509 Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
510 (X - Y, X - Y + M). */
511
512 if (rel == 0)
513 {
514 mpz_set_ui (bnds->below, 0);
515 mpz_set_ui (bnds->up, 0);
516 return;
517 }
518
519 mpz_init (m);
520 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
521 mpz_add_ui (m, m, 1);
522 mpz_sub (bnds->up, x, y);
523 mpz_set (bnds->below, bnds->up);
524
525 if (may_wrap)
526 {
527 if (rel > 0)
528 mpz_sub (bnds->below, bnds->below, m);
529 else
530 mpz_add (bnds->up, bnds->up, m);
531 }
532
533 mpz_clear (m);
534 }
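
/* Worked instance (illustration): in an 8-bit unsigned type, M = 256.
   With X = 5 and Y = 2 the bounds start as (3, 3); since the arithmetic
   may wrap and X > Y, BELOW is widened by M to 3 - 256 = -253, giving
   the final bounds (-253, 3).  */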
535
536 /* From condition C0 CMP C1 derives information regarding the
537 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
538 and stores it to BNDS. */
539
540 static void
541 refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
542 tree vary, mpz_t offy,
543 tree c0, enum tree_code cmp, tree c1,
544 bounds *bnds)
545 {
546 tree varc0, varc1, ctype;
547 mpz_t offc0, offc1, loffx, loffy, bnd;
548 bool lbound = false;
549 bool no_wrap = nowrap_type_p (type);
550 bool x_ok, y_ok;
551
552 switch (cmp)
553 {
554 case LT_EXPR:
555 case LE_EXPR:
556 case GT_EXPR:
557 case GE_EXPR:
558 STRIP_SIGN_NOPS (c0);
559 STRIP_SIGN_NOPS (c1);
560 ctype = TREE_TYPE (c0);
561 if (!useless_type_conversion_p (ctype, type))
562 return;
563
564 break;
565
566 case EQ_EXPR:
567 /* We could derive quite precise information from EQ_EXPR; however, such
568 a guard is unlikely to appear, so we do not bother with handling
569 it. */
570 return;
571
572 case NE_EXPR:
573 /* NE_EXPR comparisons do not contain much useful information, except for
574 the special case of comparing with the bounds of the type. */
575 if (TREE_CODE (c1) != INTEGER_CST
576 || !INTEGRAL_TYPE_P (type))
577 return;
578
579 /* Ensure that the condition speaks about an expression in the same type
580 as X and Y. */
581 ctype = TREE_TYPE (c0);
582 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
583 return;
584 c0 = fold_convert (type, c0);
585 c1 = fold_convert (type, c1);
586
587 if (TYPE_MIN_VALUE (type)
588 && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
589 {
590 cmp = GT_EXPR;
591 break;
592 }
593 if (TYPE_MAX_VALUE (type)
594 && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
595 {
596 cmp = LT_EXPR;
597 break;
598 }
599
600 return;
601 default:
602 return;
603 }
604
605 mpz_init (offc0);
606 mpz_init (offc1);
607 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
608 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
609
610 /* We are only interested in comparisons of expressions based on VARX and
611 VARY. TODO -- we might also be able to derive some bounds from
612 expressions containing just one of the variables. */
613
614 if (operand_equal_p (varx, varc1, 0))
615 {
616 std::swap (varc0, varc1);
617 mpz_swap (offc0, offc1);
618 cmp = swap_tree_comparison (cmp);
619 }
620
621 if (!operand_equal_p (varx, varc0, 0)
622 || !operand_equal_p (vary, varc1, 0))
623 goto end;
624
625 mpz_init_set (loffx, offx);
626 mpz_init_set (loffy, offy);
627
628 if (cmp == GT_EXPR || cmp == GE_EXPR)
629 {
630 std::swap (varx, vary);
631 mpz_swap (offc0, offc1);
632 mpz_swap (loffx, loffy);
633 cmp = swap_tree_comparison (cmp);
634 lbound = true;
635 }
636
637 /* If there is no overflow, the condition implies that
638
639 (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
640
641 The overflows and underflows may complicate things a bit; each
642 overflow decreases the appropriate offset by M, and underflow
643 increases it by M. The above inequality would not necessarily be
644 true if
645
646 -- VARX + OFFX underflows and VARX + OFFC0 does not, or
647 VARX + OFFC0 overflows, but VARX + OFFX does not.
648 This may only happen if OFFX < OFFC0.
649 -- VARY + OFFY overflows and VARY + OFFC1 does not, or
650 VARY + OFFC1 underflows and VARY + OFFY does not.
651 This may only happen if OFFY > OFFC1. */
652
653 if (no_wrap)
654 {
655 x_ok = true;
656 y_ok = true;
657 }
658 else
659 {
660 x_ok = (integer_zerop (varx)
661 || mpz_cmp (loffx, offc0) >= 0);
662 y_ok = (integer_zerop (vary)
663 || mpz_cmp (loffy, offc1) <= 0);
664 }
665
666 if (x_ok && y_ok)
667 {
668 mpz_init (bnd);
669 mpz_sub (bnd, loffx, loffy);
670 mpz_add (bnd, bnd, offc1);
671 mpz_sub (bnd, bnd, offc0);
672
673 if (cmp == LT_EXPR)
674 mpz_sub_ui (bnd, bnd, 1);
675
676 if (lbound)
677 {
678 mpz_neg (bnd, bnd);
679 if (mpz_cmp (bnds->below, bnd) < 0)
680 mpz_set (bnds->below, bnd);
681 }
682 else
683 {
684 if (mpz_cmp (bnd, bnds->up) < 0)
685 mpz_set (bnds->up, bnd);
686 }
687 mpz_clear (bnd);
688 }
689
690 mpz_clear (loffx);
691 mpz_clear (loffy);
692 end:
693 mpz_clear (offc0);
694 mpz_clear (offc1);
695 }
696
697 /* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
698 The subtraction is considered to be performed in arbitrary precision,
699 without overflows.
700
701 We do not attempt to be too clever regarding the value ranges of X and
702 Y; most of the time, they are just integers or ssa names offset by an
703 integer. However, we try to use the information contained in the
704 comparisons before the loop (usually created by loop header copying). */
705
706 static void
707 bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
708 {
709 tree type = TREE_TYPE (x);
710 tree varx, vary;
711 mpz_t offx, offy;
712 mpz_t minx, maxx, miny, maxy;
713 int cnt = 0;
714 edge e;
715 basic_block bb;
716 tree c0, c1;
717 gimple *cond;
718 enum tree_code cmp;
719
720 /* Get rid of unnecessary casts, but preserve the value of
721 the expressions. */
722 STRIP_SIGN_NOPS (x);
723 STRIP_SIGN_NOPS (y);
724
725 mpz_init (bnds->below);
726 mpz_init (bnds->up);
727 mpz_init (offx);
728 mpz_init (offy);
729 split_to_var_and_offset (x, &varx, offx);
730 split_to_var_and_offset (y, &vary, offy);
731
732 if (!integer_zerop (varx)
733 && operand_equal_p (varx, vary, 0))
734 {
735 /* Special case VARX == VARY -- we just need to compare the
736 offsets. Matters are a bit more complicated when the
737 addition of the offsets may wrap. */
738 bound_difference_of_offsetted_base (type, offx, offy, bnds);
739 }
740 else
741 {
742 /* Otherwise, use the value ranges to determine the initial
743 estimates on below and up. */
744 mpz_init (minx);
745 mpz_init (maxx);
746 mpz_init (miny);
747 mpz_init (maxy);
748 determine_value_range (loop, type, varx, offx, minx, maxx);
749 determine_value_range (loop, type, vary, offy, miny, maxy);
750
751 mpz_sub (bnds->below, minx, maxy);
752 mpz_sub (bnds->up, maxx, miny);
753 mpz_clear (minx);
754 mpz_clear (maxx);
755 mpz_clear (miny);
756 mpz_clear (maxy);
757 }
758
759 /* If both X and Y are constants, we cannot get any more precise. */
760 if (integer_zerop (varx) && integer_zerop (vary))
761 goto end;
762
763 /* Now walk the dominators of the loop header and use the entry
764 guards to refine the estimates. */
765 for (bb = loop->header;
766 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
767 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
768 {
769 if (!single_pred_p (bb))
770 continue;
771 e = single_pred_edge (bb);
772
773 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
774 continue;
775
776 cond = last_stmt (e->src);
777 c0 = gimple_cond_lhs (cond);
778 cmp = gimple_cond_code (cond);
779 c1 = gimple_cond_rhs (cond);
780
781 if (e->flags & EDGE_FALSE_VALUE)
782 cmp = invert_tree_comparison (cmp, false);
783
784 refine_bounds_using_guard (type, varx, offx, vary, offy,
785 c0, cmp, c1, bnds);
786 ++cnt;
787 }
788
789 end:
790 mpz_clear (offx);
791 mpz_clear (offy);
792 }
793
794 /* Update the bounds in BNDS that restrict the value of X to the bounds
795 that restrict the value of X + DELTA. X can be obtained as a
796 difference of two values in TYPE. */
797
798 static void
799 bounds_add (bounds *bnds, const widest_int &delta, tree type)
800 {
801 mpz_t mdelta, max;
802
803 mpz_init (mdelta);
804 wi::to_mpz (delta, mdelta, SIGNED);
805
806 mpz_init (max);
807 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
808
809 mpz_add (bnds->up, bnds->up, mdelta);
810 mpz_add (bnds->below, bnds->below, mdelta);
811
812 if (mpz_cmp (bnds->up, max) > 0)
813 mpz_set (bnds->up, max);
814
815 mpz_neg (max, max);
816 if (mpz_cmp (bnds->below, max) < 0)
817 mpz_set (bnds->below, max);
818
819 mpz_clear (mdelta);
820 mpz_clear (max);
821 }
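
/* E.g. (illustration): for DELTA = 1 in an 8-bit type, bounds (0, 255)
   become (1, 255): both endpoints are shifted by DELTA and the result is
   clamped to [-(2**8 - 1), 2**8 - 1], the possible range of a difference
   of two 8-bit values.  */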
822
823 /* Update the bounds in BNDS that restrict the value of X to the bounds
824 that restrict the value of -X. */
825
826 static void
827 bounds_negate (bounds *bnds)
828 {
829 mpz_t tmp;
830
831 mpz_init_set (tmp, bnds->up);
832 mpz_neg (bnds->up, bnds->below);
833 mpz_neg (bnds->below, tmp);
834 mpz_clear (tmp);
835 }
836
837 /* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
838
839 static tree
840 inverse (tree x, tree mask)
841 {
842 tree type = TREE_TYPE (x);
843 tree rslt;
844 unsigned ctr = tree_floor_log2 (mask);
845
846 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
847 {
848 unsigned HOST_WIDE_INT ix;
849 unsigned HOST_WIDE_INT imask;
850 unsigned HOST_WIDE_INT irslt = 1;
851
852 gcc_assert (cst_and_fits_in_hwi (x));
853 gcc_assert (cst_and_fits_in_hwi (mask));
854
855 ix = int_cst_value (x);
856 imask = int_cst_value (mask);
857
858 for (; ctr; ctr--)
859 {
860 irslt *= ix;
861 ix *= ix;
862 }
863 irslt &= imask;
864
865 rslt = build_int_cst_type (type, irslt);
866 }
867 else
868 {
869 rslt = build_int_cst (type, 1);
870 for (; ctr; ctr--)
871 {
872 rslt = int_const_binop (MULT_EXPR, rslt, x);
873 x = int_const_binop (MULT_EXPR, x, x);
874 }
875 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
876 }
877
878 return rslt;
879 }
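
/* A minimal standalone sketch (illustration only, not part of the
   sources; assumes <stdint.h>'s uint64_t is available) of the same
   square-and-multiply scheme, specialized to 64-bit arithmetic.  For
   odd X we have X ** (2**64) == 1 (mod 2**64), so the accumulated
   X ** (2**64 - 1) is the multiplicative inverse of X modulo 2**64.  */
#if 0
static uint64_t
inverse_u64 (uint64_t x)
{
  uint64_t rslt = 1;

  /* After K iterations, RSLT == X0 ** (2**K - 1) and X == X0 ** (2**K),
     where X0 is the initial value of X.  */
  for (unsigned ctr = 64; ctr; ctr--)
    {
      rslt *= x;
      x *= x;
    }
  return rslt;
}
#endif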
880
881 /* Derives the upper bound BND on the number of executions of loop with exit
882 condition S * i <> C. If NO_OVERFLOW is true, then the control variable of
883 the loop does not overflow. EXIT_MUST_BE_TAKEN is true if we are guaranteed
884 that the loop ends through this exit, i.e., the induction variable ever
885 reaches the value of C.
886
887 The value C is equal to final - base, where final and base are the final and
888 initial value of the actual induction variable in the analysed loop. BNDS
889 bounds the value of this difference when computed in signed type with
890 unbounded range, while the computation of C is performed in an unsigned
891 type with the range matching the range of the type of the induction variable.
892 In particular, BNDS.up contains an upper bound on C in the following cases:
893 -- if the iv must reach its final value without overflow, i.e., if
894 NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
895 -- if final >= base, which we know to hold when BNDS.below >= 0. */
896
897 static void
898 number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
899 bounds *bnds, bool exit_must_be_taken)
900 {
901 widest_int max;
902 mpz_t d;
903 tree type = TREE_TYPE (c);
904 bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
905 || mpz_sgn (bnds->below) >= 0);
906
907 if (integer_onep (s)
908 || (TREE_CODE (c) == INTEGER_CST
909 && TREE_CODE (s) == INTEGER_CST
910 && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
911 TYPE_SIGN (type)) == 0)
912 || (TYPE_OVERFLOW_UNDEFINED (type)
913 && multiple_of_p (type, c, s)))
914 {
915 /* If C is an exact multiple of S, then its value will be reached before
916 the induction variable overflows (unless the loop is exited in some
917 other way before). Note that the actual induction variable in the
918 loop (which ranges from base to final instead of from 0 to C) may
919 overflow, in which case BNDS.up will not be giving a correct upper
920 bound on C; thus, BNDS_U_VALID had to be computed in advance. */
921 no_overflow = true;
922 exit_must_be_taken = true;
923 }
924
925 /* If the induction variable can overflow, the number of iterations is at
926 most the period of the control variable (or infinite, but in that case
927 the whole # of iterations analysis will fail). */
928 if (!no_overflow)
929 {
930 max = wi::mask <widest_int> (TYPE_PRECISION (type)
931 - wi::ctz (wi::to_wide (s)), false);
932 wi::to_mpz (max, bnd, UNSIGNED);
933 return;
934 }
935
936 /* Now we know that the induction variable does not overflow, so the loop
937 iterates at most (range of type / S) times. */
938 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);
939
940 /* If the induction variable is guaranteed to reach the value of C before
941 overflow, ... */
942 if (exit_must_be_taken)
943 {
944 /* ... then we can strengthen this to C / S, and possibly we can use
945 the upper bound on C given by BNDS. */
946 if (TREE_CODE (c) == INTEGER_CST)
947 wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
948 else if (bnds_u_valid)
949 mpz_set (bnd, bnds->up);
950 }
951
952 mpz_init (d);
953 wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
954 mpz_fdiv_q (bnd, bnd, d);
955 mpz_clear (d);
956 }
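
/* E.g. (illustration): in an 8-bit type with S = 4 and a possibly
   wrapping iv, S * i is periodic with period 2**8 / 4 = 64, so BND is
   set to 2**6 - 1 = 63.  If instead the iv cannot overflow and the exit
   must be taken with constant C = 20, BND becomes floor (20 / 4) = 5.  */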
957
958 /* Determines number of iterations of loop whose ending condition
959 is IV <> FINAL. TYPE is the type of the iv. The number of
960 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
961 we know that the exit must be taken eventually, i.e., that the IV
962 eventually reaches the value FINAL (we derived this earlier, and possibly set
963 NITER->assumptions to make sure this is the case). BNDS contains the
964 bounds on the difference FINAL - IV->base. */
965
966 static bool
967 number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
968 tree final, struct tree_niter_desc *niter,
969 bool exit_must_be_taken, bounds *bnds)
970 {
971 tree niter_type = unsigned_type_for (type);
972 tree s, c, d, bits, assumption, tmp, bound;
973 mpz_t max;
974
975 niter->control = *iv;
976 niter->bound = final;
977 niter->cmp = NE_EXPR;
978
979 /* Rearrange the terms so that we get inequality S * i <> C, with S
980 positive. Also cast everything to the unsigned type. If IV does
981 not overflow, BNDS bounds the value of C. Also, this is the
982 case if the computation |FINAL - IV->base| does not overflow, i.e.,
983 if BNDS->below in the result is nonnegative. */
984 if (tree_int_cst_sign_bit (iv->step))
985 {
986 s = fold_convert (niter_type,
987 fold_build1 (NEGATE_EXPR, type, iv->step));
988 c = fold_build2 (MINUS_EXPR, niter_type,
989 fold_convert (niter_type, iv->base),
990 fold_convert (niter_type, final));
991 bounds_negate (bnds);
992 }
993 else
994 {
995 s = fold_convert (niter_type, iv->step);
996 c = fold_build2 (MINUS_EXPR, niter_type,
997 fold_convert (niter_type, final),
998 fold_convert (niter_type, iv->base));
999 }
1000
1001 mpz_init (max);
1002 number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
1003 exit_must_be_taken);
1004 niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
1005 TYPE_SIGN (niter_type));
1006 mpz_clear (max);
1007
1008 /* Compute no-overflow information for the control iv. This can be
1009 proven when the two conditions below are satisfied:
1010
1011 1) IV evaluates toward FINAL at the beginning, i.e.:
1012 base <= FINAL ; step > 0
1013 base >= FINAL ; step < 0
1014
1015 2) |FINAL - base| is an exact multiple of step.
1016
1017 Unfortunately, it's hard to prove the above conditions after the loop-ch
1018 pass, because a loop with exit condition (IV != FINAL) will usually be
1019 guarded by the initial condition (IV.base - IV.step != FINAL). In this
1020 case, we can alternatively try to prove the conditions below:
1021
1022 1') IV evaluates toward FINAL at the beginning, i.e.:
1023 new_base = base - step < FINAL ; step > 0
1024 && base - step doesn't underflow
1025 new_base = base - step > FINAL ; step < 0
1026 && base - step doesn't overflow
1027
1028 2') |FINAL - new_base| is an exact multiple of step.
1029
1030 Please refer to PR34114 as an example of loop-ch's impact, also refer
1031 to PR72817 as an example why condition 2') is necessary.
1032
1033 Note, for NE_EXPR, base equal to FINAL is a special case, in
1034 which the loop exits immediately, and the iv does not overflow. */
1035 if (!niter->control.no_overflow
1036 && (integer_onep (s) || multiple_of_p (type, c, s)))
1037 {
1038 tree t, cond, new_c, relaxed_cond = boolean_false_node;
1039
1040 if (tree_int_cst_sign_bit (iv->step))
1041 {
1042 cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
1043 if (TREE_CODE (type) == INTEGER_TYPE)
1044 {
1045 /* Only when base - step doesn't overflow. */
1046 t = TYPE_MAX_VALUE (type);
1047 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1048 t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
1049 if (integer_nonzerop (t))
1050 {
1051 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1052 new_c = fold_build2 (MINUS_EXPR, niter_type,
1053 fold_convert (niter_type, t),
1054 fold_convert (niter_type, final));
1055 if (multiple_of_p (type, new_c, s))
1056 relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node,
1057 t, final);
1058 }
1059 }
1060 }
1061 else
1062 {
1063 cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
1064 if (TREE_CODE (type) == INTEGER_TYPE)
1065 {
1066 /* Only when base - step doesn't underflow. */
1067 t = TYPE_MIN_VALUE (type);
1068 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1069 t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
1070 if (integer_nonzerop (t))
1071 {
1072 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1073 new_c = fold_build2 (MINUS_EXPR, niter_type,
1074 fold_convert (niter_type, final),
1075 fold_convert (niter_type, t));
1076 if (multiple_of_p (type, new_c, s))
1077 relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node,
1078 t, final);
1079 }
1080 }
1081 }
1082
1083 t = simplify_using_initial_conditions (loop, cond);
1084 if (!t || !integer_onep (t))
1085 t = simplify_using_initial_conditions (loop, relaxed_cond);
1086
1087 if (t && integer_onep (t))
1088 niter->control.no_overflow = true;
1089 }
1090
1091 /* First the trivial cases -- when the step is 1. */
1092 if (integer_onep (s))
1093 {
1094 niter->niter = c;
1095 return true;
1096 }
1097 if (niter->control.no_overflow && multiple_of_p (type, c, s))
1098 {
1099 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, c, s);
1100 return true;
1101 }
1102
1103 /* Let gcd (step, size of mode) = d. If d does not divide c, the loop
1104 is infinite. Otherwise, the number of iterations is
1105 (inverse(s/d) * (c/d)) mod (size of mode/d). */
1106 bits = num_ending_zeros (s);
1107 bound = build_low_bits_mask (niter_type,
1108 (TYPE_PRECISION (niter_type)
1109 - tree_to_uhwi (bits)));
1110
1111 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
1112 build_int_cst (niter_type, 1), bits);
1113 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
1114
1115 if (!exit_must_be_taken)
1116 {
1117 /* If we cannot assume that the exit is taken eventually, record the
1118 assumptions for divisibility of c. */
1119 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
1120 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
1121 assumption, build_int_cst (niter_type, 0));
1122 if (!integer_nonzerop (assumption))
1123 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1124 niter->assumptions, assumption);
1125 }
1126
1127 c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
1128 if (integer_onep (s))
1129 {
1130 niter->niter = c;
1131 }
1132 else
1133 {
1134 tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
1135 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
1136 }
1137 return true;
1138 }
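
/* Hypothetical 8-bit instance of the final computation above: for the
   exit test "6 * i != 18" we have s = 6, c = 18, bits = 1, d = 2 and
   bound = 0x7f.  After the divisions s = 3 and c = 9; inverse (3, 0x7f)
   is 43 because 3 * 43 == 129 == 1 (mod 128), so the number of
   iterations is (9 * 43) & 0x7f = 3, and indeed 3 steps of 6 reach 18.  */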
1139
1140 /* Checks whether we can determine the final value of the control variable
1141 of the loop with ending condition IV0 < IV1 (computed in TYPE).
1142 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
1143 of the step. The assumptions necessary to ensure that the computation
1144 of the final value does not overflow are recorded in NITER. If we
1145 find the final value, we adjust DELTA and return TRUE. Otherwise
1146 we return false. BNDS bounds the value of IV1->base - IV0->base,
1147 and will be updated by the same amount as DELTA. EXIT_MUST_BE_TAKEN is
1148 true if we know that the exit must be taken eventually. */
1149
1150 static bool
1151 number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
1152 struct tree_niter_desc *niter,
1153 tree *delta, tree step,
1154 bool exit_must_be_taken, bounds *bnds)
1155 {
1156 tree niter_type = TREE_TYPE (step);
1157 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
1158 tree tmod;
1159 mpz_t mmod;
1160 tree assumption = boolean_true_node, bound, noloop;
1161 bool ret = false, fv_comp_no_overflow;
1162 tree type1 = type;
1163 if (POINTER_TYPE_P (type))
1164 type1 = sizetype;
1165
1166 if (TREE_CODE (mod) != INTEGER_CST)
1167 return false;
1168 if (integer_nonzerop (mod))
1169 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
1170 tmod = fold_convert (type1, mod);
1171
1172 mpz_init (mmod);
1173 wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
1174 mpz_neg (mmod, mmod);
1175
1176 /* If the induction variable does not overflow and the exit is taken,
1177 then the computation of the final value does not overflow. This is
1178 also obviously the case if the new final value is equal to the
1179 current one. Finally, we postulate this for pointer type variables,
1180 as the code cannot rely on the object to which the pointer points being
1181 placed at the end of the address space (and more pragmatically,
1182 TYPE_{MIN,MAX}_VALUE is not defined for pointers). */
1183 if (integer_zerop (mod) || POINTER_TYPE_P (type))
1184 fv_comp_no_overflow = true;
1185 else if (!exit_must_be_taken)
1186 fv_comp_no_overflow = false;
1187 else
1188 fv_comp_no_overflow =
1189 (iv0->no_overflow && integer_nonzerop (iv0->step))
1190 || (iv1->no_overflow && integer_nonzerop (iv1->step));
1191
1192 if (integer_nonzerop (iv0->step))
1193 {
1194 /* The final value of the iv is iv1->base + MOD, assuming that this
1195 computation does not overflow, and that
1196 iv0->base <= iv1->base + MOD. */
1197 if (!fv_comp_no_overflow)
1198 {
1199 bound = fold_build2 (MINUS_EXPR, type1,
1200 TYPE_MAX_VALUE (type1), tmod);
1201 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1202 iv1->base, bound);
1203 if (integer_zerop (assumption))
1204 goto end;
1205 }
1206 if (mpz_cmp (mmod, bnds->below) < 0)
1207 noloop = boolean_false_node;
1208 else if (POINTER_TYPE_P (type))
1209 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1210 iv0->base,
1211 fold_build_pointer_plus (iv1->base, tmod));
1212 else
1213 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1214 iv0->base,
1215 fold_build2 (PLUS_EXPR, type1,
1216 iv1->base, tmod));
1217 }
1218 else
1219 {
1220 /* The final value of the iv is iv0->base - MOD, assuming that this
1221 computation does not overflow, and that
1222 iv0->base - MOD <= iv1->base. */
1223 if (!fv_comp_no_overflow)
1224 {
1225 bound = fold_build2 (PLUS_EXPR, type1,
1226 TYPE_MIN_VALUE (type1), tmod);
1227 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1228 iv0->base, bound);
1229 if (integer_zerop (assumption))
1230 goto end;
1231 }
1232 if (mpz_cmp (mmod, bnds->below) < 0)
1233 noloop = boolean_false_node;
1234 else if (POINTER_TYPE_P (type))
1235 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1236 fold_build_pointer_plus (iv0->base,
1237 fold_build1 (NEGATE_EXPR,
1238 type1, tmod)),
1239 iv1->base);
1240 else
1241 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1242 fold_build2 (MINUS_EXPR, type1,
1243 iv0->base, tmod),
1244 iv1->base);
1245 }
1246
1247 if (!integer_nonzerop (assumption))
1248 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1249 niter->assumptions,
1250 assumption);
1251 if (!integer_zerop (noloop))
1252 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1253 niter->may_be_zero,
1254 noloop);
1255 bounds_add (bnds, wi::to_widest (mod), type);
1256 *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
1257
1258 ret = true;
1259 end:
1260 mpz_clear (mmod);
1261 return ret;
1262 }
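
/* Illustrative instance: for "for (i = 0; i < 7; i += 2)" we get
   *DELTA = 7 and STEP = 2, hence MOD = 7 mod 2 = 1, which is then
   replaced by STEP - MOD = 1.  *DELTA becomes 8 and the caller can
   treat the exit test as i != 8; ASSUMPTION and NOLOOP guard the cases
   in which this adjustment would overflow or skip the loop.  */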
1263
1264 /* Add assertions to NITER that ensure that the control variable of the loop
1265 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
1266 are TYPE. Returns false if we can prove that there is an overflow, true
1267 otherwise. STEP is the absolute value of the step. */
1268
1269 static bool
1270 assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1271 struct tree_niter_desc *niter, tree step)
1272 {
1273 tree bound, d, assumption, diff;
1274 tree niter_type = TREE_TYPE (step);
1275
1276 if (integer_nonzerop (iv0->step))
1277 {
1278 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
1279 if (iv0->no_overflow)
1280 return true;
1281
1282 /* If iv0->base is a constant, we can determine the last value before
1283 overflow precisely; otherwise we conservatively assume
1284 MAX - STEP + 1. */
1285
1286 if (TREE_CODE (iv0->base) == INTEGER_CST)
1287 {
1288 d = fold_build2 (MINUS_EXPR, niter_type,
1289 fold_convert (niter_type, TYPE_MAX_VALUE (type)),
1290 fold_convert (niter_type, iv0->base));
1291 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1292 }
1293 else
1294 diff = fold_build2 (MINUS_EXPR, niter_type, step,
1295 build_int_cst (niter_type, 1));
1296 bound = fold_build2 (MINUS_EXPR, type,
1297 TYPE_MAX_VALUE (type), fold_convert (type, diff));
1298 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1299 iv1->base, bound);
1300 }
1301 else
1302 {
1303 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
1304 if (iv1->no_overflow)
1305 return true;
1306
1307 if (TREE_CODE (iv1->base) == INTEGER_CST)
1308 {
1309 d = fold_build2 (MINUS_EXPR, niter_type,
1310 fold_convert (niter_type, iv1->base),
1311 fold_convert (niter_type, TYPE_MIN_VALUE (type)));
1312 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1313 }
1314 else
1315 diff = fold_build2 (MINUS_EXPR, niter_type, step,
1316 build_int_cst (niter_type, 1));
1317 bound = fold_build2 (PLUS_EXPR, type,
1318 TYPE_MIN_VALUE (type), fold_convert (type, diff));
1319 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1320 iv0->base, bound);
1321 }
1322
1323 if (integer_zerop (assumption))
1324 return false;
1325 if (!integer_nonzerop (assumption))
1326 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1327 niter->assumptions, assumption);
1328
1329 iv0->no_overflow = true;
1330 iv1->no_overflow = true;
1331 return true;
1332 }
1333
1334 /* Add an assumption to NITER that a loop whose ending condition
1335 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
1336 bounds the value of IV1->base - IV0->base. */
1337
1338 static void
1339 assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1340 struct tree_niter_desc *niter, bounds *bnds)
1341 {
1342 tree assumption = boolean_true_node, bound, diff;
1343 tree mbz, mbzl, mbzr, type1;
1344 bool rolls_p, no_overflow_p;
1345 widest_int dstep;
1346 mpz_t mstep, max;
1347
1348 /* We are going to compute the number of iterations as
1349 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
1350 variant of TYPE. This formula only works if
1351
1352 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
1353
1354 (where MAX is the maximum value of the unsigned variant of TYPE, and
1355 the computations in this formula are performed in full precision,
1356 i.e., without overflows).
1357
1358 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
1359 we have a condition of the form iv0->base - step < iv1->base before the loop,
1360 and for loops iv0->base < iv1->base - step * i the condition
1361 iv0->base < iv1->base + step, due to loop header copying, which enables us
1362 to prove the lower bound.
1363
1364 The upper bound is more complicated. Unless the expressions for initial
1365 and final value themselves contain enough information, we usually cannot
1366 derive it from the context. */
1367
1368 /* First check whether the answer does not follow from the bounds we gathered
1369 before. */
1370 if (integer_nonzerop (iv0->step))
1371 dstep = wi::to_widest (iv0->step);
1372 else
1373 {
1374 dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
1375 dstep = -dstep;
1376 }
1377
1378 mpz_init (mstep);
1379 wi::to_mpz (dstep, mstep, UNSIGNED);
1380 mpz_neg (mstep, mstep);
1381 mpz_add_ui (mstep, mstep, 1);
1382
1383 rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
1384
1385 mpz_init (max);
1386 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
1387 mpz_add (max, max, mstep);
1388 no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
1389 /* For pointers, only values lying inside a single object
1390 can be compared or manipulated by pointer arithmetic.
1391 GCC in general does not allow or handle objects larger
1392 than half of the address space, hence the upper bound
1393 is satisfied for pointers. */
1394 || POINTER_TYPE_P (type));
1395 mpz_clear (mstep);
1396 mpz_clear (max);
1397
1398 if (rolls_p && no_overflow_p)
1399 return;
1400
1401 type1 = type;
1402 if (POINTER_TYPE_P (type))
1403 type1 = sizetype;
1404
1405 /* Now the hard part; we must formulate the assumption(s) as expressions, and
1406 we must be careful not to introduce overflow. */
1407
1408 if (integer_nonzerop (iv0->step))
1409 {
1410 diff = fold_build2 (MINUS_EXPR, type1,
1411 iv0->step, build_int_cst (type1, 1));
1412
1413 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
1414 the address 0 never belongs to any object, we can assume this for
1415 pointers. */
1416 if (!POINTER_TYPE_P (type))
1417 {
1418 bound = fold_build2 (PLUS_EXPR, type1,
1419 TYPE_MIN_VALUE (type), diff);
1420 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1421 iv0->base, bound);
1422 }
1423
1424 /* And then we can compute iv0->base - diff, and compare it with
1425 iv1->base. */
1426 mbzl = fold_build2 (MINUS_EXPR, type1,
1427 fold_convert (type1, iv0->base), diff);
1428 mbzr = fold_convert (type1, iv1->base);
1429 }
1430 else
1431 {
1432 diff = fold_build2 (PLUS_EXPR, type1,
1433 iv1->step, build_int_cst (type1, 1));
1434
1435 if (!POINTER_TYPE_P (type))
1436 {
1437 bound = fold_build2 (PLUS_EXPR, type1,
1438 TYPE_MAX_VALUE (type), diff);
1439 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1440 iv1->base, bound);
1441 }
1442
1443 mbzl = fold_convert (type1, iv0->base);
1444 mbzr = fold_build2 (MINUS_EXPR, type1,
1445 fold_convert (type1, iv1->base), diff);
1446 }
1447
1448 if (!integer_nonzerop (assumption))
1449 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1450 niter->assumptions, assumption);
1451 if (!rolls_p)
1452 {
1453 mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
1454 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1455 niter->may_be_zero, mbz);
1456 }
1457 }
1458
1459 /* Determines number of iterations of loop whose ending condition
1460 is IV0 < IV1. TYPE is the type of the iv. The number of
1461 iterations is stored to NITER. BNDS bounds the difference
1462 IV1->base - IV0->base. EXIT_MUST_BE_TAKEN is true if we know
1463 that the exit must be taken eventually. */
1464
1465 static bool
1466 number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
1467 affine_iv *iv1, struct tree_niter_desc *niter,
1468 bool exit_must_be_taken, bounds *bnds)
1469 {
1470 tree niter_type = unsigned_type_for (type);
1471 tree delta, step, s;
1472 mpz_t mstep, tmp;
1473
1474 if (integer_nonzerop (iv0->step))
1475 {
1476 niter->control = *iv0;
1477 niter->cmp = LT_EXPR;
1478 niter->bound = iv1->base;
1479 }
1480 else
1481 {
1482 niter->control = *iv1;
1483 niter->cmp = GT_EXPR;
1484 niter->bound = iv0->base;
1485 }
1486
1487 delta = fold_build2 (MINUS_EXPR, niter_type,
1488 fold_convert (niter_type, iv1->base),
1489 fold_convert (niter_type, iv0->base));
1490
1491 /* First handle the special case that the step is +-1. */
1492 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
1493 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
1494 {
1495 /* for (i = iv0->base; i < iv1->base; i++)
1496
1497 or
1498
1499 for (i = iv1->base; i > iv0->base; i--).
1500
1501 In both cases # of iterations is iv1->base - iv0->base, assuming that
1502 iv1->base >= iv0->base.
1503
1504 First try to derive a lower bound on the value of
1505 iv1->base - iv0->base, computed in full precision. If the difference
1506 is nonnegative, we are done, otherwise we must record the
1507 condition. */
1508
1509 if (mpz_sgn (bnds->below) < 0)
1510 niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1511 iv1->base, iv0->base);
1512 niter->niter = delta;
1513 niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
1514 TYPE_SIGN (niter_type));
1515 niter->control.no_overflow = true;
1516 return true;
1517 }
1518
1519 if (integer_nonzerop (iv0->step))
1520 step = fold_convert (niter_type, iv0->step);
1521 else
1522 step = fold_convert (niter_type,
1523 fold_build1 (NEGATE_EXPR, type, iv1->step));
1524
1525 /* If we can determine the final value of the control iv exactly, we can
1526 transform the condition to != comparison. In particular, this will be
1527 the case if DELTA is constant. */
1528 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1529 exit_must_be_taken, bnds))
1530 {
1531 affine_iv zps;
1532
1533 zps.base = build_int_cst (niter_type, 0);
1534 zps.step = step;
1535 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1536 zps does not overflow. */
1537 zps.no_overflow = true;
1538
1539 return number_of_iterations_ne (loop, type, &zps,
1540 delta, niter, true, bnds);
1541 }
1542
1543 /* Make sure that the control iv does not overflow. */
1544 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1545 return false;
1546
1547 /* We determine the number of iterations as (delta + step - 1) / step. For
1548 this to work, we must know that iv1->base >= iv0->base - step + 1,
1549 otherwise the loop does not roll. */
1550 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1551
1552 s = fold_build2 (MINUS_EXPR, niter_type,
1553 step, build_int_cst (niter_type, 1));
1554 delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1555 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1556
1557 mpz_init (mstep);
1558 mpz_init (tmp);
1559 wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
1560 mpz_add (tmp, bnds->up, mstep);
1561 mpz_sub_ui (tmp, tmp, 1);
1562 mpz_fdiv_q (tmp, tmp, mstep);
1563 niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
1564 TYPE_SIGN (niter_type));
1565 mpz_clear (mstep);
1566 mpz_clear (tmp);
1567
1568 return true;
1569 }
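
/* E.g. (illustration): for a loop "for (i = base; i < n; i += 3)" whose
   DELTA = n - base is not a compile-time constant, the code above
   produces niter = (delta + 3 - 1) / 3 in the unsigned type, after
   recording the assumptions that make the formula safe.  */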
1570
1571 /* Determines number of iterations of loop whose ending condition
1572 is IV0 <= IV1. TYPE is the type of the iv. The number of
1573 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
1574 we know that this condition must eventually become false (we derived this
1575 earlier, and possibly set NITER->assumptions to make sure this
1576 is the case). BNDS bounds the difference IV1->base - IV0->base. */
1577
1578 static bool
1579 number_of_iterations_le (struct loop *loop, tree type, affine_iv *iv0,
1580 affine_iv *iv1, struct tree_niter_desc *niter,
1581 bool exit_must_be_taken, bounds *bnds)
1582 {
1583 tree assumption;
1584 tree type1 = type;
1585 if (POINTER_TYPE_P (type))
1586 type1 = sizetype;
1587
1588 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1589 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1590 value of the type. This we must know anyway, since if it is
1591 equal to this value, the loop rolls forever. We do not check
1592 this condition for pointer type ivs, as the code cannot rely on
1593 the object to which the pointer points being placed at the end of
1594 the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
1595 not defined for pointers). */
1596
1597 if (!exit_must_be_taken && !POINTER_TYPE_P (type))
1598 {
1599 if (integer_nonzerop (iv0->step))
1600 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1601 iv1->base, TYPE_MAX_VALUE (type));
1602 else
1603 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1604 iv0->base, TYPE_MIN_VALUE (type));
1605
1606 if (integer_zerop (assumption))
1607 return false;
1608 if (!integer_nonzerop (assumption))
1609 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1610 niter->assumptions, assumption);
1611 }
1612
1613 if (integer_nonzerop (iv0->step))
1614 {
1615 if (POINTER_TYPE_P (type))
1616 iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
1617 else
1618 iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1619 build_int_cst (type1, 1));
1620 }
1621 else if (POINTER_TYPE_P (type))
1622 iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
1623 else
1624 iv0->base = fold_build2 (MINUS_EXPR, type1,
1625 iv0->base, build_int_cst (type1, 1));
1626
1627 bounds_add (bnds, 1, type1);
1628
1629 return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
1630 bnds);
1631 }
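
/* E.g. (illustration): "for (i = 0; i <= n; i++)" is rewritten above as
   i < n + 1, together with the assumption n != TYPE_MAX (recorded unless
   the exit is already known to be taken; if n were TYPE_MAX the loop
   would roll forever), and BNDS is shifted by 1 accordingly.  */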
1632
1633 /* Dumps description of affine induction variable IV to FILE. */
1634
1635 static void
1636 dump_affine_iv (FILE *file, affine_iv *iv)
1637 {
1638 if (!integer_zerop (iv->step))
1639 fprintf (file, "[");
1640
1641 print_generic_expr (file, iv->base, TDF_SLIM);
1642
1643 if (!integer_zerop (iv->step))
1644 {
1645 fprintf (file, ", + , ");
1646 print_generic_expr (file, iv->step, TDF_SLIM);
1647 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1648 }
1649 }
1650
1651 /* Given exit condition IV0 CODE IV1 in TYPE, this function adjusts
1652 the condition for loop-until-wrap cases. For example:
1653 (unsigned){8, -1}_loop < 10 => {0, 1} != 9
1654 10 < (unsigned){0, max - 7}_loop => {0, 1} != 8
1655 Return true if condition is successfully adjusted. */
1656
1657 static bool
1658 adjust_cond_for_loop_until_wrap (tree type, affine_iv *iv0, tree_code *code,
1659 affine_iv *iv1)
1660 {
1661 /* Only support simple cases for the moment. */
1662 if (TREE_CODE (iv0->base) != INTEGER_CST
1663 || TREE_CODE (iv1->base) != INTEGER_CST)
1664 return false;
1665
1666 tree niter_type = unsigned_type_for (type), high, low;
1667 /* Case: i-- < 10. */
1668 if (integer_zerop (iv1->step))
1669 {
1670 /* TODO: Should handle case in which abs(step) != 1. */
1671 if (!integer_minus_onep (iv0->step))
1672 return false;
1673 /* Give up on infinite loop. */
1674 if (*code == LE_EXPR
1675 && tree_int_cst_equal (iv1->base, TYPE_MAX_VALUE (type)))
1676 return false;
1677 high = fold_build2 (PLUS_EXPR, niter_type,
1678 fold_convert (niter_type, iv0->base),
1679 build_int_cst (niter_type, 1));
1680 low = fold_convert (niter_type, TYPE_MIN_VALUE (type));
1681 }
1682 else if (integer_zerop (iv0->step))
1683 {
1684 /* TODO: Should handle case in which abs(step) != 1. */
1685 if (!integer_onep (iv1->step))
1686 return false;
1687 /* Give up on infinite loop. */
1688 if (*code == LE_EXPR
1689 && tree_int_cst_equal (iv0->base, TYPE_MIN_VALUE (type)))
1690 return false;
1691 high = fold_convert (niter_type, TYPE_MAX_VALUE (type));
1692 low = fold_build2 (MINUS_EXPR, niter_type,
1693 fold_convert (niter_type, iv1->base),
1694 build_int_cst (niter_type, 1));
1695 }
1696 else
1697 gcc_unreachable ();
1698
1699 iv0->base = low;
1700 iv0->step = fold_convert (niter_type, integer_one_node);
1701 iv1->base = high;
1702 iv1->step = build_int_cst (niter_type, 0);
1703 *code = NE_EXPR;
1704 return true;
1705 }
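
/* Source-level illustration: with unsigned i starting at 8, the loop
   "while (i-- < 10)" runs for i = 8, 7, ..., 0 and exits once i wraps
   around; the adjustment above recasts it as the counting iv
   {0, +1} != 9, i.e., exactly 9 iterations.  */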
1706
1707 /* Determine the number of iterations according to condition (for staying
1708 inside loop) which compares two induction variables using comparison
1709 operator CODE. The induction variable on left side of the comparison
1710 is IV0, the right-hand side is IV1. Both induction variables must have
1711 type TYPE, which must be an integer or pointer type. The steps of the
1712 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1713
1714 LOOP is the loop whose number of iterations we are determining.
1715
1716 ONLY_EXIT is true if we are sure this is the only way the loop could be
1717 exited (including possibly non-returning function calls, exceptions, etc.)
1718 -- in this case we can make better use of the information about whether
1719 the control induction variables can overflow.
1720
1721 If EVERY_ITERATION is true, we know the test is executed on every iteration.
1722
1723 The results (number of iterations and assumptions as described in
1724 comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
1725 Returns false if it fails to determine number of iterations, true if it
1726 was determined (possibly with some assumptions). */
1727
1728 static bool
1729 number_of_iterations_cond (struct loop *loop,
1730 tree type, affine_iv *iv0, enum tree_code code,
1731 affine_iv *iv1, struct tree_niter_desc *niter,
1732 bool only_exit, bool every_iteration)
1733 {
1734 bool exit_must_be_taken = false, ret;
1735 bounds bnds;
1736
1737 /* If the test is not executed every iteration, wrapping may make the test
1738 pass again.
1739 TODO: the overflow case can be still used as unreliable estimate of upper
1740 bound. But we have no API to pass it down to number of iterations code
1741 and, at present, it will not use it anyway. */
1742 if (!every_iteration
1743 && (!iv0->no_overflow || !iv1->no_overflow
1744 || code == NE_EXPR || code == EQ_EXPR))
1745 return false;
1746
1747 /* The meaning of these assumptions is this:
1748 if !assumptions
1749 then the rest of the information does not have to be valid;
1750 if may_be_zero then the loop does not roll, even if
1751 niter != 0. */
1752 niter->assumptions = boolean_true_node;
1753 niter->may_be_zero = boolean_false_node;
1754 niter->niter = NULL_TREE;
1755 niter->max = 0;
1756 niter->bound = NULL_TREE;
1757 niter->cmp = ERROR_MARK;
1758
1759 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1760 the control variable is on lhs. */
1761 if (code == GE_EXPR || code == GT_EXPR
1762 || (code == NE_EXPR && integer_zerop (iv0->step)))
1763 {
1764 std::swap (iv0, iv1);
1765 code = swap_tree_comparison (code);
1766 }
1767
1768 if (POINTER_TYPE_P (type))
1769 {
1770 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1771 to the same object. If they do, the control variable cannot wrap
1772 (as wrapping around the bounds of memory will never return a pointer
1773 that would be guaranteed to point to the same object, even if we
1774 avoid undefined behavior by casting to size_t and back). */
1775 iv0->no_overflow = true;
1776 iv1->no_overflow = true;
1777 }
1778
1779 /* If the control induction variable does not overflow and the only exit
1780 from the loop is the one that we analyze, we know it must be taken
1781 eventually. */
1782 if (only_exit)
1783 {
1784 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1785 exit_must_be_taken = true;
1786 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1787 exit_must_be_taken = true;
1788 }
1789
1790 /* We can handle cases which neither of the sides of the comparison is
1791 invariant:
1792
1793 {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
1794 as if:
1795 {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}
1796
1797 provided that either of the conditions below is satisfied:
1798
1799 a) the test is NE_EXPR;
1800 b) iv0.step - iv1.step is an integer constant and neither iv0 nor iv1 overflows.
1801
1802 This rarely occurs in practice, but it is simple enough to manage. */
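/* A worked instance of the transformation above (illustrative values,
   not from the original comment): iv0 = {0, +1} LT_EXPR iv1 = {10, -1}
   becomes iv0 = {0, +2} LT_EXPR iv1 = {10, 0}, from which the code
   below derives (10 - 0) / 2 = 5 iterations.  */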
1803 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1804 {
1805 tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
1806 tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
1807 iv0->step, iv1->step);
1808
1809 /* No need to check the sign of the new step, since the code below
1810 handles it correctly. */
1811 if (code != NE_EXPR
1812 && (TREE_CODE (step) != INTEGER_CST
1813 || !iv0->no_overflow || !iv1->no_overflow))
1814 return false;
1815
1816 iv0->step = step;
1817 if (!POINTER_TYPE_P (type))
1818 iv0->no_overflow = false;
1819
1820 iv1->step = build_int_cst (step_type, 0);
1821 iv1->no_overflow = true;
1822 }
1823
1824 /* If the result of the comparison is a constant, the loop is weird. More
1825 precise handling would be possible, but the situation is not common enough
1826 to waste time on it. */
1827 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1828 return false;
1829
1830 /* If the loop exits immediately, there is nothing to do. */
1831 tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1832 if (tem && integer_zerop (tem))
1833 {
1834 if (!every_iteration)
1835 return false;
1836 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1837 niter->max = 0;
1838 return true;
1839 }
1840
1841 /* Handle special case loops: while (i-- < 10) and while (10 < i++) by
1842 adjusting iv0, iv1 and code. */
1843 if (code != NE_EXPR
1844 && (tree_int_cst_sign_bit (iv0->step)
1845 || (!integer_zerop (iv1->step)
1846 && !tree_int_cst_sign_bit (iv1->step)))
1847 && !adjust_cond_for_loop_until_wrap (type, iv0, &code, iv1))
1848 return false;
1849
1850 /* OK, now we know we have a sensible loop. Handle several cases, depending
1851 on what comparison operator is used. */
1852 bound_difference (loop, iv1->base, iv0->base, &bnds);
1853
1854 if (dump_file && (dump_flags & TDF_DETAILS))
1855 {
1856 fprintf (dump_file,
1857 "Analyzing # of iterations of loop %d\n", loop->num);
1858
1859 fprintf (dump_file, " exit condition ");
1860 dump_affine_iv (dump_file, iv0);
1861 fprintf (dump_file, " %s ",
1862 code == NE_EXPR ? "!="
1863 : code == LT_EXPR ? "<"
1864 : "<=");
1865 dump_affine_iv (dump_file, iv1);
1866 fprintf (dump_file, "\n");
1867
1868 fprintf (dump_file, " bounds on difference of bases: ");
1869 mpz_out_str (dump_file, 10, bnds.below);
1870 fprintf (dump_file, " ... ");
1871 mpz_out_str (dump_file, 10, bnds.up);
1872 fprintf (dump_file, "\n");
1873 }
1874
1875 switch (code)
1876 {
1877 case NE_EXPR:
1878 gcc_assert (integer_zerop (iv1->step));
1879 ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
1880 exit_must_be_taken, &bnds);
1881 break;
1882
1883 case LT_EXPR:
1884 ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
1885 exit_must_be_taken, &bnds);
1886 break;
1887
1888 case LE_EXPR:
1889 ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
1890 exit_must_be_taken, &bnds);
1891 break;
1892
1893 default:
1894 gcc_unreachable ();
1895 }
1896
1897 mpz_clear (bnds.up);
1898 mpz_clear (bnds.below);
1899
1900 if (dump_file && (dump_flags & TDF_DETAILS))
1901 {
1902 if (ret)
1903 {
1904 fprintf (dump_file, " result:\n");
1905 if (!integer_nonzerop (niter->assumptions))
1906 {
1907 fprintf (dump_file, " under assumptions ");
1908 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1909 fprintf (dump_file, "\n");
1910 }
1911
1912 if (!integer_zerop (niter->may_be_zero))
1913 {
1914 fprintf (dump_file, " zero if ");
1915 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1916 fprintf (dump_file, "\n");
1917 }
1918
1919 fprintf (dump_file, " # of iterations ");
1920 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1921 fprintf (dump_file, ", bounded by ");
1922 print_decu (niter->max, dump_file);
1923 fprintf (dump_file, "\n");
1924 }
1925 else
1926 fprintf (dump_file, " failed\n\n");
1927 }
1928 return ret;
1929 }
1930
1931 /* Substitute NEW_TREE for OLD in EXPR and fold the result.
1932 If VALUEIZE is non-NULL then OLD and NEW_TREE are ignored and instead
1933 all SSA names are replaced with the result of calling the VALUEIZE
1934 function with the SSA name as argument. */
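/* A usage sketch (hypothetical trees, for illustration only): calling
   simplify_replace_tree on n_1 + 4 with OLD = n_1 and NEW_TREE = 16
   folds the result to 20; alternatively, a non-NULL VALUEIZE callback
   replaces each SSA name by whatever value the callback returns.  */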
1935
1936 tree
1937 simplify_replace_tree (tree expr, tree old, tree new_tree,
1938 tree (*valueize) (tree))
1939 {
1940 unsigned i, n;
1941 tree ret = NULL_TREE, e, se;
1942
1943 if (!expr)
1944 return NULL_TREE;
1945
1946 /* Do not bother to replace constants. */
1947 if (CONSTANT_CLASS_P (expr))
1948 return expr;
1949
1950 if (valueize)
1951 {
1952 if (TREE_CODE (expr) == SSA_NAME)
1953 {
1954 new_tree = valueize (expr);
1955 if (new_tree != expr)
1956 return new_tree;
1957 }
1958 }
1959 else if (expr == old
1960 || operand_equal_p (expr, old, 0))
1961 return unshare_expr (new_tree);
1962
1963 if (!EXPR_P (expr))
1964 return expr;
1965
1966 n = TREE_OPERAND_LENGTH (expr);
1967 for (i = 0; i < n; i++)
1968 {
1969 e = TREE_OPERAND (expr, i);
1970 se = simplify_replace_tree (e, old, new_tree, valueize);
1971 if (e == se)
1972 continue;
1973
1974 if (!ret)
1975 ret = copy_node (expr);
1976
1977 TREE_OPERAND (ret, i) = se;
1978 }
1979
1980 return (ret ? fold (ret) : expr);
1981 }
1982
1983 /* Expand definitions of ssa names in EXPR as long as they are simple
1984 enough, and return the new expression. If STOP is specified, stop
1985 expanding when EXPR equals it. */
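/* For example (illustrative SSA names): if i_2 is defined by
   i_2 = i_1 + 1 and j_3 by j_3 = (long) i_2, then expanding j_3
   yields (long) (i_1 + 1).  A definition such as k_4 = i_1 * n_5 is
   left alone, since only copies, casts and increments or decrements
   by an invariant (and a few similar cases) are considered simple.  */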
1986
1987 static tree
1988 expand_simple_operations (tree expr, tree stop, hash_map<tree, tree> &cache)
1989 {
1990 unsigned i, n;
1991 tree ret = NULL_TREE, e, ee, e1;
1992 enum tree_code code;
1993 gimple *stmt;
1994
1995 if (expr == NULL_TREE)
1996 return expr;
1997
1998 if (is_gimple_min_invariant (expr))
1999 return expr;
2000
2001 code = TREE_CODE (expr);
2002 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
2003 {
2004 n = TREE_OPERAND_LENGTH (expr);
2005 for (i = 0; i < n; i++)
2006 {
2007 e = TREE_OPERAND (expr, i);
2008 /* SCEV analysis feeds us a proper expression
2009 graph matching the SSA graph. Avoid turning it
2010 into a tree here, and thus handle tree sharing
2011 properly.
2012 ??? The SSA walk below still turns the SSA graph
2013 into a tree but until we find a testcase do not
2014 introduce additional tree sharing here. */
2015 bool existed_p;
2016 tree &cee = cache.get_or_insert (e, &existed_p);
2017 if (existed_p)
2018 ee = cee;
2019 else
2020 {
2021 cee = e;
2022 ee = expand_simple_operations (e, stop, cache);
2023 if (ee != e)
2024 *cache.get (e) = ee;
2025 }
2026 if (e == ee)
2027 continue;
2028
2029 if (!ret)
2030 ret = copy_node (expr);
2031
2032 TREE_OPERAND (ret, i) = ee;
2033 }
2034
2035 if (!ret)
2036 return expr;
2037
2038 fold_defer_overflow_warnings ();
2039 ret = fold (ret);
2040 fold_undefer_and_ignore_overflow_warnings ();
2041 return ret;
2042 }
2043
2044 /* Stop if EXPR is not an SSA name or is the one we do not want to expand. */
2045 if (TREE_CODE (expr) != SSA_NAME || expr == stop)
2046 return expr;
2047
2048 stmt = SSA_NAME_DEF_STMT (expr);
2049 if (gimple_code (stmt) == GIMPLE_PHI)
2050 {
2051 basic_block src, dest;
2052
2053 if (gimple_phi_num_args (stmt) != 1)
2054 return expr;
2055 e = PHI_ARG_DEF (stmt, 0);
2056
2057 /* Avoid propagating through loop exit phi nodes, which
2058 could break loop-closed SSA form restrictions. */
2059 dest = gimple_bb (stmt);
2060 src = single_pred (dest);
2061 if (TREE_CODE (e) == SSA_NAME
2062 && src->loop_father != dest->loop_father)
2063 return expr;
2064
2065 return expand_simple_operations (e, stop, cache);
2066 }
2067 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2068 return expr;
2069
2070 /* Avoid expanding to expressions that contain SSA names that need
2071 to take part in abnormal coalescing. */
2072 ssa_op_iter iter;
2073 FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
2074 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
2075 return expr;
2076
2077 e = gimple_assign_rhs1 (stmt);
2078 code = gimple_assign_rhs_code (stmt);
2079 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2080 {
2081 if (is_gimple_min_invariant (e))
2082 return e;
2083
2084 if (code == SSA_NAME)
2085 return expand_simple_operations (e, stop, cache);
2086 else if (code == ADDR_EXPR)
2087 {
2088 poly_int64 offset;
2089 tree base = get_addr_base_and_unit_offset (TREE_OPERAND (e, 0),
2090 &offset);
2091 if (base
2092 && TREE_CODE (base) == MEM_REF)
2093 {
2094 ee = expand_simple_operations (TREE_OPERAND (base, 0), stop,
2095 cache);
2096 return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (expr), ee,
2097 wide_int_to_tree (sizetype,
2098 mem_ref_offset (base)
2099 + offset));
2100 }
2101 }
2102
2103 return expr;
2104 }
2105
2106 switch (code)
2107 {
2108 CASE_CONVERT:
2109 /* Casts are simple. */
2110 ee = expand_simple_operations (e, stop, cache);
2111 return fold_build1 (code, TREE_TYPE (expr), ee);
2112
2113 case PLUS_EXPR:
2114 case MINUS_EXPR:
2115 if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
2116 && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
2117 return expr;
2118 /* Fallthru. */
2119 case POINTER_PLUS_EXPR:
2120 /* And increments and decrements by a constant are simple. */
2121 e1 = gimple_assign_rhs2 (stmt);
2122 if (!is_gimple_min_invariant (e1))
2123 return expr;
2124
2125 ee = expand_simple_operations (e, stop, cache);
2126 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
2127
2128 default:
2129 return expr;
2130 }
2131 }
2132
2133 tree
2134 expand_simple_operations (tree expr, tree stop)
2135 {
2136 hash_map<tree, tree> cache;
2137 return expand_simple_operations (expr, stop, cache);
2138 }
2139
2140 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2141 expression (or EXPR unchanged, if no simplification was possible). */
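/* Two illustrative cases (hypothetical operands): with
   COND = (n_5 > 10) and EXPR = (n_5 > 0), the implication check at
   the end simplifies EXPR to true; with COND = (n_5 == 0) and
   EXPR = (n_5 + 1 > 4), the equality propagation rewrites EXPR to
   0 + 1 > 4 and folds it to false.  */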
2142
2143 static tree
2144 tree_simplify_using_condition_1 (tree cond, tree expr)
2145 {
2146 bool changed;
2147 tree e, e0, e1, e2, notcond;
2148 enum tree_code code = TREE_CODE (expr);
2149
2150 if (code == INTEGER_CST)
2151 return expr;
2152
2153 if (code == TRUTH_OR_EXPR
2154 || code == TRUTH_AND_EXPR
2155 || code == COND_EXPR)
2156 {
2157 changed = false;
2158
2159 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
2160 if (TREE_OPERAND (expr, 0) != e0)
2161 changed = true;
2162
2163 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
2164 if (TREE_OPERAND (expr, 1) != e1)
2165 changed = true;
2166
2167 if (code == COND_EXPR)
2168 {
2169 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
2170 if (TREE_OPERAND (expr, 2) != e2)
2171 changed = true;
2172 }
2173 else
2174 e2 = NULL_TREE;
2175
2176 if (changed)
2177 {
2178 if (code == COND_EXPR)
2179 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2180 else
2181 expr = fold_build2 (code, boolean_type_node, e0, e1);
2182 }
2183
2184 return expr;
2185 }
2186
2187 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
2188 propagation, and vice versa. Fold does not handle this, since it is
2189 considered too expensive. */
2190 if (TREE_CODE (cond) == EQ_EXPR)
2191 {
2192 e0 = TREE_OPERAND (cond, 0);
2193 e1 = TREE_OPERAND (cond, 1);
2194
2195 /* We know that e0 == e1. Check whether we can simplify EXPR
2196 using this fact. */
2197 e = simplify_replace_tree (expr, e0, e1);
2198 if (integer_zerop (e) || integer_nonzerop (e))
2199 return e;
2200
2201 e = simplify_replace_tree (expr, e1, e0);
2202 if (integer_zerop (e) || integer_nonzerop (e))
2203 return e;
2204 }
2205 if (TREE_CODE (expr) == EQ_EXPR)
2206 {
2207 e0 = TREE_OPERAND (expr, 0);
2208 e1 = TREE_OPERAND (expr, 1);
2209
2210 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
2211 e = simplify_replace_tree (cond, e0, e1);
2212 if (integer_zerop (e))
2213 return e;
2214 e = simplify_replace_tree (cond, e1, e0);
2215 if (integer_zerop (e))
2216 return e;
2217 }
2218 if (TREE_CODE (expr) == NE_EXPR)
2219 {
2220 e0 = TREE_OPERAND (expr, 0);
2221 e1 = TREE_OPERAND (expr, 1);
2222
2223 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
2224 e = simplify_replace_tree (cond, e0, e1);
2225 if (integer_zerop (e))
2226 return boolean_true_node;
2227 e = simplify_replace_tree (cond, e1, e0);
2228 if (integer_zerop (e))
2229 return boolean_true_node;
2230 }
2231
2232 /* Check whether COND ==> EXPR. */
2233 notcond = invert_truthvalue (cond);
2234 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
2235 if (e && integer_nonzerop (e))
2236 return e;
2237
2238 /* Check whether COND ==> not EXPR. */
2239 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
2240 if (e && integer_zerop (e))
2241 return e;
2242
2243 return expr;
2244 }
2245
2246 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2247 expression (or EXPR unchanged, if no simplification was possible).
2248 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2249 of simple operations in definitions of ssa names in COND are expanded,
2250 so that things like casts or incrementing the value of the bound before
2251 the loop do not cause us to fail. */
2252
2253 static tree
2254 tree_simplify_using_condition (tree cond, tree expr)
2255 {
2256 cond = expand_simple_operations (cond);
2257
2258 return tree_simplify_using_condition_1 (cond, expr);
2259 }
2260
2261 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2262 Returns the simplified expression (or EXPR unchanged, if no
2263 simplification was possible). */
2264
2265 tree
2266 simplify_using_initial_conditions (struct loop *loop, tree expr)
2267 {
2268 edge e;
2269 basic_block bb;
2270 gimple *stmt;
2271 tree cond, expanded, backup;
2272 int cnt = 0;
2273
2274 if (TREE_CODE (expr) == INTEGER_CST)
2275 return expr;
2276
2277 backup = expanded = expand_simple_operations (expr);
2278
2279 /* Limit walking the dominators to avoid quadratic behavior in
2280 the number of BBs times the number of loops in degenerate
2281 cases. */
2282 for (bb = loop->header;
2283 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2284 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2285 {
2286 if (!single_pred_p (bb))
2287 continue;
2288 e = single_pred_edge (bb);
2289
2290 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2291 continue;
2292
2293 stmt = last_stmt (e->src);
2294 cond = fold_build2 (gimple_cond_code (stmt),
2295 boolean_type_node,
2296 gimple_cond_lhs (stmt),
2297 gimple_cond_rhs (stmt));
2298 if (e->flags & EDGE_FALSE_VALUE)
2299 cond = invert_truthvalue (cond);
2300 expanded = tree_simplify_using_condition (cond, expanded);
2301 /* Stop if EXPR has been simplified to a constant. */
2302 if (expanded
2303 && (integer_zerop (expanded) || integer_nonzerop (expanded)))
2304 return expanded;
2305
2306 ++cnt;
2307 }
2308
2309 /* Return the original expression if no simplification is done. */
2310 return operand_equal_p (backup, expanded, 0) ? expr : expanded;
2311 }
2312
2313 /* Tries to simplify EXPR using the evolutions of the loop invariants
2314 in the superloops of LOOP. Returns the simplified expression
2315 (or EXPR unchanged, if no simplification was possible). */
2316
2317 static tree
2318 simplify_using_outer_evolutions (struct loop *loop, tree expr)
2319 {
2320 enum tree_code code = TREE_CODE (expr);
2321 bool changed;
2322 tree e, e0, e1, e2;
2323
2324 if (is_gimple_min_invariant (expr))
2325 return expr;
2326
2327 if (code == TRUTH_OR_EXPR
2328 || code == TRUTH_AND_EXPR
2329 || code == COND_EXPR)
2330 {
2331 changed = false;
2332
2333 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2334 if (TREE_OPERAND (expr, 0) != e0)
2335 changed = true;
2336
2337 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2338 if (TREE_OPERAND (expr, 1) != e1)
2339 changed = true;
2340
2341 if (code == COND_EXPR)
2342 {
2343 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2344 if (TREE_OPERAND (expr, 2) != e2)
2345 changed = true;
2346 }
2347 else
2348 e2 = NULL_TREE;
2349
2350 if (changed)
2351 {
2352 if (code == COND_EXPR)
2353 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2354 else
2355 expr = fold_build2 (code, boolean_type_node, e0, e1);
2356 }
2357
2358 return expr;
2359 }
2360
2361 e = instantiate_parameters (loop, expr);
2362 if (is_gimple_min_invariant (e))
2363 return e;
2364
2365 return expr;
2366 }
2367
2368 /* Returns true if EXIT is the only possible exit from LOOP. */
2369
2370 bool
2371 loop_only_exit_p (const struct loop *loop, const_edge exit)
2372 {
2373 basic_block *body;
2374 gimple_stmt_iterator bsi;
2375 unsigned i;
2376
2377 if (exit != single_exit (loop))
2378 return false;
2379
2380 body = get_loop_body (loop);
2381 for (i = 0; i < loop->num_nodes; i++)
2382 {
2383 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2384 if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
2385 {
2386 free (body);
2387 return false;
2388 }
2389 }
2390
2391 free (body);
2392 return true;
2393 }
2394
2395 /* Stores description of number of iterations of LOOP derived from
2396 EXIT (an exit edge of the LOOP) in NITER. Returns true if some useful
2397 information could be derived (and fields of NITER have meaning described
2398 in comments at struct tree_niter_desc declaration), false otherwise.
2399 When EVERY_ITERATION is true, only tests that are known to be executed
2400 every iteration are considered (i.e. only tests that alone bound the loop).
2401 If AT_STMT is not NULL, this function stores LOOP's condition statement in
2402 it when returning true. */
2403
2404 bool
2405 number_of_iterations_exit_assumptions (struct loop *loop, edge exit,
2406 struct tree_niter_desc *niter,
2407 gcond **at_stmt, bool every_iteration)
2408 {
2409 gimple *last;
2410 gcond *stmt;
2411 tree type;
2412 tree op0, op1;
2413 enum tree_code code;
2414 affine_iv iv0, iv1;
2415 bool safe;
2416
2417 /* Nothing to analyze if the loop is known to be infinite. */
2418 if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
2419 return false;
2420
2421 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2422
2423 if (every_iteration && !safe)
2424 return false;
2425
2426 niter->assumptions = boolean_false_node;
2427 niter->control.base = NULL_TREE;
2428 niter->control.step = NULL_TREE;
2429 niter->control.no_overflow = false;
2430 last = last_stmt (exit->src);
2431 if (!last)
2432 return false;
2433 stmt = dyn_cast <gcond *> (last);
2434 if (!stmt)
2435 return false;
2436
2437 /* We want the condition for staying inside the loop. */
2438 code = gimple_cond_code (stmt);
2439 if (exit->flags & EDGE_TRUE_VALUE)
2440 code = invert_tree_comparison (code, false);
2441
2442 switch (code)
2443 {
2444 case GT_EXPR:
2445 case GE_EXPR:
2446 case LT_EXPR:
2447 case LE_EXPR:
2448 case NE_EXPR:
2449 break;
2450
2451 default:
2452 return false;
2453 }
2454
2455 op0 = gimple_cond_lhs (stmt);
2456 op1 = gimple_cond_rhs (stmt);
2457 type = TREE_TYPE (op0);
2458
2459 if (TREE_CODE (type) != INTEGER_TYPE
2460 && !POINTER_TYPE_P (type))
2461 return false;
2462
2463 tree iv0_niters = NULL_TREE;
2464 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2465 op0, &iv0, safe ? &iv0_niters : NULL, false))
2466 return number_of_iterations_popcount (loop, exit, code, niter);
2467 tree iv1_niters = NULL_TREE;
2468 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2469 op1, &iv1, safe ? &iv1_niters : NULL, false))
2470 return false;
2471 /* Give up on complicated case. */
2472 if (iv0_niters && iv1_niters)
2473 return false;
2474
2475 /* We don't want to see undefined signed overflow warnings while
2476 computing the number of iterations. */
2477 fold_defer_overflow_warnings ();
2478
2479 iv0.base = expand_simple_operations (iv0.base);
2480 iv1.base = expand_simple_operations (iv1.base);
2481 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2482 loop_only_exit_p (loop, exit), safe))
2483 {
2484 fold_undefer_and_ignore_overflow_warnings ();
2485 return false;
2486 }
2487
2488 /* Incorporate additional assumption implied by control iv. */
2489 tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
2490 if (iv_niters)
2491 {
2492 tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
2493 fold_convert (TREE_TYPE (niter->niter),
2494 iv_niters));
2495
2496 if (!integer_nonzerop (assumption))
2497 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2498 niter->assumptions, assumption);
2499
2500 /* Refine upper bound if possible. */
2501 if (TREE_CODE (iv_niters) == INTEGER_CST
2502 && niter->max > wi::to_widest (iv_niters))
2503 niter->max = wi::to_widest (iv_niters);
2504 }
2505
2506 /* There are no assumptions if the loop is known to be finite. */
2507 if (!integer_zerop (niter->assumptions)
2508 && loop_constraint_set_p (loop, LOOP_C_FINITE))
2509 niter->assumptions = boolean_true_node;
2510
2511 if (optimize >= 3)
2512 {
2513 niter->assumptions = simplify_using_outer_evolutions (loop,
2514 niter->assumptions);
2515 niter->may_be_zero = simplify_using_outer_evolutions (loop,
2516 niter->may_be_zero);
2517 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2518 }
2519
2520 niter->assumptions
2521 = simplify_using_initial_conditions (loop,
2522 niter->assumptions);
2523 niter->may_be_zero
2524 = simplify_using_initial_conditions (loop,
2525 niter->may_be_zero);
2526
2527 fold_undefer_and_ignore_overflow_warnings ();
2528
2529 /* If NITER has simplified into a constant, update MAX. */
2530 if (TREE_CODE (niter->niter) == INTEGER_CST)
2531 niter->max = wi::to_widest (niter->niter);
2532
2533 if (at_stmt)
2534 *at_stmt = stmt;
2535
2536 return (!integer_zerop (niter->assumptions));
2537 }
2538
2539
2540 /* Utility function to check whether OP is defined by a stmt
2541 computing VAL - 1. */
2542
2543 static bool
2544 ssa_defined_by_minus_one_stmt_p (tree op, tree val)
2545 {
2546 gimple *stmt;
2547 return (TREE_CODE (op) == SSA_NAME
2548 && (stmt = SSA_NAME_DEF_STMT (op))
2549 && is_gimple_assign (stmt)
2550 && (gimple_assign_rhs_code (stmt) == PLUS_EXPR)
2551 && val == gimple_assign_rhs1 (stmt)
2552 && integer_minus_onep (gimple_assign_rhs2 (stmt)));
2553 }
2554
2555
2556 /* See if LOOP is a popcount implementation and determine NITER for it.
2557
2558 We match:
2559 <bb 2>
2560 goto <bb 4>
2561
2562 <bb 3>
2563 _1 = b_11 + -1
2564 b_6 = _1 & b_11
2565
2566 <bb 4>
2567 b_11 = PHI <b_5(D)(2), b_6(3)>
2568
2569 exit block
2570 if (b_11 != 0)
2571 goto <bb 3>
2572 else
2573 goto <bb 5>
2574
2575 OR we match copy-header version:
2576 if (b_5 != 0)
2577 goto <bb 3>
2578 else
2579 goto <bb 4>
2580
2581 <bb 3>
2582 b_11 = PHI <b_5(2), b_6(3)>
2583 _1 = b_11 + -1
2584 b_6 = _1 & b_11
2585
2586 exit block
2587 if (b_6 != 0)
2588 goto <bb 3>
2589 else
2590 goto <bb 4>
2591
2592 If the popcount pattern matches, update NITER accordingly,
2593 i.e., set NITER to __builtin_popcount (b), and return true;
2594 return false otherwise. */
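/* As a source-level illustration (an assumption about typical input,
   not part of the original comment), both shapes correspond to

     while (b)
       {
	 b &= b - 1;	// clears the lowest set bit
	 count++;
       }

   whose body executes exactly popcount (b) times; NITER is therefore
   __builtin_popcount (b), minus one in the copy-header shape, where
   the b == 0 case is covered by MAY_BE_ZERO instead.  */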
2597
2598 static bool
2599 number_of_iterations_popcount (loop_p loop, edge exit,
2600 enum tree_code code,
2601 struct tree_niter_desc *niter)
2602 {
2603 bool adjust = true;
2604 tree iter;
2605 HOST_WIDE_INT max;
2607 tree fn = NULL_TREE;
2608
2609 /* Check that the loop-terminating branch is of the form
2610 if (b != 0). */
2611 gimple *stmt = last_stmt (exit->src);
2612 if (!stmt
2613 || gimple_code (stmt) != GIMPLE_COND
2614 || code != NE_EXPR
2615 || !integer_zerop (gimple_cond_rhs (stmt))
2616 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME)
2617 return false;
2618
2619 gimple *and_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2620
2621 /* Depending on whether copy-header was performed, the feeding PHI stmt
2622 might be in the loop header or the loop latch; handle both. */
2623 if (gimple_code (and_stmt) == GIMPLE_PHI
2624 && gimple_bb (and_stmt) == loop->header
2625 && gimple_phi_num_args (and_stmt) == 2
2626 && (TREE_CODE (gimple_phi_arg_def (and_stmt,
2627 loop_latch_edge (loop)->dest_idx))
2628 == SSA_NAME))
2629 {
2630 /* The SSA name used in the exit condition is defined by the PHI stmt
2631 b_11 = PHI <b_5(D)(2), b_6(3)>;
2632 from the PHI stmt, get the and_stmt
2633 b_6 = _1 & b_11. */
2634 tree t = gimple_phi_arg_def (and_stmt, loop_latch_edge (loop)->dest_idx);
2635 and_stmt = SSA_NAME_DEF_STMT (t);
2636 adjust = false;
2637 }
2638
2639 /* Make sure it is indeed an and stmt (b_6 = _1 & b_11). */
2640 if (!is_gimple_assign (and_stmt)
2641 || gimple_assign_rhs_code (and_stmt) != BIT_AND_EXPR)
2642 return false;
2643
2644 tree b_11 = gimple_assign_rhs1 (and_stmt);
2645 tree _1 = gimple_assign_rhs2 (and_stmt);
2646
2647 /* Check that _1 is defined by b_11 + -1 (_1 = b_11 + -1).
2648 Also make sure that b_11 is the same in and_stmt and in the stmt
2649 defining _1. Also canonicalize if _1 and b_11 are reversed. */
2650 if (ssa_defined_by_minus_one_stmt_p (b_11, _1))
2651 std::swap (b_11, _1);
2652 else if (ssa_defined_by_minus_one_stmt_p (_1, b_11))
2653 ;
2654 else
2655 return false;
2656 /* Check the recurrence:
2657 ... = PHI <b_5(2), b_6(3)>. */
2658 gimple *phi = SSA_NAME_DEF_STMT (b_11);
2659 if (gimple_code (phi) != GIMPLE_PHI
2660 || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
2661 || (gimple_assign_lhs (and_stmt)
2662 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
2663 return false;
2664
2665 /* We found a match. Get the corresponding popcount builtin. */
2666 tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
2667 if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION (integer_type_node))
2668 fn = builtin_decl_implicit (BUILT_IN_POPCOUNT);
2669 else if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION
2670 (long_integer_type_node))
2671 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTL);
2672 else if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION
2673 (long_long_integer_type_node))
2674 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTLL);
2675
2676 /* ??? Support promoting char/short to int. */
2677 if (!fn)
2678 return false;
2679
2680 /* Update the NITER fields accordingly. */
2681 tree utype = unsigned_type_for (TREE_TYPE (src));
2682 src = fold_convert (utype, src);
2683 tree call = fold_convert (utype, build_call_expr (fn, 1, src));
2684 if (adjust)
2685 iter = fold_build2 (MINUS_EXPR, utype,
2686 call,
2687 build_int_cst (utype, 1));
2688 else
2689 iter = call;
2690
2691 if (TREE_CODE (call) == INTEGER_CST)
2692 max = tree_to_uhwi (call);
2693 else
2694 max = TYPE_PRECISION (TREE_TYPE (src));
2695 if (adjust)
2696 max = max - 1;
2697
2698 niter->niter = iter;
2699 niter->assumptions = boolean_true_node;
2700
2701 if (adjust)
2702 {
2703 tree may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
2704 build_zero_cst
2705 (TREE_TYPE (src)));
2706 niter->may_be_zero =
2707 simplify_using_initial_conditions (loop, may_be_zero);
2708 }
2709 else
2710 niter->may_be_zero = boolean_false_node;
2711
2712 niter->max = max;
2713 niter->bound = NULL_TREE;
2714 niter->cmp = ERROR_MARK;
2715 return true;
2716 }
2717
2718
2719 /* Like number_of_iterations_exit_assumptions, but return TRUE only if
2720 the niter information holds unconditionally. */
2721
2722 bool
2723 number_of_iterations_exit (struct loop *loop, edge exit,
2724 struct tree_niter_desc *niter,
2725 bool warn, bool every_iteration)
2726 {
2727 gcond *stmt;
2728 if (!number_of_iterations_exit_assumptions (loop, exit, niter,
2729 &stmt, every_iteration))
2730 return false;
2731
2732 if (integer_nonzerop (niter->assumptions))
2733 return true;
2734
2735 if (warn && dump_enabled_p ())
2736 dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmt,
2737 "missed loop optimization: niters analysis ends up "
2738 "with assumptions.\n");
2739
2740 return false;
2741 }
2742
2743 /* Try to determine the number of iterations of LOOP. If we succeed,
2744 expression giving number of iterations is returned and *EXIT is
2745 set to the edge from that the information is obtained. Otherwise
2746 chrec_dont_know is returned. */
2747
2748 tree
2749 find_loop_niter (struct loop *loop, edge *exit)
2750 {
2751 unsigned i;
2752 vec<edge> exits = get_loop_exit_edges (loop);
2753 edge ex;
2754 tree niter = NULL_TREE, aniter;
2755 struct tree_niter_desc desc;
2756
2757 *exit = NULL;
2758 FOR_EACH_VEC_ELT (exits, i, ex)
2759 {
2760 if (!number_of_iterations_exit (loop, ex, &desc, false))
2761 continue;
2762
2763 if (integer_nonzerop (desc.may_be_zero))
2764 {
2765 /* We exit in the first iteration through this exit.
2766 We won't find anything better. */
2767 niter = build_int_cst (unsigned_type_node, 0);
2768 *exit = ex;
2769 break;
2770 }
2771
2772 if (!integer_zerop (desc.may_be_zero))
2773 continue;
2774
2775 aniter = desc.niter;
2776
2777 if (!niter)
2778 {
2779 /* Nothing recorded yet. */
2780 niter = aniter;
2781 *exit = ex;
2782 continue;
2783 }
2784
2785 /* Prefer constants, the lower the better. */
2786 if (TREE_CODE (aniter) != INTEGER_CST)
2787 continue;
2788
2789 if (TREE_CODE (niter) != INTEGER_CST)
2790 {
2791 niter = aniter;
2792 *exit = ex;
2793 continue;
2794 }
2795
2796 if (tree_int_cst_lt (aniter, niter))
2797 {
2798 niter = aniter;
2799 *exit = ex;
2800 continue;
2801 }
2802 }
2803 exits.release ();
2804
2805 return niter ? niter : chrec_dont_know;
2806 }
2807
2808 /* Return true if the loop is known to have a bounded number of iterations. */
2809
2810 bool
2811 finite_loop_p (struct loop *loop)
2812 {
2813 widest_int nit;
2814 int flags;
2815
2816 flags = flags_from_decl_or_type (current_function_decl);
2817 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2818 {
2819 if (dump_file && (dump_flags & TDF_DETAILS))
2820 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2821 loop->num);
2822 return true;
2823 }
2824
2825 if (loop->any_upper_bound
2826 || max_loop_iterations (loop, &nit))
2827 {
2828 if (dump_file && (dump_flags & TDF_DETAILS))
2829 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2830 loop->num);
2831 return true;
2832 }
2833
2834 if (flag_finite_loops)
2835 {
2836 unsigned i;
2837 vec<edge> exits = get_loop_exit_edges (loop);
2838 edge ex;
2839
2840 /* If the loop has a normal exit, we can assume it will terminate. */
2841 FOR_EACH_VEC_ELT (exits, i, ex)
2842 if (!(ex->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_FAKE)))
2843 {
2844 exits.release ();
2845 if (dump_file)
2846 fprintf (dump_file, "Assume loop %i to be finite: it has an exit "
2847 "and -ffinite-loops is on.\n", loop->num);
2848 return true;
2849 }
2850
2851 exits.release ();
2852 }
2853
2854 return false;
2855 }
2856
2857 /*
2858
2859 Analysis of a number of iterations of a loop by a brute-force evaluation.
2860
2861 */
2862
2863 /* Bound on the number of iterations we try to evaluate. */
2864
2865 #define MAX_ITERATIONS_TO_TRACK \
2866 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2867
2868 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
2869 result by a chain of operations such that all but exactly one of their
2870 operands are constants. */
2871
2872 static gphi *
2873 chain_of_csts_start (struct loop *loop, tree x)
2874 {
2875 gimple *stmt = SSA_NAME_DEF_STMT (x);
2876 tree use;
2877 basic_block bb = gimple_bb (stmt);
2878 enum tree_code code;
2879
2880 if (!bb
2881 || !flow_bb_inside_loop_p (loop, bb))
2882 return NULL;
2883
2884 if (gimple_code (stmt) == GIMPLE_PHI)
2885 {
2886 if (bb == loop->header)
2887 return as_a <gphi *> (stmt);
2888
2889 return NULL;
2890 }
2891
2892 if (gimple_code (stmt) != GIMPLE_ASSIGN
2893 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2894 return NULL;
2895
2896 code = gimple_assign_rhs_code (stmt);
2897 if (gimple_references_memory_p (stmt)
2898 || TREE_CODE_CLASS (code) == tcc_reference
2899 || (code == ADDR_EXPR
2900 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2901 return NULL;
2902
2903 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2904 if (use == NULL_TREE)
2905 return NULL;
2906
2907 return chain_of_csts_start (loop, use);
2908 }
2909
2910 /* Determines whether the expression X is derived from a result of a phi node
2911 in the header of LOOP such that
2912
2913 * the derivation of X consists only of operations with constants
2914 * the initial value of the phi node is constant
2915 * the value of the phi node in the next iteration can be derived from the
2916 value in the current iteration by a chain of operations with constants,
2917 or is also a constant
2918
2919 If such phi node exists, it is returned, otherwise NULL is returned. */
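/* Example (illustrative SSA names): with the header phi
   i_1 = PHI <3(preheader), i_2(latch)> and the latch value computed
   as i_2 = i_1 * 2 + 1, an expression such as x_3 = i_1 ^ 5 is
   derived from the phi result purely by operations with constants,
   so the phi is returned; if i_2 depended on another SSA name, NULL
   would be returned instead.  */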
2920
2921 static gphi *
2922 get_base_for (struct loop *loop, tree x)
2923 {
2924 gphi *phi;
2925 tree init, next;
2926
2927 if (is_gimple_min_invariant (x))
2928 return NULL;
2929
2930 phi = chain_of_csts_start (loop, x);
2931 if (!phi)
2932 return NULL;
2933
2934 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2935 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2936
2937 if (!is_gimple_min_invariant (init))
2938 return NULL;
2939
2940 if (TREE_CODE (next) == SSA_NAME
2941 && chain_of_csts_start (loop, next) != phi)
2942 return NULL;
2943
2944 return phi;
2945 }
2946
2947 /* Given an expression X, then
2948
2949 * if X is NULL_TREE, we return the constant BASE.
2950 * if X is a constant, we return the constant X.
2951 * otherwise X is an SSA name, whose value in the considered loop is derived
2952 by a chain of operations with constant from a result of a phi node in
2953 the header of the loop. Then we return value of X when the value of the
2954 result of this phi node is given by the constant BASE. */
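/* Example (illustrative): if x_3 is defined by x_3 = i_1 * 2, where
   i_1 is the phi result, then get_val_for (x_3, 7) folds 7 * 2 and
   returns 14.  */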
2955
2956 static tree
2957 get_val_for (tree x, tree base)
2958 {
2959 gimple *stmt;
2960
2961 gcc_checking_assert (is_gimple_min_invariant (base));
2962
2963 if (!x)
2964 return base;
2965 else if (is_gimple_min_invariant (x))
2966 return x;
2967
2968 stmt = SSA_NAME_DEF_STMT (x);
2969 if (gimple_code (stmt) == GIMPLE_PHI)
2970 return base;
2971
2972 gcc_checking_assert (is_gimple_assign (stmt));
2973
2974 /* STMT must be either an assignment of a single SSA name or an
2975 expression involving an SSA name and a constant. Try to fold that
2976 expression using the value for the SSA name. */
2977 if (gimple_assign_ssa_name_copy_p (stmt))
2978 return get_val_for (gimple_assign_rhs1 (stmt), base);
2979 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2980 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2981 return fold_build1 (gimple_assign_rhs_code (stmt),
2982 gimple_expr_type (stmt),
2983 get_val_for (gimple_assign_rhs1 (stmt), base));
2984 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2985 {
2986 tree rhs1 = gimple_assign_rhs1 (stmt);
2987 tree rhs2 = gimple_assign_rhs2 (stmt);
2988 if (TREE_CODE (rhs1) == SSA_NAME)
2989 rhs1 = get_val_for (rhs1, base);
2990 else if (TREE_CODE (rhs2) == SSA_NAME)
2991 rhs2 = get_val_for (rhs2, base);
2992 else
2993 gcc_unreachable ();
2994 return fold_build2 (gimple_assign_rhs_code (stmt),
2995 gimple_expr_type (stmt), rhs1, rhs2);
2996 }
2997 else
2998 gcc_unreachable ();
2999 }
3000
3001
3002 /* Tries to count the number of iterations of LOOP until it exits through
3003 EXIT by brute force -- i.e. by determining the values of the operands of
3004 the condition at EXIT in the first few iterations of the loop (assuming
3005 that these values are constant) and finding the first iteration in which
3006 the condition is not satisfied. Returns the constant giving the number
3007 of iterations of LOOP if successful, chrec_dont_know otherwise. */
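/* Example (illustrative): in

     for (i = 1; i != 16; i *= 2)
       ...

   the evolution of i is not affine, so the symbolic analysis gives
   up; evaluating i = 1, 2, 4, 8, 16 shows the condition for staying
   in the loop failing on the fifth test, so 4 is returned.  */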
3008
3009 tree
3010 loop_niter_by_eval (struct loop *loop, edge exit)
3011 {
3012 tree acnd;
3013 tree op[2], val[2], next[2], aval[2];
3014 gphi *phi;
3015 gimple *cond;
3016 unsigned i, j;
3017 enum tree_code cmp;
3018
3019 cond = last_stmt (exit->src);
3020 if (!cond || gimple_code (cond) != GIMPLE_COND)
3021 return chrec_dont_know;
3022
3023 cmp = gimple_cond_code (cond);
3024 if (exit->flags & EDGE_TRUE_VALUE)
3025 cmp = invert_tree_comparison (cmp, false);
3026
3027 switch (cmp)
3028 {
3029 case EQ_EXPR:
3030 case NE_EXPR:
3031 case GT_EXPR:
3032 case GE_EXPR:
3033 case LT_EXPR:
3034 case LE_EXPR:
3035 op[0] = gimple_cond_lhs (cond);
3036 op[1] = gimple_cond_rhs (cond);
3037 break;
3038
3039 default:
3040 return chrec_dont_know;
3041 }
3042
3043 for (j = 0; j < 2; j++)
3044 {
3045 if (is_gimple_min_invariant (op[j]))
3046 {
3047 val[j] = op[j];
3048 next[j] = NULL_TREE;
3049 op[j] = NULL_TREE;
3050 }
3051 else
3052 {
3053 phi = get_base_for (loop, op[j]);
3054 if (!phi)
3055 return chrec_dont_know;
3056 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
3057 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3058 }
3059 }
3060
3061 /* Don't issue signed overflow warnings. */
3062 fold_defer_overflow_warnings ();
3063
3064 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
3065 {
3066 for (j = 0; j < 2; j++)
3067 aval[j] = get_val_for (op[j], val[j]);
3068
3069 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
3070 if (acnd && integer_zerop (acnd))
3071 {
3072 fold_undefer_and_ignore_overflow_warnings ();
3073 if (dump_file && (dump_flags & TDF_DETAILS))
3074 fprintf (dump_file,
3075 "Proved that loop %d iterates %d times using brute force.\n",
3076 loop->num, i);
3077 return build_int_cst (unsigned_type_node, i);
3078 }
3079
3080 for (j = 0; j < 2; j++)
3081 {
3082 aval[j] = val[j];
3083 val[j] = get_val_for (next[j], val[j]);
3084 if (!is_gimple_min_invariant (val[j]))
3085 {
3086 fold_undefer_and_ignore_overflow_warnings ();
3087 return chrec_dont_know;
3088 }
3089 }
3090
3091 /* If the next iteration would use the same base values
3092 as the current one, there is no point looping further,
3093 all following iterations will be the same as this one. */
3094 if (val[0] == aval[0] && val[1] == aval[1])
3095 break;
3096 }
3097
3098 fold_undefer_and_ignore_overflow_warnings ();
3099
3100 return chrec_dont_know;
3101 }
3102
3103 /* Finds the exit of LOOP through which the loop exits after a constant
3104 number of iterations and stores the exit edge to *EXIT. The constant
3105 giving the number of iterations of LOOP is returned. The number of
3106 iterations is determined using loop_niter_by_eval (i.e. by brute-force
3107 evaluation). If we are unable to find an exit for which
3108 loop_niter_by_eval determines the number of iterations, chrec_dont_know
3109 is returned. */
3109
3110 tree
3111 find_loop_niter_by_eval (struct loop *loop, edge *exit)
3112 {
3113 unsigned i;
3114 vec<edge> exits = get_loop_exit_edges (loop);
3115 edge ex;
3116 tree niter = NULL_TREE, aniter;
3117
3118 *exit = NULL;
3119
3120 /* Loops with multiple exits are expensive to handle and less important. */
3121 if (!flag_expensive_optimizations
3122 && exits.length () > 1)
3123 {
3124 exits.release ();
3125 return chrec_dont_know;
3126 }
3127
3128 FOR_EACH_VEC_ELT (exits, i, ex)
3129 {
3130 if (!just_once_each_iteration_p (loop, ex->src))
3131 continue;
3132
3133 aniter = loop_niter_by_eval (loop, ex);
3134 if (chrec_contains_undetermined (aniter))
3135 continue;
3136
3137 if (niter
3138 && !tree_int_cst_lt (aniter, niter))
3139 continue;
3140
3141 niter = aniter;
3142 *exit = ex;
3143 }
3144 exits.release ();
3145
3146 return niter ? niter : chrec_dont_know;
3147 }
3148
3149 /*
3150
3151 Analysis of upper bounds on number of iterations of a loop.
3152
3153 */
3154
3155 static widest_int derive_constant_upper_bound_ops (tree, tree,
3156 enum tree_code, tree);
3157
3158 /* Returns a constant upper bound on the value of the right-hand side of
3159 an assignment statement STMT. */
3160
3161 static widest_int
3162 derive_constant_upper_bound_assign (gimple *stmt)
3163 {
3164 enum tree_code code = gimple_assign_rhs_code (stmt);
3165 tree op0 = gimple_assign_rhs1 (stmt);
3166 tree op1 = gimple_assign_rhs2 (stmt);
3167
3168 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
3169 op0, code, op1);
3170 }
3171
3172 /* Returns a constant upper bound on the value of expression VAL. VAL
3173 is considered to be unsigned. If its type is signed, its value must
3174 be nonnegative. */
3175
3176 static widest_int
3177 derive_constant_upper_bound (tree val)
3178 {
3179 enum tree_code code;
3180 tree op0, op1, op2;
3181
3182 extract_ops_from_tree (val, &code, &op0, &op1, &op2);
3183 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
3184 }
3185
3186 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
3187 whose type is TYPE. The expression is considered to be unsigned. If
3188 its type is signed, its value must be nonnegative. */
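/* Worked example (illustrative): for a 32-bit unsigned expression
   (x & 15) + 2, the BIT_AND_EXPR case below bounds x & 15 by 15 and
   the PLUS_EXPR case then yields the upper bound 15 + 2 = 17.  */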
3189
3190 static widest_int
3191 derive_constant_upper_bound_ops (tree type, tree op0,
3192 enum tree_code code, tree op1)
3193 {
3194 tree subtype, maxt;
3195 widest_int bnd, max, cst;
3196 gimple *stmt;
3197
3198 if (INTEGRAL_TYPE_P (type))
3199 maxt = TYPE_MAX_VALUE (type);
3200 else
3201 maxt = upper_bound_in_type (type, type);
3202
3203 max = wi::to_widest (maxt);
3204
3205 switch (code)
3206 {
3207 case INTEGER_CST:
3208 return wi::to_widest (op0);
3209
3210 CASE_CONVERT:
3211 subtype = TREE_TYPE (op0);
3212 if (!TYPE_UNSIGNED (subtype)
3213 /* If TYPE is also signed, the fact that VAL is nonnegative implies
3214 that OP0 is nonnegative. */
3215 && TYPE_UNSIGNED (type)
3216 && !tree_expr_nonnegative_p (op0))
3217 {
3218 /* If we cannot prove that the casted expression is nonnegative,
3219 we cannot establish a more useful upper bound than the precision
3220 of the type gives us. */
3221 return max;
3222 }
3223
3224 /* We now know that op0 is a nonnegative value. Try deriving an upper
3225 bound for it. */
3226 bnd = derive_constant_upper_bound (op0);
3227
3228 /* If the bound does not fit in TYPE, the maximum value of TYPE could be
3229 attained. */
3230 if (wi::ltu_p (max, bnd))
3231 return max;
3232
3233 return bnd;
3234
3235 case PLUS_EXPR:
3236 case POINTER_PLUS_EXPR:
3237 case MINUS_EXPR:
3238 if (TREE_CODE (op1) != INTEGER_CST
3239 || !tree_expr_nonnegative_p (op0))
3240 return max;
3241
3242 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
3243 choose the most logical way to treat this constant regardless
3244 of the signedness of the type. */
3245 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
3246 if (code != MINUS_EXPR)
3247 cst = -cst;
3248
3249 bnd = derive_constant_upper_bound (op0);
3250
3251 if (wi::neg_p (cst))
3252 {
3253 cst = -cst;
3254 /* Avoid CST == 0x80000... */
3255 if (wi::neg_p (cst))
3256 return max;
3257
3258 /* OP0 + CST. We need to check that
3259 BND <= MAX (type) - CST. */
3260
3261 widest_int mmax = max - cst;
3262 if (wi::gtu_p (bnd, mmax))
3263 return max;
3264
3265 return bnd + cst;
3266 }
3267 else
3268 {
3269 /* OP0 - CST, where CST >= 0.
3270
3271 If TYPE is signed, we have already verified that OP0 >= 0, and we
3272 know that the result is nonnegative. This implies that
3273 VAL <= BND - CST.
3274
3275 If TYPE is unsigned, we must additionally know that OP0 >= CST,
3276 otherwise the operation underflows.
3277 */
3278
3279 /* This should only happen if the type is unsigned; however, for
3280 buggy programs that use overflowing signed arithmetic even with
3281 -fno-wrapv, this condition may also be true for signed values. */
3282 if (wi::ltu_p (bnd, cst))
3283 return max;
3284
3285 if (TYPE_UNSIGNED (type))
3286 {
3287 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
3288 wide_int_to_tree (type, cst));
3289 if (!tem || !integer_nonzerop (tem))
3290 return max;
3291 }
3292
3293 bnd -= cst;
3294 }
3295
3296 return bnd;
3297
3298 case FLOOR_DIV_EXPR:
3299 case EXACT_DIV_EXPR:
3300 if (TREE_CODE (op1) != INTEGER_CST
3301 || tree_int_cst_sign_bit (op1))
3302 return max;
3303
3304 bnd = derive_constant_upper_bound (op0);
3305 return wi::udiv_floor (bnd, wi::to_widest (op1));
3306
3307 case BIT_AND_EXPR:
3308 if (TREE_CODE (op1) != INTEGER_CST
3309 || tree_int_cst_sign_bit (op1))
3310 return max;
3311 return wi::to_widest (op1);
3312
3313 case SSA_NAME:
3314 stmt = SSA_NAME_DEF_STMT (op0);
3315 if (gimple_code (stmt) != GIMPLE_ASSIGN
3316 || gimple_assign_lhs (stmt) != op0)
3317 return max;
3318 return derive_constant_upper_bound_assign (stmt);
3319
3320 default:
3321 return max;
3322 }
3323 }
3324
3325 /* Emit a -Waggressive-loop-optimizations warning if needed. */
3326
3327 static void
3328 do_warn_aggressive_loop_optimizations (struct loop *loop,
3329 widest_int i_bound, gimple *stmt)
3330 {
3331 /* Don't warn if the loop doesn't have a known constant bound. */
3332 if (!loop->nb_iterations
3333 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
3334 || !warn_aggressive_loop_optimizations
3335 /* To avoid warning multiple times for the same loop,
3336 only start warning when we preserve loops. */
3337 || (cfun->curr_properties & PROP_loops) == 0
3338 /* Only warn once per loop. */
3339 || loop->warned_aggressive_loop_optimizations
3340 /* Only warn if undefined behavior gives us a lower estimate than the
3341 known constant bound. */
3342 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
3343 /* And undefined behavior happens unconditionally. */
3344 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
3345 return;
3346
3347 edge e = single_exit (loop);
3348 if (e == NULL)
3349 return;
3350
3351 gimple *estmt = last_stmt (e->src);
3352 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
3353 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
3354 ? UNSIGNED : SIGNED);
3355 auto_diagnostic_group d;
3356 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
3357 "iteration %s invokes undefined behavior", buf))
3358 inform (gimple_location (estmt), "within this loop");
3359 loop->warned_aggressive_loop_optimizations = true;
3360 }
3361
3362 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
3363 is true if the loop is exited immediately after STMT, and this exit
3364 is taken at the latest when STMT has been executed BOUND + 1 times.
3365 REALISTIC is true if BOUND is expected to be close to the real number
3366 of iterations. UPPER is true if we are sure the loop iterates at most
3367 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
3368
3369 static void
3370 record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
3371 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
3372 {
3373 widest_int delta;
3374
3375 if (dump_file && (dump_flags & TDF_DETAILS))
3376 {
3377 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
3378 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
3379 fprintf (dump_file, " is %sexecuted at most ",
3380 upper ? "" : "probably ");
3381 print_generic_expr (dump_file, bound, TDF_SLIM);
3382 fprintf (dump_file, " (bounded by ");
3383 print_decu (i_bound, dump_file);
3384 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
3385 }
3386
3387 /* If the I_BOUND is just an estimate of BOUND, it is rarely close to the
3388 real number of iterations. */
3389 if (TREE_CODE (bound) != INTEGER_CST)
3390 realistic = false;
3391 else
3392 gcc_checking_assert (i_bound == wi::to_widest (bound));
3393
3394 /* If we have a guaranteed upper bound, record it in the appropriate
3395 list, unless this is an !is_exit bound (i.e. undefined behavior in
3396 at_stmt) in a loop with known constant number of iterations. */
3397 if (upper
3398 && (is_exit
3399 || loop->nb_iterations == NULL_TREE
3400 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
3401 {
3402 struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
3403
3404 elt->bound = i_bound;
3405 elt->stmt = at_stmt;
3406 elt->is_exit = is_exit;
3407 elt->next = loop->bounds;
3408 loop->bounds = elt;
3409 }
3410
3411 /* Only if the statement is executed on every path to the loop latch can
3412 we directly infer the upper bound on the # of iterations of the loop. */
3413 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
3414 upper = false;
3415
3416 /* Update the number of iteration estimates according to the bound.
3417 If at_stmt is an exit then the loop latch is executed at most BOUND times,
3418 otherwise it can be executed BOUND + 1 times. We will lower the estimate
3419 later if such a statement must be executed on the last iteration. */
3420 if (is_exit)
3421 delta = 0;
3422 else
3423 delta = 1;
3424 widest_int new_i_bound = i_bound + delta;
3425
3426 /* If an overflow occurred, ignore the result. */
3427 if (wi::ltu_p (new_i_bound, delta))
3428 return;
3429
3430 if (upper && !is_exit)
3431 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
3432 record_niter_bound (loop, new_i_bound, realistic, upper);
3433 }
3434
3435 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
3436 and doesn't overflow. */
3437
3438 static void
3439 record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
3440 {
3441 struct control_iv *iv;
3442
3443 if (!niter->control.base || !niter->control.step)
3444 return;
3445
3446 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3447 return;
3448
3449 iv = ggc_alloc<control_iv> ();
3450 iv->base = niter->control.base;
3451 iv->step = niter->control.step;
3452 iv->next = loop->control_ivs;
3453 loop->control_ivs = iv;
3456 }
3457
3458 /* This function returns TRUE if the following conditions are satisfied:
3459 1) VAR is an SSA variable.
3460 2) VAR is an IV:{base, step} in its defining loop.
3461 3) The IV doesn't overflow.
3462 4) Both base and step are integer constants.
3463 5) Base is the MIN/MAX value, depending on IS_MIN.
3464 The value of base is stored to INIT. */
3465
3466 static bool
3467 get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
3468 {
3469 if (TREE_CODE (var) != SSA_NAME)
3470 return false;
3471
3472 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
3473 struct loop *loop = loop_containing_stmt (def_stmt);
3474
3475 if (loop == NULL)
3476 return false;
3477
3478 affine_iv iv;
3479 if (!simple_iv (loop, loop, var, &iv, false))
3480 return false;
3481
3482 if (!iv.no_overflow)
3483 return false;
3484
3485 if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
3486 return false;
3487
3488 if (is_min == tree_int_cst_sign_bit (iv.step))
3489 return false;
3490
3491 *init = wi::to_wide (iv.base);
3492 return true;
3493 }
3494
3495 /* Record the estimate on number of iterations of LOOP based on the fact that
3496 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3497 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
3498 estimated number of iterations is expected to be close to the real one.
3499 UPPER is true if we are sure the induction variable does not wrap. */
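/* For instance (illustrative numbers): an IV with base 0 and step 4
   whose values must stay in <0, 100> can take at most
   (100 - 0) / 4 = 25 steps, so STMT is executed at most 25 + 1 times;
   25 is exactly the NITER_BOUND computed below.  */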
3500
3501 static void
3502 record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
3503 tree low, tree high, bool realistic, bool upper)
3504 {
3505 tree niter_bound, extreme, delta;
3506 tree type = TREE_TYPE (base), unsigned_type;
3507 tree orig_base = base;
3508
3509 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3510 return;
3511
3512 if (dump_file && (dump_flags & TDF_DETAILS))
3513 {
3514 fprintf (dump_file, "Induction variable (");
3515 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3516 fprintf (dump_file, ") ");
3517 print_generic_expr (dump_file, base, TDF_SLIM);
3518 fprintf (dump_file, " + ");
3519 print_generic_expr (dump_file, step, TDF_SLIM);
3520 fprintf (dump_file, " * iteration does not wrap in statement ");
3521 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3522 fprintf (dump_file, " in loop %d.\n", loop->num);
3523 }
3524
3525 unsigned_type = unsigned_type_for (type);
3526 base = fold_convert (unsigned_type, base);
3527 step = fold_convert (unsigned_type, step);
3528
3529 if (tree_int_cst_sign_bit (step))
3530 {
3531 wide_int min, max;
3532 extreme = fold_convert (unsigned_type, low);
3533 if (TREE_CODE (orig_base) == SSA_NAME
3534 && TREE_CODE (high) == INTEGER_CST
3535 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3536 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3537 || get_cst_init_from_scev (orig_base, &max, false))
3538 && wi::gts_p (wi::to_wide (high), max))
3539 base = wide_int_to_tree (unsigned_type, max);
3540 else if (TREE_CODE (base) != INTEGER_CST
3541 && dominated_by_p (CDI_DOMINATORS,
3542 loop->latch, gimple_bb (stmt)))
3543 base = fold_convert (unsigned_type, high);
3544 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3545 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3546 }
3547 else
3548 {
3549 wide_int min, max;
3550 extreme = fold_convert (unsigned_type, high);
3551 if (TREE_CODE (orig_base) == SSA_NAME
3552 && TREE_CODE (low) == INTEGER_CST
3553 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3554 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3555 || get_cst_init_from_scev (orig_base, &min, true))
3556 && wi::gts_p (min, wi::to_wide (low)))
3557 base = wide_int_to_tree (unsigned_type, min);
3558 else if (TREE_CODE (base) != INTEGER_CST
3559 && dominated_by_p (CDI_DOMINATORS,
3560 loop->latch, gimple_bb (stmt)))
3561 base = fold_convert (unsigned_type, low);
3562 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3563 }
3564
3565 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3566 would get out of the range. */
3567 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3568 widest_int max = derive_constant_upper_bound (niter_bound);
3569 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
3570 }
3571
3572 /* Determine information about the number of iterations of LOOP from the
3573 index IDX of a data reference accessed in STMT. Callback for
3574 for_each_index. */
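/* Example (illustrative): given "int a[100];" and a loop body that
   accesses a[i] with i evolving as {0, +1}, i must stay within
   <0, 99>, so record_nonwrapping_iv below bounds the number of
   iterations by at most 100 -- as a guaranteed bound only if the
   access is executed on every iteration and the array is not at the
   end of a structure.  */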
3576
3577 struct ilb_data
3578 {
3579 struct loop *loop;
3580 gimple *stmt;
3581 };
3582
3583 static bool
3584 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3585 {
3586 struct ilb_data *data = (struct ilb_data *) dta;
3587 tree ev, init, step;
3588 tree low, high, type, next;
3589 bool sign, upper = true, at_end = false;
3590 struct loop *loop = data->loop;
3591
3592 if (TREE_CODE (base) != ARRAY_REF)
3593 return true;
3594
3595 /* For arrays at the end of the structure, we are not guaranteed that they
3596 do not really extend over their declared size. However, for arrays of
3597 size greater than one, this is unlikely to be intended. */
3598 if (array_at_struct_end_p (base))
3599 {
3600 at_end = true;
3601 upper = false;
3602 }
3603
3604 struct loop *dloop = loop_containing_stmt (data->stmt);
3605 if (!dloop)
3606 return true;
3607
3608 ev = analyze_scalar_evolution (dloop, *idx);
3609 ev = instantiate_parameters (loop, ev);
3610 init = initial_condition (ev);
3611 step = evolution_part_in_loop_num (ev, loop->num);
3612
3613 if (!init
3614 || !step
3615 || TREE_CODE (step) != INTEGER_CST
3616 || integer_zerop (step)
3617 || tree_contains_chrecs (init, NULL)
3618 || chrec_contains_symbols_defined_in_loop (init, loop->num))
3619 return true;
3620
3621 low = array_ref_low_bound (base);
3622 high = array_ref_up_bound (base);
3623
3624 /* The case of nonconstant bounds could be handled, but it would be
3625 complicated. */
3626 if (TREE_CODE (low) != INTEGER_CST
3627 || !high
3628 || TREE_CODE (high) != INTEGER_CST)
3629 return true;
3630 sign = tree_int_cst_sign_bit (step);
3631 type = TREE_TYPE (step);
3632
3633 /* An array of length 1 at the end of a structure most likely extends
3634 beyond its declared bounds. */
3635 if (at_end
3636 && operand_equal_p (low, high, 0))
3637 return true;
3638
3639 /* In case the relevant bound of the array does not fit in type, or
3640 it does, but bound + step (in type) still belongs into the range of the
3641 array, the index may wrap and still stay within the range of the array
3642 (consider e.g. if the array is indexed by the full range of
3643 unsigned char).
3644
3645 To make things simpler, we require both bounds to fit into type, although
3646 there are cases where this would not be strictly necessary. */
3647 if (!int_fits_type_p (high, type)
3648 || !int_fits_type_p (low, type))
3649 return true;
3650 low = fold_convert (type, low);
3651 high = fold_convert (type, high);
3652
3653 if (sign)
3654 next = fold_binary (PLUS_EXPR, type, low, step);
3655 else
3656 next = fold_binary (PLUS_EXPR, type, high, step);
3657
3658 if (tree_int_cst_compare (low, next) <= 0
3659 && tree_int_cst_compare (next, high) <= 0)
3660 return true;
3661
3662 /* If the access is not executed on every iteration, we must ensure that
3663 overflow cannot make the access valid later. */
3664 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3665 && scev_probably_wraps_p (NULL_TREE,
3666 initial_condition_in_loop_num (ev, loop->num),
3667 step, data->stmt, loop, true))
3668 upper = false;
3669
3670 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
3671 return true;
3672 }
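
/* A hedged sketch of the inference above on hypothetical source (the
   array and index names are assumptions for illustration):

     int a[100];
     for (i = 0; ; i++)
       a[i] = 0;        // index i has evolution { 0, +, 1 }_loop

   The access a[i] is only defined for i in [0, 99] and the index
   cannot wrap inside those bounds, so record_nonwrapping_iv derives
   that the store executes at most 100 times, bounding the loop. */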
3673
3674 /* Determine information about the number of iterations of LOOP from the
3675 bounds of arrays in the data reference REF accessed in STMT; see
3676 idx_infer_loop_bounds for how the reliability of each bound is derived. */
3677
3678 static void
3679 infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
3680 {
3681 struct ilb_data data;
3682
3683 data.loop = loop;
3684 data.stmt = stmt;
3685 for_each_index (&ref, idx_infer_loop_bounds, &data);
3686 }
3687
3688 /* Determine information about the number of iterations of LOOP from the
3689 way arrays are used in STMT, analyzing every memory reference that
3690 appears in the statement. */
3691
3692 static void
3693 infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
3694 {
3695 if (is_gimple_assign (stmt))
3696 {
3697 tree op0 = gimple_assign_lhs (stmt);
3698 tree op1 = gimple_assign_rhs1 (stmt);
3699
3700 /* For each memory access, analyze its access function
3701 and record a bound on the loop iteration domain. */
3702 if (REFERENCE_CLASS_P (op0))
3703 infer_loop_bounds_from_ref (loop, stmt, op0);
3704
3705 if (REFERENCE_CLASS_P (op1))
3706 infer_loop_bounds_from_ref (loop, stmt, op1);
3707 }
3708 else if (is_gimple_call (stmt))
3709 {
3710 tree arg, lhs;
3711 unsigned i, n = gimple_call_num_args (stmt);
3712
3713 lhs = gimple_call_lhs (stmt);
3714 if (lhs && REFERENCE_CLASS_P (lhs))
3715 infer_loop_bounds_from_ref (loop, stmt, lhs);
3716
3717 for (i = 0; i < n; i++)
3718 {
3719 arg = gimple_call_arg (stmt, i);
3720 if (REFERENCE_CLASS_P (arg))
3721 infer_loop_bounds_from_ref (loop, stmt, arg);
3722 }
3723 }
3724 }
3725
3726 /* Determine information about the number of iterations of LOOP from the
3727 fact that pointer arithmetic in STMT does not overflow. */
3728
3729 static void
3730 infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
3731 {
3732 tree def, base, step, scev, type, low, high;
3733 tree var, ptr;
3734
3735 if (!is_gimple_assign (stmt)
3736 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3737 return;
3738
3739 def = gimple_assign_lhs (stmt);
3740 if (TREE_CODE (def) != SSA_NAME)
3741 return;
3742
3743 type = TREE_TYPE (def);
3744 if (!nowrap_type_p (type))
3745 return;
3746
3747 ptr = gimple_assign_rhs1 (stmt);
3748 if (!expr_invariant_in_loop_p (loop, ptr))
3749 return;
3750
3751 var = gimple_assign_rhs2 (stmt);
3752 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3753 return;
3754
3755 struct loop *uloop = loop_containing_stmt (stmt);
3756 scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
3757 if (chrec_contains_undetermined (scev))
3758 return;
3759
3760 base = initial_condition_in_loop_num (scev, loop->num);
3761 step = evolution_part_in_loop_num (scev, loop->num);
3762
3763 if (!base || !step
3764 || TREE_CODE (step) != INTEGER_CST
3765 || tree_contains_chrecs (base, NULL)
3766 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3767 return;
3768
3769 low = lower_bound_in_type (type, type);
3770 high = upper_bound_in_type (type, type);
3771
3772 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3773 produce a NULL pointer. The contrary would mean NULL points to an object,
3774 while NULL is supposed to compare unequal with the address of all objects.
3775 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3776 NULL pointer since that would mean wrapping, which we assume here not to
3777 happen. So, we can exclude NULL from the valid range of pointer
3778 arithmetic. */
3779 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3780 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3781
3782 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3783 }
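
/* A minimal sketch of the pattern handled above, with hypothetical
   names p, q and sz (not taken from the sources):

     while (...)
       {
         q = p + sz;    // POINTER_PLUS_EXPR; p is loop-invariant
         ...
         sz = sz + 8;
       }

   Here q has the evolution { p + sz0, +, 8 }. Pointer arithmetic is
   assumed not to wrap and, with -fdelete-null-pointer-checks, cannot
   create or use a NULL pointer, so q must stay within
   [TYPE_ALIGN_UNIT, type maximum], which bounds how often the
   statement may execute. */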
3784
3785 /* Determine information about the number of iterations of LOOP from the
3786 fact that signed arithmetic in STMT does not overflow. */
3787
3788 static void
3789 infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
3790 {
3791 tree def, base, step, scev, type, low, high;
3792
3793 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3794 return;
3795
3796 def = gimple_assign_lhs (stmt);
3797
3798 if (TREE_CODE (def) != SSA_NAME)
3799 return;
3800
3801 type = TREE_TYPE (def);
3802 if (!INTEGRAL_TYPE_P (type)
3803 || !TYPE_OVERFLOW_UNDEFINED (type))
3804 return;
3805
3806 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3807 if (chrec_contains_undetermined (scev))
3808 return;
3809
3810 base = initial_condition_in_loop_num (scev, loop->num);
3811 step = evolution_part_in_loop_num (scev, loop->num);
3812
3813 if (!base || !step
3814 || TREE_CODE (step) != INTEGER_CST
3815 || tree_contains_chrecs (base, NULL)
3816 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3817 return;
3818
3819 low = lower_bound_in_type (type, type);
3820 high = upper_bound_in_type (type, type);
3821 wide_int minv, maxv;
3822 if (get_range_info (def, &minv, &maxv) == VR_RANGE)
3823 {
3824 low = wide_int_to_tree (type, minv);
3825 high = wide_int_to_tree (type, maxv);
3826 }
3827
3828 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3829 }
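
/* A hedged example of the signed case (hypothetical code, the default
   -fstrict-overflow semantics assumed):

     int i = 0;
     while (...)
       i = i + 1;       // signed increment; overflow is undefined

   Since i must never be incremented past INT_MAX, the assignment can
   run at most INT_MAX times; if range info further narrows i to, say,
   [0, 99], the recorded window tightens accordingly. */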
3830
3831 /* The following analyzers extract information on the bounds of LOOP
3832 from the following undefined behaviors:
3833
3834 - data references should not access elements over the statically
3835 allocated size,
3836
3837 - signed variables should not overflow when flag_wrapv is not set,
3838 - pointer arithmetic should not wrap nor produce a NULL pointer. */
3839
3840 static void
3841 infer_loop_bounds_from_undefined (struct loop *loop)
3842 {
3843 unsigned i;
3844 basic_block *bbs;
3845 gimple_stmt_iterator bsi;
3846 basic_block bb;
3847 bool reliable;
3848
3849 bbs = get_loop_body (loop);
3850
3851 for (i = 0; i < loop->num_nodes; i++)
3852 {
3853 bb = bbs[i];
3854
3855 /* If BB is not executed in each iteration of the loop, we cannot
3856 use the operations in it to infer a reliable upper bound on the
3857 # of iterations of the loop. However, we can use it as a guess.
3858 Reliable guesses come only from array bounds. */
3859 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3860
3861 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3862 {
3863 gimple *stmt = gsi_stmt (bsi);
3864
3865 infer_loop_bounds_from_array (loop, stmt);
3866
3867 if (reliable)
3868 {
3869 infer_loop_bounds_from_signedness (loop, stmt);
3870 infer_loop_bounds_from_pointer_arith (loop, stmt);
3871 }
3872 }
3873
3874 }
3875
3876 free (bbs);
3877 }
3878
3879 /* Compare wide ints, callback for qsort. */
3880
3881 static int
3882 wide_int_cmp (const void *p1, const void *p2)
3883 {
3884 const widest_int *d1 = (const widest_int *) p1;
3885 const widest_int *d2 = (const widest_int *) p2;
3886 return wi::cmpu (*d1, *d2);
3887 }
3888
3889 /* Return index of BOUND in BOUNDS array sorted in increasing order.
3890 Lookup by binary search. */
3891
3892 static int
3893 bound_index (vec<widest_int> bounds, const widest_int &bound)
3894 {
3895 unsigned int end = bounds.length ();
3896 unsigned int begin = 0;
3897
3898 /* Find a matching index by means of a binary search. */
3899 while (begin != end)
3900 {
3901 unsigned int middle = (begin + end) / 2;
3902 widest_int index = bounds[middle];
3903
3904 if (index == bound)
3905 return middle;
3906 else if (wi::ltu_p (index, bound))
3907 begin = middle + 1;
3908 else
3909 end = middle;
3910 }
3911 gcc_unreachable ();
3912 }
3913
3914 /* We recorded loop bounds only for statements dominating loop latch (and thus
3915 executed each loop iteration). If there are any bounds on statements not
3916 dominating the loop latch we can improve the estimate by walking the loop
3917 body and seeing if every path from loop header to loop latch contains
3918 some bounded statement. */
3919
3920 static void
3921 discover_iteration_bound_by_body_walk (struct loop *loop)
3922 {
3923 struct nb_iter_bound *elt;
3924 auto_vec<widest_int> bounds;
3925 vec<vec<basic_block> > queues = vNULL;
3926 vec<basic_block> queue = vNULL;
3927 ptrdiff_t queue_index;
3928 ptrdiff_t latch_index = 0;
3929
3930 /* Discover what bounds may interest us. */
3931 for (elt = loop->bounds; elt; elt = elt->next)
3932 {
3933 widest_int bound = elt->bound;
3934
3935 /* An exit terminates the loop at a given iteration, while a non-exit bound
3936 produces an undefined effect on the next iteration. */
3937 if (!elt->is_exit)
3938 {
3939 bound += 1;
3940 /* If an overflow occurred, ignore the result. */
3941 if (bound == 0)
3942 continue;
3943 }
3944
3945 if (!loop->any_upper_bound
3946 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3947 bounds.safe_push (bound);
3948 }
3949
3950 /* Exit early if there is nothing to do. */
3951 if (!bounds.exists ())
3952 return;
3953
3954 if (dump_file && (dump_flags & TDF_DETAILS))
3955 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3956
3957 /* Sort the bounds in increasing order. */
3958 bounds.qsort (wide_int_cmp);
3959
3960 /* For every basic block record the lowest bound that is guaranteed to
3961 terminate the loop. */
3962
3963 hash_map<basic_block, ptrdiff_t> bb_bounds;
3964 for (elt = loop->bounds; elt; elt = elt->next)
3965 {
3966 widest_int bound = elt->bound;
3967 if (!elt->is_exit)
3968 {
3969 bound += 1;
3970 /* If an overflow occurred, ignore the result. */
3971 if (bound == 0)
3972 continue;
3973 }
3974
3975 if (!loop->any_upper_bound
3976 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3977 {
3978 ptrdiff_t index = bound_index (bounds, bound);
3979 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
3980 if (!entry)
3981 bb_bounds.put (gimple_bb (elt->stmt), index);
3982 else if ((ptrdiff_t)*entry > index)
3983 *entry = index;
3984 }
3985 }
3986
3987 hash_map<basic_block, ptrdiff_t> block_priority;
3988
3989 /* Perform shortest path discovery loop->header ... loop->latch.
3990
3991 The "distance" is given by the smallest loop bound of any basic block
3992 present in the path, and we look for the path whose smallest bound
3993 is largest.
3994
3995 To avoid the need for a Fibonacci heap over widest_ints we simply
3996 compress the bounds into indexes into the BOUNDS array and then
3997 represent the queue as an array of queues, one for every index.
3998 An index of BOUNDS.length () means that the execution of the given BB
3999 has no bound determined.
4000
4001 BLOCK_PRIORITY translates a basic block into the smallest index
4002 it was inserted into the priority queue with. */
4003 latch_index = -1;
4004
4005 /* Start walk in loop header with index set to infinite bound. */
4006 queue_index = bounds.length ();
4007 queues.safe_grow_cleared (queue_index + 1);
4008 queue.safe_push (loop->header);
4009 queues[queue_index] = queue;
4010 block_priority.put (loop->header, queue_index);
4011
4012 for (; queue_index >= 0; queue_index--)
4013 {
4014 if (latch_index < queue_index)
4015 {
4016 while (queues[queue_index].length ())
4017 {
4018 basic_block bb;
4019 ptrdiff_t bound_index = queue_index;
4020 edge e;
4021 edge_iterator ei;
4022
4023 queue = queues[queue_index];
4024 bb = queue.pop ();
4025
4026 /* The BB was later re-inserted with a higher priority; skip this stale entry. */
4027 if (*block_priority.get (bb) > queue_index)
4028 continue;
4029
4030 /* See if we can improve the bound. */
4031 ptrdiff_t *entry = bb_bounds.get (bb);
4032 if (entry && *entry < bound_index)
4033 bound_index = *entry;
4034
4035 /* Insert successors into the queue, watch for the latch edge
4036 and record the greatest index we saw. */
4037 FOR_EACH_EDGE (e, ei, bb->succs)
4038 {
4039 bool insert = false;
4040
4041 if (loop_exit_edge_p (loop, e))
4042 continue;
4043
4044 if (e == loop_latch_edge (loop)
4045 && latch_index < bound_index)
4046 latch_index = bound_index;
4047 else if (!(entry = block_priority.get (e->dest)))
4048 {
4049 insert = true;
4050 block_priority.put (e->dest, bound_index);
4051 }
4052 else if (*entry < bound_index)
4053 {
4054 insert = true;
4055 *entry = bound_index;
4056 }
4057
4058 if (insert)
4059 queues[bound_index].safe_push (e->dest);
4060 }
4061 }
4062 }
4063 queues[queue_index].release ();
4064 }
4065
4066 gcc_assert (latch_index >= 0);
4067 if ((unsigned)latch_index < bounds.length ())
4068 {
4069 if (dump_file && (dump_flags & TDF_DETAILS))
4070 {
4071 fprintf (dump_file, "Found better loop bound ");
4072 print_decu (bounds[latch_index], dump_file);
4073 fprintf (dump_file, "\n");
4074 }
4075 record_niter_bound (loop, bounds[latch_index], false, true);
4076 }
4077
4078 queues.release ();
4079 }
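
/* A hypothetical shape where this walk improves the estimate (the
   array names and sizes are invented):

     for (i = 0; ; i++)
       if (c)
         a[i] = 0;      // a[100]: bound of 99 on this statement
       else
         b[i] = 0;      // b[50]: bound of 49 on this statement

   Neither store dominates the latch, so neither bound was recorded
   against the loop directly; but every header->latch path crosses one
   of the stores, and the path whose smallest bound is largest runs
   through a[i], so the walk proves a bound derived from the larger
   array. */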
4080
4081 /* See if every path across the loop goes through a statement that is known
4082 to not execute at the last iteration. In that case we can decrease the
4083 iteration count by 1. */
4084
4085 static void
4086 maybe_lower_iteration_bound (struct loop *loop)
4087 {
4088 hash_set<gimple *> *not_executed_last_iteration = NULL;
4089 struct nb_iter_bound *elt;
4090 bool found_exit = false;
4091 auto_vec<basic_block> queue;
4092 bitmap visited;
4093
4094 /* Collect all statements with an interesting (i.e. lower than
4095 nb_iterations_upper_bound) bound on them.
4096
4097 TODO: Due to the way record_estimate chooses estimates to store, the bounds
4098 will always be nb_iterations_upper_bound-1. We can change this to also
4099 record statements not dominating the loop latch and update the walk below
4100 to a shortest path algorithm. */
4101 for (elt = loop->bounds; elt; elt = elt->next)
4102 {
4103 if (!elt->is_exit
4104 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
4105 {
4106 if (!not_executed_last_iteration)
4107 not_executed_last_iteration = new hash_set<gimple *>;
4108 not_executed_last_iteration->add (elt->stmt);
4109 }
4110 }
4111 if (!not_executed_last_iteration)
4112 return;
4113
4114 /* Start DFS walk in the loop header and see if we can reach the
4115 loop latch or any of the exits (including statements with side
4116 effects that may terminate the loop otherwise) without visiting
4117 any of the statements known to have undefined effect on the last
4118 iteration. */
4119 queue.safe_push (loop->header);
4120 visited = BITMAP_ALLOC (NULL);
4121 bitmap_set_bit (visited, loop->header->index);
4122 found_exit = false;
4123
4124 do
4125 {
4126 basic_block bb = queue.pop ();
4127 gimple_stmt_iterator gsi;
4128 bool stmt_found = false;
4129
4130 /* Scan the block for possible exits and statements bounding the execution. */
4131 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4132 {
4133 gimple *stmt = gsi_stmt (gsi);
4134 if (not_executed_last_iteration->contains (stmt))
4135 {
4136 stmt_found = true;
4137 break;
4138 }
4139 if (gimple_has_side_effects (stmt))
4140 {
4141 found_exit = true;
4142 break;
4143 }
4144 }
4145 if (found_exit)
4146 break;
4147
4148 /* If no bounding statement is found, continue the walk. */
4149 if (!stmt_found)
4150 {
4151 edge e;
4152 edge_iterator ei;
4153
4154 FOR_EACH_EDGE (e, ei, bb->succs)
4155 {
4156 if (loop_exit_edge_p (loop, e)
4157 || e == loop_latch_edge (loop))
4158 {
4159 found_exit = true;
4160 break;
4161 }
4162 if (bitmap_set_bit (visited, e->dest->index))
4163 queue.safe_push (e->dest);
4164 }
4165 }
4166 }
4167 while (queue.length () && !found_exit);
4168
4169 /* If every path through the loop reaches a bounding statement before an
4170 exit, then we know the last iteration of the loop will have undefined
4171 effect and we can decrease the number of iterations. */
4172
4173 if (!found_exit)
4174 {
4175 if (dump_file && (dump_flags & TDF_DETAILS))
4176 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
4177 "undefined statement must be executed at the last iteration.\n");
4178 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
4179 false, true);
4180 }
4181
4182 BITMAP_FREE (visited);
4183 delete not_executed_last_iteration;
4184 }
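
/* A hedged illustration (hypothetical code): suppose the exit test
   already bounds the latch by N, and

     for (...)          // nb_iterations_upper_bound == N
       {
         a[i] = 0;      // recorded bound N-1: UB on iteration N
         ...
       }

   The store lies on every header->latch path, so iteration N would
   necessarily execute a statement with undefined effect; the walk
   above therefore lowers the recorded bound to N - 1. */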
4185
4186 /* Records estimates on numbers of iterations of LOOP. Estimates derived
4187 from undefined behavior are also used under -faggressive-loop-optimizations. */
4188
4189 void
4190 estimate_numbers_of_iterations (struct loop *loop)
4191 {
4192 vec<edge> exits;
4193 tree niter, type;
4194 unsigned i;
4195 struct tree_niter_desc niter_desc;
4196 edge ex;
4197 widest_int bound;
4198 edge likely_exit;
4199
4200 /* Give up if we already have tried to compute an estimation. */
4201 if (loop->estimate_state != EST_NOT_COMPUTED)
4202 return;
4203
4204 loop->estimate_state = EST_AVAILABLE;
4205
4206 /* If we have a measured profile, use it to estimate the number of
4207 iterations. Normally this is recorded by branch_prob right after
4208 reading the profile. In case we however found a new loop, record the
4209 information here.
4210
4211 Explicitly check for profile status so we do not report
4212 wrong prediction hit rates for guessed loop iterations heuristics.
4213 Do not recompute already recorded bounds - we ought to be better at
4214 updating iteration bounds than at updating the profile in general, and
4215 thus recomputing iteration bounds later in the compilation process will
4216 just introduce random roundoff errors. */
4217 if (!loop->any_estimate
4218 && loop->header->count.reliable_p ())
4219 {
4220 gcov_type nit = expected_loop_iterations_unbounded (loop);
4221 bound = gcov_type_to_wide_int (nit);
4222 record_niter_bound (loop, bound, true, false);
4223 }
4224
4225 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
4226 to be constant, we avoid undefined behavior implied bounds and instead
4227 diagnose those loops with -Waggressive-loop-optimizations. */
4228 number_of_latch_executions (loop);
4229
4230 exits = get_loop_exit_edges (loop);
4231 likely_exit = single_likely_exit (loop);
4232 FOR_EACH_VEC_ELT (exits, i, ex)
4233 {
4234 if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
4235 continue;
4236
4237 niter = niter_desc.niter;
4238 type = TREE_TYPE (niter);
4239 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
4240 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
4241 build_int_cst (type, 0),
4242 niter);
4243 record_estimate (loop, niter, niter_desc.max,
4244 last_stmt (ex->src),
4245 true, ex == likely_exit, true);
4246 record_control_iv (loop, &niter_desc);
4247 }
4248 exits.release ();
4249
4250 if (flag_aggressive_loop_optimizations)
4251 infer_loop_bounds_from_undefined (loop);
4252
4253 discover_iteration_bound_by_body_walk (loop);
4254
4255 maybe_lower_iteration_bound (loop);
4256
4257 /* If we know the exact number of iterations of this loop, try to
4258 not break code with undefined behavior by not recording smaller
4259 maximum number of iterations. */
4260 if (loop->nb_iterations
4261 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
4262 {
4263 loop->any_upper_bound = true;
4264 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
4265 }
4266 }
4267
4268 /* Sets NIT to the estimated number of executions of the latch of the
4269 LOOP. If CONSERVATIVE is true, we must be sure that NIT is at least as
4270 large as the number of iterations. If we have no reliable estimate,
4271 the function returns false, otherwise returns true. */
4272
4273 bool
4274 estimated_loop_iterations (struct loop *loop, widest_int *nit)
4275 {
4276 /* When SCEV information is available, try to update loop iterations
4277 estimate. Otherwise just return whatever we recorded earlier. */
4278 if (scev_initialized_p ())
4279 estimate_numbers_of_iterations (loop);
4280
4281 return (get_estimated_loop_iterations (loop, nit));
4282 }
4283
4284 /* Similar to estimated_loop_iterations, but returns the estimate only
4285 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
4286 on the number of iterations of LOOP could not be derived, returns -1. */
4287
4288 HOST_WIDE_INT
4289 estimated_loop_iterations_int (struct loop *loop)
4290 {
4291 widest_int nit;
4292 HOST_WIDE_INT hwi_nit;
4293
4294 if (!estimated_loop_iterations (loop, &nit))
4295 return -1;
4296
4297 if (!wi::fits_shwi_p (nit))
4298 return -1;
4299 hwi_nit = nit.to_shwi ();
4300
4301 return hwi_nit < 0 ? -1 : hwi_nit;
4302 }
4303
4304
4305 /* Sets NIT to an upper bound for the maximum number of executions of the
4306 latch of the LOOP. If we have no reliable estimate, the function returns
4307 false, otherwise returns true. */
4308
4309 bool
4310 max_loop_iterations (struct loop *loop, widest_int *nit)
4311 {
4312 /* When SCEV information is available, try to update loop iterations
4313 estimate. Otherwise just return whatever we recorded earlier. */
4314 if (scev_initialized_p ())
4315 estimate_numbers_of_iterations (loop);
4316
4317 return get_max_loop_iterations (loop, nit);
4318 }
4319
4320 /* Similar to max_loop_iterations, but returns the estimate only
4321 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
4322 on the number of iterations of LOOP could not be derived, returns -1. */
4323
4324 HOST_WIDE_INT
4325 max_loop_iterations_int (struct loop *loop)
4326 {
4327 widest_int nit;
4328 HOST_WIDE_INT hwi_nit;
4329
4330 if (!max_loop_iterations (loop, &nit))
4331 return -1;
4332
4333 if (!wi::fits_shwi_p (nit))
4334 return -1;
4335 hwi_nit = nit.to_shwi ();
4336
4337 return hwi_nit < 0 ? -1 : hwi_nit;
4338 }
4339
4340 /* Sets NIT to a likely upper bound for the maximum number of executions
4341 of the latch of the LOOP. If we have no reliable estimate, the function
4342 returns false, otherwise returns true. */
4343
4344 bool
4345 likely_max_loop_iterations (struct loop *loop, widest_int *nit)
4346 {
4347 /* When SCEV information is available, try to update loop iterations
4348 estimate. Otherwise just return whatever we recorded earlier. */
4349 if (scev_initialized_p ())
4350 estimate_numbers_of_iterations (loop);
4351
4352 return get_likely_max_loop_iterations (loop, nit);
4353 }
4354
4355 /* Similar to likely_max_loop_iterations, but returns the estimate only
4356 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
4357 on the number of iterations of LOOP could not be derived, returns -1. */
4358
4359 HOST_WIDE_INT
4360 likely_max_loop_iterations_int (struct loop *loop)
4361 {
4362 widest_int nit;
4363 HOST_WIDE_INT hwi_nit;
4364
4365 if (!likely_max_loop_iterations (loop, &nit))
4366 return -1;
4367
4368 if (!wi::fits_shwi_p (nit))
4369 return -1;
4370 hwi_nit = nit.to_shwi ();
4371
4372 return hwi_nit < 0 ? -1 : hwi_nit;
4373 }
4374
4375 /* Returns an estimate for the number of executions of statements
4376 in the LOOP. For statements before the loop exit, this exceeds
4377 the number of execution of the latch by one. */
4378
4379 HOST_WIDE_INT
4380 estimated_stmt_executions_int (struct loop *loop)
4381 {
4382 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
4383 HOST_WIDE_INT snit;
4384
4385 if (nit == -1)
4386 return -1;
4387
4388 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
4389
4390 /* If the computation overflows, return -1. */
4391 return snit < 0 ? -1 : snit;
4392 }
4393
4394 /* Sets NIT to the maximum number of executions of the latch of the
4395 LOOP, plus one. If we have no reliable estimate, the function returns
4396 false, otherwise returns true. */
4397
4398 bool
4399 max_stmt_executions (struct loop *loop, widest_int *nit)
4400 {
4401 widest_int nit_minus_one;
4402
4403 if (!max_loop_iterations (loop, nit))
4404 return false;
4405
4406 nit_minus_one = *nit;
4407
4408 *nit += 1;
4409
4410 return wi::gtu_p (*nit, nit_minus_one);
4411 }
4412
4413 /* Sets NIT to the likely maximum number of executions of the latch of the
4414 LOOP, plus one. If we have no likely estimate, the function returns
4415 false, otherwise returns true. */
4416
4417 bool
4418 likely_max_stmt_executions (struct loop *loop, widest_int *nit)
4419 {
4420 widest_int nit_minus_one;
4421
4422 if (!likely_max_loop_iterations (loop, nit))
4423 return false;
4424
4425 nit_minus_one = *nit;
4426
4427 *nit += 1;
4428
4429 return wi::gtu_p (*nit, nit_minus_one);
4430 }
4431
4432 /* Sets NIT to the estimated number of executions of the latch of the
4433 LOOP, plus one. If we have no reliable estimate, the function returns
4434 false, otherwise returns true. */
4435
4436 bool
4437 estimated_stmt_executions (struct loop *loop, widest_int *nit)
4438 {
4439 widest_int nit_minus_one;
4440
4441 if (!estimated_loop_iterations (loop, nit))
4442 return false;
4443
4444 nit_minus_one = *nit;
4445
4446 *nit += 1;
4447
4448 return wi::gtu_p (*nit, nit_minus_one);
4449 }
4450
4451 /* Records estimates on numbers of iterations of loops. */
4452
4453 void
4454 estimate_numbers_of_iterations (function *fn)
4455 {
4456 struct loop *loop;
4457
4458 /* We don't want to issue signed overflow warnings while getting
4459 loop iteration estimates. */
4460 fold_defer_overflow_warnings ();
4461
4462 FOR_EACH_LOOP_FN (fn, loop, 0)
4463 estimate_numbers_of_iterations (loop);
4464
4465 fold_undefer_and_ignore_overflow_warnings ();
4466 }
4467
4468 /* Returns true if statement S1 dominates statement S2. */
4469
4470 bool
4471 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
4472 {
4473 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
4474
4475 if (!bb1
4476 || s1 == s2)
4477 return true;
4478
4479 if (bb1 == bb2)
4480 {
4481 gimple_stmt_iterator bsi;
4482
4483 if (gimple_code (s2) == GIMPLE_PHI)
4484 return false;
4485
4486 if (gimple_code (s1) == GIMPLE_PHI)
4487 return true;
4488
4489 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
4490 if (gsi_stmt (bsi) == s1)
4491 return true;
4492
4493 return false;
4494 }
4495
4496 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
4497 }
4498
4499 /* Returns true when we can prove that the number of executions of
4500 STMT in the loop is at most NITER, according to the bound on
4501 the number of executions of the statement NITER_BOUND->stmt recorded in
4502 NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
4503
4504 ??? This code can become quite a CPU hog - we can have many bounds,
4505 and large basic blocks forcing stmt_dominates_stmt_p to be queried
4506 many times on large basic blocks, so the whole thing is O(n^2)
4507 per scev_probably_wraps_p invocation (which can be done n times).
4508
4509 It would make more sense (and give better answers) to remember BB
4510 bounds computed by discover_iteration_bound_by_body_walk. */
4511
4512 static bool
4513 n_of_executions_at_most (gimple *stmt,
4514 struct nb_iter_bound *niter_bound,
4515 tree niter)
4516 {
4517 widest_int bound = niter_bound->bound;
4518 tree nit_type = TREE_TYPE (niter), e;
4519 enum tree_code cmp;
4520
4521 gcc_assert (TYPE_UNSIGNED (nit_type));
4522
4523 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
4524 the number of iterations is small. */
4525 if (!wi::fits_to_tree_p (bound, nit_type))
4526 return false;
4527
4528 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
4529 times. This means that:
4530
4531 -- if NITER_BOUND->is_exit is true, then everything after
4532 it is executed at most NITER_BOUND->bound times.
4533
4534 -- If NITER_BOUND->is_exit is false, and we can prove that whenever STMT
4535 is executed, NITER_BOUND->stmt is executed as well in the same
4536 iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
4537
4538 If we can determine that NITER_BOUND->stmt is always executed
4539 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4540 We conclude that this is the case if both statements belong to the same
4541 basic block, STMT precedes NITER_BOUND->stmt and there are no
4542 statements with side effects in between. */
4543
4544 if (niter_bound->is_exit)
4545 {
4546 if (stmt == niter_bound->stmt
4547 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4548 return false;
4549 cmp = GE_EXPR;
4550 }
4551 else
4552 {
4553 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4554 {
4555 gimple_stmt_iterator bsi;
4556 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4557 || gimple_code (stmt) == GIMPLE_PHI
4558 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4559 return false;
4560
4561 /* By stmt_dominates_stmt_p we already know that STMT appears
4562 before NITER_BOUND->STMT. We still need to test that the loop
4563 cannot be terminated by a side effect in between. */
4564 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4565 gsi_next (&bsi))
4566 if (gimple_has_side_effects (gsi_stmt (bsi)))
4567 return false;
4568 bound += 1;
4569 if (bound == 0
4570 || !wi::fits_to_tree_p (bound, nit_type))
4571 return false;
4572 }
4573 cmp = GT_EXPR;
4574 }
4575
4576 e = fold_binary (cmp, boolean_type_node,
4577 niter, wide_int_to_tree (nit_type, bound));
4578 return e && integer_nonzerop (e);
4579 }
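
/* A worked numeric reading of the above, with an invented bound: take
   NITER_BOUND->bound == 9. For an exit test, statements it dominates
   run at most 9 times, so NITER >= 9 proves the claim (GE_EXPR). For
   a non-exit bound, a dominated STMT runs at most 10 times, proven by
   NITER > 9; a preceding same-BB STMT with no side effects in between
   runs at most 11 times, hence the "bound += 1" before GT_EXPR. */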
4580
4581 /* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
4582
4583 bool
4584 nowrap_type_p (tree type)
4585 {
4586 if (ANY_INTEGRAL_TYPE_P (type)
4587 && TYPE_OVERFLOW_UNDEFINED (type))
4588 return true;
4589
4590 if (POINTER_TYPE_P (type))
4591 return true;
4592
4593 return false;
4594 }
4595
4596 /* Return true if we can prove LOOP is exited before evolution of induction
4597 variable {BASE, STEP} overflows with respect to its type bound. */
4598
4599 static bool
4600 loop_exits_before_overflow (tree base, tree step,
4601 gimple *at_stmt, struct loop *loop)
4602 {
4603 widest_int niter;
4604 struct control_iv *civ;
4605 struct nb_iter_bound *bound;
4606 tree e, delta, step_abs, unsigned_base;
4607 tree type = TREE_TYPE (step);
4608 tree unsigned_type, valid_niter;
4609
4610 /* Don't issue signed overflow warnings. */
4611 fold_defer_overflow_warnings ();
4612
4613 /* Compute the number of iterations before we reach the bound of the
4614 type, and verify that the loop is exited before this occurs. */
4615 unsigned_type = unsigned_type_for (type);
4616 unsigned_base = fold_convert (unsigned_type, base);
4617
4618 if (tree_int_cst_sign_bit (step))
4619 {
4620 tree extreme = fold_convert (unsigned_type,
4621 lower_bound_in_type (type, type));
4622 delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4623 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4624 fold_convert (unsigned_type, step));
4625 }
4626 else
4627 {
4628 tree extreme = fold_convert (unsigned_type,
4629 upper_bound_in_type (type, type));
4630 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4631 step_abs = fold_convert (unsigned_type, step);
4632 }
4633
4634 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
4635
4636 estimate_numbers_of_iterations (loop);
4637
4638 if (max_loop_iterations (loop, &niter)
4639 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4640 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4641 wide_int_to_tree (TREE_TYPE (valid_niter),
4642 niter))) != NULL
4643 && integer_nonzerop (e))
4644 {
4645 fold_undefer_and_ignore_overflow_warnings ();
4646 return true;
4647 }
4648 if (at_stmt)
4649 for (bound = loop->bounds; bound; bound = bound->next)
4650 {
4651 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4652 {
4653 fold_undefer_and_ignore_overflow_warnings ();
4654 return true;
4655 }
4656 }
4657 fold_undefer_and_ignore_overflow_warnings ();
4658
4659 /* Try to prove the loop is exited before {base, step} overflows with the
4660 help of the analyzed loop control IVs. This is done only for IVs with
4661 a constant step because otherwise we don't have the information. */
4662 if (TREE_CODE (step) == INTEGER_CST)
4663 {
4664 for (civ = loop->control_ivs; civ; civ = civ->next)
4665 {
4666 enum tree_code code;
4667 tree civ_type = TREE_TYPE (civ->step);
4668
4669 /* Have to consider type difference because operand_equal_p ignores
4670 that for constants. */
4671 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4672 || element_precision (type) != element_precision (civ_type))
4673 continue;
4674
4675 /* Only consider control IVs with the same step. */
4676 if (!operand_equal_p (step, civ->step, 0))
4677 continue;
4678
4679 /* Done proving if this is a no-overflow control IV. */
4680 if (operand_equal_p (base, civ->base, 0))
4681 return true;
4682
4683 /* The control IV is recorded after expanding simple operations;
4684 here we expand BASE and compare it too. */
4685 tree expanded_base = expand_simple_operations (base);
4686 if (operand_equal_p (expanded_base, civ->base, 0))
4687 return true;
4688
4689 /* If this is a before stepping control IV, in other words, we have
4690
4691 {civ_base, step} = {base + step, step}
4692
4693 Because civ {base + step, step} doesn't overflow during loop
4694 iterations, {base, step} will not overflow if we can prove the
4695 operation "base + step" does not overflow. Specifically, we try
4696 to prove below conditions are satisfied:
4697
4698 base <= UPPER_BOUND (type) - step ;;step > 0
4699 base >= LOWER_BOUND (type) - step ;;step < 0
4700
4701 by proving the reverse conditions are false using loop's initial
4702 condition. */
4703 if (POINTER_TYPE_P (TREE_TYPE (base)))
4704 code = POINTER_PLUS_EXPR;
4705 else
4706 code = PLUS_EXPR;
4707
4708 tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4709 tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
4710 expanded_base, step);
4711 if (operand_equal_p (stepped, civ->base, 0)
4712 || operand_equal_p (expanded_stepped, civ->base, 0))
4713 {
4714 tree extreme;
4715
4716 if (tree_int_cst_sign_bit (step))
4717 {
4718 code = LT_EXPR;
4719 extreme = lower_bound_in_type (type, type);
4720 }
4721 else
4722 {
4723 code = GT_EXPR;
4724 extreme = upper_bound_in_type (type, type);
4725 }
4726 extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4727 e = fold_build2 (code, boolean_type_node, base, extreme);
4728 e = simplify_using_initial_conditions (loop, e);
4729 if (integer_zerop (e))
4730 return true;
4731 }
4732 }
4733 }
4734
4735 return false;
4736 }
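
/* A sketch of the "before stepping" case handled above, written as
   hypothetical source (the names i, n, base are illustrative only):

     i = base;
     do
       {
         i += step;     // control IV is { base + step, +, step }
         ...
       }
     while (i < n);

   The control IV { base + step, step } is known not to overflow, so
   { base, step } cannot overflow either provided base + step itself
   does not - e.g. for step > 0, provided base <= TYPE_MAX - step,
   whose negation the loop's initial condition may rule out. */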
4737
4738 /* VAR is a SCEV variable whose evolution part is the constant STEP; this
4739 function proves that VAR can't overflow by using value range info. If
4740 VAR's value range is [MIN, MAX], this can be proven by:
4741 MAX + step doesn't overflow ; if step > 0
4742 or
4743 MIN + step doesn't underflow ; if step < 0.
4744
4745 We can only do this if VAR is computed in every loop iteration, i.e., VAR's
4746 definition has to dominate the loop latch. Consider the example below:
4747
4748 {
4749 unsigned int i;
4750
4751 <bb 3>:
4752
4753 <bb 4>:
4754 # RANGE [0, 4294967294] NONZERO 65535
4755 # i_21 = PHI <0(3), i_18(9)>
4756 if (i_21 != 0)
4757 goto <bb 6>;
4758 else
4759 goto <bb 8>;
4760
4761 <bb 6>:
4762 # RANGE [0, 65533] NONZERO 65535
4763 _6 = i_21 + 4294967295;
4764 # RANGE [0, 65533] NONZERO 65535
4765 _7 = (long unsigned int) _6;
4766 # RANGE [0, 524264] NONZERO 524280
4767 _8 = _7 * 8;
4768 # PT = nonlocal escaped
4769 _9 = a_14 + _8;
4770 *_9 = 0;
4771
4772 <bb 8>:
4773 # RANGE [1, 65535] NONZERO 65535
4774 i_18 = i_21 + 1;
4775 if (i_18 >= 65535)
4776 goto <bb 10>;
4777 else
4778 goto <bb 9>;
4779
4780 <bb 9>:
4781 goto <bb 4>;
4782
4783 <bb 10>:
4784 return;
4785 }
4786
4787 VAR _6 doesn't overflow only under the pre-condition (i_21 != 0), hence we
4788 can't use _6 to prove no-overflow for _7. In fact, var _7 takes the value
4789 sequence (4294967295, 0, 1, ..., 65533) over the loop's lifetime, rather
4790 than (4294967295, 4294967296, ...). */
4791
4792 static bool
4793 scev_var_range_cant_overflow (tree var, tree step, struct loop *loop)
4794 {
4795 tree type;
4796 wide_int minv, maxv, diff, step_wi;
4797 enum value_range_kind rtype;
4798
4799 if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
4800 return false;
4801
4802 /* Check if VAR evaluates in every loop iteration. It's not the case
4803 if VAR is default definition or does not dominate loop's latch. */
4804 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
4805 if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
4806 return false;
4807
4808 rtype = get_range_info (var, &minv, &maxv);
4809 if (rtype != VR_RANGE)
4810 return false;
4811
4812 /* VAR is a SCEV whose evolution part is STEP and whose value range
4813 is [MIN, MAX]; we can prove it does not overflow if:
4814
4815 type_MAX - MAX >= step ; if step > 0
4816 MIN - type_MIN >= |step| ; if step < 0.
4817
4818 Otherwise VAR would take a value outside its range, which is impossible. */
4819 step_wi = wi::to_wide (step);
4820 type = TREE_TYPE (var);
4821 if (tree_int_cst_sign_bit (step))
4822 {
4823 diff = minv - wi::to_wide (lower_bound_in_type (type, type));
4824 step_wi = - step_wi;
4825 }
4826 else
4827 diff = wi::to_wide (upper_bound_in_type (type, type)) - maxv;
4828
4829 return (wi::geu_p (diff, step_wi));
4830 }
4831
4832 /* Return false only when the induction variable BASE + STEP * I is
4833 known to not overflow: i.e. when the number of iterations is small
4834 enough with respect to the step and initial condition in order to
4835 keep the evolution confined in TYPEs bounds. Return true when the
4836 iv is known to overflow or when the property is not computable.
4837
4838 USE_OVERFLOW_SEMANTICS is true if this function should assume that
4839 the rules for overflow of the given language apply (e.g., that signed
4840 arithmetics in C does not overflow).
4841
4842 If VAR is an SSA variable, this function also returns false if VAR can
4843 be proven not to overflow using value range info. */
4844
4845 bool
4846 scev_probably_wraps_p (tree var, tree base, tree step,
4847 gimple *at_stmt, struct loop *loop,
4848 bool use_overflow_semantics)
4849 {
4850 /* FIXME: We really need something like
4851 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4852
4853 We used to test for the following situation that frequently appears
4854 during address arithmetics:
4855
4856 D.1621_13 = (long unsigned intD.4) D.1620_12;
4857 D.1622_14 = D.1621_13 * 8;
4858 D.1623_15 = (doubleD.29 *) D.1622_14;
4859
4860 And derived that the sequence corresponding to D_14
4861 can be proved to not wrap because it is used for computing a
4862 memory access; however, this is not really the case -- for example,
4863 if D_12 = (unsigned char) [254,+,1], then D_14 has values
4864 2032, 2040, 0, 8, ..., but the code is still legal. */
4865
4866 if (chrec_contains_undetermined (base)
4867 || chrec_contains_undetermined (step))
4868 return true;
4869
4870 if (integer_zerop (step))
4871 return false;
4872
4873 /* If we can use the fact that signed and pointer arithmetic does not
4874 wrap, we are done. */
4875 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
4876 return false;
4877
4878 /* To be able to use estimates on number of iterations of the loop,
4879 we must have an upper bound on the absolute value of the step. */
4880 if (TREE_CODE (step) != INTEGER_CST)
4881 return true;
4882
4883 /* Check if VAR can be proven not to overflow using value range info. */
4884 if (var && TREE_CODE (var) == SSA_NAME
4885 && scev_var_range_cant_overflow (var, step, loop))
4886 return false;
4887
4888 if (loop_exits_before_overflow (base, step, at_stmt, loop))
4889 return false;
4890
4891 /* At this point we still don't have a proof that the iv does not
4892 overflow: give up. */
4893 return true;
4894 }
4895
4896 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
4897
4898 void
4899 free_numbers_of_iterations_estimates (struct loop *loop)
4900 {
4901 struct control_iv *civ;
4902 struct nb_iter_bound *bound;
4903
4904 loop->nb_iterations = NULL;
4905 loop->estimate_state = EST_NOT_COMPUTED;
4906 for (bound = loop->bounds; bound;)
4907 {
4908 struct nb_iter_bound *next = bound->next;
4909 ggc_free (bound);
4910 bound = next;
4911 }
4912 loop->bounds = NULL;
4913
4914 for (civ = loop->control_ivs; civ;)
4915 {
4916 struct control_iv *next = civ->next;
4917 ggc_free (civ);
4918 civ = next;
4919 }
4920 loop->control_ivs = NULL;
4921 }
4922
4923 /* Frees the information on upper bounds on numbers of iterations of loops. */
4924
4925 void
4926 free_numbers_of_iterations_estimates (function *fn)
4927 {
4928 struct loop *loop;
4929
4930 FOR_EACH_LOOP_FN (fn, loop, 0)
4931 free_numbers_of_iterations_estimates (loop);
4932 }
4933
4934 /* Substitute value VAL for ssa name NAME inside expressions held
4935 at LOOP. */
4936
4937 void
4938 substitute_in_loop_info (struct loop *loop, tree name, tree val)
4939 {
4940 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
4941 }