/* Induction variable canonicalization and loop peeling.
   Copyright (C) 2004-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass detects loops that iterate a constant number of times,
   adds a canonical induction variable (step -1, tested against 0)
   and replaces the exit test.  This enables the less powerful rtl
   level analysis to use this information.

   This might spoil the code in some cases (by increasing register pressure).
   Note that if the new variable is not needed, ivopts will get rid
   of it, so it is only likely to be a problem when there are no other
   linear induction variables.  In that case the created optimization
   possibilities are likely to pay off.

   Additionally, when we detect that it is beneficial to unroll the
   loop completely, we do it right here to expose the optimization
   possibilities to the following passes.  */
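
/* As an illustrative sketch (not part of the pass itself; all variable
   names below are hypothetical), the canonicalization rewrites a counted
   loop such as

     for (i = 0; i < n; i++)
       a[i] = i;

   so that the exit test uses a fresh induction variable that starts at
   the iteration count, steps by -1 and is compared for equality with 0:

     ivtmp = n;
     do
       {
         a[i] = i;
         i++;
         ivtmp--;
       }
     while (ivtmp != 0);

   See create_canonical_iv below for the actual construction.  */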

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "flags.h"
#include "tree-inline.h"
#include "target.h"
#include "tree-cfgcleanup.h"

/* Specifies types of loops that may be unrolled.  */

enum unroll_level
{
  UL_SINGLE_ITER,   /* Only loops that exit immediately in the first
                       iteration.  */
  UL_NO_GROWTH,     /* Only loops whose unrolling will not cause increase
                       of code size.  */
  UL_ALL            /* All suitable loops.  */
};

/* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
   is the exit edge whose condition is replaced.  */

static void
create_canonical_iv (struct loop *loop, edge exit, tree niter)
{
  edge in;
  tree type, var;
  gimple cond;
  gimple_stmt_iterator incr_at;
  enum tree_code cmp;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " iterations.\n");
    }

  cond = last_stmt (exit->src);
  in = EDGE_SUCC (exit->src, 0);
  if (in == exit)
    in = EDGE_SUCC (exit->src, 1);

  /* Note that we do not need to worry about overflows, since
     type of niter is always unsigned and all comparisons are
     just for equality/nonequality -- i.e. everything works
     with modulo arithmetic.  */

  type = TREE_TYPE (niter);
  niter = fold_build2 (PLUS_EXPR, type,
                       niter,
                       build_int_cst (type, 1));
  incr_at = gsi_last_bb (in->src);
  create_iv (niter,
             build_int_cst (type, -1),
             NULL_TREE, loop,
             &incr_at, false, NULL, &var);

  cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
  gimple_cond_set_code (cond, cmp);
  gimple_cond_set_lhs (cond, var);
  gimple_cond_set_rhs (cond, build_int_cst (type, 0));
  update_stmt (cond);
}

/* Describe size of loop as detected by tree_estimate_loop_size.  */
struct loop_size
{
  /* Number of instructions in the loop.  */
  int overall;

  /* Number of instructions that will be likely optimized out in
     peeled iterations of loop (i.e. computation based on induction
     variable where induction variable starts at known constant).  */
  int eliminated_by_peeling;

  /* Same statistics for last iteration of loop: it is smaller because
     instructions after exit are not executed.  */
  int last_iteration;
  int last_iteration_eliminated_by_peeling;

  /* If some IV computation will become constant.  */
  bool constant_iv;

  /* Number of call stmts that are not a builtin and are pure or const
     present on the hot path.  */
  int num_pure_calls_on_hot_path;
  /* Number of call stmts that are not a builtin and are not pure nor const
     present on the hot path.  */
  int num_non_pure_calls_on_hot_path;
  /* Number of statements other than calls in the loop.  */
  int non_call_stmts_on_hot_path;
  /* Number of branches seen on the hot path.  */
  int num_branches_on_hot_path;
};

/* Return true if OP in STMT will be constant after peeling LOOP.  */

static bool
constant_after_peeling (tree op, gimple stmt, struct loop *loop)
{
  affine_iv iv;

  if (is_gimple_min_invariant (op))
    return true;

  /* We can still fold accesses to constant arrays when index is known.  */
  if (TREE_CODE (op) != SSA_NAME)
    {
      tree base = op;

      /* First take a quick look whether we see a constant array inside.  */
      while (handled_component_p (base))
        base = TREE_OPERAND (base, 0);
      if ((DECL_P (base)
           && ctor_for_folding (base) != error_mark_node)
          || CONSTANT_CLASS_P (base))
        {
          /* If so, see if we understand all the indices.  */
          base = op;
          while (handled_component_p (base))
            {
              if (TREE_CODE (base) == ARRAY_REF
                  && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
                return false;
              base = TREE_OPERAND (base, 0);
            }
          return true;
        }
      return false;
    }

  /* Induction variables are constants.  */
  if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
    return false;
  if (!is_gimple_min_invariant (iv.base))
    return false;
  if (!is_gimple_min_invariant (iv.step))
    return false;
  return true;
}

/* Computes an estimated number of insns in LOOP.
   EXIT (if non-NULL) is an exit edge that will be eliminated in all but
   the last iteration of the loop.
   EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last
   iteration of the loop.
   Return results in SIZE, estimating benefits for complete unrolling
   exiting by EXIT.
   Stop estimating after UPPER_BOUND is met.  Return true in this case.  */

static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
                         struct loop_size *size, int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
          && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
        after_exit = true;
      else
        after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          int num = estimate_num_insns (stmt, &eni_size_weights);
          bool likely_eliminated = false;
          bool likely_eliminated_last = false;
          bool likely_eliminated_peeled = false;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  size: %3i ", num);
              print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
            }

          /* Look for reasons why we might optimize this stmt away.  */

          if (gimple_has_side_effects (stmt))
            ;
          /* Exit conditional.  */
          else if (exit && body[i] == exit->src
                   && stmt == last_stmt (exit->src))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "   Exit condition will be eliminated "
                         "in peeled copies.\n");
              likely_eliminated_peeled = true;
            }
          else if (edge_to_cancel && body[i] == edge_to_cancel->src
                   && stmt == last_stmt (edge_to_cancel->src))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "   Exit condition will be eliminated "
                         "in last copy.\n");
              likely_eliminated_last = true;
            }
          /* Sets of IV variables.  */
          else if (gimple_code (stmt) == GIMPLE_ASSIGN
                   && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "   Induction variable computation will"
                         " be folded away.\n");
              likely_eliminated = true;
            }
          /* Assignments of IV variables.  */
          else if (gimple_code (stmt) == GIMPLE_ASSIGN
                   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
                   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop)
                   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
                       || constant_after_peeling (gimple_assign_rhs2 (stmt),
                                                  stmt, loop)))
            {
              size->constant_iv = true;
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "   Constant expression will be folded away.\n");
              likely_eliminated = true;
            }
          /* Conditionals.  */
          else if ((gimple_code (stmt) == GIMPLE_COND
                    && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
                    && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))
                   || (gimple_code (stmt) == GIMPLE_SWITCH
                       && constant_after_peeling (gimple_switch_index (stmt), stmt, loop)))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "   Constant conditional.\n");
              likely_eliminated = true;
            }

          size->overall += num;
          if (likely_eliminated || likely_eliminated_peeled)
            size->eliminated_by_peeling += num;
          if (!after_exit)
            {
              size->last_iteration += num;
              if (likely_eliminated || likely_eliminated_last)
                size->last_iteration_eliminated_by_peeling += num;
            }
          if ((size->overall * 3 / 2 - size->eliminated_by_peeling
               - size->last_iteration_eliminated_by_peeling) > upper_bound)
            {
              free (body);
              path.release ();
              return true;
            }
        }
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          if (gimple_code (stmt) == GIMPLE_CALL)
            {
              int flags = gimple_call_flags (stmt);
              tree decl = gimple_call_fndecl (stmt);

              if (decl && DECL_IS_BUILTIN (decl)
                  && is_inexpensive_builtin (decl))
                ;
              else if (flags & (ECF_PURE | ECF_CONST))
                size->num_pure_calls_on_hot_path++;
              else
                size->num_non_pure_calls_on_hot_path++;
              size->num_branches_on_hot_path++;
            }
          else if (gimple_code (stmt) != GIMPLE_CALL
                   && gimple_code (stmt) != GIMPLE_DEBUG)
            size->non_call_stmts_on_hot_path++;
          if (((gimple_code (stmt) == GIMPLE_COND
                && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
                    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt,
                                                loop)))
               || (gimple_code (stmt) == GIMPLE_SWITCH
                   && !constant_after_peeling (gimple_switch_index (stmt), stmt, loop)))
              && (!exit || bb != exit->src))
            size->num_branches_on_hot_path++;
        }
    }
  path.release ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
             size->eliminated_by_peeling, size->last_iteration,
             size->last_iteration_eliminated_by_peeling);

  free (body);
  return false;
}

/* Estimate number of insns of completely unrolled loop.
   It is (NUNROLL + 1) * size of loop body, taking into account
   the fact that in the last copy everything after the exit conditional
   is dead and that some instructions will be eliminated after
   peeling.

   The loop body is likely going to simplify further; this is difficult
   to guess, so we just decrease the result by 1/3.  */
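
/* A hypothetical worked example of the estimate above: with
   SIZE->overall == 10 insns of which SIZE->eliminated_by_peeling == 4
   fold away in peeled copies, unrolling NUNROLL == 3 times gives
   3 * (10 - 4) == 18 insns for the peeled copies, plus
   SIZE->last_iteration - SIZE->last_iteration_eliminated_by_peeling
   (say 8 - 4 == 4) for the final copy, i.e. 22 insns, scaled by 2/3
   to roughly 14.  */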

static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
                         unsigned HOST_WIDE_INT nunroll)
{
  HOST_WIDE_INT unr_insns = ((nunroll)
                             * (HOST_WIDE_INT) (size->overall
                                                - size->eliminated_by_peeling));
  if (!nunroll)
    unr_insns = 0;
  unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;

  unr_insns = unr_insns * 2 / 3;
  if (unr_insns <= 0)
    unr_insns = 1;

  return unr_insns;
}

/* Loop LOOP is known to not loop.  See if there is an edge in the loop
   body that can be removed to make the loop always exit, without at
   the same time making any code potentially executed
   during the last iteration dead.

   After complete unrolling we still may get rid of the conditional
   on the exit in the last copy even if we have no idea what it does.
   This is quite a common case for loops of the form

   int a[5];
   for (i=0;i<b;i++)
     a[i]=0;

   Here we prove the loop to iterate 5 times but we do not know
   it from the induction variable.

   For now we handle only the simple case where there is an exit condition
   just before the latch block and the latch block contains no statements
   with side effects that may otherwise terminate the execution of the loop
   (such as by EH or by terminating the program or longjmp).

   In the general case we may want to cancel the paths leading to statements
   loop-niter identified as having undefined effect in the last iteration.
   The other cases are hopefully rare and will be cleaned up later.  */
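
/* An illustrative sketch of the only shape handled for now (block names
   are hypothetical):

     header:
       ...body...
     cond_bb:
       if (exit_cond) goto exit_bb; else goto latch;
     latch:
       ;; no statements with side effects
       goto header;

   The edge cond_bb->latch is the one this function returns for
   cancellation.  */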

static edge
loop_edge_to_cancel (struct loop *loop)
{
  vec<edge> exits;
  unsigned i;
  edge edge_to_cancel;
  gimple_stmt_iterator gsi;

  /* We want only one predecessor of the loop.  */
  if (EDGE_COUNT (loop->latch->preds) > 1)
    return NULL;

  exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
    {
      /* Find the other edge than the loop exit
         leaving the conditional.  */
      if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
        continue;
      if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
        edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
      else
        edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);

      /* We can only handle conditionals.  */
      if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      /* We should never have conditionals in the loop latch.  */
      gcc_assert (edge_to_cancel->dest != loop->header);

      /* Check that it leads to loop latch.  */
      if (edge_to_cancel->dest != loop->latch)
        continue;

      exits.release ();

      /* Verify that the code in loop latch does nothing that may end program
         execution without really reaching the exit.  This may include
         non-pure/const function calls, EH statements, volatile ASMs etc.  */
      for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
        if (gimple_has_side_effects (gsi_stmt (gsi)))
          return NULL;
      return edge_to_cancel;
    }
  exits.release ();
  return NULL;
}

/* Remove all tests for exits that are known to be taken after LOOP was
   peeled NPEELED times.  Put gcc_unreachable before every statement
   known to not be executed.  */

static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If statement is known to be undefined after peeling, turn it
         into unreachable (or trap when debugging experience is supposed
         to be good).  */
      if (!elt->is_exit
          && elt->bound.ult (double_int::from_uhwi (npeeled)))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
          gimple stmt = gimple_build_call
              (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);

          gimple_set_location (stmt, gimple_location (elt->stmt));
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
          changed = true;
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Forced statement unreachable: ");
              print_gimple_stmt (dump_file, elt->stmt, 0, 0);
            }
        }
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
               && elt->bound.ule (double_int::from_uhwi (npeeled)))
        {
          basic_block bb = gimple_bb (elt->stmt);
          edge exit_edge = EDGE_SUCC (bb, 0);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Forced exit to be taken: ");
              print_gimple_stmt (dump_file, elt->stmt, 0, 0);
            }
          if (!loop_exit_edge_p (loop, exit_edge))
            exit_edge = EDGE_SUCC (bb, 1);
          gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
          if (exit_edge->flags & EDGE_TRUE_VALUE)
            gimple_cond_make_true (elt->stmt);
          else
            gimple_cond_make_false (elt->stmt);
          update_stmt (elt->stmt);
          changed = true;
        }
    }
  return changed;
}

/* Remove all exits that are known to be never taken because of the loop bound
   discovered.  */

static bool
remove_redundant_iv_tests (struct loop *loop)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* Exit is pointless if it won't be taken before loop reaches
         upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
          && loop->nb_iterations_upper_bound.ult (elt->bound))
        {
          basic_block bb = gimple_bb (elt->stmt);
          edge exit_edge = EDGE_SUCC (bb, 0);
          struct tree_niter_desc niter;

          if (!loop_exit_edge_p (loop, exit_edge))
            exit_edge = EDGE_SUCC (bb, 1);

          /* Only when we know the actual number of iterations, not
             just a bound, we can remove the exit.  */
          if (!number_of_iterations_exit (loop, exit_edge,
                                          &niter, false, false)
              || !integer_onep (niter.assumptions)
              || !integer_zerop (niter.may_be_zero)
              || !niter.niter
              || TREE_CODE (niter.niter) != INTEGER_CST
              || !loop->nb_iterations_upper_bound.ult
                    (tree_to_double_int (niter.niter)))
            continue;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Removed pointless exit: ");
              print_gimple_stmt (dump_file, elt->stmt, 0, 0);
            }
          if (exit_edge->flags & EDGE_TRUE_VALUE)
            gimple_cond_make_false (elt->stmt);
          else
            gimple_cond_make_true (elt->stmt);
          update_stmt (elt->stmt);
          changed = true;
        }
    }
  return changed;
}

/* Stores loops that will be unlooped after we process the whole loop tree.  */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;

/* Cancel all fully unrolled loops by putting __builtin_unreachable
   on the latch edge.
   We do it after all unrolling since unlooping moves basic blocks
   across loop boundaries trashing loop closed SSA form as well
   as SCEV info needed to be intact during unrolling.

   IRRED_INVALIDATED is used to bookkeep if information about
   irreducible regions may become invalid as a result
   of the transformation.
   LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
   when we need to go into loop closed SSA form.  */

static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
              bool *irred_invalidated)
{
  while (loops_to_unloop.length ())
    {
      struct loop *loop = loops_to_unloop.pop ();
      int n_unroll = loops_to_unloop_nunroll.pop ();
      basic_block latch = loop->latch;
      edge latch_edge = loop_latch_edge (loop);
      int flags = latch_edge->flags;
      location_t locus = latch_edge->goto_locus;
      gimple stmt;
      gimple_stmt_iterator gsi;

      remove_exits_and_undefined_stmts (loop, n_unroll);

      /* Unloop destroys the latch edge.  */
      unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);

      /* Create new basic block for the latch edge destination and wire
         it in.  */
      stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
      latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch), flags);
      latch_edge->probability = 0;
      latch_edge->count = 0;
      latch_edge->flags |= flags;
      latch_edge->goto_locus = locus;

      latch_edge->dest->loop_father = current_loops->tree_root;
      latch_edge->dest->count = 0;
      latch_edge->dest->frequency = 0;
      set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest, latch_edge->src);

      gsi = gsi_start_bb (latch_edge->dest);
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
    }
  loops_to_unloop.release ();
  loops_to_unloop_nunroll.release ();
}

/* Tries to unroll LOOP completely, i.e. NITER times.
   UL determines which loops we are allowed to unroll.
   EXIT is the exit of the loop that should be eliminated.
   MAXITER specifies the bound on number of iterations, -1 if it is
   not known or too large for HOST_WIDE_INT.  The location
   LOCUS corresponding to the loop is used when emitting
   a summary of the unroll to the dump file.  */
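
/* An illustrative reading of the parameters (no extra semantics): a loop
   whose latch is known to execute exactly 3 times arrives here with
   NITER == 3; the body is then duplicated N_UNROLL == 3 times in front of
   the remaining copy, and EDGE_TO_CANCEL removes the now-redundant test
   from that last copy, so the dump below reports the loop as unrolled
   with 4 (that is, NITER + 1) iterations.  */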

static bool
try_unroll_loop_completely (struct loop *loop,
                            edge exit, tree niter,
                            enum unroll_level ul,
                            HOST_WIDE_INT maxiter,
                            location_t locus)
{
  unsigned HOST_WIDE_INT n_unroll, ninsns, max_unroll, unr_insns;
  gimple cond;
  struct loop_size size;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;

  /* See if we proved number of iterations to be a low constant.

     EXIT is an edge that will be removed in all but last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of executions of the loop is determined by a standard
     induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges
     leaving from the iv test.  */
  if (tree_fits_uhwi_p (niter))
    {
      n_unroll = tree_to_uhwi (niter);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
        edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations and thus we can not eliminate
     the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if (maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* Loop terminates before the IV variable test, so we can not
         remove it in the last iteration.  */
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  max_unroll = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
  if (n_unroll > max_unroll)
    return false;

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);

  if (n_unroll)
    {
      sbitmap wont_exit;
      edge e;
      unsigned i;
      bool large;
      vec<edge> to_remove = vNULL;
      if (ul == UL_SINGLE_ITER)
        return false;

      large = tree_estimate_loop_size
                (loop, exit, edge_to_cancel, &size,
                 PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
      ninsns = size.overall;
      if (large)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
                     loop->num);
          return false;
        }

      unr_insns = estimated_unrolled_size (&size, n_unroll);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Loop size: %d\n", (int) ninsns);
          fprintf (dump_file, "  Estimated size after unrolling: %d\n",
                   (int) unr_insns);
        }

      /* If the code is going to shrink, we don't need to be extra cautious
         on guessing if the unrolling is going to be profitable.  */
      if (unr_insns
          /* If there is IV variable that will become constant, we save
             one instruction in the loop prologue we do not account
             otherwise.  */
          <= ninsns + (size.constant_iv != false))
        ;
      /* We unroll only inner loops, because we do not consider it profitable
         otherwise.  We still can cancel loopback edge of a not rolling loop;
         this is always a good idea.  */
      else if (ul == UL_NO_GROWTH)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
                     loop->num);
          return false;
        }
      /* Outer loops tend to be less interesting candidates for complete
         unrolling unless we can do a lot of propagation into the inner loop
         body.  For now we disable outer loop unrolling when the code would
         grow.  */
      else if (loop->inner)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: "
                     "it is not innermost and code would grow.\n",
                     loop->num);
          return false;
        }
      /* If there is a call on a hot path through the loop, then
         there is most probably not much to optimize.  */
      else if (size.num_non_pure_calls_on_hot_path)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: "
                     "contains call and code would grow.\n",
                     loop->num);
          return false;
        }
      /* If there is a pure/const call in the function, then we
         can still optimize the unrolled loop body if it contains
         some other interesting code than the calls and code
         storing or cumulating the return value.  */
      else if (size.num_pure_calls_on_hot_path
               /* One IV increment, one test, one ivtmp store
                  and one useful stmt.  That is about the minimal loop
                  doing a pure call.  */
               && (size.non_call_stmts_on_hot_path
                   <= 3 + size.num_pure_calls_on_hot_path))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: "
                     "contains just pure calls and code would grow.\n",
                     loop->num);
          return false;
        }
      /* Complete unrolling is a major win when control flow is removed and
         one big basic block is created.  If the loop contains control flow
         the optimization may still be a win because of eliminating the loop
         overhead but it also may blow the branch predictor tables.
         Limit number of branches on the hot path through the peeled
         sequence.  */
      else if (size.num_branches_on_hot_path * (int)n_unroll
               > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: "
                     " number of branches on hot path in the unrolled sequence"
                     " reaches --param max-peel-branches limit.\n",
                     loop->num);
          return false;
        }
      else if (unr_insns
               > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: "
                     "(--param max-completely-peeled-insns limit reached).\n",
                     loop->num);
          return false;
        }

      initialize_original_copy_tables ();
      wont_exit = sbitmap_alloc (n_unroll + 1);
      bitmap_ones (wont_exit);
      bitmap_clear_bit (wont_exit, 0);

      if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
                                                 n_unroll, wont_exit,
                                                 exit, &to_remove,
                                                 DLTHE_FLAG_UPDATE_FREQ
                                                 | DLTHE_FLAG_COMPLETTE_PEEL))
        {
          free_original_copy_tables ();
          free (wont_exit);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Failed to duplicate the loop\n");
          return false;
        }

      FOR_EACH_VEC_ELT (to_remove, i, e)
        {
          bool ok = remove_path (e);
          gcc_assert (ok);
        }

      to_remove.release ();
      free (wont_exit);
      free_original_copy_tables ();
    }

  /* Remove the conditional from the last copy of the loop.  */
  if (edge_to_cancel)
    {
      cond = last_stmt (edge_to_cancel->src);
      if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
        gimple_cond_make_false (cond);
      else
        gimple_cond_make_true (cond);
      update_stmt (cond);
      /* Do not remove the path, as doing so may remove outer loop
         and confuse bookkeeping code in tree_unroll_loops_completely.  */
    }

  /* Store the loop for later unlooping and exit removal.  */
  loops_to_unloop.safe_push (loop);
  loops_to_unloop_nunroll.safe_push (n_unroll);

  if (dump_enabled_p ())
    {
      if (!n_unroll)
        dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
                         "loop turned into non-loop; it never loops\n");
      else
        {
          dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
                           "loop with %d iterations completely unrolled",
                           (int) (n_unroll + 1));
          if (profile_info)
            dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
                         " (header execution count %d)",
                         (int)loop->header->count);
          dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
        }
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (exit)
        fprintf (dump_file, "Exit condition of peeled iterations was "
                 "eliminated.\n");
      if (edge_to_cancel)
        fprintf (dump_file, "Last iteration exit edge was proved true.\n");
      else
        fprintf (dump_file, "Latch of last iteration was marked by "
                 "__builtin_unreachable ().\n");
    }

  return true;
}

/* Adds a canonical induction variable to LOOP if suitable.
   CREATE_IV is true if we may create a new iv.  UL determines
   which loops we are allowed to completely unroll.  If TRY_EVAL is true,
   we try to determine the number of iterations of a loop by direct
   evaluation.
   Returns true if cfg is changed.  */

static bool
canonicalize_loop_induction_variables (struct loop *loop,
                                       bool create_iv, enum unroll_level ul,
                                       bool try_eval)
{
  edge exit = NULL;
  tree niter;
  HOST_WIDE_INT maxiter;
  bool modified = false;
  location_t locus = UNKNOWN_LOCATION;

  niter = number_of_latch_executions (loop);
  exit = single_exit (loop);
  if (TREE_CODE (niter) == INTEGER_CST)
    locus = gimple_location (last_stmt (exit->src));
  else
    {
      /* If the loop has more than one exit, try checking all of them
         for # of iterations determinable through scev.  */
      if (!exit)
        niter = find_loop_niter (loop, &exit);

      /* Finally if everything else fails, try brute force evaluation.  */
      if (try_eval
          && (chrec_contains_undetermined (niter)
              || TREE_CODE (niter) != INTEGER_CST))
        niter = find_loop_niter_by_eval (loop, &exit);

      if (exit)
        locus = gimple_location (last_stmt (exit->src));

      if (TREE_CODE (niter) != INTEGER_CST)
        exit = NULL;
    }

  /* We work exceptionally hard here to estimate the bound
     by find_loop_niter_by_eval.  Be sure to keep it for the future.  */
  if (niter && TREE_CODE (niter) == INTEGER_CST)
    {
      record_niter_bound (loop, tree_to_double_int (niter),
                          exit == single_likely_exit (loop), true);
    }

  /* Force re-computation of loop bounds so we can remove redundant exits.  */
  maxiter = max_loop_iterations_int (loop);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && TREE_CODE (niter) == INTEGER_CST)
    {
      fprintf (dump_file, "Loop %d iterates ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " times.\n");
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && maxiter >= 0)
    {
      fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
               (int)maxiter);
    }

  /* Remove exits that are known to be never taken based on loop bound.
     Needs to be called after computation of max_loop_iterations_int that
     populates the loop bounds.  */
  modified |= remove_redundant_iv_tests (loop);

  if (try_unroll_loop_completely (loop, exit, niter, ul, maxiter, locus))
    return true;

  if (create_iv
      && niter && !chrec_contains_undetermined (niter)
      && exit && just_once_each_iteration_p (loop, exit->src))
    create_canonical_iv (loop, exit, niter);

  return modified;
}

/* The main entry point of the pass.  Adds canonical induction variables
   to the suitable loops.  */

unsigned int
canonicalize_induction_variables (void)
{
  struct loop *loop;
  bool changed = false;
  bool irred_invalidated = false;
  bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

  free_numbers_of_iterations_estimates ();
  estimate_numbers_of_iterations ();

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      changed |= canonicalize_loop_induction_variables (loop,
                                                        true, UL_SINGLE_ITER,
                                                        true);
    }
  gcc_assert (!need_ssa_update_p (cfun));

  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  /* Clean up the information about numbers of iterations, since brute force
     evaluation could reveal new information.  */
  scev_reset ();

  if (!bitmap_empty_p (loop_closed_ssa_invalidated))
    {
      gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }
  BITMAP_FREE (loop_closed_ssa_invalidated);

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}

/* Propagate VAL into all uses of SSA_NAME.  */

static void
propagate_into_all_uses (tree ssa_name, tree val)
{
  imm_use_iterator iter;
  gimple use_stmt;

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, ssa_name)
    {
      gimple_stmt_iterator use_stmt_gsi = gsi_for_stmt (use_stmt);
      use_operand_p use;

      FOR_EACH_IMM_USE_ON_STMT (use, iter)
        SET_USE (use, val);

      if (is_gimple_assign (use_stmt)
          && get_gimple_rhs_class (gimple_assign_rhs_code (use_stmt))
             == GIMPLE_SINGLE_RHS)
        {
          tree rhs = gimple_assign_rhs1 (use_stmt);

          if (TREE_CODE (rhs) == ADDR_EXPR)
            recompute_tree_invariant_for_addr_expr (rhs);
        }

      fold_stmt_inplace (&use_stmt_gsi);
      update_stmt (use_stmt);
      maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt);
    }
}

/* Propagate constant SSA_NAMEs defined in basic block BB.  */

static void
propagate_constants_for_unrolling (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Look for degenerate PHI nodes with constant argument.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gimple phi = gsi_stmt (gsi);
      tree result = gimple_phi_result (phi);
      tree arg = gimple_phi_arg_def (phi, 0);

      if (gimple_phi_num_args (phi) == 1 && TREE_CODE (arg) == INTEGER_CST)
        {
          propagate_into_all_uses (result, arg);
          gsi_remove (&gsi, true);
          release_ssa_name (result);
        }
      else
        gsi_next (&gsi);
    }

  /* Look for assignments to SSA names with constant RHS.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == INTEGER_CST
          && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
          && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
        {
          propagate_into_all_uses (lhs, gimple_assign_rhs1 (stmt));
          gsi_remove (&gsi, true);
          release_ssa_name (lhs);
        }
      else
        gsi_next (&gsi);
    }
}

/* Process loops from innermost to outer, stopping at the innermost
   loop we unrolled.  */

static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
                                vec<loop_p, va_heap>& father_stack,
                                struct loop *loop)
{
  struct loop *loop_father;
  bool changed = false;
  struct loop *inner;
  enum unroll_level ul;

  /* Process inner loops first.  */
  for (inner = loop->inner; inner != NULL; inner = inner->next)
    changed |= tree_unroll_loops_completely_1 (may_increase_size,
                                               unroll_outer, father_stack,
                                               inner);

  /* If we changed an inner loop we cannot process outer loops in this
     iteration because SSA form is not up-to-date.  Continue with
     siblings of outer loops instead.  */
  if (changed)
    return true;

  /* Don't unroll #pragma omp simd loops until the vectorizer
     attempts to vectorize those.  */
  if (loop->force_vectorize)
    return false;

  /* Try to unroll this loop.  */
  loop_father = loop_outer (loop);
  if (!loop_father)
    return false;

  if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
      /* Unroll outermost loops only if asked to do so or they do
         not cause code growth.  */
      && (unroll_outer || loop_outer (loop_father)))
    ul = UL_ALL;
  else
    ul = UL_NO_GROWTH;

  if (canonicalize_loop_induction_variables
        (loop, false, ul, !flag_tree_loop_ivcanon))
    {
      /* If we'll continue unrolling, we need to propagate constants
         within the new basic blocks to fold away induction variable
         computations; otherwise, the size might blow up before the
         iteration is complete and the IR eventually cleaned up.  */
      if (loop_outer (loop_father) && !loop_father->aux)
        {
          father_stack.safe_push (loop_father);
          loop_father->aux = loop_father;
        }

      return true;
    }

  return false;
}

/* Unroll LOOPS completely if they iterate just a few times.  Unless
   MAY_INCREASE_SIZE is true, perform the unrolling only if the
   size of the code does not increase.  */

unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
  auto_vec<loop_p, 16> father_stack;
  bool changed;
  int iteration = 0;
  bool irred_invalidated = false;

  do
    {
      changed = false;
      bitmap loop_closed_ssa_invalidated = NULL;

      if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
        loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

      free_numbers_of_iterations_estimates ();
      estimate_numbers_of_iterations ();

      changed = tree_unroll_loops_completely_1 (may_increase_size,
                                                unroll_outer, father_stack,
                                                current_loops->tree_root);
      if (changed)
        {
          struct loop **iter;
          unsigned i;

          /* Be sure to skip unlooped loops while processing the
             father_stack array.  */
          FOR_EACH_VEC_ELT (loops_to_unloop, i, iter)
            (*iter)->aux = NULL;
          FOR_EACH_VEC_ELT (father_stack, i, iter)
            if (!(*iter)->aux)
              *iter = NULL;
          unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);

          /* We can not use TODO_update_ssa_no_phi because VOPS gets confused.  */
          if (loop_closed_ssa_invalidated
              && !bitmap_empty_p (loop_closed_ssa_invalidated))
            rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
                                          TODO_update_ssa);
          else
            update_ssa (TODO_update_ssa);

          /* Propagate the constants within the new basic blocks.  */
          FOR_EACH_VEC_ELT (father_stack, i, iter)
            if (*iter)
              {
                unsigned j;
                basic_block *body = get_loop_body_in_dom_order (*iter);
                for (j = 0; j < (*iter)->num_nodes; j++)
                  propagate_constants_for_unrolling (body[j]);
                free (body);
                (*iter)->aux = NULL;
              }
          father_stack.truncate (0);

          /* This will take care of removing completely unrolled loops
             from the loop structures so we can continue unrolling now
             innermost loops.  */
          if (cleanup_tree_cfg ())
            update_ssa (TODO_update_ssa_only_virtuals);

          /* Clean up the information about numbers of iterations, since
             complete unrolling might have invalidated it.  */
          scev_reset ();
#ifdef ENABLE_CHECKING
          if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
            verify_loop_closed_ssa (true);
#endif
        }
      if (loop_closed_ssa_invalidated)
        BITMAP_FREE (loop_closed_ssa_invalidated);
    }
  while (changed
         && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));

  father_stack.release ();

  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return 0;
}

/* Canonical induction variable creation pass.  */

static unsigned int
tree_ssa_loop_ivcanon (void)
{
  if (number_of_loops (cfun) <= 1)
    return 0;

  return canonicalize_induction_variables ();
}

namespace {

const pass_data pass_data_iv_canon =
{
  GIMPLE_PASS, /* type */
  "ivcanon", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  true, /* has_execute */
  TV_TREE_LOOP_IVCANON, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_iv_canon : public gimple_opt_pass
{
public:
  pass_iv_canon (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_iv_canon, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
  unsigned int execute () { return tree_ssa_loop_ivcanon (); }

}; // class pass_iv_canon

} // anon namespace

gimple_opt_pass *
make_pass_iv_canon (gcc::context *ctxt)
{
  return new pass_iv_canon (ctxt);
}

/* Complete unrolling of loops.  */

static unsigned int
tree_complete_unroll (void)
{
  if (number_of_loops (cfun) <= 1)
    return 0;

  return tree_unroll_loops_completely (flag_unroll_loops
                                       || flag_peel_loops
                                       || optimize >= 3, true);
}

namespace {

const pass_data pass_data_complete_unroll =
{
  GIMPLE_PASS, /* type */
  "cunroll", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  true, /* has_execute */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unroll : public gimple_opt_pass
{
public:
  pass_complete_unroll (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unroll, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute () { return tree_complete_unroll (); }

}; // class pass_complete_unroll

} // anon namespace

gimple_opt_pass *
make_pass_complete_unroll (gcc::context *ctxt)
{
  return new pass_complete_unroll (ctxt);
}

/* Complete unrolling of inner loops.  */

static unsigned int
tree_complete_unroll_inner (void)
{
  unsigned ret = 0;

  loop_optimizer_init (LOOPS_NORMAL
                       | LOOPS_HAVE_RECORDED_EXITS);
  if (number_of_loops (cfun) > 1)
    {
      scev_initialize ();
      ret = tree_unroll_loops_completely (optimize >= 3, false);
      free_numbers_of_iterations_estimates ();
      scev_finalize ();
    }
  loop_optimizer_finalize ();

  return ret;
}

namespace {

const pass_data pass_data_complete_unrolli =
{
  GIMPLE_PASS, /* type */
  "cunrolli", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  true, /* has_execute */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_verify_flow, /* todo_flags_finish */
};

class pass_complete_unrolli : public gimple_opt_pass
{
public:
  pass_complete_unrolli (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize >= 2; }
  unsigned int execute () { return tree_complete_unroll_inner (); }

}; // class pass_complete_unrolli

} // anon namespace

gimple_opt_pass *
make_pass_complete_unrolli (gcc::context *ctxt)
{
  return new pass_complete_unrolli (ctxt);
}