Commit | Line | Data |
---|---|---|
84eb345f | 1 | /* Induction variable canonicalization and loop peeling. |
aad93da1 | 2 | Copyright (C) 2004-2017 Free Software Foundation, Inc. |
48e1416a | 3 | |
bb445479 | 4 | This file is part of GCC. |
48e1416a | 5 | |
bb445479 | 6 | GCC is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the | |
8c4c00c1 | 8 | Free Software Foundation; either version 3, or (at your option) any |
bb445479 | 9 | later version. |
48e1416a | 10 | |
bb445479 | 11 | GCC is distributed in the hope that it will be useful, but WITHOUT |
12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 | for more details. | |
48e1416a | 15 | |
bb445479 | 16 | You should have received a copy of the GNU General Public License |
8c4c00c1 | 17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ | |
bb445479 | 19 | |
20 | /* This pass detects the loops that iterate a constant number of times, | |
48e1416a | 21 | adds a canonical induction variable (step -1, tested against 0) |
bb445479 | 22 | and replaces the exit test. This enables the less powerful rtl |
23 | level analysis to use this information. | |
24 | ||
25 | This might spoil the code in some cases (by increasing register pressure). | |
26 | Note that when the new variable is not needed, ivopts will get rid | |
27 | of it, so it is only likely to be a problem when there are no other linear | |
28 | induction variables. In that case the optimization possibilities created | |
29 | are likely to pay off. | |
30 | ||
c836de3f | 31 | We also perform |
4cf494ec | 32 | - complete unrolling (or peeling) when the loop rolls few enough
c836de3f | 33 | times |
34 | - simple peeling (i.e. copying a few initial iterations before the loop) | |
35 | when an estimate of the number of iterations is known (typically from | |
36 | the profile info). */ | |
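
/* A rough illustration of the canonicalization (hypothetical source and
   result; the actual GIMPLE produced will differ in detail):

     for (i = 0; i < 4; i++)            ivtmp = 4;
       a[i] = 0;                  =>    do { a[i] = 0; i++; ivtmp -= 1; }
                                        while (ivtmp != 0);

   A down-counting IV tested for equality against zero is the form the
   RTL level analysis can reliably recognize.  */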
bb445479 | 37 | |
38 | #include "config.h" | |
39 | #include "system.h" | |
40 | #include "coretypes.h" | |
9ef16211 | 41 | #include "backend.h" |
bb445479 | 42 | #include "tree.h" |
9ef16211 | 43 | #include "gimple.h" |
7c29e30e | 44 | #include "cfghooks.h" |
45 | #include "tree-pass.h" | |
9ef16211 | 46 | #include "ssa.h" |
7c29e30e | 47 | #include "cgraph.h" |
48 | #include "gimple-pretty-print.h" | |
b20a8bb4 | 49 | #include "fold-const.h" |
886c1262 | 50 | #include "profile.h" |
bc61cadb | 51 | #include "gimple-fold.h" |
52 | #include "tree-eh.h" | |
dcf1a1ec | 53 | #include "gimple-iterator.h" |
073c1fd5 | 54 | #include "tree-cfg.h" |
05d9c18a | 55 | #include "tree-ssa-loop-manip.h" |
56 | #include "tree-ssa-loop-niter.h" | |
073c1fd5 | 57 | #include "tree-ssa-loop.h" |
58 | #include "tree-into-ssa.h" | |
bb445479 | 59 | #include "cfgloop.h" |
bb445479 | 60 | #include "tree-chrec.h" |
61 | #include "tree-scalar-evolution.h" | |
62 | #include "params.h" | |
bb445479 | 63 | #include "tree-inline.h" |
424a4a92 | 64 | #include "tree-cfgcleanup.h" |
f7715905 | 65 | #include "builtins.h" |
bb445479 | 66 | |
604f7b8a | 67 | /* Specifies types of loops that may be unrolled. */ |
68 | ||
69 | enum unroll_level | |
70 | { | |
6414dd4c | 71 | UL_SINGLE_ITER, /* Only loops that exit immediately in the first |
604f7b8a | 72 | iteration. */ |
73 | UL_NO_GROWTH, /* Only loops whose unrolling will not cause increase | |
74 | of code size. */ | |
75 | UL_ALL /* All suitable loops. */ | |
76 | }; | |
77 | ||
bb445479 | 78 | /* Adds a canonical induction variable to LOOP iterating NITER times. EXIT |
5051abaf | 79 | is the exit edge whose condition is replaced. The ssa versions of the new |
80 | IV before and after increment will be stored in VAR_BEFORE and VAR_AFTER | |
81 | if they are not NULL. */ | |
bb445479 | 82 | |
5051abaf | 83 | void |
84 | create_canonical_iv (struct loop *loop, edge exit, tree niter, | |
85 | tree *var_before = NULL, tree *var_after = NULL) | |
bb445479 | 86 | { |
87 | edge in; | |
75a70cf9 | 88 | tree type, var; |
1a91d914 | 89 | gcond *cond; |
75a70cf9 | 90 | gimple_stmt_iterator incr_at; |
bb445479 | 91 | enum tree_code cmp; |
92 | ||
93 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
94 | { | |
95 | fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num); | |
96 | print_generic_expr (dump_file, niter, TDF_SLIM); | |
97 | fprintf (dump_file, " iterations.\n"); | |
98 | } | |
99 | ||
1a91d914 | 100 | cond = as_a <gcond *> (last_stmt (exit->src)); |
cd665a06 | 101 | in = EDGE_SUCC (exit->src, 0); |
bb445479 | 102 | if (in == exit) |
cd665a06 | 103 | in = EDGE_SUCC (exit->src, 1); |
bb445479 | 104 | |
105 | /* Note that we do not need to worry about overflows, since | |
106 | the type of niter is always unsigned and all comparisons are | |
107 | just for equality/nonequality -- i.e. everything works | |
108 | with modulo arithmetic. */ | |
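  /* For example (an illustration only): if NITER has a 32-bit unsigned
     type and equals 0xffffffff, then the NITER + 1 computed below wraps
     to 0; the IV then takes the values 0xffffffff, 0xfffffffe, ..., 0
     across the NITER + 1 iterations, and the equality test against 0
     still fires at exactly the right moment, because equality is immune
     to wrap-around.  */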
109 | ||
110 | type = TREE_TYPE (niter); | |
49d00087 | 111 | niter = fold_build2 (PLUS_EXPR, type, |
112 | niter, | |
113 | build_int_cst (type, 1)); | |
75a70cf9 | 114 | incr_at = gsi_last_bb (in->src); |
bb445479 | 115 | create_iv (niter, |
3c6185f1 | 116 | build_int_cst (type, -1), |
bb445479 | 117 | NULL_TREE, loop, |
5051abaf | 118 | &incr_at, false, var_before, &var); |
119 | if (var_after) | |
120 | *var_after = var; | |
bb445479 | 121 | |
122 | cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR; | |
75a70cf9 | 123 | gimple_cond_set_code (cond, cmp); |
124 | gimple_cond_set_lhs (cond, var); | |
125 | gimple_cond_set_rhs (cond, build_int_cst (type, 0)); | |
22aa74c4 | 126 | update_stmt (cond); |
bb445479 | 127 | } |
128 | ||
aa2ba534 | 129 | /* Describe size of loop as detected by tree_estimate_loop_size. */ |
130 | struct loop_size | |
131 | { | |
132 | /* Number of instructions in the loop. */ | |
133 | int overall; | |
134 | ||
135 | /* Number of instructions that will likely be optimized out in | |
136 | peeled iterations of the loop (i.e. computations based on an induction | |
137 | variable that starts at a known constant). */ | |
138 | int eliminated_by_peeling; | |
139 | ||
140 | /* Same statistics for last iteration of loop: it is smaller because | |
141 | instructions after exit are not executed. */ | |
142 | int last_iteration; | |
143 | int last_iteration_eliminated_by_peeling; | |
d583c979 | 144 | |
145 | /* If some IV computation will become constant. */ | |
146 | bool constant_iv; | |
147 | ||
148 | /* Number of call stmts that are not a builtin and are pure or const | |
149 | present on the hot path. */ | |
150 | int num_pure_calls_on_hot_path; | |
151 | /* Number of call stmts that are not a builtin and are not pure nor const | |
152 | present on the hot path. */ | |
153 | int num_non_pure_calls_on_hot_path; | |
154 | /* Number of statements other than calls in the loop. */ | |
155 | int non_call_stmts_on_hot_path; | |
156 | /* Number of branches seen on the hot path. */ | |
157 | int num_branches_on_hot_path; | |
aa2ba534 | 158 | }; |
159 | ||
160 | /* Return true if OP in STMT will be constant after peeling LOOP. */ | |
161 | ||
162 | static bool | |
42acab1c | 163 | constant_after_peeling (tree op, gimple *stmt, struct loop *loop) |
aa2ba534 | 164 | { |
aa2ba534 | 165 | if (is_gimple_min_invariant (op)) |
166 | return true; | |
48e1416a | 167 | |
aa2ba534 | 168 | /* We can still fold accesses to constant arrays when index is known. */ |
169 | if (TREE_CODE (op) != SSA_NAME) | |
170 | { | |
171 | tree base = op; | |
172 | ||
173 | /* First take a quick look to see whether there is a constant array inside. */ | |
174 | while (handled_component_p (base)) | |
175 | base = TREE_OPERAND (base, 0); | |
248022b2 | 176 | if ((DECL_P (base) |
df8d3e89 | 177 | && ctor_for_folding (base) != error_mark_node) |
aa2ba534 | 178 | || CONSTANT_CLASS_P (base)) |
179 | { | |
180 | /* If so, see if we understand all the indices. */ | |
181 | base = op; | |
182 | while (handled_component_p (base)) | |
183 | { | |
184 | if (TREE_CODE (base) == ARRAY_REF | |
185 | && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop)) | |
186 | return false; | |
187 | base = TREE_OPERAND (base, 0); | |
188 | } | |
189 | return true; | |
190 | } | |
191 | return false; | |
192 | } | |
193 | ||
c39eea14 | 194 | /* Induction variables are constants when defined in loop. */ |
195 | if (loop_containing_stmt (stmt) != loop) | |
aa2ba534 | 196 | return false; |
c39eea14 | 197 | tree ev = analyze_scalar_evolution (loop, op); |
198 | if (chrec_contains_undetermined (ev) | |
199 | || chrec_contains_symbols (ev)) | |
aa2ba534 | 200 | return false; |
201 | return true; | |
202 | } | |
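
/* For illustration (a hypothetical input): given

     static const int t[3] = { 2, 3, 5 };
     for (i = 0; i < 3; i++)
       sum += t[i];

   the read from t[i] is constant after peeling: each peeled copy sees a
   known constant index, and t has a constant initializer that
   ctor_for_folding can return.  */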
203 | ||
d583c979 | 204 | /* Computes an estimated number of insns in LOOP. |
205 | EXIT (if non-NULL) is an exit edge that will be eliminated in all but the last | |
206 | iteration of the loop. | |
207 | EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last iteration | |
208 | of the loop. | |
84eb345f | 209 | Return results in SIZE, estimating the benefits of complete unrolling exiting by EXIT. |
04437ab6 | 210 | Stop estimating after UPPER_BOUND is met. Return true in this case. */ |
aa2ba534 | 211 | |
84eb345f | 212 | static bool |
f18de397 | 213 | tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel, |
214 | struct loop_size *size, int upper_bound) | |
aa2ba534 | 215 | { |
216 | basic_block *body = get_loop_body (loop); | |
217 | gimple_stmt_iterator gsi; | |
218 | unsigned int i; | |
219 | bool after_exit; | |
f1f41a6c | 220 | vec<basic_block> path = get_loop_hot_path (loop); |
aa2ba534 | 221 | |
222 | size->overall = 0; | |
223 | size->eliminated_by_peeling = 0; | |
224 | size->last_iteration = 0; | |
225 | size->last_iteration_eliminated_by_peeling = 0; | |
d583c979 | 226 | size->num_pure_calls_on_hot_path = 0; |
227 | size->num_non_pure_calls_on_hot_path = 0; | |
228 | size->non_call_stmts_on_hot_path = 0; | |
229 | size->num_branches_on_hot_path = 0; | |
230 | size->constant_iv = 0; | |
aa2ba534 | 231 | |
232 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
233 | fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num); | |
234 | for (i = 0; i < loop->num_nodes; i++) | |
235 | { | |
c790d986 | 236 | if (edge_to_cancel && body[i] != edge_to_cancel->src |
237 | && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src)) | |
aa2ba534 | 238 | after_exit = true; |
239 | else | |
240 | after_exit = false; | |
241 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
f18de397 | 242 | fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, |
243 | after_exit); | |
aa2ba534 | 244 | |
245 | for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) | |
246 | { | |
42acab1c | 247 | gimple *stmt = gsi_stmt (gsi); |
aa2ba534 | 248 | int num = estimate_num_insns (stmt, &eni_size_weights); |
249 | bool likely_eliminated = false; | |
d583c979 | 250 | bool likely_eliminated_last = false; |
251 | bool likely_eliminated_peeled = false; | |
aa2ba534 | 252 | |
253 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
254 | { | |
255 | fprintf (dump_file, " size: %3i ", num); | |
1ffa4346 | 256 | print_gimple_stmt (dump_file, gsi_stmt (gsi), 0); |
aa2ba534 | 257 | } |
258 | ||
259 | /* Look for reasons why we might optimize this stmt away. */ | |
260 | ||
8c1879bc | 261 | if (!gimple_has_side_effects (stmt)) |
aa2ba534 | 262 | { |
8c1879bc | 263 | /* Exit conditional. */ |
264 | if (exit && body[i] == exit->src | |
265 | && stmt == last_stmt (exit->src)) | |
266 | { | |
267 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
268 | fprintf (dump_file, " Exit condition will be eliminated " | |
269 | "in peeled copies.\n"); | |
270 | likely_eliminated_peeled = true; | |
271 | } | |
272 | if (edge_to_cancel && body[i] == edge_to_cancel->src | |
273 | && stmt == last_stmt (edge_to_cancel->src)) | |
274 | { | |
275 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
276 | fprintf (dump_file, " Exit condition will be eliminated " | |
277 | "in last copy.\n"); | |
278 | likely_eliminated_last = true; | |
279 | } | |
280 | /* Sets of IV variables. */ | |
281 | if (gimple_code (stmt) == GIMPLE_ASSIGN | |
282 | && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop)) | |
283 | { | |
284 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
285 | fprintf (dump_file, " Induction variable computation will" | |
286 | " be folded away.\n"); | |
287 | likely_eliminated = true; | |
288 | } | |
289 | /* Assignments of IV variables. */ | |
290 | else if (gimple_code (stmt) == GIMPLE_ASSIGN | |
291 | && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME | |
292 | && constant_after_peeling (gimple_assign_rhs1 (stmt), | |
93505d22 | 293 | stmt, loop) |
8c1879bc | 294 | && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS |
295 | || constant_after_peeling (gimple_assign_rhs2 (stmt), | |
296 | stmt, loop))) | |
297 | { | |
298 | size->constant_iv = true; | |
299 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
300 | fprintf (dump_file, | |
301 | " Constant expression will be folded away.\n"); | |
302 | likely_eliminated = true; | |
303 | } | |
304 | /* Conditionals. */ | |
305 | else if ((gimple_code (stmt) == GIMPLE_COND | |
306 | && constant_after_peeling (gimple_cond_lhs (stmt), stmt, | |
307 | loop) | |
308 | && constant_after_peeling (gimple_cond_rhs (stmt), stmt, | |
309 | loop) | |
310 | /* We don't simplify all constant compares so make sure | |
311 | they are not both constant already. See PR70288. */ | |
312 | && (! is_gimple_min_invariant (gimple_cond_lhs (stmt)) | |
313 | || ! is_gimple_min_invariant | |
314 | (gimple_cond_rhs (stmt)))) | |
315 | || (gimple_code (stmt) == GIMPLE_SWITCH | |
316 | && constant_after_peeling (gimple_switch_index ( | |
317 | as_a <gswitch *> | |
318 | (stmt)), | |
319 | stmt, loop) | |
320 | && ! is_gimple_min_invariant | |
321 | (gimple_switch_index | |
322 | (as_a <gswitch *> (stmt))))) | |
323 | { | |
324 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
325 | fprintf (dump_file, " Constant conditional.\n"); | |
326 | likely_eliminated = true; | |
327 | } | |
aa2ba534 | 328 | } |
329 | ||
330 | size->overall += num; | |
d583c979 | 331 | if (likely_eliminated || likely_eliminated_peeled) |
aa2ba534 | 332 | size->eliminated_by_peeling += num; |
333 | if (!after_exit) | |
334 | { | |
335 | size->last_iteration += num; | |
d583c979 | 336 | if (likely_eliminated || likely_eliminated_last) |
aa2ba534 | 337 | size->last_iteration_eliminated_by_peeling += num; |
338 | } | |
84eb345f | 339 | if ((size->overall * 3 / 2 - size->eliminated_by_peeling |
340 | - size->last_iteration_eliminated_by_peeling) > upper_bound) | |
341 | { | |
342 | free (body); | |
04437ab6 | 343 | path.release (); |
84eb345f | 344 | return true; |
345 | } | |
aa2ba534 | 346 | } |
347 | } | |
f1f41a6c | 348 | while (path.length ()) |
d583c979 | 349 | { |
f1f41a6c | 350 | basic_block bb = path.pop (); |
d583c979 | 351 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
352 | { | |
42acab1c | 353 | gimple *stmt = gsi_stmt (gsi); |
f18de397 | 354 | if (gimple_code (stmt) == GIMPLE_CALL |
355 | && !gimple_inexpensive_call_p (as_a <gcall *> (stmt))) | |
d583c979 | 356 | { |
357 | int flags = gimple_call_flags (stmt); | |
f18de397 | 358 | if (flags & (ECF_PURE | ECF_CONST)) |
d583c979 | 359 | size->num_pure_calls_on_hot_path++; |
360 | else | |
361 | size->num_non_pure_calls_on_hot_path++; | |
362 | size->num_branches_on_hot_path ++; | |
363 | } | |
f18de397 | 364 | /* Count inexpensive calls as non-calls, because they will likely |
365 | expand inline. */ | |
366 | else if (gimple_code (stmt) != GIMPLE_DEBUG) | |
d583c979 | 367 | size->non_call_stmts_on_hot_path++; |
368 | if (((gimple_code (stmt) == GIMPLE_COND | |
369 | && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) | |
f18de397 | 370 | || !constant_after_peeling (gimple_cond_rhs (stmt), stmt,
371 | loop))) | |
d583c979 | 372 | || (gimple_code (stmt) == GIMPLE_SWITCH |
1a91d914 | 373 | && !constant_after_peeling (gimple_switch_index ( |
374 | as_a <gswitch *> (stmt)), | |
375 | stmt, loop))) | |
d583c979 | 376 | && (!exit || bb != exit->src)) |
377 | size->num_branches_on_hot_path++; | |
378 | } | |
379 | } | |
f1f41a6c | 380 | path.release (); |
aa2ba534 | 381 | if (dump_file && (dump_flags & TDF_DETAILS)) |
382 | fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall, | |
383 | size->eliminated_by_peeling, size->last_iteration, | |
384 | size->last_iteration_eliminated_by_peeling); | |
48e1416a | 385 | |
aa2ba534 | 386 | free (body); |
84eb345f | 387 | return false; |
aa2ba534 | 388 | } |
604f7b8a | 389 | |
aa2ba534 | 390 | /* Estimate number of insns of completely unrolled loop. |
391 | It is (NUNROLL + 1) * size of loop body, taking into account | |
392 | the fact that in the last copy everything after the exit conditional | |
393 | is dead and that some instructions will be eliminated after | |
394 | peeling. | |
604f7b8a | 395 | |
c31fb425 | 396 | The loop body is likely going to simplify further; this is difficult
aa2ba534 | 397 | to guess, so we just decrease the result by 1/3. */
604f7b8a | 398 | |
399 | static unsigned HOST_WIDE_INT | |
aa2ba534 | 400 | estimated_unrolled_size (struct loop_size *size, |
604f7b8a | 401 | unsigned HOST_WIDE_INT nunroll) |
402 | { | |
aa2ba534 | 403 | HOST_WIDE_INT unr_insns = ((nunroll) |
404 | * (HOST_WIDE_INT) (size->overall | |
405 | - size->eliminated_by_peeling)); | |
406 | if (!nunroll) | |
407 | unr_insns = 0; | |
408 | unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling; | |
409 | ||
410 | unr_insns = unr_insns * 2 / 3; | |
604f7b8a | 411 | if (unr_insns <= 0) |
412 | unr_insns = 1; | |
604f7b8a | 413 | |
414 | return unr_insns; | |
415 | } | |
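
/* A worked example with made-up numbers: for SIZE->overall == 10,
   SIZE->eliminated_by_peeling == 4, SIZE->last_iteration == 8,
   SIZE->last_iteration_eliminated_by_peeling == 6 and NUNROLL == 3,
   the raw estimate is 3 * (10 - 4) + (8 - 6) == 20 insns, which the
   2/3 simplification factor above reduces to a final estimate of 13.  */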
416 | ||
c790d986 | 417 | /* Loop LOOP is known to not loop. See if there is an edge in the loop |
418 | body that can be removed to make the loop always exit, without | |
419 | at the same time making any code potentially executed | |
420 | during the last iteration dead. | |
421 | ||
4cf494ec | 422 | After complete unrolling we still may get rid of the conditional |
c790d986 | 423 | on the exit in the last copy even if we have no idea what it does. |
424 | This is quite a common case for loops of the form | |
425 | ||
426 | int a[5]; | |
427 | for (i=0;i<b;i++) | |
428 | a[i]=0; | |
429 | ||
430 | Here we prove the loop iterates 5 times, but we do not know | |
431 | it from the induction variable. | |
432 | ||
433 | For now we handle only the simple case where there is an exit condition | |
434 | just before the latch block and the latch block contains no statements | |
435 | with side effects that may otherwise terminate the execution of the loop | |
436 | (such as by EH, by terminating the program, or by longjmp). | |
437 | ||
438 | In the general case we may want to cancel the paths leading to statements | |
439 | that loop-niter identified as having undefined effect in the last iteration. | |
440 | The other cases are hopefully rare and will be cleaned up later. */ | |
441 | ||
f86b328b | 442 | static edge |
c790d986 | 443 | loop_edge_to_cancel (struct loop *loop) |
444 | { | |
f1f41a6c | 445 | vec<edge> exits; |
c790d986 | 446 | unsigned i; |
447 | edge edge_to_cancel; | |
448 | gimple_stmt_iterator gsi; | |
449 | ||
450 | /* We want only one predecessor of the loop latch. */ | |
451 | if (EDGE_COUNT (loop->latch->preds) > 1) | |
452 | return NULL; | |
453 | ||
454 | exits = get_loop_exit_edges (loop); | |
455 | ||
f1f41a6c | 456 | FOR_EACH_VEC_ELT (exits, i, edge_to_cancel) |
c790d986 | 457 | { |
458 | /* Find the edge other than the loop exit | |
459 | leaving the conditional. */ | |
460 | if (EDGE_COUNT (edge_to_cancel->src->succs) != 2) | |
461 | continue; | |
462 | if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel) | |
463 | edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1); | |
464 | else | |
465 | edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0); | |
466 | ||
248022b2 | 467 | /* We can only handle conditionals. */ |
468 | if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) | |
469 | continue; | |
470 | ||
c790d986 | 471 | /* We should never have conditionals in the loop latch. */ |
472 | gcc_assert (edge_to_cancel->dest != loop->header); | |
473 | ||
474 | /* Check that it leads to loop latch. */ | |
475 | if (edge_to_cancel->dest != loop->latch) | |
476 | continue; | |
477 | ||
f1f41a6c | 478 | exits.release (); |
c790d986 | 479 | |
480 | /* Verify that the code in the loop latch does nothing that may end program | |
481 | execution without really reaching the exit. This may include | |
482 | non-pure/const function calls, EH statements, volatile ASMs etc. */ | |
483 | for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi)) | |
484 | if (gimple_has_side_effects (gsi_stmt (gsi))) | |
485 | return NULL; | |
486 | return edge_to_cancel; | |
487 | } | |
f1f41a6c | 488 | exits.release (); |
c790d986 | 489 | return NULL; |
490 | } | |
491 | ||
72276d01 | 492 | /* Remove all tests for exits that are known to be taken after LOOP was |
493 | peeled NPEELED times. Put gcc_unreachable before every statement | |
494 | known to not be executed. */ | |
495 | ||
496 | static bool | |
497 | remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled) | |
498 | { | |
499 | struct nb_iter_bound *elt; | |
500 | bool changed = false; | |
501 | ||
502 | for (elt = loop->bounds; elt; elt = elt->next) | |
503 | { | |
504 | /* If statement is known to be undefined after peeling, turn it | |
505 | into unreachable (or trap when debugging experience is supposed | |
506 | to be good). */ | |
507 | if (!elt->is_exit | |
796b6678 | 508 | && wi::ltu_p (elt->bound, npeeled)) |
72276d01 | 509 | { |
510 | gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt); | |
1a91d914 | 511 | gcall *stmt = gimple_build_call |
72276d01 | 512 | (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0); |
72276d01 | 513 | gimple_set_location (stmt, gimple_location (elt->stmt)); |
514 | gsi_insert_before (&gsi, stmt, GSI_NEW_STMT); | |
3b43af65 | 515 | split_block (gimple_bb (stmt), stmt); |
72276d01 | 516 | changed = true; |
517 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
518 | { | |
519 | fprintf (dump_file, "Forced statement unreachable: "); | |
1ffa4346 | 520 | print_gimple_stmt (dump_file, elt->stmt, 0); |
72276d01 | 521 | } |
522 | } | |
523 | /* If we know the exit will be taken after peeling, update. */ | |
524 | else if (elt->is_exit | |
796b6678 | 525 | && wi::leu_p (elt->bound, npeeled)) |
72276d01 | 526 | { |
527 | basic_block bb = gimple_bb (elt->stmt); | |
528 | edge exit_edge = EDGE_SUCC (bb, 0); | |
529 | ||
530 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
531 | { | |
532 | fprintf (dump_file, "Forced exit to be taken: "); | |
1ffa4346 | 533 | print_gimple_stmt (dump_file, elt->stmt, 0); |
72276d01 | 534 | } |
535 | if (!loop_exit_edge_p (loop, exit_edge)) | |
536 | exit_edge = EDGE_SUCC (bb, 1); | |
720cfc43 | 537 | exit_edge->probability = profile_probability::always (); |
72276d01 | 538 | gcc_checking_assert (loop_exit_edge_p (loop, exit_edge)); |
1a91d914 | 539 | gcond *cond_stmt = as_a <gcond *> (elt->stmt); |
72276d01 | 540 | if (exit_edge->flags & EDGE_TRUE_VALUE) |
1a91d914 | 541 | gimple_cond_make_true (cond_stmt); |
72276d01 | 542 | else |
1a91d914 | 543 | gimple_cond_make_false (cond_stmt); |
544 | update_stmt (cond_stmt); | |
72276d01 | 545 | changed = true; |
546 | } | |
547 | } | |
548 | return changed; | |
549 | } | |
550 | ||
551 | /* Remove all exits that are known to be never taken because of the loop bound | |
552 | discovered. */ | |
553 | ||
554 | static bool | |
555 | remove_redundant_iv_tests (struct loop *loop) | |
556 | { | |
557 | struct nb_iter_bound *elt; | |
558 | bool changed = false; | |
559 | ||
560 | if (!loop->any_upper_bound) | |
561 | return false; | |
562 | for (elt = loop->bounds; elt; elt = elt->next) | |
563 | { | |
564 | /* Exit is pointless if it won't be taken before loop reaches | |
565 | upper bound. */ | |
566 | if (elt->is_exit && loop->any_upper_bound | |
796b6678 | 567 | && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound)) |
72276d01 | 568 | { |
569 | basic_block bb = gimple_bb (elt->stmt); | |
570 | edge exit_edge = EDGE_SUCC (bb, 0); | |
571 | struct tree_niter_desc niter; | |
572 | ||
573 | if (!loop_exit_edge_p (loop, exit_edge)) | |
574 | exit_edge = EDGE_SUCC (bb, 1); | |
575 | ||
576 | /* Only when we know the actual number of iterations, not | |
577 | just a bound, can we remove the exit. */ | |
578 | if (!number_of_iterations_exit (loop, exit_edge, | |
3a690dce | 579 | &niter, false, false) |
580 | || !integer_onep (niter.assumptions) | |
72276d01 | 581 | || !integer_zerop (niter.may_be_zero) |
582 | || !niter.niter | |
583 | || TREE_CODE (niter.niter) != INTEGER_CST | |
cd9b5516 | 584 | || !wi::ltu_p (loop->nb_iterations_upper_bound, |
585 | wi::to_widest (niter.niter))) | |
72276d01 | 586 | continue; |
587 | ||
588 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
589 | { | |
590 | fprintf (dump_file, "Removed pointless exit: "); | |
1ffa4346 | 591 | print_gimple_stmt (dump_file, elt->stmt, 0); |
72276d01 | 592 | } |
1a91d914 | 593 | gcond *cond_stmt = as_a <gcond *> (elt->stmt); |
72276d01 | 594 | if (exit_edge->flags & EDGE_TRUE_VALUE) |
1a91d914 | 595 | gimple_cond_make_false (cond_stmt); |
72276d01 | 596 | else |
1a91d914 | 597 | gimple_cond_make_true (cond_stmt); |
598 | update_stmt (cond_stmt); | |
72276d01 | 599 | changed = true; |
600 | } | |
601 | } | |
602 | return changed; | |
603 | } | |
604 | ||
0cfe7a23 | 605 | /* Stores loops that will be unlooped and edges that will be removed |
606 | after we process the whole loop tree. */ | |
f1f41a6c | 607 | static vec<loop_p> loops_to_unloop; |
608 | static vec<int> loops_to_unloop_nunroll; | |
0cfe7a23 | 609 | static vec<edge> edges_to_remove; |
b96f8145 | 610 | /* Stores loops that have been peeled. */ |
611 | static bitmap peeled_loops; | |
72276d01 | 612 | |
613 | /* Cancel all fully unrolled loops by putting __builtin_unreachable | |
614 | on the latch edge. | |
615 | We do it after all unrolling since unlooping moves basic blocks | |
616 | across loop boundaries, trashing loop-closed SSA form as well | |
617 | as SCEV info that needs to be intact during unrolling. | |
618 | ||
c790d986 | 619 | IRRED_INVALIDATED is used to bookkeep if information about |
620 | irreducible regions may become invalid as a result | |
9f0ac045 | 621 | of the transformation. |
622 | LOOP_CLOSED_SSA_INVALIDATED is used to bookkepp the case | |
623 | when we need to go into loop closed SSA form. */ | |
bb445479 | 624 | |
f86b328b | 625 | static void |
72276d01 | 626 | unloop_loops (bitmap loop_closed_ssa_invalidated, |
627 | bool *irred_invalidated) | |
628 | { | |
f1f41a6c | 629 | while (loops_to_unloop.length ()) |
72276d01 | 630 | { |
f1f41a6c | 631 | struct loop *loop = loops_to_unloop.pop (); |
632 | int n_unroll = loops_to_unloop_nunroll.pop (); | |
72276d01 | 633 | basic_block latch = loop->latch; |
634 | edge latch_edge = loop_latch_edge (loop); | |
635 | int flags = latch_edge->flags; | |
636 | location_t locus = latch_edge->goto_locus; | |
1a91d914 | 637 | gcall *stmt; |
72276d01 | 638 | gimple_stmt_iterator gsi; |
639 | ||
640 | remove_exits_and_undefined_stmts (loop, n_unroll); | |
641 | ||
642 | /* Unloop destroys the latch edge. */ | |
643 | unloop (loop, irred_invalidated, loop_closed_ssa_invalidated); | |
644 | ||
645 | /* Create new basic block for the latch edge destination and wire | |
646 | it in. */ | |
647 | stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0); | |
648 | latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch), flags); | |
720cfc43 | 649 | latch_edge->probability = profile_probability::never (); |
72276d01 | 650 | latch_edge->flags |= flags; |
651 | latch_edge->goto_locus = locus; | |
652 | ||
7465dbcd | 653 | add_bb_to_loop (latch_edge->dest, current_loops->tree_root); |
db9cef39 | 654 | latch_edge->dest->count = profile_count::zero (); |
72276d01 | 655 | set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest, latch_edge->src); |
656 | ||
657 | gsi = gsi_start_bb (latch_edge->dest); | |
658 | gsi_insert_after (&gsi, stmt, GSI_NEW_STMT); | |
659 | } | |
f1f41a6c | 660 | loops_to_unloop.release (); |
661 | loops_to_unloop_nunroll.release (); | |
be6d8ddc | 662 | |
663 | /* Remove edges in peeled copies. */ | |
664 | unsigned i; | |
665 | edge e; | |
666 | FOR_EACH_VEC_ELT (edges_to_remove, i, e) | |
667 | { | |
7465dbcd | 668 | bool ok = remove_path (e, irred_invalidated, loop_closed_ssa_invalidated); |
be6d8ddc | 669 | gcc_assert (ok); |
670 | } | |
671 | edges_to_remove.release (); | |
72276d01 | 672 | } |
673 | ||
674 | /* Tries to unroll LOOP completely, i.e. NITER times. | |
675 | UL determines which loops we are allowed to unroll. | |
f55775aa | 676 | EXIT is the exit of the loop that should be eliminated. |
72276d01 | 677 | MAXITER specifies a bound on the number of iterations, -1 if it is |
f55775aa | 678 | not known or too large for HOST_WIDE_INT. The location |
679 | LOCUS corresponding to the loop is used when emitting | |
680 | a summary of the unroll to the dump file. */ | |
72276d01 | 681 | |
bb445479 | 682 | static bool |
7194de72 | 683 | try_unroll_loop_completely (struct loop *loop, |
bb445479 | 684 | edge exit, tree niter, |
c790d986 | 685 | enum unroll_level ul, |
f55775aa | 686 | HOST_WIDE_INT maxiter, |
866fc6a0 | 687 | location_t locus, bool allow_peel) |
bb445479 | 688 | { |
2a09b28c | 689 | unsigned HOST_WIDE_INT n_unroll = 0; |
c790d986 | 690 | bool n_unroll_found = false; |
c790d986 | 691 | edge edge_to_cancel = NULL; |
bb445479 | 692 | |
c790d986 | 693 | /* See if we proved the number of iterations to be a low constant.
bb445479 | 694 | |
c790d986 | 695 | EXIT is an edge that will be removed in all but last iteration of |
696 | the loop. | |
697 | ||
698 | EDGE_TO_CANCEL is an edge that will be removed from the last iteration | |
699 | of the unrolled sequence and is expected to make the final loop not | |
700 | rolling. | |
701 | ||
702 | If the number of executions of the loop is determined by a standard induction | |
703 | variable test, then EXIT and EDGE_TO_CANCEL are the two edges leaving | |
704 | from the iv test. */ | |
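  /* For instance (an illustrative sketch), with an IV test of the form

       if (ivtmp_12 != 0) goto <continue>; else goto <exit>;

     EXIT is the edge taken to <exit> (eliminable in every peeled copy but
     the last) and EDGE_TO_CANCEL is the sibling edge to <continue>
     (eliminable in the last copy, where the loop no longer rolls).  */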
e913b5cd | 705 | if (tree_fits_uhwi_p (niter)) |
c790d986 | 706 | { |
e913b5cd | 707 | n_unroll = tree_to_uhwi (niter); |
c790d986 | 708 | n_unroll_found = true; |
709 | edge_to_cancel = EDGE_SUCC (exit->src, 0); | |
710 | if (edge_to_cancel == exit) | |
711 | edge_to_cancel = EDGE_SUCC (exit->src, 1); | |
712 | } | |
713 | /* We do not know the number of iterations and thus we can not eliminate | |
714 | the EXIT edge. */ | |
715 | else | |
52c6c602 | 716 | exit = NULL; |
c790d986 | 717 | |
718 | /* See if we can improve our estimate by using recorded loop bounds. */ | |
866fc6a0 | 719 | if ((allow_peel || maxiter == 0 || ul == UL_NO_GROWTH) |
720 | && maxiter >= 0 | |
c790d986 | 721 | && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll)) |
722 | { | |
723 | n_unroll = maxiter; | |
724 | n_unroll_found = true; | |
725 | /* Loop terminates before the IV variable test, so we can not | |
726 | remove it in the last iteration. */ | |
727 | edge_to_cancel = NULL; | |
728 | } | |
729 | ||
730 | if (!n_unroll_found) | |
bb445479 | 731 | return false; |
bb445479 | 732 | |
2a09b28c | 733 | if (!loop->unroll |
734 | && n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES)) | |
04e3ee8a | 735 | { |
736 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
737 | fprintf (dump_file, "Not unrolling loop %d " | |
2c290da1 | 738 | "(--param max-completely-peel-times limit reached).\n", |
04e3ee8a | 739 | loop->num); |
740 | return false; | |
741 | } | |
bb445479 | 742 | |
c790d986 | 743 | if (!edge_to_cancel) |
744 | edge_to_cancel = loop_edge_to_cancel (loop); | |
745 | ||
bb445479 | 746 | if (n_unroll) |
747 | { | |
604f7b8a | 748 | if (ul == UL_SINGLE_ITER) |
bb445479 | 749 | return false; |
750 | ||
2a09b28c | 751 | if (loop->unroll) |
84eb345f | 752 | { |
2a09b28c | 753 | /* If the unrolling factor is too large, bail out. */ |
754 | if (n_unroll > (unsigned)loop->unroll) | |
755 | { | |
756 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
757 | fprintf (dump_file, | |
758 | "Not unrolling loop %d: " | |
759 | "user didn't want it unrolled completely.\n", | |
760 | loop->num); | |
761 | return false; | |
762 | } | |
84eb345f | 763 | } |
2a09b28c | 764 | else |
d88fd237 | 765 | { |
2a09b28c | 766 | struct loop_size size; |
767 | /* EXIT can be removed only if we are sure it passes first N_UNROLL | |
768 | iterations. */ | |
769 | bool remove_exit = (exit && niter | |
770 | && TREE_CODE (niter) == INTEGER_CST | |
771 | && wi::leu_p (n_unroll, wi::to_widest (niter))); | |
772 | bool large | |
773 | = tree_estimate_loop_size | |
774 | (loop, remove_exit ? exit : NULL, edge_to_cancel, &size, | |
775 | PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS)); | |
776 | if (large) | |
777 | { | |
778 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
779 | fprintf (dump_file, "Not unrolling loop %d: it is too large.\n", | |
780 | loop->num); | |
781 | return false; | |
782 | } | |
d88fd237 | 783 | |
2a09b28c | 784 | unsigned HOST_WIDE_INT ninsns = size.overall; |
785 | unsigned HOST_WIDE_INT unr_insns | |
786 | = estimated_unrolled_size (&size, n_unroll); | |
d583c979 | 787 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2a09b28c | 788 | { |
789 | fprintf (dump_file, " Loop size: %d\n", (int) ninsns); | |
790 | fprintf (dump_file, " Estimated size after unrolling: %d\n", | |
791 | (int) unr_insns); | |
792 | } | |
793 | ||
794 | /* If the code is going to shrink, we don't need to be extra | |
795 | cautious on guessing if the unrolling is going to be | |
796 | profitable. */ | |
797 | if (unr_insns | |
798 | /* If there is IV variable that will become constant, we | |
799 | save one instruction in the loop prologue we do not | |
800 | account otherwise. */ | |
801 | <= ninsns + (size.constant_iv != false)) | |
802 | ; | |
803 | /* We unroll only inner loops, because we do not consider it | |
804 | profitable otherwise. We can still cancel the loopback edge | |
805 | of a loop that does not roll; this is always a good idea. */ | |
806 | else if (ul == UL_NO_GROWTH) | |
807 | { | |
808 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
809 | fprintf (dump_file, "Not unrolling loop %d: size would grow.\n", | |
810 | loop->num); | |
811 | return false; | |
812 | } | |
813 | /* Outer loops tend to be less interesting candidates for | |
814 | complete unrolling unless we can do a lot of propagation | |
815 | into the inner loop body. For now we disable outer loop | |
816 | unrolling when the code would grow. */ | |
817 | else if (loop->inner) | |
818 | { | |
819 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
820 | fprintf (dump_file, "Not unrolling loop %d: " | |
821 | "it is not innermost and code would grow.\n", | |
822 | loop->num); | |
823 | return false; | |
824 | } | |
825 | /* If there is call on a hot path through the loop, then | |
826 | there is most probably not much to optimize. */ | |
827 | else if (size.num_non_pure_calls_on_hot_path) | |
828 | { | |
829 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
830 | fprintf (dump_file, "Not unrolling loop %d: " | |
831 | "contains call and code would grow.\n", | |
832 | loop->num); | |
833 | return false; | |
834 | } | |
835 | /* If there is pure/const call in the function, then we can | |
836 | still optimize the unrolled loop body if it contains some | |
837 | other interesting code than the calls and code storing or | |
838 | cumulating the return value. */ | |
839 | else if (size.num_pure_calls_on_hot_path | |
840 | /* One IV increment, one test, one ivtmp store and | |
841 | one useful stmt. That is about the minimal loop | |
842 | doing a pure call. */ | |
843 | && (size.non_call_stmts_on_hot_path | |
844 | <= 3 + size.num_pure_calls_on_hot_path)) | |
845 | { | |
846 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
847 | fprintf (dump_file, "Not unrolling loop %d: " | |
848 | "contains just pure calls and code would grow.\n", | |
849 | loop->num); | |
850 | return false; | |
851 | } | |
852 | /* Complete unrolling is a major win when control flow is | |
853 | removed and one big basic block is created. If the loop | |
854 | contains control flow the optimization may still be a win | |
855 | because of eliminating the loop overhead but it also may | |
856 | blow the branch predictor tables. Limit number of | |
857 | branches on the hot path through the peeled sequence. */ | |
858 | else if (size.num_branches_on_hot_path * (int)n_unroll | |
859 | > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES)) | |
860 | { | |
861 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
862 | fprintf (dump_file, "Not unrolling loop %d: " | |
863 | "number of branches on hot path in the unrolled " | |
864 | "sequence reaches --param max-peel-branches limit.\n", | |
865 | loop->num); | |
866 | return false; | |
867 | } | |
868 | else if (unr_insns | |
869 | > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS)) | |
870 | { | |
871 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
872 | fprintf (dump_file, "Not unrolling loop %d: " | |
873 | "number of insns in the unrolled sequence reaches " | |
874 | "--param max-completely-peeled-insns limit.\n", | |
875 | loop->num); | |
876 | return false; | |
877 | } | |
604f7b8a | 878 | } |
fb54ef7c | 879 | |
01020a5f | 880 | initialize_original_copy_tables (); |
3c6549f8 | 881 | auto_sbitmap wont_exit (n_unroll + 1); |
eedd711b | 882 | if (exit && niter |
883 | && TREE_CODE (niter) == INTEGER_CST | |
884 | && wi::leu_p (n_unroll, wi::to_widest (niter))) | |
885 | { | |
886 | bitmap_ones (wont_exit); | |
887 | if (wi::eq_p (wi::to_widest (niter), n_unroll) | |
888 | || edge_to_cancel) | |
889 | bitmap_clear_bit (wont_exit, 0); | |
890 | } | |
891 | else | |
892 | { | |
893 | exit = NULL; | |
894 | bitmap_clear (wont_exit); | |
895 | } | |
fb54ef7c | 896 | |
75a70cf9 | 897 | if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), |
898 | n_unroll, wont_exit, | |
0cfe7a23 | 899 | exit, &edges_to_remove, |
75a70cf9 | 900 | DLTHE_FLAG_UPDATE_FREQ |
901 | | DLTHE_FLAG_COMPLETTE_PEEL)) | |
bb445479 | 902 | { |
01020a5f | 903 | free_original_copy_tables (); |
d583c979 | 904 | if (dump_file && (dump_flags & TDF_DETAILS)) |
905 | fprintf (dump_file, "Failed to duplicate the loop\n"); | |
bb445479 | 906 | return false; |
907 | } | |
40ffaada | 908 | |
01020a5f | 909 | free_original_copy_tables (); |
bb445479 | 910 | } |
bb445479 | 911 | |
c790d986 | 912 | /* Remove the conditional from the last copy of the loop. */ |
913 | if (edge_to_cancel) | |
914 | { | |
1a91d914 | 915 | gcond *cond = as_a <gcond *> (last_stmt (edge_to_cancel->src)); |
eedd711b | 916 | force_edge_cold (edge_to_cancel, true); |
c790d986 | 917 | if (edge_to_cancel->flags & EDGE_TRUE_VALUE) |
918 | gimple_cond_make_false (cond); | |
919 | else | |
920 | gimple_cond_make_true (cond); | |
921 | update_stmt (cond); | |
2a09b28c | 922 | /* Do not remove the path, as doing so may remove the outer loop and
923 | confuse bookkeeping code in tree_unroll_loops_completely. */ | |
c790d986 | 924 | } |
c790d986 | 925 | |
72276d01 | 926 | /* Store the loop for later unlooping and exit removal. */ |
f1f41a6c | 927 | loops_to_unloop.safe_push (loop); |
928 | loops_to_unloop_nunroll.safe_push (n_unroll); | |
095dcfa3 | 929 | |
f55775aa | 930 | if (dump_enabled_p ()) |
c790d986 | 931 | { |
932 | if (!n_unroll) | |
f55775aa | 933 | dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus, |
6ee2edad | 934 | "loop turned into non-loop; it never loops\n"); |
c790d986 | 935 | else |
f55775aa | 936 | { |
937 | dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus, | |
6ee2edad | 938 | "loop with %d iterations completely unrolled", |
2a09b28c | 939 | (int) n_unroll); |
db9cef39 | 940 | if (loop->header->count.initialized_p ()) |
f55775aa | 941 | dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, |
942 | " (header execution count %d)", | |
db9cef39 | 943 | (int)loop->header->count.to_gcov_type ()); |
f55775aa | 944 | dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n"); |
945 | } | |
946 | } | |
947 | ||
948 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
949 | { | |
c790d986 | 950 | if (exit) |
951 | fprintf (dump_file, "Exit condition of peeled iterations was " | |
952 | "eliminated.\n"); | |
953 | if (edge_to_cancel) | |
954 | fprintf (dump_file, "Last iteration exit edge was proved true.\n"); | |
955 | else | |
956 | fprintf (dump_file, "Latch of last iteration was marked by " | |
957 | "__builtin_unreachable ().\n"); | |
958 | } | |
bb445479 | 959 | |
960 | return true; | |
961 | } | |
962 | ||
c836de3f | 963 | /* Return number of instructions after peeling. */ |
964 | static unsigned HOST_WIDE_INT | |
965 | estimated_peeled_sequence_size (struct loop_size *size, | |
966 | unsigned HOST_WIDE_INT npeel) | |
967 | { | |
968 | return MAX (npeel * (HOST_WIDE_INT) (size->overall | |
969 | - size->eliminated_by_peeling), 1); | |
970 | } | |
971 | ||
972 | /* If the loop is expected to iterate N times and is | |
973 | small enough, duplicate the loop body N+1 times before | |
974 | the loop itself. This way the hot path will never | |
975 | enter the loop. | |
976 | Parameters are the same as for try_unroll_loop_completely. */ | |
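/* For illustration: if the profile says the loop typically iterates
   twice, we peel three copies (N + 1) in front of it, so the common
   execution runs entirely in the straight-line peeled sequence and
   never branches back into the loop itself.  */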
977 | ||
978 | static bool | |
979 | try_peel_loop (struct loop *loop, | |
980 | edge exit, tree niter, | |
981 | HOST_WIDE_INT maxiter) | |
982 | { | |
39ab2939 | 983 | HOST_WIDE_INT npeel; |
c836de3f | 984 | struct loop_size size; |
985 | int peeled_size; | |
c836de3f | 986 | |
2a09b28c | 987 | if (!flag_peel_loops |
988 | || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0 | |
b96f8145 | 989 | || !peeled_loops) |
c836de3f | 990 | return false; |
991 | ||
b96f8145 | 992 | if (bitmap_bit_p (peeled_loops, loop->num)) |
993 | { | |
994 | if (dump_file) | |
995 | fprintf (dump_file, "Not peeling: loop is already peeled\n"); | |
996 | return false; | |
997 | } | |
998 | ||
2a09b28c | 999 | /* We don't peel loops that will be unrolled as this can duplicate a |
1000 | loop more times than the user requested. */ | |
1001 | if (loop->unroll) | |
1002 | { | |
1003 | if (dump_file) | |
1004 | fprintf (dump_file, "Not peeling: user didn't want it peeled.\n"); | |
1005 | return false; | |
1006 | } | |
1007 | ||
3ccfbed6 | 1008 | /* Peel only innermost loops. |
1009 | While the code is perfectly capable of peeling non-innermost loops, | |
1010 | the heuristics would probably need some improvements. */ | |
c836de3f | 1011 | if (loop->inner) |
1012 | { | |
1013 | if (dump_file) | |
2a09b28c | 1014 | fprintf (dump_file, "Not peeling: outer loop\n"); |
c836de3f | 1015 | return false; |
1016 | } | |
1017 | ||
1018 | if (!optimize_loop_for_speed_p (loop)) | |
1019 | { | |
1020 | if (dump_file) | |
2a09b28c | 1021 | fprintf (dump_file, "Not peeling: cold loop\n"); |
c836de3f | 1022 | return false; |
1023 | } | |
1024 | ||
1025 | /* Check if there is an estimate on the number of iterations. */ | |
1026 | npeel = estimated_loop_iterations_int (loop); | |
b96f8145 | 1027 | if (npeel < 0) |
1028 | npeel = likely_max_loop_iterations_int (loop); | |
c836de3f | 1029 | if (npeel < 0) |
1030 | { | |
1031 | if (dump_file) | |
1032 | fprintf (dump_file, "Not peeling: number of iterations is not " | |
1033 | "estimated\n"); | |
1034 | return false; | |
1035 | } | |
1036 | if (maxiter >= 0 && maxiter <= npeel) | |
1037 | { | |
1038 | if (dump_file) | |
2a09b28c | 1039 | fprintf (dump_file, "Not peeling: upper bound is known so can " |
4cf494ec | 1040 | "unroll completely\n"); |
c836de3f | 1041 | return false; |
1042 | } | |
1043 | ||
1044 | /* We want to peel the estimated number of iterations + 1 (so we never | |
1045 | enter the loop on the quick path). Check against PARAM_MAX_PEEL_TIMES | |
1046 | and be sure to avoid overflows. */ | |
1047 | if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1) | |
1048 | { | |
1049 | if (dump_file) | |
2a09b28c | 1050 | fprintf (dump_file, "Not peeling: rolls too much " |
39ab2939 | 1051 | "(%i + 1 > --param max-peel-times)\n", (int) npeel); |
c836de3f | 1052 | return false; |
1053 | } | |
1054 | npeel++; | |
1055 | ||
1056 | /* Check peeled loops size. */ | |
1057 | tree_estimate_loop_size (loop, exit, NULL, &size, | |
1058 | PARAM_VALUE (PARAM_MAX_PEELED_INSNS)); | |
39ab2939 | 1059 | if ((peeled_size = estimated_peeled_sequence_size (&size, (int) npeel)) |
c836de3f | 1060 | > PARAM_VALUE (PARAM_MAX_PEELED_INSNS)) |
1061 | { | |
1062 | if (dump_file) | |
2a09b28c | 1063 | fprintf (dump_file, "Not peeling: peeled sequence size is too large " |
c836de3f | 1064 | "(%i insns > --param max-peel-insns)", peeled_size); |
1065 | return false; | |
1066 | } | |
1067 | ||
1068 | /* Duplicate, possibly eliminating the exits. */ | |
1069 | initialize_original_copy_tables (); | |
3c6549f8 | 1070 | auto_sbitmap wont_exit (npeel + 1); |
3ccfbed6 | 1071 | if (exit && niter |
1072 | && TREE_CODE (niter) == INTEGER_CST | |
1073 | && wi::leu_p (npeel, wi::to_widest (niter))) | |
1074 | { | |
1075 | bitmap_ones (wont_exit); | |
b96f8145 | 1076 | bitmap_clear_bit (wont_exit, 0); |
3ccfbed6 | 1077 | } |
1078 | else | |
1079 | { | |
1080 | exit = NULL; | |
1081 | bitmap_clear (wont_exit); | |
1082 | } | |
c836de3f | 1083 | if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), |
1084 | npeel, wont_exit, | |
0cfe7a23 | 1085 | exit, &edges_to_remove, |
3ccfbed6 | 1086 | DLTHE_FLAG_UPDATE_FREQ)) |
c836de3f | 1087 | { |
1088 | free_original_copy_tables (); | |
c836de3f | 1089 | return false; |
1090 | } | |
c836de3f | 1091 | free_original_copy_tables (); |
1092 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1093 | { | |
1094 | fprintf (dump_file, "Peeled loop %d, %i times.\n", | |
39ab2939 | 1095 | loop->num, (int) npeel); |
c836de3f | 1096 | } |
3ccfbed6 | 1097 | if (loop->any_estimate) |
1098 | { | |
1099 | if (wi::ltu_p (npeel, loop->nb_iterations_estimate)) | |
1100 | loop->nb_iterations_estimate -= npeel; | |
1101 | else | |
1102 | loop->nb_iterations_estimate = 0; | |
1103 | } | |
c836de3f | 1104 | if (loop->any_upper_bound) |
3ccfbed6 | 1105 | { |
b96f8145 | 1106 | if (wi::ltu_p (npeel, loop->nb_iterations_upper_bound)) |
3ccfbed6 | 1107 | loop->nb_iterations_upper_bound -= npeel; |
1108 | else | |
1109 | loop->nb_iterations_upper_bound = 0; | |
1110 | } | |
8e3ffe30 | 1111 | if (loop->any_likely_upper_bound) |
3ccfbed6 | 1112 | { |
b96f8145 | 1113 | if (wi::ltu_p (npeel, loop->nb_iterations_likely_upper_bound)) |
3ccfbed6 | 1114 | loop->nb_iterations_likely_upper_bound -= npeel; |
1115 | else | |
1116 | { | |
1117 | loop->any_estimate = true; | |
1118 | loop->nb_iterations_estimate = 0; | |
1119 | loop->nb_iterations_likely_upper_bound = 0; | |
1120 | } | |
1121 | } | |
db9cef39 | 1122 | profile_count entry_count = profile_count::zero (); |
3ccfbed6 | 1123 | |
0cfe7a23 | 1124 | edge e; |
3ccfbed6 | 1125 | edge_iterator ei; |
1126 | FOR_EACH_EDGE (e, ei, loop->header->preds) | |
1127 | if (e->src != loop->latch) | |
1128 | { | |
db9cef39 | 1129 | if (e->src->count.initialized_p ()) |
1130 | entry_count = entry_count + e->src->count;
3ccfbed6 | 1131 | gcc_assert (!flow_bb_inside_loop_p (loop, e->src)); |
1132 | } | |
ca69b069 | 1133 | profile_probability p = profile_probability::very_unlikely (); |
205ce1aa | 1134 | p = entry_count.probability_in (loop->header->count); |
ca69b069 | 1135 | scale_loop_profile (loop, p, 0); |
b96f8145 | 1136 | bitmap_set_bit (peeled_loops, loop->num); |
c836de3f | 1137 | return true; |
1138 | } | |
7194de72 | 1139 | /* Adds a canonical induction variable to LOOP if suitable. |
48e1416a | 1140 | CREATE_IV is true if we may create a new iv. UL determines |
604f7b8a | 1141 | which loops we are allowed to completely unroll. If TRY_EVAL is true, we try |
48e1416a | 1142 | to determine the number of iterations of a loop by direct evaluation. |
72276d01 | 1143 | Returns true if cfg is changed. */ |
bb445479 | 1144 | |
1145 | static bool | |
7194de72 | 1146 | canonicalize_loop_induction_variables (struct loop *loop, |
604f7b8a | 1147 | bool create_iv, enum unroll_level ul, |
866fc6a0 | 1148 | bool try_eval, bool allow_peel) |
bb445479 | 1149 | { |
1150 | edge exit = NULL; | |
1151 | tree niter; | |
72276d01 | 1152 | HOST_WIDE_INT maxiter; |
1153 | bool modified = false; | |
f55775aa | 1154 | location_t locus = UNKNOWN_LOCATION; |
bb445479 | 1155 | |
0c3c2e56 | 1156 | niter = number_of_latch_executions (loop); |
f55775aa | 1157 | exit = single_exit (loop); |
bb445479 | 1158 | if (TREE_CODE (niter) == INTEGER_CST) |
f55775aa | 1159 | locus = gimple_location (last_stmt (exit->src)); |
b091dc59 | 1160 | else |
1161 | { | |
1162 | /* If the loop has more than one exit, try checking all of them | |
1163 | for # of iterations determinable through scev. */ | |
f55775aa | 1164 | if (!exit) |
b091dc59 | 1165 | niter = find_loop_niter (loop, &exit); |
1166 | ||
1167 | /* Finally if everything else fails, try brute force evaluation. */ | |
1168 | if (try_eval | |
1169 | && (chrec_contains_undetermined (niter) | |
1170 | || TREE_CODE (niter) != INTEGER_CST)) | |
1171 | niter = find_loop_niter_by_eval (loop, &exit); | |
1172 | ||
f55775aa | 1173 | if (exit) |
1174 | locus = gimple_location (last_stmt (exit->src)); | |
1175 | ||
c790d986 | 1176 | if (TREE_CODE (niter) != INTEGER_CST) |
1177 | exit = NULL; | |
b091dc59 | 1178 | } |
bb445479 | 1179 | |
c790d986 | 1180 | /* We work exceptionally hard here to estimate the bound |
1181 | by find_loop_niter_by_eval. Be sure to keep it for the future. */ | |
1182 | if (niter && TREE_CODE (niter) == INTEGER_CST) | |
57337fec | 1183 | { |
5de9d3ed | 1184 | record_niter_bound (loop, wi::to_widest (niter), |
57337fec | 1185 | exit == single_likely_exit (loop), true); |
1186 | } | |
c790d986 | 1187 | |
72276d01 | 1188 | /* Force re-computation of loop bounds so we can remove redundant exits. */ |
1189 | maxiter = max_loop_iterations_int (loop); | |
1190 | ||
c790d986 | 1191 | if (dump_file && (dump_flags & TDF_DETAILS) |
1192 | && TREE_CODE (niter) == INTEGER_CST) | |
bb445479 | 1193 | { |
1194 | fprintf (dump_file, "Loop %d iterates ", loop->num); | |
1195 | print_generic_expr (dump_file, niter, TDF_SLIM); | |
1196 | fprintf (dump_file, " times.\n"); | |
1197 | } | |
c790d986 | 1198 | if (dump_file && (dump_flags & TDF_DETAILS) |
72276d01 | 1199 | && maxiter >= 0) |
c790d986 | 1200 | { |
1201 | fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num, | |
72276d01 | 1202 | (int)maxiter); |
c790d986 | 1203 | } |
8e3ffe30 | 1204 | if (dump_file && (dump_flags & TDF_DETAILS) |
1205 | && likely_max_loop_iterations_int (loop) >= 0) | |
1206 | { | |
eedd711b | 1207 | fprintf (dump_file, "Loop %d likely iterates at most %i times.\n", |
1208 | loop->num, (int)likely_max_loop_iterations_int (loop)); | |
8e3ffe30 | 1209 | } |
bb445479 | 1210 | |
72276d01 | 1211 | /* Remove exits that are known to be never taken based on loop bound. |
1212 | Needs to be called after computation of max_loop_iterations_int, which | |
1213 | populates the loop bounds. */ | |
1214 | modified |= remove_redundant_iv_tests (loop); | |
1215 | ||
866fc6a0 | 1216 | if (try_unroll_loop_completely (loop, exit, niter, ul, maxiter, locus, |
1217 | allow_peel)) | |
bb445479 | 1218 | return true; |
1219 | ||
c790d986 | 1220 | if (create_iv |
57337fec | 1221 | && niter && !chrec_contains_undetermined (niter) |
1222 | && exit && just_once_each_iteration_p (loop, exit->src)) | |
bb445479 | 1223 | create_canonical_iv (loop, exit, niter); |
1224 | ||
c836de3f | 1225 | if (ul == UL_ALL) |
1226 | modified |= try_peel_loop (loop, exit, niter, maxiter); | |
1227 | ||
72276d01 | 1228 | return modified; |
bb445479 | 1229 | } |
1230 | ||
1231 | /* The main entry point of the pass. Adds canonical induction variables | |
7194de72 | 1232 | to the suitable loops. */ |
bb445479 | 1233 | |
4c641bf8 | 1234 | unsigned int |
7194de72 | 1235 | canonicalize_induction_variables (void) |
bb445479 | 1236 | { |
bb445479 | 1237 | struct loop *loop; |
053fdd99 | 1238 | bool changed = false; |
c790d986 | 1239 | bool irred_invalidated = false; |
9f0ac045 | 1240 | bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL); |
48e1416a | 1241 | |
46480a95 | 1242 | estimate_numbers_of_iterations (cfun); |
72276d01 | 1243 | |
f21d4d00 | 1244 | FOR_EACH_LOOP (loop, LI_FROM_INNERMOST) |
bb445479 | 1245 | { |
17519ba0 | 1246 | changed |= canonicalize_loop_induction_variables (loop, |
1247 | true, UL_SINGLE_ITER, | |
866fc6a0 | 1248 | true, false); |
bb445479 | 1249 | } |
ea1c5c31 | 1250 | gcc_assert (!need_ssa_update_p (cfun)); |
bb445479 | 1251 | |
72276d01 | 1252 | unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated); |
c790d986 | 1253 | if (irred_invalidated |
1254 | && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)) | |
1255 | mark_irreducible_loops (); | |
1256 | ||
08162157 | 1257 | /* Clean up the information about numbers of iterations, since brute force |
1258 | evaluation could reveal new information. */ | |
866da453 | 1259 | free_numbers_of_iterations_estimates (cfun); |
08162157 | 1260 | scev_reset (); |
1261 | ||
9f0ac045 | 1262 | if (!bitmap_empty_p (loop_closed_ssa_invalidated)) |
1263 | { | |
1264 | gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA)); | |
1265 | rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa); | |
1266 | } | |
1267 | BITMAP_FREE (loop_closed_ssa_invalidated); | |
1268 | ||
bb445479 | 1269 | if (changed) |
4c641bf8 | 1270 | return TODO_cleanup_cfg; |
1271 | return 0; | |
bb445479 | 1272 | } |
1273 | ||
2ebfc881 | 1274 | /* Propagate constant SSA_NAMEs defined in basic block BB. */ |
1275 | ||
1276 | static void | |
1277 | propagate_constants_for_unrolling (basic_block bb) | |
1278 | { | |
2ebfc881 | 1279 | /* Look for degenerate PHI nodes with constant argument. */ |
1a91d914 | 1280 | for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); ) |
2ebfc881 | 1281 | { |
1a91d914 | 1282 | gphi *phi = gsi.phi (); |
2ebfc881 | 1283 | tree result = gimple_phi_result (phi); |
1284 | tree arg = gimple_phi_arg_def (phi, 0); | |
1285 | ||
2eaaa4e7 | 1286 | if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (result) |
1287 | && gimple_phi_num_args (phi) == 1 | |
c39eea14 | 1288 | && CONSTANT_CLASS_P (arg)) |
2ebfc881 | 1289 | { |
d33914da | 1290 | replace_uses_by (result, arg); |
2ebfc881 | 1291 | gsi_remove (&gsi, true); |
1292 | release_ssa_name (result); | |
1293 | } | |
1294 | else | |
1295 | gsi_next (&gsi); | |
1296 | } | |
1297 | ||
1298 | /* Look for assignments to SSA names with constant RHS. */ | |
1a91d914 | 1299 | for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); ) |
2ebfc881 | 1300 | { |
42acab1c | 1301 | gimple *stmt = gsi_stmt (gsi); |
2ebfc881 | 1302 | tree lhs; |
1303 | ||
1304 | if (is_gimple_assign (stmt) | |
c39eea14 | 1305 | && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_constant |
2ebfc881 | 1306 | && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME) |
fca2aa67 | 1307 | && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)) |
2ebfc881 | 1308 | { |
d33914da | 1309 | replace_uses_by (lhs, gimple_assign_rhs1 (stmt)); |
2ebfc881 | 1310 | gsi_remove (&gsi, true); |
1311 | release_ssa_name (lhs); | |
1312 | } | |
1313 | else | |
1314 | gsi_next (&gsi); | |
1315 | } | |
1316 | } | |
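
/* For illustration (hypothetical GIMPLE after one unrolling iteration):

     i_5 = PHI <0(2)>;   a degenerate PHI, so uses of i_5 become 0
     j_7 = 4;            a constant RHS, so uses of j_7 become 4

   Folding these right away keeps the intermediate IR small while the
   unroller's fixpoint iteration is still in progress.  */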
1317 | ||
042301ef | 1318 | /* Process loops from innermost to outer, stopping at the innermost |
1319 | loop we unrolled. */ | |
1320 | ||
1321 | static bool | |
1322 | tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer, | |
0cfe7a23 | 1323 | bitmap father_bbs, struct loop *loop) |
042301ef | 1324 | { |
1325 | struct loop *loop_father; | |
1326 | bool changed = false; | |
1327 | struct loop *inner; | |
1328 | enum unroll_level ul; | |
1329 | ||
1330 | /* Process inner loops first. */ | |
1331 | for (inner = loop->inner; inner != NULL; inner = inner->next) | |
1332 | changed |= tree_unroll_loops_completely_1 (may_increase_size, | |
0cfe7a23 | 1333 | unroll_outer, father_bbs, |
042301ef | 1334 | inner); |
1335 | ||
1336 | /* If we changed an inner loop we cannot process outer loops in this | |
1337 | iteration because SSA form is not up-to-date. Continue with | |
1338 | siblings of outer loops instead. */ | |
1339 | if (changed) | |
1340 | return true; | |
1341 | ||
3d483a94 | 1342 | /* Don't unroll #pragma omp simd loops until the vectorizer |
1343 | attempts to vectorize those. */ | |
4c73695b | 1344 | if (loop->force_vectorize) |
3d483a94 | 1345 | return false; |
1346 | ||
042301ef | 1347 | /* Try to unroll this loop. */ |
1348 | loop_father = loop_outer (loop); | |
1349 | if (!loop_father) | |
1350 | return false; | |
1351 | ||
2a09b28c | 1352 | if (loop->unroll > 1) |
1353 | ul = UL_ALL; | |
1354 | else if (may_increase_size && optimize_loop_nest_for_speed_p (loop) | |
042301ef | 1355 | /* Unroll outermost loops only if asked to do so or they do |
1356 | not cause code growth. */ | |
1357 | && (unroll_outer || loop_outer (loop_father))) | |
1358 | ul = UL_ALL; | |
1359 | else | |
1360 | ul = UL_NO_GROWTH; | |
1361 | ||
1362 | if (canonicalize_loop_induction_variables | |
866fc6a0 | 1363 | (loop, false, ul, !flag_tree_loop_ivcanon, unroll_outer)) |
042301ef | 1364 | { |
1365 | /* If we'll continue unrolling, we need to propagate constants | |
1366 | within the new basic blocks to fold away induction variable | |
1367 | computations; otherwise, code size might blow up before the | |
1368 | iteration completes and the IR is eventually cleaned up. */ | |
0cfe7a23 | 1369 | if (loop_outer (loop_father)) |
1370 | bitmap_set_bit (father_bbs, loop_father->header->index); | |
042301ef | 1371 | |
1372 | return true; | |
1373 | } | |
1374 | ||
1375 | return false; | |
1376 | } | |
1377 | ||
604f7b8a | 1378 | /* Unroll LOOPS completely if they iterate just a few times. Unless |
1379 | MAY_INCREASE_SIZE is true, perform the unrolling only if the | |
1380 | size of the code does not increase. */ | |
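/* A hedged source-level sketch of the transformation (the actual work
   happens on GIMPLE): a loop known to roll exactly three times, such as

     for (i = 0; i < 3; i++)
       a[i] = b[i];

   may be replaced by the straight-line sequence

     a[0] = b[0];
     a[1] = b[1];
     a[2] = b[2];

   with the loop structure removed afterwards by CFG cleanup.  */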
bb445479 | 1381 | |
2a09b28c | 1382 | static unsigned int |
d88fd237 | 1383 | tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer) |
bb445479 | 1384 | { |
0cfe7a23 | 1385 | bitmap father_bbs = BITMAP_ALLOC (NULL); |
d88fd237 | 1386 | bool changed; |
793a0ab5 | 1387 | int iteration = 0; |
9f0ac045 | 1388 | bool irred_invalidated = false; |
bb445479 | 1389 | |
46480a95 | 1390 | estimate_numbers_of_iterations (cfun); |
1391 | ||
d88fd237 | 1392 | do |
bb445479 | 1393 | { |
d88fd237 | 1394 | changed = false; |
9f0ac045 | 1395 | bitmap loop_closed_ssa_invalidated = NULL; |
1396 | ||
1397 | if (loops_state_satisfies_p (LOOP_CLOSED_SSA)) | |
1398 | loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL); | |
bb445479 | 1399 | |
d4f078b5 | 1400 | free_numbers_of_iterations_estimates (cfun); |
46480a95 | 1401 | estimate_numbers_of_iterations (cfun); |
72276d01 | 1402 | |
042301ef | 1403 | changed = tree_unroll_loops_completely_1 (may_increase_size, |
0cfe7a23 | 1404 | unroll_outer, father_bbs, |
042301ef | 1405 | current_loops->tree_root); |
d88fd237 | 1406 | if (changed) |
1407 | { | |
2ebfc881 | 1408 | unsigned i; |
1409 | ||
72276d01 | 1410 | unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated); |
c790d986 | 1411 | |
72276d01 | 1412 | /* We cannot use TODO_update_ssa_no_phi because VOPS gets confused. */ |
9f0ac045 | 1413 | if (loop_closed_ssa_invalidated |
1414 | && !bitmap_empty_p (loop_closed_ssa_invalidated)) | |
1415 | rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated, | |
1416 | TODO_update_ssa); | |
1417 | else | |
1418 | update_ssa (TODO_update_ssa); | |
ea1c5c31 | 1419 | |
0cfe7a23 | 1420 | /* father_bbs is a bitmap of loop father header BB indices. |
1421 | Translate that to the set of non-root loops those BBs belong to now. */ | |
1422 | bitmap_iterator bi; | |
1423 | bitmap fathers = BITMAP_ALLOC (NULL); | |
1424 | EXECUTE_IF_SET_IN_BITMAP (father_bbs, 0, i, bi) | |
1425 | { | |
1426 | basic_block unrolled_loop_bb = BASIC_BLOCK_FOR_FN (cfun, i); | |
1427 | if (! unrolled_loop_bb) | |
1428 | continue; | |
1429 | if (loop_outer (unrolled_loop_bb->loop_father)) | |
1430 | bitmap_set_bit (fathers, | |
1431 | unrolled_loop_bb->loop_father->num); | |
1432 | } | |
1433 | bitmap_clear (father_bbs); | |
2ebfc881 | 1434 | /* Propagate the constants within the new basic blocks. */ |
0cfe7a23 | 1435 | EXECUTE_IF_SET_IN_BITMAP (fathers, 0, i, bi) |
1436 | { | |
1437 | loop_p father = get_loop (cfun, i); | |
1438 | basic_block *body = get_loop_body_in_dom_order (father); | |
1439 | for (unsigned j = 0; j < father->num_nodes; j++) | |
1440 | propagate_constants_for_unrolling (body[j]); | |
1441 | free (body); | |
1442 | } | |
1443 | BITMAP_FREE (fathers); | |
2ebfc881 | 1444 | |
d88fd237 | 1445 | /* This will take care of removing completely unrolled loops |
1446 | from the loop structures so that we can continue unrolling the | |
1447 | now-innermost loops. */ | |
b2a225ba | 1448 | if (cleanup_tree_cfg ()) |
1449 | update_ssa (TODO_update_ssa_only_virtuals); | |
d88fd237 | 1450 | |
1451 | /* Clean up the information about numbers of iterations, since | |
1452 | complete unrolling might have invalidated it. */ | |
1453 | scev_reset (); | |
382ecba7 | 1454 | if (flag_checking && loops_state_satisfies_p (LOOP_CLOSED_SSA)) |
9f0ac045 | 1455 | verify_loop_closed_ssa (true); |
d88fd237 | 1456 | } |
9f0ac045 | 1457 | if (loop_closed_ssa_invalidated) |
1458 | BITMAP_FREE (loop_closed_ssa_invalidated); | |
d88fd237 | 1459 | } |
793a0ab5 | 1460 | while (changed |
1461 | && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS)); | |
08162157 | 1462 | |
0cfe7a23 | 1463 | BITMAP_FREE (father_bbs); |
2ebfc881 | 1464 | |
9f0ac045 | 1465 | if (irred_invalidated |
1466 | && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)) | |
1467 | mark_irreducible_loops (); | |
1468 | ||
4c641bf8 | 1469 | return 0; |
bb445479 | 1470 | } |
f86b328b | 1471 | |
1472 | /* Canonical induction variable creation pass. */ | |
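/* A hedged C-level sketch of the canonical form this pass arranges
   (the rewrite itself is done on GIMPLE):

     i = 0;                            ivtmp = n;
     do { ...; i++; }          ==>     do { ...; i++; ivtmp--; }
     while (i < n);                    while (ivtmp != 0);

   where "ivtmp" is a hypothetical name for the added variable.  */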
1473 | ||
f86b328b | 1474 | namespace { |
1475 | ||
1476 | const pass_data pass_data_iv_canon = | |
1477 | { | |
1478 | GIMPLE_PASS, /* type */ | |
1479 | "ivcanon", /* name */ | |
1480 | OPTGROUP_LOOP, /* optinfo_flags */ | |
f86b328b | 1481 | TV_TREE_LOOP_IVCANON, /* tv_id */ |
1482 | ( PROP_cfg | PROP_ssa ), /* properties_required */ | |
1483 | 0, /* properties_provided */ | |
1484 | 0, /* properties_destroyed */ | |
1485 | 0, /* todo_flags_start */ | |
1486 | 0, /* todo_flags_finish */ | |
1487 | }; | |
1488 | ||
1489 | class pass_iv_canon : public gimple_opt_pass | |
1490 | { | |
1491 | public: | |
1492 | pass_iv_canon (gcc::context *ctxt) | |
1493 | : gimple_opt_pass (pass_data_iv_canon, ctxt) | |
1494 | {} | |
1495 | ||
1496 | /* opt_pass methods: */ | |
31315c24 | 1497 | virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; } |
65b0537f | 1498 | virtual unsigned int execute (function *fun); |
f86b328b | 1499 | |
1500 | }; // class pass_iv_canon | |
1501 | ||
65b0537f | 1502 | unsigned int |
1503 | pass_iv_canon::execute (function *fun) | |
1504 | { | |
1505 | if (number_of_loops (fun) <= 1) | |
1506 | return 0; | |
1507 | ||
1508 | return canonicalize_induction_variables (); | |
1509 | } | |
1510 | ||
f86b328b | 1511 | } // anon namespace |
1512 | ||
1513 | gimple_opt_pass * | |
1514 | make_pass_iv_canon (gcc::context *ctxt) | |
1515 | { | |
1516 | return new pass_iv_canon (ctxt); | |
1517 | } | |
1518 | ||
1519 | /* Complete unrolling of loops. */ | |
1520 | ||
f86b328b | 1521 | namespace { |
1522 | ||
1523 | const pass_data pass_data_complete_unroll = | |
1524 | { | |
1525 | GIMPLE_PASS, /* type */ | |
1526 | "cunroll", /* name */ | |
1527 | OPTGROUP_LOOP, /* optinfo_flags */ | |
f86b328b | 1528 | TV_COMPLETE_UNROLL, /* tv_id */ |
1529 | ( PROP_cfg | PROP_ssa ), /* properties_required */ | |
1530 | 0, /* properties_provided */ | |
1531 | 0, /* properties_destroyed */ | |
1532 | 0, /* todo_flags_start */ | |
1533 | 0, /* todo_flags_finish */ | |
1534 | }; | |
1535 | ||
1536 | class pass_complete_unroll : public gimple_opt_pass | |
1537 | { | |
1538 | public: | |
1539 | pass_complete_unroll (gcc::context *ctxt) | |
1540 | : gimple_opt_pass (pass_data_complete_unroll, ctxt) | |
1541 | {} | |
1542 | ||
1543 | /* opt_pass methods: */ | |
65b0537f | 1544 | virtual unsigned int execute (function *); |
f86b328b | 1545 | |
1546 | }; // class pass_complete_unroll | |
1547 | ||
65b0537f | 1548 | unsigned int |
1549 | pass_complete_unroll::execute (function *fun) | |
1550 | { | |
1551 | if (number_of_loops (fun) <= 1) | |
1552 | return 0; | |
1553 | ||
b96f8145 | 1554 | /* If we ever decide to run loop peeling more than once, we will need to |
1555 | track loops already peeled in loop structures themselves to avoid | |
1556 | re-peeling the same loop multiple times. */ | |
1557 | if (flag_peel_loops) | |
1558 | peeled_loops = BITMAP_ALLOC (NULL); | |
2a09b28c | 1559 | unsigned int val = tree_unroll_loops_completely (flag_unroll_loops |
1560 | || flag_peel_loops | |
1561 | || optimize >= 3, true); | |
b96f8145 | 1562 | if (peeled_loops) |
1563 | { | |
1564 | BITMAP_FREE (peeled_loops); | |
1565 | peeled_loops = NULL; | |
1566 | } | |
1567 | return val; | |
65b0537f | 1568 | } |
1569 | ||
f86b328b | 1570 | } // anon namespace |
1571 | ||
1572 | gimple_opt_pass * | |
1573 | make_pass_complete_unroll (gcc::context *ctxt) | |
1574 | { | |
1575 | return new pass_complete_unroll (ctxt); | |
1576 | } | |
1577 | ||
1578 | /* Complete unrolling of inner loops. */ | |
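/* This early variant passes unroll_outer == false to
   tree_unroll_loops_completely below, so outermost loops are unrolled
   only when doing so does not grow the code; more aggressive unrolling
   is left to the later "cunroll" pass.  */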
1579 | ||
f86b328b | 1580 | namespace { |
1581 | ||
1582 | const pass_data pass_data_complete_unrolli = | |
1583 | { | |
1584 | GIMPLE_PASS, /* type */ | |
1585 | "cunrolli", /* name */ | |
1586 | OPTGROUP_LOOP, /* optinfo_flags */ | |
f86b328b | 1587 | TV_COMPLETE_UNROLL, /* tv_id */ |
1588 | ( PROP_cfg | PROP_ssa ), /* properties_required */ | |
1589 | 0, /* properties_provided */ | |
1590 | 0, /* properties_destroyed */ | |
1591 | 0, /* todo_flags_start */ | |
8b88439e | 1592 | 0, /* todo_flags_finish */ |
f86b328b | 1593 | }; |
1594 | ||
1595 | class pass_complete_unrolli : public gimple_opt_pass | |
1596 | { | |
1597 | public: | |
1598 | pass_complete_unrolli (gcc::context *ctxt) | |
1599 | : gimple_opt_pass (pass_data_complete_unrolli, ctxt) | |
1600 | {} | |
1601 | ||
1602 | /* opt_pass methods: */ | |
31315c24 | 1603 | virtual bool gate (function *) { return optimize >= 2; } |
65b0537f | 1604 | virtual unsigned int execute (function *); |
f86b328b | 1605 | |
1606 | }; // class pass_complete_unrolli | |
1607 | ||
65b0537f | 1608 | unsigned int |
1609 | pass_complete_unrolli::execute (function *fun) | |
1610 | { | |
1611 | unsigned ret = 0; | |
1612 | ||
2a09b28c | 1613 | loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS); |
65b0537f | 1614 | if (number_of_loops (fun) > 1) |
1615 | { | |
1616 | scev_initialize (); | |
1617 | ret = tree_unroll_loops_completely (optimize >= 3, false); | |
65b0537f | 1618 | scev_finalize (); |
1619 | } | |
1620 | loop_optimizer_finalize (); | |
1621 | ||
1622 | return ret; | |
1623 | } | |
1624 | ||
f86b328b | 1625 | } // anon namespace |
1626 | ||
1627 | gimple_opt_pass * | |
1628 | make_pass_complete_unrolli (gcc::context *ctxt) | |
1629 | { | |
1630 | return new pass_complete_unrolli (ctxt); | |
1631 | } | |
1632 | ||
1633 |