/* Induction variable canonicalization and loop peeling.
   Copyright (C) 2004-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass detects the loops that iterate a constant number of times,
   adds a canonical induction variable (step -1, tested against 0)
   and replaces the exit test.  This enables the less powerful rtl
   level analysis to use this information.

   This might spoil the code in some cases (by increasing register pressure).
   Note that in the case the new variable is not needed, ivopts will get rid
   of it, so it might only be a problem when there are no other linear
   induction variables.  In that case the created optimization possibilities
   are likely to pay off.

   We also perform
     - complete unrolling (or peeling) when the loop rolls few enough
       times,
     - simple peeling (i.e. copying a few initial iterations prior to the
       loop) when the number-of-iterations estimate is known (typically
       from profile info).  */
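
/* For instance (an illustrative sketch, not taken from any particular
   testcase), a loop such as

     for (i = 3; i < 10; i += 2)
       body;

   is known to execute its body 4 times, so the pass materializes
   roughly

     ivtmp = 4;
     do { body; ivtmp -= 1; } while (ivtmp != 0);

   leaving the original induction variable for ivopts to clean up if it
   became unused.  The counting-down-to-zero form is what the rtl doloop
   machinery can recognize.  */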

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "hard-reg-set.h"
#include "ssa.h"
#include "alias.h"
#include "fold-const.h"
#include "tm_p.h"
#include "profile.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "flags.h"
#include "tree-inline.h"
#include "target.h"
#include "tree-cfgcleanup.h"
#include "builtins.h"

/* Specifies types of loops that may be unrolled.  */

enum unroll_level
{
  UL_SINGLE_ITER,	/* Only loops that exit immediately in the first
			   iteration.  */
  UL_NO_GROWTH,		/* Only loops whose unrolling will not cause increase
			   of code size.  */
  UL_ALL		/* All suitable loops.  */
};

/* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
   is the exit edge whose condition is replaced.  */

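/* A minimal sketch of the result: with NITER the number of latch
   executions, the code below builds, in effect,

     ivtmp = NITER + 1;              (computed before the loop)
     ...
     ivtmp = ivtmp - 1;              (inserted in the loop)
     if (ivtmp == 0) exit loop;      (the rewritten test in EXIT->src)

   so the exit condition of EXIT->src becomes a compare of the new
   variable against zero.  */
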
static void
create_canonical_iv (struct loop *loop, edge exit, tree niter)
{
  edge in;
  tree type, var;
  gcond *cond;
  gimple_stmt_iterator incr_at;
  enum tree_code cmp;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " iterations.\n");
    }

  cond = as_a <gcond *> (last_stmt (exit->src));
  in = EDGE_SUCC (exit->src, 0);
  if (in == exit)
    in = EDGE_SUCC (exit->src, 1);

  /* Note that we do not need to worry about overflows, since
     type of niter is always unsigned and all comparisons are
     just for equality/nonequality -- i.e. everything works
     with modulo arithmetic.  */

  type = TREE_TYPE (niter);
  niter = fold_build2 (PLUS_EXPR, type,
		       niter,
		       build_int_cst (type, 1));
  incr_at = gsi_last_bb (in->src);
  create_iv (niter,
	     build_int_cst (type, -1),
	     NULL_TREE, loop,
	     &incr_at, false, NULL, &var);

  cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
  gimple_cond_set_code (cond, cmp);
  gimple_cond_set_lhs (cond, var);
  gimple_cond_set_rhs (cond, build_int_cst (type, 0));
  update_stmt (cond);
}

/* Describe size of loop as detected by tree_estimate_loop_size.  */
struct loop_size
{
  /* Number of instructions in the loop.  */
  int overall;

  /* Number of instructions that will be likely optimized out in
     peeled iterations of loop (i.e. computations based on an induction
     variable that starts at a known constant).  */
  int eliminated_by_peeling;

  /* Same statistics for last iteration of loop: it is smaller because
     instructions after exit are not executed.  */
  int last_iteration;
  int last_iteration_eliminated_by_peeling;

  /* If some IV computation will become constant.  */
  bool constant_iv;

  /* Number of call stmts that are not a builtin and are pure or const
     present on the hot path.  */
  int num_pure_calls_on_hot_path;
  /* Number of call stmts that are not a builtin and are not pure nor const
     present on the hot path.  */
  int num_non_pure_calls_on_hot_path;
  /* Number of statements other than calls seen on the hot path.  */
  int non_call_stmts_on_hot_path;
  /* Number of branches seen on the hot path.  */
  int num_branches_on_hot_path;
};

/* Return true if OP in STMT will be constant after peeling LOOP.  */

static bool
constant_after_peeling (tree op, gimple stmt, struct loop *loop)
{
  affine_iv iv;

  if (is_gimple_min_invariant (op))
    return true;

  /* We can still fold accesses to constant arrays when index is known.  */
  if (TREE_CODE (op) != SSA_NAME)
    {
      tree base = op;

      /* First take a quick look to see whether we refer to a constant
	 array inside.  */
      while (handled_component_p (base))
	base = TREE_OPERAND (base, 0);
      if ((DECL_P (base)
	   && ctor_for_folding (base) != error_mark_node)
	  || CONSTANT_CLASS_P (base))
	{
	  /* If so, see if we understand all the indices.  */
	  base = op;
	  while (handled_component_p (base))
	    {
	      if (TREE_CODE (base) == ARRAY_REF
		  && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
		return false;
	      base = TREE_OPERAND (base, 0);
	    }
	  return true;
	}
      return false;
    }

  /* Induction variables become constants in the peeled copies.  */
  if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
    return false;
  if (!is_gimple_min_invariant (iv.base))
    return false;
  if (!is_gimple_min_invariant (iv.step))
    return false;
  return true;
}
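
/* For example (an illustrative sketch), with

     static const int tbl[4] = { 2, 3, 5, 7 };
     for (i = 0; i < 4; i++)
       sum += tbl[i];

   the access tbl[i] satisfies constant_after_peeling in each peeled copy:
   the base is a constant-initialized decl and the index i is a simple IV
   with invariant base and step, so every copy of the load folds to a
   constant.  */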

/* Computes an estimated number of insns in LOOP.
   EXIT (if non-NULL) is an exit edge that will be eliminated in all but last
   iteration of the loop.
   EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last
   iteration of loop.
   Return results in SIZE, estimate benefits for complete unrolling exiting
   by EXIT.
   Stop estimating after UPPER_BOUND is met.  Return true in this case.  */

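/* A small worked sketch of the bookkeeping below (hypothetical numbers):
   for a body of 8 insns in which the IV increment and the exit compare
   (2 insns) fold away in peeled copies, SIZE->overall is 8 and
   SIZE->eliminated_by_peeling is 2; if 2 further insns sit after the exit
   test and so are dead in the final copy, SIZE->last_iteration is 6.
   These numbers feed estimated_unrolled_size below.  */
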
static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
			 struct loop_size *size, int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
	  && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
	after_exit = true;
      else
	after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index,
		 after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  int num = estimate_num_insns (stmt, &eni_size_weights);
	  bool likely_eliminated = false;
	  bool likely_eliminated_last = false;
	  bool likely_eliminated_peeled = false;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  size: %3i ", num);
	      print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
	    }

	  /* Look for reasons why we might optimize this stmt away.  */

	  if (gimple_has_side_effects (stmt))
	    ;
	  /* Exit conditional.  */
	  else if (exit && body[i] == exit->src
		   && stmt == last_stmt (exit->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Exit condition will be eliminated "
			 "in peeled copies.\n");
	      likely_eliminated_peeled = true;
	    }
	  else if (edge_to_cancel && body[i] == edge_to_cancel->src
		   && stmt == last_stmt (edge_to_cancel->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Exit condition will be eliminated "
			 "in last copy.\n");
	      likely_eliminated_last = true;
	    }
	  /* Sets of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Induction variable computation will"
			 " be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Assignments of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
		   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop)
		   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
		       || constant_after_peeling (gimple_assign_rhs2 (stmt),
						  stmt, loop)))
	    {
	      size->constant_iv = true;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "   Constant expression will be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Conditionals.  */
	  else if ((gimple_code (stmt) == GIMPLE_COND
		    && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))
		   || (gimple_code (stmt) == GIMPLE_SWITCH
		       && constant_after_peeling (gimple_switch_index (
						    as_a <gswitch *> (stmt)),
						  stmt, loop)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Constant conditional.\n");
	      likely_eliminated = true;
	    }

	  size->overall += num;
	  if (likely_eliminated || likely_eliminated_peeled)
	    size->eliminated_by_peeling += num;
	  if (!after_exit)
	    {
	      size->last_iteration += num;
	      if (likely_eliminated || likely_eliminated_last)
		size->last_iteration_eliminated_by_peeling += num;
	    }
	  if ((size->overall * 3 / 2 - size->eliminated_by_peeling
	       - size->last_iteration_eliminated_by_peeling) > upper_bound)
	    {
	      free (body);
	      path.release ();
	      return true;
	    }
	}
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  if (gimple_code (stmt) == GIMPLE_CALL)
	    {
	      int flags = gimple_call_flags (stmt);
	      tree decl = gimple_call_fndecl (stmt);

	      if (decl && DECL_IS_BUILTIN (decl)
		  && is_inexpensive_builtin (decl))
		;
	      else if (flags & (ECF_PURE | ECF_CONST))
		size->num_pure_calls_on_hot_path++;
	      else
		size->num_non_pure_calls_on_hot_path++;
	      size->num_branches_on_hot_path++;
	    }
	  else if (gimple_code (stmt) != GIMPLE_CALL
		   && gimple_code (stmt) != GIMPLE_DEBUG)
	    size->non_call_stmts_on_hot_path++;
	  if (((gimple_code (stmt) == GIMPLE_COND
		&& (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    || constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop)))
	       || (gimple_code (stmt) == GIMPLE_SWITCH
		   && !constant_after_peeling (gimple_switch_index (
						 as_a <gswitch *> (stmt)),
					       stmt, loop)))
	      && (!exit || bb != exit->src))
	    size->num_branches_on_hot_path++;
	}
    }
  path.release ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
	     size->eliminated_by_peeling, size->last_iteration,
	     size->last_iteration_eliminated_by_peeling);

  free (body);
  return false;
}

/* Estimate number of insns of completely unrolled loop.
   It is (NUNROLL + 1) * size of loop body, taking into account
   the fact that in last copy everything after exit conditional
   is dead and that some instructions will be eliminated after
   peeling.

   The loop body is likely to simplify further; as this is difficult
   to guess, we just decrease the result by 1/3.  */

static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
			 unsigned HOST_WIDE_INT nunroll)
{
  HOST_WIDE_INT unr_insns = ((nunroll)
			     * (HOST_WIDE_INT) (size->overall
						- size->eliminated_by_peeling));
  if (!nunroll)
    unr_insns = 0;
  unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;

  unr_insns = unr_insns * 2 / 3;
  if (unr_insns <= 0)
    unr_insns = 1;

  return unr_insns;
}
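
/* Worked example (with the hypothetical numbers from the sketch above
   tree_estimate_loop_size): SIZE->overall = 8,
   SIZE->eliminated_by_peeling = 2, SIZE->last_iteration = 6,
   SIZE->last_iteration_eliminated_by_peeling = 2 and NUNROLL = 3 give

     (3 * (8 - 2) + (6 - 2)) * 2 / 3 = 22 * 2 / 3 = 14 insns.  */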

/* Loop LOOP is known to not loop.  See if there is an edge in the loop
   body that can be removed to make the loop always exit and at
   the same time it does not make any code potentially executed
   during the last iteration dead.

   After complete unrolling we still may get rid of the conditional
   on the exit in the last copy even if we have no idea what it does.
   This is quite a common case for loops of the form

     int a[5];
     for (i=0;i<b;i++)
       a[i]=0;

   Here we prove the loop to iterate 5 times but we do not know
   it from the induction variable.

   For now we handle only the simple case where there is an exit condition
   just before the latch block and the latch block contains no statements
   with side effects that may otherwise terminate the execution of the loop
   (such as by EH or by terminating the program or longjmp).

   In the general case we may want to cancel the paths leading to statements
   loop-niter identified as having undefined effect in the last iteration.
   The other cases are hopefully rare and will be cleaned up later.  */

static edge
loop_edge_to_cancel (struct loop *loop)
{
  vec<edge> exits;
  unsigned i;
  edge edge_to_cancel;
  gimple_stmt_iterator gsi;

  /* We want only one predecessor of the loop latch.  */
  if (EDGE_COUNT (loop->latch->preds) > 1)
    return NULL;

  exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
    {
      /* Find the edge other than the loop exit
	 leaving the conditional.  */
      if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
	continue;
      if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
      else
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);

      /* We can only handle conditionals.  */
      if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      /* We should never have conditionals in the loop latch.  */
      gcc_assert (edge_to_cancel->dest != loop->header);

      /* Check that it leads to the loop latch.  */
      if (edge_to_cancel->dest != loop->latch)
	continue;

      exits.release ();

      /* Verify that the code in the loop latch does nothing that may end
	 program execution without really reaching the exit.  This may
	 include non-pure/const function calls, EH statements, volatile
	 ASMs etc.  */
      for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_has_side_effects (gsi_stmt (gsi)))
	  return NULL;
      return edge_to_cancel;
    }
  exits.release ();
  return NULL;
}

/* Remove all tests for exits that are known to be taken after LOOP was
   peeled NPEELED times.  Put gcc_unreachable before every statement
   known to not be executed.  */

static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If statement is known to be undefined after peeling, turn it
	 into unreachable (or trap when debugging experience is supposed
	 to be good).  */
      if (!elt->is_exit
	  && wi::ltu_p (elt->bound, npeeled))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
	  gcall *stmt = gimple_build_call
	      (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
	  gimple_set_location (stmt, gimple_location (elt->stmt));
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	  split_block (gimple_bb (stmt), stmt);
	  changed = true;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced statement unreachable: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	}
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
	       && wi::leu_p (elt->bound, npeeled))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced exit to be taken: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);
	  gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_true (cond_stmt);
	  else
	    gimple_cond_make_false (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}
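
/* Roughly (a sketch of the intent): loop-niter records, per statement, a
   bound on the number of iterations in which it may validly execute.
   After fully peeling NPEELED copies, a non-exit statement whose recorded
   bound is below NPEELED can never be reached without undefined behavior,
   so a __builtin_unreachable () call is placed before it; exit tests whose
   bound is at most NPEELED are folded to be taken.  */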

/* Remove all exits that are known to be never taken because of the loop bound
   discovered.  */

static bool
remove_redundant_iv_tests (struct loop *loop)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* Exit is pointless if it won't be taken before the loop reaches
	 the upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
	  && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);
	  struct tree_niter_desc niter;

	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);

	  /* Only when we know the actual number of iterations, not
	     just a bound, can we remove the exit.  */
	  if (!number_of_iterations_exit (loop, exit_edge,
					  &niter, false, false)
	      || !integer_onep (niter.assumptions)
	      || !integer_zerop (niter.may_be_zero)
	      || !niter.niter
	      || TREE_CODE (niter.niter) != INTEGER_CST
	      || !wi::ltu_p (loop->nb_iterations_upper_bound,
			     wi::to_widest (niter.niter)))
	    continue;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Removed pointless exit: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (cond_stmt);
	  else
	    gimple_cond_make_true (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}
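
/* Illustrative sketch: in

     for (i = 0; i < n; i++)
       {
	 if (i > 1000)	// this exit fires only after 1001 iterations
	   break;
	 ...
       }

   once the pass proves n <= 10, the inner test can never be taken before
   the loop terminates (10 < 1001), so its condition is folded away and
   the redundant exit disappears.  */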

/* Stores loops that will be unlooped after we process the whole loop
   tree.  */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;

/* Cancel all fully unrolled loops by putting __builtin_unreachable
   on the latch edge.
   We do it after all unrolling since unlooping moves basic blocks
   across loop boundaries, trashing loop closed SSA form as well
   as SCEV info, which need to be intact during unrolling.

   IRRED_INVALIDATED is used to bookkeep if information about
   irreducible regions may become invalid as a result
   of the transformation.
   LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
   when we need to go into loop closed SSA form.  */

static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
	      bool *irred_invalidated)
{
  while (loops_to_unloop.length ())
    {
      struct loop *loop = loops_to_unloop.pop ();
      int n_unroll = loops_to_unloop_nunroll.pop ();
      basic_block latch = loop->latch;
      edge latch_edge = loop_latch_edge (loop);
      int flags = latch_edge->flags;
      location_t locus = latch_edge->goto_locus;
      gcall *stmt;
      gimple_stmt_iterator gsi;

      remove_exits_and_undefined_stmts (loop, n_unroll);

      /* Unloop destroys the latch edge.  */
      unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);

      /* Create new basic block for the latch edge destination and wire
	 it in.  */
      stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
      latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch), flags);
      latch_edge->probability = 0;
      latch_edge->count = 0;
      latch_edge->flags |= flags;
      latch_edge->goto_locus = locus;

      latch_edge->dest->loop_father = current_loops->tree_root;
      latch_edge->dest->count = 0;
      latch_edge->dest->frequency = 0;
      set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest, latch_edge->src);

      gsi = gsi_start_bb (latch_edge->dest);
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
    }
  loops_to_unloop.release ();
  loops_to_unloop_nunroll.release ();
}

/* Tries to unroll LOOP completely, i.e. NITER times.
   UL determines which loops we are allowed to unroll.
   EXIT is the exit of the loop that should be eliminated.
   MAXITER specifies a bound on the number of iterations, -1 if it is
   not known or too large for HOST_WIDE_INT.  The location
   LOCUS corresponding to the loop is used when emitting
   a summary of the unroll to the dump file.  */

static bool
try_unroll_loop_completely (struct loop *loop,
			    edge exit, tree niter,
			    enum unroll_level ul,
			    HOST_WIDE_INT maxiter,
			    location_t locus)
{
  unsigned HOST_WIDE_INT n_unroll = 0, ninsns, unr_insns;
  struct loop_size size;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;
  int report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_RTL | TDF_DETAILS;

  /* See if we proved number of iterations to be low constant.

     EXIT is an edge that will be removed in all but last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of executions of the loop is determined by a standard
     induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges
     leaving from the iv test.  */
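
  /* A sketch of the common shape: for

       do { body; i++; } while (i < n);   // the iv test in the latch

     the edge leaving the iv test out of the loop is EXIT (dead in all
     peeled copies but the last) and the back edge from the same test is
     EDGE_TO_CANCEL (dead in the last copy, leaving straight-line code).  */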
  if (tree_fits_uhwi_p (niter))
    {
      n_unroll = tree_to_uhwi (niter);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
	edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations and thus we cannot eliminate
     the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if (maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* Loop terminates before the IV variable test, so we cannot
	 remove it in the last iteration.  */
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  if (n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Not unrolling loop %d "
		 "(--param max-completely-peeled-times limit reached).\n",
		 loop->num);
      return false;
    }

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);

  if (n_unroll)
    {
      sbitmap wont_exit;
      edge e;
      unsigned i;
      bool large;
      vec<edge> to_remove = vNULL;
      if (ul == UL_SINGLE_ITER)
	return false;

      large = tree_estimate_loop_size
		 (loop, exit, edge_to_cancel, &size,
		  PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
      ninsns = size.overall;
      if (large)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
		     loop->num);
	  return false;
	}

      unr_insns = estimated_unrolled_size (&size, n_unroll);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Loop size: %d\n", (int) ninsns);
	  fprintf (dump_file, "  Estimated size after unrolling: %d\n",
		   (int) unr_insns);
	}

      /* If the code is going to shrink, we don't need to be extra cautious
	 on guessing if the unrolling is going to be profitable.  */
      if (unr_insns
	  /* If there is IV variable that will become constant, we save
	     one instruction in the loop prologue we do not account
	     otherwise.  */
	  <= ninsns + (size.constant_iv != false))
	;
      /* We unroll only inner loops, because we do not consider it profitable
	 otherwise.  We still can cancel loopback edge of not rolling loop;
	 this is always a good idea.  */
      else if (ul == UL_NO_GROWTH)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
		     loop->num);
	  return false;
	}
      /* Outer loops tend to be less interesting candidates for complete
	 unrolling unless we can do a lot of propagation into the inner loop
	 body.  For now we disable outer loop unrolling when the code would
	 grow.  */
      else if (loop->inner)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "it is not innermost and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is call on a hot path through the loop, then
	 there is most probably not much to optimize.  */
      else if (size.num_non_pure_calls_on_hot_path)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains call and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is pure/const call in the function, then we
	 can still optimize the unrolled loop body if it contains
	 some other interesting code than the calls and code
	 storing or cumulating the return value.  */
      else if (size.num_pure_calls_on_hot_path
	       /* One IV increment, one test, one ivtmp store
		  and one useful stmt.  That is about minimal loop
		  doing pure call.  */
	       && (size.non_call_stmts_on_hot_path
		   <= 3 + size.num_pure_calls_on_hot_path))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains just pure calls and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* Complete unrolling is a major win when control flow is removed and
	 one big basic block is created.  If the loop contains control flow
	 the optimization may still be a win because of eliminating the loop
	 overhead but it also may blow the branch predictor tables.
	 Limit number of branches on the hot path through the peeled
	 sequence.  */
      else if (size.num_branches_on_hot_path * (int)n_unroll
	       > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     " number of branches on hot path in the unrolled sequence"
		     " reach --param max-peel-branches limit.\n",
		     loop->num);
	  return false;
	}
      else if (unr_insns
	       > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "(--param max-completely-peeled-insns limit reached).\n",
		     loop->num);
	  return false;
	}
      dump_printf_loc (report_flags, locus,
		       "loop turned into non-loop; it never loops.\n");

      initialize_original_copy_tables ();
      wont_exit = sbitmap_alloc (n_unroll + 1);
      bitmap_ones (wont_exit);
      bitmap_clear_bit (wont_exit, 0);

      if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
						 n_unroll, wont_exit,
						 exit, &to_remove,
						 DLTHE_FLAG_UPDATE_FREQ
						 | DLTHE_FLAG_COMPLETTE_PEEL))
	{
	  free_original_copy_tables ();
	  free (wont_exit);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Failed to duplicate the loop\n");
	  return false;
	}

      FOR_EACH_VEC_ELT (to_remove, i, e)
	{
	  bool ok = remove_path (e);
	  gcc_assert (ok);
	}

      to_remove.release ();
      free (wont_exit);
      free_original_copy_tables ();
    }

  /* Remove the conditional from the last copy of the loop.  */
  if (edge_to_cancel)
    {
      gcond *cond = as_a <gcond *> (last_stmt (edge_to_cancel->src));
      if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_false (cond);
      else
	gimple_cond_make_true (cond);
      update_stmt (cond);
      /* Do not remove the path.  Doing so may remove outer loop
	 and confuse bookkeeping code in tree_unroll_loops_completely.  */
    }

  /* Store the loop for later unlooping and exit removal.  */
  loops_to_unloop.safe_push (loop);
  loops_to_unloop_nunroll.safe_push (n_unroll);

  if (dump_enabled_p ())
    {
      if (!n_unroll)
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			 "loop turned into non-loop; it never loops\n");
      else
	{
	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			   "loop with %d iterations completely unrolled",
			   (int) (n_unroll + 1));
	  if (profile_info)
	    dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
			 " (header execution count %d)",
			 (int)loop->header->count);
	  dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (exit)
	fprintf (dump_file, "Exit condition of peeled iterations was "
		 "eliminated.\n");
      if (edge_to_cancel)
	fprintf (dump_file, "Last iteration exit edge was proved true.\n");
      else
	fprintf (dump_file, "Latch of last iteration was marked by "
		 "__builtin_unreachable ().\n");
    }

  return true;
}

/* Return number of instructions after peeling.  */
static unsigned HOST_WIDE_INT
estimated_peeled_sequence_size (struct loop_size *size,
				unsigned HOST_WIDE_INT npeel)
{
  return MAX (npeel * (HOST_WIDE_INT) (size->overall
				       - size->eliminated_by_peeling), 1);
}
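
/* E.g. (hypothetical numbers): with SIZE->overall = 10,
   SIZE->eliminated_by_peeling = 4 and NPEEL = 3, the peeled sequence is
   estimated at 3 * (10 - 4) = 18 insns.  */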

/* If the loop is expected to iterate N times and is
   small enough, duplicate the loop body N+1 times before
   the loop itself.  This way the hot path will never
   enter the loop.
   Parameters are the same as for try_unroll_loop_completely.  */
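
/* Sketch of the shape after peeling with an iteration estimate of 2
   (three copies are placed ahead of the loop; the loop itself remains
   for the cold case in which the estimate is exceeded):

     if (!cond) goto done;  body;
     if (!cond) goto done;  body;
     if (!cond) goto done;  body;
     while (cond) body;		// rarely entered
   done:;  */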

static bool
try_peel_loop (struct loop *loop,
	       edge exit, tree niter,
	       HOST_WIDE_INT maxiter)
{
  int npeel;
  struct loop_size size;
  int peeled_size;
  sbitmap wont_exit;
  unsigned i;
  vec<edge> to_remove = vNULL;
  edge e;

  /* If the iteration bound is known and large, then we can safely eliminate
     the check in peeled copies.  */
  if (TREE_CODE (niter) != INTEGER_CST)
    exit = NULL;

  if (!flag_peel_loops || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0)
    return false;

  /* Peel only innermost loops.  */
  if (loop->inner)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: outer loop\n");
      return false;
    }

  if (!optimize_loop_for_speed_p (loop))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: cold loop\n");
      return false;
    }

  /* Check if there is an estimate on the number of iterations.  */
  npeel = estimated_loop_iterations_int (loop);
  if (npeel < 0)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: number of iterations is not "
		 "estimated\n");
      return false;
    }
  if (maxiter >= 0 && maxiter <= npeel)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: upper bound is known so can "
		 "unroll completely\n");
      return false;
    }

  /* We want to peel estimated number of iterations + 1 (so we never
     enter the loop on quick path).  Check against PARAM_MAX_PEEL_TIMES
     and be sure to avoid overflows.  */
  if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: rolls too much "
		 "(%i + 1 > --param max-peel-times)\n", npeel);
      return false;
    }
  npeel++;

  /* Check peeled loops size.  */
  tree_estimate_loop_size (loop, exit, NULL, &size,
			   PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
  if ((peeled_size = estimated_peeled_sequence_size (&size, npeel))
      > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: peeled sequence size is too large "
		 "(%i insns > --param max-peel-insns)", peeled_size);
      return false;
    }

  /* Duplicate possibly eliminating the exits.  */
  initialize_original_copy_tables ();
  wont_exit = sbitmap_alloc (npeel + 1);
  bitmap_ones (wont_exit);
  bitmap_clear_bit (wont_exit, 0);
  if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
					     npeel, wont_exit,
					     exit, &to_remove,
					     DLTHE_FLAG_UPDATE_FREQ
					     | DLTHE_FLAG_COMPLETTE_PEEL))
    {
      free_original_copy_tables ();
      free (wont_exit);
      return false;
    }
  FOR_EACH_VEC_ELT (to_remove, i, e)
    {
      bool ok = remove_path (e);
      gcc_assert (ok);
    }
  free (wont_exit);
  free_original_copy_tables ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Peeled loop %d, %i times.\n",
	       loop->num, npeel);
    }
  if (loop->any_upper_bound)
    loop->nb_iterations_upper_bound -= npeel;
  loop->nb_iterations_estimate = 0;
  /* Make sure to mark the loop cold so we do not try to peel it more.  */
  scale_loop_profile (loop, 1, 0);
  loop->header->count = 0;
  return true;
}

/* Adds a canonical induction variable to LOOP if suitable.
   CREATE_IV is true if we may create a new iv.  UL determines
   which loops we are allowed to completely unroll.  If TRY_EVAL is true,
   we try to determine the number of iterations of a loop by direct
   evaluation.  Returns true if cfg is changed.  */

static bool
canonicalize_loop_induction_variables (struct loop *loop,
				       bool create_iv, enum unroll_level ul,
				       bool try_eval)
{
  edge exit = NULL;
  tree niter;
  HOST_WIDE_INT maxiter;
  bool modified = false;
  location_t locus = UNKNOWN_LOCATION;

  niter = number_of_latch_executions (loop);
  exit = single_exit (loop);
  if (TREE_CODE (niter) == INTEGER_CST)
    locus = gimple_location (last_stmt (exit->src));
  else
    {
      /* If the loop has more than one exit, try checking all of them
	 for # of iterations determinable through scev.  */
      if (!exit)
	niter = find_loop_niter (loop, &exit);

      /* Finally if everything else fails, try brute force evaluation.  */
      if (try_eval
	  && (chrec_contains_undetermined (niter)
	      || TREE_CODE (niter) != INTEGER_CST))
	niter = find_loop_niter_by_eval (loop, &exit);

      if (exit)
	locus = gimple_location (last_stmt (exit->src));

      if (TREE_CODE (niter) != INTEGER_CST)
	exit = NULL;
    }

  /* We work exceptionally hard here to estimate the bound
     by find_loop_niter_by_eval.  Be sure to keep it for the future.  */
  if (niter && TREE_CODE (niter) == INTEGER_CST)
    {
      record_niter_bound (loop, wi::to_widest (niter),
			  exit == single_likely_exit (loop), true);
    }

  /* Force re-computation of loop bounds so we can remove redundant exits.  */
  maxiter = max_loop_iterations_int (loop);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && TREE_CODE (niter) == INTEGER_CST)
    {
      fprintf (dump_file, "Loop %d iterates ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " times.\n");
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && maxiter >= 0)
    {
      fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
	       (int)maxiter);
    }

  /* Remove exits that are known to be never taken based on loop bound.
     Needs to be called after computation of max_loop_iterations_int that
     populates the loop bounds.  */
  modified |= remove_redundant_iv_tests (loop);

  if (try_unroll_loop_completely (loop, exit, niter, ul, maxiter, locus))
    return true;

  if (create_iv
      && niter && !chrec_contains_undetermined (niter)
      && exit && just_once_each_iteration_p (loop, exit->src))
    create_canonical_iv (loop, exit, niter);

  if (ul == UL_ALL)
    modified |= try_peel_loop (loop, exit, niter, maxiter);

  return modified;
}

/* The main entry point of the pass.  Adds canonical induction variables
   to the suitable loops.  */

unsigned int
canonicalize_induction_variables (void)
{
  struct loop *loop;
  bool changed = false;
  bool irred_invalidated = false;
  bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

  free_numbers_of_iterations_estimates ();
  estimate_numbers_of_iterations ();

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      changed |= canonicalize_loop_induction_variables (loop,
							true, UL_SINGLE_ITER,
							true);
    }
  gcc_assert (!need_ssa_update_p (cfun));

  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  /* Clean up the information about numbers of iterations, since brute force
     evaluation could reveal new information.  */
  scev_reset ();

  if (!bitmap_empty_p (loop_closed_ssa_invalidated))
    {
      gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }
  BITMAP_FREE (loop_closed_ssa_invalidated);

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}

/* Propagate VAL into all uses of SSA_NAME.  */

static void
propagate_into_all_uses (tree ssa_name, tree val)
{
  imm_use_iterator iter;
  gimple use_stmt;

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, ssa_name)
    {
      gimple_stmt_iterator use_stmt_gsi = gsi_for_stmt (use_stmt);
      use_operand_p use;

      FOR_EACH_IMM_USE_ON_STMT (use, iter)
	SET_USE (use, val);

      if (is_gimple_assign (use_stmt)
	  && get_gimple_rhs_class (gimple_assign_rhs_code (use_stmt))
	     == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (use_stmt);

	  if (TREE_CODE (rhs) == ADDR_EXPR)
	    recompute_tree_invariant_for_addr_expr (rhs);
	}

      fold_stmt_inplace (&use_stmt_gsi);
      update_stmt (use_stmt);
      maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt);
    }
}

/* Propagate constant SSA_NAMEs defined in basic block BB.  */

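/* E.g. (a sketch): after complete unrolling, a copied header often contains
   a degenerate PHI such as i_7 = PHI <0(entry)>; the code below replaces
   every use of i_7 by 0 and folds the users, so address computations like
   a[i_7] collapse before the next unrolling iteration measures loop
   sizes.  */
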
static void
propagate_constants_for_unrolling (basic_block bb)
{
  /* Look for degenerate PHI nodes with constant argument.  */
  for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gphi *phi = gsi.phi ();
      tree result = gimple_phi_result (phi);
      tree arg = gimple_phi_arg_def (phi, 0);

      if (gimple_phi_num_args (phi) == 1 && TREE_CODE (arg) == INTEGER_CST)
	{
	  propagate_into_all_uses (result, arg);
	  gsi_remove (&gsi, true);
	  release_ssa_name (result);
	}
      else
	gsi_next (&gsi);
    }

  /* Look for assignments to SSA names with constant RHS.  */
  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;

      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == INTEGER_CST
	  && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	{
	  propagate_into_all_uses (lhs, gimple_assign_rhs1 (stmt));
	  gsi_remove (&gsi, true);
	  release_ssa_name (lhs);
	}
      else
	gsi_next (&gsi);
    }
}

/* Process loops from innermost to outer, stopping at the innermost
   loop we unrolled.  */

static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
				vec<loop_p, va_heap>& father_stack,
				struct loop *loop)
{
  struct loop *loop_father;
  bool changed = false;
  struct loop *inner;
  enum unroll_level ul;

  /* Process inner loops first.  */
  for (inner = loop->inner; inner != NULL; inner = inner->next)
    changed |= tree_unroll_loops_completely_1 (may_increase_size,
					       unroll_outer, father_stack,
					       inner);

  /* If we changed an inner loop we cannot process outer loops in this
     iteration because SSA form is not up-to-date.  Continue with
     siblings of outer loops instead.  */
  if (changed)
    return true;

  /* Don't unroll #pragma omp simd loops until the vectorizer
     attempts to vectorize those.  */
  if (loop->force_vectorize)
    return false;

  /* Try to unroll this loop.  */
  loop_father = loop_outer (loop);
  if (!loop_father)
    return false;

  if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
      /* Unroll outermost loops only if asked to do so or they do
	 not cause code growth.  */
      && (unroll_outer || loop_outer (loop_father)))
    ul = UL_ALL;
  else
    ul = UL_NO_GROWTH;

  if (canonicalize_loop_induction_variables
	(loop, false, ul, !flag_tree_loop_ivcanon))
    {
      /* If we'll continue unrolling, we need to propagate constants
	 within the new basic blocks to fold away induction variable
	 computations; otherwise, the size might blow up before the
	 iteration is complete and the IR eventually cleaned up.  */
      if (loop_outer (loop_father) && !loop_father->aux)
	{
	  father_stack.safe_push (loop_father);
	  loop_father->aux = loop_father;
	}

      return true;
    }

  return false;
}

/* Unroll LOOPS completely if they iterate just a few times.  Unless
   MAY_INCREASE_SIZE is true, perform the unrolling only if the
   size of the code does not increase.  */

unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
  auto_vec<loop_p, 16> father_stack;
  bool changed;
  int iteration = 0;
  bool irred_invalidated = false;

  do
    {
      changed = false;
      bitmap loop_closed_ssa_invalidated = NULL;

      if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
	loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

      free_numbers_of_iterations_estimates ();
      estimate_numbers_of_iterations ();

      changed = tree_unroll_loops_completely_1 (may_increase_size,
						unroll_outer, father_stack,
						current_loops->tree_root);
      if (changed)
	{
	  struct loop **iter;
	  unsigned i;

	  /* Be sure to skip unlooped loops while processing the
	     father_stack array.  */
	  FOR_EACH_VEC_ELT (loops_to_unloop, i, iter)
	    (*iter)->aux = NULL;
	  FOR_EACH_VEC_ELT (father_stack, i, iter)
	    if (!(*iter)->aux)
	      *iter = NULL;
	  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);

	  /* We cannot use TODO_update_ssa_no_phi because VOPS gets
	     confused.  */
	  if (loop_closed_ssa_invalidated
	      && !bitmap_empty_p (loop_closed_ssa_invalidated))
	    rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
					  TODO_update_ssa);
	  else
	    update_ssa (TODO_update_ssa);

	  /* Propagate the constants within the new basic blocks.  */
	  FOR_EACH_VEC_ELT (father_stack, i, iter)
	    if (*iter)
	      {
		unsigned j;
		basic_block *body = get_loop_body_in_dom_order (*iter);
		for (j = 0; j < (*iter)->num_nodes; j++)
		  propagate_constants_for_unrolling (body[j]);
		free (body);
		(*iter)->aux = NULL;
	      }
	  father_stack.truncate (0);

	  /* This will take care of removing completely unrolled loops
	     from the loop structures so we can continue unrolling now
	     innermost loops.  */
	  if (cleanup_tree_cfg ())
	    update_ssa (TODO_update_ssa_only_virtuals);

	  /* Clean up the information about numbers of iterations, since
	     complete unrolling might have invalidated it.  */
	  scev_reset ();
#ifdef ENABLE_CHECKING
	  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
	    verify_loop_closed_ssa (true);
#endif
	}
      if (loop_closed_ssa_invalidated)
	BITMAP_FREE (loop_closed_ssa_invalidated);
    }
  while (changed
	 && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));

  father_stack.release ();

  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return 0;
}

/* Canonical induction variable creation pass.  */

namespace {

const pass_data pass_data_iv_canon =
{
  GIMPLE_PASS, /* type */
  "ivcanon", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_LOOP_IVCANON, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_iv_canon : public gimple_opt_pass
{
public:
  pass_iv_canon (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_iv_canon, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
  virtual unsigned int execute (function *fun);

}; // class pass_iv_canon

unsigned int
pass_iv_canon::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return canonicalize_induction_variables ();
}

} // anon namespace

gimple_opt_pass *
make_pass_iv_canon (gcc::context *ctxt)
{
  return new pass_iv_canon (ctxt);
}

/* Complete unrolling of loops.  */

namespace {

const pass_data pass_data_complete_unroll =
{
  GIMPLE_PASS, /* type */
  "cunroll", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unroll : public gimple_opt_pass
{
public:
  pass_complete_unroll (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unroll, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_complete_unroll

unsigned int
pass_complete_unroll::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return tree_unroll_loops_completely (flag_unroll_loops
				       || flag_peel_loops
				       || optimize >= 3, true);
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unroll (gcc::context *ctxt)
{
  return new pass_complete_unroll (ctxt);
}

/* Complete unrolling of inner loops.  */

namespace {

const pass_data pass_data_complete_unrolli =
{
  GIMPLE_PASS, /* type */
  "cunrolli", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unrolli : public gimple_opt_pass
{
public:
  pass_complete_unrolli (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize >= 2; }
  virtual unsigned int execute (function *);

}; // class pass_complete_unrolli

unsigned int
pass_complete_unrolli::execute (function *fun)
{
  unsigned ret = 0;

  loop_optimizer_init (LOOPS_NORMAL
		       | LOOPS_HAVE_RECORDED_EXITS);
  if (number_of_loops (fun) > 1)
    {
      scev_initialize ();
      ret = tree_unroll_loops_completely (optimize >= 3, false);
      free_numbers_of_iterations_estimates ();
      scev_finalize ();
    }
  loop_optimizer_finalize ();

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unrolli (gcc::context *ctxt)
{
  return new pass_complete_unrolli (ctxt);
}