]>
Commit | Line | Data |
---|---|---|
ec1e9f7c | 1 | /* Code sinking for trees |
8d9254fc | 2 | Copyright (C) 2001-2020 Free Software Foundation, Inc. |
ec1e9f7c DB |
3 | Contributed by Daniel Berlin <dan@dberlin.org> |
4 | ||
5 | This file is part of GCC. | |
6 | ||
7 | GCC is free software; you can redistribute it and/or modify | |
8 | it under the terms of the GNU General Public License as published by | |
9dcd6f09 | 9 | the Free Software Foundation; either version 3, or (at your option) |
ec1e9f7c DB |
10 | any later version. |
11 | ||
12 | GCC is distributed in the hope that it will be useful, | |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | GNU General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
9dcd6f09 NC |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ | |
ec1e9f7c DB |
20 | |
21 | #include "config.h" | |
22 | #include "system.h" | |
23 | #include "coretypes.h" | |
c7131fb2 | 24 | #include "backend.h" |
ec1e9f7c | 25 | #include "tree.h" |
c7131fb2 | 26 | #include "gimple.h" |
957060b5 | 27 | #include "cfghooks.h" |
957060b5 | 28 | #include "tree-pass.h" |
c7131fb2 | 29 | #include "ssa.h" |
957060b5 | 30 | #include "gimple-pretty-print.h" |
40e23961 | 31 | #include "fold-const.h" |
d8a2d370 | 32 | #include "stor-layout.h" |
60393bbc | 33 | #include "cfganal.h" |
5be5c238 | 34 | #include "gimple-iterator.h" |
442b4905 | 35 | #include "tree-cfg.h" |
ec1e9f7c | 36 | #include "cfgloop.h" |
84935c98 | 37 | #include "tree-eh.h" |
ec1e9f7c DB |
38 | |
39 | /* TODO: | |
40 | 1. Sinking store only using scalar promotion (IE without moving the RHS): | |
41 | ||
42 | *q = p; | |
43 | p = p + 1; | |
44 | if (something) | |
45 | *q = <not p>; | |
46 | else | |
47 | y = *q; | |
48 | ||
b8698a0f | 49 | |
ec1e9f7c DB |
50 | should become |
51 | sinktemp = p; | |
52 | p = p + 1; | |
53 | if (something) | |
54 | *q = <not p>; | |
55 | else | |
56 | { | |
57 | *q = sinktemp; | |
58 | y = *q | |
59 | } | |
60 | Store copy propagation will take care of the store elimination above. | |
b8698a0f | 61 | |
ec1e9f7c DB |
62 | |
63 | 2. Sinking using Partial Dead Code Elimination. */ | |
64 | ||
65 | ||
/* Statistics gathered by the pass and reported via
   statistics_counter_event at the end of execute ().  */
static struct
{
  /* The number of statements sunk down the flowgraph by code sinking.  */
  int sunk;

  /* The number of stores commoned and sunk down by store commoning.  */
  int commoned;
} sink_stats;
75 | ||
f652d14b | 76 | /* Given a PHI, and one of its arguments (DEF), find the edge for |
ec1e9f7c DB |
77 | that argument and return it. If the argument occurs twice in the PHI node, |
78 | we return NULL. */ | |
79 | ||
80 | static basic_block | |
538dd0b7 | 81 | find_bb_for_arg (gphi *phi, tree def) |
ec1e9f7c | 82 | { |
726a989a | 83 | size_t i; |
ec1e9f7c DB |
84 | bool foundone = false; |
85 | basic_block result = NULL; | |
726a989a | 86 | for (i = 0; i < gimple_phi_num_args (phi); i++) |
ec1e9f7c DB |
87 | if (PHI_ARG_DEF (phi, i) == def) |
88 | { | |
89 | if (foundone) | |
90 | return NULL; | |
91 | foundone = true; | |
726a989a | 92 | result = gimple_phi_arg_edge (phi, i)->src; |
ec1e9f7c DB |
93 | } |
94 | return result; | |
95 | } | |
96 | ||
97 | /* When the first immediate use is in a statement, then return true if all | |
98 | immediate uses in IMM are in the same statement. | |
99 | We could also do the case where the first immediate use is in a phi node, | |
100 | and all the other uses are in phis in the same basic block, but this | |
101 | requires some expensive checking later (you have to make sure no def/vdef | |
102 | in the statement occurs for multiple edges in the various phi nodes it's | |
6c6cfbfd | 103 | used in, so that you only have one place you can sink it to. */ |
ec1e9f7c DB |
104 | |
105 | static bool | |
acce8ce3 | 106 | all_immediate_uses_same_place (def_operand_p def_p) |
ec1e9f7c | 107 | { |
acce8ce3 | 108 | tree var = DEF_FROM_PTR (def_p); |
f430bae8 AM |
109 | imm_use_iterator imm_iter; |
110 | use_operand_p use_p; | |
ec1e9f7c | 111 | |
355fe088 | 112 | gimple *firstuse = NULL; |
acce8ce3 | 113 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var) |
ec1e9f7c | 114 | { |
acce8ce3 RB |
115 | if (is_gimple_debug (USE_STMT (use_p))) |
116 | continue; | |
117 | if (firstuse == NULL) | |
118 | firstuse = USE_STMT (use_p); | |
119 | else | |
120 | if (firstuse != USE_STMT (use_p)) | |
121 | return false; | |
ec1e9f7c | 122 | } |
f430bae8 | 123 | |
ec1e9f7c DB |
124 | return true; |
125 | } | |
126 | ||
ec1e9f7c DB |
127 | /* Find the nearest common dominator of all of the immediate uses in IMM. */ |
128 | ||
129 | static basic_block | |
acce8ce3 | 130 | nearest_common_dominator_of_uses (def_operand_p def_p, bool *debug_stmts) |
b8698a0f | 131 | { |
acce8ce3 | 132 | tree var = DEF_FROM_PTR (def_p); |
0e3de1d4 | 133 | auto_bitmap blocks; |
ec1e9f7c | 134 | basic_block commondom; |
ec1e9f7c DB |
135 | unsigned int j; |
136 | bitmap_iterator bi; | |
f430bae8 AM |
137 | imm_use_iterator imm_iter; |
138 | use_operand_p use_p; | |
f430bae8 | 139 | |
acce8ce3 | 140 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var) |
ec1e9f7c | 141 | { |
355fe088 | 142 | gimple *usestmt = USE_STMT (use_p); |
acce8ce3 | 143 | basic_block useblock; |
000b62dc | 144 | |
538dd0b7 | 145 | if (gphi *phi = dyn_cast <gphi *> (usestmt)) |
acce8ce3 RB |
146 | { |
147 | int idx = PHI_ARG_INDEX_FROM_USE (use_p); | |
ab798313 | 148 | |
538dd0b7 | 149 | useblock = gimple_phi_arg_edge (phi, idx)->src; |
acce8ce3 RB |
150 | } |
151 | else if (is_gimple_debug (usestmt)) | |
152 | { | |
153 | *debug_stmts = true; | |
154 | continue; | |
155 | } | |
156 | else | |
157 | { | |
158 | useblock = gimple_bb (usestmt); | |
159 | } | |
f430bae8 | 160 | |
acce8ce3 RB |
161 | /* Short circuit. Nothing dominates the entry block. */ |
162 | if (useblock == ENTRY_BLOCK_PTR_FOR_FN (cfun)) | |
0e3de1d4 TS |
163 | return NULL; |
164 | ||
acce8ce3 | 165 | bitmap_set_bit (blocks, useblock->index); |
ec1e9f7c | 166 | } |
06e28de2 | 167 | commondom = BASIC_BLOCK_FOR_FN (cfun, bitmap_first_set_bit (blocks)); |
ec1e9f7c | 168 | EXECUTE_IF_SET_IN_BITMAP (blocks, 0, j, bi) |
b8698a0f | 169 | commondom = nearest_common_dominator (CDI_DOMINATORS, commondom, |
06e28de2 | 170 | BASIC_BLOCK_FOR_FN (cfun, j)); |
ec1e9f7c DB |
171 | return commondom; |
172 | } | |
173 | ||
/* Given EARLY_BB and LATE_BB, two blocks in a path through the dominator
   tree, return the best basic block between them (inclusive) to place
   statements.  STMT is the statement being considered for sinking.

   We want the most control dependent block in the shallowest loop nest.

   If the resulting block is in a shallower loop nest, then use it.  Else
   only use the resulting block if it has significantly lower execution
   frequency than EARLY_BB to avoid gratuitous statement movement.  We
   consider statements with VOPS more desirable to move.

   This pass would obviously benefit from PDO as it utilizes block
   frequencies.  It would also benefit from recomputing frequencies
   if profile data is not available since frequencies often get out
   of sync with reality.  */

static basic_block
select_best_block (basic_block early_bb,
		   basic_block late_bb,
		   gimple *stmt)
{
  basic_block best_bb = late_bb;
  basic_block temp_bb = late_bb;
  int threshold;

  /* Walk the dominator chain from LATE_BB up to EARLY_BB, tracking the
     block with the shallowest loop nesting seen so far.  */
  while (temp_bb != early_bb)
    {
      /* If we've moved into a lower loop nest, then that becomes
	 our best block.  */
      if (bb_loop_depth (temp_bb) < bb_loop_depth (best_bb))
	best_bb = temp_bb;

      /* Walk up the dominator tree, hopefully we'll find a shallower
	 loop nest.  */
      temp_bb = get_immediate_dominator (CDI_DOMINATORS, temp_bb);
    }

  /* If we found a shallower loop nest, then we always consider that
     a win.  This will always give us the most control dependent block
     within that loop nest.  */
  if (bb_loop_depth (best_bb) < bb_loop_depth (early_bb))
    return best_bb;

  /* Get the sinking threshold.  If the statement to be moved has memory
     operands, then increase the threshold by 7% as those are even more
     profitable to avoid, clamping at 100%.  */
  threshold = param_sink_frequency_threshold;
  if (gimple_vuse (stmt) || gimple_vdef (stmt))
    {
      threshold += 7;
      if (threshold > 100)
	threshold = 100;
    }

  /* If BEST_BB is at the same nesting level, then require it to have
     significantly lower execution frequency to avoid gratuitous movement.  */
  if (bb_loop_depth (best_bb) == bb_loop_depth (early_bb)
      /* If result of comparison is unknown, prefer EARLY_BB.
	 Thus use !(...>=..) rather than (...<...)  */
      && !(best_bb->count.apply_scale (100, 1)
	   >= early_bb->count.apply_scale (threshold, 1)))
    return best_bb;

  /* No better block found, so return EARLY_BB, which happens to be the
     statement's original block.  */
  return early_bb;
}
241 | ||
/* Given a statement (STMT) and the basic block it is currently in (FROMBB),
   determine the location to sink the statement to, if any.
   Returns true if there is such location; in that case, TOGSI points to the
   statement before that STMT should be moved.  *ZERO_USES_P is set to true
   when STMT's single definition has no immediate uses at all (so the caller
   may treat STMT as dead).  */

static bool
statement_sink_location (gimple *stmt, basic_block frombb,
			 gimple_stmt_iterator *togsi, bool *zero_uses_p)
{
  gimple *use;
  use_operand_p one_use = NULL_USE_OPERAND_P;
  basic_block sinkbb;
  use_operand_p use_p;
  def_operand_p def_p;
  ssa_op_iter iter;
  imm_use_iterator imm_iter;

  *zero_uses_p = false;

  /* We only can sink assignments and non-looping const/pure calls.  */
  int cf;
  if (!is_gimple_assign (stmt)
      && (!is_gimple_call (stmt)
	  || !((cf = gimple_call_flags (stmt)) & (ECF_CONST|ECF_PURE))
	  || (cf & ECF_LOOPING_CONST_OR_PURE)))
    return false;

  /* We only can sink stmts with a single definition.  */
  def_p = single_ssa_def_operand (stmt, SSA_OP_ALL_DEFS);
  if (def_p == NULL_DEF_OPERAND_P)
    return false;

  /* There are a few classes of things we can't or don't move, some because we
     don't have code to handle it, some because it's not profitable and some
     because it's not legal.

     We can't sink things that may be global stores, at least not without
     calculating a lot more information, because we may cause it to no longer
     be seen by an external routine that needs it depending on where it gets
     moved to.

     We can't sink statements that end basic blocks without splitting the
     incoming edge for the sink location to place it there.

     We can't sink statements that have volatile operands.

     We don't want to sink dead code, so anything with 0 immediate uses is not
     sunk.

     Don't sink BLKmode assignments if current function has any local explicit
     register variables, as BLKmode assignments may involve memcpy or memset
     calls or, on some targets, inline expansion thereof that sometimes need
     to use specific hard registers.

     */
  if (stmt_ends_bb_p (stmt)
      || gimple_has_side_effects (stmt)
      || (cfun->has_local_explicit_reg_vars
	  && TYPE_MODE (TREE_TYPE (gimple_get_lhs (stmt))) == BLKmode))
    return false;

  /* Return if there are no immediate uses of this stmt.  */
  if (has_zero_uses (DEF_FROM_PTR (def_p)))
    {
      *zero_uses_p = true;
      return false;
    }

  /* Values live across abnormal edges must stay put.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (DEF_FROM_PTR (def_p)))
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
    {
      tree use = USE_FROM_PTR (use_p);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use))
	return false;
    }

  use = NULL;

  /* If stmt is a store the one and only use needs to be the VOP
     merging PHI node.  */
  if (virtual_operand_p (DEF_FROM_PTR (def_p)))
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  gimple *use_stmt = USE_STMT (use_p);

	  /* A killing definition is not a use.  */
	  if ((gimple_has_lhs (use_stmt)
	       && operand_equal_p (gimple_get_lhs (stmt),
				   gimple_get_lhs (use_stmt), 0))
	      || stmt_kills_ref_p (use_stmt, gimple_get_lhs (stmt)))
	    {
	      /* If use_stmt is or might be a nop assignment then USE_STMT
		 acts as a use as well as definition.  */
	      if (stmt != use_stmt
		  && ref_maybe_used_by_stmt_p (use_stmt,
					       gimple_get_lhs (stmt)))
		return false;
	      continue;
	    }

	  if (gimple_code (use_stmt) != GIMPLE_PHI)
	    return false;

	  /* More than one distinct virtual-use PHI blocks sinking.  */
	  if (use
	      && use != use_stmt)
	    return false;

	  use = use_stmt;
	}
      if (!use)
	return false;
    }
  /* If all the immediate uses are not in the same place, find the nearest
     common dominator of all the immediate uses.  For PHI nodes, we have to
     find the nearest common dominator of all of the predecessor blocks, since
     that is where insertion would have to take place.  */
  else if (gimple_vuse (stmt)
	   || !all_immediate_uses_same_place (def_p))
    {
      bool debug_stmts = false;
      basic_block commondom = nearest_common_dominator_of_uses (def_p,
								&debug_stmts);

      if (commondom == frombb)
	return false;

      /* If this is a load then do not sink past any stores.
	 ??? This is overly simple but cheap.  We basically look
	 for an existing load with the same VUSE in the path to one
	 of the sink candidate blocks and we adjust commondom to the
	 nearest to commondom.  */
      if (gimple_vuse (stmt))
	{
	  /* Do not sink loads from hard registers.  */
	  if (gimple_assign_single_p (stmt)
	      && TREE_CODE (gimple_assign_rhs1 (stmt)) == VAR_DECL
	      && DECL_HARD_REGISTER (gimple_assign_rhs1 (stmt)))
	    return false;

	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  basic_block found = NULL;
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vuse (stmt))
	    {
	      gimple *use_stmt = USE_STMT (use_p);
	      basic_block bb = gimple_bb (use_stmt);
	      /* For PHI nodes the block we know sth about
		 is the incoming block with the use.  */
	      if (gimple_code (use_stmt) == GIMPLE_PHI)
		bb = EDGE_PRED (bb, PHI_ARG_INDEX_FROM_USE (use_p))->src;
	      /* Any dominator of commondom would be ok with
		 adjusting commondom to that block.  */
	      bb = nearest_common_dominator (CDI_DOMINATORS, bb, commondom);
	      if (!found)
		found = bb;
	      else if (dominated_by_p (CDI_DOMINATORS, bb, found))
		found = bb;
	      /* If we can't improve, stop.  */
	      if (found == commondom)
		break;
	    }
	  commondom = found;
	  if (commondom == frombb)
	    return false;
	}

      /* Our common dominator has to be dominated by frombb in order to be a
	 trivially safe place to put this statement, since it has multiple
	 uses.  */
      if (!dominated_by_p (CDI_DOMINATORS, commondom, frombb))
	return false;

      commondom = select_best_block (frombb, commondom, stmt);

      if (commondom == frombb)
	return false;

      *togsi = gsi_after_labels (commondom);

      return true;
    }
  else
    {
      /* All uses are in one statement; find the first non-debug use.  */
      FOR_EACH_IMM_USE_FAST (one_use, imm_iter, DEF_FROM_PTR (def_p))
	{
	  if (is_gimple_debug (USE_STMT (one_use)))
	    continue;
	  break;
	}
      use = USE_STMT (one_use);

      if (gimple_code (use) != GIMPLE_PHI)
	{
	  sinkbb = select_best_block (frombb, gimple_bb (use), stmt);

	  if (sinkbb == frombb)
	    return false;

	  if (sinkbb == gimple_bb (use))
	    *togsi = gsi_for_stmt (use);
	  else
	    *togsi = gsi_after_labels (sinkbb);

	  return true;
	}
    }

  /* The single use is a PHI: sink into the predecessor feeding it.  */
  sinkbb = find_bb_for_arg (as_a <gphi *> (use), DEF_FROM_PTR (def_p));

  /* This can happen if there are multiple uses in a PHI.  */
  if (!sinkbb)
    return false;

  sinkbb = select_best_block (frombb, sinkbb, stmt);
  if (!sinkbb || sinkbb == frombb)
    return false;

  /* If the latch block is empty, don't make it non-empty by sinking
     something into it.  */
  if (sinkbb == frombb->loop_father->latch
      && empty_block_p (sinkbb))
    return false;

  *togsi = gsi_after_labels (sinkbb);

  return true;
}
472 | ||
/* Very simplistic code to sink common stores from the predecessor through
   our virtual PHI.  We do this before sinking stmts from BB as it might
   expose sinking opportunities of the merged stores.
   Once we have partial dead code elimination through sth like SSU-PRE this
   should be moved there.
   Returns TODO_* flags (TODO_cleanup_cfg when anything was commoned).  */

static unsigned
sink_common_stores_to_bb (basic_block bb)
{
  unsigned todo = 0;
  gphi *phi;

  if (EDGE_COUNT (bb->preds) > 1
      && (phi = get_virtual_phi (bb)))
    {
      /* Repeat until no more common stores are found.  */
      while (1)
	{
	  gimple *first_store = NULL;
	  auto_vec <tree, 5> vdefs;
	  gimple_stmt_iterator gsi;

	  /* Search for common stores defined by all virtual PHI args.
	     ??? Common stores not present in all predecessors could
	     be handled by inserting a forwarder to sink to.  Generally
	     this involves deciding which stores to do this for if
	     multiple common stores are present for different sets of
	     predecessors.  See PR11832 for an interesting case.  */
	  for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
	    {
	      tree arg = gimple_phi_arg_def (phi, i);
	      gimple *def = SSA_NAME_DEF_STMT (arg);
	      if (! is_gimple_assign (def)
		  || stmt_can_throw_internal (cfun, def))
		{
		  /* ??? We could handle some cascading with the def being
		     another PHI.  We'd have to insert multiple PHIs for
		     the rhs then though (if they are not all equal).  */
		  first_store = NULL;
		  break;
		}
	      /* ??? Do not try to do anything fancy with aliasing, thus
		 do not sink across non-aliased loads (or even stores,
		 so different store order will make the sinking fail).  */
	      bool all_uses_on_phi = true;
	      imm_use_iterator iter;
	      use_operand_p use_p;
	      FOR_EACH_IMM_USE_FAST (use_p, iter, arg)
		if (USE_STMT (use_p) != phi)
		  {
		    all_uses_on_phi = false;
		    break;
		  }
	      if (! all_uses_on_phi)
		{
		  first_store = NULL;
		  break;
		}
	      /* Check all stores are to the same LHS.  */
	      if (! first_store)
		first_store = def;
	      /* ??? We could handle differing SSA uses in the LHS by inserting
		 PHIs for them.  */
	      else if (! operand_equal_p (gimple_assign_lhs (first_store),
					  gimple_assign_lhs (def), 0)
		       || (gimple_clobber_p (first_store)
			   != gimple_clobber_p (def)))
		{
		  first_store = NULL;
		  break;
		}
	      vdefs.safe_push (arg);
	    }
	  if (! first_store)
	    break;

	  /* Check if we need a PHI node to merge the stored values.  */
	  bool allsame = true;
	  if (!gimple_clobber_p (first_store))
	    for (unsigned i = 1; i < vdefs.length (); ++i)
	      {
		gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
		if (! operand_equal_p (gimple_assign_rhs1 (first_store),
				       gimple_assign_rhs1 (def), 0))
		  {
		    allsame = false;
		    break;
		  }
	      }

	  /* We cannot handle aggregate values if we need to merge them.  */
	  tree type = TREE_TYPE (gimple_assign_lhs (first_store));
	  if (! allsame
	      && ! is_gimple_reg_type (type))
	    break;

	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_OPTIMIZED_LOCATIONS,
			       first_store,
			       "sinking common stores %sto ",
			       allsame ? "with same value " : "");
	      dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM,
				 gimple_assign_lhs (first_store));
	      dump_printf (MSG_OPTIMIZED_LOCATIONS, "\n");
	    }

	  /* Insert a PHI to merge differing stored values if necessary.
	     Note that in general inserting PHIs isn't a very good idea as
	     it makes the job of coalescing and register allocation harder.
	     Even common SSA uses on the rhs/lhs might extend their lifetime
	     across multiple edges by this code motion which makes
	     register allocation harder.  */
	  tree from;
	  if (! allsame)
	    {
	      from = make_ssa_name (type);
	      gphi *newphi = create_phi_node (from, bb);
	      for (unsigned i = 0; i < vdefs.length (); ++i)
		{
		  gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
		  add_phi_arg (newphi, gimple_assign_rhs1 (def),
			       EDGE_PRED (bb, i), UNKNOWN_LOCATION);
		}
	    }
	  else
	    from = gimple_assign_rhs1 (first_store);

	  /* Remove all stores.  */
	  for (unsigned i = 0; i < vdefs.length (); ++i)
	    TREE_VISITED (vdefs[i]) = 1;
	  for (unsigned i = 0; i < vdefs.length (); ++i)
	    /* If we have more than one use of a VDEF on the PHI make sure
	       we remove the defining stmt only once.  */
	    if (TREE_VISITED (vdefs[i]))
	      {
		TREE_VISITED (vdefs[i]) = 0;
		gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
		gsi = gsi_for_stmt (def);
		unlink_stmt_vdef (def);
		gsi_remove (&gsi, true);
		release_defs (def);
	      }

	  /* Insert the first store at the beginning of the merge BB.  */
	  gimple_set_vdef (first_store, gimple_phi_result (phi));
	  SSA_NAME_DEF_STMT (gimple_vdef (first_store)) = first_store;
	  gimple_phi_set_result (phi, make_ssa_name (gimple_vop (cfun)));
	  gimple_set_vuse (first_store, gimple_phi_result (phi));
	  gimple_assign_set_rhs1 (first_store, from);
	  /* ??? Should we reset first_stores location?  */
	  gsi = gsi_after_labels (bb);
	  gsi_insert_before (&gsi, first_store, GSI_SAME_STMT);
	  sink_stats.commoned++;

	  todo |= TODO_cleanup_cfg;
	}

      /* We could now have empty predecessors that we could remove,
	 forming a proper CFG for further sinking.  Note that even
	 CFG cleanup doesn't do this fully at the moment and it
	 doesn't preserve post-dominators in the process either.
	 The mergephi pass might do it though.  gcc.dg/tree-ssa/ssa-sink-13.c
	 shows this nicely if you disable tail merging or (same effect)
	 make the stored values unequal.  */
    }

  return todo;
}
642 | ||
/* Perform code sinking on BB, then recurse into its post-dominator
   children.  Returns accumulated TODO_* flags from store commoning.  */

static unsigned
sink_code_in_bb (basic_block bb)
{
  basic_block son;
  gimple_stmt_iterator gsi;
  edge_iterator ei;
  edge e;
  bool last = true;
  unsigned todo = 0;

  /* Sink common stores from the predecessor through our virtual PHI.  */
  todo |= sink_common_stores_to_bb (bb);

  /* If this block doesn't dominate anything, there can't be any place to sink
     the statements to.  */
  if (first_dom_son (CDI_DOMINATORS, bb) == NULL)
    goto earlyout;

  /* We can't move things across abnormal edges, so don't try.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & EDGE_ABNORMAL)
      goto earlyout;

  /* Walk statements backwards so that sinking a statement exposes
     sinking opportunities for the statements feeding it.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
    {
      gimple *stmt = gsi_stmt (gsi);
      gimple_stmt_iterator togsi;
      bool zero_uses_p;

      if (!statement_sink_location (stmt, bb, &togsi, &zero_uses_p))
	{
	  gimple_stmt_iterator saved = gsi;
	  if (!gsi_end_p (gsi))
	    gsi_prev (&gsi);
	  /* If we face a dead stmt remove it as it possibly blocks
	     sinking of uses.  */
	  if (zero_uses_p
	      && ! gimple_vdef (stmt))
	    {
	      gsi_remove (&saved, true);
	      release_defs (stmt);
	    }
	  else
	    last = false;
	  continue;
	}
      if (dump_file)
	{
	  fprintf (dump_file, "Sinking ");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
	  fprintf (dump_file, " from bb %d to bb %d\n",
		   bb->index, (gsi_bb (togsi))->index);
	}

      /* Update virtual operands of statements in the path we
	 do not sink to.  */
      if (gimple_vdef (stmt))
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *vuse_stmt;

	  FOR_EACH_IMM_USE_STMT (vuse_stmt, iter, gimple_vdef (stmt))
	    if (gimple_code (vuse_stmt) != GIMPLE_PHI)
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, gimple_vuse (stmt));
	}

      /* If this is the end of the basic block, we need to insert at the end
	 of the basic block.  */
      if (gsi_end_p (togsi))
	gsi_move_to_bb_end (&gsi, gsi_bb (togsi));
      else
	gsi_move_before (&gsi, &togsi);

      sink_stats.sunk++;

      /* If we've just removed the last statement of the BB, the
	 gsi_end_p() test below would fail, but gsi_prev() would have
	 succeeded, and we want it to succeed.  So we keep track of
	 whether we're at the last statement and pick up the new last
	 statement.  */
      if (last)
	{
	  gsi = gsi_last_bb (bb);
	  continue;
	}

      last = false;
      if (!gsi_end_p (gsi))
	gsi_prev (&gsi);

    }
 earlyout:
  /* Recurse down the post-dominator tree so inner blocks are
     processed before the blocks controlling them.  */
  for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    {
      todo |= sink_code_in_bb (son);
    }

  return todo;
}
ec1e9f7c DB |
748 | |
749 | /* Perform code sinking. | |
750 | This moves code down the flowgraph when we know it would be | |
751 | profitable to do so, or it wouldn't increase the number of | |
752 | executions of the statement. | |
753 | ||
754 | IE given | |
b8698a0f | 755 | |
ec1e9f7c DB |
756 | a_1 = b + c; |
757 | if (<something>) | |
758 | { | |
759 | } | |
760 | else | |
761 | { | |
762 | foo (&b, &c); | |
763 | a_5 = b + c; | |
764 | } | |
765 | a_6 = PHI (a_5, a_1); | |
766 | USE a_6. | |
767 | ||
768 | we'll transform this into: | |
769 | ||
770 | if (<something>) | |
771 | { | |
772 | a_1 = b + c; | |
773 | } | |
774 | else | |
775 | { | |
776 | foo (&b, &c); | |
777 | a_5 = b + c; | |
778 | } | |
779 | a_6 = PHI (a_5, a_1); | |
780 | USE a_6. | |
781 | ||
782 | Note that this reduces the number of computations of a = b + c to 1 | |
783 | when we take the else edge, instead of 2. | |
784 | */ | |
namespace {

/* Pass descriptor for the GIMPLE code-sinking pass ("sink").  */
const pass_data pass_data_sink_code =
{
  GIMPLE_PASS, /* type */
  "sink", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SINK, /* tv_id */
  /* PROP_no_crit_edges is ensured by running split_edges_for_insertion in
     pass_data_sink_code::execute ().  */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
/* Pass object wrapping the code-sinking transformation; gated on
   -ftree-sink (flag_tree_sink).  */
class pass_sink_code : public gimple_opt_pass
{
public:
  pass_sink_code (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_sink_code, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_sink != 0; }
  virtual unsigned int execute (function *);

}; // class pass_sink_code

/* Main entry point: set up loop and dominance info, sink code over the
   whole function starting from the exit block, report statistics and
   tear the auxiliary info down again.  Returns TODO_* flags.  */
unsigned int
pass_sink_code::execute (function *fun)
{
  loop_optimizer_init (LOOPS_NORMAL);
  /* Ensure PROP_no_crit_edges as promised by the pass descriptor.  */
  split_edges_for_insertion ();
  /* Fake exit edges make the post-dominator walk reach every block.  */
  connect_infinite_loops_to_exit ();
  memset (&sink_stats, 0, sizeof (sink_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  unsigned todo = sink_code_in_bb (EXIT_BLOCK_PTR_FOR_FN (fun));
  statistics_counter_event (fun, "Sunk statements", sink_stats.sunk);
  statistics_counter_event (fun, "Commoned stores", sink_stats.commoned);
  free_dominance_info (CDI_POST_DOMINATORS);
  remove_fake_exit_edges ();
  loop_optimizer_finalize ();

  return todo;
}

} // anon namespace

/* Factory function used by the pass manager to instantiate the pass.  */
gimple_opt_pass *
make_pass_sink_code (gcc::context *ctxt)
{
  return new pass_sink_code (ctxt);
}