/* Code sinking for trees
   Copyright (C) 2001-2020 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@dberlin.org>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-eh.h"

/* TODO:
   1. Sinking store only using scalar promotion (i.e. without moving the RHS):

   *q = p;
   p = p + 1;
   if (something)
     *q = <not p>;
   else
     y = *q;

   should become
   sinktemp = p;
   p = p + 1;
   if (something)
     *q = <not p>;
   else
     {
       *q = sinktemp;
       y = *q;
     }
   Store copy propagation will take care of the store elimination above.

   2. Sinking using Partial Dead Code Elimination.  */


static struct
{
  /* The number of statements sunk down the flowgraph by code sinking.  */
  int sunk;

  /* The number of stores commoned and sunk down by store commoning.  */
  int commoned;
} sink_stats;


/* Given a PHI and one of its arguments (DEF), find the edge for that
   argument and return the edge's source block.  If the argument occurs
   twice in the PHI node, return NULL.  */
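
/* For example (an invented case, not from a dump), given
   a_3 = PHI <a_1 (2), b_2 (3)> and DEF == a_1, the result is block 2;
   given a_3 = PHI <a_1 (2), a_1 (3)>, the result is NULL because a_1
   appears twice.  */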

static basic_block
find_bb_for_arg (gphi *phi, tree def)
{
  size_t i;
  bool foundone = false;
  basic_block result = NULL;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (PHI_ARG_DEF (phi, i) == def)
      {
	if (foundone)
	  return NULL;
	foundone = true;
	result = gimple_phi_arg_edge (phi, i)->src;
      }
  return result;
}

/* When the first immediate use is in a statement, then return true if all
   immediate uses of the definition in DEF_P are in the same statement.
   We could also do the case where the first immediate use is in a phi node,
   and all the other uses are in phis in the same basic block, but this
   requires some expensive checking later (you have to make sure no def/vdef
   in the statement occurs for multiple edges in the various phi nodes it's
   used in, so that you only have one place you can sink it to).  */

static bool
all_immediate_uses_same_place (def_operand_p def_p)
{
  tree var = DEF_FROM_PTR (def_p);
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  gimple *firstuse = NULL;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
    {
      if (is_gimple_debug (USE_STMT (use_p)))
	continue;
      if (firstuse == NULL)
	firstuse = USE_STMT (use_p);
      else
	if (firstuse != USE_STMT (use_p))
	  return false;
    }

  return true;
}

/* Find the nearest common dominator of all of the immediate uses of the
   definition in DEF_P, setting *DEBUG_STMTS if any of those uses are
   debug statements.  */
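
/* For instance (an invented example): if the definition is used by a
   statement in block 4 and by a PHI argument coming in from block 6,
   the result is the nearest common dominator of blocks 4 and 6.  A use
   in a PHI counts as a use in the corresponding predecessor block,
   since that is where code would have to be inserted.  */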

static basic_block
nearest_common_dominator_of_uses (def_operand_p def_p, bool *debug_stmts)
{
  tree var = DEF_FROM_PTR (def_p);
  auto_bitmap blocks;
  basic_block commondom;
  unsigned int j;
  bitmap_iterator bi;
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
    {
      gimple *usestmt = USE_STMT (use_p);
      basic_block useblock;

      if (gphi *phi = dyn_cast <gphi *> (usestmt))
	{
	  int idx = PHI_ARG_INDEX_FROM_USE (use_p);

	  useblock = gimple_phi_arg_edge (phi, idx)->src;
	}
      else if (is_gimple_debug (usestmt))
	{
	  *debug_stmts = true;
	  continue;
	}
      else
	{
	  useblock = gimple_bb (usestmt);
	}

      /* Short circuit.  Nothing dominates the entry block.  */
      if (useblock == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	return NULL;

      bitmap_set_bit (blocks, useblock->index);
    }
  commondom = BASIC_BLOCK_FOR_FN (cfun, bitmap_first_set_bit (blocks));
  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, j, bi)
    commondom = nearest_common_dominator (CDI_DOMINATORS, commondom,
					  BASIC_BLOCK_FOR_FN (cfun, j));
  return commondom;
}

/* Given EARLY_BB and LATE_BB, two blocks in a path through the dominator
   tree, return the best basic block between them (inclusive) to place
   statements.

   We want the most control dependent block in the shallowest loop nest.

   If the resulting block is in a shallower loop nest, then use it.  Else
   only use the resulting block if it has significantly lower execution
   frequency than EARLY_BB to avoid gratuitous statement movement.  We
   consider statements with VOPS more desirable to move.

   This pass would obviously benefit from PDO as it utilizes block
   frequencies.  It would also benefit from recomputing frequencies
   if profile data is not available since frequencies often get out
   of sync with reality.  */
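
/* To illustrate the frequency test below (a hand-worked example, assuming
   the default value of 75 for param_sink_frequency_threshold): with
   EARLY_BB and BEST_BB at the same loop depth, a statement with virtual
   operands raises the threshold to 82, so BEST_BB is preferred only if
   100 * BEST_BB->count is known to be less than 82 * EARLY_BB->count,
   i.e. BEST_BB must execute less than roughly 82% as often as
   EARLY_BB.  */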

static basic_block
select_best_block (basic_block early_bb,
		   basic_block late_bb,
		   gimple *stmt)
{
  basic_block best_bb = late_bb;
  basic_block temp_bb = late_bb;
  int threshold;

  while (temp_bb != early_bb)
    {
      /* If we've moved into a lower loop nest, then that becomes
	 our best block.  */
      if (bb_loop_depth (temp_bb) < bb_loop_depth (best_bb))
	best_bb = temp_bb;

      /* Walk up the dominator tree, hopefully we'll find a shallower
	 loop nest.  */
      temp_bb = get_immediate_dominator (CDI_DOMINATORS, temp_bb);
    }

  /* If we found a shallower loop nest, then we always consider that
     a win.  This will always give us the most control dependent block
     within that loop nest.  */
  if (bb_loop_depth (best_bb) < bb_loop_depth (early_bb))
    return best_bb;

  /* Get the sinking threshold.  If the statement to be moved has memory
     operands, then increase the threshold by 7% as those are even more
     profitable to avoid, clamping at 100%.  */
  threshold = param_sink_frequency_threshold;
  if (gimple_vuse (stmt) || gimple_vdef (stmt))
    {
      threshold += 7;
      if (threshold > 100)
	threshold = 100;
    }

  /* If BEST_BB is at the same nesting level, then require it to have
     significantly lower execution frequency to avoid gratuitous movement.  */
  if (bb_loop_depth (best_bb) == bb_loop_depth (early_bb)
      /* If the result of the comparison is unknown, prefer EARLY_BB.
	 Thus use !(...>=..) rather than (...<...)  */
      && !(best_bb->count.apply_scale (100, 1)
	   >= early_bb->count.apply_scale (threshold, 1)))
    return best_bb;

  /* No better block found, so return EARLY_BB, which happens to be the
     statement's original block.  */
  return early_bb;
}

/* Given a statement (STMT) and the basic block it is currently in (FROMBB),
   determine the location to sink the statement to, if any.
   Return true if there is such a location; in that case, TOGSI points to
   the statement before which STMT should be moved.  */

static bool
statement_sink_location (gimple *stmt, basic_block frombb,
			 gimple_stmt_iterator *togsi, bool *zero_uses_p)
{
  gimple *use;
  use_operand_p one_use = NULL_USE_OPERAND_P;
  basic_block sinkbb;
  use_operand_p use_p;
  def_operand_p def_p;
  ssa_op_iter iter;
  imm_use_iterator imm_iter;

  *zero_uses_p = false;

  /* We can only sink assignments and non-looping const/pure calls.  */
  int cf;
  if (!is_gimple_assign (stmt)
      && (!is_gimple_call (stmt)
	  || !((cf = gimple_call_flags (stmt)) & (ECF_CONST|ECF_PURE))
	  || (cf & ECF_LOOPING_CONST_OR_PURE)))
    return false;

  /* We can only sink stmts with a single definition.  */
  def_p = single_ssa_def_operand (stmt, SSA_OP_ALL_DEFS);
  if (def_p == NULL_DEF_OPERAND_P)
    return false;

  /* There are a few classes of things we can't or don't move, some because we
     don't have code to handle it, some because it's not profitable and some
     because it's not legal.

     We can't sink things that may be global stores, at least not without
     calculating a lot more information, because we may cause it to no longer
     be seen by an external routine that needs it depending on where it gets
     moved to.

     We can't sink statements that end basic blocks without splitting the
     incoming edge for the sink location to place it there.

     We can't sink statements that have volatile operands.

     We don't want to sink dead code, so anything with 0 immediate uses is not
     sunk.

     Don't sink BLKmode assignments if current function has any local explicit
     register variables, as BLKmode assignments may involve memcpy or memset
     calls or, on some targets, inline expansion thereof that sometimes need
     to use specific hard registers.  */
  if (stmt_ends_bb_p (stmt)
      || gimple_has_side_effects (stmt)
      || (cfun->has_local_explicit_reg_vars
	  && TYPE_MODE (TREE_TYPE (gimple_get_lhs (stmt))) == BLKmode))
    return false;

  /* Return if there are no immediate uses of this stmt.  */
  if (has_zero_uses (DEF_FROM_PTR (def_p)))
    {
      *zero_uses_p = true;
      return false;
    }

  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (DEF_FROM_PTR (def_p)))
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
    {
      tree use = USE_FROM_PTR (use_p);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use))
	return false;
    }

  use = NULL;

  /* If stmt is a store, the one and only use needs to be the VOP
     merging PHI node.  */
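  /* As an illustrative sketch (hand-written GIMPLE, not from a dump):

       *p_1 = x_2;			<-- stmt, whose VDEF is .MEM_4
       ...
       # .MEM_6 = PHI <.MEM_4(2), .MEM_5(3)>

     Killing definitions aside, the store can only be sunk when .MEM_4
     is consumed by nothing but the merging PHI node.  */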
  if (virtual_operand_p (DEF_FROM_PTR (def_p)))
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  gimple *use_stmt = USE_STMT (use_p);

	  /* A killing definition is not a use.  */
	  if ((gimple_has_lhs (use_stmt)
	       && operand_equal_p (gimple_get_lhs (stmt),
				   gimple_get_lhs (use_stmt), 0))
	      || stmt_kills_ref_p (use_stmt, gimple_get_lhs (stmt)))
	    {
	      /* If use_stmt is or might be a nop assignment then USE_STMT
		 acts as a use as well as a definition.  */
	      if (stmt != use_stmt
		  && ref_maybe_used_by_stmt_p (use_stmt,
					       gimple_get_lhs (stmt)))
		return false;
	      continue;
	    }

	  if (gimple_code (use_stmt) != GIMPLE_PHI)
	    return false;

	  if (use
	      && use != use_stmt)
	    return false;

	  use = use_stmt;
	}
      if (!use)
	return false;
    }
  /* If all the immediate uses are not in the same place, find the nearest
     common dominator of all the immediate uses.  For PHI nodes, we have to
     find the nearest common dominator of all of the predecessor blocks, since
     that is where insertion would have to take place.  */
  else if (gimple_vuse (stmt)
	   || !all_immediate_uses_same_place (def_p))
    {
      bool debug_stmts = false;
      basic_block commondom = nearest_common_dominator_of_uses (def_p,
								&debug_stmts);

      if (commondom == frombb)
	return false;

      /* If this is a load then do not sink past any stores.
	 ??? This is overly simple but cheap.  We basically look
	 for an existing load with the same VUSE in the path to one
	 of the sink candidate blocks and we adjust commondom to the
	 candidate block nearest to commondom.  */
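      /* For instance (an invented CFG): with commondom at block 7 and
	 another load of the same VUSE in block 4, where block 4
	 dominates block 7, commondom is adjusted to block 4 so the load
	 is not sunk past a store that could change its value.  */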
      if (gimple_vuse (stmt))
	{
	  /* Do not sink loads from hard registers.  */
	  if (gimple_assign_single_p (stmt)
	      && TREE_CODE (gimple_assign_rhs1 (stmt)) == VAR_DECL
	      && DECL_HARD_REGISTER (gimple_assign_rhs1 (stmt)))
	    return false;

	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  basic_block found = NULL;
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vuse (stmt))
	    {
	      gimple *use_stmt = USE_STMT (use_p);
	      basic_block bb = gimple_bb (use_stmt);
	      /* For PHI nodes the block we know something about
		 is the incoming block with the use.  */
	      if (gimple_code (use_stmt) == GIMPLE_PHI)
		bb = EDGE_PRED (bb, PHI_ARG_INDEX_FROM_USE (use_p))->src;
	      /* Any dominator of commondom would be ok with
		 adjusting commondom to that block.  */
	      bb = nearest_common_dominator (CDI_DOMINATORS, bb, commondom);
	      if (!found)
		found = bb;
	      else if (dominated_by_p (CDI_DOMINATORS, bb, found))
		found = bb;
	      /* If we can't improve, stop.  */
	      if (found == commondom)
		break;
	    }
	  commondom = found;
	  if (commondom == frombb)
	    return false;
	}

      /* Our common dominator has to be dominated by frombb in order to be a
	 trivially safe place to put this statement, since it has multiple
	 uses.  */
      if (!dominated_by_p (CDI_DOMINATORS, commondom, frombb))
	return false;

      commondom = select_best_block (frombb, commondom, stmt);

      if (commondom == frombb)
	return false;

      *togsi = gsi_after_labels (commondom);

      return true;
    }
  else
    {
      FOR_EACH_IMM_USE_FAST (one_use, imm_iter, DEF_FROM_PTR (def_p))
	{
	  if (is_gimple_debug (USE_STMT (one_use)))
	    continue;
	  break;
	}
      use = USE_STMT (one_use);

      if (gimple_code (use) != GIMPLE_PHI)
	{
	  sinkbb = select_best_block (frombb, gimple_bb (use), stmt);

	  if (sinkbb == frombb)
	    return false;

	  if (sinkbb == gimple_bb (use))
	    *togsi = gsi_for_stmt (use);
	  else
	    *togsi = gsi_after_labels (sinkbb);

	  return true;
	}
    }

  sinkbb = find_bb_for_arg (as_a <gphi *> (use), DEF_FROM_PTR (def_p));

  /* This can happen if there are multiple uses in a PHI.  */
  if (!sinkbb)
    return false;

  sinkbb = select_best_block (frombb, sinkbb, stmt);
  if (!sinkbb || sinkbb == frombb)
    return false;

  /* If the latch block is empty, don't make it non-empty by sinking
     something into it.  */
  if (sinkbb == frombb->loop_father->latch
      && empty_block_p (sinkbb))
    return false;

  *togsi = gsi_after_labels (sinkbb);

  return true;
}

/* Very simplistic code to sink common stores from the predecessors through
   our virtual PHI.  We do this before sinking stmts from BB as it might
   expose sinking opportunities of the merged stores.
   Once we have partial dead code elimination through something like SSU-PRE
   this should be moved there.  */
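
/* As an illustration (a hand-made example, not from a testcase):

     if (c_1)
       *p_2 = x_3;
     else
       *p_2 = y_4;

   becomes

     if (c_1)
       ;
     else
       ;
     tem_5 = PHI <x_3, y_4>
     *p_2 = tem_5;

   i.e. the two stores are removed and a single store of the PHI-merged
   value is inserted at the start of the merge block.  */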

static unsigned
sink_common_stores_to_bb (basic_block bb)
{
  unsigned todo = 0;
  gphi *phi;

  if (EDGE_COUNT (bb->preds) > 1
      && (phi = get_virtual_phi (bb)))
    {
      /* Repeat until no more common stores are found.  */
      while (1)
	{
	  gimple *first_store = NULL;
	  auto_vec <tree, 5> vdefs;
	  gimple_stmt_iterator gsi;

	  /* Search for common stores defined by all virtual PHI args.
	     ??? Common stores not present in all predecessors could
	     be handled by inserting a forwarder to sink to.  Generally
	     this involves deciding which stores to do this for if
	     multiple common stores are present for different sets of
	     predecessors.  See PR11832 for an interesting case.  */
	  for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
	    {
	      tree arg = gimple_phi_arg_def (phi, i);
	      gimple *def = SSA_NAME_DEF_STMT (arg);
	      if (! is_gimple_assign (def)
		  || stmt_can_throw_internal (cfun, def))
		{
		  /* ??? We could handle some cascading with the def being
		     another PHI.  We'd have to insert multiple PHIs for
		     the rhs then though (if they are not all equal).  */
		  first_store = NULL;
		  break;
		}
	      /* ??? Do not try to do anything fancy with aliasing, thus
		 do not sink across non-aliased loads (or even stores,
		 so different store order will make the sinking fail).  */
	      bool all_uses_on_phi = true;
	      imm_use_iterator iter;
	      use_operand_p use_p;
	      FOR_EACH_IMM_USE_FAST (use_p, iter, arg)
		if (USE_STMT (use_p) != phi)
		  {
		    all_uses_on_phi = false;
		    break;
		  }
	      if (! all_uses_on_phi)
		{
		  first_store = NULL;
		  break;
		}
	      /* Check all stores are to the same LHS.  */
	      if (! first_store)
		first_store = def;
	      /* ??? We could handle differing SSA uses in the LHS by inserting
		 PHIs for them.  */
	      else if (! operand_equal_p (gimple_assign_lhs (first_store),
					  gimple_assign_lhs (def), 0)
		       || (gimple_clobber_p (first_store)
			   != gimple_clobber_p (def)))
		{
		  first_store = NULL;
		  break;
		}
	      vdefs.safe_push (arg);
	    }
	  if (! first_store)
	    break;

	  /* Check if we need a PHI node to merge the stored values.  */
	  bool allsame = true;
	  if (!gimple_clobber_p (first_store))
	    for (unsigned i = 1; i < vdefs.length (); ++i)
	      {
		gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
		if (! operand_equal_p (gimple_assign_rhs1 (first_store),
				       gimple_assign_rhs1 (def), 0))
		  {
		    allsame = false;
		    break;
		  }
	      }

	  /* We cannot handle aggregate values if we need to merge them.  */
	  tree type = TREE_TYPE (gimple_assign_lhs (first_store));
	  if (! allsame
	      && ! is_gimple_reg_type (type))
	    break;

	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_OPTIMIZED_LOCATIONS,
			       first_store,
			       "sinking common stores %sto ",
			       allsame ? "with same value " : "");
	      dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM,
				 gimple_assign_lhs (first_store));
	      dump_printf (MSG_OPTIMIZED_LOCATIONS, "\n");
	    }

	  /* Insert a PHI to merge differing stored values if necessary.
	     Note that in general inserting PHIs isn't a very good idea as
	     it makes the job of coalescing and register allocation harder.
	     Even common SSA uses on the rhs/lhs might extend their lifetime
	     across multiple edges by this code motion which makes
	     register allocation harder.  */
	  tree from;
	  if (! allsame)
	    {
	      from = make_ssa_name (type);
	      gphi *newphi = create_phi_node (from, bb);
	      for (unsigned i = 0; i < vdefs.length (); ++i)
		{
		  gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
		  add_phi_arg (newphi, gimple_assign_rhs1 (def),
			       EDGE_PRED (bb, i), UNKNOWN_LOCATION);
		}
	    }
	  else
	    from = gimple_assign_rhs1 (first_store);

	  /* Remove all stores.  */
	  for (unsigned i = 0; i < vdefs.length (); ++i)
	    TREE_VISITED (vdefs[i]) = 1;
	  for (unsigned i = 0; i < vdefs.length (); ++i)
	    /* If we have more than one use of a VDEF on the PHI make sure
	       we remove the defining stmt only once.  */
	    if (TREE_VISITED (vdefs[i]))
	      {
		TREE_VISITED (vdefs[i]) = 0;
		gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
		gsi = gsi_for_stmt (def);
		unlink_stmt_vdef (def);
		gsi_remove (&gsi, true);
		release_defs (def);
	      }

	  /* Insert the first store at the beginning of the merge BB.  */
	  gimple_set_vdef (first_store, gimple_phi_result (phi));
	  SSA_NAME_DEF_STMT (gimple_vdef (first_store)) = first_store;
	  gimple_phi_set_result (phi, make_ssa_name (gimple_vop (cfun)));
	  gimple_set_vuse (first_store, gimple_phi_result (phi));
	  gimple_assign_set_rhs1 (first_store, from);
	  /* ??? Should we reset first_store's location?  */
	  gsi = gsi_after_labels (bb);
	  gsi_insert_before (&gsi, first_store, GSI_SAME_STMT);
	  sink_stats.commoned++;

	  todo |= TODO_cleanup_cfg;
	}

      /* We could now have empty predecessors that we could remove,
	 forming a proper CFG for further sinking.  Note that even
	 CFG cleanup doesn't do this fully at the moment and it
	 doesn't preserve post-dominators in the process either.
	 The mergephi pass might do it though.  gcc.dg/tree-ssa/ssa-sink-13.c
	 shows this nicely if you disable tail merging or (same effect)
	 make the stored values unequal.  */
    }

  return todo;
}

/* Perform code sinking on BB.  */

static unsigned
sink_code_in_bb (basic_block bb)
{
  basic_block son;
  gimple_stmt_iterator gsi;
  edge_iterator ei;
  edge e;
  bool last = true;
  unsigned todo = 0;

  /* Sink common stores from the predecessors through our virtual PHI.  */
  todo |= sink_common_stores_to_bb (bb);

  /* If this block doesn't dominate anything, there can't be any place to sink
     the statements to.  */
  if (first_dom_son (CDI_DOMINATORS, bb) == NULL)
    goto earlyout;

  /* We can't move things across abnormal edges, so don't try.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & EDGE_ABNORMAL)
      goto earlyout;

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
    {
      gimple *stmt = gsi_stmt (gsi);
      gimple_stmt_iterator togsi;
      bool zero_uses_p;

      if (!statement_sink_location (stmt, bb, &togsi, &zero_uses_p))
	{
	  gimple_stmt_iterator saved = gsi;
	  if (!gsi_end_p (gsi))
	    gsi_prev (&gsi);
	  /* If we face a dead stmt remove it as it possibly blocks
	     sinking of uses.  */
	  if (zero_uses_p
	      && ! gimple_vdef (stmt))
	    {
	      gsi_remove (&saved, true);
	      release_defs (stmt);
	    }
	  else
	    last = false;
	  continue;
	}
      if (dump_file)
	{
	  fprintf (dump_file, "Sinking ");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
	  fprintf (dump_file, " from bb %d to bb %d\n",
		   bb->index, (gsi_bb (togsi))->index);
	}

      /* Update virtual operands of statements in the path we
	 do not sink to.  */
      if (gimple_vdef (stmt))
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *vuse_stmt;

	  FOR_EACH_IMM_USE_STMT (vuse_stmt, iter, gimple_vdef (stmt))
	    if (gimple_code (vuse_stmt) != GIMPLE_PHI)
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, gimple_vuse (stmt));
	}

      /* If this is the end of the basic block, we need to insert at the end
	 of the basic block.  */
      if (gsi_end_p (togsi))
	gsi_move_to_bb_end (&gsi, gsi_bb (togsi));
      else
	gsi_move_before (&gsi, &togsi);

      sink_stats.sunk++;

      /* If we've just removed the last statement of the BB, the
	 gsi_end_p() test below would fail, but gsi_prev() would have
	 succeeded, and we want it to succeed.  So we keep track of
	 whether we're at the last statement and pick up the new last
	 statement.  */
      if (last)
	{
	  gsi = gsi_last_bb (bb);
	  continue;
	}

      last = false;
      if (!gsi_end_p (gsi))
	gsi_prev (&gsi);
    }
 earlyout:
  for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    {
      todo |= sink_code_in_bb (son);
    }

  return todo;
}

/* Perform code sinking.
   This moves code down the flowgraph when we know it would be
   profitable to do so, or it wouldn't increase the number of
   executions of the statement.

   I.e. given

   a_1 = b + c;
   if (<something>)
     {
     }
   else
     {
       foo (&b, &c);
       a_5 = b + c;
     }
   a_6 = PHI (a_5, a_1);
   USE a_6.

   we'll transform this into:

   if (<something>)
     {
       a_1 = b + c;
     }
   else
     {
       foo (&b, &c);
       a_5 = b + c;
     }
   a_6 = PHI (a_5, a_1);
   USE a_6.

   Note that this reduces the number of computations of a = b + c to 1
   when we take the else edge, instead of 2.
*/

namespace {

const pass_data pass_data_sink_code =
{
  GIMPLE_PASS, /* type */
  "sink", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SINK, /* tv_id */
  /* PROP_no_crit_edges is ensured by running split_edges_for_insertion in
     pass_sink_code::execute ().  */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_sink_code : public gimple_opt_pass
{
public:
  pass_sink_code (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_sink_code, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_sink != 0; }
  virtual unsigned int execute (function *);

}; // class pass_sink_code

unsigned int
pass_sink_code::execute (function *fun)
{
  loop_optimizer_init (LOOPS_NORMAL);
  split_edges_for_insertion ();
  connect_infinite_loops_to_exit ();
  memset (&sink_stats, 0, sizeof (sink_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  unsigned todo = sink_code_in_bb (EXIT_BLOCK_PTR_FOR_FN (fun));
  statistics_counter_event (fun, "Sunk statements", sink_stats.sunk);
  statistics_counter_event (fun, "Commoned stores", sink_stats.commoned);
  free_dominance_info (CDI_POST_DOMINATORS);
  remove_fake_exit_edges ();
  loop_optimizer_finalize ();

  return todo;
}

} // anon namespace

gimple_opt_pass *
make_pass_sink_code (gcc::context *ctxt)
{
  return new pass_sink_code (ctxt);
}