]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/tree-cfgcleanup.c
Update copyright years.
[thirdparty/gcc.git] / gcc / tree-cfgcleanup.c
CommitLineData
bfd49939 1/* CFG cleanup for trees.
fbd26352 2 Copyright (C) 2001-2019 Free Software Foundation, Inc.
bfd49939 3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify
7it under the terms of the GNU General Public License as published by
8c4c00c1 8the Free Software Foundation; either version 3, or (at your option)
bfd49939 9any later version.
10
11GCC is distributed in the hope that it will be useful,
12but WITHOUT ANY WARRANTY; without even the implied warranty of
13MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14GNU General Public License for more details.
15
16You should have received a copy of the GNU General Public License
8c4c00c1 17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
bfd49939 19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
9ef16211 23#include "backend.h"
7c29e30e 24#include "rtl.h"
bfd49939 25#include "tree.h"
9ef16211 26#include "gimple.h"
7c29e30e 27#include "cfghooks.h"
28#include "tree-pass.h"
9ef16211 29#include "ssa.h"
7c29e30e 30#include "diagnostic-core.h"
b20a8bb4 31#include "fold-const.h"
94ea8568 32#include "cfganal.h"
33#include "cfgcleanup.h"
bc61cadb 34#include "tree-eh.h"
a8783bee 35#include "gimplify.h"
dcf1a1ec 36#include "gimple-iterator.h"
073c1fd5 37#include "tree-cfg.h"
05d9c18a 38#include "tree-ssa-loop-manip.h"
073c1fd5 39#include "tree-dfa.h"
69ee5dbb 40#include "tree-ssa.h"
bfd49939 41#include "cfgloop.h"
b30560de 42#include "tree-scalar-evolution.h"
04a37b12 43#include "gimple-match.h"
44#include "gimple-fold.h"
ffeccaca 45#include "tree-ssa-loop-niter.h"
04a37b12 46
bfd49939 47
31a8456e 48/* The set of blocks in that at least one of the following changes happened:
49 -- the statement at the end of the block was changed
50 -- the block was newly created
51 -- the set of the predecessors of the block changed
52 -- the set of the successors of the block changed
53 ??? Maybe we could track these changes separately, since they determine
54 what cleanups it makes sense to try on the block. */
55bitmap cfgcleanup_altered_bbs;
56
bfd49939 57/* Remove any fallthru edge from EV. Return true if an edge was removed. */
58
59static bool
f1f41a6c 60remove_fallthru_edge (vec<edge, va_gc> *ev)
bfd49939 61{
62 edge_iterator ei;
63 edge e;
64
65 FOR_EACH_EDGE (e, ei, ev)
66 if ((e->flags & EDGE_FALLTHRU) != 0)
67 {
e2a6b9da 68 if (e->flags & EDGE_COMPLEX)
69 e->flags &= ~EDGE_FALLTHRU;
70 else
71 remove_edge_and_dominated_blocks (e);
bfd49939 72 return true;
73 }
74 return false;
75}
76
/* Convert a SWTCH with single non-default case to gcond and replace it
   at GSI.  Returns true on success, false if SWTCH has more than one
   non-default case label.  */

static bool
convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
{
  /* Exactly two labels: label 0 is the default, label 1 the single
     real case.  */
  if (gimple_switch_num_labels (swtch) != 2)
    return false;

  tree index = gimple_switch_index (swtch);
  tree label = gimple_switch_label (swtch, 1);
  tree low = CASE_LOW (label);
  tree high = CASE_HIGH (label);

  basic_block default_bb = gimple_switch_default_bb (cfun, swtch);
  basic_block case_bb = label_to_block (cfun, CASE_LABEL (label));

  basic_block bb = gimple_bb (swtch);
  gcond *cond;

  /* Replace switch statement with condition statement.  A case with a
     CASE_HIGH bound becomes a range test; a single-value case becomes
     a plain equality test against the (type-converted) low bound.  */
  if (high)
    {
      tree lhs, rhs;
      generate_range_test (bb, index, low, high, &lhs, &rhs);
      cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
    }
  else
    cond = gimple_build_cond (EQ_EXPR, index,
			      fold_convert (TREE_TYPE (index), low),
			      NULL_TREE, NULL_TREE);

  gsi_replace (&gsi, cond, true);

  /* Update edges: the case edge becomes the true edge of the new
     condition, the default edge the false edge.  */
  edge case_edge = find_edge (bb, case_bb);
  edge default_edge = find_edge (bb, default_bb);

  case_edge->flags |= EDGE_TRUE_VALUE;
  default_edge->flags |= EDGE_FALSE_VALUE;
  return true;
}
75a70cf9 119
/* Disconnect an unreachable block in the control expression starting
   at block BB.  GSI points at BB's ending control statement, which is
   removed and replaced by a fallthru edge.  Returns true if any edge
   was removed.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      /* Try to convert a switch with just a single non-default case to
	 GIMPLE condition.  */
      if (gimple_code (stmt) == GIMPLE_SWITCH
	  && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
	stmt = gsi_stmt (gsi);

      /* Defer overflow warnings: we only want to emit them if an edge
	 is actually removed because of the folded condition.  */
      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
	{
	case GIMPLE_COND:
	  {
	    /* Fold the condition without following SSA edges; only a
	       constant result lets us pick the taken edge.  */
	    gimple_match_op res_op;
	    if (gimple_simplify (stmt, &res_op, NULL, no_follow_ssa_edges,
				 no_follow_ssa_edges)
		&& res_op.code == INTEGER_CST)
	      val = res_op.ops[0];
	  }
	  break;

	case GIMPLE_SWITCH:
	  val = gimple_switch_index (as_a <gswitch *> (stmt));
	  break;

	default:
	  ;
	}
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return false;
	}

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e != taken_edge)
	    {
	      /* Emit the deferred overflow warning (at most once) now
		 that we know the fold has a visible effect.  */
	      if (!warned)
		{
		  fold_undefer_overflow_warnings
		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
		  warned = true;
		}

	      taken_edge->probability += e->probability;
	      remove_edge_and_dominated_blocks (e);
	      retval = true;
	    }
	  else
	    ei_next (&ei);
	}
      if (!warned)
	fold_undefer_and_ignore_overflow_warnings ();
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  /* Remove the control statement itself; the remaining edge is now a
     plain fallthru.  */
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}
202
c1e02247 203/* Cleanup the GF_CALL_CTRL_ALTERING flag according to
204 to updated gimple_call_flags. */
205
206static void
42acab1c 207cleanup_call_ctrl_altering_flag (gimple *bb_end)
c1e02247 208{
209 if (!is_gimple_call (bb_end)
210 || !gimple_call_ctrl_altering_p (bb_end))
211 return;
212
213 int flags = gimple_call_flags (bb_end);
214 if (((flags & (ECF_CONST | ECF_PURE))
215 && !(flags & ECF_LOOPING_CONST_OR_PURE))
216 || (flags & ECF_LEAF))
217 gimple_call_set_ctrl_altering (bb_end, false);
218}
219
/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  Handles four cases: stale EH edges, a
   constant-foldable COND/SWITCH ending BB, a computed goto with a
   compile-time known destination, and a call discovered noreturn.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to cleanup ctrl altering flag for call which ends bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      /* The control stmt must really be last (no trailing debug stmts).  */
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
	       == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
	 destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
	 edges which do not go to the right block.  For the one
	 edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      /* Labels of a different function (nested functions) cannot be
	 resolved here; leave the goto alone.  */
      if (DECL_CONTEXT (label) != cfun->decl)
	return retval;
      target_block = label_to_block (cfun, label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e->dest != target_block)
	    remove_edge_and_dominated_blocks (e);
	  else
	    {
	      /* Turn off the EDGE_ABNORMAL flag.  */
	      e->flags &= ~EDGE_ABNORMAL;

	      /* And set EDGE_FALLTHRU.  */
	      e->flags |= EDGE_FALLTHRU;
	      ei_next (&ei);
	    }
	}

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
	 relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
	   && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
	 now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
	gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
	retval = true;
    }

  return retval;
}
308
/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   If PHI_WANTED is true, BB must have PHI nodes to qualify; otherwise
   it must have none.

   As a precondition, we require that BB be not equal to
   the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
	 Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
	return false;
      /* If goto_locus of any of the edges differs, prevent removing
	 the forwarder block when not optimizing, so debug locations
	 are preserved.  */
      else if (!optimize
	       && (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
		   || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
	       && e->goto_locus != locus)
	return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
	{
	case GIMPLE_LABEL:
	  /* Non-local labels pin the block; mismatched locations do
	     too when not optimizing (same reason as above).  */
	  if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
	    return false;
	  if (!optimize
	      && (gimple_has_location (stmt)
		  || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
	      && gimple_location (stmt) != locus)
	    return false;
	  break;

	  /* ??? For now, hope there's a corresponding debug
	     assignment at the destination.  */
	case GIMPLE_DEBUG:
	  break;

	default:
	  return false;
	}
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
	return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
	{
	  if (bb->loop_father == dest->loop_father)
	    {
	      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
		return false;
	      /* If bb doesn't have a single predecessor we'd make this
		 loop have multiple latches.  Don't do that if that
		 would in turn require disambiguating them.  */
	      return (single_pred_p (bb)
		      || loops_state_satisfies_p
			   (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
	    }
	  else if (bb->loop_father == loop_outer (dest->loop_father))
	    return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
	  /* Always preserve other edges into loop headers that are
	     not simple latches or preheaders.  */
	  return false;
	}
    }

  return true;
}
416
bfd49939 417/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
418 those alternatives are equal in each of the PHI nodes, then return
419 true, else return false. */
420
421static bool
422phi_alternatives_equal (basic_block dest, edge e1, edge e2)
423{
424 int n1 = e1->dest_idx;
425 int n2 = e2->dest_idx;
1a91d914 426 gphi_iterator gsi;
bfd49939 427
75a70cf9 428 for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
bfd49939 429 {
1a91d914 430 gphi *phi = gsi.phi ();
75a70cf9 431 tree val1 = gimple_phi_arg_def (phi, n1);
432 tree val2 = gimple_phi_arg_def (phi, n2);
bfd49939 433
434 gcc_assert (val1 != NULL_TREE);
435 gcc_assert (val2 != NULL_TREE);
436
437 if (!operand_equal_for_phi_arg_p (val1, val2))
438 return false;
439 }
440
441 return true;
442}
443
/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *stmt;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is a
     EH landing pad, do not merge it.  */
  stmt = first_stmt (dest);
  if (stmt)
    if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
	  || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
	return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and rest of exception handling code
     does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
	  || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  s = find_edge (e->src, dest);
	  if (!s)
	    continue;

	  if (!phi_alternatives_equal (dest, succ, s))
	    return false;
	}
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  /* Remember BB's single predecessor (if any) before redirection, for
     the latch fixup at the end.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
	{
	  /* If there is an abnormal edge, redirect it anyway, and
	     move the labels to the new block to make it legal.  */
	  s = redirect_edge_succ_nodup (e, dest);
	}
      else
	s = redirect_edge_and_branch (e, dest);

      if (s == e)
	{
	  /* Create arguments for the phi nodes, since the edge was not
	     here before.  Each new argument copies the value the PHIs
	     already receive along SUCC.  */
	  for (gphi_iterator psi = gsi_start_phis (dest);
	       !gsi_end_p (psi);
	       gsi_next (&psi))
	    {
	      gphi *phi = psi.phi ();
	      location_t l = gimple_phi_arg_location_from_edge (phi, succ);
	      tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	      add_phi_arg (phi, unshare_expr (def), s, l);
	    }
	}
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	break;

      /* Forwarder blocks can only contain labels and debug stmts, and
	 labels must come first, so if we get to this point, we know
	 we're looking at a label.  */
      tree decl = gimple_label_label (as_a <glabel *> (stmt));
      if (EH_LANDING_PAD_NR (decl) != 0
	  || DECL_NONLOCAL (decl)
	  || FORCED_LABEL (decl)
	  || !DECL_ARTIFICIAL (decl))
	gsi_move_before (&gsi, &gsi_to);
      else
	gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts && !gsi_end_p (gsi))
    {
      gsi_to = gsi_after_labels (dest);
      do
	{
	  gimple *debug = gsi_stmt (gsi);
	  gcc_assert (is_gimple_debug (debug));
	  gsi_move_before (&gsi, &gsi_to);
	}
      while (!gsi_end_p (gsi));
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
	{
	  /* Shortcut to avoid calling (relatively expensive)
	     nearest_common_dominator unless necessary.  */
	  dom = dombb;
	}
      else
	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch infomation of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}
608
/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
	{
	  /* Don't split if there are only debug stmts
	     after stmt, that can result in -fcompare-debug
	     failures.  Remove the debug stmts instead,
	     they should be all unreachable anyway.  */
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  for (gsi_next (&gsi); !gsi_end_p (gsi); )
	    gsi_remove (&gsi, true);
	}
      else
	{
	  split_block (bb, stmt);
	  changed = true;
	}
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
	  || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.
	 Give the orphaned name a fresh anonymous VAR_DECL and make it
	 that variable's default definition.  */
      if (TREE_CODE (lhs) == SSA_NAME)
	{
	  tree new_var = create_tmp_reg (TREE_TYPE (lhs));
	  SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
	  SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
	  set_ssa_default_def (cfun, new_var, lhs);
	}

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}
677
4669b904 678/* Return true if we want to merge BB1 and BB2 into a single block. */
679
680static bool
681want_merge_blocks_p (basic_block bb1, basic_block bb2)
682{
683 if (!can_merge_blocks_p (bb1, bb2))
684 return false;
685 gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
686 if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
687 return true;
688 return bb1->count.ok_for_merging (bb2->count);
689}
690
3b4fe440 691
/* Tries to cleanup cfg in basic block BB by merging blocks.  Returns
   true if anything changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior with adjusting stmts BB pointer.  */
  if (single_pred_p (bb)
      && want_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destinations predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
	   && want_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}
725
/* Do cleanup_control_flow_bb in PRE order.  Returns true if anything
   changed.  Also deletes all blocks that the DFS walk from the entry
   block never reached.  */

static bool
cleanup_control_flow_pre ()
{
  bool retval = false;

  /* We want remove_edge_and_dominated_blocks to only remove edges,
     not dominated blocks which it does when dom info isn't available.
     Pretend so.  */
  dom_state saved_state = dom_info_state (CDI_DOMINATORS);
  set_dom_info_availability (CDI_DOMINATORS, DOM_NONE);

  /* Iterative DFS over successor edges, starting at the entry block.  */
  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));

  while (! stack.is_empty ())
    {
      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && ! bitmap_bit_p (visited, dest->index))
	{
	  bitmap_set_bit (visited, dest->index);
	  /* We only possibly remove edges from DEST here, leaving
	     possibly unreachable code in the IL.  */
	  retval |= cleanup_control_flow_bb (dest);
	  if (EDGE_COUNT (dest->succs) > 0)
	    stack.quick_push (ei_start (dest->succs));
	}
      else
	{
	  if (!ei_one_before_end_p (ei))
	    ei_next (&stack.last ());
	  else
	    stack.pop ();
	}
    }

  set_dom_info_availability (CDI_DOMINATORS, saved_state);

  /* We are deleting BBs in non-reverse dominator order, make sure
     insert_debug_temps_for_defs is prepared for that.  */
  if (retval)
    free_dominance_info (CDI_DOMINATORS);

  /* Remove all now (and previously) unreachable blocks.  */
  for (int i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); ++i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb && !bitmap_bit_p (visited, bb->index))
	{
	  /* Deleting blocks invalidates dominators; drop them lazily
	     the first time we actually delete one.  */
	  if (!retval)
	    free_dominance_info (CDI_DOMINATORS);
	  delete_basic_block (bb);
	  retval = true;
	}
    }

  return retval;
}
792
f8b0a3dc 793static bool
794mfb_keep_latches (edge e)
795{
11f5525b 796 return !((dom_info_available_p (CDI_DOMINATORS)
797 && dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
798 || (e->flags & EDGE_DFS_BACK));
f8b0a3dc 799}
0c6b4703 800
/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      /* This needs backedges or dominators.  */
      if (!dom_info_available_p (CDI_DOMINATORS))
	mark_dfs_back_edges ();

      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
	if (loop && loop->header)
	  {
	    basic_block bb = loop->header;
	    edge_iterator ei;
	    edge e;
	    bool found_latch = false;
	    bool any_abnormal = false;
	    unsigned n = 0;
	    /* We are only interested in preserving existing loops, but
	       we need to check whether they are still real and of course
	       if we need to add a preheader at all.  */
	    FOR_EACH_EDGE (e, ei, bb->preds)
	      {
		if (e->flags & EDGE_ABNORMAL)
		  {
		    any_abnormal = true;
		    break;
		  }
		if ((dom_info_available_p (CDI_DOMINATORS)
		     && dominated_by_p (CDI_DOMINATORS, e->src, bb))
		    || (e->flags & EDGE_DFS_BACK))
		  {
		    found_latch = true;
		    continue;
		  }
		n++;
	      }
	    /* If we have more than one entry to the loop header
	       create a forwarder.  */
	    if (found_latch && ! any_abnormal && n > 1)
	      {
		edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
						      NULL);
		loop->header = fallthru->dest;
		if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
		  {
		    /* The loop updating from the CFG hook is incomplete
		       when we have multiple latches, fixup manually.  */
		    remove_bb_from_loops (fallthru->src);
		    loop_p cloop = loop;
		    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
		      cloop = find_common_loop (cloop, e->src->loop_father);
		    add_bb_to_loop (fallthru->src, cloop);
		  }
	      }
	  }
    }

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* Start by iterating over all basic blocks in PRE order looking for
     edge removal opportunities.  Do this first because incoming SSA form
     may be invalid and we want to avoid performing SSA related tasks such
     as propgating out a PHI node during BB merging in that state.  This
     also gets rid of unreachable blocks.  */
  bool changed = cleanup_control_flow_pre ();

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */

  /* Compute dominator info which we need for the iterative process below.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    calculate_dominance_info (CDI_DOMINATORS);
  else
    checking_verify_dominators (CDI_DOMINATORS);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  We cannot use FOR_EACH_BB_FN for the BB iteration
     since the basic blocks may get removed.  */
  unsigned n = last_basic_block_for_fn (cfun);
  for (unsigned i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
	changed |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      unsigned i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
	continue;

      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
	continue;

      /* BB merging done by cleanup_tree_cfg_bb can end up propagating
	 out single-argument PHIs which in turn can expose
	 cleanup_control_flow_bb opportunities so we have to repeat
	 that here.  */
      changed |= cleanup_control_flow_bb (bb);
      changed |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Do not renumber blocks if the SCEV cache is active, it is indexed by
     basic-block numbers.  */
  if (! scev_initialized_p ())
    compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
	 to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}
952
/* Repairs loop structures after CFG cleanup removed edges or blocks:
   recomputes dominators, rediscovers the loop tree, and rewrites back
   into loop-closed SSA form where that invariant is in force.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  /* fix_loop_structure needs up-to-date dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  /* CHANGED_BBS collects blocks whose loop membership changed; the
     return value is the number of newly discovered loops.  */
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  New loops force the full rewrite
     (NULL instead of CHANGED_BBS) since their exits are unknown.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
				  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  /* The SCEV cache is keyed on loops that may have been removed or
     renumbered, so it has to be invalidated.  */
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}
983
984/* Cleanup cfg and repair loop structures. */
985
986bool
987cleanup_tree_cfg (void)
988{
989 bool changed = cleanup_tree_cfg_noloop ();
990
991 if (current_loops != NULL
f24ec26f 992 && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
eb2a640e 993 repair_loop_structures ();
994
6fa78c7b 995 return changed;
bfd49939 996}
997
/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   BB is expected to be a forwarder block (see tree_forwarder_block_p):
   all incoming edges are redirected to its successor DEST and the PHI
   arguments at DEST are fixed up accordingly, then BB is deleted.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
	return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
	{
	  /* We already have an edge S from E->src to DEST.  If S and
	     E->dest's sole successor edge have the same PHI arguments
	     at DEST, redirect S to DEST.  */
	  if (phi_alternatives_equal (dest, s, succ))
	    {
	      /* The redirected edge folds into the existing edge S;
		 drop the var map recorded for E since nothing new
		 needs merging at DEST.  */
	      e = redirect_edge_and_branch (e, dest);
	      redirect_edge_var_map_clear (e);
	      continue;
	    }

	  /* PHI arguments are different.  Create a forwarder block by
	     splitting E so that we can merge PHI arguments on E to
	     DEST.  */
	  e = single_succ_edge (split_edge (e));
	}
      else
	{
	  /* If we merge the forwarder into a loop header verify if we
	     are creating another loop latch edge.  If so, reset
	     number of iteration information of the loop.  */
	  if (dest->loop_father->header == dest
	      && dominated_by_p (CDI_DOMINATORS, e->src, dest))
	    {
	      dest->loop_father->any_upper_bound = false;
	      dest->loop_father->any_likely_upper_bound = false;
	      free_numbers_of_iterations_estimates (dest->loop_father);
	    }
	}

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
	 destination of E.  */
      for (gsi = gsi_start_phis (dest);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	  location_t locus = gimple_phi_arg_location_from_edge (phi, succ);

	  if (TREE_CODE (def) == SSA_NAME)
	    {
	      /* If DEF is one of the results of PHI nodes removed during
		 redirection, replace it with the PHI argument that used
		 to be on E.  */
	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
	      size_t length = head ? head->length () : 0;
	      for (size_t i = 0; i < length; i++)
		{
		  edge_var_map *vm = &(*head)[i];
		  tree old_arg = redirect_edge_var_map_result (vm);
		  tree new_arg = redirect_edge_var_map_def (vm);

		  if (def == old_arg)
		    {
		      def = new_arg;
		      locus = redirect_edge_var_map_location (vm);
		      break;
		    }
		}
	    }

	  add_phi_arg (phi, def, s, locus);
	}

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
	 nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}
1140
1141/* This pass merges PHI nodes if one feeds into another. For example,
1142 suppose we have the following:
1143
1144 goto <bb 9> (<L9>);
1145
1146<L8>:;
1147 tem_17 = foo ();
1148
1149 # tem_6 = PHI <tem_17(8), tem_23(7)>;
1150<L9>:;
1151
1152 # tem_3 = PHI <tem_6(9), tem_2(5)>;
1153<L10>:;
1154
1155 Then we merge the first PHI node into the second one like so:
1156
1157 goto <bb 9> (<L10>);
1158
1159<L8>:;
1160 tem_17 = foo ();
1161
1162 # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
1163<L10>:;
1164*/
1165
namespace {

/* Pass descriptor for the merge-PHI pass; requires the CFG and SSA
   form and changes no IL properties itself.  */
const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

/* Main driver: collect forwarder blocks whose PHIs can be merged into
   their successor, then remove them one by one.  */

unsigned int
pass_merge_phi::execute (function *fun)
{
  /* Worklist of candidate blocks; at most one entry per basic block.  */
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
	continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
	 nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
	  /* We don't want to deal with a basic block with
	     abnormal edges.  */
	  || bb_has_abnormal_pred (bb))
	continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
	{
	  /* If BB does not dominate DEST, then the PHI nodes at
	     DEST must be the only users of the results of the PHI
	     nodes at BB.  */
	  *current++ = bb;
	}
      else
	{
	  gphi_iterator gsi;
	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

	  /* BB dominates DEST.  There may be many users of the PHI
	     nodes in BB.  However, there is still a trivial case we
	     can handle.  If the result of every PHI in BB is used
	     only by a PHI in DEST, then we can trivially merge the
	     PHI nodes from BB into DEST.  */
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	       gsi_next (&gsi))
	    {
	      gphi *phi = gsi.phi ();
	      tree result = gimple_phi_result (phi);
	      use_operand_p imm_use;
	      gimple *use_stmt;

	      /* If the PHI's result is never used, then we can just
		 ignore it.  */
	      if (has_zero_uses (result))
		continue;

	      /* Get the single use of the result of this PHI node.
		 Bail out unless that use is a PHI in DEST consuming
		 RESULT on the edge from BB.  */
	      if (!single_imm_use (result, &imm_use, &use_stmt)
		  || gimple_code (use_stmt) != GIMPLE_PHI
		  || gimple_bb (use_stmt) != dest
		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
		break;
	    }

	  /* If the loop above iterated through all the PHI nodes
	     in BB, then we can merge the PHIs from BB into DEST.  */
	  if (gsi_end_p (gsi))
	    *current++ = bb;
	}
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace
1286
/* Factory function used by the pass manager to instantiate the
   merge-PHI pass.  */

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}
424a4a92 1292
/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  Returns the TODO flags for the pass manager.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      /* The cleanup already happened; swap the request for a further
	 cleanup into a request to update SSA form instead.  */
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  if (group_case_labels ())
    todo |= TODO_cleanup_cfg;
  /* For -fcompare-debug, append a dump of the enumerated decls to the
     file named by -fdump-final-insns.  */
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
	{
	  error ("could not open final insn dump file %qs: %m",
		 flag_dump_final_insns);
	  /* Clear the file name so later stages don't retry.  */
	  flag_dump_final_insns = NULL;
	}
      else
	{
	  /* Dump without addresses and instance numbers so the output
	     is stable across the compare-debug recompilation.  */
	  int save_unnumbered = flag_dump_unnumbered;
	  int save_noaddr = flag_dump_noaddr;

	  flag_dump_noaddr = flag_dump_unnumbered = 1;
	  fprintf (final_output, "\n");
	  dump_enumerated_decls (final_output,
				 dump_flags | TDF_SLIM | TDF_NOUID);
	  flag_dump_noaddr = save_noaddr;
	  flag_dump_unnumbered = save_unnumbered;
	  /* fclose flushes buffered output; a failure here means the
	     dump is incomplete, so report it.  */
	  if (fclose (final_output))
	    {
	      error ("could not close final insn dump file %qs: %m",
		     flag_dump_final_insns);
	      flag_dump_final_insns = NULL;
	    }
	}
    }
  return todo;
}
1343
namespace {

/* Pass descriptor for the final "optimized" CFG cleanup pass; also
   requests removal of unused locals once it finishes.  */
const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace
1375
/* Factory function used by the pass manager to instantiate the final
   pre-expand CFG cleanup pass.  */

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}
1381
1382