/* CFG cleanup for trees.
   Copyright (C) 2001-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "gimple-match.h"
#include "gimple-fold.h"
#include "tree-ssa-loop-niter.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
   what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}

/* Convert a SWTCH with a single non-default case to a gcond and replace it
   at GSI.  */

static bool
convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
{
  if (gimple_switch_num_labels (swtch) != 2)
    return false;

  tree index = gimple_switch_index (swtch);
  tree default_label = CASE_LABEL (gimple_switch_default_label (swtch));
  tree label = gimple_switch_label (swtch, 1);
  tree low = CASE_LOW (label);
  tree high = CASE_HIGH (label);

  basic_block default_bb = label_to_block_fn (cfun, default_label);
  basic_block case_bb = label_to_block_fn (cfun, CASE_LABEL (label));

  basic_block bb = gimple_bb (swtch);
  gcond *cond;

  /* Replace switch statement with condition statement.  */
  if (high)
    {
      tree lhs, rhs;
      generate_range_test (bb, index, low, high, &lhs, &rhs);
      cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
    }
  else
    cond = gimple_build_cond (EQ_EXPR, index,
                              fold_convert (TREE_TYPE (index), low),
                              NULL_TREE, NULL_TREE);

  gsi_replace (&gsi, cond, true);

  /* Update edges.  */
  edge case_edge = find_edge (bb, case_bb);
  edge default_edge = find_edge (bb, default_bb);

  case_edge->flags |= EDGE_TRUE_VALUE;
  default_edge->flags |= EDGE_FALSE_VALUE;
  return true;
}
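
/* For instance, a two-label switch

     switch (i_1) { case 42: goto <case_bb>; default: goto <default_bb>; }

   is rewritten by convert_single_case_switch into

     if (i_1 == 42) goto <case_bb>; else goto <default_bb>;

   while a range "case 40 ... 42:" is funneled through generate_range_test
   and tested with a single LE_EXPR comparison.  */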

/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      /* Try to convert a switch with just a single non-default case to
         a GIMPLE condition.  */
      if (gimple_code (stmt) == GIMPLE_SWITCH
          && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
        stmt = gsi_stmt (gsi);

      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            code_helper rcode;
            tree ops[3] = {};
            if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
                                 no_follow_ssa_edges)
                && rcode == INTEGER_CST)
              val = ops[0];
          }
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (as_a <gswitch *> (stmt));
          break;

        default:
          ;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Cleanup the GF_CALL_CTRL_ALTERING flag according to
   the updated gimple_call_flags.  */

static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}
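
/* For example, a call that was conservatively marked as control altering
   but is later proven const or pure (and known to terminate), or leaf,
   cannot in fact transfer control abnormally, so the flag is dropped and
   the block becomes eligible for merging with its successor.  */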

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to clean up the ctrl altering flag for a call which ends BB.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      if (DECL_CONTEXT (label) != cfun->decl)
        return retval;
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
         now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
        gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
        retval = true;
    }

  return retval;
}

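/* As an example of the computed goto case above, for

     goto *&&lab;

   the destination is the compile-time constant label "lab": every
   outgoing edge not leading to the block holding "lab" is removed,
   the surviving edge loses EDGE_ABNORMAL and gains EDGE_FALLTHRU,
   and the goto statement itself is deleted.  */
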
/* Return true if basic block BB does nothing except pass control
   flow to another block, and we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB not be the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (!single_succ_p (bb)
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

        /* ??? For now, hope there's a corresponding debug
           assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
        return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
        {
          if (bb->loop_father == dest->loop_father)
            {
              if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
                return false;
              /* If bb doesn't have a single predecessor we'd make this
                 loop have multiple latches.  Don't do that if that
                 would in turn require disambiguating them.  */
              return (single_pred_p (bb)
                      || loops_state_satisfies_p
                           (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
            }
          else if (bb->loop_father == loop_outer (dest->loop_father))
            return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
          /* Always preserve other edges into loop headers that are
             not simple latches or preheaders.  */
          return false;
        }
    }

  return true;
}

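/* A forwarder block accepted above (for PHI_WANTED == false) contains
   nothing but removable labels and debug stmts, e.g.

     <bb 5>:
     goto <bb 7>;

   in the dump format, with a single normal successor edge and no
   abnormal, EH or entry predecessors.  */
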
/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}

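/* For instance, with E1 and E2 having destination indices 0 and 1,

     # x_1 = PHI <a_2(E1), a_2(E2), b_3(E3)>

   has equal alternatives for E1 and E2, whereas

     # y_4 = PHI <a_2(E1), b_3(E2), b_3(E3)>

   does not, so the latter makes phi_alternatives_equal fail.  */
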
/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
          || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
        return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gphi_iterator psi = gsi_start_phis (dest);
               !gsi_end_p (psi);
               gsi_next (&psi))
            {
              gphi *phi = psi.phi ();
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (as_a <glabel *> (label));
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple *debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

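/* E.g. for a forwarder <bb 5> sitting between <bb 4> and <bb 7>, the
   edge 4->5 is redirected straight to <bb 7>, required labels and any
   movable debug stmts migrate into <bb 7>, the dominator of <bb 7> is
   recomputed, and <bb 5> is deleted.  */
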
/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
        {
          /* Don't split if there are only debug stmts
             after stmt, that can result in -fcompare-debug
             failures.  Remove the debug stmts instead,
             they should be all unreachable anyway.  */
          gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
          for (gsi_next (&gsi); !gsi_end_p (gsi); )
            gsi_remove (&gsi, true);
        }
      else
        {
          split_block (bb, stmt);
          changed = true;
        }
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
          || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
        {
          tree new_var = create_tmp_reg (TREE_TYPE (lhs));
          SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
          SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
          set_ssa_default_def (cfun, new_var, lhs);
        }

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}

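/* For example, once the call in

     x_1 = foo ();
     bar ();

   is discovered to be noreturn, the block is split after the call
   (leaving "bar ()" in a separate, now unreachable block), the dead
   LHS "x_1" is dropped, and the call is marked control altering so
   that cfg cleanup can prune the stale fallthru edge.  */
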
/* Return true if we want to merge BB1 and BB2 into a single block.  */

static bool
want_merge_blocks_p (basic_block bb1, basic_block bb2)
{
  if (!can_merge_blocks_p (bb1, bb2))
    return false;
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
  if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
    return true;
  return bb1->count.ok_for_merging (bb2->count);
}

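/* The ok_for_merging check above is a profile heuristic: when BB1 ends
   in a statement that may terminate the block early, merging it with a
   BB2 of a very different execution count would force one count onto
   paths that demonstrably execute a different number of times.  */
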
/* Tries to clean up the CFG in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior with adjusting stmts BB pointer.  */
  if (single_pred_p (bb)
      && want_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destinations predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
           && want_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}

/* Do cleanup_control_flow_bb in PRE order.  */

static bool
cleanup_control_flow_pre ()
{
  bool retval = false;

  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));

  while (! stack.is_empty ())
    {
      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
          && ! bitmap_bit_p (visited, dest->index))
        {
          bitmap_set_bit (visited, dest->index);
          retval |= cleanup_control_flow_bb (dest);
          if (EDGE_COUNT (dest->succs) > 0)
            stack.quick_push (ei_start (dest->succs));
        }
      else
        {
          if (!ei_one_before_end_p (ei))
            ei_next (&stack.last ());
          else
            stack.pop ();
        }
    }

  return retval;
}

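/* The explicit stack of edge iterators implements a depth-first walk
   from the entry block; each block is cleaned up the first time it is
   reached, and a block whose incoming edges are all removed before the
   walk gets there is never visited at all.  */
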
/* Iterate the cfg cleanups, while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* We cannot use FOR_EACH_BB_FN for the BB iterations below
     since the basic blocks may get removed.  */

  /* Start by iterating over all basic blocks in PRE order looking for
     edge removal opportunities.  Do this first because incoming SSA form
     may be invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  */
  retval |= cleanup_control_flow_pre ();

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
        continue;

      retval |= cleanup_control_flow_bb (bb);
      retval |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}

/* Edge predicate for the make_forwarder_block call below: returns true
   for edges that do not come from a latch, i.e. whose source is not
   dominated by the prospective loop header E->dest.  */

static bool
mfb_keep_latches (edge e)
{
  return ! dominated_by_p (CDI_DOMINATORS, e->src, e->dest);
}

/* Remove unreachable blocks and do other miscellaneous cleanup work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
      checking_verify_dominators (CDI_DOMINATORS);
      changed = false;
    }

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
        if (loop && loop->header)
          {
            basic_block bb = loop->header;
            edge_iterator ei;
            edge e;
            bool found_latch = false;
            bool any_abnormal = false;
            unsigned n = 0;
            /* We are only interested in preserving existing loops, but
               we need to check whether they are still real and of course
               if we need to add a preheader at all.  */
            FOR_EACH_EDGE (e, ei, bb->preds)
              {
                if (e->flags & EDGE_ABNORMAL)
                  {
                    any_abnormal = true;
                    break;
                  }
                if (dominated_by_p (CDI_DOMINATORS, e->src, bb))
                  {
                    found_latch = true;
                    continue;
                  }
                n++;
              }
            /* If we have more than one entry to the loop header
               create a forwarder.  */
            if (found_latch && ! any_abnormal && n > 1)
              {
                edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
                                                      NULL);
                loop->header = fallthru->dest;
                if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
                  {
                    /* The loop updating from the CFG hook is incomplete
                       when we have multiple latches, fixup manually.  */
                    remove_bb_from_loops (fallthru->src);
                    loop_p cloop = loop;
                    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
                      cloop = find_common_loop (cloop, e->src->loop_father);
                    add_bb_to_loop (fallthru->src, cloop);
                  }
              }
          }
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Do not renumber blocks if the SCEV cache is active, it is indexed by
     basic-block numbers.  */
  if (! scev_initialized_p ())
    compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
         to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}

935
592c303d 936/* Repairs loop structures. */
c9784e6d 937
592c303d
ZD
938static void
939repair_loop_structures (void)
c9784e6d 940{
a222c01a 941 bitmap changed_bbs;
8e89b5b5 942 unsigned n_new_loops;
a222c01a 943
cc360b36
SB
944 calculate_dominance_info (CDI_DOMINATORS);
945
a222c01a
MM
946 timevar_push (TV_REPAIR_LOOPS);
947 changed_bbs = BITMAP_ALLOC (NULL);
8e89b5b5 948 n_new_loops = fix_loop_structure (changed_bbs);
c9784e6d 949
592c303d
ZD
950 /* This usually does nothing. But sometimes parts of cfg that originally
951 were inside a loop get out of it due to edge removal (since they
8e89b5b5
RB
952 become unreachable by back edges from latch). Also a former
953 irreducible loop can become reducible - in this case force a full
954 rewrite into loop-closed SSA form. */
f87000d0 955 if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
8e89b5b5
RB
956 rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
957 TODO_update_ssa);
c9784e6d 958
592c303d 959 BITMAP_FREE (changed_bbs);
c9784e6d 960
b2b29377 961 checking_verify_loop_structure ();
592c303d
ZD
962 scev_reset ();
963
a222c01a 964 timevar_pop (TV_REPAIR_LOOPS);
592c303d
ZD
965}
966
967/* Cleanup cfg and repair loop structures. */
968
969bool
970cleanup_tree_cfg (void)
971{
972 bool changed = cleanup_tree_cfg_noloop ();
973
974 if (current_loops != NULL
f87000d0 975 && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
592c303d
ZD
976 repair_loop_structures ();
977
1994bfea 978 return changed;
c9784e6d
KH
979}
980
/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
        return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect S to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }
      else
        {
          /* If we merge the forwarder into a loop header verify if we
             are creating another loop latch edge.  If so, reset
             number of iteration information of the loop.  */
          if (dest->loop_father->header == dest
              && dominated_by_p (CDI_DOMINATORS, e->src, dest))
            {
              dest->loop_father->any_upper_bound = false;
              dest->loop_father->any_likely_upper_bound = false;
              free_numbers_of_iterations_estimates (dest->loop_father);
            }
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
              size_t length = head ? head->length () : 0;
              for (size_t i = 0; i < length; i++)
                {
                  edge_var_map *vm = &(*head)[i];
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gphi_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gphi *phi = gsi.phi ();
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple *use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  if (group_case_labels ())
    todo |= TODO_cleanup_cfg;
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}