/* CFG cleanup for trees.
   Copyright (C) 2001-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "gimple-match.h"
#include "gimple-fold.h"
#include "tree-ssa-loop-niter.h"

/* The set of blocks in which at least one of the following changes happened:
     -- the statement at the end of the block was changed
     -- the block was newly created
     -- the set of the predecessors of the block changed
     -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
   what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
	if (e->flags & EDGE_COMPLEX)
	  e->flags &= ~EDGE_FALLTHRU;
	else
	  remove_edge_and_dominated_blocks (e);
	return true;
      }
  return false;
}

/* Convert SWTCH, a switch statement with a single non-default case,
   to a gcond and replace it at GSI.  */
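
/* For example (an illustrative sketch, not taken from a testcase),
   a switch of the form

     switch (i_1) <default: <L2>, case 5: <L1>>

   is replaced by

     if (i_1 == 5) goto <L1>; else goto <L2>;

   and for a range 'case 5 ... 7:' the comparison built by
   generate_range_test is used on the true edge instead.  */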

static bool
convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
{
  if (gimple_switch_num_labels (swtch) != 2)
    return false;

  tree index = gimple_switch_index (swtch);
  tree label = gimple_switch_label (swtch, 1);
  tree low = CASE_LOW (label);
  tree high = CASE_HIGH (label);

  basic_block default_bb = gimple_switch_default_bb (cfun, swtch);
  basic_block case_bb = label_to_block (cfun, CASE_LABEL (label));

  basic_block bb = gimple_bb (swtch);
  gcond *cond;

  /* Replace switch statement with condition statement.  */
  if (high)
    {
      tree lhs, rhs;
      generate_range_test (bb, index, low, high, &lhs, &rhs);
      cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
    }
  else
    cond = gimple_build_cond (EQ_EXPR, index,
			      fold_convert (TREE_TYPE (index), low),
			      NULL_TREE, NULL_TREE);

  gsi_replace (&gsi, cond, true);

  /* Update edges.  */
  edge case_edge = find_edge (bb, case_bb);
  edge default_edge = find_edge (bb, default_bb);

  case_edge->flags |= EDGE_TRUE_VALUE;
  default_edge->flags |= EDGE_FALSE_VALUE;
  return true;
}

/* Disconnect an unreachable block in the control expression starting
   at block BB.  */
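
/* A sketch of the case handled here: if BB ends in

     if (0 != 0) goto <L1>; else goto <L2>;

   the condition simplifies to a constant, so the edge to <L1> and the
   blocks it dominates are removed, the branch statement itself is
   deleted, and the surviving edge to <L2> becomes a plain fallthru.  */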

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      /* Try to convert a switch with just a single non-default case to
	 GIMPLE condition.  */
      if (gimple_code (stmt) == GIMPLE_SWITCH
	  && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
	stmt = gsi_stmt (gsi);

      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
	{
	case GIMPLE_COND:
	  {
	    gimple_match_op res_op;
	    if (gimple_simplify (stmt, &res_op, NULL, no_follow_ssa_edges,
				 no_follow_ssa_edges)
		&& res_op.code == INTEGER_CST)
	      val = res_op.ops[0];
	  }
	  break;

	case GIMPLE_SWITCH:
	  val = gimple_switch_index (as_a <gswitch *> (stmt));
	  break;

	default:
	  ;
	}
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return false;
	}

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e != taken_edge)
	    {
	      if (!warned)
		{
		  fold_undefer_overflow_warnings
		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
		  warned = true;
		}

	      taken_edge->probability += e->probability;
	      remove_edge_and_dominated_blocks (e);
	      retval = true;
	    }
	  else
	    ei_next (&ei);
	}
      if (!warned)
	fold_undefer_and_ignore_overflow_warnings ();
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Clean up the GF_CALL_CTRL_ALTERING flag according to the updated
   gimple_call_flags.  */
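
/* For instance, a call that was conservatively marked control altering
   may later be discovered const, pure (and not looping) or leaf; such a
   call can no longer alter control flow, so the flag is cleared (a
   paraphrase of the condition below, not an exhaustive description).  */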

static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to clean up the ctrl-altering flag for the call that ends BB.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
	       == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
	 destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
	 edges which do not go to the right block.  For the one
	 edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      if (DECL_CONTEXT (label) != cfun->decl)
	return retval;
      target_block = label_to_block (cfun, label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e->dest != target_block)
	    remove_edge_and_dominated_blocks (e);
	  else
	    {
	      /* Turn off the EDGE_ABNORMAL flag.  */
	      e->flags &= ~EDGE_ABNORMAL;

	      /* And set EDGE_FALLTHRU.  */
	      e->flags |= EDGE_FALLTHRU;
	      ei_next (&ei);
	    }
	}

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
	 relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
	   && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
	 now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
	gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
	retval = true;
    }

  return retval;
}

/* Return true if basic block BB does nothing except pass control
   flow to another block and we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB not be the entry block.  */
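
/* A typical forwarder block (a sketch) contains nothing but the
   control transfer, possibly preceded by a label and debug stmts:

   <bb 3>:
     goto <bb 5>;

   With PHI_WANTED the block must additionally carry PHI nodes, as
   required by the mergephi pass below.  */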

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
	 Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
	return false;
      /* If goto_locus of any of the edges differs, prevent removing
	 the forwarder block when not optimizing.  */
      else if (!optimize
	       && (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
		   || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
	       && e->goto_locus != locus)
	return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
	{
	case GIMPLE_LABEL:
	  if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
	    return false;
	  if (!optimize
	      && (gimple_has_location (stmt)
		  || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
	      && gimple_location (stmt) != locus)
	    return false;
	  break;

	  /* ??? For now, hope there's a corresponding debug
	     assignment at the destination.  */
	case GIMPLE_DEBUG:
	  break;

	default:
	  return false;
	}
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
	return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
	{
	  if (bb->loop_father == dest->loop_father)
	    {
	      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
		return false;
	      /* If bb doesn't have a single predecessor we'd make this
		 loop have multiple latches.  Don't do that if that
		 would in turn require disambiguating them.  */
	      return (single_pred_p (bb)
		      || loops_state_satisfies_p
			   (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
	    }
	  else if (bb->loop_father == loop_outer (dest->loop_father))
	    return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
	  /* Always preserve other edges into loop headers that are
	     not simple latches or preheaders.  */
	  return false;
	}
    }

  return true;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */
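
/* For instance (a sketch), given

     # x_1 = PHI <a_2 (E1), a_2 (E2), b_3 (E3)>

   the alternatives for E1 and E2 are equal, while those for E1 and E3
   are not.  */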

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
	return false;
    }

  return true;
}

/* Removes forwarder block BB.  Returns false if this failed.  */
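
/* A sketch of the transformation: if <bb 3> contains nothing but
   'goto <bb 5>;', every edge into <bb 3> is redirected straight to
   <bb 5>, PHI arguments on the old <bb 3> -> <bb 5> edge are copied
   to the redirected edges, labels that must survive are moved into
   <bb 5>, and <bb 3> is deleted.  */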

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *stmt;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  stmt = first_stmt (dest);
  if (stmt)
    if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
	  || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
	return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and rest of exception handling code
     does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
	  || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  s = find_edge (e->src, dest);
	  if (!s)
	    continue;

	  if (!phi_alternatives_equal (dest, succ, s))
	    return false;
	}
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
	{
	  /* If there is an abnormal edge, redirect it anyway, and
	     move the labels to the new block to make it legal.  */
	  s = redirect_edge_succ_nodup (e, dest);
	}
      else
	s = redirect_edge_and_branch (e, dest);

      if (s == e)
	{
	  /* Create arguments for the phi nodes, since the edge was not
	     here before.  */
	  for (gphi_iterator psi = gsi_start_phis (dest);
	       !gsi_end_p (psi);
	       gsi_next (&psi))
	    {
	      gphi *phi = psi.phi ();
	      location_t l = gimple_phi_arg_location_from_edge (phi, succ);
	      tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	      add_phi_arg (phi, unshare_expr (def), s, l);
	    }
	}
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	break;

      /* Forwarder blocks can only contain labels and debug stmts, and
	 labels must come first, so if we get to this point, we know
	 we're looking at a label.  */
      tree decl = gimple_label_label (as_a <glabel *> (stmt));
      if (EH_LANDING_PAD_NR (decl) != 0
	  || DECL_NONLOCAL (decl)
	  || FORCED_LABEL (decl)
	  || !DECL_ARTIFICIAL (decl))
	gsi_move_before (&gsi, &gsi_to);
      else
	gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts && !gsi_end_p (gsi))
    {
      gsi_to = gsi_after_labels (dest);
      do
	{
	  gimple *debug = gsi_stmt (gsi);
	  gcc_assert (is_gimple_debug (debug));
	  gsi_move_before (&gsi, &gsi_to);
	}
      while (!gsi_end_p (gsi));
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
	{
	  /* Shortcut to avoid calling (relatively expensive)
	     nearest_common_dominator unless necessary.  */
	  dom = dombb;
	}
      else
	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */
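
/* A sketch (hypothetical IL): once 'x_1 = foo ();' is discovered
   noreturn, everything after the call is unreachable, so the block is
   split after it, the LHS x_1 is dropped (getting a new default
   definition so the SSA web stays consistent), and the call is marked
   control altering so a later CFG cleanup removes the dead outgoing
   edge.  */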

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
	{
	  /* Don't split if there are only debug stmts
	     after stmt, that can result in -fcompare-debug
	     failures.  Remove the debug stmts instead,
	     they should be all unreachable anyway.  */
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  for (gsi_next (&gsi); !gsi_end_p (gsi); )
	    gsi_remove (&gsi, true);
	}
      else
	{
	  split_block (bb, stmt);
	  changed = true;
	}
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
	  || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
	{
	  tree new_var = create_tmp_reg (TREE_TYPE (lhs));
	  SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
	  SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
	  set_ssa_default_def (cfun, new_var, lhs);
	}

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}

/* Return true if we want to merge BB1 and BB2 into a single block.  */
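
/* Besides the structural check in can_merge_blocks_p this consults the
   profile: if BB1 ends in a statement after which control may not
   reach BB2, the two blocks can legitimately have different counts,
   and merging is only done when those counts are close enough (a
   paraphrase of the logic below).  */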

static bool
want_merge_blocks_p (basic_block bb1, basic_block bb2)
{
  if (!can_merge_blocks_p (bb1, bb2))
    return false;
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
  if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
    return true;
  return bb1->count.ok_for_merging (bb2->count);
}

/* Tries to clean up the CFG in basic block BB by merging blocks.  Returns
   true if anything changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior with adjusting stmts BB pointer.  */
  if (single_pred_p (bb)
      && want_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destinations predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
	   && want_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}

/* Do cleanup_control_flow_bb in PRE order.  */
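
/* The walk below doubles as a reachability computation: blocks never
   reached from the entry block are left out of VISITED and are deleted
   wholesale once the traversal is done.  */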

static bool
cleanup_control_flow_pre ()
{
  bool retval = false;

  /* We want remove_edge_and_dominated_blocks to only remove edges,
     not dominated blocks which it does when dom info isn't available.
     Pretend so.  */
  dom_state saved_state = dom_info_state (CDI_DOMINATORS);
  set_dom_info_availability (CDI_DOMINATORS, DOM_NONE);

  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));

  while (! stack.is_empty ())
    {
      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && ! bitmap_bit_p (visited, dest->index))
	{
	  bitmap_set_bit (visited, dest->index);
	  /* We only possibly remove edges from DEST here, leaving
	     possibly unreachable code in the IL.  */
	  retval |= cleanup_control_flow_bb (dest);
	  if (EDGE_COUNT (dest->succs) > 0)
	    stack.quick_push (ei_start (dest->succs));
	}
      else
	{
	  if (!ei_one_before_end_p (ei))
	    ei_next (&stack.last ());
	  else
	    stack.pop ();
	}
    }

  set_dom_info_availability (CDI_DOMINATORS, saved_state);

  /* We are deleting BBs in non-reverse dominator order, make sure
     insert_debug_temps_for_defs is prepared for that.  */
  if (retval)
    free_dominance_info (CDI_DOMINATORS);

  /* Remove all now (and previously) unreachable blocks.  */
  for (int i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); ++i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb && !bitmap_bit_p (visited, bb->index))
	{
	  if (!retval)
	    free_dominance_info (CDI_DOMINATORS);
	  delete_basic_block (bb);
	  retval = true;
	}
    }

  return retval;
}

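/* Predicate for the make_forwarder_block call below: keep latch (back)
   edges on the loop header and let the remaining entry edges be moved
   to the new forwarder block (a summary of the test; see the caller in
   cleanup_tree_cfg_noloop).  */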
static bool
mfb_keep_latches (edge e)
{
  return !((dom_info_available_p (CDI_DOMINATORS)
	    && dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
	   || (e->flags & EDGE_DFS_BACK));
}

/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      /* This needs backedges or dominators.  */
      if (!dom_info_available_p (CDI_DOMINATORS))
	mark_dfs_back_edges ();

      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
	if (loop && loop->header)
	  {
	    basic_block bb = loop->header;
	    edge_iterator ei;
	    edge e;
	    bool found_latch = false;
	    bool any_abnormal = false;
	    unsigned n = 0;
	    /* We are only interested in preserving existing loops, but
	       we need to check whether they are still real and of course
	       if we need to add a preheader at all.  */
	    FOR_EACH_EDGE (e, ei, bb->preds)
	      {
		if (e->flags & EDGE_ABNORMAL)
		  {
		    any_abnormal = true;
		    break;
		  }
		if ((dom_info_available_p (CDI_DOMINATORS)
		     && dominated_by_p (CDI_DOMINATORS, e->src, bb))
		    || (e->flags & EDGE_DFS_BACK))
		  {
		    found_latch = true;
		    continue;
		  }
		n++;
	      }
	    /* If we have more than one entry to the loop header
	       create a forwarder.  */
	    if (found_latch && ! any_abnormal && n > 1)
	      {
		edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
						      NULL);
		loop->header = fallthru->dest;
		if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
		  {
		    /* The loop updating from the CFG hook is incomplete
		       when we have multiple latches, fixup manually.  */
		    remove_bb_from_loops (fallthru->src);
		    loop_p cloop = loop;
		    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
		      cloop = find_common_loop (cloop, e->src->loop_father);
		    add_bb_to_loop (fallthru->src, cloop);
		  }
	      }
	  }
    }

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* Start by iterating over all basic blocks in PRE order looking for
     edge removal opportunities.  Do this first because incoming SSA form
     may be invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  This
     also gets rid of unreachable blocks.  */
  bool changed = cleanup_control_flow_pre ();

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */

  /* Compute dominator info which we need for the iterative process below.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    calculate_dominance_info (CDI_DOMINATORS);
  else
    checking_verify_dominators (CDI_DOMINATORS);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  We cannot use FOR_EACH_BB_FN for the BB iteration
     since the basic blocks may get removed.  */
  unsigned n = last_basic_block_for_fn (cfun);
  for (unsigned i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
	changed |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      unsigned i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
	continue;

      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
	continue;

      /* BB merging done by cleanup_tree_cfg_bb can end up propagating
	 out single-argument PHIs which in turn can expose
	 cleanup_control_flow_bb opportunities so we have to repeat
	 that here.  */
      changed |= cleanup_control_flow_bb (bb);
      changed |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Do not renumber blocks if the SCEV cache is active, it is indexed by
     basic-block numbers.  */
  if (! scev_initialized_p ())
    compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
	 to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
				  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Clean up the CFG and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
	return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
	{
	  /* We already have an edge S from E->src to DEST.  If S and
	     E->dest's sole successor edge have the same PHI arguments
	     at DEST, redirect S to DEST.  */
	  if (phi_alternatives_equal (dest, s, succ))
	    {
	      e = redirect_edge_and_branch (e, dest);
	      redirect_edge_var_map_clear (e);
	      continue;
	    }

	  /* PHI arguments are different.  Create a forwarder block by
	     splitting E so that we can merge PHI arguments on E to
	     DEST.  */
	  e = single_succ_edge (split_edge (e));
	}
      else
	{
	  /* If we merge the forwarder into a loop header verify if we
	     are creating another loop latch edge.  If so, reset
	     number of iteration information of the loop.  */
	  if (dest->loop_father->header == dest
	      && dominated_by_p (CDI_DOMINATORS, e->src, dest))
	    {
	      dest->loop_father->any_upper_bound = false;
	      dest->loop_father->any_likely_upper_bound = false;
	      free_numbers_of_iterations_estimates (dest->loop_father);
	    }
	}

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
	 destination of E.  */
      for (gsi = gsi_start_phis (dest);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	  location_t locus = gimple_phi_arg_location_from_edge (phi, succ);

	  if (TREE_CODE (def) == SSA_NAME)
	    {
	      /* If DEF is one of the results of PHI nodes removed during
		 redirection, replace it with the PHI argument that used
		 to be on E.  */
	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
	      size_t length = head ? head->length () : 0;
	      for (size_t i = 0; i < length; i++)
		{
		  edge_var_map *vm = &(*head)[i];
		  tree old_arg = redirect_edge_var_map_result (vm);
		  tree new_arg = redirect_edge_var_map_def (vm);

		  if (def == old_arg)
		    {
		      def = new_arg;
		      locus = redirect_edge_var_map_location (vm);
		      break;
		    }
		}
	    }

	  add_phi_arg (phi, def, s, locus);
	}

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
	 nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
	continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
	 nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
	  /* We don't want to deal with a basic block with
	     abnormal edges.  */
	  || bb_has_abnormal_pred (bb))
	continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
	{
	  /* If BB does not dominate DEST, then the PHI nodes at
	     DEST must be the only users of the results of the PHI
	     nodes at BB.  */
	  *current++ = bb;
	}
      else
	{
	  gphi_iterator gsi;
	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

	  /* BB dominates DEST.  There may be many users of the PHI
	     nodes in BB.  However, there is still a trivial case we
	     can handle.  If the result of every PHI in BB is used
	     only by a PHI in DEST, then we can trivially merge the
	     PHI nodes from BB into DEST.  */
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	       gsi_next (&gsi))
	    {
	      gphi *phi = gsi.phi ();
	      tree result = gimple_phi_result (phi);
	      use_operand_p imm_use;
	      gimple *use_stmt;

	      /* If the PHI's result is never used, then we can just
		 ignore it.  */
	      if (has_zero_uses (result))
		continue;

	      /* Get the single use of the result of this PHI node.  */
	      if (!single_imm_use (result, &imm_use, &use_stmt)
		  || gimple_code (use_stmt) != GIMPLE_PHI
		  || gimple_bb (use_stmt) != dest
		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
		break;
	    }

	  /* If the loop above iterated through all the PHI nodes
	     in BB, then we can merge the PHIs from BB into DEST.  */
	  if (gsi_end_p (gsi))
	    *current++ = bb;
	}
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  if (group_case_labels ())
    todo |= TODO_cleanup_cfg;
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
	{
	  error ("could not open final insn dump file %qs: %m",
		 flag_dump_final_insns);
	  flag_dump_final_insns = NULL;
	}
      else
	{
	  int save_unnumbered = flag_dump_unnumbered;
	  int save_noaddr = flag_dump_noaddr;

	  flag_dump_noaddr = flag_dump_unnumbered = 1;
	  fprintf (final_output, "\n");
	  dump_enumerated_decls (final_output,
				 dump_flags | TDF_SLIM | TDF_NOUID);
	  flag_dump_noaddr = save_noaddr;
	  flag_dump_unnumbered = save_unnumbered;
	  if (fclose (final_output))
	    {
	      error ("could not close final insn dump file %qs: %m",
		     flag_dump_final_insns);
	      flag_dump_final_insns = NULL;
	    }
	}
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}