/* CFG cleanup for trees.
   Copyright (C) 2001-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "gimple-match.h"
#include "gimple-fold.h"
#include "tree-ssa-loop-niter.h"
#include "cgraph.h"
#include "tree-into-ssa.h"
#include "tree-cfgcleanup.h"


/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
   what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
	if (e->flags & EDGE_COMPLEX)
	  e->flags &= ~EDGE_FALLTHRU;
	else
	  remove_edge_and_dominated_blocks (e);
	return true;
      }
  return false;
}

/* Convert a SWTCH with a single non-default case to a gcond and replace it
   at GSI.  */

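/* For example (an illustrative GIMPLE sketch, not from any particular
   testcase), a switch with one non-default case

     switch (i_1) <default: <L2>, case 42: <L1>>

   is replaced by

     if (i_1 == 42) goto <L1>; else goto <L2>;

   and a case carrying a CASE_HIGH range becomes the range test built
   by generate_range_test.  */
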
static bool
convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
{
  if (gimple_switch_num_labels (swtch) != 2)
    return false;

  tree index = gimple_switch_index (swtch);
  tree label = gimple_switch_label (swtch, 1);
  tree low = CASE_LOW (label);
  tree high = CASE_HIGH (label);

  basic_block default_bb = gimple_switch_default_bb (cfun, swtch);
  basic_block case_bb = label_to_block (cfun, CASE_LABEL (label));

  basic_block bb = gimple_bb (swtch);
  gcond *cond;

  /* Replace switch statement with condition statement.  */
  if (high)
    {
      tree lhs, rhs;
      if (range_check_type (TREE_TYPE (index)) == NULL_TREE)
	return false;
      generate_range_test (bb, index, low, high, &lhs, &rhs);
      cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
    }
  else
    cond = gimple_build_cond (EQ_EXPR, index,
			      fold_convert (TREE_TYPE (index), low),
			      NULL_TREE, NULL_TREE);

  gsi_replace (&gsi, cond, true);

  /* Update edges.  */
  edge case_edge = find_edge (bb, case_bb);
  edge default_edge = find_edge (bb, default_bb);

  case_edge->flags |= EDGE_TRUE_VALUE;
  default_edge->flags |= EDGE_FALSE_VALUE;
  return true;
}

/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

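/* Illustrative sketch: once the controlling condition has simplified
   to a constant, as in

     if (0 != 0) goto <L1>; else goto <L2>;

   only the edge to <L2> is kept (absorbing the probability of the
   removed edge), blocks dominated by the dead edge go away, the
   GIMPLE_COND itself is deleted, and the surviving edge is marked
   EDGE_FALLTHRU.  */
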
static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      /* Try to convert a switch with just a single non-default case to
	 a GIMPLE condition.  */
      if (gimple_code (stmt) == GIMPLE_SWITCH
	  && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
	stmt = gsi_stmt (gsi);

      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
	{
	case GIMPLE_COND:
	  {
	    gimple_match_op res_op;
	    if (gimple_simplify (stmt, &res_op, NULL, no_follow_ssa_edges,
				 no_follow_ssa_edges)
		&& res_op.code == INTEGER_CST)
	      val = res_op.ops[0];
	  }
	  break;

	case GIMPLE_SWITCH:
	  val = gimple_switch_index (as_a <gswitch *> (stmt));
	  break;

	default:
	  ;
	}
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return false;
	}

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e != taken_edge)
	    {
	      if (!warned)
		{
		  fold_undefer_overflow_warnings
		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
		  warned = true;
		}

	      taken_edge->probability += e->probability;
	      remove_edge_and_dominated_blocks (e);
	      retval = true;
	    }
	  else
	    ei_next (&ei);
	}
      if (!warned)
	fold_undefer_and_ignore_overflow_warnings ();
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Cleanup the GF_CALL_CTRL_ALTERING flag according to the updated
   gimple_call_flags.  */

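/* E.g. a call that ends its block because it was believed to alter
   control flow may later be discovered to be const or leaf; such a
   call can no longer transfer control abnormally, so the flag set
   earlier can be dropped (illustrative reading of the test below).  */
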
static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune the cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to clean up the ctrl altering flag for the call which ends BB.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
	       == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
	 destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
	 edges which do not go to the right block.  For the one
	 edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      if (DECL_CONTEXT (label) != cfun->decl)
	return retval;
      target_block = label_to_block (cfun, label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e->dest != target_block)
	    remove_edge_and_dominated_blocks (e);
	  else
	    {
	      /* Turn off the EDGE_ABNORMAL flag.  */
	      e->flags &= ~EDGE_ABNORMAL;

	      /* And set EDGE_FALLTHRU.  */
	      e->flags |= EDGE_FALLTHRU;
	      ei_next (&ei);
	    }
	}

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
	 relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
	   && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
	 now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
	gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
	retval = true;
    }

  return retval;
}

/* Return true if basic block BB does nothing except pass control
   flow to another block and we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB be not equal to
   the entry block.  */

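/* A typical forwarder block thus contains nothing but (artificial)
   labels and debug stmts and falls through to its single successor,
   as in this illustrative dump:

     <bb 5> :
     <L3>:
       goto <bb 7>;

   With PHI_WANTED the block carries PHI nodes as well.  */
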
static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
	 Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
	return false;
      /* If goto_locus of any of the edges differs, prevent removing
	 the forwarder block when not optimizing.  */
      else if (!optimize
	       && (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
		   || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
	       && e->goto_locus != locus)
	return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
	{
	case GIMPLE_LABEL:
	  if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
	    return false;
	  if (!optimize
	      && (gimple_has_location (stmt)
		  || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
	      && gimple_location (stmt) != locus)
	    return false;
	  break;

	  /* ??? For now, hope there's a corresponding debug
	     assignment at the destination.  */
	case GIMPLE_DEBUG:
	  break;

	default:
	  return false;
	}
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
	return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
	{
	  if (bb->loop_father == dest->loop_father)
	    {
	      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
		return false;
	      /* If bb doesn't have a single predecessor we'd make this
		 loop have multiple latches.  Don't do that if that
		 would in turn require disambiguating them.  */
	      return (single_pred_p (bb)
		      || loops_state_satisfies_p
			   (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
	    }
	  else if (bb->loop_father == loop_outer (dest->loop_father))
	    return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
	  /* Always preserve other edges into loop headers that are
	     not simple latches or preheaders.  */
	  return false;
	}
    }

  return true;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

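/* E.g. given  x_1 = PHI <a_2(E1), a_2(E2), b_3(E3)>  the alternatives
   for E1 and E2 are equal, so redirecting one of the two edges onto
   the other cannot change the value the PHI selects (an illustrative
   example of the check below).  */
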
static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
	return false;
    }

  return true;
}

/* Move debug stmts from the forwarder block SRC to DEST.  */

static void
move_debug_stmts_from_forwarder (basic_block src, basic_block dest,
				 bool dest_single_pred_p)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  gimple_stmt_iterator gsi_to = gsi_after_labels (dest);
  for (gimple_stmt_iterator gsi = gsi_after_labels (src); !gsi_end_p (gsi);)
    {
      gimple *debug = gsi_stmt (gsi);
      gcc_assert (is_gimple_debug (debug));
      /* Move debug binds anyway, but not anything else like begin-stmt
	 markers unless they are always valid at the destination.  */
      if (dest_single_pred_p
	  || gimple_debug_bind_p (debug))
	{
	  gsi_move_before (&gsi, &gsi_to);
	  /* Reset debug-binds that are not always valid at the destination.
	     Simply dropping them can cause earlier values to become live,
	     generating wrong debug information.
	     ??? There are several things we could improve here.  For
	     one we might be able to move stmts to the predecessor.
	     For another, if the debug stmt is immediately followed by a
	     (debug) definition in the destination (on a post-dominated path?)
	     we can elide it without any bad effects.  */
	  if (!dest_single_pred_p)
	    {
	      gimple_debug_bind_reset_value (debug);
	      update_stmt (debug);
	    }
	}
      else
	gsi_next (&gsi);
    }
}

/* Removes forwarder block BB.  Returns false if this failed.  */

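/* Illustrative sketch: with a forwarder block <bb 5>,

     <bb 3> -> <bb 5> -> <bb 7>
     <bb 4> -> <bb 5>

   the incoming edges are redirected straight to <bb 7>; labels, debug
   stmts and PHI arguments are fixed up below, and <bb 5> is deleted.  */
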
static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *stmt;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  stmt = first_stmt (dest);
  if (stmt)
    if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
	  || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
	return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at
     out-of-SSA time due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
	  || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  s = find_edge (e->src, dest);
	  if (!s)
	    continue;

	  if (!phi_alternatives_equal (dest, succ, s))
	    return false;
	}
    }

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);
  bool dest_single_pred_p = single_pred_p (dest);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
	{
	  /* If there is an abnormal edge, redirect it anyway, and
	     move the labels to the new block to make it legal.  */
	  s = redirect_edge_succ_nodup (e, dest);
	}
      else
	s = redirect_edge_and_branch (e, dest);

      if (s == e)
	{
	  /* Create arguments for the phi nodes, since the edge was not
	     here before.  */
	  for (gphi_iterator psi = gsi_start_phis (dest);
	       !gsi_end_p (psi);
	       gsi_next (&psi))
	    {
	      gphi *phi = psi.phi ();
	      location_t l = gimple_phi_arg_location_from_edge (phi, succ);
	      tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	      add_phi_arg (phi, unshare_expr (def), s, l);
	    }
	}
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	break;

      /* Forwarder blocks can only contain labels and debug stmts, and
	 labels must come first, so if we get to this point, we know
	 we're looking at a label.  */
      tree decl = gimple_label_label (as_a <glabel *> (stmt));
      if (EH_LANDING_PAD_NR (decl) != 0
	  || DECL_NONLOCAL (decl)
	  || FORCED_LABEL (decl)
	  || !DECL_ARTIFICIAL (decl))
	gsi_move_before (&gsi, &gsi_to);
      else
	gsi_next (&gsi);
    }

  /* Move debug statements.  Reset them if the destination does not
     have a single predecessor.  */
  move_debug_stmts_from_forwarder (bb, dest, dest_single_pred_p);

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
	{
	  /* Shortcut to avoid calling (relatively expensive)
	     nearest_common_dominator unless necessary.  */
	  dom = dombb;
	}
      else
	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

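/* Illustrative sketch: when in

     x_1 = foo ();
     y_2 = x_1 + 1;

   foo is discovered to be noreturn, the block is split after the call,
   the now meaningless LHS x_1 is dropped (and turned into an anonymous
   default definition when it is an SSA name), and the call is marked
   control altering so that CFG cleanup can remove the unreachable
   remainder of the block.  */
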
bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
	{
	  /* Don't split if there are only debug stmts
	     after stmt, that can result in -fcompare-debug
	     failures.  Remove the debug stmts instead,
	     they should be all unreachable anyway.  */
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  for (gsi_next (&gsi); !gsi_end_p (gsi); )
	    gsi_remove (&gsi, true);
	}
      else
	{
	  split_block (bb, stmt);
	  changed = true;
	}
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
	  || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
	{
	  tree new_var = create_tmp_reg (TREE_TYPE (lhs));
	  SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
	  SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
	  set_ssa_default_def (cfun, new_var, lhs);
	}

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}

/* Return true if we want to merge BB1 and BB2 into a single block.  */

static bool
want_merge_blocks_p (basic_block bb1, basic_block bb2)
{
  if (!can_merge_blocks_p (bb1, bb2))
    return false;
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
  if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
    return true;
  return bb1->count.ok_for_merging (bb2->count);
}


/* Tries to clean up the cfg in basic block BB by merging blocks.  Returns
   true if anything changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior with adjusting stmts BB pointer.  */
  if (single_pred_p (bb)
      && want_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destination's predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
	   && want_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}

/* Return true if E is an EDGE_ABNORMAL edge for returns_twice calls,
   i.e. one going from .ABNORMAL_DISPATCHER to a basic block which doesn't
   start with a forced or nonlocal label.  Calls which return twice can return
   the second time only if they are called normally the first time, so basic
   blocks which can be only entered through these abnormal edges but not
   normally are effectively unreachable as well.  Additionally ignore
   __builtin_setjmp_receiver starting blocks, which have one FORCED_LABEL
   and which are always only reachable through an EDGE_ABNORMAL edge.  They
   are handled in cleanup_control_flow_pre.  */

static bool
maybe_dead_abnormal_edge_p (edge e)
{
  if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL)
    return false;

  gimple_stmt_iterator gsi = gsi_start_nondebug_after_labels_bb (e->src);
  gimple *g = gsi_stmt (gsi);
  if (!g || !gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
    return false;

  tree target = NULL_TREE;
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    if (glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi)))
      {
	tree this_target = gimple_label_label (label_stmt);
	if (DECL_NONLOCAL (this_target))
	  return false;
	if (FORCED_LABEL (this_target))
	  {
	    if (target)
	      return false;
	    target = this_target;
	  }
      }
    else
      break;

  if (target)
    {
      /* If there was a single FORCED_LABEL, check for
	 __builtin_setjmp_receiver with address of that label.  */
      if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
	gsi_next_nondebug (&gsi);
      if (gsi_end_p (gsi))
	return false;
      if (!gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_SETJMP_RECEIVER))
	return false;

      tree arg = gimple_call_arg (gsi_stmt (gsi), 0);
      if (TREE_CODE (arg) != ADDR_EXPR || TREE_OPERAND (arg, 0) != target)
	return false;
    }
  return true;
}

/* If BB is a basic block ending with __builtin_setjmp_setup, return the edge
   from the .ABNORMAL_DISPATCHER basic block to the corresponding
   __builtin_setjmp_receiver basic block, otherwise return NULL.  */
static edge
builtin_setjmp_setup_bb (basic_block bb)
{
  if (EDGE_COUNT (bb->succs) != 2
      || ((EDGE_SUCC (bb, 0)->flags
	   & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL
	  && (EDGE_SUCC (bb, 1)->flags
	      & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL))
    return NULL;

  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi)
      || !gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_SETJMP_SETUP))
    return NULL;

  tree arg = gimple_call_arg (gsi_stmt (gsi), 1);
  if (TREE_CODE (arg) != ADDR_EXPR
      || TREE_CODE (TREE_OPERAND (arg, 0)) != LABEL_DECL)
    return NULL;

  basic_block recv_bb = label_to_block (cfun, TREE_OPERAND (arg, 0));
  if (EDGE_COUNT (recv_bb->preds) != 1
      || (EDGE_PRED (recv_bb, 0)->flags
	  & (EDGE_ABNORMAL | EDGE_EH)) != EDGE_ABNORMAL
      || (EDGE_SUCC (bb, 0)->dest != EDGE_PRED (recv_bb, 0)->src
	  && EDGE_SUCC (bb, 1)->dest != EDGE_PRED (recv_bb, 0)->src))
    return NULL;

  /* EDGE_PRED (recv_bb, 0)->src should be the .ABNORMAL_DISPATCHER bb.  */
  return EDGE_PRED (recv_bb, 0);
}

/* Do cleanup_control_flow_bb in PRE order.  */

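/* The walk below is an iterative depth-first search from the entry
   block: a block is cleaned when first reached, and blocks whose bit
   in VISITED never gets set (unreachable on entry, or disconnected by
   the cleanups themselves) are deleted afterwards.  */
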
static bool
cleanup_control_flow_pre ()
{
  bool retval = false;

  /* We want remove_edge_and_dominated_blocks to only remove edges,
     not dominated blocks which it does when dom info isn't available.
     Pretend so.  */
  dom_state saved_state = dom_info_state (CDI_DOMINATORS);
  set_dom_info_availability (CDI_DOMINATORS, DOM_NONE);

  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 2);
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  vec<edge, va_gc> *setjmp_vec = NULL;
  auto_vec<basic_block, 4> abnormal_dispatchers;

  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));

  while (! stack.is_empty ())
    {
      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && !bitmap_bit_p (visited, dest->index)
	  && (ei_container (ei) == setjmp_vec
	      || !maybe_dead_abnormal_edge_p (ei_edge (ei))))
	{
	  bitmap_set_bit (visited, dest->index);
	  /* We only possibly remove edges from DEST here, leaving
	     possibly unreachable code in the IL.  */
	  retval |= cleanup_control_flow_bb (dest);

	  /* Check for __builtin_setjmp_setup.  Edges from .ABNORMAL_DISPATCH
	     to __builtin_setjmp_receiver will be normally ignored by
	     maybe_dead_abnormal_edge_p.  If DEST is a visited
	     __builtin_setjmp_setup, queue edge from .ABNORMAL_DISPATCH
	     to __builtin_setjmp_receiver, so that it will be visited too.  */
	  if (edge e = builtin_setjmp_setup_bb (dest))
	    {
	      vec_safe_push (setjmp_vec, e);
	      if (vec_safe_length (setjmp_vec) == 1)
		stack.quick_push (ei_start (setjmp_vec));
	    }

	  if ((ei_edge (ei)->flags
	       & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL)
	    {
	      gimple_stmt_iterator gsi
		= gsi_start_nondebug_after_labels_bb (dest);
	      gimple *g = gsi_stmt (gsi);
	      if (g && gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
		abnormal_dispatchers.safe_push (dest);
	    }

	  if (EDGE_COUNT (dest->succs) > 0)
	    stack.quick_push (ei_start (dest->succs));
	}
      else
	{
	  if (!ei_one_before_end_p (ei))
	    ei_next (&stack.last ());
	  else
	    {
	      if (ei_container (ei) == setjmp_vec)
		vec_safe_truncate (setjmp_vec, 0);
	      stack.pop ();
	    }
	}
    }

  vec_free (setjmp_vec);

  /* If we've marked .ABNORMAL_DISPATCHER basic block(s) as visited
     above, but haven't marked any of their successors as visited,
     unmark them now, so that they can be removed as useless.  */
  basic_block dispatcher_bb;
  unsigned int k;
  FOR_EACH_VEC_ELT (abnormal_dispatchers, k, dispatcher_bb)
    {
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, dispatcher_bb->succs)
	if (bitmap_bit_p (visited, e->dest->index))
	  break;
      if (e == NULL)
	bitmap_clear_bit (visited, dispatcher_bb->index);
    }

  set_dom_info_availability (CDI_DOMINATORS, saved_state);

  /* We are deleting BBs in non-reverse dominator order, make sure
     insert_debug_temps_for_defs is prepared for that.  */
  if (retval)
    free_dominance_info (CDI_DOMINATORS);

  /* Remove all now (and previously) unreachable blocks.  */
  for (int i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); ++i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb && !bitmap_bit_p (visited, bb->index))
	{
	  if (!retval)
	    free_dominance_info (CDI_DOMINATORS);
	  delete_basic_block (bb);
	  retval = true;
	}
    }

  return retval;
}

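/* Edge predicate for the make_forwarder_block call below (descriptive
   comment added for clarity): return false for latch edges, i.e. edges
   whose source is dominated by the destination or that are marked
   EDGE_DFS_BACK, so they keep pointing at the loop header, and true
   for the remaining entry edges, which get moved to the forwarder.  */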
static bool
mfb_keep_latches (edge e)
{
  return !((dom_info_available_p (CDI_DOMINATORS)
	    && dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
	   || (e->flags & EDGE_DFS_BACK));
}

/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (unsigned ssa_update_flags)
{
  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      /* This needs backedges or dominators.  */
      if (!dom_info_available_p (CDI_DOMINATORS))
	mark_dfs_back_edges ();

      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
	if (loop && loop->header)
	  {
	    basic_block bb = loop->header;
	    edge_iterator ei;
	    edge e;
	    bool found_latch = false;
	    bool any_abnormal = false;
	    unsigned n = 0;
	    /* We are only interested in preserving existing loops, but
	       we need to check whether they are still real and of course
	       if we need to add a preheader at all.  */
	    FOR_EACH_EDGE (e, ei, bb->preds)
	      {
		if (e->flags & EDGE_ABNORMAL)
		  {
		    any_abnormal = true;
		    break;
		  }
		if ((dom_info_available_p (CDI_DOMINATORS)
		     && dominated_by_p (CDI_DOMINATORS, e->src, bb))
		    || (e->flags & EDGE_DFS_BACK))
		  {
		    found_latch = true;
		    continue;
		  }
		n++;
	      }
	    /* If we have more than one entry to the loop header
	       create a forwarder.  */
	    if (found_latch && ! any_abnormal && n > 1)
	      {
		edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
						      NULL);
		loop->header = fallthru->dest;
		if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
		  {
		    /* The loop updating from the CFG hook is incomplete
		       when we have multiple latches, fixup manually.  */
		    remove_bb_from_loops (fallthru->src);
		    loop_p cloop = loop;
		    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
		      cloop = find_common_loop (cloop, e->src->loop_father);
		    add_bb_to_loop (fallthru->src, cloop);
		  }
	      }
	  }
    }

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* Start by iterating over all basic blocks in PRE order looking for
     edge removal opportunities.  Do this first because incoming SSA form
     may be invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  This
     also gets rid of unreachable blocks.  */
  bool changed = cleanup_control_flow_pre ();

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */
  if (ssa_update_flags)
    update_ssa (ssa_update_flags);

  /* Compute dominator info which we need for the iterative process below.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    calculate_dominance_info (CDI_DOMINATORS);
  else
    checking_verify_dominators (CDI_DOMINATORS);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  We cannot use FOR_EACH_BB_FN for the BB iteration
     since the basic blocks may get removed.  */
  unsigned n = last_basic_block_for_fn (cfun);
  for (unsigned i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
	changed |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      unsigned i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
	continue;

      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
	continue;

      /* BB merging done by cleanup_tree_cfg_bb can end up propagating
	 out single-argument PHIs which in turn can expose
	 cleanup_control_flow_bb opportunities so we have to repeat
	 that here.  */
      changed |= cleanup_control_flow_bb (bb);
      changed |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Do not renumber blocks if the SCEV cache is active, it is indexed by
     basic-block numbers.  */
  if (! scev_initialized_p ())
    compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
	 to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
				  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Cleanup cfg and repair loop structures.  */

bool
cleanup_tree_cfg (unsigned ssa_update_flags)
{
  bool changed = cleanup_tree_cfg_noloop (ssa_update_flags);

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
	return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);
  bool dest_single_pred_p = single_pred_p (dest);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
	{
	  /* We already have an edge S from E->src to DEST.  If S and
	     E->dest's sole successor edge have the same PHI arguments
	     at DEST, redirect S to DEST.  */
	  if (phi_alternatives_equal (dest, s, succ))
	    {
	      e = redirect_edge_and_branch (e, dest);
	      redirect_edge_var_map_clear (e);
	      continue;
	    }

	  /* PHI arguments are different.  Create a forwarder block by
	     splitting E so that we can merge PHI arguments on E to
	     DEST.  */
	  e = single_succ_edge (split_edge (e));
	}
      else
	{
	  /* If we merge the forwarder into a loop header verify if we
	     are creating another loop latch edge.  If so, reset
	     number of iteration information of the loop.  */
	  if (dest->loop_father->header == dest
	      && dominated_by_p (CDI_DOMINATORS, e->src, dest))
	    {
	      dest->loop_father->any_upper_bound = false;
	      dest->loop_father->any_likely_upper_bound = false;
	      free_numbers_of_iterations_estimates (dest->loop_father);
	    }
	}

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
	 destination of E.  */
      for (gsi = gsi_start_phis (dest);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	  location_t locus = gimple_phi_arg_location_from_edge (phi, succ);

	  if (TREE_CODE (def) == SSA_NAME)
	    {
	      /* If DEF is one of the results of PHI nodes removed during
		 redirection, replace it with the PHI argument that used
		 to be on E.  */
	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
	      size_t length = head ? head->length () : 0;
	      for (size_t i = 0; i < length; i++)
		{
		  edge_var_map *vm = &(*head)[i];
		  tree old_arg = redirect_edge_var_map_result (vm);
		  tree new_arg = redirect_edge_var_map_def (vm);

		  if (def == old_arg)
		    {
		      def = new_arg;
		      locus = redirect_edge_var_map_location (vm);
		      break;
		    }
		}
	    }

	  add_phi_arg (phi, def, s, locus);
	}

      redirect_edge_var_map_clear (e);
    }

  /* Move debug statements.  Reset them if the destination does not
     have a single predecessor.  */
  move_debug_stmts_from_forwarder (bb, dest, dest_single_pred_p);

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
	 nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
	continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
	 nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
	  /* We don't want to deal with a basic block with
	     abnormal edges.  */
	  || bb_has_abnormal_pred (bb))
	continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
	{
	  /* If BB does not dominate DEST, then the PHI nodes at
	     DEST must be the only users of the results of the PHI
	     nodes at BB.  */
	  *current++ = bb;
	}
      else
	{
	  gphi_iterator gsi;
	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

	  /* BB dominates DEST.  There may be many users of the PHI
	     nodes in BB.  However, there is still a trivial case we
	     can handle.  If the result of every PHI in BB is used
	     only by a PHI in DEST, then we can trivially merge the
	     PHI nodes from BB into DEST.  */
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	       gsi_next (&gsi))
	    {
	      gphi *phi = gsi.phi ();
	      tree result = gimple_phi_result (phi);
	      use_operand_p imm_use;
	      gimple *use_stmt;

	      /* If the PHI's result is never used, then we can just
		 ignore it.  */
	      if (has_zero_uses (result))
		continue;

	      /* Get the single use of the result of this PHI node.  */
	      if (!single_imm_use (result, &imm_use, &use_stmt)
		  || gimple_code (use_stmt) != GIMPLE_PHI
		  || gimple_bb (use_stmt) != dest
		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
		break;
	    }

	  /* If the loop above iterated through all the PHI nodes
	     in BB, then we can merge the PHIs from BB into DEST.  */
	  if (gsi_end_p (gsi))
	    *current++ = bb;
	}
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  if (group_case_labels ())
    todo |= TODO_cleanup_cfg;
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
	{
	  error ("could not open final insn dump file %qs: %m",
		 flag_dump_final_insns);
	  flag_dump_final_insns = NULL;
	}
      else
	{
	  int save_unnumbered = flag_dump_unnumbered;
	  int save_noaddr = flag_dump_noaddr;

	  flag_dump_noaddr = flag_dump_unnumbered = 1;
	  fprintf (final_output, "\n");
	  dump_enumerated_decls (final_output,
				 dump_flags | TDF_SLIM | TDF_NOUID);
	  flag_dump_noaddr = save_noaddr;
	  flag_dump_unnumbered = save_unnumbered;
	  if (fclose (final_output))
	    {
	      error ("could not close final insn dump file %qs: %m",
		     flag_dump_final_insns);
	      flag_dump_final_insns = NULL;
	    }
	}
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}

/* Delete all unreachable basic blocks and update the callgraph.
   Doing so is somewhat nontrivial because we need to update all clones and
   remove inline functions that become unreachable.  */

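/* The nested loop below walks DST_NODE's clone tree in preorder:
   down via ->clones, across via ->next_sibling_clone, and back up
   through ->clone_of, so that each clone drops the same statement
   references and call edges as the node itself.  */
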
bool
delete_unreachable_blocks_update_callgraph (cgraph_node *dst_node,
					    bool update_clones)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
       b != EXIT_BLOCK_PTR_FOR_FN (cfun); b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
	{
	  gimple_stmt_iterator bsi;

	  for (bsi = gsi_start_bb (b); !gsi_end_p (bsi); gsi_next (&bsi))
	    {
	      struct cgraph_edge *e;
	      struct cgraph_node *node;

	      dst_node->remove_stmt_references (gsi_stmt (bsi));

	      if (gimple_code (gsi_stmt (bsi)) == GIMPLE_CALL
		  && (e = dst_node->get_edge (gsi_stmt (bsi))) != NULL)
		{
		  if (!e->inline_failed)
		    e->callee->remove_symbol_and_inline_clones (dst_node);
		  else
		    e->remove ();
		}
	      if (update_clones && dst_node->clones)
		for (node = dst_node->clones; node != dst_node;)
		  {
		    node->remove_stmt_references (gsi_stmt (bsi));
		    if (gimple_code (gsi_stmt (bsi)) == GIMPLE_CALL
			&& (e = node->get_edge (gsi_stmt (bsi))) != NULL)
		      {
			if (!e->inline_failed)
			  e->callee->remove_symbol_and_inline_clones (dst_node);
			else
			  e->remove ();
		      }

		    if (node->clones)
		      node = node->clones;
		    else if (node->next_sibling_clone)
		      node = node->next_sibling_clone;
		    else
		      {
			while (node != dst_node && !node->next_sibling_clone)
			  node = node->clone_of;
			if (node != dst_node)
			  node = node->next_sibling_clone;
		      }
		  }
	    }
	  delete_basic_block (b);
	  changed = true;
	}
    }

  return changed;
}