/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to the
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure so the branch can be
     converted to a simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
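
/* For illustration, a "forwarder block" in the sense used below is a
   block containing no active insns apart from an (optional)
   unconditional jump, for example:

	A: if (c) goto B;
	   ...
	B: goto C;

   Edge forwarding redirects the A->B edge straight to C; once B has no
   predecessors left, unreachable block removal deletes it.  */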

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "tm_p.h"
#include "insn-config.h"
#include "emit-rtl.h"
#include "cselib.h"
#include "params.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "dce.h"
#include "dbgcnt.h"
#include "rtl-iter.h"

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of
   try_optimize_cfg.  */
static bool crossjumps_occured;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;

static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static void merge_memattrs (rtx, rtx);
\f
/* Set flags for newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
\f
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

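/* For example, the transformation performed here (with a made-up
   condition) is:

	if (c) goto L2;             if (!c) goto L3;
	goto L3;              =>
     L2:                         L2:

   The forwarder block holding "goto L3" is deleted and the inverted
   conditional branch falls through to L2.  */
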
static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx_insn *cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
		    block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
\f
/* Attempt to prove that the operation is a NOOP using CSElib; otherwise
   mark the effect on the register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      dest = XEXP (exp, 0);
      if (REG_P (dest))
	bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    default:
      return false;
    }
}

/* Return true if X contains a register in NONEQUAL.  */
static bool
mentions_nonequal_regs (const_rtx x, regset nonequal)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x))
	{
	  unsigned int end_regno = END_REGNO (x);
	  for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
	    if (REGNO_REG_SET_P (nonequal, regno))
	      return true;
	}
    }
  return false;
}

/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return that edge
   if it exists, otherwise return NULL.  */

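/* As an example of the dominance test used below: if E is the taken edge
   of "if (x > 0)" in E->SRC and B ends with "if (x != 0)", then
   comparison_dominates_p (GT, NE) holds, so provided B has no side
   effects, the branch in B is known to be taken and can be threaded.  */
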
static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2;
  rtx_insn *insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (mentions_nonequal_regs (cond2, nonequal))
    goto failed_exit;

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
\f
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

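/* For example, a chain

	B -> F1 -> F2 -> T

   where F1 and F2 are forwarder blocks is shortened by redirecting the
   B->F1 edge straight to T.  COUNTER below is bounded by
   n_basic_blocks_for_fn so that a cycle of forwarders (an empty
   infinite loop) cannot make the walk loop forever.  */
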
static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (JUMP_P (BB_END (b)) && CROSSING_JUMP_P (BB_END (b)))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      location_t goto_locus;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we may succeed in forwarding the
	 fallthru edge to the same place as the branch edge of a conditional
	 branch, turning the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && JUMP_P (BB_END (first))
	  && CROSSING_JUMP_P (BB_END (first)))
	return changed;

      while (counter < n_basic_blocks_for_fn (cfun))
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= (target->flags & BB_MODIFIED) != 0;

	  if (FORWARDER_BLOCK_P (target)
	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks_for_fn (cfun);
	      else if (!optimize)
		{
		  /* When not optimizing, ensure that edges or forwarder
		     blocks with different locus are not optimized out.  */
		  location_t new_locus = single_succ_edge (target)->goto_locus;
		  location_t locus = goto_locus;

		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
		      && new_locus != locus)
		    new_target = NULL;
		  else
		    {
		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			locus = new_locus;

		      rtx_insn *last = BB_END (target);
		      if (DEBUG_INSN_P (last))
			last = prev_nondebug_insn (last);
		      if (last && INSN_P (last))
			new_locus = INSN_LOCATION (last);
		      else
			new_locus = UNKNOWN_LOCATION;

		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
			  && new_locus != locus)
			new_target = NULL;
		      else
			{
			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			    locus = new_locus;

			  goto_locus = locus;
			}
		    }
		}
	    }

	  /* Thread over only one edge at a time to simplify updating
	     of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge,
					      n_basic_blocks_for_fn (cfun));
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks_for_fn (cfun);
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges
			      < (n_basic_blocks_for_fn (cfun)
				 - NUM_FIXED_BLOCKS));
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks_for_fn (cfun))
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  e->goto_locus = goto_locus;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = apply_probability (b->frequency, edge_probability);

	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  free (threaded_edges);
  return changed;
}
\f

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier, *real_b_end;
  rtx label;
  rtx_jump_table_data *table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return a basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;

      /* Protect the loop latches.  */
      if (current_loops && c->loop_father->latch == c)
	return NULL;

      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
    }

  return NULL;
}
\f

/* Remove the memory attributes of the two MEM expressions
   if they are not equal.  */

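/* For example, if cross-jumping matches a MEM annotated [a, size 4] with
   one annotated [b, size 8], the differing MEM_EXPRs (and offsets) are
   cleared, the common size becomes MAX (4, 8), and the alignment drops
   to the MIN of the two.  */
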
static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  HOST_WIDE_INT mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      clear_mem_offset (x);
	      clear_mem_offset (y);
	    }
	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
		   || (MEM_OFFSET_KNOWN_P (x)
		       && MEM_OFFSET (x) != MEM_OFFSET (y)))
	    {
	      clear_mem_offset (x);
	      clear_mem_offset (y);
	    }

	  if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
	    {
	      mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
	      set_mem_size (x, mem_size);
	      set_mem_size (y, mem_size);
	    }
	  else
	    {
	      clear_mem_size (x);
	      clear_mem_size (y);
	    }

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }
  if (code == MEM)
    {
      if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
	{
	  MEM_READONLY_P (x) = 0;
	  MEM_READONLY_P (y) = 0;
	}
      if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
	{
	  MEM_NOTRAP_P (x) = 0;
	  MEM_NOTRAP_P (y) = 0;
	}
      if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
	{
	  MEM_VOLATILE_P (x) = 1;
	  MEM_VOLATILE_P (y) = 1;
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}

/* Checks if patterns P1 and P2 are equivalent, apart from the possibly
   different single sets S1 and S2.  */

static bool
equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
{
  int i;
  rtx e1, e2;

  if (p1 == s1 && p2 == s2)
    return true;

  if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
    return false;

  if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
    return false;

  for (i = 0; i < XVECLEN (p1, 0); i++)
    {
      e1 = XVECEXP (p1, 0, i);
      e2 = XVECEXP (p2, 0, i);
      if (e1 == s1 && e2 == s2)
	continue;
      if (reload_completed
	  ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
	continue;

      return false;
    }

  return true;
}

/* NOTE1 is the REG_EQUAL note, if any, attached to an insn
   that is a single_set with a SET_SRC of SRC1.  Similarly
   for NOTE2/SRC2.

   So effectively NOTE1/NOTE2 are an alternate form of
   SRC1/SRC2 respectively.

   Return nonzero if SRC1 or NOTE1 has the same constant
   integer value as SRC2 or NOTE2.  Else return zero.  */
static int
values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
{
  if (note1
      && note2
      && CONST_INT_P (XEXP (note1, 0))
      && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
    return 1;

  if (!note1
      && !note2
      && CONST_INT_P (src1)
      && CONST_INT_P (src2)
      && rtx_equal_p (src1, src2))
    return 1;

  if (note1
      && CONST_INT_P (src2)
      && rtx_equal_p (XEXP (note1, 0), src2))
    return 1;

  if (note2
      && CONST_INT_P (src1)
      && rtx_equal_p (XEXP (note2, 0), src1))
    return 1;

  return 0;
}

/* Examine register notes on I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

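/* For example (made-up RTL), if I1 is (set (reg:SI 0) (reg:SI 1)) with a
   REG_EQUAL note (const_int 5) and I2 is (set (reg:SI 0) (const_int 5)),
   both store 5, but only dir_forward is safe: I2 is valid anywhere,
   while I1 also depends on the value register 1 happens to hold at the
   replacement location.  */
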
static enum replace_direction
can_replace_by (rtx_insn *i1, rtx_insn *i2)
{
  rtx s1, s2, d1, d2, src1, src2, note1, note2;
  bool c1, c2;

  /* Check for 2 sets.  */
  s1 = single_set (i1);
  s2 = single_set (i2);
  if (s1 == NULL_RTX || s2 == NULL_RTX)
    return dir_none;

  /* Check that the 2 sets set the same dest.  */
  d1 = SET_DEST (s1);
  d2 = SET_DEST (s2);
  if (!(reload_completed
	? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
    return dir_none;

  /* Find identical reg_equiv or reg_equal note, which implies that the 2 sets
     set dest to the same value.  */
  note1 = find_reg_equal_equiv_note (i1);
  note2 = find_reg_equal_equiv_note (i2);

  src1 = SET_SRC (s1);
  src2 = SET_SRC (s2);

  if (!values_equal_p (note1, note2, src1, src2))
    return dir_none;

  if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
    return dir_none;

  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
  c1 = CONST_INT_P (src1);
  c2 = CONST_INT_P (src2);
  if (c1 && c2)
    return dir_both;
  else if (c2)
    return dir_forward;
  else if (c1)
    return dir_backward;

  return dir_none;
}

/* Merges directions A and B.  */

static enum replace_direction
merge_dir (enum replace_direction a, enum replace_direction b)
{
  /* Implements the following table:

        |bo fw bw no
     ---+-----------
     bo |bo fw bw no
     fw |-- fw no no
     bw |-- -- bw no
     no |-- -- -- no.  */

  if (a == b)
    return a;

  if (a == dir_both)
    return b;
  if (b == dir_both)
    return a;

  return dir_none;
}

/* Examine I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return dir_none;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped.  */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return dir_both;

  /* ??? Do not allow cross-jumping between different stack levels.  */
  p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
  p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
  if (p1 && p2)
    {
      p1 = XEXP (p1, 0);
      p2 = XEXP (p2, 0);
      if (!rtx_equal_p (p1, p2))
	return dir_none;

      /* ??? Worse, this adjustment had better be constant lest we
	 have differing incoming stack levels.  */
      if (!frame_pointer_needed
	  && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
	return dir_none;
    }
  else if (p1 || p2)
    return dir_none;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return dir_none;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.

     Also check for identical exception regions.  */

  if (CALL_P (i1))
    {
      /* Ensure the same EH region.  */
      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);

      if (!n1 && n2)
	return dir_none;

      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
	return dir_none;

      if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
	return dir_none;

      /* For address sanitizer, never crossjump __asan_report_* builtins,
	 otherwise errors might be reported on incorrect lines.  */
      if (flag_sanitize & SANITIZE_ADDRESS)
	{
	  rtx call = get_call_rtx_from (i1);
	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
	    {
	      rtx symbol = XEXP (XEXP (call, 0), 0);
	      if (SYMBOL_REF_DECL (symbol)
		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
		{
		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
		       == BUILT_IN_NORMAL)
		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
			 >= BUILT_IN_ASAN_REPORT_LOAD1
		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
			 <= BUILT_IN_ASAN_STOREN)
		    return dir_none;
		}
	    }
	}
    }

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
	return dir_none;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return dir_both;

  return can_replace_by (i1, i2);
}
\f
/* When comparing insns I1 and I2 in flow_find_cross_jump or
   flow_find_head_matching_sequence, ensure the notes match.  */

static void
merge_notes (rtx_insn *i1, rtx_insn *i2)
{
  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
  rtx equiv1 = find_reg_equal_equiv_note (i1);
  rtx equiv2 = find_reg_equal_equiv_note (i2);

  if (equiv1 && !equiv2)
    remove_note (i1, equiv1);
  else if (!equiv1 && equiv2)
    remove_note (i2, equiv2);
  else if (equiv1 && equiv2
	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
    {
      remove_note (i1, equiv1);
      remove_note (i2, equiv2);
    }
}

/* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
   resulting insn in I1, and the corresponding bb in BB1.  At the head of a
   bb, if there is a predecessor bb that reaches this bb via fallthru, and
   FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
   DID_FALLTHRU.  Otherwise, stops at the head of the bb.  */

static void
walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
		       bool *did_fallthru)
{
  edge fallthru;

  *did_fallthru = false;

  /* Ignore notes.  */
  while (!NONDEBUG_INSN_P (*i1))
    {
      if (*i1 != BB_HEAD (*bb1))
	{
	  *i1 = PREV_INSN (*i1);
	  continue;
	}

      if (!follow_fallthru)
	return;

      fallthru = find_fallthru_edge ((*bb1)->preds);
      if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
	  || !single_succ_p (fallthru->src))
	return;

      *bb1 = fallthru->src;
      *i1 = BB_END (*bb1);
      *did_fallthru = true;
    }
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence that is either equivalent, or allows forward or backward
   replacement.  Store the first insns for that sequence in *F1 and *F2 and
   return the sequence length.

   DIR_P indicates the allowed replacement direction on function entry, and
   the actual replacement direction on function exit.  If NULL, only
   equivalent sequences are allowed.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

31ce8a53 1317int
da5477a9
DM
1318flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
1319 rtx_insn **f2, enum replace_direction *dir_p)
c2fc5456 1320{
da5477a9 1321 rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
c2fc5456 1322 int ninsns = 0;
472c95f5 1323 enum replace_direction dir, last_dir, afterlast_dir;
823918ae 1324 bool follow_fallthru, did_fallthru;
472c95f5
TV
1325
1326 if (dir_p)
1327 dir = *dir_p;
1328 else
1329 dir = dir_both;
1330 afterlast_dir = dir;
1331 last_dir = afterlast_dir;
c2fc5456
R
1332
1333 /* Skip simple jumps at the end of the blocks. Complex jumps still
1334 need to be compared for equivalence, which we'll do below. */
1335
1336 i1 = BB_END (bb1);
da5477a9 1337 last1 = afterlast1 = last2 = afterlast2 = NULL;
c2fc5456
R
1338 if (onlyjump_p (i1)
1339 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1340 {
1341 last1 = i1;
1342 i1 = PREV_INSN (i1);
1343 }
1344
1345 i2 = BB_END (bb2);
1346 if (onlyjump_p (i2)
1347 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1348 {
1349 last2 = i2;
a0cbe71e
JJ
1350 /* Count everything except for unconditional jump as insn.
1351 Don't count any jumps if dir_p is NULL. */
1352 if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
c2fc5456
R
1353 ninsns++;
1354 i2 = PREV_INSN (i2);
1355 }
1356
1357 while (true)
1358 {
823918ae
TV
1359 /* In the following example, we can replace all jumps to C by jumps to A.
1360
1361 This removes 4 duplicate insns.
1362 [bb A] insn1 [bb C] insn1
1363 insn2 insn2
1364 [bb B] insn3 insn3
1365 insn4 insn4
1366 jump_insn jump_insn
1367
1368 We could also replace all jumps to A by jumps to C, but that leaves B
1369 alive, and removes only 2 duplicate insns. In a subsequent crossjump
1370 step, all jumps to B would be replaced with jumps to the middle of C,
1371 achieving the same result with more effort.
1372 So we allow only the first possibility, which means that we don't allow
1373 fallthru in the block that's being replaced. */
1374
1375 follow_fallthru = dir_p && dir != dir_forward;
1376 walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1377 if (did_fallthru)
1378 dir = dir_backward;
1379
1380 follow_fallthru = dir_p && dir != dir_backward;
1381 walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1382 if (did_fallthru)
1383 dir = dir_forward;
c2fc5456
R
1384
1385 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1386 break;
1387
472c95f5
TV
1388 dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1389 if (dir == dir_none || (!dir_p && dir != dir_both))
c2fc5456
R
1390 break;
1391
1392 merge_memattrs (i1, i2);
1393
1394 /* Don't begin a cross-jump with a NOTE insn. */
1395 if (INSN_P (i1))
1396 {
31ce8a53 1397 merge_notes (i1, i2);
c2fc5456
R
1398
1399 afterlast1 = last1, afterlast2 = last2;
1400 last1 = i1, last2 = i2;
472c95f5
TV
1401 afterlast_dir = last_dir;
1402 last_dir = dir;
a0cbe71e 1403 if (active_insn_p (i1))
2a562b0a 1404 ninsns++;
c2fc5456
R
1405 }
1406
1407 i1 = PREV_INSN (i1);
1408 i2 = PREV_INSN (i2);
1409 }
1410
c2fc5456
R
1411 /* Don't allow the insn after a compare to be shared by
1412 cross-jumping unless the compare is also shared. */
618f4073
TS
1413 if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1414 && ! sets_cc0_p (last1))
472c95f5 1415 last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
c2fc5456
R
1416
1417 /* Include preceding notes and labels in the cross-jump. One,
1418 this may bring us to the head of the blocks as requested above.
1419 Two, it keeps line number notes as matched as may be. */
1420 if (ninsns)
1421 {
823918ae 1422 bb1 = BLOCK_FOR_INSN (last1);
b5b8b0ac 1423 while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
c2fc5456
R
1424 last1 = PREV_INSN (last1);
1425
1426 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1427 last1 = PREV_INSN (last1);
1428
823918ae 1429 bb2 = BLOCK_FOR_INSN (last2);
b5b8b0ac 1430 while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
c2fc5456
R
1431 last2 = PREV_INSN (last2);
1432
1433 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1434 last2 = PREV_INSN (last2);
1435
1436 *f1 = last1;
1437 *f2 = last2;
1438 }
1439
472c95f5
TV
1440 if (dir_p)
1441 *dir_p = last_dir;
c2fc5456
R
1442 return ninsns;
1443}

/* Like flow_find_cross_jump, except start looking for a matching sequence
   from the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
   nonzero, only count active insns.  */

int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
				  rtx_insn **f2, int stop_after)
{
  rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  last1 = beforelast1 = last2 = beforelast2 = NULL;

  while (true)
    {
      /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
	{
	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i1 = NEXT_INSN (i1);
	}

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
	{
	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i2 = NEXT_INSN (i2);
	}

      if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
	break;

      if (NOTE_P (i1) || NOTE_P (i2)
	  || JUMP_P (i1) || JUMP_P (i2))
	break;

      /* A sanity check to make sure we're not merging insns with different
	 effects on EH.  If only one of them ends a basic block, it shouldn't
	 have an EH edge; if both end a basic block, there should be the same
	 number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
	   && nehedges1 > 0)
	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
	      && nehedges2 > 0)
	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
	      && nehedges1 != nehedges2))
	break;

      if (old_insns_match_p (0, i1, i2) != dir_both)
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  beforelast1 = last1, beforelast2 = last2;
	  last1 = i1, last2 = i2;
	  if (!stop_after || active_insn_p (i1))
	    ninsns++;
	}

      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
	  || (stop_after > 0 && ninsns == stop_after))
	break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);
    }

  /* Don't allow a compare to be shared by cross-jumping unless the insn
     after the compare is also shared.  */
  if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
      && sets_cc0_p (last1))
    last1 = beforelast1, last2 = beforelast2, ninsns--;

  if (ninsns)
    {
      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */

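/* For example, a block ending in "if (x < y) goto A;" with fallthru to B
   matches a block ending in "if (x >= y) goto B;" with fallthru to A:
   the destinations are swapped and the comparison codes are reversed,
   which the REVERSE handling below accounts for.  */
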
1554static bool
c2fc5456 1555outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
402209ff 1556{
0dd0e980
JH
1557 int nehedges1 = 0, nehedges2 = 0;
1558 edge fallthru1 = 0, fallthru2 = 0;
1559 edge e1, e2;
628f6a4e 1560 edge_iterator ei;
0dd0e980 1561
6626665f 1562 /* If we performed shrink-wrapping, edges to the exit block can
484db665
BS
1563 only be distinguished for JUMP_INSNs. The two paths may differ in
1564 whether they went through the prologue. Sibcalls are fine, we know
1565 that we either didn't need or inserted an epilogue before them. */
1566 if (crtl->shrink_wrapped
fefa31b5
DM
1567 && single_succ_p (bb1)
1568 && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
484db665
BS
1569 && !JUMP_P (BB_END (bb1))
1570 && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1571 return false;
1572
c04cf67b
RH
1573 /* If BB1 has only one successor, we may be looking at either an
1574 unconditional jump, or a fake edge to exit. */
c5cbcccf
ZD
1575 if (single_succ_p (bb1)
1576 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
4b4bf941 1577 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
c5cbcccf
ZD
1578 return (single_succ_p (bb2)
1579 && (single_succ_edge (bb2)->flags
1580 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
4b4bf941 1581 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
402209ff
JH
1582
1583 /* Match conditional jumps - this may get tricky when fallthru and branch
1584 edges are crossed. */
628f6a4e 1585 if (EDGE_COUNT (bb1->succs) == 2
a813c111
SB
1586 && any_condjump_p (BB_END (bb1))
1587 && onlyjump_p (BB_END (bb1)))
402209ff 1588 {
c2fc5456
R
1589 edge b1, f1, b2, f2;
1590 bool reverse, match;
1591 rtx set1, set2, cond1, cond2;
1592 enum rtx_code code1, code2;
1593
628f6a4e 1594 if (EDGE_COUNT (bb2->succs) != 2
a813c111
SB
1595 || !any_condjump_p (BB_END (bb2))
1596 || !onlyjump_p (BB_END (bb2)))
0a2ed1f1 1597 return false;
c2fc5456
R
1598
1599 b1 = BRANCH_EDGE (bb1);
1600 b2 = BRANCH_EDGE (bb2);
1601 f1 = FALLTHRU_EDGE (bb1);
1602 f2 = FALLTHRU_EDGE (bb2);
1603
1604 /* Get around possible forwarders on fallthru edges. Other cases
c22cacf3 1605 should be optimized out already. */
c2fc5456
R
1606 if (FORWARDER_BLOCK_P (f1->dest))
1607 f1 = single_succ_edge (f1->dest);
1608
1609 if (FORWARDER_BLOCK_P (f2->dest))
1610 f2 = single_succ_edge (f2->dest);
1611
1612 /* To simplify use of this function, return false if there are
1613 unneeded forwarder blocks. These will get eliminated later
1614 during cleanup_cfg. */
1615 if (FORWARDER_BLOCK_P (f1->dest)
1616 || FORWARDER_BLOCK_P (f2->dest)
1617 || FORWARDER_BLOCK_P (b1->dest)
1618 || FORWARDER_BLOCK_P (b2->dest))
1619 return false;
1620
1621 if (f1->dest == f2->dest && b1->dest == b2->dest)
1622 reverse = false;
1623 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1624 reverse = true;
1625 else
1626 return false;
1627
1628 set1 = pc_set (BB_END (bb1));
1629 set2 = pc_set (BB_END (bb2));
1630 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1631 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1632 reverse = !reverse;
1633
1634 cond1 = XEXP (SET_SRC (set1), 0);
1635 cond2 = XEXP (SET_SRC (set2), 0);
1636 code1 = GET_CODE (cond1);
1637 if (reverse)
1638 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1639 else
1640 code2 = GET_CODE (cond2);
1641
1642 if (code2 == UNKNOWN)
1643 return false;
1644
1645 /* Verify codes and operands match. */
1646 match = ((code1 == code2
1647 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1648 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1649 || (code1 == swap_condition (code2)
1650 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1651 XEXP (cond2, 0))
1652 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1653 XEXP (cond2, 1))));
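 /* For example, a jump on "a < b" matches a jump on "b > a": the codes
    are related by swap_condition and the operands appear exchanged.  */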
1654
1655 /* If we return true, we will join the blocks. Which means that
1656 we will only have one branch prediction bit to work with. Thus
1657 we require the existing branches to have probabilities that are
1658 roughly similar. */
1659 if (match
efd8f750
JH
1660 && optimize_bb_for_speed_p (bb1)
1661 && optimize_bb_for_speed_p (bb2))
c2fc5456
R
1662 {
1663 int prob2;
1664
1665 if (b1->dest == b2->dest)
1666 prob2 = b2->probability;
1667 else
1668 /* Do not use f2 probability as f2 may be forwarded. */
1669 prob2 = REG_BR_PROB_BASE - b2->probability;
1670
1671 /* Fail if the difference in probabilities is greater than 50%.
1672 This rules out two well-predicted branches with opposite
1673 outcomes. */
1674 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1675 {
1676 if (dump_file)
1677 fprintf (dump_file,
1678 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1679 bb1->index, bb2->index, b1->probability, prob2);
1680
1681 return false;
1682 }
1683 }
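 /* For instance, with REG_BR_PROB_BASE == 10000, branches taken with
    probabilities 9000 (90%) and 2000 (20%) differ by 7000 > 5000 and
    are therefore not merged.  */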
1684
1685 if (dump_file && match)
1686 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1687 bb1->index, bb2->index);
1688
1689 return match;
402209ff
JH
1690 }
1691
09da1532 1692 /* Generic case - we are seeing a computed jump, table jump or trapping
0dd0e980
JH
1693 instruction. */
1694
39811184
JZ
1695 /* Check whether there are tablejumps at the end of BB1 and BB2.
1696 Return true if they are identical. */
1697 {
1698 rtx label1, label2;
8942ee0f 1699 rtx_jump_table_data *table1, *table2;
39811184 1700
a813c111
SB
1701 if (tablejump_p (BB_END (bb1), &label1, &table1)
1702 && tablejump_p (BB_END (bb2), &label2, &table2)
39811184
JZ
1703 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1704 {
1705 /* The labels should never be the same rtx. If they really are the same,
1706 the jump tables are the same too. So disable crossjumping of blocks BB1
1707 and BB2 because when deleting the common insns at the end of BB1
6de9cd9a 1708 by delete_basic_block () the jump table would be deleted too. */
4af16369 1709 /* If LABEL2 is referenced in BB1->END do not do anything
39811184
JZ
1710 because we would lose information when replacing
1711 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
a813c111 1712 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
39811184
JZ
1713 {
1714 /* Set IDENTICAL to true when the tables are identical. */
1715 bool identical = false;
1716 rtx p1, p2;
1717
1718 p1 = PATTERN (table1);
1719 p2 = PATTERN (table2);
1720 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1721 {
1722 identical = true;
1723 }
1724 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1725 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1726 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1727 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1728 {
1729 int i;
1730
1731 identical = true;
1732 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1733 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1734 identical = false;
1735 }
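 /* An ADDR_VEC holds absolute label references, so rtx_equal_p on the
    whole pattern suffices; an ADDR_DIFF_VEC holds differences from a
    base label, so its min and max operands and each vector element are
    compared individually.  */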
1736
c2fc5456 1737 if (identical)
39811184 1738 {
39811184
JZ
1739 bool match;
1740
c2fc5456 1741 /* Temporarily replace references to LABEL1 with LABEL2
39811184 1742 in BB1->END so that we can compare the instructions. */
a2b7026c 1743 replace_label_in_insn (BB_END (bb1), label1, label2, false);
39811184 1744
472c95f5
TV
1745 match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1746 == dir_both);
c263766c
RH
1747 if (dump_file && match)
1748 fprintf (dump_file,
39811184
JZ
1749 "Tablejumps in bb %i and %i match.\n",
1750 bb1->index, bb2->index);
1751
c2fc5456
R
1752 /* Set the original label in BB1->END because when deleting
1753 a block whose end is a tablejump, the tablejump referenced
1754 from the instruction is deleted too. */
a2b7026c 1755 replace_label_in_insn (BB_END (bb1), label2, label1, false);
c2fc5456 1756
39811184
JZ
1757 return match;
1758 }
1759 }
1760 return false;
1761 }
1762 }
39811184 1763
d41d6122
TJ
1764 /* Find the last non-debug non-note instruction in each bb, except
1765 stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1766 handles that case specially. old_insns_match_p does not handle
1767 other types of instruction notes. */
da5477a9
DM
1768 rtx_insn *last1 = BB_END (bb1);
1769 rtx_insn *last2 = BB_END (bb2);
d41d6122
TJ
1770 while (!NOTE_INSN_BASIC_BLOCK_P (last1)
1771 && (DEBUG_INSN_P (last1) || NOTE_P (last1)))
1772 last1 = PREV_INSN (last1);
1773 while (!NOTE_INSN_BASIC_BLOCK_P (last2)
1774 && (DEBUG_INSN_P (last2) || NOTE_P (last2)))
1775 last2 = PREV_INSN (last2);
1776 gcc_assert (last1 && last2);
1777
0dd0e980 1778 /* First ensure that the instructions match. There may be many outgoing
39811184 1779 edges, so checking the instructions first is generally cheaper. */
206604dc 1780 if (old_insns_match_p (mode, last1, last2) != dir_both)
0dd0e980
JH
1781 return false;
1782
1783 /* Search the outgoing edges, ensure that the counts do match, find possible
1784 fallthru and exception handling edges since these need more
1785 validation. */
628f6a4e
BE
1786 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1787 return false;
1788
206604dc 1789 bool nonfakeedges = false;
628f6a4e 1790 FOR_EACH_EDGE (e1, ei, bb1->succs)
0dd0e980 1791 {
628f6a4e 1792 e2 = EDGE_SUCC (bb2, ei.index);
c22cacf3 1793
206604dc
JJ
1794 if ((e1->flags & EDGE_FAKE) == 0)
1795 nonfakeedges = true;
1796
0dd0e980
JH
1797 if (e1->flags & EDGE_EH)
1798 nehedges1++;
5f0d2358 1799
0dd0e980
JH
1800 if (e2->flags & EDGE_EH)
1801 nehedges2++;
5f0d2358 1802
0dd0e980
JH
1803 if (e1->flags & EDGE_FALLTHRU)
1804 fallthru1 = e1;
1805 if (e2->flags & EDGE_FALLTHRU)
1806 fallthru2 = e2;
1807 }
5f0d2358 1808
0dd0e980 1809 /* If the number of edges of each type does not match, fail. */
628f6a4e 1810 if (nehedges1 != nehedges2
5f0d2358 1811 || (fallthru1 != 0) != (fallthru2 != 0))
0dd0e980
JH
1812 return false;
1813
206604dc
JJ
1814 /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
1815 and the last real insn doesn't have REG_ARGS_SIZE note, don't
1816 attempt to optimize, as the two basic blocks might have different
1817 REG_ARGS_SIZE depths. For noreturn calls and unconditional
1818 traps there should be REG_ARGS_SIZE notes; they could be missing
1819 for __builtin_unreachable () uses though. */
1820 if (!nonfakeedges
1821 && !ACCUMULATE_OUTGOING_ARGS
1822 && (!INSN_P (last1)
1823 || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1824 return false;
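 /* Illustration (assuming push-based argument passing): one block might
    end with its call arguments still pushed and the other after popping
    them; without matching REG_ARGS_SIZE notes the merged tail could be
    reached with two different pending stack adjustments.  */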
1825
0dd0e980
JH
1826 /* Fallthru edges must be forwarded to the same destination. */
1827 if (fallthru1)
1828 {
1829 basic_block d1 = (forwarder_block_p (fallthru1->dest)
c5cbcccf 1830 ? single_succ (fallthru1->dest): fallthru1->dest);
0dd0e980 1831 basic_block d2 = (forwarder_block_p (fallthru2->dest)
c5cbcccf 1832 ? single_succ (fallthru2->dest): fallthru2->dest);
5f0d2358 1833
0dd0e980
JH
1834 if (d1 != d2)
1835 return false;
1836 }
5f0d2358 1837
5f77fbd4
JJ
1838 /* Ensure the same EH region. */
1839 {
a813c111
SB
1840 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1841 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
5f0d2358 1842
5f77fbd4
JJ
1843 if (!n1 && n2)
1844 return false;
1845
1846 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1847 return false;
1848 }
5f0d2358 1849
38109dab
GL
1850 /* The same checks as in try_crossjump_to_edge. They are required for the
1851 RTL version of sequence abstraction. */
1852 FOR_EACH_EDGE (e1, ei, bb2->succs)
1853 {
1854 edge e2;
1855 edge_iterator ei;
1856 basic_block d1 = e1->dest;
1857
1858 if (FORWARDER_BLOCK_P (d1))
1859 d1 = EDGE_SUCC (d1, 0)->dest;
1860
1861 FOR_EACH_EDGE (e2, ei, bb1->succs)
1862 {
1863 basic_block d2 = e2->dest;
1864 if (FORWARDER_BLOCK_P (d2))
1865 d2 = EDGE_SUCC (d2, 0)->dest;
1866 if (d1 == d2)
1867 break;
1868 }
1869
1870 if (!e2)
1871 return false;
1872 }
1873
0dd0e980 1874 return true;
402209ff
JH
1875}
1876
38109dab
GL
1877/* Returns true if BB basic block has a preserve label. */
1878
1879static bool
1880block_has_preserve_label (basic_block bb)
1881{
1882 return (bb
1883 && block_label (bb)
1884 && LABEL_PRESERVE_P (block_label (bb)));
1885}
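/* Such labels must be kept even if no jump in the insn stream targets
   them (for instance when their address is taken), so an abnormal edge
   to a block with a preserved label must not be redirected away.  */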
1886
402209ff
JH
1887/* E1 and E2 are edges with the same destination block. Search their
1888 predecessors for common code. If found, redirect control flow from
bf22920b
TV
1889 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1890 or the other way around (dir_backward). DIR specifies the allowed
1891 replacement direction. */
402209ff
JH
1892
1893static bool
bf22920b
TV
1894try_crossjump_to_edge (int mode, edge e1, edge e2,
1895 enum replace_direction dir)
402209ff 1896{
c2fc5456 1897 int nmatch;
402209ff 1898 basic_block src1 = e1->src, src2 = e2->src;
39587bb9 1899 basic_block redirect_to, redirect_from, to_remove;
823918ae 1900 basic_block osrc1, osrc2, redirect_edges_to, tmp;
da5477a9 1901 rtx_insn *newpos1, *newpos2;
402209ff 1902 edge s;
628f6a4e 1903 edge_iterator ei;
c2fc5456 1904
da5477a9 1905 newpos1 = newpos2 = NULL;
6de9cd9a 1906
750054a2 1907 /* If we have partitioned hot/cold basic blocks, it is a bad idea
c22cacf3 1908 to try this optimization.
8e8d5162
CT
1909
1910 Basic block partitioning may result in some jumps that appear to
c22cacf3
MS
1911 be optimizable (or blocks that appear to be mergeable), but which really
1912 must be left untouched (they are required to make it safely across
1913 partition boundaries). See the comments at the top of
8e8d5162 1914 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
750054a2 1915
af205f67 1916 if (crtl->has_bb_partition && reload_completed)
750054a2
CT
1917 return false;
1918
402209ff
JH
1919 /* Search backward through forwarder blocks. We don't need to worry
1920 about multiple entry or chained forwarders, as they will be optimized
1921 away. We do this to look past the unconditional jump following a
1922 conditional jump that is required due to the current CFG shape. */
c5cbcccf 1923 if (single_pred_p (src1)
635559ab 1924 && FORWARDER_BLOCK_P (src1))
c5cbcccf 1925 e1 = single_pred_edge (src1), src1 = e1->src;
5f0d2358 1926
c5cbcccf 1927 if (single_pred_p (src2)
635559ab 1928 && FORWARDER_BLOCK_P (src2))
c5cbcccf 1929 e2 = single_pred_edge (src2), src2 = e2->src;
402209ff
JH
1930
1931 /* Nothing to do if we reach ENTRY, or a common source block. */
fefa31b5
DM
1932 if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2
1933 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
402209ff
JH
1934 return false;
1935 if (src1 == src2)
1936 return false;
1937
1938 /* Seeing more than one forwarder block would confuse us later... */
635559ab 1939 if (FORWARDER_BLOCK_P (e1->dest)
c5cbcccf 1940 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
402209ff 1941 return false;
5f0d2358 1942
635559ab 1943 if (FORWARDER_BLOCK_P (e2->dest)
c5cbcccf 1944 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
402209ff
JH
1945 return false;
1946
1947 /* Likewise with dead code (possibly newly created by the other optimizations
1948 of cfg_cleanup). */
628f6a4e 1949 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
402209ff
JH
1950 return false;
1951
402209ff 1952 /* Look for the common insn sequence, part the first ... */
c2fc5456 1953 if (!outgoing_edges_match (mode, src1, src2))
402209ff
JH
1954 return false;
1955
1956 /* ... and part the second. */
472c95f5 1957 nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
12183e0f 1958
823918ae
TV
1959 osrc1 = src1;
1960 osrc2 = src2;
1961 if (newpos1 != NULL_RTX)
1962 src1 = BLOCK_FOR_INSN (newpos1);
1963 if (newpos2 != NULL_RTX)
1964 src2 = BLOCK_FOR_INSN (newpos2);
1965
bf22920b
TV
1966 if (dir == dir_backward)
1967 {
1968#define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
1969 SWAP (basic_block, osrc1, osrc2);
1970 SWAP (basic_block, src1, src2);
1971 SWAP (edge, e1, e2);
da5477a9 1972 SWAP (rtx_insn *, newpos1, newpos2);
bf22920b
TV
1973#undef SWAP
1974 }
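 /* After the swap the rest of this function can proceed as in the
    dir_forward case: the matched tail is cut from SRC1 and control is
    redirected into (a split of) SRC2.  */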
1975
12183e0f
PH
1976 /* Don't proceed with the crossjump unless we found a sufficient number
1977 of matching instructions or the 'from' block was totally matched
1978 (such that its predecessors will hopefully be redirected and the
1979 block removed). */
c2fc5456
R
1980 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1981 && (newpos1 != BB_HEAD (src1)))
7d22e898 1982 return false;
402209ff 1983
75c40d56 1984 /* Avoid deleting the preserve label when redirecting ABNORMAL edges. */
38109dab
GL
1985 if (block_has_preserve_label (e1->dest)
1986 && (e1->flags & EDGE_ABNORMAL))
1987 return false;
1988
39811184
JZ
1989 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1990 will be deleted.
1991 If we have tablejumps at the end of SRC1 and SRC2
1992 they have already been compared for equivalence in outgoing_edges_match ()
1993 so replace the references to TABLE1 by references to TABLE2. */
21c0a521 1994 {
39811184 1995 rtx label1, label2;
8942ee0f 1996 rtx_jump_table_data *table1, *table2;
39811184 1997
823918ae
TV
1998 if (tablejump_p (BB_END (osrc1), &label1, &table1)
1999 && tablejump_p (BB_END (osrc2), &label2, &table2)
39811184
JZ
2000 && label1 != label2)
2001 {
da5477a9 2002 rtx_insn *insn;
39811184
JZ
2003
2004 /* Replace references to LABEL1 with LABEL2. */
39811184
JZ
2005 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2006 {
2007 /* Do not replace the label in SRC1->END because when deleting
2008 a block whose end is a tablejump, the tablejump referenced
2009 from the instruction is deleted too. */
823918ae 2010 if (insn != BB_END (osrc1))
a2b7026c 2011 replace_label_in_insn (insn, label1, label2, true);
39811184
JZ
2012 }
2013 }
21c0a521 2014 }
10d6c0d0 2015
b604fe9b
SB
2016 /* Avoid splitting if possible. We must always split when SRC2 has
2017 EH predecessor edges, or we may end up with basic blocks with both
2018 normal and EH predecessor edges. */
c2fc5456 2019 if (newpos2 == BB_HEAD (src2)
b604fe9b 2020 && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
402209ff
JH
2021 redirect_to = src2;
2022 else
2023 {
c2fc5456 2024 if (newpos2 == BB_HEAD (src2))
b604fe9b
SB
2025 {
2026 /* Skip possible basic block header. */
c2fc5456
R
2027 if (LABEL_P (newpos2))
2028 newpos2 = NEXT_INSN (newpos2);
b5b8b0ac
AO
2029 while (DEBUG_INSN_P (newpos2))
2030 newpos2 = NEXT_INSN (newpos2);
c2fc5456
R
2031 if (NOTE_P (newpos2))
2032 newpos2 = NEXT_INSN (newpos2);
b5b8b0ac
AO
2033 while (DEBUG_INSN_P (newpos2))
2034 newpos2 = NEXT_INSN (newpos2);
b604fe9b
SB
2035 }
2036
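 /* Split SRC2 before the first matched insn; the new block holding the
    matched tail becomes the common code shared by both paths.  */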
c263766c
RH
2037 if (dump_file)
2038 fprintf (dump_file, "Splitting bb %i before %i insns\n",
0b17ab2f 2039 src2->index, nmatch);
c2fc5456 2040 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
402209ff
JH
2041 }
2042
c263766c 2043 if (dump_file)
c2fc5456
R
2044 fprintf (dump_file,
2045 "Cross jumping from bb %i to bb %i; %i common insns\n",
2046 src1->index, src2->index, nmatch);
402209ff 2047
6fc0bb99 2048 /* We may have some registers visible through the block. */
6fb5fa3c 2049 df_set_bb_dirty (redirect_to);
402209ff 2050
823918ae
TV
2051 if (osrc2 == src2)
2052 redirect_edges_to = redirect_to;
2053 else
2054 redirect_edges_to = osrc2;
2055
402209ff 2056 /* Recompute the frequencies and counts of outgoing edges. */
823918ae 2057 FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
402209ff
JH
2058 {
2059 edge s2;
628f6a4e 2060 edge_iterator ei;
402209ff
JH
2061 basic_block d = s->dest;
2062
635559ab 2063 if (FORWARDER_BLOCK_P (d))
c5cbcccf 2064 d = single_succ (d);
5f0d2358 2065
628f6a4e 2066 FOR_EACH_EDGE (s2, ei, src1->succs)
402209ff
JH
2067 {
2068 basic_block d2 = s2->dest;
635559ab 2069 if (FORWARDER_BLOCK_P (d2))
c5cbcccf 2070 d2 = single_succ (d2);
402209ff
JH
2071 if (d == d2)
2072 break;
2073 }
5f0d2358 2074
402209ff
JH
2075 s->count += s2->count;
2076
2077 /* Take care to update possible forwarder blocks. We verified
c22cacf3
MS
2078 that there is no more than one in the chain, so we can't run
2079 into an infinite loop. */
635559ab 2080 if (FORWARDER_BLOCK_P (s->dest))
402209ff 2081 {
c5cbcccf 2082 single_succ_edge (s->dest)->count += s2->count;
402209ff
JH
2083 s->dest->count += s2->count;
2084 s->dest->frequency += EDGE_FREQUENCY (s);
2085 }
5f0d2358 2086
635559ab 2087 if (FORWARDER_BLOCK_P (s2->dest))
402209ff 2088 {
c5cbcccf
ZD
2089 single_succ_edge (s2->dest)->count -= s2->count;
2090 if (single_succ_edge (s2->dest)->count < 0)
2091 single_succ_edge (s2->dest)->count = 0;
402209ff
JH
2092 s2->dest->count -= s2->count;
2093 s2->dest->frequency -= EDGE_FREQUENCY (s);
b446e5a2
JH
2094 if (s2->dest->frequency < 0)
2095 s2->dest->frequency = 0;
2096 if (s2->dest->count < 0)
2097 s2->dest->count = 0;
402209ff 2098 }
5f0d2358 2099
823918ae 2100 if (!redirect_edges_to->frequency && !src1->frequency)
402209ff
JH
2101 s->probability = (s->probability + s2->probability) / 2;
2102 else
5f0d2358 2103 s->probability
823918ae 2104 = ((s->probability * redirect_edges_to->frequency +
5f0d2358 2105 s2->probability * src1->frequency)
823918ae 2106 / (redirect_edges_to->frequency + src1->frequency));
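 /* I.e. the frequency-weighted average
    (p1 * freq1 + p2 * freq2) / (freq1 + freq2)
    of the two original edge probabilities.  */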
402209ff
JH
2107 }
2108
52982a97
EB
2109 /* Adjust count and frequency for the block. An earlier jump
2110 threading pass may have left the profile in an inconsistent
2111 state (see update_bb_profile_for_threading) so we must be
2112 prepared for overflows. */
823918ae
TV
2113 tmp = redirect_to;
2114 do
2115 {
2116 tmp->count += src1->count;
2117 tmp->frequency += src1->frequency;
2118 if (tmp->frequency > BB_FREQ_MAX)
2119 tmp->frequency = BB_FREQ_MAX;
2120 if (tmp == redirect_edges_to)
2121 break;
2122 tmp = find_fallthru_edge (tmp->succs)->dest;
2123 }
2124 while (true);
2125 update_br_prob_note (redirect_edges_to);
402209ff
JH
2126
2127 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
2128
c2fc5456
R
2129 /* Skip possible basic block header. */
2130 if (LABEL_P (newpos1))
2131 newpos1 = NEXT_INSN (newpos1);
b5b8b0ac
AO
2132
2133 while (DEBUG_INSN_P (newpos1))
2134 newpos1 = NEXT_INSN (newpos1);
2135
cd9c1ca8 2136 if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
c2fc5456
R
2137 newpos1 = NEXT_INSN (newpos1);
2138
b5b8b0ac
AO
2139 while (DEBUG_INSN_P (newpos1))
2140 newpos1 = NEXT_INSN (newpos1);
2141
c2fc5456 2142 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
c5cbcccf 2143 to_remove = single_succ (redirect_from);
402209ff 2144
c5cbcccf 2145 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
f470c378 2146 delete_basic_block (to_remove);
402209ff 2147
39587bb9 2148 update_forwarder_flag (redirect_from);
7cbd12b8
JJ
2149 if (redirect_to != src2)
2150 update_forwarder_flag (src2);
635559ab 2151
402209ff
JH
2152 return true;
2153}
2154
2155/* Search the predecessors of BB for common insn sequences. When found,
2156 share code between them by redirecting control flow. Return true if
2157 any changes were made. */
2158
2159static bool
d329e058 2160try_crossjump_bb (int mode, basic_block bb)
402209ff 2161{
628f6a4e 2162 edge e, e2, fallthru;
402209ff 2163 bool changed;
628f6a4e 2164 unsigned max, ix, ix2;
402209ff 2165
f63d1bf7 2166 /* Nothing to do if there are not at least two incoming edges. */
628f6a4e 2167 if (EDGE_COUNT (bb->preds) < 2)
402209ff
JH
2168 return false;
2169
bbcb0c05
SB
2170 /* Don't crossjump if this block ends in a computed jump and we are
2171 optimizing for size. */
efd8f750 2172 if (optimize_bb_for_size_p (bb)
fefa31b5 2173 && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
bbcb0c05
SB
2174 && computed_jump_p (BB_END (bb)))
2175 return false;
2176
750054a2
CT
2177 /* If we are partitioning hot/cold basic blocks, we don't want to
2178 mess up unconditional or indirect jumps that cross between hot
c22cacf3
MS
2179 and cold sections.
2180
8e8d5162 2181 Basic block partitioning may result in some jumps that appear to
c22cacf3
MS
2182 be optimizable (or blocks that appear to be mergeable), but which really
2183 must be left untouched (they are required to make it safely across
2184 partition boundaries). See the comments at the top of
8e8d5162
CT
2185 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
2186
c22cacf3
MS
2187 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
2188 BB_PARTITION (EDGE_PRED (bb, 1)->src)
87c8b4be 2189 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
750054a2
CT
2190 return false;
2191
402209ff
JH
2192 /* It is always cheapest to redirect a block that ends in a branch to
2193 a block that falls through into BB, as that adds no branches to the
2194 program. We'll try that combination first. */
5f24e0dc
RH
2195 fallthru = NULL;
2196 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
628f6a4e
BE
2197
2198 if (EDGE_COUNT (bb->preds) > max)
2199 return false;
2200
0fd4b31d 2201 fallthru = find_fallthru_edge (bb->preds);
402209ff
JH
2202
2203 changed = false;
0248bceb 2204 for (ix = 0; ix < EDGE_COUNT (bb->preds);)
402209ff 2205 {
0248bceb 2206 e = EDGE_PRED (bb, ix);
628f6a4e 2207 ix++;
402209ff 2208
c1e3e2d9
SB
2209 /* As noted above, first try with the fallthru predecessor (or, a
2210 fallthru predecessor if we are in cfglayout mode). */
402209ff
JH
2211 if (fallthru)
2212 {
2213 /* Don't combine the fallthru edge into anything else.
2214 If there is a match, we'll do it the other way around. */
2215 if (e == fallthru)
2216 continue;
7cf240d5
JH
2217 /* If nothing changed since the last attempt, there is nothing
2218 we can do. */
2219 if (!first_pass
4ec5d4f5
BS
2220 && !((e->src->flags & BB_MODIFIED)
2221 || (fallthru->src->flags & BB_MODIFIED)))
7cf240d5 2222 continue;
402209ff 2223
bf22920b 2224 if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
402209ff
JH
2225 {
2226 changed = true;
628f6a4e 2227 ix = 0;
402209ff
JH
2228 continue;
2229 }
2230 }
2231
2232 /* Non-obvious work limiting check: Recognize that we're going
2233 to call try_crossjump_bb on every basic block. So if we have
2234 two blocks with lots of outgoing edges (a switch) and they
2235 share lots of common destinations, then we would do the
2236 cross-jump check once for each common destination.
2237
2238 Now, if the blocks actually are cross-jump candidates, then
2239 all of their destinations will be shared. Which means that
2240 we only need to check them for cross-jump candidacy once. We
2241 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2242 choosing to do the check from the block for which the edge
2243 in question is the first successor of A. */
628f6a4e 2244 if (EDGE_SUCC (e->src, 0) != e)
402209ff
JH
2245 continue;
2246
0248bceb 2247 for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
402209ff 2248 {
0248bceb 2249 e2 = EDGE_PRED (bb, ix2);
402209ff
JH
2250
2251 if (e2 == e)
2252 continue;
2253
2254 /* We've already checked the fallthru edge above. */
2255 if (e2 == fallthru)
2256 continue;
2257
402209ff
JH
2258 /* The "first successor" check above only prevents multiple
2259 checks of crossjump(A,B). In order to prevent redundant
2260 checks of crossjump(B,A), require that A be the block
2261 with the lowest index. */
0b17ab2f 2262 if (e->src->index > e2->src->index)
402209ff
JH
2263 continue;
2264
7cf240d5
JH
2265 /* If nothing changed since the last attempt, there is nothing
2266 we can do. */
2267 if (!first_pass
4ec5d4f5
BS
2268 && !((e->src->flags & BB_MODIFIED)
2269 || (e2->src->flags & BB_MODIFIED)))
7cf240d5
JH
2270 continue;
2271
bf22920b
TV
2272 /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2273 direction. */
2274 if (try_crossjump_to_edge (mode, e, e2, dir_both))
402209ff
JH
2275 {
2276 changed = true;
628f6a4e 2277 ix = 0;
402209ff
JH
2278 break;
2279 }
2280 }
2281 }
2282
c1e3e2d9
SB
2283 if (changed)
2284 crossjumps_occured = true;
2285
402209ff
JH
2286 return changed;
2287}
2288
4ec5d4f5
BS
2289/* Search the successors of BB for common insn sequences. When found,
2290 share code between them by moving it across the basic block
2291 boundary. Return true if any changes were made. */
2292
2293static bool
2294try_head_merge_bb (basic_block bb)
2295{
2296 basic_block final_dest_bb = NULL;
2297 int max_match = INT_MAX;
2298 edge e0;
da5477a9 2299 rtx_insn **headptr, **currptr, **nextptr;
4ec5d4f5
BS
2300 bool changed, moveall;
2301 unsigned ix;
da5477a9 2302 rtx_insn *e0_last_head;
61aa0978
DM
2303 rtx cond;
2304 rtx_insn *move_before;
4ec5d4f5 2305 unsigned nedges = EDGE_COUNT (bb->succs);
da5477a9 2306 rtx_insn *jump = BB_END (bb);
4ec5d4f5
BS
2307 regset live, live_union;
2308
2309 /* Nothing to do if there are not at least two outgoing edges. */
2310 if (nedges < 2)
2311 return false;
2312
2313 /* Don't crossjump if this block ends in a computed jump and we are
2314 optimizing for size. */
2315 if (optimize_bb_for_size_p (bb)
fefa31b5 2316 && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
4ec5d4f5
BS
2317 && computed_jump_p (BB_END (bb)))
2318 return false;
2319
2320 cond = get_condition (jump, &move_before, true, false);
2321 if (cond == NULL_RTX)
43052d45 2322 {
618f4073 2323 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
43052d45
BS
2324 move_before = prev_nonnote_nondebug_insn (jump);
2325 else
43052d45
BS
2326 move_before = jump;
2327 }
4ec5d4f5
BS
2328
2329 for (ix = 0; ix < nedges; ix++)
fefa31b5 2330 if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4ec5d4f5
BS
2331 return false;
2332
2333 for (ix = 0; ix < nedges; ix++)
2334 {
2335 edge e = EDGE_SUCC (bb, ix);
2336 basic_block other_bb = e->dest;
2337
2338 if (df_get_bb_dirty (other_bb))
2339 {
2340 block_was_dirty = true;
2341 return false;
2342 }
2343
2344 if (e->flags & EDGE_ABNORMAL)
2345 return false;
2346
2347 /* Normally, all destination blocks must only be reachable from this
2348 block, i.e. they must have one incoming edge.
2349
2350 There is one special case we can handle, that of multiple consecutive
2351 jumps where the first jumps to one of the targets of the second jump.
2352 This happens frequently in switch statements for default labels.
2353 The structure is as follows:
2354 FINAL_DEST_BB
2355 ....
2356 if (cond) jump A;
2357 fall through
2358 BB
2359 jump with targets A, B, C, D...
2360 A
2361 has two incoming edges, from FINAL_DEST_BB and BB
2362
2363 In this case, we can try to move the insns through BB and into
2364 FINAL_DEST_BB. */
2365 if (EDGE_COUNT (other_bb->preds) != 1)
2366 {
2367 edge incoming_edge, incoming_bb_other_edge;
2368 edge_iterator ei;
2369
2370 if (final_dest_bb != NULL
2371 || EDGE_COUNT (other_bb->preds) != 2)
2372 return false;
2373
2374 /* We must be able to move the insns across the whole block. */
2375 move_before = BB_HEAD (bb);
2376 while (!NONDEBUG_INSN_P (move_before))
2377 move_before = NEXT_INSN (move_before);
2378
2379 if (EDGE_COUNT (bb->preds) != 1)
2380 return false;
2381 incoming_edge = EDGE_PRED (bb, 0);
2382 final_dest_bb = incoming_edge->src;
2383 if (EDGE_COUNT (final_dest_bb->succs) != 2)
2384 return false;
2385 FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2386 if (incoming_bb_other_edge != incoming_edge)
2387 break;
2388 if (incoming_bb_other_edge->dest != other_bb)
2389 return false;
2390 }
2391 }
2392
2393 e0 = EDGE_SUCC (bb, 0);
da5477a9 2394 e0_last_head = NULL;
4ec5d4f5
BS
2395 changed = false;
2396
2397 for (ix = 1; ix < nedges; ix++)
2398 {
2399 edge e = EDGE_SUCC (bb, ix);
da5477a9 2400 rtx_insn *e0_last, *e_last;
4ec5d4f5
BS
2401 int nmatch;
2402
2403 nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2404 &e0_last, &e_last, 0);
2405 if (nmatch == 0)
2406 return false;
2407
2408 if (nmatch < max_match)
2409 {
2410 max_match = nmatch;
2411 e0_last_head = e0_last;
2412 }
2413 }
2414
2415 /* If we matched an entire block, we probably have to avoid moving the
2416 last insn. */
2417 if (max_match > 0
2418 && e0_last_head == BB_END (e0->dest)
2419 && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2420 || control_flow_insn_p (e0_last_head)))
2421 {
2422 max_match--;
2423 if (max_match == 0)
2424 return false;
b59e0455
JJ
2425 do
2426 e0_last_head = prev_real_insn (e0_last_head);
2427 while (DEBUG_INSN_P (e0_last_head));
4ec5d4f5
BS
2428 }
2429
2430 if (max_match == 0)
2431 return false;
2432
2433 /* We must find a union of the live registers at each of the end points. */
2434 live = BITMAP_ALLOC (NULL);
2435 live_union = BITMAP_ALLOC (NULL);
2436
da5477a9
DM
2437 currptr = XNEWVEC (rtx_insn *, nedges);
2438 headptr = XNEWVEC (rtx_insn *, nedges);
2439 nextptr = XNEWVEC (rtx_insn *, nedges);
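 /* For each outgoing edge IX: HEADPTR[IX] is the first insn of the
    candidate range in its destination block, CURRPTR[IX] the last insn
    matched so far, and NEXTPTR[IX] the point at which to resume when
    only part of the range could be moved.  */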
4ec5d4f5
BS
2440
2441 for (ix = 0; ix < nedges; ix++)
2442 {
2443 int j;
2444 basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
da5477a9 2445 rtx_insn *head = BB_HEAD (merge_bb);
4ec5d4f5 2446
b59e0455
JJ
2447 while (!NONDEBUG_INSN_P (head))
2448 head = NEXT_INSN (head);
4ec5d4f5
BS
2449 headptr[ix] = head;
2450 currptr[ix] = head;
2451
2452 /* Compute the end point and live information. */
2453 for (j = 1; j < max_match; j++)
b59e0455
JJ
2454 do
2455 head = NEXT_INSN (head);
2456 while (!NONDEBUG_INSN_P (head));
4ec5d4f5
BS
2457 simulate_backwards_to_point (merge_bb, live, head);
2458 IOR_REG_SET (live_union, live);
2459 }
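 /* LIVE_UNION now holds (roughly) the union of the registers live at the
    end point in each successor; can_move_insns_across uses it below to
    check that hoisting the insns into BB clobbers none of them.  */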
2460
2461 /* If we're moving across two blocks, verify the validity of the
2462 first move, then adjust the target and let the loop below deal
2463 with the final move. */
2464 if (final_dest_bb != NULL)
2465 {
61aa0978 2466 rtx_insn *move_upto;
4ec5d4f5
BS
2467
2468 moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2469 jump, e0->dest, live_union,
2470 NULL, &move_upto);
2471 if (!moveall)
2472 {
2473 if (move_upto == NULL_RTX)
2474 goto out;
2475
2476 while (e0_last_head != move_upto)
2477 {
2478 df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2479 live_union);
2480 e0_last_head = PREV_INSN (e0_last_head);
2481 }
2482 }
2483 if (e0_last_head == NULL_RTX)
2484 goto out;
2485
2486 jump = BB_END (final_dest_bb);
2487 cond = get_condition (jump, &move_before, true, false);
2488 if (cond == NULL_RTX)
43052d45 2489 {
618f4073 2490 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
43052d45
BS
2491 move_before = prev_nonnote_nondebug_insn (jump);
2492 else
43052d45
BS
2493 move_before = jump;
2494 }
4ec5d4f5
BS
2495 }
2496
2497 do
2498 {
61aa0978 2499 rtx_insn *move_upto;
4ec5d4f5
BS
2500 moveall = can_move_insns_across (currptr[0], e0_last_head,
2501 move_before, jump, e0->dest, live_union,
2502 NULL, &move_upto);
2503 if (!moveall && move_upto == NULL_RTX)
2504 {
2505 if (jump == move_before)
2506 break;
2507
2508 /* Try again, using a different insertion point. */
2509 move_before = jump;
2510
4ec5d4f5
BS
2511 /* Don't try moving before a cc0 user, as that may invalidate
2512 the cc0. */
618f4073 2513 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
4ec5d4f5 2514 break;
4ec5d4f5
BS
2515
2516 continue;
2517 }
2518
2519 if (final_dest_bb && !moveall)
2520 /* We haven't checked whether a partial move would be OK for the first
2521 move, so we have to fail this case. */
2522 break;
2523
2524 changed = true;
2525 for (;;)
2526 {
2527 if (currptr[0] == move_upto)
2528 break;
2529 for (ix = 0; ix < nedges; ix++)
2530 {
da5477a9 2531 rtx_insn *curr = currptr[ix];
4ec5d4f5
BS
2532 do
2533 curr = NEXT_INSN (curr);
2534 while (!NONDEBUG_INSN_P (curr));
2535 currptr[ix] = curr;
2536 }
2537 }
2538
2539 /* If we can't currently move all of the identical insns, remember
2540 each insn after the range that we'll merge. */
2541 if (!moveall)
2542 for (ix = 0; ix < nedges; ix++)
2543 {
da5477a9 2544 rtx_insn *curr = currptr[ix];
4ec5d4f5
BS
2545 do
2546 curr = NEXT_INSN (curr);
2547 while (!NONDEBUG_INSN_P (curr));
2548 nextptr[ix] = curr;
2549 }
2550
2551 reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2552 df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2553 if (final_dest_bb != NULL)
2554 df_set_bb_dirty (final_dest_bb);
2555 df_set_bb_dirty (bb);
2556 for (ix = 1; ix < nedges; ix++)
2557 {
2558 df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2559 delete_insn_chain (headptr[ix], currptr[ix], false);
2560 }
2561 if (!moveall)
2562 {
2563 if (jump == move_before)
2564 break;
2565
2566 /* For the unmerged insns, try a different insertion point. */
2567 move_before = jump;
2568
4ec5d4f5
BS
2569 /* Don't try moving before a cc0 user, as that may invalidate
2570 the cc0. */
618f4073 2571 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
4ec5d4f5 2572 break;
4ec5d4f5
BS
2573
2574 for (ix = 0; ix < nedges; ix++)
2575 currptr[ix] = headptr[ix] = nextptr[ix];
2576 }
2577 }
2578 while (!moveall);
2579
2580 out:
2581 free (currptr);
2582 free (headptr);
2583 free (nextptr);
2584
2585 crossjumps_occured |= changed;
2586
2587 return changed;
2588}
2589
7752e522
JJ
2590/* Return true if BB contains just a bb note, or a bb note followed
2591 only by DEBUG_INSNs. */
2592
2593static bool
2594trivially_empty_bb_p (basic_block bb)
2595{
da5477a9 2596 rtx_insn *insn = BB_END (bb);
7752e522
JJ
2597
2598 while (1)
2599 {
2600 if (insn == BB_HEAD (bb))
2601 return true;
2602 if (!DEBUG_INSN_P (insn))
2603 return false;
2604 insn = PREV_INSN (insn);
2605 }
2606}
2607
402209ff
JH
2608/* Do simple CFG optimizations - basic block merging, simplification of jump
2609 instructions, etc. Return nonzero if changes were made. */
2610
2611static bool
d329e058 2612try_optimize_cfg (int mode)
402209ff 2613{
402209ff
JH
2614 bool changed_overall = false;
2615 bool changed;
2616 int iterations = 0;
ec3ae3da 2617 basic_block bb, b, next;
402209ff 2618
6fb5fa3c 2619 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
38c1593d
JH
2620 clear_bb_flags ();
2621
c1e3e2d9
SB
2622 crossjumps_occured = false;
2623
11cd3bed 2624 FOR_EACH_BB_FN (bb, cfun)
2dd2d53e
SB
2625 update_forwarder_flag (bb);
2626
245f1bfa 2627 if (! targetm.cannot_modify_jumps_p ())
402209ff 2628 {
7cf240d5 2629 first_pass = true;
e4ec2cac
AO
2630 /* Attempt to merge blocks as made possible by edge removal. If
2631 a block has only one successor, and the successor has only
2632 one predecessor, they may be combined. */
2633 do
402209ff 2634 {
4ec5d4f5 2635 block_was_dirty = false;
e4ec2cac
AO
2636 changed = false;
2637 iterations++;
2638
c263766c
RH
2639 if (dump_file)
2640 fprintf (dump_file,
e4ec2cac
AO
2641 "\n\ntry_optimize_cfg iteration %i\n\n",
2642 iterations);
402209ff 2643
fefa31b5
DM
2644 for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
2645 != EXIT_BLOCK_PTR_FOR_FN (cfun);)
402209ff 2646 {
e0082a72 2647 basic_block c;
e4ec2cac
AO
2648 edge s;
2649 bool changed_here = false;
5f0d2358 2650
468059bc
DD
2651 /* Delete trivially dead basic blocks. This is either
2652 blocks with no predecessors, or empty blocks with no
1e211590
DD
2653 successors. However if the empty block with no
2653 successors. However, if the empty block with no
2655 kept. This ensures that the ENTRY_BLOCK will have a
2656 successor which is a precondition for many RTL
2657 passes. Empty blocks may result from expanding
468059bc
DD
2658 __builtin_unreachable (). */
2659 if (EDGE_COUNT (b->preds) == 0
1e211590 2660 || (EDGE_COUNT (b->succs) == 0
7752e522 2661 && trivially_empty_bb_p (b)
fefa31b5
DM
2662 && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
2663 != b))
e4ec2cac 2664 {
f6366fc7 2665 c = b->prev_bb;
f1de5107 2666 if (EDGE_COUNT (b->preds) > 0)
3b5fda81
JJ
2667 {
2668 edge e;
2669 edge_iterator ei;
2670
f1de5107
JJ
2671 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2672 {
bcc708fc
MM
2673 if (BB_FOOTER (b)
2674 && BARRIER_P (BB_FOOTER (b)))
f1de5107
JJ
2675 FOR_EACH_EDGE (e, ei, b->preds)
2676 if ((e->flags & EDGE_FALLTHRU)
bcc708fc 2677 && BB_FOOTER (e->src) == NULL)
f1de5107 2678 {
bcc708fc 2679 if (BB_FOOTER (b))
f1de5107 2680 {
d8ce2eae
DM
2681 BB_FOOTER (e->src) = BB_FOOTER (b);
2682 BB_FOOTER (b) = NULL;
f1de5107
JJ
2683 }
2684 else
2685 {
2686 start_sequence ();
d8ce2eae 2687 BB_FOOTER (e->src) = emit_barrier ();
f1de5107
JJ
2688 end_sequence ();
2689 }
2690 }
2691 }
2692 else
2693 {
da5477a9 2694 rtx_insn *last = get_last_bb_insn (b);
f1de5107
JJ
2695 if (last && BARRIER_P (last))
2696 FOR_EACH_EDGE (e, ei, b->preds)
2697 if ((e->flags & EDGE_FALLTHRU))
2698 emit_barrier_after (BB_END (e->src));
2699 }
3b5fda81 2700 }
f470c378 2701 delete_basic_block (b);
bef16e87 2702 changed = true;
6626665f 2703 /* Avoid trying to remove the exit block. */
fefa31b5 2704 b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
83bd032b 2705 continue;
e4ec2cac 2706 }
402209ff 2707
6ce2bcb7 2708 /* Remove code labels no longer used. */
c5cbcccf
ZD
2709 if (single_pred_p (b)
2710 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2711 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
4b4bf941 2712 && LABEL_P (BB_HEAD (b))
6c979aa1 2713 && !LABEL_PRESERVE_P (BB_HEAD (b))
e4ec2cac
AO
2714 /* If the previous block ends with a branch to this
2715 block, we can't delete the label. Normally this
2716 is a condjump that is yet to be simplified, but
2717 if CASE_DROPS_THRU, this can be a tablejump with
2718 some element going to the same place as the
2719 default (fallthru). */
fefa31b5 2720 && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
c5cbcccf 2721 || !JUMP_P (BB_END (single_pred (b)))
a813c111 2722 || ! label_is_jump_target_p (BB_HEAD (b),
c5cbcccf 2723 BB_END (single_pred (b)))))
e4ec2cac 2724 {
03fbe718 2725 delete_insn (BB_HEAD (b));
c263766c
RH
2726 if (dump_file)
2727 fprintf (dump_file, "Deleted label in block %i.\n",
0b17ab2f 2728 b->index);
e4ec2cac 2729 }
402209ff 2730
e4ec2cac 2731 /* If we fall through an empty block, we can remove it. */
9be94227 2732 if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
c5cbcccf
ZD
2733 && single_pred_p (b)
2734 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
4b4bf941 2735 && !LABEL_P (BB_HEAD (b))
e4ec2cac
AO
2736 && FORWARDER_BLOCK_P (b)
2737 /* Note that forwarder_block_p true ensures that
2738 there is a successor for this block. */
c5cbcccf 2739 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
0cae8d31 2740 && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
e4ec2cac 2741 {
c263766c
RH
2742 if (dump_file)
2743 fprintf (dump_file,
e4ec2cac 2744 "Deleting fallthru block %i.\n",
0b17ab2f 2745 b->index);
e4ec2cac 2746
fefa31b5
DM
2747 c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2748 ? b->next_bb : b->prev_bb);
c5cbcccf
ZD
2749 redirect_edge_succ_nodup (single_pred_edge (b),
2750 single_succ (b));
f470c378 2751 delete_basic_block (b);
e4ec2cac
AO
2752 changed = true;
2753 b = c;
1e211590 2754 continue;
e4ec2cac 2755 }
5f0d2358 2756
50a36e42 2757 /* Merge B with its single successor, if any. */
c5cbcccf
ZD
2758 if (single_succ_p (b)
2759 && (s = single_succ_edge (b))
ec3ae3da 2760 && !(s->flags & EDGE_COMPLEX)
fefa31b5 2761 && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
c5cbcccf 2762 && single_pred_p (c)
bc35512f
JH
2763 && b != c)
2764 {
2765 /* When not in cfg_layout mode use code aware of reordering
2766 INSN. This code possibly creates new basic blocks so it
2767 does not fit the merge_blocks interface and is kept here in
2768 the hope that it will become useless once more of the compiler
2769 is transformed to use cfg_layout mode. */
c22cacf3 2770
bc35512f
JH
2771 if ((mode & CLEANUP_CFGLAYOUT)
2772 && can_merge_blocks_p (b, c))
2773 {
2774 merge_blocks (b, c);
2775 update_forwarder_flag (b);
2776 changed_here = true;
2777 }
2778 else if (!(mode & CLEANUP_CFGLAYOUT)
2779 /* If the jump insn has side effects,
2780 we can't kill the edge. */
4b4bf941 2781 && (!JUMP_P (BB_END (b))
e24e7211 2782 || (reload_completed
a813c111 2783 ? simplejump_p (BB_END (b))
e4efa971
JH
2784 : (onlyjump_p (BB_END (b))
2785 && !tablejump_p (BB_END (b),
2786 NULL, NULL))))
bc35512f
JH
2787 && (next = merge_blocks_move (s, b, c, mode)))
2788 {
2789 b = next;
2790 changed_here = true;
2791 }
ec3ae3da 2792 }
e4ec2cac
AO
2793
2794 /* Simplify branch over branch. */
bc35512f
JH
2795 if ((mode & CLEANUP_EXPENSIVE)
2796 && !(mode & CLEANUP_CFGLAYOUT)
2797 && try_simplify_condjump (b))
38c1593d 2798 changed_here = true;
402209ff 2799
e4ec2cac
AO
2800 /* If B has a single outgoing edge, but uses a
2801 non-trivial jump instruction without side-effects, we
2802 can either delete the jump entirely, or replace it
3348b696 2803 with a simple unconditional jump. */
c5cbcccf 2804 if (single_succ_p (b)
fefa31b5 2805 && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
a813c111 2806 && onlyjump_p (BB_END (b))
339ba33b 2807 && !CROSSING_JUMP_P (BB_END (b))
c5cbcccf
ZD
2808 && try_redirect_by_replacing_jump (single_succ_edge (b),
2809 single_succ (b),
20b4e8ae 2810 (mode & CLEANUP_CFGLAYOUT) != 0))
e4ec2cac 2811 {
e4ec2cac
AO
2812 update_forwarder_flag (b);
2813 changed_here = true;
2814 }
402209ff 2815
e4ec2cac
AO
2816 /* Simplify branch to branch. */
2817 if (try_forward_edges (mode, b))
afe8b6ec
EB
2818 {
2819 update_forwarder_flag (b);
2820 changed_here = true;
2821 }
402209ff 2822
e4ec2cac
AO
2823 /* Look for shared code between blocks. */
2824 if ((mode & CLEANUP_CROSSJUMP)
2825 && try_crossjump_bb (mode, b))
2826 changed_here = true;
402209ff 2827
4ec5d4f5
BS
2828 if ((mode & CLEANUP_CROSSJUMP)
2829 /* This can lengthen register lifetimes. Do it only after
2830 reload. */
2831 && reload_completed
2832 && try_head_merge_bb (b))
2833 changed_here = true;
2834
e4ec2cac
AO
2835 /* Don't get confused by the index shift caused by
2836 deleting blocks. */
2837 if (!changed_here)
e0082a72 2838 b = b->next_bb;
e4ec2cac
AO
2839 else
2840 changed = true;
2841 }
402209ff 2842
e4ec2cac 2843 if ((mode & CLEANUP_CROSSJUMP)
fefa31b5 2844 && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
402209ff 2845 changed = true;
402209ff 2846
4ec5d4f5
BS
2847 if (block_was_dirty)
2848 {
2849 /* This should only be set by head-merging. */
2850 gcc_assert (mode & CLEANUP_CROSSJUMP);
2851 df_analyze ();
2852 }
2853
e4ec2cac 2854 if (changed)
600b5b1d
TJ
2855 {
2856 /* Edge forwarding in particular can cause hot blocks previously
2857 reached by both hot and cold blocks to become dominated only
2858 by cold blocks. This will cause the verification below to fail,
2859 and lead to now-cold code in the hot section. This is not easy
2860 to detect and fix during edge forwarding, and in some cases
2861 is only visible after newly unreachable blocks are deleted,
2862 which will be done in fixup_partitions. */
b2b29377
MM
2863 fixup_partitions ();
2864 checking_verify_flow_info ();
600b5b1d 2865 }
402209ff 2866
e4ec2cac 2867 changed_overall |= changed;
7cf240d5 2868 first_pass = false;
e4ec2cac
AO
2869 }
2870 while (changed);
402209ff 2871 }
ca6c03ca 2872
04a90bec 2873 FOR_ALL_BB_FN (b, cfun)
2dd2d53e 2874 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
635559ab 2875
402209ff
JH
2876 return changed_overall;
2877}
2878\f
6d2f8887 2879/* Delete all unreachable basic blocks. */
4262e623 2880
969d70ca 2881bool
d329e058 2882delete_unreachable_blocks (void)
402209ff 2883{
402209ff 2884 bool changed = false;
b5b8b0ac 2885 basic_block b, prev_bb;
402209ff
JH
2886
2887 find_unreachable_blocks ();
2888
b5b8b0ac
AO
2889 /* When we're in GIMPLE mode and there may be debug insns, we should
2890 delete blocks in reverse dominator order, so as to get a chance
2891 to substitute all released DEFs into debug stmts. If we don't
2892 have dominators information, walking blocks backward gets us a
2893 better chance of retaining most debug information than
2894 otherwise. */
532aafad 2895 if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
b5b8b0ac 2896 && dom_info_available_p (CDI_DOMINATORS))
402209ff 2897 {
fefa31b5
DM
2898 for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
2899 b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
b5b8b0ac
AO
2900 {
2901 prev_bb = b->prev_bb;
2902
2903 if (!(b->flags & BB_REACHABLE))
2904 {
2905 /* Speed up the removal of blocks that don't dominate
2906 others. Walking backwards, this should be the common
2907 case. */
2908 if (!first_dom_son (CDI_DOMINATORS, b))
2909 delete_basic_block (b);
2910 else
2911 {
9771b263 2912 vec<basic_block> h
b5b8b0ac
AO
2913 = get_all_dominated_blocks (CDI_DOMINATORS, b);
2914
9771b263 2915 while (h.length ())
b5b8b0ac 2916 {
9771b263 2917 b = h.pop ();
b5b8b0ac
AO
2918
2919 prev_bb = b->prev_bb;
0b17ab2f 2920
b5b8b0ac
AO
2921 gcc_assert (!(b->flags & BB_REACHABLE));
2922
2923 delete_basic_block (b);
2924 }
2925
9771b263 2926 h.release ();
b5b8b0ac
AO
2927 }
2928
2929 changed = true;
2930 }
2931 }
2932 }
2933 else
2934 {
fefa31b5
DM
2935 for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
2936 b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
6a58eee9 2937 {
b5b8b0ac
AO
2938 prev_bb = b->prev_bb;
2939
2940 if (!(b->flags & BB_REACHABLE))
2941 {
2942 delete_basic_block (b);
2943 changed = true;
2944 }
6a58eee9 2945 }
402209ff
JH
2946 }
2947
2948 if (changed)
2949 tidy_fallthru_edges ();
2950 return changed;
2951}
6fb5fa3c
DB
2952
2953/* Delete any jump tables never referenced. We can't delete them at the
29f3fd5b
SB
2954 time of removing the tablejump insn, as they are referenced by the preceding
2955 insns computing the destination, so we delay deleting and garbage-collect
2956 them once life information is computed. */
6fb5fa3c
DB
2957void
2958delete_dead_jumptables (void)
2959{
2960 basic_block bb;
2961
29f3fd5b
SB
2962 /* A dead jump table does not belong to any basic block. Scan insns
2963 between two adjacent basic blocks. */
11cd3bed 2964 FOR_EACH_BB_FN (bb, cfun)
6fb5fa3c 2965 {
da5477a9 2966 rtx_insn *insn, *next;
29f3fd5b
SB
2967
2968 for (insn = NEXT_INSN (BB_END (bb));
2969 insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2970 insn = next)
57d6c446 2971 {
29f3fd5b
SB
2972 next = NEXT_INSN (insn);
2973 if (LABEL_P (insn)
2974 && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2975 && JUMP_TABLE_DATA_P (next))
2976 {
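 /* A preserved label carries an artificial use, so the equality above
    holds exactly when no real reference to the label remains.  */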
da5477a9 2977 rtx_insn *label = insn, *jump = next;
29f3fd5b
SB
2978
2979 if (dump_file)
2980 fprintf (dump_file, "Dead jumptable %i removed\n",
2981 INSN_UID (insn));
2982
2983 next = NEXT_INSN (next);
2984 delete_insn (jump);
2985 delete_insn (label);
2986 }
6fb5fa3c
DB
2987 }
2988 }
2989}
2990
402209ff
JH
2991\f
2992/* Tidy the CFG by deleting unreachable code and whatnot. */
2993
2994bool
d329e058 2995cleanup_cfg (int mode)
402209ff 2996{
402209ff
JH
2997 bool changed = false;
2998
aeceeb06
SB
2999 /* Set the cfglayout mode flag here. We could update all the callers
3000 but that is just inconvenient, especially given that we eventually
3001 want to have cfglayout mode as the default. */
3002 if (current_ir_type () == IR_RTL_CFGLAYOUT)
3003 mode |= CLEANUP_CFGLAYOUT;
3004
402209ff 3005 timevar_push (TV_CLEANUP_CFG);
3dec4024
JH
3006 if (delete_unreachable_blocks ())
3007 {
3008 changed = true;
3009 /* We've possibly created trivially dead code. Clean it up right
95bd1dd7 3010 now to introduce more opportunities for try_optimize_cfg. */
6fb5fa3c 3011 if (!(mode & (CLEANUP_NO_INSN_DEL))
3dec4024 3012 && !reload_completed)
62e5bf5d 3013 delete_trivially_dead_insns (get_insns (), max_reg_num ());
3dec4024 3014 }
bf77398c
ZD
3015
3016 compact_blocks ();
3017
c1e3e2d9
SB
3018 /* To tail-merge blocks ending in the same noreturn function (e.g.
3019 a call to abort) we have to insert fake edges to exit. Do this
3020 here once. The fake edges do not interfere with any other CFG
3021 cleanups. */
3022 if (mode & CLEANUP_CROSSJUMP)
3023 add_noreturn_fake_exit_edges ();
3024
7d817ebc
DE
3025 if (!dbg_cnt (cfg_cleanup))
3026 return changed;
3027
3dec4024
JH
3028 while (try_optimize_cfg (mode))
3029 {
3030 delete_unreachable_blocks (), changed = true;
c1e3e2d9 3031 if (!(mode & CLEANUP_NO_INSN_DEL))
3dec4024 3032 {
c1e3e2d9
SB
3033 /* Try to remove some trivially dead insns when doing an expensive
3034 cleanup. But delete_trivially_dead_insns doesn't work after
3035 reload (it only handles pseudos) and run_fast_dce is too costly
3036 to run in every iteration.
3037
3038 For effective cross jumping, we really want to run a fast DCE to
3039 clean up any dead conditions, or they get in the way of performing
3040 useful tail merges.
3041
3042 Other transformations in cleanup_cfg are not so sensitive to dead
3043 code, so delete_trivially_dead_insns or even doing nothing at all
3044 is good enough. */
3045 if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
3046 && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3dec4024 3047 break;
4ec5d4f5 3048 if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
f842d54f 3049 run_fast_dce ();
3dec4024
JH
3050 }
3051 else
3052 break;
3dec4024 3053 }
402209ff 3054
c1e3e2d9
SB
3055 if (mode & CLEANUP_CROSSJUMP)
3056 remove_fake_exit_edges ();
3057
29f3fd5b
SB
3058 /* Don't call delete_dead_jumptables in cfglayout mode, because
3059 that function assumes that jump tables are in the insns stream.
3060 But we also don't _have_ to delete dead jumptables in cfglayout
3061 mode because we shouldn't even be looking at things that are
3062 not in a basic block. Dead jumptables are cleaned up when
3063 going out of cfglayout mode. */
3064 if (!(mode & CLEANUP_CFGLAYOUT))
6fb5fa3c
DB
3065 delete_dead_jumptables ();
3066
7d776ee2
RG
3067 /* ??? We probably do this way too often. */
3068 if (current_loops
3069 && (changed
3070 || (mode & CLEANUP_CFG_CHANGED)))
3071 {
7d776ee2
RG
3072 timevar_push (TV_REPAIR_LOOPS);
3073 /* The above doesn't preserve dominance info if available. */
3074 gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
3075 calculate_dominance_info (CDI_DOMINATORS);
01cb1ef5 3076 fix_loop_structure (NULL);
7d776ee2
RG
3077 free_dominance_info (CDI_DOMINATORS);
3078 timevar_pop (TV_REPAIR_LOOPS);
3079 }
3080
402209ff
JH
3081 timevar_pop (TV_CLEANUP_CFG);
3082
402209ff
JH
3083 return changed;
3084}
ef330312 3085\f
27a4cd48
DM
3086namespace {
3087
3088const pass_data pass_data_jump =
11a687e7 3089{
27a4cd48
DM
3090 RTL_PASS, /* type */
3091 "jump", /* name */
3092 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
3093 TV_JUMP, /* tv_id */
3094 0, /* properties_required */
3095 0, /* properties_provided */
3096 0, /* properties_destroyed */
3097 0, /* todo_flags_start */
3bea341f 3098 0, /* todo_flags_finish */
11a687e7 3099};
27a4cd48
DM
3100
3101class pass_jump : public rtl_opt_pass
3102{
3103public:
c3284718
RS
3104 pass_jump (gcc::context *ctxt)
3105 : rtl_opt_pass (pass_data_jump, ctxt)
27a4cd48
DM
3106 {}
3107
3108 /* opt_pass methods: */
be55bfe6 3109 virtual unsigned int execute (function *);
27a4cd48
DM
3110
3111}; // class pass_jump
3112
be55bfe6
TS
3113unsigned int
3114pass_jump::execute (function *)
3115{
3116 delete_trivially_dead_insns (get_insns (), max_reg_num ());
3117 if (dump_file)
3118 dump_flow_info (dump_file, dump_flags);
3119 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3120 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3121 return 0;
3122}
3123
27a4cd48
DM
3124} // anon namespace
3125
3126rtl_opt_pass *
3127make_pass_jump (gcc::context *ctxt)
3128{
3129 return new pass_jump (ctxt);
3130}
11a687e7 3131\f
27a4cd48
DM
3132namespace {
3133
3134const pass_data pass_data_jump2 =
ef330312 3135{
27a4cd48
DM
3136 RTL_PASS, /* type */
3137 "jump2", /* name */
3138 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
3139 TV_JUMP, /* tv_id */
3140 0, /* properties_required */
3141 0, /* properties_provided */
3142 0, /* properties_destroyed */
3143 0, /* todo_flags_start */
3bea341f 3144 0, /* todo_flags_finish */
ef330312 3145};
27a4cd48
DM
3146
3147class pass_jump2 : public rtl_opt_pass
3148{
3149public:
c3284718
RS
3150 pass_jump2 (gcc::context *ctxt)
3151 : rtl_opt_pass (pass_data_jump2, ctxt)
27a4cd48
DM
3152 {}
3153
3154 /* opt_pass methods: */
be55bfe6
TS
3155 virtual unsigned int execute (function *)
3156 {
3157 cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
3158 return 0;
3159 }
27a4cd48
DM
3160
3161}; // class pass_jump2
3162
3163} // anon namespace
3164
3165rtl_opt_pass *
3166make_pass_jump2 (gcc::context *ctxt)
3167{
3168 return new pass_jump2 (ctxt);
3169}