gcc/cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This file contains the control flow optimizer.  The main entry point is
21    cleanup_cfg.  The following optimizations are performed:
22
23 - Unreachable blocks removal
24    - Edge forwarding (an edge to a forwarder block is forwarded to its
25      successor.  Simplification of the branch instruction is performed by
26      the underlying infrastructure, so the branch can be converted to a
27      simplejump or eliminated).
28 - Cross jumping (tail merging)
29 - Conditional jump-around-simplejump simplification
30 - Basic block merging. */
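/* Illustrative example (added commentary, not part of the original source):
   for a function such as

       int f (int x) { if (x) return 1; return 2; }

   the initial RTL CFG may contain a conditional jump around an unconditional
   jump, a forwarder block holding only that jump, and blocks that become
   adjacent once the jump is gone.  A cleanup_cfg run then applies the passes
   listed above: the condjump is simplified, the forwarder is skipped, the
   unreachable block is removed, and the remaining blocks are merged.  */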
31
32 #include "config.h"
33 #include "system.h"
34 #include "coretypes.h"
35 #include "backend.h"
36 #include "target.h"
37 #include "rtl.h"
38 #include "tree.h"
39 #include "cfghooks.h"
40 #include "df.h"
41 #include "memmodel.h"
42 #include "tm_p.h"
43 #include "insn-config.h"
44 #include "emit-rtl.h"
45 #include "cselib.h"
46 #include "params.h"
47 #include "tree-pass.h"
48 #include "cfgloop.h"
49 #include "cfgrtl.h"
50 #include "cfganal.h"
51 #include "cfgbuild.h"
52 #include "cfgcleanup.h"
53 #include "dce.h"
54 #include "dbgcnt.h"
55 #include "rtl-iter.h"
56 #include "regs.h"
57
58 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
59
60 /* Set to true when we are running first pass of try_optimize_cfg loop. */
61 static bool first_pass;
62
63 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg. */
64 static bool crossjumps_occurred;
65
66 /* Set to true if we couldn't run an optimization due to stale liveness
67 information; we should run df_analyze to enable more opportunities. */
68 static bool block_was_dirty;
69
70 static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
71 static bool try_crossjump_bb (int, basic_block);
72 static bool outgoing_edges_match (int, basic_block, basic_block);
73 static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);
74
75 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
76 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
77 static bool try_optimize_cfg (int);
78 static bool try_simplify_condjump (basic_block);
79 static bool try_forward_edges (int, basic_block);
80 static edge thread_jump (edge, basic_block);
81 static bool mark_effect (rtx, bitmap);
82 static void notice_new_block (basic_block);
83 static void update_forwarder_flag (basic_block);
84 static void merge_memattrs (rtx, rtx);
85 \f
86 /* Set flags for newly created block. */
87
88 static void
89 notice_new_block (basic_block bb)
90 {
91 if (!bb)
92 return;
93
94 if (forwarder_block_p (bb))
95 bb->flags |= BB_FORWARDER_BLOCK;
96 }
97
98 /* Recompute forwarder flag after block has been modified. */
99
100 static void
101 update_forwarder_flag (basic_block bb)
102 {
103 if (forwarder_block_p (bb))
104 bb->flags |= BB_FORWARDER_BLOCK;
105 else
106 bb->flags &= ~BB_FORWARDER_BLOCK;
107 }
108 \f
109 /* Simplify a conditional jump around an unconditional jump.
110 Return true if something changed. */
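/* Rough sketch of the transformation (added for illustration, not from the
   original source):

       before:                                after:
         bb C:  if (cond) goto L1;              bb C:  if (!cond) goto L2;
         bb J:  goto L2;                        (bb J deleted)
         L1:    ...                             L1:    ...

   The conditional branch in C is inverted so that it targets L2 directly,
   the forwarder block J holding the unconditional jump becomes dead and is
   deleted, and C now falls through to L1.  */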
111
112 static bool
113 try_simplify_condjump (basic_block cbranch_block)
114 {
115 basic_block jump_block, jump_dest_block, cbranch_dest_block;
116 edge cbranch_jump_edge, cbranch_fallthru_edge;
117 rtx_insn *cbranch_insn;
118
119 /* Verify that there are exactly two successors. */
120 if (EDGE_COUNT (cbranch_block->succs) != 2)
121 return false;
122
123 /* Verify that we've got a normal conditional branch at the end
124 of the block. */
125 cbranch_insn = BB_END (cbranch_block);
126 if (!any_condjump_p (cbranch_insn))
127 return false;
128
129 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
130 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
131
132 /* The next block must not have multiple predecessors, must not
133 be the last block in the function, and must contain just the
134 unconditional jump. */
135 jump_block = cbranch_fallthru_edge->dest;
136 if (!single_pred_p (jump_block)
137 || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
138 || !FORWARDER_BLOCK_P (jump_block))
139 return false;
140 jump_dest_block = single_succ (jump_block);
141
142 /* If we are partitioning hot/cold basic blocks, we don't want to
143 mess up unconditional or indirect jumps that cross between hot
144 and cold sections.
145
146 Basic block partitioning may result in some jumps that appear to
147 be optimizable (or blocks that appear to be mergeable), but which really
148 must be left untouched (they are required to make it safely across
149 partition boundaries). See the comments at the top of
150 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
151
152 if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
153 || (cbranch_jump_edge->flags & EDGE_CROSSING))
154 return false;
155
156 /* The conditional branch must target the block after the
157 unconditional branch. */
158 cbranch_dest_block = cbranch_jump_edge->dest;
159
160 if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
161 || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
162 || !can_fallthru (jump_block, cbranch_dest_block))
163 return false;
164
165 /* Invert the conditional branch. */
166 if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
167 block_label (jump_dest_block), 0))
168 return false;
169
170 if (dump_file)
171 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
172 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
173
174 /* Success. Update the CFG to match. Note that after this point
175 the edge variable names appear backwards; the redirection is done
176 this way to preserve edge profile data. */
177 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
178 cbranch_dest_block);
179 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
180 jump_dest_block);
181 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
182 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
183 update_br_prob_note (cbranch_block);
184
185 /* Delete the block with the unconditional jump, and clean up the mess. */
186 delete_basic_block (jump_block);
187 tidy_fallthru_edge (cbranch_jump_edge);
188 update_forwarder_flag (cbranch_block);
189
190 return true;
191 }
192 \f
193 /* Attempt to prove that the operation is a NOOP using cselib, or mark its
194    effect on the register.  Used by jump threading.  */
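/* For illustration (added commentary): processing
       (set (reg 10) (plus (reg 10) (const_int 1)))
   marks register 10 in NONEQUAL, since its value now differs from the value
   it had in the predecessor, whereas
       (clobber (reg 10))
   clears it again, because the old value is known to be dead.  Returning
   true signals that the effect could not be analysed at all.  */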
195
196 static bool
197 mark_effect (rtx exp, regset nonequal)
198 {
199 rtx dest;
200 switch (GET_CODE (exp))
201 {
202       /* In case we do clobber the register, mark it as equal, as we know the
203          value is dead and so does not have to match.  */
204 case CLOBBER:
205 dest = XEXP (exp, 0);
206 if (REG_P (dest))
207 bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
208 return false;
209
210 case SET:
211 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
212 return false;
213 dest = SET_DEST (exp);
214 if (dest == pc_rtx)
215 return false;
216 if (!REG_P (dest))
217 return true;
218 bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
219 return false;
220
221 default:
222 return false;
223 }
224 }
225
226 /* Return true if X contains a register in NONEQUAL. */
227 static bool
228 mentions_nonequal_regs (const_rtx x, regset nonequal)
229 {
230 subrtx_iterator::array_type array;
231 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
232 {
233 const_rtx x = *iter;
234 if (REG_P (x))
235 {
236 unsigned int end_regno = END_REGNO (x);
237 for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
238 if (REGNO_REG_SET_P (nonequal, regno))
239 return true;
240 }
241 }
242 return false;
243 }
244
245 /* Attempt to prove that the basic block B will have no side effects and
246    will always continue along the same edge if reached via E.  Return that
247    edge if it exists, NULL otherwise.  */
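/* Illustrative example (added, not from the original source):

       bb1:  if (x == 0) goto bb3;      <- E is the taken edge
       ...
       bb3:  if (x >= 0) goto bb5;      <- B

   When B is entered via E we know x == 0, so the first comparison dominates
   the second (comparison_dominates_p) and B will always take its branch to
   bb5.  Provided B has no side effects and does not change x, the edge E can
   be redirected straight to bb5, bypassing B's redundant test.  */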
248
249 static edge
250 thread_jump (edge e, basic_block b)
251 {
252 rtx set1, set2, cond1, cond2;
253 rtx_insn *insn;
254 enum rtx_code code1, code2, reversed_code2;
255 bool reverse1 = false;
256 unsigned i;
257 regset nonequal;
258 bool failed = false;
259 reg_set_iterator rsi;
260
261 if (b->flags & BB_NONTHREADABLE_BLOCK)
262 return NULL;
263
264   /* At the moment, we handle only conditional jumps, but later we may
265      want to extend this code to tablejumps and others.  */
266 if (EDGE_COUNT (e->src->succs) != 2)
267 return NULL;
268 if (EDGE_COUNT (b->succs) != 2)
269 {
270 b->flags |= BB_NONTHREADABLE_BLOCK;
271 return NULL;
272 }
273
274 /* Second branch must end with onlyjump, as we will eliminate the jump. */
275 if (!any_condjump_p (BB_END (e->src)))
276 return NULL;
277
278 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
279 {
280 b->flags |= BB_NONTHREADABLE_BLOCK;
281 return NULL;
282 }
283
284 set1 = pc_set (BB_END (e->src));
285 set2 = pc_set (BB_END (b));
286 if (((e->flags & EDGE_FALLTHRU) != 0)
287 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
288 reverse1 = true;
289
290 cond1 = XEXP (SET_SRC (set1), 0);
291 cond2 = XEXP (SET_SRC (set2), 0);
292 if (reverse1)
293 code1 = reversed_comparison_code (cond1, BB_END (e->src));
294 else
295 code1 = GET_CODE (cond1);
296
297 code2 = GET_CODE (cond2);
298 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
299
300 if (!comparison_dominates_p (code1, code2)
301 && !comparison_dominates_p (code1, reversed_code2))
302 return NULL;
303
304   /* Ensure that the comparison operators are equivalent.
305      ??? This is far too pessimistic.  We should allow swapped operands,
306      different CCmodes, or for example interval comparisons that
307      dominate even when the operands are not equivalent.  */
308 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
309 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
310 return NULL;
311
312 /* Punt if BB_END (e->src) is doloop-like conditional jump that modifies
313 the registers used in cond1. */
314 if (modified_in_p (cond1, BB_END (e->src)))
315 return NULL;
316
317 /* Short circuit cases where block B contains some side effects, as we can't
318 safely bypass it. */
319 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
320 insn = NEXT_INSN (insn))
321 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
322 {
323 b->flags |= BB_NONTHREADABLE_BLOCK;
324 return NULL;
325 }
326
327 cselib_init (0);
328
329 /* First process all values computed in the source basic block. */
330 for (insn = NEXT_INSN (BB_HEAD (e->src));
331 insn != NEXT_INSN (BB_END (e->src));
332 insn = NEXT_INSN (insn))
333 if (INSN_P (insn))
334 cselib_process_insn (insn);
335
336 nonequal = BITMAP_ALLOC (NULL);
337 CLEAR_REG_SET (nonequal);
338
339   /* Now assume that we've continued along the edge E to B and continue
340      processing as if it were the same basic block.
341      Our goal is to prove that the whole block is a NOOP.  */
342
343 for (insn = NEXT_INSN (BB_HEAD (b));
344 insn != NEXT_INSN (BB_END (b)) && !failed;
345 insn = NEXT_INSN (insn))
346 {
347       /* cond2 must not mention any register whose value differs from that
348          in the former block.  Check this before processing that instruction,
349          as BB_END (b) could also contain clobbers.  */
350 if (insn == BB_END (b)
351 && mentions_nonequal_regs (cond2, nonequal))
352 goto failed_exit;
353
354 if (INSN_P (insn))
355 {
356 rtx pat = PATTERN (insn);
357
358 if (GET_CODE (pat) == PARALLEL)
359 {
360 for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
361 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
362 }
363 else
364 failed |= mark_effect (pat, nonequal);
365 }
366
367 cselib_process_insn (insn);
368 }
369
370   /* Later we should clear nonequal of dead registers.  So far we don't
371      have life information in cfg_cleanup.  */
372 if (failed)
373 {
374 b->flags |= BB_NONTHREADABLE_BLOCK;
375 goto failed_exit;
376 }
377
378 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
379 goto failed_exit;
380
381 BITMAP_FREE (nonequal);
382 cselib_finish ();
383 if ((comparison_dominates_p (code1, code2) != 0)
384 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
385 return BRANCH_EDGE (b);
386 else
387 return FALLTHRU_EDGE (b);
388
389 failed_exit:
390 BITMAP_FREE (nonequal);
391 cselib_finish ();
392 return NULL;
393 }
394 \f
395 /* Attempt to forward edges leaving basic block B.
396 Return true if successful. */
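/* Rough sketch (added for illustration): if block A has an edge to a
   forwarder block F that does nothing but jump to C, the loop below follows
   the chain and redirects A's edge directly to C:

       before:  A -> F -> C        after:  A -> C    (F is later removed)

   COUNTER is bounded by n_basic_blocks_for_fn (cfun) so that a cycle of
   forwarder blocks cannot make this walk loop forever.  */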
397
398 static bool
399 try_forward_edges (int mode, basic_block b)
400 {
401 bool changed = false;
402 edge_iterator ei;
403 edge e, *threaded_edges = NULL;
404
405 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
406 {
407 basic_block target, first;
408 location_t goto_locus;
409 int counter;
410 bool threaded = false;
411 int nthreaded_edges = 0;
412 bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
413 bool new_target_threaded = false;
414
415 /* Skip complex edges because we don't know how to update them.
416
417          Still handle fallthru edges, as we may succeed in forwarding the
418          fallthru edge to the same place as the branch edge of a conditional
419          branch and turn the conditional branch into an unconditional branch.  */
420 if (e->flags & EDGE_COMPLEX)
421 {
422 ei_next (&ei);
423 continue;
424 }
425
426 target = first = e->dest;
427 counter = NUM_FIXED_BLOCKS;
428 goto_locus = e->goto_locus;
429
430 while (counter < n_basic_blocks_for_fn (cfun))
431 {
432 basic_block new_target = NULL;
433 may_thread |= (target->flags & BB_MODIFIED) != 0;
434
435 if (FORWARDER_BLOCK_P (target)
436 && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
437 {
438 /* Bypass trivial infinite loops. */
439 new_target = single_succ (target);
440 if (target == new_target)
441 counter = n_basic_blocks_for_fn (cfun);
442 else if (!optimize)
443 {
444 /* When not optimizing, ensure that edges or forwarder
445 blocks with different locus are not optimized out. */
446 location_t new_locus = single_succ_edge (target)->goto_locus;
447 location_t locus = goto_locus;
448
449 if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
450 && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
451 && new_locus != locus)
452 new_target = NULL;
453 else
454 {
455 if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
456 locus = new_locus;
457
458 rtx_insn *last = BB_END (target);
459 if (DEBUG_INSN_P (last))
460 last = prev_nondebug_insn (last);
461 if (last && INSN_P (last))
462 new_locus = INSN_LOCATION (last);
463 else
464 new_locus = UNKNOWN_LOCATION;
465
466 if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
467 && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
468 && new_locus != locus)
469 new_target = NULL;
470 else
471 {
472 if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
473 locus = new_locus;
474
475 goto_locus = locus;
476 }
477 }
478 }
479 }
480
481          /* Allow threading only over one edge at a time to simplify updating
482             of probabilities.  */
483 else if ((mode & CLEANUP_THREADING) && may_thread)
484 {
485 edge t = thread_jump (e, target);
486 if (t)
487 {
488 if (!threaded_edges)
489 threaded_edges = XNEWVEC (edge,
490 n_basic_blocks_for_fn (cfun));
491 else
492 {
493 int i;
494
495 /* Detect an infinite loop across blocks not
496 including the start block. */
497 for (i = 0; i < nthreaded_edges; ++i)
498 if (threaded_edges[i] == t)
499 break;
500 if (i < nthreaded_edges)
501 {
502 counter = n_basic_blocks_for_fn (cfun);
503 break;
504 }
505 }
506
507 /* Detect an infinite loop across the start block. */
508 if (t->dest == b)
509 break;
510
511 gcc_assert (nthreaded_edges
512 < (n_basic_blocks_for_fn (cfun)
513 - NUM_FIXED_BLOCKS));
514 threaded_edges[nthreaded_edges++] = t;
515
516 new_target = t->dest;
517 new_target_threaded = true;
518 }
519 }
520
521 if (!new_target)
522 break;
523
524 counter++;
525          /* Do not turn a non-crossing jump into a crossing one.  Depending on
526             the target, it may require a different instruction pattern.  */
527 if ((e->flags & EDGE_CROSSING)
528 || BB_PARTITION (first) == BB_PARTITION (new_target))
529 {
530 target = new_target;
531 threaded |= new_target_threaded;
532 }
533 }
534
535 if (counter >= n_basic_blocks_for_fn (cfun))
536 {
537 if (dump_file)
538 fprintf (dump_file, "Infinite loop in BB %i.\n",
539 target->index);
540 }
541 else if (target == first)
542 ; /* We didn't do anything. */
543 else
544 {
545 /* Save the values now, as the edge may get removed. */
546 profile_count edge_count = e->count ();
547 int n = 0;
548
549 e->goto_locus = goto_locus;
550
551 /* Don't force if target is exit block. */
552 if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
553 {
554 notice_new_block (redirect_edge_and_branch_force (e, target));
555 if (dump_file)
556 fprintf (dump_file, "Conditionals threaded.\n");
557 }
558 else if (!redirect_edge_and_branch (e, target))
559 {
560 if (dump_file)
561 fprintf (dump_file,
562 "Forwarding edge %i->%i to %i failed.\n",
563 b->index, e->dest->index, target->index);
564 ei_next (&ei);
565 continue;
566 }
567
568 /* We successfully forwarded the edge. Now update profile
569 data: for each edge we traversed in the chain, remove
570              the original edge's execution count.  */
571 do
572 {
573 edge t;
574
575 if (!single_succ_p (first))
576 {
577 gcc_assert (n < nthreaded_edges);
578 t = threaded_edges [n++];
579 gcc_assert (t->src == first);
580 update_bb_profile_for_threading (first, edge_count, t);
581 update_br_prob_note (first);
582 }
583 else
584 {
585 first->count -= edge_count;
586                   /* It is possible that, as a result of
587                      threading, we've removed the edge because it
588                      was threaded to the fallthru edge.  Avoid
589                      getting out of sync.  */
590 if (n < nthreaded_edges
591 && first == threaded_edges [n]->src)
592 n++;
593 t = single_succ_edge (first);
594 }
595
596 first = t->dest;
597 }
598 while (first != target);
599
600 changed = true;
601 continue;
602 }
603 ei_next (&ei);
604 }
605
606 free (threaded_edges);
607 return changed;
608 }
609 \f
610
611 /* Blocks A and B are to be merged into a single block. A has no incoming
612 fallthru edge, so it can be moved before B without adding or modifying
613 any jumps (aside from the jump from A to B). */
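/* Rough sketch (added for illustration): given the layout

       A:  ...; goto B;      (nothing falls through into A)
       X:  ...
       B:  ...

   the insns of A are moved so that A is placed immediately before B; the
   jump from A to B then becomes a fallthru and merge_blocks combines the
   two blocks.  */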
614
615 static void
616 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
617 {
618 rtx_insn *barrier;
619
620 /* If we are partitioning hot/cold basic blocks, we don't want to
621 mess up unconditional or indirect jumps that cross between hot
622 and cold sections.
623
624 Basic block partitioning may result in some jumps that appear to
625 be optimizable (or blocks that appear to be mergeable), but which really
626 must be left untouched (they are required to make it safely across
627 partition boundaries). See the comments at the top of
628 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
629
630 if (BB_PARTITION (a) != BB_PARTITION (b))
631 return;
632
633 barrier = next_nonnote_insn (BB_END (a));
634 gcc_assert (BARRIER_P (barrier));
635 delete_insn (barrier);
636
637 /* Scramble the insn chain. */
638 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
639 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
640 df_set_bb_dirty (a);
641
642 if (dump_file)
643 fprintf (dump_file, "Moved block %d before %d and merged.\n",
644 a->index, b->index);
645
646 /* Swap the records for the two blocks around. */
647
648 unlink_block (a);
649 link_block (a, b->prev_bb);
650
651 /* Now blocks A and B are contiguous. Merge them. */
652 merge_blocks (a, b);
653 }
654
655 /* Blocks A and B are to be merged into a single block. B has no outgoing
656 fallthru edge, so it can be moved after A without adding or modifying
657 any jumps (aside from the jump from A to B). */
658
659 static void
660 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
661 {
662 rtx_insn *barrier, *real_b_end;
663 rtx_insn *label;
664 rtx_jump_table_data *table;
665
666 /* If we are partitioning hot/cold basic blocks, we don't want to
667 mess up unconditional or indirect jumps that cross between hot
668 and cold sections.
669
670 Basic block partitioning may result in some jumps that appear to
671 be optimizable (or blocks that appear to be mergeable), but which really
672 must be left untouched (they are required to make it safely across
673 partition boundaries). See the comments at the top of
674 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
675
676 if (BB_PARTITION (a) != BB_PARTITION (b))
677 return;
678
679 real_b_end = BB_END (b);
680
681 /* If there is a jump table following block B temporarily add the jump table
682 to block B so that it will also be moved to the correct location. */
683 if (tablejump_p (BB_END (b), &label, &table)
684 && prev_active_insn (label) == BB_END (b))
685 {
686 BB_END (b) = table;
687 }
688
689 /* There had better have been a barrier there. Delete it. */
690 barrier = NEXT_INSN (BB_END (b));
691 if (barrier && BARRIER_P (barrier))
692 delete_insn (barrier);
693
694
695 /* Scramble the insn chain. */
696 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
697
698 /* Restore the real end of b. */
699 BB_END (b) = real_b_end;
700
701 if (dump_file)
702 fprintf (dump_file, "Moved block %d after %d and merged.\n",
703 b->index, a->index);
704
705 /* Now blocks A and B are contiguous. Merge them. */
706 merge_blocks (a, b);
707 }
708
709 /* Attempt to merge basic blocks that are potentially non-adjacent.
710    Return NULL iff the attempt failed, otherwise return the basic block
711    where cleanup_cfg should continue.  Because the merging commonly
712    moves a basic block away or introduces another optimization
713    possibility, return the basic block just before B so that cleanup_cfg
714    doesn't need to iterate.
715
716    It may be a good idea to return the basic block before C in case
717    C has been moved after B and originally appeared earlier in the
718    insn sequence, but we have no information available about the
719    relative ordering of these two.  Hopefully it is not too common.  */
720
721 static basic_block
722 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
723 {
724 basic_block next;
725
726 /* If we are partitioning hot/cold basic blocks, we don't want to
727 mess up unconditional or indirect jumps that cross between hot
728 and cold sections.
729
730 Basic block partitioning may result in some jumps that appear to
731 be optimizable (or blocks that appear to be mergeable), but which really
732 must be left untouched (they are required to make it safely across
733 partition boundaries). See the comments at the top of
734 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
735
736 if (BB_PARTITION (b) != BB_PARTITION (c))
737 return NULL;
738
739 /* If B has a fallthru edge to C, no need to move anything. */
740 if (e->flags & EDGE_FALLTHRU)
741 {
742 int b_index = b->index, c_index = c->index;
743
744 /* Protect the loop latches. */
745 if (current_loops && c->loop_father->latch == c)
746 return NULL;
747
748 merge_blocks (b, c);
749 update_forwarder_flag (b);
750
751 if (dump_file)
752 fprintf (dump_file, "Merged %d and %d without moving.\n",
753 b_index, c_index);
754
755 return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
756 }
757
758 /* Otherwise we will need to move code around. Do that only if expensive
759 transformations are allowed. */
760 else if (mode & CLEANUP_EXPENSIVE)
761 {
762 edge tmp_edge, b_fallthru_edge;
763 bool c_has_outgoing_fallthru;
764 bool b_has_incoming_fallthru;
765
766 /* Avoid overactive code motion, as the forwarder blocks should be
767 eliminated by edge redirection instead. One exception might have
768 been if B is a forwarder block and C has no fallthru edge, but
769 that should be cleaned up by bb-reorder instead. */
770 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
771 return NULL;
772
773 /* We must make sure to not munge nesting of lexical blocks,
774 and loop notes. This is done by squeezing out all the notes
775 and leaving them there to lie. Not ideal, but functional. */
776
777 tmp_edge = find_fallthru_edge (c->succs);
778 c_has_outgoing_fallthru = (tmp_edge != NULL);
779
780 tmp_edge = find_fallthru_edge (b->preds);
781 b_has_incoming_fallthru = (tmp_edge != NULL);
782 b_fallthru_edge = tmp_edge;
783 next = b->prev_bb;
784 if (next == c)
785 next = next->prev_bb;
786
787 /* Otherwise, we're going to try to move C after B. If C does
788 not have an outgoing fallthru, then it can be moved
789 immediately after B without introducing or modifying jumps. */
790 if (! c_has_outgoing_fallthru)
791 {
792 merge_blocks_move_successor_nojumps (b, c);
793 return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
794 }
795
796 /* If B does not have an incoming fallthru, then it can be moved
797 immediately before C without introducing or modifying jumps.
798 C cannot be the first block, so we do not have to worry about
799 accessing a non-existent block. */
800
801 if (b_has_incoming_fallthru)
802 {
803 basic_block bb;
804
805 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
806 return NULL;
807 bb = force_nonfallthru (b_fallthru_edge);
808 if (bb)
809 notice_new_block (bb);
810 }
811
812 merge_blocks_move_predecessor_nojumps (b, c);
813 return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
814 }
815
816 return NULL;
817 }
818 \f
819
820 /* Removes the memory attributes of a MEM expression
821    if they are not equal.  */
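/* For illustration (added commentary): when cross-jumping merges two copies
   of the same load, one seen as a MEM with alias set 2, a known 4-byte size
   and 8-byte alignment, and the other with alias set 0, unknown size and
   4-byte alignment, the surviving MEM keeps only what both agree on: alias
   set 0, no known size, and 4-byte alignment.  */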
822
823 static void
824 merge_memattrs (rtx x, rtx y)
825 {
826 int i;
827 int j;
828 enum rtx_code code;
829 const char *fmt;
830
831 if (x == y)
832 return;
833 if (x == 0 || y == 0)
834 return;
835
836 code = GET_CODE (x);
837
838 if (code != GET_CODE (y))
839 return;
840
841 if (GET_MODE (x) != GET_MODE (y))
842 return;
843
844 if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
845 {
846 if (! MEM_ATTRS (x))
847 MEM_ATTRS (y) = 0;
848 else if (! MEM_ATTRS (y))
849 MEM_ATTRS (x) = 0;
850 else
851 {
852 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
853 {
854 set_mem_alias_set (x, 0);
855 set_mem_alias_set (y, 0);
856 }
857
858 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
859 {
860 set_mem_expr (x, 0);
861 set_mem_expr (y, 0);
862 clear_mem_offset (x);
863 clear_mem_offset (y);
864 }
865 else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
866 || (MEM_OFFSET_KNOWN_P (x)
867 && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y))))
868 {
869 clear_mem_offset (x);
870 clear_mem_offset (y);
871 }
872
873 if (!MEM_SIZE_KNOWN_P (x))
874 clear_mem_size (y);
875 else if (!MEM_SIZE_KNOWN_P (y))
876 clear_mem_size (x);
877 else if (known_le (MEM_SIZE (x), MEM_SIZE (y)))
878 set_mem_size (x, MEM_SIZE (y));
879 else if (known_le (MEM_SIZE (y), MEM_SIZE (x)))
880 set_mem_size (y, MEM_SIZE (x));
881 else
882 {
883 /* The sizes aren't ordered, so we can't merge them. */
884 clear_mem_size (x);
885 clear_mem_size (y);
886 }
887
888 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
889 set_mem_align (y, MEM_ALIGN (x));
890 }
891 }
892 if (code == MEM)
893 {
894 if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
895 {
896 MEM_READONLY_P (x) = 0;
897 MEM_READONLY_P (y) = 0;
898 }
899 if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
900 {
901 MEM_NOTRAP_P (x) = 0;
902 MEM_NOTRAP_P (y) = 0;
903 }
904 if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
905 {
906 MEM_VOLATILE_P (x) = 1;
907 MEM_VOLATILE_P (y) = 1;
908 }
909 }
910
911 fmt = GET_RTX_FORMAT (code);
912 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
913 {
914 switch (fmt[i])
915 {
916 case 'E':
917 /* Two vectors must have the same length. */
918 if (XVECLEN (x, i) != XVECLEN (y, i))
919 return;
920
921 for (j = 0; j < XVECLEN (x, i); j++)
922 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
923
924 break;
925
926 case 'e':
927 merge_memattrs (XEXP (x, i), XEXP (y, i));
928 }
929 }
930 return;
931 }
932
933
934 /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
935 different single sets S1 and S2. */
936
937 static bool
938 equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
939 {
940 int i;
941 rtx e1, e2;
942
943 if (p1 == s1 && p2 == s2)
944 return true;
945
946 if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
947 return false;
948
949 if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
950 return false;
951
952 for (i = 0; i < XVECLEN (p1, 0); i++)
953 {
954 e1 = XVECEXP (p1, 0, i);
955 e2 = XVECEXP (p2, 0, i);
956 if (e1 == s1 && e2 == s2)
957 continue;
958 if (reload_completed
959 ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
960 continue;
961
962 return false;
963 }
964
965 return true;
966 }
967
968
969 /* NOTE1 is the REG_EQUAL note, if any, attached to an insn
970 that is a single_set with a SET_SRC of SRC1. Similarly
971 for NOTE2/SRC2.
972
973 So effectively NOTE1/NOTE2 are an alternate form of
974 SRC1/SRC2 respectively.
975
976 Return nonzero if SRC1 or NOTE1 has the same constant
977 integer value as SRC2 or NOTE2. Else return zero. */
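/* For example (added commentary): an insn (set (reg 5) (reg 7)) carrying a
   REG_EQUAL note of (const_int 8) is considered equal in value to
   (set (reg 5) (const_int 8)): NOTE1 supplies the same constant that SRC2
   uses directly, which is the third case below.  */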
978 static int
979 values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
980 {
981 if (note1
982 && note2
983 && CONST_INT_P (XEXP (note1, 0))
984 && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
985 return 1;
986
987 if (!note1
988 && !note2
989 && CONST_INT_P (src1)
990 && CONST_INT_P (src2)
991 && rtx_equal_p (src1, src2))
992 return 1;
993
994 if (note1
995 && CONST_INT_P (src2)
996 && rtx_equal_p (XEXP (note1, 0), src2))
997 return 1;
998
999 if (note2
1000 && CONST_INT_P (src1)
1001 && rtx_equal_p (XEXP (note2, 0), src1))
1002 return 1;
1003
1004 return 0;
1005 }
1006
1007 /* Examine register notes on I1 and I2 and return:
1008 - dir_forward if I1 can be replaced by I2, or
1009 - dir_backward if I2 can be replaced by I1, or
1010 - dir_both if both are the case. */
1011
1012 static enum replace_direction
1013 can_replace_by (rtx_insn *i1, rtx_insn *i2)
1014 {
1015 rtx s1, s2, d1, d2, src1, src2, note1, note2;
1016 bool c1, c2;
1017
1018 /* Check for 2 sets. */
1019 s1 = single_set (i1);
1020 s2 = single_set (i2);
1021 if (s1 == NULL_RTX || s2 == NULL_RTX)
1022 return dir_none;
1023
1024 /* Check that the 2 sets set the same dest. */
1025 d1 = SET_DEST (s1);
1026 d2 = SET_DEST (s2);
1027 if (!(reload_completed
1028 ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1029 return dir_none;
1030
1031   /* Find an identical reg_equiv or reg_equal note, which implies that the 2
1032      sets set dest to the same value.  */
1033 note1 = find_reg_equal_equiv_note (i1);
1034 note2 = find_reg_equal_equiv_note (i2);
1035
1036 src1 = SET_SRC (s1);
1037 src2 = SET_SRC (s2);
1038
1039 if (!values_equal_p (note1, note2, src1, src2))
1040 return dir_none;
1041
1042 if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1043 return dir_none;
1044
1045 /* Although the 2 sets set dest to the same value, we cannot replace
1046 (set (dest) (const_int))
1047 by
1048 (set (dest) (reg))
1049 because we don't know if the reg is live and has the same value at the
1050 location of replacement. */
1051 c1 = CONST_INT_P (src1);
1052 c2 = CONST_INT_P (src2);
1053 if (c1 && c2)
1054 return dir_both;
1055 else if (c2)
1056 return dir_forward;
1057 else if (c1)
1058 return dir_backward;
1059
1060 return dir_none;
1061 }
1062
1063 /* Merges directions A and B. */
1064
1065 static enum replace_direction
1066 merge_dir (enum replace_direction a, enum replace_direction b)
1067 {
1068 /* Implements the following table:
1069 |bo fw bw no
1070 ---+-----------
1071 bo |bo fw bw no
1072 fw |-- fw no no
1073 bw |-- -- bw no
1074 no |-- -- -- no. */
1075
1076 if (a == b)
1077 return a;
1078
1079 if (a == dir_both)
1080 return b;
1081 if (b == dir_both)
1082 return a;
1083
1084 return dir_none;
1085 }
1086
1087 /* Array of flags indexed by reg note kind, true if the given
1088 reg note is CFA related. */
1089 static const bool reg_note_cfa_p[] = {
1090 #undef REG_CFA_NOTE
1091 #define DEF_REG_NOTE(NAME) false,
1092 #define REG_CFA_NOTE(NAME) true,
1093 #include "reg-notes.def"
1094 #undef REG_CFA_NOTE
1095 #undef DEF_REG_NOTE
1096 false
1097 };
1098
1099 /* Return true if I1 and I2 have identical CFA notes (the same order
1100 and equivalent content). */
1101
1102 static bool
1103 insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2)
1104 {
1105 rtx n1, n2;
1106 for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ;
1107 n1 = XEXP (n1, 1), n2 = XEXP (n2, 1))
1108 {
1109 /* Skip over reg notes not related to CFI information. */
1110 while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)])
1111 n1 = XEXP (n1, 1);
1112 while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)])
1113 n2 = XEXP (n2, 1);
1114 if (n1 == NULL_RTX && n2 == NULL_RTX)
1115 return true;
1116 if (n1 == NULL_RTX || n2 == NULL_RTX)
1117 return false;
1118 if (XEXP (n1, 0) == XEXP (n2, 0))
1119 ;
1120 else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX)
1121 return false;
1122 else if (!(reload_completed
1123 ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0))
1124 : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0))))
1125 return false;
1126 }
1127 }
1128
1129 /* Examine I1 and I2 and return:
1130 - dir_forward if I1 can be replaced by I2, or
1131 - dir_backward if I2 can be replaced by I1, or
1132 - dir_both if both are the case. */
1133
1134 static enum replace_direction
1135 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
1136 {
1137 rtx p1, p2;
1138
1139 /* Verify that I1 and I2 are equivalent. */
1140 if (GET_CODE (i1) != GET_CODE (i2))
1141 return dir_none;
1142
1143 /* __builtin_unreachable() may lead to empty blocks (ending with
1144 NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */
1145 if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1146 return dir_both;
1147
1148 /* ??? Do not allow cross-jumping between different stack levels. */
1149 p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
1150 p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
1151 if (p1 && p2)
1152 {
1153 p1 = XEXP (p1, 0);
1154 p2 = XEXP (p2, 0);
1155 if (!rtx_equal_p (p1, p2))
1156 return dir_none;
1157
1158 /* ??? Worse, this adjustment had better be constant lest we
1159 have differing incoming stack levels. */
1160 if (!frame_pointer_needed
1161 && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN))
1162 return dir_none;
1163 }
1164 else if (p1 || p2)
1165 return dir_none;
1166
1167 /* Do not allow cross-jumping between frame related insns and other
1168 insns. */
1169 if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2))
1170 return dir_none;
1171
1172 p1 = PATTERN (i1);
1173 p2 = PATTERN (i2);
1174
1175 if (GET_CODE (p1) != GET_CODE (p2))
1176 return dir_none;
1177
1178 /* If this is a CALL_INSN, compare register usage information.
1179 If we don't check this on stack register machines, the two
1180 CALL_INSNs might be merged leaving reg-stack.c with mismatching
1181 numbers of stack registers in the same basic block.
1182 If we don't check this on machines with delay slots, a delay slot may
1183 be filled that clobbers a parameter expected by the subroutine.
1184
1185 ??? We take the simple route for now and assume that if they're
1186 equal, they were constructed identically.
1187
1188 Also check for identical exception regions. */
1189
1190 if (CALL_P (i1))
1191 {
1192 /* Ensure the same EH region. */
1193 rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1194 rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1195
1196 if (!n1 && n2)
1197 return dir_none;
1198
1199 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1200 return dir_none;
1201
1202 if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1203 CALL_INSN_FUNCTION_USAGE (i2))
1204 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1205 return dir_none;
1206
1207 /* For address sanitizer, never crossjump __asan_report_* builtins,
1208 otherwise errors might be reported on incorrect lines. */
1209 if (flag_sanitize & SANITIZE_ADDRESS)
1210 {
1211 rtx call = get_call_rtx_from (i1);
1212 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
1213 {
1214 rtx symbol = XEXP (XEXP (call, 0), 0);
1215 if (SYMBOL_REF_DECL (symbol)
1216 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
1217 {
1218 if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
1219 == BUILT_IN_NORMAL)
1220 && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1221 >= BUILT_IN_ASAN_REPORT_LOAD1
1222 && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1223 <= BUILT_IN_ASAN_STOREN)
1224 return dir_none;
1225 }
1226 }
1227 }
1228
1229 HARD_REG_SET i1_used, i2_used;
1230
1231 get_call_reg_set_usage (i1, &i1_used, call_used_reg_set);
1232 get_call_reg_set_usage (i2, &i2_used, call_used_reg_set);
1233
1234 if (!hard_reg_set_equal_p (i1_used, i2_used))
1235 return dir_none;
1236 }
1237
1238 /* If both i1 and i2 are frame related, verify all the CFA notes
1239 in the same order and with the same content. */
1240 if (RTX_FRAME_RELATED_P (i1) && !insns_have_identical_cfa_notes (i1, i2))
1241 return dir_none;
1242
1243 #ifdef STACK_REGS
1244 /* If cross_jump_death_matters is not 0, the insn's mode
1245 indicates whether or not the insn contains any stack-like
1246 regs. */
1247
1248 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1249 {
1250 /* If register stack conversion has already been done, then
1251 death notes must also be compared before it is certain that
1252 the two instruction streams match. */
1253
1254 rtx note;
1255 HARD_REG_SET i1_regset, i2_regset;
1256
1257 CLEAR_HARD_REG_SET (i1_regset);
1258 CLEAR_HARD_REG_SET (i2_regset);
1259
1260 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1261 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1262 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1263
1264 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1265 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1266 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1267
1268 if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1269 return dir_none;
1270 }
1271 #endif
1272
1273 if (reload_completed
1274 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1275 return dir_both;
1276
1277 return can_replace_by (i1, i2);
1278 }
1279 \f
1280 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1281 flow_find_head_matching_sequence, ensure the notes match. */
1282
1283 static void
1284 merge_notes (rtx_insn *i1, rtx_insn *i2)
1285 {
1286 /* If the merged insns have different REG_EQUAL notes, then
1287 remove them. */
1288 rtx equiv1 = find_reg_equal_equiv_note (i1);
1289 rtx equiv2 = find_reg_equal_equiv_note (i2);
1290
1291 if (equiv1 && !equiv2)
1292 remove_note (i1, equiv1);
1293 else if (!equiv1 && equiv2)
1294 remove_note (i2, equiv2);
1295 else if (equiv1 && equiv2
1296 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1297 {
1298 remove_note (i1, equiv1);
1299 remove_note (i2, equiv2);
1300 }
1301 }
1302
1303 /* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
1304 resulting insn in I1, and the corresponding bb in BB1. At the head of a
1305 bb, if there is a predecessor bb that reaches this bb via fallthru, and
1306 FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
1307 DID_FALLTHRU. Otherwise, stops at the head of the bb. */
1308
1309 static void
1310 walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
1311 bool *did_fallthru)
1312 {
1313 edge fallthru;
1314
1315 *did_fallthru = false;
1316
1317 /* Ignore notes. */
1318 while (!NONDEBUG_INSN_P (*i1))
1319 {
1320 if (*i1 != BB_HEAD (*bb1))
1321 {
1322 *i1 = PREV_INSN (*i1);
1323 continue;
1324 }
1325
1326 if (!follow_fallthru)
1327 return;
1328
1329 fallthru = find_fallthru_edge ((*bb1)->preds);
1330 if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1331 || !single_succ_p (fallthru->src))
1332 return;
1333
1334 *bb1 = fallthru->src;
1335 *i1 = BB_END (*bb1);
1336 *did_fallthru = true;
1337 }
1338 }
1339
1340 /* Look through the insns at the end of BB1 and BB2 and find the longest
1341    sequence of insns that are either equivalent, or allow forward or backward
1342 replacement. Store the first insns for that sequence in *F1 and *F2 and
1343 return the sequence length.
1344
1345 DIR_P indicates the allowed replacement direction on function entry, and
1346 the actual replacement direction on function exit. If NULL, only equivalent
1347 sequences are allowed.
1348
1349 To simplify callers of this function, if the blocks match exactly,
1350 store the head of the blocks in *F1 and *F2. */
1351
1352 int
1353 flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
1354 rtx_insn **f2, enum replace_direction *dir_p)
1355 {
1356 rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
1357 int ninsns = 0;
1358 enum replace_direction dir, last_dir, afterlast_dir;
1359 bool follow_fallthru, did_fallthru;
1360
1361 if (dir_p)
1362 dir = *dir_p;
1363 else
1364 dir = dir_both;
1365 afterlast_dir = dir;
1366 last_dir = afterlast_dir;
1367
1368 /* Skip simple jumps at the end of the blocks. Complex jumps still
1369 need to be compared for equivalence, which we'll do below. */
1370
1371 i1 = BB_END (bb1);
1372 last1 = afterlast1 = last2 = afterlast2 = NULL;
1373 if (onlyjump_p (i1)
1374 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1375 {
1376 last1 = i1;
1377 i1 = PREV_INSN (i1);
1378 }
1379
1380 i2 = BB_END (bb2);
1381 if (onlyjump_p (i2)
1382 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1383 {
1384 last2 = i2;
1385 /* Count everything except for unconditional jump as insn.
1386 Don't count any jumps if dir_p is NULL. */
1387 if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
1388 ninsns++;
1389 i2 = PREV_INSN (i2);
1390 }
1391
1392 while (true)
1393 {
1394 /* In the following example, we can replace all jumps to C by jumps to A.
1395
1396 This removes 4 duplicate insns.
1397 [bb A] insn1 [bb C] insn1
1398 insn2 insn2
1399 [bb B] insn3 insn3
1400 insn4 insn4
1401 jump_insn jump_insn
1402
1403 We could also replace all jumps to A by jumps to C, but that leaves B
1404 alive, and removes only 2 duplicate insns. In a subsequent crossjump
1405 step, all jumps to B would be replaced with jumps to the middle of C,
1406 achieving the same result with more effort.
1407 So we allow only the first possibility, which means that we don't allow
1408 fallthru in the block that's being replaced. */
1409
1410 follow_fallthru = dir_p && dir != dir_forward;
1411 walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1412 if (did_fallthru)
1413 dir = dir_backward;
1414
1415 follow_fallthru = dir_p && dir != dir_backward;
1416 walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1417 if (did_fallthru)
1418 dir = dir_forward;
1419
1420 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1421 break;
1422
1423       /* Do not turn a crossing edge into a non-crossing one, or vice versa,
1424          after reload.  */
1425 if (BB_PARTITION (BLOCK_FOR_INSN (i1))
1426 != BB_PARTITION (BLOCK_FOR_INSN (i2))
1427 && reload_completed)
1428 break;
1429
1430 dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1431 if (dir == dir_none || (!dir_p && dir != dir_both))
1432 break;
1433
1434 merge_memattrs (i1, i2);
1435
1436 /* Don't begin a cross-jump with a NOTE insn. */
1437 if (INSN_P (i1))
1438 {
1439 merge_notes (i1, i2);
1440
1441 afterlast1 = last1, afterlast2 = last2;
1442 last1 = i1, last2 = i2;
1443 afterlast_dir = last_dir;
1444 last_dir = dir;
1445 if (active_insn_p (i1))
1446 ninsns++;
1447 }
1448
1449 i1 = PREV_INSN (i1);
1450 i2 = PREV_INSN (i2);
1451 }
1452
1453 /* Don't allow the insn after a compare to be shared by
1454 cross-jumping unless the compare is also shared. */
1455 if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1456 && ! sets_cc0_p (last1))
1457 last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
1458
1459 /* Include preceding notes and labels in the cross-jump. One,
1460 this may bring us to the head of the blocks as requested above.
1461      Two, it keeps line number notes as matched as possible.  */
1462 if (ninsns)
1463 {
1464 bb1 = BLOCK_FOR_INSN (last1);
1465 while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1466 last1 = PREV_INSN (last1);
1467
1468 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1469 last1 = PREV_INSN (last1);
1470
1471 bb2 = BLOCK_FOR_INSN (last2);
1472 while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1473 last2 = PREV_INSN (last2);
1474
1475 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1476 last2 = PREV_INSN (last2);
1477
1478 *f1 = last1;
1479 *f2 = last2;
1480 }
1481
1482 if (dir_p)
1483 *dir_p = last_dir;
1484 return ninsns;
1485 }
1486
1487 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1488 the head of the two blocks. Do not include jumps at the end.
1489    If STOP_AFTER is nonzero, stop after finding that many matching
1490    instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
1491    nonzero, only count active insns.  */
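/* Illustrative example (added, not from the original source): with
   STOP_AFTER == 2, two blocks that both begin with

       r1 = r2 + 1;
       mem[r3] = r1;
       ... (after which the blocks diverge)

   yield a return value of 2, with *F1 and *F2 pointing at the two stores,
   i.e. the last insns of the matched head sequences.  */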
1492
1493 int
1494 flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
1495 rtx_insn **f2, int stop_after)
1496 {
1497 rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
1498 int ninsns = 0;
1499 edge e;
1500 edge_iterator ei;
1501 int nehedges1 = 0, nehedges2 = 0;
1502
1503 FOR_EACH_EDGE (e, ei, bb1->succs)
1504 if (e->flags & EDGE_EH)
1505 nehedges1++;
1506 FOR_EACH_EDGE (e, ei, bb2->succs)
1507 if (e->flags & EDGE_EH)
1508 nehedges2++;
1509
1510 i1 = BB_HEAD (bb1);
1511 i2 = BB_HEAD (bb2);
1512 last1 = beforelast1 = last2 = beforelast2 = NULL;
1513
1514 while (true)
1515 {
1516 /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG. */
1517 while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1518 {
1519 if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1520 break;
1521 i1 = NEXT_INSN (i1);
1522 }
1523
1524 while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1525 {
1526 if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1527 break;
1528 i2 = NEXT_INSN (i2);
1529 }
1530
1531 if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1532 || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1533 break;
1534
1535 if (NOTE_P (i1) || NOTE_P (i2)
1536 || JUMP_P (i1) || JUMP_P (i2))
1537 break;
1538
1539 /* A sanity check to make sure we're not merging insns with different
1540 effects on EH. If only one of them ends a basic block, it shouldn't
1541 have an EH edge; if both end a basic block, there should be the same
1542 number of EH edges. */
1543 if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1544 && nehedges1 > 0)
1545 || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1546 && nehedges2 > 0)
1547 || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1548 && nehedges1 != nehedges2))
1549 break;
1550
1551 if (old_insns_match_p (0, i1, i2) != dir_both)
1552 break;
1553
1554 merge_memattrs (i1, i2);
1555
1556 /* Don't begin a cross-jump with a NOTE insn. */
1557 if (INSN_P (i1))
1558 {
1559 merge_notes (i1, i2);
1560
1561 beforelast1 = last1, beforelast2 = last2;
1562 last1 = i1, last2 = i2;
1563 if (!stop_after || active_insn_p (i1))
1564 ninsns++;
1565 }
1566
1567 if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1568 || (stop_after > 0 && ninsns == stop_after))
1569 break;
1570
1571 i1 = NEXT_INSN (i1);
1572 i2 = NEXT_INSN (i2);
1573 }
1574
1575 /* Don't allow a compare to be shared by cross-jumping unless the insn
1576 after the compare is also shared. */
1577 if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1578 && sets_cc0_p (last1))
1579 last1 = beforelast1, last2 = beforelast2, ninsns--;
1580
1581 if (ninsns)
1582 {
1583 *f1 = last1;
1584 *f2 = last2;
1585 }
1586
1587 return ninsns;
1588 }
1589
1590 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1591    the branch instruction.  This means that if we commonize the control
1592    flow before the end of the basic block, the semantics remain unchanged.
1593
1594 We may assume that there exists one edge with a common destination. */
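/* Illustrative example (added commentary): a block ending in
   "if (a < b) goto X" with fallthru to Y matches a block ending in
   "if (a >= b) goto Y" with fallthru to X.  The code below detects this by
   setting REVERSE, reversing the second comparison code, and then checking
   that the codes and operands agree.  */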
1595
1596 static bool
1597 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1598 {
1599 int nehedges1 = 0, nehedges2 = 0;
1600 edge fallthru1 = 0, fallthru2 = 0;
1601 edge e1, e2;
1602 edge_iterator ei;
1603
1604 /* If we performed shrink-wrapping, edges to the exit block can
1605 only be distinguished for JUMP_INSNs. The two paths may differ in
1606    whether they went through the prologue.  Sibcalls are fine, we know
1607    that we either didn't need an epilogue or inserted one before them.  */
1608 if (crtl->shrink_wrapped
1609 && single_succ_p (bb1)
1610 && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
1611 && (!JUMP_P (BB_END (bb1))
1612 /* Punt if the only successor is a fake edge to exit, the jump
1613 must be some weird one. */
1614 || (single_succ_edge (bb1)->flags & EDGE_FAKE) != 0)
1615 && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1616 return false;
1617
1618 /* If BB1 has only one successor, we may be looking at either an
1619 unconditional jump, or a fake edge to exit. */
1620 if (single_succ_p (bb1)
1621 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1622 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1623 return (single_succ_p (bb2)
1624 && (single_succ_edge (bb2)->flags
1625 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1626 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1627
1628 /* Match conditional jumps - this may get tricky when fallthru and branch
1629 edges are crossed. */
1630 if (EDGE_COUNT (bb1->succs) == 2
1631 && any_condjump_p (BB_END (bb1))
1632 && onlyjump_p (BB_END (bb1)))
1633 {
1634 edge b1, f1, b2, f2;
1635 bool reverse, match;
1636 rtx set1, set2, cond1, cond2;
1637 enum rtx_code code1, code2;
1638
1639 if (EDGE_COUNT (bb2->succs) != 2
1640 || !any_condjump_p (BB_END (bb2))
1641 || !onlyjump_p (BB_END (bb2)))
1642 return false;
1643
1644 b1 = BRANCH_EDGE (bb1);
1645 b2 = BRANCH_EDGE (bb2);
1646 f1 = FALLTHRU_EDGE (bb1);
1647 f2 = FALLTHRU_EDGE (bb2);
1648
1649 /* Get around possible forwarders on fallthru edges. Other cases
1650 should be optimized out already. */
1651 if (FORWARDER_BLOCK_P (f1->dest))
1652 f1 = single_succ_edge (f1->dest);
1653
1654 if (FORWARDER_BLOCK_P (f2->dest))
1655 f2 = single_succ_edge (f2->dest);
1656
1657 /* To simplify use of this function, return false if there are
1658 unneeded forwarder blocks. These will get eliminated later
1659 during cleanup_cfg. */
1660 if (FORWARDER_BLOCK_P (f1->dest)
1661 || FORWARDER_BLOCK_P (f2->dest)
1662 || FORWARDER_BLOCK_P (b1->dest)
1663 || FORWARDER_BLOCK_P (b2->dest))
1664 return false;
1665
1666 if (f1->dest == f2->dest && b1->dest == b2->dest)
1667 reverse = false;
1668 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1669 reverse = true;
1670 else
1671 return false;
1672
1673 set1 = pc_set (BB_END (bb1));
1674 set2 = pc_set (BB_END (bb2));
1675 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1676 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1677 reverse = !reverse;
1678
1679 cond1 = XEXP (SET_SRC (set1), 0);
1680 cond2 = XEXP (SET_SRC (set2), 0);
1681 code1 = GET_CODE (cond1);
1682 if (reverse)
1683 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1684 else
1685 code2 = GET_CODE (cond2);
1686
1687 if (code2 == UNKNOWN)
1688 return false;
1689
1690 /* Verify codes and operands match. */
1691 match = ((code1 == code2
1692 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1693 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1694 || (code1 == swap_condition (code2)
1695 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1696 XEXP (cond2, 0))
1697 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1698 XEXP (cond2, 1))));
1699
1700       /* If we return true, we will join the blocks, which means that
1701          we will only have one branch prediction bit to work with.  Thus
1702 we require the existing branches to have probabilities that are
1703 roughly similar. */
1704 if (match
1705 && optimize_bb_for_speed_p (bb1)
1706 && optimize_bb_for_speed_p (bb2))
1707 {
1708 profile_probability prob2;
1709
1710 if (b1->dest == b2->dest)
1711 prob2 = b2->probability;
1712 else
1713 /* Do not use f2 probability as f2 may be forwarded. */
1714 prob2 = b2->probability.invert ();
1715
1716 /* Fail if the difference in probabilities is greater than 50%.
1717 This rules out two well-predicted branches with opposite
1718 outcomes. */
1719 if (b1->probability.differs_lot_from_p (prob2))
1720 {
1721 if (dump_file)
1722 {
1723 fprintf (dump_file,
1724 "Outcomes of branch in bb %i and %i differ too"
1725 " much (", bb1->index, bb2->index);
1726 b1->probability.dump (dump_file);
1727 prob2.dump (dump_file);
1728 fprintf (dump_file, ")\n");
1729 }
1730 return false;
1731 }
1732 }
1733
1734 if (dump_file && match)
1735 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1736 bb1->index, bb2->index);
1737
1738 return match;
1739 }
1740
1741 /* Generic case - we are seeing a computed jump, table jump or trapping
1742 instruction. */
1743
1744 /* Check whether there are tablejumps in the end of BB1 and BB2.
1745 Return true if they are identical. */
1746 {
1747 rtx_insn *label1, *label2;
1748 rtx_jump_table_data *table1, *table2;
1749
1750 if (tablejump_p (BB_END (bb1), &label1, &table1)
1751 && tablejump_p (BB_END (bb2), &label2, &table2)
1752 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1753 {
1754     /* The labels should never be the same rtx.  If they really are the same,
1755        the jump tables are the same too.  So disable crossjumping of blocks BB1
1756        and BB2 because when deleting the common insns at the end of BB1
1757        by delete_basic_block () the jump table would be deleted too.  */
1758     /* If LABEL2 is referenced in BB1->END do not do anything
1759        because we would lose information when replacing
1760        LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1761 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1762 {
1763 /* Set IDENTICAL to true when the tables are identical. */
1764 bool identical = false;
1765 rtx p1, p2;
1766
1767 p1 = PATTERN (table1);
1768 p2 = PATTERN (table2);
1769 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1770 {
1771 identical = true;
1772 }
1773 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1774 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1775 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1776 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1777 {
1778 int i;
1779
1780 identical = true;
1781 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1782 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1783 identical = false;
1784 }
1785
1786 if (identical)
1787 {
1788 bool match;
1789
1790 /* Temporarily replace references to LABEL1 with LABEL2
1791 in BB1->END so that we could compare the instructions. */
1792 replace_label_in_insn (BB_END (bb1), label1, label2, false);
1793
1794 match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1795 == dir_both);
1796 if (dump_file && match)
1797 fprintf (dump_file,
1798 "Tablejumps in bb %i and %i match.\n",
1799 bb1->index, bb2->index);
1800
1801 /* Set the original label in BB1->END because when deleting
1802 a block whose end is a tablejump, the tablejump referenced
1803 from the instruction is deleted too. */
1804 replace_label_in_insn (BB_END (bb1), label2, label1, false);
1805
1806 return match;
1807 }
1808 }
1809 return false;
1810 }
1811 }
1812
1813 /* Find the last non-debug non-note instruction in each bb, except
1814 stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1815 handles that case specially. old_insns_match_p does not handle
1816 other types of instruction notes. */
1817 rtx_insn *last1 = BB_END (bb1);
1818 rtx_insn *last2 = BB_END (bb2);
1819 while (!NOTE_INSN_BASIC_BLOCK_P (last1) &&
1820 (DEBUG_INSN_P (last1) || NOTE_P (last1)))
1821 last1 = PREV_INSN (last1);
1822 while (!NOTE_INSN_BASIC_BLOCK_P (last2) &&
1823 (DEBUG_INSN_P (last2) || NOTE_P (last2)))
1824 last2 = PREV_INSN (last2);
1825 gcc_assert (last1 && last2);
1826
1827 /* First ensure that the instructions match. There may be many outgoing
1828 edges so this test is generally cheaper. */
1829 if (old_insns_match_p (mode, last1, last2) != dir_both)
1830 return false;
1831
1832   /* Search the outgoing edges, ensure that the counts match, and find possible
1833      fallthru and exception handling edges, since these need more
1834      validation.  */
1835 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1836 return false;
1837
1838 bool nonfakeedges = false;
1839 FOR_EACH_EDGE (e1, ei, bb1->succs)
1840 {
1841 e2 = EDGE_SUCC (bb2, ei.index);
1842
1843 if ((e1->flags & EDGE_FAKE) == 0)
1844 nonfakeedges = true;
1845
1846 if (e1->flags & EDGE_EH)
1847 nehedges1++;
1848
1849 if (e2->flags & EDGE_EH)
1850 nehedges2++;
1851
1852 if (e1->flags & EDGE_FALLTHRU)
1853 fallthru1 = e1;
1854 if (e2->flags & EDGE_FALLTHRU)
1855 fallthru2 = e2;
1856 }
1857
1858 /* If the number of edges of various types does not match, fail. */
1859 if (nehedges1 != nehedges2
1860 || (fallthru1 != 0) != (fallthru2 != 0))
1861 return false;
1862
1863 /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
1864 and the last real insn doesn't have REG_ARGS_SIZE note, don't
1865 attempt to optimize, as the two basic blocks might have different
1866 REG_ARGS_SIZE depths. For noreturn calls and unconditional
1867 traps there should be REG_ARGS_SIZE notes; they could be missing
1868 for __builtin_unreachable () uses though. */
1869 if (!nonfakeedges
1870 && !ACCUMULATE_OUTGOING_ARGS
1871 && (!INSN_P (last1)
1872 || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1873 return false;
1874
1875 /* Fallthru edges must be forwarded to the same destination. */
1876 if (fallthru1)
1877 {
1878 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1879 ? single_succ (fallthru1->dest): fallthru1->dest);
1880 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1881 ? single_succ (fallthru2->dest): fallthru2->dest);
1882
1883 if (d1 != d2)
1884 return false;
1885 }
1886
1887 /* Ensure the same EH region. */
1888 {
1889 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1890 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1891
1892 if (!n1 && n2)
1893 return false;
1894
1895 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1896 return false;
1897 }
1898
1899 /* The same checks as in try_crossjump_to_edge. It is required for the RTL
1900 version of sequence abstraction. */
1901 FOR_EACH_EDGE (e1, ei, bb2->succs)
1902 {
1903 edge e2;
1904 edge_iterator ei;
1905 basic_block d1 = e1->dest;
1906
1907 if (FORWARDER_BLOCK_P (d1))
1908 d1 = EDGE_SUCC (d1, 0)->dest;
1909
1910 FOR_EACH_EDGE (e2, ei, bb1->succs)
1911 {
1912 basic_block d2 = e2->dest;
1913 if (FORWARDER_BLOCK_P (d2))
1914 d2 = EDGE_SUCC (d2, 0)->dest;
1915 if (d1 == d2)
1916 break;
1917 }
1918
1919 if (!e2)
1920 return false;
1921 }
1922
1923 return true;
1924 }
1925
1926 /* Returns true if basic block BB has a preserved label. */
1927
1928 static bool
1929 block_has_preserve_label (basic_block bb)
1930 {
1931 return (bb
1932 && block_label (bb)
1933 && LABEL_PRESERVE_P (block_label (bb)));
1934 }
1935
1936 /* E1 and E2 are edges with the same destination block. Search their
1937 predecessors for common code. If found, redirect control flow from
1938 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1939 or the other way around (dir_backward). DIR specifies the allowed
1940 replacement direction. */
1941
1942 static bool
1943 try_crossjump_to_edge (int mode, edge e1, edge e2,
1944 enum replace_direction dir)
1945 {
1946 int nmatch;
1947 basic_block src1 = e1->src, src2 = e2->src;
1948 basic_block redirect_to, redirect_from, to_remove;
1949 basic_block osrc1, osrc2, redirect_edges_to, tmp;
1950 rtx_insn *newpos1, *newpos2;
1951 edge s;
1952 edge_iterator ei;
1953
1954 newpos1 = newpos2 = NULL;
1955
1956 /* Search backward through forwarder blocks. We don't need to worry
1957 about multiple entry or chained forwarders, as they will be optimized
1958 away. We do this to look past the unconditional jump following a
1959 conditional jump that is required due to the current CFG shape. */
1960 if (single_pred_p (src1)
1961 && FORWARDER_BLOCK_P (src1))
1962 e1 = single_pred_edge (src1), src1 = e1->src;
1963
1964 if (single_pred_p (src2)
1965 && FORWARDER_BLOCK_P (src2))
1966 e2 = single_pred_edge (src2), src2 = e2->src;
1967
1968 /* Nothing to do if we reach ENTRY, or a common source block. */
1969 if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1970 || src2 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1971 return false;
1972 if (src1 == src2)
1973 return false;
1974
1975 /* Seeing more than one forwarder block would confuse us later... */
1976 if (FORWARDER_BLOCK_P (e1->dest)
1977 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1978 return false;
1979
1980 if (FORWARDER_BLOCK_P (e2->dest)
1981 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1982 return false;
1983
1984 /* Likewise with dead code (possibly newly created by the other optimizations
1985 of cleanup_cfg). */
1986 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1987 return false;
1988
1989 /* Do not turn a crossing edge into a non-crossing one, or vice versa, after reload. */
1990 if (BB_PARTITION (src1) != BB_PARTITION (src2)
1991 && reload_completed)
1992 return false;
1993
1994 /* Look for the common insn sequence, part the first ... */
1995 if (!outgoing_edges_match (mode, src1, src2))
1996 return false;
1997
1998 /* ... and part the second. */
1999 nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
2000
2001 osrc1 = src1;
2002 osrc2 = src2;
2003 if (newpos1 != NULL_RTX)
2004 src1 = BLOCK_FOR_INSN (newpos1);
2005 if (newpos2 != NULL_RTX)
2006 src2 = BLOCK_FOR_INSN (newpos2);
2007
2008 /* Check that SRC1 and SRC2 have preds again. They may have changed
2009 above due to the call to flow_find_cross_jump. */
2010 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
2011 return false;
2012
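/* For DIR_BACKWARD the roles of the two blocks are reversed; swap them
so that the code below can always delete the matched tail from SRC1
and redirect it into (a split of) SRC2. */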
2013 if (dir == dir_backward)
2014 {
2015 std::swap (osrc1, osrc2);
2016 std::swap (src1, src2);
2017 std::swap (e1, e2);
2018 std::swap (newpos1, newpos2);
2019 }
2020
2021 /* Don't proceed with the crossjump unless we found a sufficient number
2022 of matching instructions or the 'from' block was totally matched
2023 (such that its predecessors will hopefully be redirected and the
2024 block removed). */
2025 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
2026 && (newpos1 != BB_HEAD (src1)))
2027 return false;
2028
2029 /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
2030 if (block_has_preserve_label (e1->dest)
2031 && (e1->flags & EDGE_ABNORMAL))
2032 return false;
2033
2034 /* Here we know that the insns in the end of SRC1 which are common with SRC2
2035 will be deleted.
2036 If we have tablejumps in the end of SRC1 and SRC2
2037 they have been already compared for equivalence in outgoing_edges_match ()
2038 so replace the references to TABLE1 by references to TABLE2. */
2039 {
2040 rtx_insn *label1, *label2;
2041 rtx_jump_table_data *table1, *table2;
2042
2043 if (tablejump_p (BB_END (osrc1), &label1, &table1)
2044 && tablejump_p (BB_END (osrc2), &label2, &table2)
2045 && label1 != label2)
2046 {
2047 rtx_insn *insn;
2048
2049 /* Replace references to LABEL1 with LABEL2. */
2050 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2051 {
2052 /* Do not replace the label in SRC1->END because when deleting
2053 a block whose end is a tablejump, the tablejump referenced
2054 from the instruction is deleted too. */
2055 if (insn != BB_END (osrc1))
2056 replace_label_in_insn (insn, label1, label2, true);
2057 }
2058 }
2059 }
2060
2061 /* Avoid splitting if possible. We must always split when SRC2 has
2062 EH predecessor edges, or we may end up with basic blocks with both
2063 normal and EH predecessor edges. */
2064 if (newpos2 == BB_HEAD (src2)
2065 && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
2066 redirect_to = src2;
2067 else
2068 {
2069 if (newpos2 == BB_HEAD (src2))
2070 {
2071 /* Skip possible basic block header. */
2072 if (LABEL_P (newpos2))
2073 newpos2 = NEXT_INSN (newpos2);
2074 while (DEBUG_INSN_P (newpos2))
2075 newpos2 = NEXT_INSN (newpos2);
2076 if (NOTE_P (newpos2))
2077 newpos2 = NEXT_INSN (newpos2);
2078 while (DEBUG_INSN_P (newpos2))
2079 newpos2 = NEXT_INSN (newpos2);
2080 }
2081
2082 if (dump_file)
2083 fprintf (dump_file, "Splitting bb %i before %i insns\n",
2084 src2->index, nmatch);
2085 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
2086 }
2087
2088 if (dump_file)
2089 fprintf (dump_file,
2090 "Cross jumping from bb %i to bb %i; %i common insns\n",
2091 src1->index, src2->index, nmatch);
2092
2093 /* We may have some registers visible through the block. */
2094 df_set_bb_dirty (redirect_to);
2095
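/* Pick the block whose outgoing edges reach the common successors;
the count and probability updates below operate on its edges. */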
2096 if (osrc2 == src2)
2097 redirect_edges_to = redirect_to;
2098 else
2099 redirect_edges_to = osrc2;
2100
2101 /* Recompute the counts of destinations of outgoing edges. */
2102 FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
2103 {
2104 edge s2;
2105 edge_iterator ei;
2106 basic_block d = s->dest;
2107
2108 if (FORWARDER_BLOCK_P (d))
2109 d = single_succ (d);
2110
2111 FOR_EACH_EDGE (s2, ei, src1->succs)
2112 {
2113 basic_block d2 = s2->dest;
2114 if (FORWARDER_BLOCK_P (d2))
2115 d2 = single_succ (d2);
2116 if (d == d2)
2117 break;
2118 }
2119
2120 /* Take care to update possible forwarder blocks. We verified
2121 that there is no more than one in the chain, so we can't run
2122 into an infinite loop. */
2123 if (FORWARDER_BLOCK_P (s->dest))
2124 s->dest->count += s->count ();
2125
2126 if (FORWARDER_BLOCK_P (s2->dest))
2127 s2->dest->count -= s->count ();
2128
2129 s->probability = s->probability.combine_with_count
2130 (redirect_edges_to->count,
2131 s2->probability, src1->count);
2132 }
2133
2134 /* Adjust count for the block. An earlier jump
2135 threading pass may have left the profile in an inconsistent
2136 state (see update_bb_profile_for_threading) so we must be
2137 prepared for overflows. */
2138 tmp = redirect_to;
2139 do
2140 {
2141 tmp->count += src1->count;
2142 if (tmp == redirect_edges_to)
2143 break;
2144 tmp = find_fallthru_edge (tmp->succs)->dest;
2145 }
2146 while (true);
2147 update_br_prob_note (redirect_edges_to);
2148
2149 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
2150
2151 /* Skip possible basic block header. */
2152 if (LABEL_P (newpos1))
2153 newpos1 = NEXT_INSN (newpos1);
2154
2155 while (DEBUG_INSN_P (newpos1))
2156 newpos1 = NEXT_INSN (newpos1);
2157
2158 if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2159 newpos1 = NEXT_INSN (newpos1);
2160
2161 while (DEBUG_INSN_P (newpos1))
2162 newpos1 = NEXT_INSN (newpos1);
2163
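/* Split SRC1 just before the matched sequence; the tail block
TO_REMOVE then holds only the duplicate insns. Force the edge out of
the first half to go to REDIRECT_TO instead and delete the duplicate
block. */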
2164 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2165 to_remove = single_succ (redirect_from);
2166
2167 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2168 delete_basic_block (to_remove);
2169
2170 update_forwarder_flag (redirect_from);
2171 if (redirect_to != src2)
2172 update_forwarder_flag (src2);
2173
2174 return true;
2175 }
2176
2177 /* Search the predecessors of BB for common insn sequences. When found,
2178 share code between them by redirecting control flow. Return true if
2179 any changes were made. */
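/* An illustrative example: if two predecessors A and B of BB end with
the same insn sequence

	A:  ...				B:  ...
	    x = y + 1			    x = y + 1
	    goto BB			    goto BB

the common tail can be deleted from one of the blocks, which is then
redirected to jump into the other at the start of the shared
sequence. */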
2180
2181 static bool
2182 try_crossjump_bb (int mode, basic_block bb)
2183 {
2184 edge e, e2, fallthru;
2185 bool changed;
2186 unsigned max, ix, ix2;
2187
2188 /* Nothing to do if there are not at least two incoming edges. */
2189 if (EDGE_COUNT (bb->preds) < 2)
2190 return false;
2191
2192 /* Don't crossjump if this block ends in a computed jump,
2193 unless we are optimizing for size. */
2194 if (optimize_bb_for_size_p (bb)
2195 && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2196 && computed_jump_p (BB_END (bb)))
2197 return false;
2198
2199 /* If we are partitioning hot/cold basic blocks, we don't want to
2200 mess up unconditional or indirect jumps that cross between hot
2201 and cold sections.
2202
2203 Basic block partitioning may result in some jumps that appear to
2204 be optimizable (or blocks that appear to be mergeable), but which really
2205 must be left untouched (they are required to make it safely across
2206 partition boundaries). See the comments at the top of
2207 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
2208
2209 if (BB_PARTITION (EDGE_PRED (bb, 0)->src)
2210 != BB_PARTITION (EDGE_PRED (bb, 1)->src)
2211 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2212 return false;
2213
2214 /* It is always cheapest to redirect a block that ends in a branch to
2215 a block that falls through into BB, as that adds no branches to the
2216 program. We'll try that combination first. */
2217 fallthru = NULL;
2218 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2219
2220 if (EDGE_COUNT (bb->preds) > max)
2221 return false;
2222
2223 fallthru = find_fallthru_edge (bb->preds);
2224
2225 changed = false;
2226 for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2227 {
2228 e = EDGE_PRED (bb, ix);
2229 ix++;
2230
2231 /* As noted above, first try with the fallthru predecessor (or, if
2232 we are in cfglayout mode, a fallthru predecessor). */
2233 if (fallthru)
2234 {
2235 /* Don't combine the fallthru edge into anything else.
2236 If there is a match, we'll do it the other way around. */
2237 if (e == fallthru)
2238 continue;
2239 /* If nothing changed since the last attempt, there is nothing
2240 we can do. */
2241 if (!first_pass
2242 && !((e->src->flags & BB_MODIFIED)
2243 || (fallthru->src->flags & BB_MODIFIED)))
2244 continue;
2245
2246 if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2247 {
2248 changed = true;
2249 ix = 0;
2250 continue;
2251 }
2252 }
2253
2254 /* Non-obvious work limiting check: Recognize that we're going
2255 to call try_crossjump_bb on every basic block. So if we have
2256 two blocks with lots of outgoing edges (a switch) and they
2257 share lots of common destinations, then we would do the
2258 cross-jump check once for each common destination.
2259
2260 Now, if the blocks actually are cross-jump candidates, then
2261 all of their destinations will be shared. Which means that
2262 we only need to check them for cross-jump candidacy once. We
2263 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2264 choosing to do the check from the block for which the edge
2265 in question is the first successor of A. */
2266 if (EDGE_SUCC (e->src, 0) != e)
2267 continue;
2268
2269 for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2270 {
2271 e2 = EDGE_PRED (bb, ix2);
2272
2273 if (e2 == e)
2274 continue;
2275
2276 /* We've already checked the fallthru edge above. */
2277 if (e2 == fallthru)
2278 continue;
2279
2280 /* The "first successor" check above only prevents multiple
2281 checks of crossjump(A,B). In order to prevent redundant
2282 checks of crossjump(B,A), require that A be the block
2283 with the lowest index. */
2284 if (e->src->index > e2->src->index)
2285 continue;
2286
2287 /* If nothing changed since the last attempt, there is nothing
2288 we can do. */
2289 if (!first_pass
2290 && !((e->src->flags & BB_MODIFIED)
2291 || (e2->src->flags & BB_MODIFIED)))
2292 continue;
2293
2294 /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2295 direction. */
2296 if (try_crossjump_to_edge (mode, e, e2, dir_both))
2297 {
2298 changed = true;
2299 ix = 0;
2300 break;
2301 }
2302 }
2303 }
2304
2305 if (changed)
2306 crossjumps_occurred = true;
2307
2308 return changed;
2309 }
2310
2311 /* Search the successors of BB for common insn sequences. When found,
2312 share code between them by moving it across the basic block
2313 boundary. Return true if any changes were made. */
2314
2315 static bool
2316 try_head_merge_bb (basic_block bb)
2317 {
2318 basic_block final_dest_bb = NULL;
2319 int max_match = INT_MAX;
2320 edge e0;
2321 rtx_insn **headptr, **currptr, **nextptr;
2322 bool changed, moveall;
2323 unsigned ix;
2324 rtx_insn *e0_last_head;
2325 rtx cond;
2326 rtx_insn *move_before;
2327 unsigned nedges = EDGE_COUNT (bb->succs);
2328 rtx_insn *jump = BB_END (bb);
2329 regset live, live_union;
2330
2331 /* Nothing to do if there are not at least two outgoing edges. */
2332 if (nedges < 2)
2333 return false;
2334
2335 /* Don't crossjump if this block ends in a computed jump,
2336 unless we are optimizing for size. */
2337 if (optimize_bb_for_size_p (bb)
2338 && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2339 && computed_jump_p (BB_END (bb)))
2340 return false;
2341
2342 cond = get_condition (jump, &move_before, true, false);
2343 if (cond == NULL_RTX)
2344 {
2345 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2346 move_before = prev_nonnote_nondebug_insn (jump);
2347 else
2348 move_before = jump;
2349 }
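/* MOVE_BEFORE is the place where the hoisted insns will be inserted:
just before the earliest insn involved in computing the jump condition
when one is found, otherwise directly before the jump (and, on cc0
targets, before the insn that sets cc0). */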
2350
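/* Give up if any successor is the exit block; there is no insn
sequence there to merge. */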
2351 for (ix = 0; ix < nedges; ix++)
2352 if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
2353 return false;
2354
2355 for (ix = 0; ix < nedges; ix++)
2356 {
2357 edge e = EDGE_SUCC (bb, ix);
2358 basic_block other_bb = e->dest;
2359
2360 if (df_get_bb_dirty (other_bb))
2361 {
2362 block_was_dirty = true;
2363 return false;
2364 }
2365
2366 if (e->flags & EDGE_ABNORMAL)
2367 return false;
2368
2369 /* Normally, all destination blocks must only be reachable from this
2370 block, i.e. they must have one incoming edge.
2371
2372 There is one special case we can handle, that of multiple consecutive
2373 jumps where the first jumps to one of the targets of the second jump.
2374 This happens frequently in switch statements for default labels.
2375 The structure is as follows:
2376 FINAL_DEST_BB
2377 ....
2378 if (cond) jump A;
2379 fall through
2380 BB
2381 jump with targets A, B, C, D...
2382 A
2383 has two incoming edges, from FINAL_DEST_BB and BB
2384
2385 In this case, we can try to move the insns through BB and into
2386 FINAL_DEST_BB. */
2387 if (EDGE_COUNT (other_bb->preds) != 1)
2388 {
2389 edge incoming_edge, incoming_bb_other_edge;
2390 edge_iterator ei;
2391
2392 if (final_dest_bb != NULL
2393 || EDGE_COUNT (other_bb->preds) != 2)
2394 return false;
2395
2396 /* We must be able to move the insns across the whole block. */
2397 move_before = BB_HEAD (bb);
2398 while (!NONDEBUG_INSN_P (move_before))
2399 move_before = NEXT_INSN (move_before);
2400
2401 if (EDGE_COUNT (bb->preds) != 1)
2402 return false;
2403 incoming_edge = EDGE_PRED (bb, 0);
2404 final_dest_bb = incoming_edge->src;
2405 if (EDGE_COUNT (final_dest_bb->succs) != 2)
2406 return false;
2407 FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2408 if (incoming_bb_other_edge != incoming_edge)
2409 break;
2410 if (incoming_bb_other_edge->dest != other_bb)
2411 return false;
2412 }
2413 }
2414
2415 e0 = EDGE_SUCC (bb, 0);
2416 e0_last_head = NULL;
2417 changed = false;
2418
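/* See how many insns at the start of each successor match the start of
E0->DEST; MAX_MATCH becomes the length of the head sequence common to
all successors. */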
2419 for (ix = 1; ix < nedges; ix++)
2420 {
2421 edge e = EDGE_SUCC (bb, ix);
2422 rtx_insn *e0_last, *e_last;
2423 int nmatch;
2424
2425 nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2426 &e0_last, &e_last, 0);
2427 if (nmatch == 0)
2428 return false;
2429
2430 if (nmatch < max_match)
2431 {
2432 max_match = nmatch;
2433 e0_last_head = e0_last;
2434 }
2435 }
2436
2437 /* If we matched an entire block, we probably have to avoid moving the
2438 last insn. */
2439 if (max_match > 0
2440 && e0_last_head == BB_END (e0->dest)
2441 && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2442 || control_flow_insn_p (e0_last_head)))
2443 {
2444 max_match--;
2445 if (max_match == 0)
2446 return false;
2447 e0_last_head = prev_real_nondebug_insn (e0_last_head);
2448 }
2449
2450 if (max_match == 0)
2451 return false;
2452
2453 /* We must find a union of the live registers at each of the end points. */
2454 live = BITMAP_ALLOC (NULL);
2455 live_union = BITMAP_ALLOC (NULL);
2456
2457 currptr = XNEWVEC (rtx_insn *, nedges);
2458 headptr = XNEWVEC (rtx_insn *, nedges);
2459 nextptr = XNEWVEC (rtx_insn *, nedges);
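/* For each successor, HEADPTR[i] is the first insn of the candidate
sequence, CURRPTR[i] tracks the end of the range currently being
moved, and NEXTPTR[i] remembers where to resume when only part of the
sequence could be moved. */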
2460
2461 for (ix = 0; ix < nedges; ix++)
2462 {
2463 int j;
2464 basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2465 rtx_insn *head = BB_HEAD (merge_bb);
2466
2467 while (!NONDEBUG_INSN_P (head))
2468 head = NEXT_INSN (head);
2469 headptr[ix] = head;
2470 currptr[ix] = head;
2471
2472 /* Compute the end point and live information. */
2473 for (j = 1; j < max_match; j++)
2474 do
2475 head = NEXT_INSN (head);
2476 while (!NONDEBUG_INSN_P (head));
2477 simulate_backwards_to_point (merge_bb, live, head);
2478 IOR_REG_SET (live_union, live);
2479 }
2480
2481 /* If we're moving across two blocks, verify the validity of the
2482 first move, then adjust the target and let the loop below deal
2483 with the final move. */
2484 if (final_dest_bb != NULL)
2485 {
2486 rtx_insn *move_upto;
2487
2488 moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2489 jump, e0->dest, live_union,
2490 NULL, &move_upto);
2491 if (!moveall)
2492 {
2493 if (move_upto == NULL_RTX)
2494 goto out;
2495
2496 while (e0_last_head != move_upto)
2497 {
2498 df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2499 live_union);
2500 e0_last_head = PREV_INSN (e0_last_head);
2501 }
2502 }
2503 if (e0_last_head == NULL_RTX)
2504 goto out;
2505
2506 jump = BB_END (final_dest_bb);
2507 cond = get_condition (jump, &move_before, true, false);
2508 if (cond == NULL_RTX)
2509 {
2510 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2511 move_before = prev_nonnote_nondebug_insn (jump);
2512 else
2513 move_before = jump;
2514 }
2515 }
2516
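/* Move the matched head sequence up into BB (or FINAL_DEST_BB) a piece
at a time: move what can be moved before MOVE_BEFORE, delete the
duplicate copies from the other successors, and if something was left
behind retry with a later insertion point. */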
2517 do
2518 {
2519 rtx_insn *move_upto;
2520 moveall = can_move_insns_across (currptr[0], e0_last_head,
2521 move_before, jump, e0->dest, live_union,
2522 NULL, &move_upto);
2523 if (!moveall && move_upto == NULL_RTX)
2524 {
2525 if (jump == move_before)
2526 break;
2527
2528 /* Try again, using a different insertion point. */
2529 move_before = jump;
2530
2531 /* Don't try moving before a cc0 user, as that may invalidate
2532 the cc0. */
2533 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2534 break;
2535
2536 continue;
2537 }
2538
2539 if (final_dest_bb && !moveall)
2540 /* We haven't checked whether a partial move would be OK for the first
2541 move, so we have to fail this case. */
2542 break;
2543
2544 changed = true;
2545 for (;;)
2546 {
2547 if (currptr[0] == move_upto)
2548 break;
2549 for (ix = 0; ix < nedges; ix++)
2550 {
2551 rtx_insn *curr = currptr[ix];
2552 do
2553 curr = NEXT_INSN (curr);
2554 while (!NONDEBUG_INSN_P (curr));
2555 currptr[ix] = curr;
2556 }
2557 }
2558
2559 /* If we can't currently move all of the identical insns, remember
2560 each insn after the range that we'll merge. */
2561 if (!moveall)
2562 for (ix = 0; ix < nedges; ix++)
2563 {
2564 rtx_insn *curr = currptr[ix];
2565 do
2566 curr = NEXT_INSN (curr);
2567 while (!NONDEBUG_INSN_P (curr));
2568 nextptr[ix] = curr;
2569 }
2570
2571 reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2572 df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2573 if (final_dest_bb != NULL)
2574 df_set_bb_dirty (final_dest_bb);
2575 df_set_bb_dirty (bb);
2576 for (ix = 1; ix < nedges; ix++)
2577 {
2578 df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2579 delete_insn_chain (headptr[ix], currptr[ix], false);
2580 }
2581 if (!moveall)
2582 {
2583 if (jump == move_before)
2584 break;
2585
2586 /* For the unmerged insns, try a different insertion point. */
2587 move_before = jump;
2588
2589 /* Don't try moving before a cc0 user, as that may invalidate
2590 the cc0. */
2591 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2592 break;
2593
2594 for (ix = 0; ix < nedges; ix++)
2595 currptr[ix] = headptr[ix] = nextptr[ix];
2596 }
2597 }
2598 while (!moveall);
2599
2600 out:
2601 free (currptr);
2602 free (headptr);
2603 free (nextptr);
2604
2605 crossjumps_occurred |= changed;
2606
2607 return changed;
2608 }
2609
2610 /* Return true if BB contains just a bb note, or a bb note followed
2611 only by DEBUG_INSNs. */
2612
2613 static bool
2614 trivially_empty_bb_p (basic_block bb)
2615 {
2616 rtx_insn *insn = BB_END (bb);
2617
2618 while (1)
2619 {
2620 if (insn == BB_HEAD (bb))
2621 return true;
2622 if (!DEBUG_INSN_P (insn))
2623 return false;
2624 insn = PREV_INSN (insn);
2625 }
2626 }
2627
2628 /* Return true if BB contains just a return and possibly a USE of the
2629 return value. Fill in *RET and *USE with the return and use insns
2630 if any found, otherwise NULL. All CLOBBERs are ignored. */
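/* A minimal illustrative example (target-dependent): a block containing
only

	(use (reg:SI <retval>))		;; REG_FUNCTION_VALUE_P set
	(simple_return)

satisfies this predicate, with *USE and *RET set to those two insns. */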
2631
2632 static bool
2633 bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
2634 {
2635 *ret = *use = NULL;
2636 rtx_insn *insn;
2637
2638 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2639 return false;
2640
2641 FOR_BB_INSNS (bb, insn)
2642 if (NONDEBUG_INSN_P (insn))
2643 {
2644 rtx pat = PATTERN (insn);
2645
2646 if (!*ret && ANY_RETURN_P (pat))
2647 *ret = insn;
2648 else if (!*ret && !*use && GET_CODE (pat) == USE
2649 && REG_P (XEXP (pat, 0))
2650 && REG_FUNCTION_VALUE_P (XEXP (pat, 0)))
2651 *use = insn;
2652 else if (GET_CODE (pat) != CLOBBER)
2653 return false;
2654 }
2655
2656 return !!*ret;
2657 }
2658
2659 /* Do simple CFG optimizations - basic block merging, simplification of
2660 jump instructions, etc. Return nonzero if changes were made. */
2661
2662 static bool
2663 try_optimize_cfg (int mode)
2664 {
2665 bool changed_overall = false;
2666 bool changed;
2667 int iterations = 0;
2668 basic_block bb, b, next;
2669
2670 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2671 clear_bb_flags ();
2672
2673 crossjumps_occurred = false;
2674
2675 FOR_EACH_BB_FN (bb, cfun)
2676 update_forwarder_flag (bb);
2677
2678 if (! targetm.cannot_modify_jumps_p ())
2679 {
2680 first_pass = true;
2681 /* Attempt to merge blocks as made possible by edge removal. If
2682 a block has only one successor, and the successor has only
2683 one predecessor, they may be combined. */
2684 do
2685 {
2686 block_was_dirty = false;
2687 changed = false;
2688 iterations++;
2689
2690 if (dump_file)
2691 fprintf (dump_file,
2692 "\n\ntry_optimize_cfg iteration %i\n\n",
2693 iterations);
2694
2695 for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
2696 b != EXIT_BLOCK_PTR_FOR_FN (cfun);)
2697 {
2698 basic_block c;
2699 edge s;
2700 bool changed_here = false;
2701
2702 /* Delete trivially dead basic blocks. This is either
2703 blocks with no predecessors, or empty blocks with no
2704 successors. However if the empty block with no
2705 successors is the successor of the ENTRY_BLOCK, it is
2706 kept. This ensures that the ENTRY_BLOCK will have a
2707 successor which is a precondition for many RTL
2708 passes. Empty blocks may result from expanding
2709 __builtin_unreachable (). */
2710 if (EDGE_COUNT (b->preds) == 0
2711 || (EDGE_COUNT (b->succs) == 0
2712 && trivially_empty_bb_p (b)
2713 && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
2714 != b))
2715 {
2716 c = b->prev_bb;
2717 if (EDGE_COUNT (b->preds) > 0)
2718 {
2719 edge e;
2720 edge_iterator ei;
2721
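/* B is about to be deleted but may be followed by a barrier (kept in
B's footer in cfglayout mode). Make sure each fallthru predecessor
ends up followed by a barrier too, either by inheriting B's footer or
by emitting a new barrier after it. */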
2722 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2723 {
2724 rtx_insn *insn;
2725 for (insn = BB_FOOTER (b);
2726 insn; insn = NEXT_INSN (insn))
2727 if (BARRIER_P (insn))
2728 break;
2729 if (insn)
2730 FOR_EACH_EDGE (e, ei, b->preds)
2731 if ((e->flags & EDGE_FALLTHRU))
2732 {
2733 if (BB_FOOTER (b)
2734 && BB_FOOTER (e->src) == NULL)
2735 {
2736 BB_FOOTER (e->src) = BB_FOOTER (b);
2737 BB_FOOTER (b) = NULL;
2738 }
2739 else
2740 emit_barrier_after_bb (e->src);
2741 }
2742 }
2743 else
2744 {
2745 rtx_insn *last = get_last_bb_insn (b);
2746 if (last && BARRIER_P (last))
2747 FOR_EACH_EDGE (e, ei, b->preds)
2748 if ((e->flags & EDGE_FALLTHRU))
2749 emit_barrier_after (BB_END (e->src));
2750 }
2751 }
2752 delete_basic_block (b);
2753 changed = true;
2754 /* Avoid trying to remove the entry block. */
2755 b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
2756 continue;
2757 }
2758
2759 /* Remove code labels no longer used. */
2760 if (single_pred_p (b)
2761 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2762 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2763 && LABEL_P (BB_HEAD (b))
2764 && !LABEL_PRESERVE_P (BB_HEAD (b))
2765 /* If the previous block ends with a branch to this
2766 block, we can't delete the label. Normally this
2767 is a condjump that is yet to be simplified, but
2768 if CASE_DROPS_THRU, this can be a tablejump with
2769 some element going to the same place as the
2770 default (fallthru). */
2771 && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
2772 || !JUMP_P (BB_END (single_pred (b)))
2773 || ! label_is_jump_target_p (BB_HEAD (b),
2774 BB_END (single_pred (b)))))
2775 {
2776 delete_insn (BB_HEAD (b));
2777 if (dump_file)
2778 fprintf (dump_file, "Deleted label in block %i.\n",
2779 b->index);
2780 }
2781
2782 /* If we fall through an empty block, we can remove it. */
2783 if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
2784 && single_pred_p (b)
2785 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2786 && !LABEL_P (BB_HEAD (b))
2787 && FORWARDER_BLOCK_P (b)
2788 /* Note that forwarder_block_p true ensures that
2789 there is a successor for this block. */
2790 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2791 && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
2792 {
2793 if (dump_file)
2794 fprintf (dump_file,
2795 "Deleting fallthru block %i.\n",
2796 b->index);
2797
2798 c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2799 ? b->next_bb : b->prev_bb);
2800 redirect_edge_succ_nodup (single_pred_edge (b),
2801 single_succ (b));
2802 delete_basic_block (b);
2803 changed = true;
2804 b = c;
2805 continue;
2806 }
2807
2808 /* Merge B with its single successor, if any. */
2809 if (single_succ_p (b)
2810 && (s = single_succ_edge (b))
2811 && !(s->flags & EDGE_COMPLEX)
2812 && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2813 && single_pred_p (c)
2814 && b != c)
2815 {
2816 /* When not in cfg_layout mode, use code aware of reordering
2817 INSNs. This code possibly creates new basic blocks, so it
2818 does not fit the merge_blocks interface and is kept here in
2819 the hope that it will become useless once more of the compiler
2820 is transformed to use cfg_layout mode. */
2821
2822 if ((mode & CLEANUP_CFGLAYOUT)
2823 && can_merge_blocks_p (b, c))
2824 {
2825 merge_blocks (b, c);
2826 update_forwarder_flag (b);
2827 changed_here = true;
2828 }
2829 else if (!(mode & CLEANUP_CFGLAYOUT)
2830 /* If the jump insn has side effects,
2831 we can't kill the edge. */
2832 && (!JUMP_P (BB_END (b))
2833 || (reload_completed
2834 ? simplejump_p (BB_END (b))
2835 : (onlyjump_p (BB_END (b))
2836 && !tablejump_p (BB_END (b),
2837 NULL, NULL))))
2838 && (next = merge_blocks_move (s, b, c, mode)))
2839 {
2840 b = next;
2841 changed_here = true;
2842 }
2843 }
2844
2845 /* Try to change a branch to a return to just that return. */
2846 rtx_insn *ret, *use;
2847 if (single_succ_p (b)
2848 && onlyjump_p (BB_END (b))
2849 && bb_is_just_return (single_succ (b), &ret, &use))
2850 {
2851 if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2852 PATTERN (ret), 0))
2853 {
2854 if (use)
2855 emit_insn_before (copy_insn (PATTERN (use)),
2856 BB_END (b));
2857 if (dump_file)
2858 fprintf (dump_file, "Changed jump %d->%d to return.\n",
2859 b->index, single_succ (b)->index);
2860 redirect_edge_succ (single_succ_edge (b),
2861 EXIT_BLOCK_PTR_FOR_FN (cfun));
2862 single_succ_edge (b)->flags &= ~EDGE_CROSSING;
2863 changed_here = true;
2864 }
2865 }
2866
2867 /* Try to change a conditional branch to a return to the
2868 respective conditional return. */
2869 if (EDGE_COUNT (b->succs) == 2
2870 && any_condjump_p (BB_END (b))
2871 && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
2872 {
2873 if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2874 PATTERN (ret), 0))
2875 {
2876 if (use)
2877 emit_insn_before (copy_insn (PATTERN (use)),
2878 BB_END (b));
2879 if (dump_file)
2880 fprintf (dump_file, "Changed conditional jump %d->%d "
2881 "to conditional return.\n",
2882 b->index, BRANCH_EDGE (b)->dest->index);
2883 redirect_edge_succ (BRANCH_EDGE (b),
2884 EXIT_BLOCK_PTR_FOR_FN (cfun));
2885 BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
2886 changed_here = true;
2887 }
2888 }
2889
2890 /* Try to flip a conditional branch that falls through to
2891 a return so that it becomes a conditional return and a
2892 new jump to the original branch target. */
2893 if (EDGE_COUNT (b->succs) == 2
2894 && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2895 && any_condjump_p (BB_END (b))
2896 && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
2897 {
2898 if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2899 JUMP_LABEL (BB_END (b)), 0))
2900 {
2901 basic_block new_ft = BRANCH_EDGE (b)->dest;
2902 if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2903 PATTERN (ret), 0))
2904 {
2905 if (use)
2906 emit_insn_before (copy_insn (PATTERN (use)),
2907 BB_END (b));
2908 if (dump_file)
2909 fprintf (dump_file, "Changed conditional jump "
2910 "%d->%d to conditional return, adding "
2911 "fall-through jump.\n",
2912 b->index, BRANCH_EDGE (b)->dest->index);
2913 redirect_edge_succ (BRANCH_EDGE (b),
2914 EXIT_BLOCK_PTR_FOR_FN (cfun));
2915 BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
2916 std::swap (BRANCH_EDGE (b)->probability,
2917 FALLTHRU_EDGE (b)->probability);
2918 update_br_prob_note (b);
2919 basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
2920 notice_new_block (jb);
2921 if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
2922 block_label (new_ft), 0))
2923 gcc_unreachable ();
2924 redirect_edge_succ (single_succ_edge (jb), new_ft);
2925 changed_here = true;
2926 }
2927 else
2928 {
2929 /* Invert the jump back to what it was. This should
2930 never fail. */
2931 if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
2932 JUMP_LABEL (BB_END (b)), 0))
2933 gcc_unreachable ();
2934 }
2935 }
2936 }
2937
2938 /* Simplify branch over branch. */
2939 if ((mode & CLEANUP_EXPENSIVE)
2940 && !(mode & CLEANUP_CFGLAYOUT)
2941 && try_simplify_condjump (b))
2942 changed_here = true;
2943
2944 /* If B has a single outgoing edge, but uses a
2945 non-trivial jump instruction without side-effects, we
2946 can either delete the jump entirely, or replace it
2947 with a simple unconditional jump. */
2948 if (single_succ_p (b)
2949 && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2950 && onlyjump_p (BB_END (b))
2951 && !CROSSING_JUMP_P (BB_END (b))
2952 && try_redirect_by_replacing_jump (single_succ_edge (b),
2953 single_succ (b),
2954 (mode & CLEANUP_CFGLAYOUT) != 0))
2955 {
2956 update_forwarder_flag (b);
2957 changed_here = true;
2958 }
2959
2960 /* Simplify branch to branch. */
2961 if (try_forward_edges (mode, b))
2962 {
2963 update_forwarder_flag (b);
2964 changed_here = true;
2965 }
2966
2967 /* Look for shared code between blocks. */
2968 if ((mode & CLEANUP_CROSSJUMP)
2969 && try_crossjump_bb (mode, b))
2970 changed_here = true;
2971
2972 if ((mode & CLEANUP_CROSSJUMP)
2973 /* This can lengthen register lifetimes. Do it only after
2974 reload. */
2975 && reload_completed
2976 && try_head_merge_bb (b))
2977 changed_here = true;
2978
2979 /* Don't get confused by the index shift caused by
2980 deleting blocks. */
2981 if (!changed_here)
2982 b = b->next_bb;
2983 else
2984 changed = true;
2985 }
2986
2987 if ((mode & CLEANUP_CROSSJUMP)
2988 && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
2989 changed = true;
2990
2991 if (block_was_dirty)
2992 {
2993 /* This should only be set by head-merging. */
2994 gcc_assert (mode & CLEANUP_CROSSJUMP);
2995 df_analyze ();
2996 }
2997
2998 if (changed)
2999 {
3000 /* Edge forwarding in particular can cause hot blocks previously
3001 reached by both hot and cold blocks to become dominated only
3002 by cold blocks. This will cause the verification below to fail,
3003 and lead to now cold code in the hot section. This is not easy
3004 to detect and fix during edge forwarding, and in some cases
3005 is only visible after newly unreachable blocks are deleted,
3006 which will be done in fixup_partitions. */
3007 if ((mode & CLEANUP_NO_PARTITIONING) == 0)
3008 {
3009 fixup_partitions ();
3010 checking_verify_flow_info ();
3011 }
3012 }
3013
3014 changed_overall |= changed;
3015 first_pass = false;
3016 }
3017 while (changed);
3018 }
3019
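/* The forwarder and non-threadable marks are internal to this file;
clear them so stale flags are not left behind for later passes. */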
3020 FOR_ALL_BB_FN (b, cfun)
3021 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
3022
3023 return changed_overall;
3024 }
3025 \f
3026 /* Delete all unreachable basic blocks. */
3027
3028 bool
3029 delete_unreachable_blocks (void)
3030 {
3031 bool changed = false;
3032 basic_block b, prev_bb;
3033
3034 find_unreachable_blocks ();
3035
3036 /* When we're in GIMPLE mode and there may be debug bind insns, we
3037 should delete blocks in reverse dominator order, so as to get a
3038 chance to substitute all released DEFs into debug bind stmts. If
3039 we don't have dominators information, walking blocks backward
3040 gets us a better chance of retaining most debug information than
3041 otherwise. */
3042 if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
3043 && dom_info_available_p (CDI_DOMINATORS))
3044 {
3045 for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
3046 b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
3047 {
3048 prev_bb = b->prev_bb;
3049
3050 if (!(b->flags & BB_REACHABLE))
3051 {
3052 /* Speed up the removal of blocks that don't dominate
3053 others. Walking backwards, this should be the common
3054 case. */
3055 if (!first_dom_son (CDI_DOMINATORS, b))
3056 delete_basic_block (b);
3057 else
3058 {
3059 vec<basic_block> h
3060 = get_all_dominated_blocks (CDI_DOMINATORS, b);
3061
3062 while (h.length ())
3063 {
3064 b = h.pop ();
3065
3066 prev_bb = b->prev_bb;
3067
3068 gcc_assert (!(b->flags & BB_REACHABLE));
3069
3070 delete_basic_block (b);
3071 }
3072
3073 h.release ();
3074 }
3075
3076 changed = true;
3077 }
3078 }
3079 }
3080 else
3081 {
3082 for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
3083 b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
3084 {
3085 prev_bb = b->prev_bb;
3086
3087 if (!(b->flags & BB_REACHABLE))
3088 {
3089 delete_basic_block (b);
3090 changed = true;
3091 }
3092 }
3093 }
3094
3095 if (changed)
3096 tidy_fallthru_edges ();
3097 return changed;
3098 }
3099
3100 /* Delete any jump tables never referenced. We can't delete them at the
3101 time of removing the tablejump insn as they are referenced by the preceding
3102 insns computing the destination, so we delay deleting them and garbage
3103 collect them once life information is computed. */
3104 void
3105 delete_dead_jumptables (void)
3106 {
3107 basic_block bb;
3108
3109 /* A dead jump table does not belong to any basic block. Scan insns
3110 between two adjacent basic blocks. */
3111 FOR_EACH_BB_FN (bb, cfun)
3112 {
3113 rtx_insn *insn, *next;
3114
3115 for (insn = NEXT_INSN (BB_END (bb));
3116 insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
3117 insn = next)
3118 {
3119 next = NEXT_INSN (insn);
3120 if (LABEL_P (insn)
3121 && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
3122 && JUMP_TABLE_DATA_P (next))
3123 {
3124 rtx_insn *label = insn, *jump = next;
3125
3126 if (dump_file)
3127 fprintf (dump_file, "Dead jumptable %i removed\n",
3128 INSN_UID (insn));
3129
3130 next = NEXT_INSN (next);
3131 delete_insn (jump);
3132 delete_insn (label);
3133 }
3134 }
3135 }
3136 }
3137
3138 \f
3139 /* Tidy the CFG by deleting unreachable code and whatnot. */
3140
3141 bool
3142 cleanup_cfg (int mode)
3143 {
3144 bool changed = false;
3145
3146 /* Set the cfglayout mode flag here. We could update all the callers
3147 but that is just inconvenient, especially given that we eventually
3148 want to have cfglayout mode as the default. */
3149 if (current_ir_type () == IR_RTL_CFGLAYOUT)
3150 mode |= CLEANUP_CFGLAYOUT;
3151
3152 timevar_push (TV_CLEANUP_CFG);
3153 if (delete_unreachable_blocks ())
3154 {
3155 changed = true;
3156 /* We've possibly created trivially dead code. Clean it up right
3157 now to introduce more opportunities for try_optimize_cfg. */
3158 if (!(mode & (CLEANUP_NO_INSN_DEL))
3159 && !reload_completed)
3160 delete_trivially_dead_insns (get_insns (), max_reg_num ());
3161 }
3162
3163 compact_blocks ();
3164
3165 /* To tail-merge blocks ending in the same noreturn function (e.g.
3166 a call to abort) we have to insert fake edges to exit. Do this
3167 here once. The fake edges do not interfere with any other CFG
3168 cleanups. */
3169 if (mode & CLEANUP_CROSSJUMP)
3170 add_noreturn_fake_exit_edges ();
3171
3172 if (!dbg_cnt (cfg_cleanup))
3173 return changed;
3174
3175 while (try_optimize_cfg (mode))
3176 {
3177 delete_unreachable_blocks (), changed = true;
3178 if (!(mode & CLEANUP_NO_INSN_DEL))
3179 {
3180 /* Try to remove some trivially dead insns when doing an expensive
3181 cleanup. But delete_trivially_dead_insns doesn't work after
3182 reload (it only handles pseudos) and run_fast_dce is too costly
3183 to run in every iteration.
3184
3185 For effective cross jumping, we really want to run a fast DCE to
3186 clean up any dead conditions, or they get in the way of performing
3187 useful tail merges.
3188
3189 Other transformations in cleanup_cfg are not so sensitive to dead
3190 code, so delete_trivially_dead_insns or even doing nothing at all
3191 is good enough. */
3192 if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
3193 && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3194 break;
3195 if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred)
3196 {
3197 run_fast_dce ();
3198 mode &= ~CLEANUP_FORCE_FAST_DCE;
3199 }
3200 }
3201 else
3202 break;
3203 }
3204
3205 if (mode & CLEANUP_CROSSJUMP)
3206 remove_fake_exit_edges ();
3207
3208 if (mode & CLEANUP_FORCE_FAST_DCE)
3209 run_fast_dce ();
3210
3211 /* Don't call delete_dead_jumptables in cfglayout mode, because
3212 that function assumes that jump tables are in the insns stream.
3213 But we also don't _have_ to delete dead jumptables in cfglayout
3214 mode because we shouldn't even be looking at things that are
3215 not in a basic block. Dead jumptables are cleaned up when
3216 going out of cfglayout mode. */
3217 if (!(mode & CLEANUP_CFGLAYOUT))
3218 delete_dead_jumptables ();
3219
3220 /* ??? We probably do this way too often. */
3221 if (current_loops
3222 && (changed
3223 || (mode & CLEANUP_CFG_CHANGED)))
3224 {
3225 timevar_push (TV_REPAIR_LOOPS);
3226 /* The above doesn't preserve dominance info if available. */
3227 gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
3228 calculate_dominance_info (CDI_DOMINATORS);
3229 fix_loop_structure (NULL);
3230 free_dominance_info (CDI_DOMINATORS);
3231 timevar_pop (TV_REPAIR_LOOPS);
3232 }
3233
3234 timevar_pop (TV_CLEANUP_CFG);
3235
3236 return changed;
3237 }
3238 \f
3239 namespace {
3240
3241 const pass_data pass_data_jump =
3242 {
3243 RTL_PASS, /* type */
3244 "jump", /* name */
3245 OPTGROUP_NONE, /* optinfo_flags */
3246 TV_JUMP, /* tv_id */
3247 0, /* properties_required */
3248 0, /* properties_provided */
3249 0, /* properties_destroyed */
3250 0, /* todo_flags_start */
3251 0, /* todo_flags_finish */
3252 };
3253
3254 class pass_jump : public rtl_opt_pass
3255 {
3256 public:
3257 pass_jump (gcc::context *ctxt)
3258 : rtl_opt_pass (pass_data_jump, ctxt)
3259 {}
3260
3261 /* opt_pass methods: */
3262 virtual unsigned int execute (function *);
3263
3264 }; // class pass_jump
3265
3266 unsigned int
3267 pass_jump::execute (function *)
3268 {
3269 delete_trivially_dead_insns (get_insns (), max_reg_num ());
3270 if (dump_file)
3271 dump_flow_info (dump_file, dump_flags);
3272 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3273 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3274 return 0;
3275 }
3276
3277 } // anon namespace
3278
3279 rtl_opt_pass *
3280 make_pass_jump (gcc::context *ctxt)
3281 {
3282 return new pass_jump (ctxt);
3283 }
3284 \f
3285 namespace {
3286
3287 const pass_data pass_data_postreload_jump =
3288 {
3289 RTL_PASS, /* type */
3290 "postreload_jump", /* name */
3291 OPTGROUP_NONE, /* optinfo_flags */
3292 TV_JUMP, /* tv_id */
3293 0, /* properties_required */
3294 0, /* properties_provided */
3295 0, /* properties_destroyed */
3296 0, /* todo_flags_start */
3297 0, /* todo_flags_finish */
3298 };
3299
3300 class pass_postreload_jump : public rtl_opt_pass
3301 {
3302 public:
3303 pass_postreload_jump (gcc::context *ctxt)
3304 : rtl_opt_pass (pass_data_postreload_jump, ctxt)
3305 {}
3306
3307 /* opt_pass methods: */
3308 virtual unsigned int execute (function *);
3309
3310 }; // class pass_postreload_jump
3311
3312 unsigned int
3313 pass_postreload_jump::execute (function *)
3314 {
3315 cleanup_cfg (flag_thread_jumps ? CLEANUP_THREADING : 0);
3316 return 0;
3317 }
3318
3319 } // anon namespace
3320
3321 rtl_opt_pass *
3322 make_pass_postreload_jump (gcc::context *ctxt)
3323 {
3324 return new pass_postreload_jump (ctxt);
3325 }
3326
3327 namespace {
3328
3329 const pass_data pass_data_jump2 =
3330 {
3331 RTL_PASS, /* type */
3332 "jump2", /* name */
3333 OPTGROUP_NONE, /* optinfo_flags */
3334 TV_JUMP, /* tv_id */
3335 0, /* properties_required */
3336 0, /* properties_provided */
3337 0, /* properties_destroyed */
3338 0, /* todo_flags_start */
3339 0, /* todo_flags_finish */
3340 };
3341
3342 class pass_jump2 : public rtl_opt_pass
3343 {
3344 public:
3345 pass_jump2 (gcc::context *ctxt)
3346 : rtl_opt_pass (pass_data_jump2, ctxt)
3347 {}
3348
3349 /* opt_pass methods: */
3350 virtual unsigned int execute (function *)
3351 {
3352 cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
3353 return 0;
3354 }
3355
3356 }; // class pass_jump2
3357
3358 } // anon namespace
3359
3360 rtl_opt_pass *
3361 make_pass_jump2 (gcc::context *ctxt)
3362 {
3363 return new pass_jump2 (ctxt);
3364 }