1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This file contains optimizer of the control flow. The main entry point is
21 cleanup_cfg. Following optimizations are performed:
23 - Unreachable blocks removal
24 - Edge forwarding (edge to the forwarder block is forwarded to its
25 successor. Simplification of the branch instruction is performed by
26 underlying infrastructure so branch can be converted to simplejump or
28 - Cross jumping (tail merging)
29 - Conditional jump-around-simplejump simplification
30 - Basic block merging. */
34 #include "coretypes.h"
43 #include "insn-config.h"
47 #include "tree-pass.h"
52 #include "cfgcleanup.h"
58 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
60 /* Set to true when we are running first pass of try_optimize_cfg loop. */
61 static bool first_pass
;
63 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg. */
64 static bool crossjumps_occurred
;
66 /* Set to true if we couldn't run an optimization due to stale liveness
67 information; we should run df_analyze to enable more opportunities. */
68 static bool block_was_dirty
;
70 static bool try_crossjump_to_edge (int, edge
, edge
, enum replace_direction
);
71 static bool try_crossjump_bb (int, basic_block
);
72 static bool outgoing_edges_match (int, basic_block
, basic_block
);
73 static enum replace_direction
old_insns_match_p (int, rtx_insn
*, rtx_insn
*);
75 static void merge_blocks_move_predecessor_nojumps (basic_block
, basic_block
);
76 static void merge_blocks_move_successor_nojumps (basic_block
, basic_block
);
77 static bool try_optimize_cfg (int);
78 static bool try_simplify_condjump (basic_block
);
79 static bool try_forward_edges (int, basic_block
);
80 static edge
thread_jump (edge
, basic_block
);
81 static bool mark_effect (rtx
, bitmap
);
82 static void notice_new_block (basic_block
);
83 static void update_forwarder_flag (basic_block
);
84 static void merge_memattrs (rtx
, rtx
);
86 /* Set flags for newly created block. */
89 notice_new_block (basic_block bb
)
94 if (forwarder_block_p (bb
))
95 bb
->flags
|= BB_FORWARDER_BLOCK
;
98 /* Recompute forwarder flag after block has been modified. */
101 update_forwarder_flag (basic_block bb
)
103 if (forwarder_block_p (bb
))
104 bb
->flags
|= BB_FORWARDER_BLOCK
;
106 bb
->flags
&= ~BB_FORWARDER_BLOCK
;
109 /* Simplify a conditional jump around an unconditional jump.
110 Return true if something changed. */
113 try_simplify_condjump (basic_block cbranch_block
)
115 basic_block jump_block
, jump_dest_block
, cbranch_dest_block
;
116 edge cbranch_jump_edge
, cbranch_fallthru_edge
;
117 rtx_insn
*cbranch_insn
;
119 /* Verify that there are exactly two successors. */
120 if (EDGE_COUNT (cbranch_block
->succs
) != 2)
123 /* Verify that we've got a normal conditional branch at the end
125 cbranch_insn
= BB_END (cbranch_block
);
126 if (!any_condjump_p (cbranch_insn
))
129 cbranch_fallthru_edge
= FALLTHRU_EDGE (cbranch_block
);
130 cbranch_jump_edge
= BRANCH_EDGE (cbranch_block
);
132 /* The next block must not have multiple predecessors, must not
133 be the last block in the function, and must contain just the
134 unconditional jump. */
135 jump_block
= cbranch_fallthru_edge
->dest
;
136 if (!single_pred_p (jump_block
)
137 || jump_block
->next_bb
== EXIT_BLOCK_PTR_FOR_FN (cfun
)
138 || !FORWARDER_BLOCK_P (jump_block
))
140 jump_dest_block
= single_succ (jump_block
);
142 /* If we are partitioning hot/cold basic blocks, we don't want to
143 mess up unconditional or indirect jumps that cross between hot
146 Basic block partitioning may result in some jumps that appear to
147 be optimizable (or blocks that appear to be mergeable), but which really
148 must be left untouched (they are required to make it safely across
149 partition boundaries). See the comments at the top of
150 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
152 if (BB_PARTITION (jump_block
) != BB_PARTITION (jump_dest_block
)
153 || (cbranch_jump_edge
->flags
& EDGE_CROSSING
))
156 /* The conditional branch must target the block after the
157 unconditional branch. */
158 cbranch_dest_block
= cbranch_jump_edge
->dest
;
160 if (cbranch_dest_block
== EXIT_BLOCK_PTR_FOR_FN (cfun
)
161 || jump_dest_block
== EXIT_BLOCK_PTR_FOR_FN (cfun
)
162 || !can_fallthru (jump_block
, cbranch_dest_block
))
165 /* Invert the conditional branch. */
166 if (!invert_jump (as_a
<rtx_jump_insn
*> (cbranch_insn
),
167 block_label (jump_dest_block
), 0))
171 fprintf (dump_file
, "Simplifying condjump %i around jump %i\n",
172 INSN_UID (cbranch_insn
), INSN_UID (BB_END (jump_block
)));
174 /* Success. Update the CFG to match. Note that after this point
175 the edge variable names appear backwards; the redirection is done
176 this way to preserve edge profile data. */
177 cbranch_jump_edge
= redirect_edge_succ_nodup (cbranch_jump_edge
,
179 cbranch_fallthru_edge
= redirect_edge_succ_nodup (cbranch_fallthru_edge
,
181 cbranch_jump_edge
->flags
|= EDGE_FALLTHRU
;
182 cbranch_fallthru_edge
->flags
&= ~EDGE_FALLTHRU
;
183 update_br_prob_note (cbranch_block
);
185 /* Delete the block with the unconditional jump, and clean up the mess. */
186 delete_basic_block (jump_block
);
187 tidy_fallthru_edge (cbranch_jump_edge
);
188 update_forwarder_flag (cbranch_block
);
193 /* Attempt to prove that operation is NOOP using CSElib or mark the effect
194 on register. Used by jump threading. */
197 mark_effect (rtx exp
, regset nonequal
)
200 switch (GET_CODE (exp
))
202 /* In case we do clobber the register, mark it as equal, as we know the
203 value is dead so it don't have to match. */
205 dest
= XEXP (exp
, 0);
207 bitmap_clear_range (nonequal
, REGNO (dest
), REG_NREGS (dest
));
211 if (rtx_equal_for_cselib_p (SET_DEST (exp
), SET_SRC (exp
)))
213 dest
= SET_DEST (exp
);
218 bitmap_set_range (nonequal
, REGNO (dest
), REG_NREGS (dest
));
226 /* Return true if X contains a register in NONEQUAL. */
228 mentions_nonequal_regs (const_rtx x
, regset nonequal
)
230 subrtx_iterator::array_type array
;
231 FOR_EACH_SUBRTX (iter
, array
, x
, NONCONST
)
236 unsigned int end_regno
= END_REGNO (x
);
237 for (unsigned int regno
= REGNO (x
); regno
< end_regno
; ++regno
)
238 if (REGNO_REG_SET_P (nonequal
, regno
))
245 /* Attempt to prove that the basic block B will have no side effects and
246 always continues in the same edge if reached via E. Return the edge
247 if exist, NULL otherwise. */
250 thread_jump (edge e
, basic_block b
)
252 rtx set1
, set2
, cond1
, cond2
;
254 enum rtx_code code1
, code2
, reversed_code2
;
255 bool reverse1
= false;
259 reg_set_iterator rsi
;
261 if (b
->flags
& BB_NONTHREADABLE_BLOCK
)
264 /* At the moment, we do handle only conditional jumps, but later we may
265 want to extend this code to tablejumps and others. */
266 if (EDGE_COUNT (e
->src
->succs
) != 2)
268 if (EDGE_COUNT (b
->succs
) != 2)
270 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
274 /* Second branch must end with onlyjump, as we will eliminate the jump. */
275 if (!any_condjump_p (BB_END (e
->src
)))
278 if (!any_condjump_p (BB_END (b
)) || !onlyjump_p (BB_END (b
)))
280 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
284 set1
= pc_set (BB_END (e
->src
));
285 set2
= pc_set (BB_END (b
));
286 if (((e
->flags
& EDGE_FALLTHRU
) != 0)
287 != (XEXP (SET_SRC (set1
), 1) == pc_rtx
))
290 cond1
= XEXP (SET_SRC (set1
), 0);
291 cond2
= XEXP (SET_SRC (set2
), 0);
293 code1
= reversed_comparison_code (cond1
, BB_END (e
->src
));
295 code1
= GET_CODE (cond1
);
297 code2
= GET_CODE (cond2
);
298 reversed_code2
= reversed_comparison_code (cond2
, BB_END (b
));
300 if (!comparison_dominates_p (code1
, code2
)
301 && !comparison_dominates_p (code1
, reversed_code2
))
304 /* Ensure that the comparison operators are equivalent.
305 ??? This is far too pessimistic. We should allow swapped operands,
306 different CCmodes, or for example comparisons for interval, that
307 dominate even when operands are not equivalent. */
308 if (!rtx_equal_p (XEXP (cond1
, 0), XEXP (cond2
, 0))
309 || !rtx_equal_p (XEXP (cond1
, 1), XEXP (cond2
, 1)))
312 /* Punt if BB_END (e->src) is doloop-like conditional jump that modifies
313 the registers used in cond1. */
314 if (modified_in_p (cond1
, BB_END (e
->src
)))
317 /* Short circuit cases where block B contains some side effects, as we can't
319 for (insn
= NEXT_INSN (BB_HEAD (b
)); insn
!= NEXT_INSN (BB_END (b
));
320 insn
= NEXT_INSN (insn
))
321 if (INSN_P (insn
) && side_effects_p (PATTERN (insn
)))
323 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
329 /* First process all values computed in the source basic block. */
330 for (insn
= NEXT_INSN (BB_HEAD (e
->src
));
331 insn
!= NEXT_INSN (BB_END (e
->src
));
332 insn
= NEXT_INSN (insn
))
334 cselib_process_insn (insn
);
336 nonequal
= BITMAP_ALLOC (NULL
);
337 CLEAR_REG_SET (nonequal
);
339 /* Now assume that we've continued by the edge E to B and continue
340 processing as if it were same basic block.
341 Our goal is to prove that whole block is an NOOP. */
343 for (insn
= NEXT_INSN (BB_HEAD (b
));
344 insn
!= NEXT_INSN (BB_END (b
)) && !failed
;
345 insn
= NEXT_INSN (insn
))
347 /* cond2 must not mention any register that is not equal to the
348 former block. Check this before processing that instruction,
349 as BB_END (b) could contain also clobbers. */
350 if (insn
== BB_END (b
)
351 && mentions_nonequal_regs (cond2
, nonequal
))
356 rtx pat
= PATTERN (insn
);
358 if (GET_CODE (pat
) == PARALLEL
)
360 for (i
= 0; i
< (unsigned)XVECLEN (pat
, 0); i
++)
361 failed
|= mark_effect (XVECEXP (pat
, 0, i
), nonequal
);
364 failed
|= mark_effect (pat
, nonequal
);
367 cselib_process_insn (insn
);
370 /* Later we should clear nonequal of dead registers. So far we don't
371 have life information in cfg_cleanup. */
374 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
378 EXECUTE_IF_SET_IN_REG_SET (nonequal
, 0, i
, rsi
)
381 BITMAP_FREE (nonequal
);
383 if ((comparison_dominates_p (code1
, code2
) != 0)
384 != (XEXP (SET_SRC (set2
), 1) == pc_rtx
))
385 return BRANCH_EDGE (b
);
387 return FALLTHRU_EDGE (b
);
390 BITMAP_FREE (nonequal
);
395 /* Attempt to forward edges leaving basic block B.
396 Return true if successful. */
399 try_forward_edges (int mode
, basic_block b
)
401 bool changed
= false;
403 edge e
, *threaded_edges
= NULL
;
405 for (ei
= ei_start (b
->succs
); (e
= ei_safe_edge (ei
)); )
407 basic_block target
, first
;
408 location_t goto_locus
;
410 bool threaded
= false;
411 int nthreaded_edges
= 0;
412 bool may_thread
= first_pass
|| (b
->flags
& BB_MODIFIED
) != 0;
413 bool new_target_threaded
= false;
415 /* Skip complex edges because we don't know how to update them.
417 Still handle fallthru edges, as we can succeed to forward fallthru
418 edge to the same place as the branch edge of conditional branch
419 and turn conditional branch to an unconditional branch. */
420 if (e
->flags
& EDGE_COMPLEX
)
426 target
= first
= e
->dest
;
427 counter
= NUM_FIXED_BLOCKS
;
428 goto_locus
= e
->goto_locus
;
430 while (counter
< n_basic_blocks_for_fn (cfun
))
432 basic_block new_target
= NULL
;
433 may_thread
|= (target
->flags
& BB_MODIFIED
) != 0;
435 if (FORWARDER_BLOCK_P (target
)
436 && single_succ (target
) != EXIT_BLOCK_PTR_FOR_FN (cfun
))
438 /* Bypass trivial infinite loops. */
439 new_target
= single_succ (target
);
440 if (target
== new_target
)
441 counter
= n_basic_blocks_for_fn (cfun
);
444 /* When not optimizing, ensure that edges or forwarder
445 blocks with different locus are not optimized out. */
446 location_t new_locus
= single_succ_edge (target
)->goto_locus
;
447 location_t locus
= goto_locus
;
449 if (LOCATION_LOCUS (new_locus
) != UNKNOWN_LOCATION
450 && LOCATION_LOCUS (locus
) != UNKNOWN_LOCATION
451 && new_locus
!= locus
)
455 if (LOCATION_LOCUS (new_locus
) != UNKNOWN_LOCATION
)
458 rtx_insn
*last
= BB_END (target
);
459 if (DEBUG_INSN_P (last
))
460 last
= prev_nondebug_insn (last
);
461 if (last
&& INSN_P (last
))
462 new_locus
= INSN_LOCATION (last
);
464 new_locus
= UNKNOWN_LOCATION
;
466 if (LOCATION_LOCUS (new_locus
) != UNKNOWN_LOCATION
467 && LOCATION_LOCUS (locus
) != UNKNOWN_LOCATION
468 && new_locus
!= locus
)
472 if (LOCATION_LOCUS (new_locus
) != UNKNOWN_LOCATION
)
481 /* Allow to thread only over one edge at time to simplify updating
483 else if ((mode
& CLEANUP_THREADING
) && may_thread
)
485 edge t
= thread_jump (e
, target
);
489 threaded_edges
= XNEWVEC (edge
,
490 n_basic_blocks_for_fn (cfun
));
495 /* Detect an infinite loop across blocks not
496 including the start block. */
497 for (i
= 0; i
< nthreaded_edges
; ++i
)
498 if (threaded_edges
[i
] == t
)
500 if (i
< nthreaded_edges
)
502 counter
= n_basic_blocks_for_fn (cfun
);
507 /* Detect an infinite loop across the start block. */
511 gcc_assert (nthreaded_edges
512 < (n_basic_blocks_for_fn (cfun
)
513 - NUM_FIXED_BLOCKS
));
514 threaded_edges
[nthreaded_edges
++] = t
;
516 new_target
= t
->dest
;
517 new_target_threaded
= true;
525 /* Do not turn non-crossing jump to crossing. Depending on target
526 it may require different instruction pattern. */
527 if ((e
->flags
& EDGE_CROSSING
)
528 || BB_PARTITION (first
) == BB_PARTITION (new_target
))
531 threaded
|= new_target_threaded
;
535 if (counter
>= n_basic_blocks_for_fn (cfun
))
538 fprintf (dump_file
, "Infinite loop in BB %i.\n",
541 else if (target
== first
)
542 ; /* We didn't do anything. */
545 /* Save the values now, as the edge may get removed. */
546 profile_count edge_count
= e
->count ();
549 e
->goto_locus
= goto_locus
;
551 /* Don't force if target is exit block. */
552 if (threaded
&& target
!= EXIT_BLOCK_PTR_FOR_FN (cfun
))
554 notice_new_block (redirect_edge_and_branch_force (e
, target
));
556 fprintf (dump_file
, "Conditionals threaded.\n");
558 else if (!redirect_edge_and_branch (e
, target
))
562 "Forwarding edge %i->%i to %i failed.\n",
563 b
->index
, e
->dest
->index
, target
->index
);
568 /* We successfully forwarded the edge. Now update profile
569 data: for each edge we traversed in the chain, remove
570 the original edge's execution count. */
575 if (!single_succ_p (first
))
577 gcc_assert (n
< nthreaded_edges
);
578 t
= threaded_edges
[n
++];
579 gcc_assert (t
->src
== first
);
580 update_bb_profile_for_threading (first
, edge_count
, t
);
581 update_br_prob_note (first
);
585 first
->count
-= edge_count
;
586 /* It is possible that as the result of
587 threading we've removed edge as it is
588 threaded to the fallthru edge. Avoid
589 getting out of sync. */
590 if (n
< nthreaded_edges
591 && first
== threaded_edges
[n
]->src
)
593 t
= single_succ_edge (first
);
598 while (first
!= target
);
606 free (threaded_edges
);
611 /* Blocks A and B are to be merged into a single block. A has no incoming
612 fallthru edge, so it can be moved before B without adding or modifying
613 any jumps (aside from the jump from A to B). */
616 merge_blocks_move_predecessor_nojumps (basic_block a
, basic_block b
)
620 /* If we are partitioning hot/cold basic blocks, we don't want to
621 mess up unconditional or indirect jumps that cross between hot
624 Basic block partitioning may result in some jumps that appear to
625 be optimizable (or blocks that appear to be mergeable), but which really
626 must be left untouched (they are required to make it safely across
627 partition boundaries). See the comments at the top of
628 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
630 if (BB_PARTITION (a
) != BB_PARTITION (b
))
633 barrier
= next_nonnote_insn (BB_END (a
));
634 gcc_assert (BARRIER_P (barrier
));
635 delete_insn (barrier
);
637 /* Scramble the insn chain. */
638 if (BB_END (a
) != PREV_INSN (BB_HEAD (b
)))
639 reorder_insns_nobb (BB_HEAD (a
), BB_END (a
), PREV_INSN (BB_HEAD (b
)));
643 fprintf (dump_file
, "Moved block %d before %d and merged.\n",
646 /* Swap the records for the two blocks around. */
649 link_block (a
, b
->prev_bb
);
651 /* Now blocks A and B are contiguous. Merge them. */
655 /* Blocks A and B are to be merged into a single block. B has no outgoing
656 fallthru edge, so it can be moved after A without adding or modifying
657 any jumps (aside from the jump from A to B). */
660 merge_blocks_move_successor_nojumps (basic_block a
, basic_block b
)
662 rtx_insn
*barrier
, *real_b_end
;
664 rtx_jump_table_data
*table
;
666 /* If we are partitioning hot/cold basic blocks, we don't want to
667 mess up unconditional or indirect jumps that cross between hot
670 Basic block partitioning may result in some jumps that appear to
671 be optimizable (or blocks that appear to be mergeable), but which really
672 must be left untouched (they are required to make it safely across
673 partition boundaries). See the comments at the top of
674 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
676 if (BB_PARTITION (a
) != BB_PARTITION (b
))
679 real_b_end
= BB_END (b
);
681 /* If there is a jump table following block B temporarily add the jump table
682 to block B so that it will also be moved to the correct location. */
683 if (tablejump_p (BB_END (b
), &label
, &table
)
684 && prev_active_insn (label
) == BB_END (b
))
689 /* There had better have been a barrier there. Delete it. */
690 barrier
= NEXT_INSN (BB_END (b
));
691 if (barrier
&& BARRIER_P (barrier
))
692 delete_insn (barrier
);
695 /* Scramble the insn chain. */
696 reorder_insns_nobb (BB_HEAD (b
), BB_END (b
), BB_END (a
));
698 /* Restore the real end of b. */
699 BB_END (b
) = real_b_end
;
702 fprintf (dump_file
, "Moved block %d after %d and merged.\n",
705 /* Now blocks A and B are contiguous. Merge them. */
709 /* Attempt to merge basic blocks that are potentially non-adjacent.
710 Return NULL iff the attempt failed, otherwise return basic block
711 where cleanup_cfg should continue. Because the merging commonly
712 moves basic block away or introduces another optimization
713 possibility, return basic block just before B so cleanup_cfg don't
716 It may be good idea to return basic block before C in the case
717 C has been moved after B and originally appeared earlier in the
718 insn sequence, but we have no information available about the
719 relative ordering of these two. Hopefully it is not too common. */
722 merge_blocks_move (edge e
, basic_block b
, basic_block c
, int mode
)
726 /* If we are partitioning hot/cold basic blocks, we don't want to
727 mess up unconditional or indirect jumps that cross between hot
730 Basic block partitioning may result in some jumps that appear to
731 be optimizable (or blocks that appear to be mergeable), but which really
732 must be left untouched (they are required to make it safely across
733 partition boundaries). See the comments at the top of
734 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
736 if (BB_PARTITION (b
) != BB_PARTITION (c
))
739 /* If B has a fallthru edge to C, no need to move anything. */
740 if (e
->flags
& EDGE_FALLTHRU
)
742 int b_index
= b
->index
, c_index
= c
->index
;
744 /* Protect the loop latches. */
745 if (current_loops
&& c
->loop_father
->latch
== c
)
749 update_forwarder_flag (b
);
752 fprintf (dump_file
, "Merged %d and %d without moving.\n",
755 return b
->prev_bb
== ENTRY_BLOCK_PTR_FOR_FN (cfun
) ? b
: b
->prev_bb
;
758 /* Otherwise we will need to move code around. Do that only if expensive
759 transformations are allowed. */
760 else if (mode
& CLEANUP_EXPENSIVE
)
762 edge tmp_edge
, b_fallthru_edge
;
763 bool c_has_outgoing_fallthru
;
764 bool b_has_incoming_fallthru
;
766 /* Avoid overactive code motion, as the forwarder blocks should be
767 eliminated by edge redirection instead. One exception might have
768 been if B is a forwarder block and C has no fallthru edge, but
769 that should be cleaned up by bb-reorder instead. */
770 if (FORWARDER_BLOCK_P (b
) || FORWARDER_BLOCK_P (c
))
773 /* We must make sure to not munge nesting of lexical blocks,
774 and loop notes. This is done by squeezing out all the notes
775 and leaving them there to lie. Not ideal, but functional. */
777 tmp_edge
= find_fallthru_edge (c
->succs
);
778 c_has_outgoing_fallthru
= (tmp_edge
!= NULL
);
780 tmp_edge
= find_fallthru_edge (b
->preds
);
781 b_has_incoming_fallthru
= (tmp_edge
!= NULL
);
782 b_fallthru_edge
= tmp_edge
;
785 next
= next
->prev_bb
;
787 /* Otherwise, we're going to try to move C after B. If C does
788 not have an outgoing fallthru, then it can be moved
789 immediately after B without introducing or modifying jumps. */
790 if (! c_has_outgoing_fallthru
)
792 merge_blocks_move_successor_nojumps (b
, c
);
793 return next
== ENTRY_BLOCK_PTR_FOR_FN (cfun
) ? next
->next_bb
: next
;
796 /* If B does not have an incoming fallthru, then it can be moved
797 immediately before C without introducing or modifying jumps.
798 C cannot be the first block, so we do not have to worry about
799 accessing a non-existent block. */
801 if (b_has_incoming_fallthru
)
805 if (b_fallthru_edge
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
807 bb
= force_nonfallthru (b_fallthru_edge
);
809 notice_new_block (bb
);
812 merge_blocks_move_predecessor_nojumps (b
, c
);
813 return next
== ENTRY_BLOCK_PTR_FOR_FN (cfun
) ? next
->next_bb
: next
;
820 /* Removes the memory attributes of MEM expression
821 if they are not equal. */
824 merge_memattrs (rtx x
, rtx y
)
833 if (x
== 0 || y
== 0)
838 if (code
!= GET_CODE (y
))
841 if (GET_MODE (x
) != GET_MODE (y
))
844 if (code
== MEM
&& !mem_attrs_eq_p (MEM_ATTRS (x
), MEM_ATTRS (y
)))
848 else if (! MEM_ATTRS (y
))
852 if (MEM_ALIAS_SET (x
) != MEM_ALIAS_SET (y
))
854 set_mem_alias_set (x
, 0);
855 set_mem_alias_set (y
, 0);
858 if (! mem_expr_equal_p (MEM_EXPR (x
), MEM_EXPR (y
)))
862 clear_mem_offset (x
);
863 clear_mem_offset (y
);
865 else if (MEM_OFFSET_KNOWN_P (x
) != MEM_OFFSET_KNOWN_P (y
)
866 || (MEM_OFFSET_KNOWN_P (x
)
867 && maybe_ne (MEM_OFFSET (x
), MEM_OFFSET (y
))))
869 clear_mem_offset (x
);
870 clear_mem_offset (y
);
873 if (!MEM_SIZE_KNOWN_P (x
))
875 else if (!MEM_SIZE_KNOWN_P (y
))
877 else if (known_le (MEM_SIZE (x
), MEM_SIZE (y
)))
878 set_mem_size (x
, MEM_SIZE (y
));
879 else if (known_le (MEM_SIZE (y
), MEM_SIZE (x
)))
880 set_mem_size (y
, MEM_SIZE (x
));
883 /* The sizes aren't ordered, so we can't merge them. */
888 set_mem_align (x
, MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)));
889 set_mem_align (y
, MEM_ALIGN (x
));
894 if (MEM_READONLY_P (x
) != MEM_READONLY_P (y
))
896 MEM_READONLY_P (x
) = 0;
897 MEM_READONLY_P (y
) = 0;
899 if (MEM_NOTRAP_P (x
) != MEM_NOTRAP_P (y
))
901 MEM_NOTRAP_P (x
) = 0;
902 MEM_NOTRAP_P (y
) = 0;
904 if (MEM_VOLATILE_P (x
) != MEM_VOLATILE_P (y
))
906 MEM_VOLATILE_P (x
) = 1;
907 MEM_VOLATILE_P (y
) = 1;
911 fmt
= GET_RTX_FORMAT (code
);
912 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
917 /* Two vectors must have the same length. */
918 if (XVECLEN (x
, i
) != XVECLEN (y
, i
))
921 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
922 merge_memattrs (XVECEXP (x
, i
, j
), XVECEXP (y
, i
, j
));
927 merge_memattrs (XEXP (x
, i
), XEXP (y
, i
));
934 /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
935 different single sets S1 and S2. */
938 equal_different_set_p (rtx p1
, rtx s1
, rtx p2
, rtx s2
)
943 if (p1
== s1
&& p2
== s2
)
946 if (GET_CODE (p1
) != PARALLEL
|| GET_CODE (p2
) != PARALLEL
)
949 if (XVECLEN (p1
, 0) != XVECLEN (p2
, 0))
952 for (i
= 0; i
< XVECLEN (p1
, 0); i
++)
954 e1
= XVECEXP (p1
, 0, i
);
955 e2
= XVECEXP (p2
, 0, i
);
956 if (e1
== s1
&& e2
== s2
)
959 ? rtx_renumbered_equal_p (e1
, e2
) : rtx_equal_p (e1
, e2
))
969 /* NOTE1 is the REG_EQUAL note, if any, attached to an insn
970 that is a single_set with a SET_SRC of SRC1. Similarly
973 So effectively NOTE1/NOTE2 are an alternate form of
974 SRC1/SRC2 respectively.
976 Return nonzero if SRC1 or NOTE1 has the same constant
977 integer value as SRC2 or NOTE2. Else return zero. */
979 values_equal_p (rtx note1
, rtx note2
, rtx src1
, rtx src2
)
983 && CONST_INT_P (XEXP (note1
, 0))
984 && rtx_equal_p (XEXP (note1
, 0), XEXP (note2
, 0)))
989 && CONST_INT_P (src1
)
990 && CONST_INT_P (src2
)
991 && rtx_equal_p (src1
, src2
))
995 && CONST_INT_P (src2
)
996 && rtx_equal_p (XEXP (note1
, 0), src2
))
1000 && CONST_INT_P (src1
)
1001 && rtx_equal_p (XEXP (note2
, 0), src1
))
1007 /* Examine register notes on I1 and I2 and return:
1008 - dir_forward if I1 can be replaced by I2, or
1009 - dir_backward if I2 can be replaced by I1, or
1010 - dir_both if both are the case. */
1012 static enum replace_direction
1013 can_replace_by (rtx_insn
*i1
, rtx_insn
*i2
)
1015 rtx s1
, s2
, d1
, d2
, src1
, src2
, note1
, note2
;
1018 /* Check for 2 sets. */
1019 s1
= single_set (i1
);
1020 s2
= single_set (i2
);
1021 if (s1
== NULL_RTX
|| s2
== NULL_RTX
)
1024 /* Check that the 2 sets set the same dest. */
1027 if (!(reload_completed
1028 ? rtx_renumbered_equal_p (d1
, d2
) : rtx_equal_p (d1
, d2
)))
1031 /* Find identical req_equiv or reg_equal note, which implies that the 2 sets
1032 set dest to the same value. */
1033 note1
= find_reg_equal_equiv_note (i1
);
1034 note2
= find_reg_equal_equiv_note (i2
);
1036 src1
= SET_SRC (s1
);
1037 src2
= SET_SRC (s2
);
1039 if (!values_equal_p (note1
, note2
, src1
, src2
))
1042 if (!equal_different_set_p (PATTERN (i1
), s1
, PATTERN (i2
), s2
))
1045 /* Although the 2 sets set dest to the same value, we cannot replace
1046 (set (dest) (const_int))
1049 because we don't know if the reg is live and has the same value at the
1050 location of replacement. */
1051 c1
= CONST_INT_P (src1
);
1052 c2
= CONST_INT_P (src2
);
1058 return dir_backward
;
1063 /* Merges directions A and B. */
1065 static enum replace_direction
1066 merge_dir (enum replace_direction a
, enum replace_direction b
)
1068 /* Implements the following table:
1087 /* Array of flags indexed by reg note kind, true if the given
1088 reg note is CFA related. */
1089 static const bool reg_note_cfa_p
[] = {
1091 #define DEF_REG_NOTE(NAME) false,
1092 #define REG_CFA_NOTE(NAME) true,
1093 #include "reg-notes.def"
1099 /* Return true if I1 and I2 have identical CFA notes (the same order
1100 and equivalent content). */
1103 insns_have_identical_cfa_notes (rtx_insn
*i1
, rtx_insn
*i2
)
1106 for (n1
= REG_NOTES (i1
), n2
= REG_NOTES (i2
); ;
1107 n1
= XEXP (n1
, 1), n2
= XEXP (n2
, 1))
1109 /* Skip over reg notes not related to CFI information. */
1110 while (n1
&& !reg_note_cfa_p
[REG_NOTE_KIND (n1
)])
1112 while (n2
&& !reg_note_cfa_p
[REG_NOTE_KIND (n2
)])
1114 if (n1
== NULL_RTX
&& n2
== NULL_RTX
)
1116 if (n1
== NULL_RTX
|| n2
== NULL_RTX
)
1118 if (XEXP (n1
, 0) == XEXP (n2
, 0))
1120 else if (XEXP (n1
, 0) == NULL_RTX
|| XEXP (n2
, 0) == NULL_RTX
)
1122 else if (!(reload_completed
1123 ? rtx_renumbered_equal_p (XEXP (n1
, 0), XEXP (n2
, 0))
1124 : rtx_equal_p (XEXP (n1
, 0), XEXP (n2
, 0))))
1129 /* Examine I1 and I2 and return:
1130 - dir_forward if I1 can be replaced by I2, or
1131 - dir_backward if I2 can be replaced by I1, or
1132 - dir_both if both are the case. */
1134 static enum replace_direction
1135 old_insns_match_p (int mode ATTRIBUTE_UNUSED
, rtx_insn
*i1
, rtx_insn
*i2
)
1139 /* Verify that I1 and I2 are equivalent. */
1140 if (GET_CODE (i1
) != GET_CODE (i2
))
1143 /* __builtin_unreachable() may lead to empty blocks (ending with
1144 NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */
1145 if (NOTE_INSN_BASIC_BLOCK_P (i1
) && NOTE_INSN_BASIC_BLOCK_P (i2
))
1148 /* ??? Do not allow cross-jumping between different stack levels. */
1149 p1
= find_reg_note (i1
, REG_ARGS_SIZE
, NULL
);
1150 p2
= find_reg_note (i2
, REG_ARGS_SIZE
, NULL
);
1155 if (!rtx_equal_p (p1
, p2
))
1158 /* ??? Worse, this adjustment had better be constant lest we
1159 have differing incoming stack levels. */
1160 if (!frame_pointer_needed
1161 && known_eq (find_args_size_adjust (i1
), HOST_WIDE_INT_MIN
))
1167 /* Do not allow cross-jumping between frame related insns and other
1169 if (RTX_FRAME_RELATED_P (i1
) != RTX_FRAME_RELATED_P (i2
))
1175 if (GET_CODE (p1
) != GET_CODE (p2
))
1178 /* If this is a CALL_INSN, compare register usage information.
1179 If we don't check this on stack register machines, the two
1180 CALL_INSNs might be merged leaving reg-stack.c with mismatching
1181 numbers of stack registers in the same basic block.
1182 If we don't check this on machines with delay slots, a delay slot may
1183 be filled that clobbers a parameter expected by the subroutine.
1185 ??? We take the simple route for now and assume that if they're
1186 equal, they were constructed identically.
1188 Also check for identical exception regions. */
1192 /* Ensure the same EH region. */
1193 rtx n1
= find_reg_note (i1
, REG_EH_REGION
, 0);
1194 rtx n2
= find_reg_note (i2
, REG_EH_REGION
, 0);
1199 if (n1
&& (!n2
|| XEXP (n1
, 0) != XEXP (n2
, 0)))
1202 if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1
),
1203 CALL_INSN_FUNCTION_USAGE (i2
))
1204 || SIBLING_CALL_P (i1
) != SIBLING_CALL_P (i2
))
1207 /* For address sanitizer, never crossjump __asan_report_* builtins,
1208 otherwise errors might be reported on incorrect lines. */
1209 if (flag_sanitize
& SANITIZE_ADDRESS
)
1211 rtx call
= get_call_rtx_from (i1
);
1212 if (call
&& GET_CODE (XEXP (XEXP (call
, 0), 0)) == SYMBOL_REF
)
1214 rtx symbol
= XEXP (XEXP (call
, 0), 0);
1215 if (SYMBOL_REF_DECL (symbol
)
1216 && TREE_CODE (SYMBOL_REF_DECL (symbol
)) == FUNCTION_DECL
)
1218 if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol
))
1220 && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol
))
1221 >= BUILT_IN_ASAN_REPORT_LOAD1
1222 && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol
))
1223 <= BUILT_IN_ASAN_STOREN
)
1229 HARD_REG_SET i1_used
, i2_used
;
1231 get_call_reg_set_usage (i1
, &i1_used
, call_used_reg_set
);
1232 get_call_reg_set_usage (i2
, &i2_used
, call_used_reg_set
);
1234 if (!hard_reg_set_equal_p (i1_used
, i2_used
))
1238 /* If both i1 and i2 are frame related, verify all the CFA notes
1239 in the same order and with the same content. */
1240 if (RTX_FRAME_RELATED_P (i1
) && !insns_have_identical_cfa_notes (i1
, i2
))
1244 /* If cross_jump_death_matters is not 0, the insn's mode
1245 indicates whether or not the insn contains any stack-like
1248 if ((mode
& CLEANUP_POST_REGSTACK
) && stack_regs_mentioned (i1
))
1250 /* If register stack conversion has already been done, then
1251 death notes must also be compared before it is certain that
1252 the two instruction streams match. */
1255 HARD_REG_SET i1_regset
, i2_regset
;
1257 CLEAR_HARD_REG_SET (i1_regset
);
1258 CLEAR_HARD_REG_SET (i2_regset
);
1260 for (note
= REG_NOTES (i1
); note
; note
= XEXP (note
, 1))
1261 if (REG_NOTE_KIND (note
) == REG_DEAD
&& STACK_REG_P (XEXP (note
, 0)))
1262 SET_HARD_REG_BIT (i1_regset
, REGNO (XEXP (note
, 0)));
1264 for (note
= REG_NOTES (i2
); note
; note
= XEXP (note
, 1))
1265 if (REG_NOTE_KIND (note
) == REG_DEAD
&& STACK_REG_P (XEXP (note
, 0)))
1266 SET_HARD_REG_BIT (i2_regset
, REGNO (XEXP (note
, 0)));
1268 if (!hard_reg_set_equal_p (i1_regset
, i2_regset
))
1273 if (reload_completed
1274 ? rtx_renumbered_equal_p (p1
, p2
) : rtx_equal_p (p1
, p2
))
1277 return can_replace_by (i1
, i2
);
1280 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1281 flow_find_head_matching_sequence, ensure the notes match. */
1284 merge_notes (rtx_insn
*i1
, rtx_insn
*i2
)
1286 /* If the merged insns have different REG_EQUAL notes, then
1288 rtx equiv1
= find_reg_equal_equiv_note (i1
);
1289 rtx equiv2
= find_reg_equal_equiv_note (i2
);
1291 if (equiv1
&& !equiv2
)
1292 remove_note (i1
, equiv1
);
1293 else if (!equiv1
&& equiv2
)
1294 remove_note (i2
, equiv2
);
1295 else if (equiv1
&& equiv2
1296 && !rtx_equal_p (XEXP (equiv1
, 0), XEXP (equiv2
, 0)))
1298 remove_note (i1
, equiv1
);
1299 remove_note (i2
, equiv2
);
1303 /* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
1304 resulting insn in I1, and the corresponding bb in BB1. At the head of a
1305 bb, if there is a predecessor bb that reaches this bb via fallthru, and
1306 FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
1307 DID_FALLTHRU. Otherwise, stops at the head of the bb. */
1310 walk_to_nondebug_insn (rtx_insn
**i1
, basic_block
*bb1
, bool follow_fallthru
,
1315 *did_fallthru
= false;
1318 while (!NONDEBUG_INSN_P (*i1
))
1320 if (*i1
!= BB_HEAD (*bb1
))
1322 *i1
= PREV_INSN (*i1
);
1326 if (!follow_fallthru
)
1329 fallthru
= find_fallthru_edge ((*bb1
)->preds
);
1330 if (!fallthru
|| fallthru
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
)
1331 || !single_succ_p (fallthru
->src
))
1334 *bb1
= fallthru
->src
;
1335 *i1
= BB_END (*bb1
);
1336 *did_fallthru
= true;
1340 /* Look through the insns at the end of BB1 and BB2 and find the longest
1341 sequence that are either equivalent, or allow forward or backward
1342 replacement. Store the first insns for that sequence in *F1 and *F2 and
1343 return the sequence length.
1345 DIR_P indicates the allowed replacement direction on function entry, and
1346 the actual replacement direction on function exit. If NULL, only equivalent
1347 sequences are allowed.
1349 To simplify callers of this function, if the blocks match exactly,
1350 store the head of the blocks in *F1 and *F2. */
1353 flow_find_cross_jump (basic_block bb1
, basic_block bb2
, rtx_insn
**f1
,
1354 rtx_insn
**f2
, enum replace_direction
*dir_p
)
1356 rtx_insn
*i1
, *i2
, *last1
, *last2
, *afterlast1
, *afterlast2
;
1358 enum replace_direction dir
, last_dir
, afterlast_dir
;
1359 bool follow_fallthru
, did_fallthru
;
1365 afterlast_dir
= dir
;
1366 last_dir
= afterlast_dir
;
1368 /* Skip simple jumps at the end of the blocks. Complex jumps still
1369 need to be compared for equivalence, which we'll do below. */
1372 last1
= afterlast1
= last2
= afterlast2
= NULL
;
1374 || (returnjump_p (i1
) && !side_effects_p (PATTERN (i1
))))
1377 i1
= PREV_INSN (i1
);
1382 || (returnjump_p (i2
) && !side_effects_p (PATTERN (i2
))))
1385 /* Count everything except for unconditional jump as insn.
1386 Don't count any jumps if dir_p is NULL. */
1387 if (!simplejump_p (i2
) && !returnjump_p (i2
) && last1
&& dir_p
)
1389 i2
= PREV_INSN (i2
);
1394 /* In the following example, we can replace all jumps to C by jumps to A.
1396 This removes 4 duplicate insns.
1397 [bb A] insn1 [bb C] insn1
1403 We could also replace all jumps to A by jumps to C, but that leaves B
1404 alive, and removes only 2 duplicate insns. In a subsequent crossjump
1405 step, all jumps to B would be replaced with jumps to the middle of C,
1406 achieving the same result with more effort.
1407 So we allow only the first possibility, which means that we don't allow
1408 fallthru in the block that's being replaced. */
1410 follow_fallthru
= dir_p
&& dir
!= dir_forward
;
1411 walk_to_nondebug_insn (&i1
, &bb1
, follow_fallthru
, &did_fallthru
);
1415 follow_fallthru
= dir_p
&& dir
!= dir_backward
;
1416 walk_to_nondebug_insn (&i2
, &bb2
, follow_fallthru
, &did_fallthru
);
1420 if (i1
== BB_HEAD (bb1
) || i2
== BB_HEAD (bb2
))
1423 /* Do not turn corssing edge to non-crossing or vice versa after
1425 if (BB_PARTITION (BLOCK_FOR_INSN (i1
))
1426 != BB_PARTITION (BLOCK_FOR_INSN (i2
))
1427 && reload_completed
)
1430 dir
= merge_dir (dir
, old_insns_match_p (0, i1
, i2
));
1431 if (dir
== dir_none
|| (!dir_p
&& dir
!= dir_both
))
1434 merge_memattrs (i1
, i2
);
1436 /* Don't begin a cross-jump with a NOTE insn. */
1439 merge_notes (i1
, i2
);
1441 afterlast1
= last1
, afterlast2
= last2
;
1442 last1
= i1
, last2
= i2
;
1443 afterlast_dir
= last_dir
;
1445 if (active_insn_p (i1
))
1449 i1
= PREV_INSN (i1
);
1450 i2
= PREV_INSN (i2
);
1453 /* Don't allow the insn after a compare to be shared by
1454 cross-jumping unless the compare is also shared. */
1455 if (HAVE_cc0
&& ninsns
&& reg_mentioned_p (cc0_rtx
, last1
)
1456 && ! sets_cc0_p (last1
))
1457 last1
= afterlast1
, last2
= afterlast2
, last_dir
= afterlast_dir
, ninsns
--;
1459 /* Include preceding notes and labels in the cross-jump. One,
1460 this may bring us to the head of the blocks as requested above.
1461 Two, it keeps line number notes as matched as may be. */
1464 bb1
= BLOCK_FOR_INSN (last1
);
1465 while (last1
!= BB_HEAD (bb1
) && !NONDEBUG_INSN_P (PREV_INSN (last1
)))
1466 last1
= PREV_INSN (last1
);
1468 if (last1
!= BB_HEAD (bb1
) && LABEL_P (PREV_INSN (last1
)))
1469 last1
= PREV_INSN (last1
);
1471 bb2
= BLOCK_FOR_INSN (last2
);
1472 while (last2
!= BB_HEAD (bb2
) && !NONDEBUG_INSN_P (PREV_INSN (last2
)))
1473 last2
= PREV_INSN (last2
);
1475 if (last2
!= BB_HEAD (bb2
) && LABEL_P (PREV_INSN (last2
)))
1476 last2
= PREV_INSN (last2
);
1487 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1488 the head of the two blocks. Do not include jumps at the end.
1489 If STOP_AFTER is nonzero, stop after finding that many matching
1490 instructions. If STOP_AFTER is zero, count all INSN_P insns, if it is
1491 non-zero, only count active insns. */
1494 flow_find_head_matching_sequence (basic_block bb1
, basic_block bb2
, rtx_insn
**f1
,
1495 rtx_insn
**f2
, int stop_after
)
1497 rtx_insn
*i1
, *i2
, *last1
, *last2
, *beforelast1
, *beforelast2
;
1501 int nehedges1
= 0, nehedges2
= 0;
1503 FOR_EACH_EDGE (e
, ei
, bb1
->succs
)
1504 if (e
->flags
& EDGE_EH
)
1506 FOR_EACH_EDGE (e
, ei
, bb2
->succs
)
1507 if (e
->flags
& EDGE_EH
)
1512 last1
= beforelast1
= last2
= beforelast2
= NULL
;
1516 /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG. */
1517 while (!NONDEBUG_INSN_P (i1
) && i1
!= BB_END (bb1
))
1519 if (NOTE_P (i1
) && NOTE_KIND (i1
) == NOTE_INSN_EPILOGUE_BEG
)
1521 i1
= NEXT_INSN (i1
);
1524 while (!NONDEBUG_INSN_P (i2
) && i2
!= BB_END (bb2
))
1526 if (NOTE_P (i2
) && NOTE_KIND (i2
) == NOTE_INSN_EPILOGUE_BEG
)
1528 i2
= NEXT_INSN (i2
);
1531 if ((i1
== BB_END (bb1
) && !NONDEBUG_INSN_P (i1
))
1532 || (i2
== BB_END (bb2
) && !NONDEBUG_INSN_P (i2
)))
1535 if (NOTE_P (i1
) || NOTE_P (i2
)
1536 || JUMP_P (i1
) || JUMP_P (i2
))
1539 /* A sanity check to make sure we're not merging insns with different
1540 effects on EH. If only one of them ends a basic block, it shouldn't
1541 have an EH edge; if both end a basic block, there should be the same
1542 number of EH edges. */
1543 if ((i1
== BB_END (bb1
) && i2
!= BB_END (bb2
)
1545 || (i2
== BB_END (bb2
) && i1
!= BB_END (bb1
)
1547 || (i1
== BB_END (bb1
) && i2
== BB_END (bb2
)
1548 && nehedges1
!= nehedges2
))
1551 if (old_insns_match_p (0, i1
, i2
) != dir_both
)
1554 merge_memattrs (i1
, i2
);
1556 /* Don't begin a cross-jump with a NOTE insn. */
1559 merge_notes (i1
, i2
);
1561 beforelast1
= last1
, beforelast2
= last2
;
1562 last1
= i1
, last2
= i2
;
1563 if (!stop_after
|| active_insn_p (i1
))
1567 if (i1
== BB_END (bb1
) || i2
== BB_END (bb2
)
1568 || (stop_after
> 0 && ninsns
== stop_after
))
1571 i1
= NEXT_INSN (i1
);
1572 i2
= NEXT_INSN (i2
);
1575 /* Don't allow a compare to be shared by cross-jumping unless the insn
1576 after the compare is also shared. */
1577 if (HAVE_cc0
&& ninsns
&& reg_mentioned_p (cc0_rtx
, last1
)
1578 && sets_cc0_p (last1
))
1579 last1
= beforelast1
, last2
= beforelast2
, ninsns
--;
1590 /* Return true iff outgoing edges of BB1 and BB2 match, together with
1591 the branch instruction. This means that if we commonize the control
1592 flow before end of the basic block, the semantic remains unchanged.
1594 We may assume that there exists one edge with a common destination. */
1597 outgoing_edges_match (int mode
, basic_block bb1
, basic_block bb2
)
1599 int nehedges1
= 0, nehedges2
= 0;
1600 edge fallthru1
= 0, fallthru2
= 0;
1604 /* If we performed shrink-wrapping, edges to the exit block can
1605 only be distinguished for JUMP_INSNs. The two paths may differ in
1606 whether they went through the prologue. Sibcalls are fine, we know
1607 that we either didn't need or inserted an epilogue before them. */
1608 if (crtl
->shrink_wrapped
1609 && single_succ_p (bb1
)
1610 && single_succ (bb1
) == EXIT_BLOCK_PTR_FOR_FN (cfun
)
1611 && (!JUMP_P (BB_END (bb1
))
1612 /* Punt if the only successor is a fake edge to exit, the jump
1613 must be some weird one. */
1614 || (single_succ_edge (bb1
)->flags
& EDGE_FAKE
) != 0)
1615 && !(CALL_P (BB_END (bb1
)) && SIBLING_CALL_P (BB_END (bb1
))))
1618 /* If BB1 has only one successor, we may be looking at either an
1619 unconditional jump, or a fake edge to exit. */
1620 if (single_succ_p (bb1
)
1621 && (single_succ_edge (bb1
)->flags
& (EDGE_COMPLEX
| EDGE_FAKE
)) == 0
1622 && (!JUMP_P (BB_END (bb1
)) || simplejump_p (BB_END (bb1
))))
1623 return (single_succ_p (bb2
)
1624 && (single_succ_edge (bb2
)->flags
1625 & (EDGE_COMPLEX
| EDGE_FAKE
)) == 0
1626 && (!JUMP_P (BB_END (bb2
)) || simplejump_p (BB_END (bb2
))));
1628 /* Match conditional jumps - this may get tricky when fallthru and branch
1629 edges are crossed. */
1630 if (EDGE_COUNT (bb1
->succs
) == 2
1631 && any_condjump_p (BB_END (bb1
))
1632 && onlyjump_p (BB_END (bb1
)))
1634 edge b1
, f1
, b2
, f2
;
1635 bool reverse
, match
;
1636 rtx set1
, set2
, cond1
, cond2
;
1637 enum rtx_code code1
, code2
;
1639 if (EDGE_COUNT (bb2
->succs
) != 2
1640 || !any_condjump_p (BB_END (bb2
))
1641 || !onlyjump_p (BB_END (bb2
)))
1644 b1
= BRANCH_EDGE (bb1
);
1645 b2
= BRANCH_EDGE (bb2
);
1646 f1
= FALLTHRU_EDGE (bb1
);
1647 f2
= FALLTHRU_EDGE (bb2
);
1649 /* Get around possible forwarders on fallthru edges. Other cases
1650 should be optimized out already. */
1651 if (FORWARDER_BLOCK_P (f1
->dest
))
1652 f1
= single_succ_edge (f1
->dest
);
1654 if (FORWARDER_BLOCK_P (f2
->dest
))
1655 f2
= single_succ_edge (f2
->dest
);
1657 /* To simplify use of this function, return false if there are
1658 unneeded forwarder blocks. These will get eliminated later
1659 during cleanup_cfg. */
1660 if (FORWARDER_BLOCK_P (f1
->dest
)
1661 || FORWARDER_BLOCK_P (f2
->dest
)
1662 || FORWARDER_BLOCK_P (b1
->dest
)
1663 || FORWARDER_BLOCK_P (b2
->dest
))
1666 if (f1
->dest
== f2
->dest
&& b1
->dest
== b2
->dest
)
1668 else if (f1
->dest
== b2
->dest
&& b1
->dest
== f2
->dest
)
1673 set1
= pc_set (BB_END (bb1
));
1674 set2
= pc_set (BB_END (bb2
));
1675 if ((XEXP (SET_SRC (set1
), 1) == pc_rtx
)
1676 != (XEXP (SET_SRC (set2
), 1) == pc_rtx
))
1679 cond1
= XEXP (SET_SRC (set1
), 0);
1680 cond2
= XEXP (SET_SRC (set2
), 0);
1681 code1
= GET_CODE (cond1
);
1683 code2
= reversed_comparison_code (cond2
, BB_END (bb2
));
1685 code2
= GET_CODE (cond2
);
1687 if (code2
== UNKNOWN
)
1690 /* Verify codes and operands match. */
1691 match
= ((code1
== code2
1692 && rtx_renumbered_equal_p (XEXP (cond1
, 0), XEXP (cond2
, 0))
1693 && rtx_renumbered_equal_p (XEXP (cond1
, 1), XEXP (cond2
, 1)))
1694 || (code1
== swap_condition (code2
)
1695 && rtx_renumbered_equal_p (XEXP (cond1
, 1),
1697 && rtx_renumbered_equal_p (XEXP (cond1
, 0),
1700 /* If we return true, we will join the blocks. Which means that
1701 we will only have one branch prediction bit to work with. Thus
1702 we require the existing branches to have probabilities that are
1705 && optimize_bb_for_speed_p (bb1
)
1706 && optimize_bb_for_speed_p (bb2
))
1708 profile_probability prob2
;
1710 if (b1
->dest
== b2
->dest
)
1711 prob2
= b2
->probability
;
1713 /* Do not use f2 probability as f2 may be forwarded. */
1714 prob2
= b2
->probability
.invert ();
1716 /* Fail if the difference in probabilities is greater than 50%.
1717 This rules out two well-predicted branches with opposite
1719 if (b1
->probability
.differs_lot_from_p (prob2
))
1724 "Outcomes of branch in bb %i and %i differ too"
1725 " much (", bb1
->index
, bb2
->index
);
1726 b1
->probability
.dump (dump_file
);
1727 prob2
.dump (dump_file
);
1728 fprintf (dump_file
, ")\n");
1734 if (dump_file
&& match
)
1735 fprintf (dump_file
, "Conditionals in bb %i and %i match.\n",
1736 bb1
->index
, bb2
->index
);
1741 /* Generic case - we are seeing a computed jump, table jump or trapping
1744 /* Check whether there are tablejumps in the end of BB1 and BB2.
1745 Return true if they are identical. */
1747 rtx_insn
*label1
, *label2
;
1748 rtx_jump_table_data
*table1
, *table2
;
1750 if (tablejump_p (BB_END (bb1
), &label1
, &table1
)
1751 && tablejump_p (BB_END (bb2
), &label2
, &table2
)
1752 && GET_CODE (PATTERN (table1
)) == GET_CODE (PATTERN (table2
)))
1754 /* The labels should never be the same rtx. If they really are same
1755 the jump tables are same too. So disable crossjumping of blocks BB1
1756 and BB2 because when deleting the common insns in the end of BB1
1757 by delete_basic_block () the jump table would be deleted too. */
1758 /* If LABEL2 is referenced in BB1->END do not do anything
1759 because we would loose information when replacing
1760 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1761 if (label1
!= label2
&& !rtx_referenced_p (label2
, BB_END (bb1
)))
1763 /* Set IDENTICAL to true when the tables are identical. */
1764 bool identical
= false;
1767 p1
= PATTERN (table1
);
1768 p2
= PATTERN (table2
);
1769 if (GET_CODE (p1
) == ADDR_VEC
&& rtx_equal_p (p1
, p2
))
1773 else if (GET_CODE (p1
) == ADDR_DIFF_VEC
1774 && (XVECLEN (p1
, 1) == XVECLEN (p2
, 1))
1775 && rtx_equal_p (XEXP (p1
, 2), XEXP (p2
, 2))
1776 && rtx_equal_p (XEXP (p1
, 3), XEXP (p2
, 3)))
1781 for (i
= XVECLEN (p1
, 1) - 1; i
>= 0 && identical
; i
--)
1782 if (!rtx_equal_p (XVECEXP (p1
, 1, i
), XVECEXP (p2
, 1, i
)))
1790 /* Temporarily replace references to LABEL1 with LABEL2
1791 in BB1->END so that we could compare the instructions. */
1792 replace_label_in_insn (BB_END (bb1
), label1
, label2
, false);
1794 match
= (old_insns_match_p (mode
, BB_END (bb1
), BB_END (bb2
))
1796 if (dump_file
&& match
)
1798 "Tablejumps in bb %i and %i match.\n",
1799 bb1
->index
, bb2
->index
);
1801 /* Set the original label in BB1->END because when deleting
1802 a block whose end is a tablejump, the tablejump referenced
1803 from the instruction is deleted too. */
1804 replace_label_in_insn (BB_END (bb1
), label2
, label1
, false);
1813 /* Find the last non-debug non-note instruction in each bb, except
1814 stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1815 handles that case specially. old_insns_match_p does not handle
1816 other types of instruction notes. */
1817 rtx_insn
*last1
= BB_END (bb1
);
1818 rtx_insn
*last2
= BB_END (bb2
);
1819 while (!NOTE_INSN_BASIC_BLOCK_P (last1
) &&
1820 (DEBUG_INSN_P (last1
) || NOTE_P (last1
)))
1821 last1
= PREV_INSN (last1
);
1822 while (!NOTE_INSN_BASIC_BLOCK_P (last2
) &&
1823 (DEBUG_INSN_P (last2
) || NOTE_P (last2
)))
1824 last2
= PREV_INSN (last2
);
1825 gcc_assert (last1
&& last2
);
1827 /* First ensure that the instructions match. There may be many outgoing
1828 edges so this test is generally cheaper. */
1829 if (old_insns_match_p (mode
, last1
, last2
) != dir_both
)
1832 /* Search the outgoing edges, ensure that the counts do match, find possible
1833 fallthru and exception handling edges since these needs more
1835 if (EDGE_COUNT (bb1
->succs
) != EDGE_COUNT (bb2
->succs
))
1838 bool nonfakeedges
= false;
1839 FOR_EACH_EDGE (e1
, ei
, bb1
->succs
)
1841 e2
= EDGE_SUCC (bb2
, ei
.index
);
1843 if ((e1
->flags
& EDGE_FAKE
) == 0)
1844 nonfakeedges
= true;
1846 if (e1
->flags
& EDGE_EH
)
1849 if (e2
->flags
& EDGE_EH
)
1852 if (e1
->flags
& EDGE_FALLTHRU
)
1854 if (e2
->flags
& EDGE_FALLTHRU
)
1858 /* If number of edges of various types does not match, fail. */
1859 if (nehedges1
!= nehedges2
1860 || (fallthru1
!= 0) != (fallthru2
!= 0))
1863 /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
1864 and the last real insn doesn't have REG_ARGS_SIZE note, don't
1865 attempt to optimize, as the two basic blocks might have different
1866 REG_ARGS_SIZE depths. For noreturn calls and unconditional
1867 traps there should be REG_ARG_SIZE notes, they could be missing
1868 for __builtin_unreachable () uses though. */
1870 && !ACCUMULATE_OUTGOING_ARGS
1872 || !find_reg_note (last1
, REG_ARGS_SIZE
, NULL
)))
1875 /* fallthru edges must be forwarded to the same destination. */
1878 basic_block d1
= (forwarder_block_p (fallthru1
->dest
)
1879 ? single_succ (fallthru1
->dest
): fallthru1
->dest
);
1880 basic_block d2
= (forwarder_block_p (fallthru2
->dest
)
1881 ? single_succ (fallthru2
->dest
): fallthru2
->dest
);
1887 /* Ensure the same EH region. */
1889 rtx n1
= find_reg_note (BB_END (bb1
), REG_EH_REGION
, 0);
1890 rtx n2
= find_reg_note (BB_END (bb2
), REG_EH_REGION
, 0);
1895 if (n1
&& (!n2
|| XEXP (n1
, 0) != XEXP (n2
, 0)))
1899 /* The same checks as in try_crossjump_to_edge. It is required for RTL
1900 version of sequence abstraction. */
1901 FOR_EACH_EDGE (e1
, ei
, bb2
->succs
)
1905 basic_block d1
= e1
->dest
;
1907 if (FORWARDER_BLOCK_P (d1
))
1908 d1
= EDGE_SUCC (d1
, 0)->dest
;
1910 FOR_EACH_EDGE (e2
, ei
, bb1
->succs
)
1912 basic_block d2
= e2
->dest
;
1913 if (FORWARDER_BLOCK_P (d2
))
1914 d2
= EDGE_SUCC (d2
, 0)->dest
;
1926 /* Returns true if BB basic block has a preserve label. */
1929 block_has_preserve_label (basic_block bb
)
1933 && LABEL_PRESERVE_P (block_label (bb
)));
1936 /* E1 and E2 are edges with the same destination block. Search their
1937 predecessors for common code. If found, redirect control flow from
1938 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1939 or the other way around (dir_backward). DIR specifies the allowed
1940 replacement direction. */
1943 try_crossjump_to_edge (int mode
, edge e1
, edge e2
,
1944 enum replace_direction dir
)
1947 basic_block src1
= e1
->src
, src2
= e2
->src
;
1948 basic_block redirect_to
, redirect_from
, to_remove
;
1949 basic_block osrc1
, osrc2
, redirect_edges_to
, tmp
;
1950 rtx_insn
*newpos1
, *newpos2
;
1954 newpos1
= newpos2
= NULL
;
1956 /* Search backward through forwarder blocks. We don't need to worry
1957 about multiple entry or chained forwarders, as they will be optimized
1958 away. We do this to look past the unconditional jump following a
1959 conditional jump that is required due to the current CFG shape. */
1960 if (single_pred_p (src1
)
1961 && FORWARDER_BLOCK_P (src1
))
1962 e1
= single_pred_edge (src1
), src1
= e1
->src
;
1964 if (single_pred_p (src2
)
1965 && FORWARDER_BLOCK_P (src2
))
1966 e2
= single_pred_edge (src2
), src2
= e2
->src
;
1968 /* Nothing to do if we reach ENTRY, or a common source block. */
1969 if (src1
== ENTRY_BLOCK_PTR_FOR_FN (cfun
) || src2
1970 == ENTRY_BLOCK_PTR_FOR_FN (cfun
))
1975 /* Seeing more than 1 forwarder blocks would confuse us later... */
1976 if (FORWARDER_BLOCK_P (e1
->dest
)
1977 && FORWARDER_BLOCK_P (single_succ (e1
->dest
)))
1980 if (FORWARDER_BLOCK_P (e2
->dest
)
1981 && FORWARDER_BLOCK_P (single_succ (e2
->dest
)))
1984 /* Likewise with dead code (possibly newly created by the other optimizations
1986 if (EDGE_COUNT (src1
->preds
) == 0 || EDGE_COUNT (src2
->preds
) == 0)
1989 /* Do not turn corssing edge to non-crossing or vice versa after reload. */
1990 if (BB_PARTITION (src1
) != BB_PARTITION (src2
)
1991 && reload_completed
)
1994 /* Look for the common insn sequence, part the first ... */
1995 if (!outgoing_edges_match (mode
, src1
, src2
))
1998 /* ... and part the second. */
1999 nmatch
= flow_find_cross_jump (src1
, src2
, &newpos1
, &newpos2
, &dir
);
2003 if (newpos1
!= NULL_RTX
)
2004 src1
= BLOCK_FOR_INSN (newpos1
);
2005 if (newpos2
!= NULL_RTX
)
2006 src2
= BLOCK_FOR_INSN (newpos2
);
2008 /* Check that SRC1 and SRC2 have preds again. They may have changed
2009 above due to the call to flow_find_cross_jump. */
2010 if (EDGE_COUNT (src1
->preds
) == 0 || EDGE_COUNT (src2
->preds
) == 0)
2013 if (dir
== dir_backward
)
2015 std::swap (osrc1
, osrc2
);
2016 std::swap (src1
, src2
);
2018 std::swap (newpos1
, newpos2
);
2021 /* Don't proceed with the crossjump unless we found a sufficient number
2022 of matching instructions or the 'from' block was totally matched
2023 (such that its predecessors will hopefully be redirected and the
2025 if ((nmatch
< PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS
))
2026 && (newpos1
!= BB_HEAD (src1
)))
2029 /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
2030 if (block_has_preserve_label (e1
->dest
)
2031 && (e1
->flags
& EDGE_ABNORMAL
))
2034 /* Here we know that the insns in the end of SRC1 which are common with SRC2
2036 If we have tablejumps in the end of SRC1 and SRC2
2037 they have been already compared for equivalence in outgoing_edges_match ()
2038 so replace the references to TABLE1 by references to TABLE2. */
2040 rtx_insn
*label1
, *label2
;
2041 rtx_jump_table_data
*table1
, *table2
;
2043 if (tablejump_p (BB_END (osrc1
), &label1
, &table1
)
2044 && tablejump_p (BB_END (osrc2
), &label2
, &table2
)
2045 && label1
!= label2
)
2049 /* Replace references to LABEL1 with LABEL2. */
2050 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
2052 /* Do not replace the label in SRC1->END because when deleting
2053 a block whose end is a tablejump, the tablejump referenced
2054 from the instruction is deleted too. */
2055 if (insn
!= BB_END (osrc1
))
2056 replace_label_in_insn (insn
, label1
, label2
, true);
2061 /* Avoid splitting if possible. We must always split when SRC2 has
2062 EH predecessor edges, or we may end up with basic blocks with both
2063 normal and EH predecessor edges. */
2064 if (newpos2
== BB_HEAD (src2
)
2065 && !(EDGE_PRED (src2
, 0)->flags
& EDGE_EH
))
2069 if (newpos2
== BB_HEAD (src2
))
2071 /* Skip possible basic block header. */
2072 if (LABEL_P (newpos2
))
2073 newpos2
= NEXT_INSN (newpos2
);
2074 while (DEBUG_INSN_P (newpos2
))
2075 newpos2
= NEXT_INSN (newpos2
);
2076 if (NOTE_P (newpos2
))
2077 newpos2
= NEXT_INSN (newpos2
);
2078 while (DEBUG_INSN_P (newpos2
))
2079 newpos2
= NEXT_INSN (newpos2
);
2083 fprintf (dump_file
, "Splitting bb %i before %i insns\n",
2084 src2
->index
, nmatch
);
2085 redirect_to
= split_block (src2
, PREV_INSN (newpos2
))->dest
;
2090 "Cross jumping from bb %i to bb %i; %i common insns\n",
2091 src1
->index
, src2
->index
, nmatch
);
2093 /* We may have some registers visible through the block. */
2094 df_set_bb_dirty (redirect_to
);
2097 redirect_edges_to
= redirect_to
;
2099 redirect_edges_to
= osrc2
;
2101 /* Recompute the counts of destinations of outgoing edges. */
2102 FOR_EACH_EDGE (s
, ei
, redirect_edges_to
->succs
)
2106 basic_block d
= s
->dest
;
2108 if (FORWARDER_BLOCK_P (d
))
2109 d
= single_succ (d
);
2111 FOR_EACH_EDGE (s2
, ei
, src1
->succs
)
2113 basic_block d2
= s2
->dest
;
2114 if (FORWARDER_BLOCK_P (d2
))
2115 d2
= single_succ (d2
);
2120 /* Take care to update possible forwarder blocks. We verified
2121 that there is no more than one in the chain, so we can't run
2122 into infinite loop. */
2123 if (FORWARDER_BLOCK_P (s
->dest
))
2124 s
->dest
->count
+= s
->count ();
2126 if (FORWARDER_BLOCK_P (s2
->dest
))
2127 s2
->dest
->count
-= s
->count ();
2129 s
->probability
= s
->probability
.combine_with_count
2130 (redirect_edges_to
->count
,
2131 s2
->probability
, src1
->count
);
2134 /* Adjust count for the block. An earlier jump
2135 threading pass may have left the profile in an inconsistent
2136 state (see update_bb_profile_for_threading) so we must be
2137 prepared for overflows. */
2141 tmp
->count
+= src1
->count
;
2142 if (tmp
== redirect_edges_to
)
2144 tmp
= find_fallthru_edge (tmp
->succs
)->dest
;
2147 update_br_prob_note (redirect_edges_to
);
2149 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
2151 /* Skip possible basic block header. */
2152 if (LABEL_P (newpos1
))
2153 newpos1
= NEXT_INSN (newpos1
);
2155 while (DEBUG_INSN_P (newpos1
))
2156 newpos1
= NEXT_INSN (newpos1
);
2158 if (NOTE_INSN_BASIC_BLOCK_P (newpos1
))
2159 newpos1
= NEXT_INSN (newpos1
);
2161 while (DEBUG_INSN_P (newpos1
))
2162 newpos1
= NEXT_INSN (newpos1
);
2164 redirect_from
= split_block (src1
, PREV_INSN (newpos1
))->src
;
2165 to_remove
= single_succ (redirect_from
);
2167 redirect_edge_and_branch_force (single_succ_edge (redirect_from
), redirect_to
);
2168 delete_basic_block (to_remove
);
2170 update_forwarder_flag (redirect_from
);
2171 if (redirect_to
!= src2
)
2172 update_forwarder_flag (src2
);
2177 /* Search the predecessors of BB for common insn sequences. When found,
2178 share code between them by redirecting control flow. Return true if
2179 any changes made. */
2182 try_crossjump_bb (int mode
, basic_block bb
)
2184 edge e
, e2
, fallthru
;
2186 unsigned max
, ix
, ix2
;
2188 /* Nothing to do if there is not at least two incoming edges. */
2189 if (EDGE_COUNT (bb
->preds
) < 2)
2192 /* Don't crossjump if this block ends in a computed jump,
2193 unless we are optimizing for size. */
2194 if (optimize_bb_for_size_p (bb
)
2195 && bb
!= EXIT_BLOCK_PTR_FOR_FN (cfun
)
2196 && computed_jump_p (BB_END (bb
)))
2199 /* If we are partitioning hot/cold basic blocks, we don't want to
2200 mess up unconditional or indirect jumps that cross between hot
2203 Basic block partitioning may result in some jumps that appear to
2204 be optimizable (or blocks that appear to be mergeable), but which really
2205 must be left untouched (they are required to make it safely across
2206 partition boundaries). See the comments at the top of
2207 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
2209 if (BB_PARTITION (EDGE_PRED (bb
, 0)->src
) !=
2210 BB_PARTITION (EDGE_PRED (bb
, 1)->src
)
2211 || (EDGE_PRED (bb
, 0)->flags
& EDGE_CROSSING
))
2214 /* It is always cheapest to redirect a block that ends in a branch to
2215 a block that falls through into BB, as that adds no branches to the
2216 program. We'll try that combination first. */
2218 max
= PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES
);
2220 if (EDGE_COUNT (bb
->preds
) > max
)
2223 fallthru
= find_fallthru_edge (bb
->preds
);
2226 for (ix
= 0; ix
< EDGE_COUNT (bb
->preds
);)
2228 e
= EDGE_PRED (bb
, ix
);
2231 /* As noted above, first try with the fallthru predecessor (or, a
2232 fallthru predecessor if we are in cfglayout mode). */
2235 /* Don't combine the fallthru edge into anything else.
2236 If there is a match, we'll do it the other way around. */
2239 /* If nothing changed since the last attempt, there is nothing
2242 && !((e
->src
->flags
& BB_MODIFIED
)
2243 || (fallthru
->src
->flags
& BB_MODIFIED
)))
2246 if (try_crossjump_to_edge (mode
, e
, fallthru
, dir_forward
))
2254 /* Non-obvious work limiting check: Recognize that we're going
2255 to call try_crossjump_bb on every basic block. So if we have
2256 two blocks with lots of outgoing edges (a switch) and they
2257 share lots of common destinations, then we would do the
2258 cross-jump check once for each common destination.
2260 Now, if the blocks actually are cross-jump candidates, then
2261 all of their destinations will be shared. Which means that
2262 we only need check them for cross-jump candidacy once. We
2263 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2264 choosing to do the check from the block for which the edge
2265 in question is the first successor of A. */
2266 if (EDGE_SUCC (e
->src
, 0) != e
)
2269 for (ix2
= 0; ix2
< EDGE_COUNT (bb
->preds
); ix2
++)
2271 e2
= EDGE_PRED (bb
, ix2
);
2276 /* We've already checked the fallthru edge above. */
2280 /* The "first successor" check above only prevents multiple
2281 checks of crossjump(A,B). In order to prevent redundant
2282 checks of crossjump(B,A), require that A be the block
2283 with the lowest index. */
2284 if (e
->src
->index
> e2
->src
->index
)
2287 /* If nothing changed since the last attempt, there is nothing
2290 && !((e
->src
->flags
& BB_MODIFIED
)
2291 || (e2
->src
->flags
& BB_MODIFIED
)))
2294 /* Both e and e2 are not fallthru edges, so we can crossjump in either
2296 if (try_crossjump_to_edge (mode
, e
, e2
, dir_both
))
2306 crossjumps_occurred
= true;
2311 /* Search the successors of BB for common insn sequences. When found,
2312 share code between them by moving it across the basic block
2313 boundary. Return true if any changes made. */
2316 try_head_merge_bb (basic_block bb
)
2318 basic_block final_dest_bb
= NULL
;
2319 int max_match
= INT_MAX
;
2321 rtx_insn
**headptr
, **currptr
, **nextptr
;
2322 bool changed
, moveall
;
2324 rtx_insn
*e0_last_head
;
2326 rtx_insn
*move_before
;
2327 unsigned nedges
= EDGE_COUNT (bb
->succs
);
2328 rtx_insn
*jump
= BB_END (bb
);
2329 regset live
, live_union
;
2331 /* Nothing to do if there is not at least two outgoing edges. */
2335 /* Don't crossjump if this block ends in a computed jump,
2336 unless we are optimizing for size. */
2337 if (optimize_bb_for_size_p (bb
)
2338 && bb
!= EXIT_BLOCK_PTR_FOR_FN (cfun
)
2339 && computed_jump_p (BB_END (bb
)))
2342 cond
= get_condition (jump
, &move_before
, true, false);
2343 if (cond
== NULL_RTX
)
2345 if (HAVE_cc0
&& reg_mentioned_p (cc0_rtx
, jump
))
2346 move_before
= prev_nonnote_nondebug_insn (jump
);
2351 for (ix
= 0; ix
< nedges
; ix
++)
2352 if (EDGE_SUCC (bb
, ix
)->dest
== EXIT_BLOCK_PTR_FOR_FN (cfun
))
2355 for (ix
= 0; ix
< nedges
; ix
++)
2357 edge e
= EDGE_SUCC (bb
, ix
);
2358 basic_block other_bb
= e
->dest
;
2360 if (df_get_bb_dirty (other_bb
))
2362 block_was_dirty
= true;
2366 if (e
->flags
& EDGE_ABNORMAL
)
2369 /* Normally, all destination blocks must only be reachable from this
2370 block, i.e. they must have one incoming edge.
2372 There is one special case we can handle, that of multiple consecutive
2373 jumps where the first jumps to one of the targets of the second jump.
2374 This happens frequently in switch statements for default labels.
2375 The structure is as follows:
2381 jump with targets A, B, C, D...
2383 has two incoming edges, from FINAL_DEST_BB and BB
2385 In this case, we can try to move the insns through BB and into
2387 if (EDGE_COUNT (other_bb
->preds
) != 1)
2389 edge incoming_edge
, incoming_bb_other_edge
;
2392 if (final_dest_bb
!= NULL
2393 || EDGE_COUNT (other_bb
->preds
) != 2)
2396 /* We must be able to move the insns across the whole block. */
2397 move_before
= BB_HEAD (bb
);
2398 while (!NONDEBUG_INSN_P (move_before
))
2399 move_before
= NEXT_INSN (move_before
);
2401 if (EDGE_COUNT (bb
->preds
) != 1)
2403 incoming_edge
= EDGE_PRED (bb
, 0);
2404 final_dest_bb
= incoming_edge
->src
;
2405 if (EDGE_COUNT (final_dest_bb
->succs
) != 2)
2407 FOR_EACH_EDGE (incoming_bb_other_edge
, ei
, final_dest_bb
->succs
)
2408 if (incoming_bb_other_edge
!= incoming_edge
)
2410 if (incoming_bb_other_edge
->dest
!= other_bb
)
2415 e0
= EDGE_SUCC (bb
, 0);
2416 e0_last_head
= NULL
;
2419 for (ix
= 1; ix
< nedges
; ix
++)
2421 edge e
= EDGE_SUCC (bb
, ix
);
2422 rtx_insn
*e0_last
, *e_last
;
2425 nmatch
= flow_find_head_matching_sequence (e0
->dest
, e
->dest
,
2426 &e0_last
, &e_last
, 0);
2430 if (nmatch
< max_match
)
2433 e0_last_head
= e0_last
;
2437 /* If we matched an entire block, we probably have to avoid moving the
2440 && e0_last_head
== BB_END (e0
->dest
)
2441 && (find_reg_note (e0_last_head
, REG_EH_REGION
, 0)
2442 || control_flow_insn_p (e0_last_head
)))
2447 e0_last_head
= prev_real_nondebug_insn (e0_last_head
);
2453 /* We must find a union of the live registers at each of the end points. */
2454 live
= BITMAP_ALLOC (NULL
);
2455 live_union
= BITMAP_ALLOC (NULL
);
2457 currptr
= XNEWVEC (rtx_insn
*, nedges
);
2458 headptr
= XNEWVEC (rtx_insn
*, nedges
);
2459 nextptr
= XNEWVEC (rtx_insn
*, nedges
);
2461 for (ix
= 0; ix
< nedges
; ix
++)
2464 basic_block merge_bb
= EDGE_SUCC (bb
, ix
)->dest
;
2465 rtx_insn
*head
= BB_HEAD (merge_bb
);
2467 while (!NONDEBUG_INSN_P (head
))
2468 head
= NEXT_INSN (head
);
2472 /* Compute the end point and live information */
2473 for (j
= 1; j
< max_match
; j
++)
2475 head
= NEXT_INSN (head
);
2476 while (!NONDEBUG_INSN_P (head
));
2477 simulate_backwards_to_point (merge_bb
, live
, head
);
2478 IOR_REG_SET (live_union
, live
);
2481 /* If we're moving across two blocks, verify the validity of the
2482 first move, then adjust the target and let the loop below deal
2483 with the final move. */
2484 if (final_dest_bb
!= NULL
)
2486 rtx_insn
*move_upto
;
2488 moveall
= can_move_insns_across (currptr
[0], e0_last_head
, move_before
,
2489 jump
, e0
->dest
, live_union
,
2493 if (move_upto
== NULL_RTX
)
2496 while (e0_last_head
!= move_upto
)
2498 df_simulate_one_insn_backwards (e0
->dest
, e0_last_head
,
2500 e0_last_head
= PREV_INSN (e0_last_head
);
2503 if (e0_last_head
== NULL_RTX
)
2506 jump
= BB_END (final_dest_bb
);
2507 cond
= get_condition (jump
, &move_before
, true, false);
2508 if (cond
== NULL_RTX
)
2510 if (HAVE_cc0
&& reg_mentioned_p (cc0_rtx
, jump
))
2511 move_before
= prev_nonnote_nondebug_insn (jump
);
2519 rtx_insn
*move_upto
;
2520 moveall
= can_move_insns_across (currptr
[0], e0_last_head
,
2521 move_before
, jump
, e0
->dest
, live_union
,
2523 if (!moveall
&& move_upto
== NULL_RTX
)
2525 if (jump
== move_before
)
2528 /* Try again, using a different insertion point. */
2531 /* Don't try moving before a cc0 user, as that may invalidate
2533 if (HAVE_cc0
&& reg_mentioned_p (cc0_rtx
, jump
))
2539 if (final_dest_bb
&& !moveall
)
2540 /* We haven't checked whether a partial move would be OK for the first
2541 move, so we have to fail this case. */
2547 if (currptr
[0] == move_upto
)
2549 for (ix
= 0; ix
< nedges
; ix
++)
2551 rtx_insn
*curr
= currptr
[ix
];
2553 curr
= NEXT_INSN (curr
);
2554 while (!NONDEBUG_INSN_P (curr
));
2559 /* If we can't currently move all of the identical insns, remember
2560 each insn after the range that we'll merge. */
2562 for (ix
= 0; ix
< nedges
; ix
++)
2564 rtx_insn
*curr
= currptr
[ix
];
2566 curr
= NEXT_INSN (curr
);
2567 while (!NONDEBUG_INSN_P (curr
));
2571 reorder_insns (headptr
[0], currptr
[0], PREV_INSN (move_before
));
2572 df_set_bb_dirty (EDGE_SUCC (bb
, 0)->dest
);
2573 if (final_dest_bb
!= NULL
)
2574 df_set_bb_dirty (final_dest_bb
);
2575 df_set_bb_dirty (bb
);
2576 for (ix
= 1; ix
< nedges
; ix
++)
2578 df_set_bb_dirty (EDGE_SUCC (bb
, ix
)->dest
);
2579 delete_insn_chain (headptr
[ix
], currptr
[ix
], false);
2583 if (jump
== move_before
)
2586 /* For the unmerged insns, try a different insertion point. */
2589 /* Don't try moving before a cc0 user, as that may invalidate
2591 if (HAVE_cc0
&& reg_mentioned_p (cc0_rtx
, jump
))
2594 for (ix
= 0; ix
< nedges
; ix
++)
2595 currptr
[ix
] = headptr
[ix
] = nextptr
[ix
];
2605 crossjumps_occurred
|= changed
;
2610 /* Return true if BB contains just bb note, or bb note followed
2611 by only DEBUG_INSNs. */
2614 trivially_empty_bb_p (basic_block bb
)
2616 rtx_insn
*insn
= BB_END (bb
);
2620 if (insn
== BB_HEAD (bb
))
2622 if (!DEBUG_INSN_P (insn
))
2624 insn
= PREV_INSN (insn
);
2628 /* Return true if BB contains just a return and possibly a USE of the
2629 return value. Fill in *RET and *USE with the return and use insns
2630 if any found, otherwise NULL. All CLOBBERs are ignored. */
2633 bb_is_just_return (basic_block bb
, rtx_insn
**ret
, rtx_insn
**use
)
2638 if (bb
== EXIT_BLOCK_PTR_FOR_FN (cfun
))
2641 FOR_BB_INSNS (bb
, insn
)
2642 if (NONDEBUG_INSN_P (insn
))
2644 rtx pat
= PATTERN (insn
);
2646 if (!*ret
&& ANY_RETURN_P (pat
))
2648 else if (!*ret
&& !*use
&& GET_CODE (pat
) == USE
2649 && REG_P (XEXP (pat
, 0))
2650 && REG_FUNCTION_VALUE_P (XEXP (pat
, 0)))
2652 else if (GET_CODE (pat
) != CLOBBER
)
2659 /* Do simple CFG optimizations - basic block merging, simplifying of jump
2660 instructions etc. Return nonzero if changes were made. */
2663 try_optimize_cfg (int mode
)
/* Iterate over the CFG until a fixed point: delete trivially dead
   blocks, remove unused labels, merge fallthru/single-successor
   blocks, turn (conditional) jumps-to-return into returns, simplify
   and forward branches, and cross-jump.  MODE is a bitmask of
   CLEANUP_* flags; returns true if anything changed.
   NOTE(review): this region is extraction-garbled -- statements are
   split across lines and some original lines (braces, declarations,
   format arguments) were dropped.  Kept byte-identical here; restore
   from upstream before compiling.  */
2665 bool changed_overall
= false;
2668 basic_block bb
, b
, next
;
2670 if (mode
& (CLEANUP_CROSSJUMP
| CLEANUP_THREADING
))
2673 crossjumps_occurred
= false;
2675 FOR_EACH_BB_FN (bb
, cfun
)
2676 update_forwarder_flag (bb
);
2678 if (! targetm
.cannot_modify_jumps_p ())
2681 /* Attempt to merge blocks as made possible by edge removal. If
2682 a block has only one successor, and the successor has only
2683 one predecessor, they may be combined. */
2686 block_was_dirty
= false;
2692 "\n\ntry_optimize_cfg iteration %i\n\n",
2695 for (b
= ENTRY_BLOCK_PTR_FOR_FN (cfun
)->next_bb
; b
2696 != EXIT_BLOCK_PTR_FOR_FN (cfun
);)
2700 bool changed_here
= false;
2702 /* Delete trivially dead basic blocks. This is either
2703 blocks with no predecessors, or empty blocks with no
2704 successors. However if the empty block with no
2705 successors is the successor of the ENTRY_BLOCK, it is
2706 kept. This ensures that the ENTRY_BLOCK will have a
2707 successor which is a precondition for many RTL
2708 passes. Empty blocks may result from expanding
2709 __builtin_unreachable (). */
2710 if (EDGE_COUNT (b
->preds
) == 0
2711 || (EDGE_COUNT (b
->succs
) == 0
2712 && trivially_empty_bb_p (b
)
2713 && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun
))->dest
2717 if (EDGE_COUNT (b
->preds
) > 0)
/* Before deleting the block, make sure any fallthru
   predecessor keeps (or gains) a barrier after it.  */
2722 if (current_ir_type () == IR_RTL_CFGLAYOUT
)
2725 for (insn
= BB_FOOTER (b
);
2726 insn
; insn
= NEXT_INSN (insn
))
2727 if (BARRIER_P (insn
))
2730 FOR_EACH_EDGE (e
, ei
, b
->preds
)
2731 if ((e
->flags
& EDGE_FALLTHRU
))
2734 && BB_FOOTER (e
->src
) == NULL
)
2736 BB_FOOTER (e
->src
) = BB_FOOTER (b
);
2737 BB_FOOTER (b
) = NULL
;
2740 emit_barrier_after_bb (e
->src
);
2745 rtx_insn
*last
= get_last_bb_insn (b
);
2746 if (last
&& BARRIER_P (last
))
2747 FOR_EACH_EDGE (e
, ei
, b
->preds
)
2748 if ((e
->flags
& EDGE_FALLTHRU
))
2749 emit_barrier_after (BB_END (e
->src
));
2752 delete_basic_block (b
);
2754 /* Avoid trying to remove the exit block. */
2755 b
= (c
== ENTRY_BLOCK_PTR_FOR_FN (cfun
) ? c
->next_bb
: c
);
2759 /* Remove code labels no longer used. */
2760 if (single_pred_p (b
)
2761 && (single_pred_edge (b
)->flags
& EDGE_FALLTHRU
)
2762 && !(single_pred_edge (b
)->flags
& EDGE_COMPLEX
)
2763 && LABEL_P (BB_HEAD (b
))
2764 && !LABEL_PRESERVE_P (BB_HEAD (b
))
2765 /* If the previous block ends with a branch to this
2766 block, we can't delete the label. Normally this
2767 is a condjump that is yet to be simplified, but
2768 if CASE_DROPS_THRU, this can be a tablejump with
2769 some element going to the same place as the
2770 default (fallthru). */
2771 && (single_pred (b
) == ENTRY_BLOCK_PTR_FOR_FN (cfun
)
2772 || !JUMP_P (BB_END (single_pred (b
)))
2773 || ! label_is_jump_target_p (BB_HEAD (b
),
2774 BB_END (single_pred (b
)))))
2776 delete_insn (BB_HEAD (b
));
2778 fprintf (dump_file
, "Deleted label in block %i.\n",
2782 /* If we fall through an empty block, we can remove it. */
2783 if (!(mode
& (CLEANUP_CFGLAYOUT
| CLEANUP_NO_INSN_DEL
))
2784 && single_pred_p (b
)
2785 && (single_pred_edge (b
)->flags
& EDGE_FALLTHRU
)
2786 && !LABEL_P (BB_HEAD (b
))
2787 && FORWARDER_BLOCK_P (b
)
2788 /* Note that forwarder_block_p true ensures that
2789 there is a successor for this block. */
2790 && (single_succ_edge (b
)->flags
& EDGE_FALLTHRU
)
2791 && n_basic_blocks_for_fn (cfun
) > NUM_FIXED_BLOCKS
+ 1)
2795 "Deleting fallthru block %i.\n",
2798 c
= ((b
->prev_bb
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
2799 ? b
->next_bb
: b
->prev_bb
);
2800 redirect_edge_succ_nodup (single_pred_edge (b
),
2802 delete_basic_block (b
);
2808 /* Merge B with its single successor, if any. */
2809 if (single_succ_p (b
)
2810 && (s
= single_succ_edge (b
))
2811 && !(s
->flags
& EDGE_COMPLEX
)
2812 && (c
= s
->dest
) != EXIT_BLOCK_PTR_FOR_FN (cfun
)
2813 && single_pred_p (c
)
2816 /* When not in cfg_layout mode use code aware of reordering
2817 INSN. This code possibly creates new basic blocks so it
2818 does not fit merge_blocks interface and is kept here in
2819 hope that it will become useless once more of compiler
2820 is transformed to use cfg_layout mode. */
2822 if ((mode
& CLEANUP_CFGLAYOUT
)
2823 && can_merge_blocks_p (b
, c
))
2825 merge_blocks (b
, c
);
2826 update_forwarder_flag (b
);
2827 changed_here
= true;
2829 else if (!(mode
& CLEANUP_CFGLAYOUT
)
2830 /* If the jump insn has side effects,
2831 we can't kill the edge. */
2832 && (!JUMP_P (BB_END (b
))
2833 || (reload_completed
2834 ? simplejump_p (BB_END (b
))
2835 : (onlyjump_p (BB_END (b
))
2836 && !tablejump_p (BB_END (b
),
2838 && (next
= merge_blocks_move (s
, b
, c
, mode
)))
2841 changed_here
= true;
2845 /* Try to change a branch to a return to just that return. */
2846 rtx_insn
*ret
, *use
;
2847 if (single_succ_p (b
)
2848 && onlyjump_p (BB_END (b
))
2849 && bb_is_just_return (single_succ (b
), &ret
, &use
))
2851 if (redirect_jump (as_a
<rtx_jump_insn
*> (BB_END (b
)),
2855 emit_insn_before (copy_insn (PATTERN (use
)),
2858 fprintf (dump_file
, "Changed jump %d->%d to return.\n",
2859 b
->index
, single_succ (b
)->index
);
2860 redirect_edge_succ (single_succ_edge (b
),
2861 EXIT_BLOCK_PTR_FOR_FN (cfun
));
2862 single_succ_edge (b
)->flags
&= ~EDGE_CROSSING
;
2863 changed_here
= true;
2867 /* Try to change a conditional branch to a return to the
2868 respective conditional return. */
2869 if (EDGE_COUNT (b
->succs
) == 2
2870 && any_condjump_p (BB_END (b
))
2871 && bb_is_just_return (BRANCH_EDGE (b
)->dest
, &ret
, &use
))
2873 if (redirect_jump (as_a
<rtx_jump_insn
*> (BB_END (b
)),
2877 emit_insn_before (copy_insn (PATTERN (use
)),
2880 fprintf (dump_file
, "Changed conditional jump %d->%d "
2881 "to conditional return.\n",
2882 b
->index
, BRANCH_EDGE (b
)->dest
->index
);
2883 redirect_edge_succ (BRANCH_EDGE (b
),
2884 EXIT_BLOCK_PTR_FOR_FN (cfun
));
2885 BRANCH_EDGE (b
)->flags
&= ~EDGE_CROSSING
;
2886 changed_here
= true;
2890 /* Try to flip a conditional branch that falls through to
2891 a return so that it becomes a conditional return and a
2892 new jump to the original branch target. */
2893 if (EDGE_COUNT (b
->succs
) == 2
2894 && BRANCH_EDGE (b
)->dest
!= EXIT_BLOCK_PTR_FOR_FN (cfun
)
2895 && any_condjump_p (BB_END (b
))
2896 && bb_is_just_return (FALLTHRU_EDGE (b
)->dest
, &ret
, &use
))
2898 if (invert_jump (as_a
<rtx_jump_insn
*> (BB_END (b
)),
2899 JUMP_LABEL (BB_END (b
)), 0))
2901 basic_block new_ft
= BRANCH_EDGE (b
)->dest
;
2902 if (redirect_jump (as_a
<rtx_jump_insn
*> (BB_END (b
)),
2906 emit_insn_before (copy_insn (PATTERN (use
)),
2909 fprintf (dump_file
, "Changed conditional jump "
2910 "%d->%d to conditional return, adding "
2911 "fall-through jump.\n",
2912 b
->index
, BRANCH_EDGE (b
)->dest
->index
);
2913 redirect_edge_succ (BRANCH_EDGE (b
),
2914 EXIT_BLOCK_PTR_FOR_FN (cfun
));
2915 BRANCH_EDGE (b
)->flags
&= ~EDGE_CROSSING
;
/* The branch and fallthru edges swap roles, so swap
   their probabilities and refresh the note.  */
2916 std::swap (BRANCH_EDGE (b
)->probability
,
2917 FALLTHRU_EDGE (b
)->probability
);
2918 update_br_prob_note (b
);
2919 basic_block jb
= force_nonfallthru (FALLTHRU_EDGE (b
));
2920 notice_new_block (jb
);
2921 if (!redirect_jump (as_a
<rtx_jump_insn
*> (BB_END (jb
)),
2922 block_label (new_ft
), 0))
2924 redirect_edge_succ (single_succ_edge (jb
), new_ft
);
2925 changed_here
= true;
2929 /* Invert the jump back to what it was. This should
2931 if (!invert_jump (as_a
<rtx_jump_insn
*> (BB_END (b
)),
2932 JUMP_LABEL (BB_END (b
)), 0))
2938 /* Simplify branch over branch. */
2939 if ((mode
& CLEANUP_EXPENSIVE
)
2940 && !(mode
& CLEANUP_CFGLAYOUT
)
2941 && try_simplify_condjump (b
))
2942 changed_here
= true;
2944 /* If B has a single outgoing edge, but uses a
2945 non-trivial jump instruction without side-effects, we
2946 can either delete the jump entirely, or replace it
2947 with a simple unconditional jump. */
2948 if (single_succ_p (b
)
2949 && single_succ (b
) != EXIT_BLOCK_PTR_FOR_FN (cfun
)
2950 && onlyjump_p (BB_END (b
))
2951 && !CROSSING_JUMP_P (BB_END (b
))
2952 && try_redirect_by_replacing_jump (single_succ_edge (b
),
2954 (mode
& CLEANUP_CFGLAYOUT
) != 0))
2956 update_forwarder_flag (b
);
2957 changed_here
= true;
2960 /* Simplify branch to branch. */
2961 if (try_forward_edges (mode
, b
))
2963 update_forwarder_flag (b
);
2964 changed_here
= true;
2967 /* Look for shared code between blocks. */
2968 if ((mode
& CLEANUP_CROSSJUMP
)
2969 && try_crossjump_bb (mode
, b
))
2970 changed_here
= true;
2972 if ((mode
& CLEANUP_CROSSJUMP
)
2973 /* This can lengthen register lifetimes. Do it only after
2976 && try_head_merge_bb (b
))
2977 changed_here
= true;
2979 /* Don't get confused by the index shift caused by
2987 if ((mode
& CLEANUP_CROSSJUMP
)
2988 && try_crossjump_bb (mode
, EXIT_BLOCK_PTR_FOR_FN (cfun
)))
/* block_was_dirty is set when head-merging saw stale liveness
   info; that only happens under CLEANUP_CROSSJUMP, asserted
   below before re-running the dataflow analysis.  */
2991 if (block_was_dirty
)
2993 /* This should only be set by head-merging. */
2994 gcc_assert (mode
& CLEANUP_CROSSJUMP
);
3000 /* Edge forwarding in particular can cause hot blocks previously
3001 reached by both hot and cold blocks to become dominated only
3002 by cold blocks. This will cause the verification below to fail,
3003 and lead to now cold code in the hot section. This is not easy
3004 to detect and fix during edge forwarding, and in some cases
3005 is only visible after newly unreachable blocks are deleted,
3006 which will be done in fixup_partitions. */
3007 if ((mode
& CLEANUP_NO_PARTITIONING
) == 0)
3009 fixup_partitions ();
3010 checking_verify_flow_info ();
/* Accumulate this iteration's progress into the overall result.  */
3014 changed_overall
|= changed
;
/* Clear the per-pass block flags before returning.  */
3020 FOR_ALL_BB_FN (b
, cfun
)
3021 b
->flags
&= ~(BB_FORWARDER_BLOCK
| BB_NONTHREADABLE_BLOCK
);
3023 return changed_overall
;
3026 /* Delete all unreachable basic blocks. */
3029 delete_unreachable_blocks (void)
3031 bool changed
= false;
3032 basic_block b
, prev_bb
;
3034 find_unreachable_blocks ();
3036 /* When we're in GIMPLE mode and there may be debug bind insns, we
3037 should delete blocks in reverse dominator order, so as to get a
3038 chance to substitute all released DEFs into debug bind stmts. If
3039 we don't have dominators information, walking blocks backward
3040 gets us a better chance of retaining most debug information than
3042 if (MAY_HAVE_DEBUG_BIND_INSNS
&& current_ir_type () == IR_GIMPLE
3043 && dom_info_available_p (CDI_DOMINATORS
))
3045 for (b
= EXIT_BLOCK_PTR_FOR_FN (cfun
)->prev_bb
;
3046 b
!= ENTRY_BLOCK_PTR_FOR_FN (cfun
); b
= prev_bb
)
3048 prev_bb
= b
->prev_bb
;
3050 if (!(b
->flags
& BB_REACHABLE
))
3052 /* Speed up the removal of blocks that don't dominate
3053 others. Walking backwards, this should be the common
3055 if (!first_dom_son (CDI_DOMINATORS
, b
))
3056 delete_basic_block (b
);
3060 = get_all_dominated_blocks (CDI_DOMINATORS
, b
);
3066 prev_bb
= b
->prev_bb
;
3068 gcc_assert (!(b
->flags
& BB_REACHABLE
));
3070 delete_basic_block (b
);
3082 for (b
= EXIT_BLOCK_PTR_FOR_FN (cfun
)->prev_bb
;
3083 b
!= ENTRY_BLOCK_PTR_FOR_FN (cfun
); b
= prev_bb
)
3085 prev_bb
= b
->prev_bb
;
3087 if (!(b
->flags
& BB_REACHABLE
))
3089 delete_basic_block (b
);
3096 tidy_fallthru_edges ();
3100 /* Delete any jump tables never referenced. We can't delete them at the
3101 time of removing tablejump insn as they are referenced by the preceding
3102 insns computing the destination, so we delay deleting and garbagecollect
3103 them once life information is computed. */
3105 delete_dead_jumptables (void)
3109 /* A dead jump table does not belong to any basic block. Scan insns
3110 between two adjacent basic blocks. */
3111 FOR_EACH_BB_FN (bb
, cfun
)
3113 rtx_insn
*insn
, *next
;
3115 for (insn
= NEXT_INSN (BB_END (bb
));
3116 insn
&& !NOTE_INSN_BASIC_BLOCK_P (insn
);
3119 next
= NEXT_INSN (insn
);
3121 && LABEL_NUSES (insn
) == LABEL_PRESERVE_P (insn
)
3122 && JUMP_TABLE_DATA_P (next
))
3124 rtx_insn
*label
= insn
, *jump
= next
;
3127 fprintf (dump_file
, "Dead jumptable %i removed\n",
3130 next
= NEXT_INSN (next
);
3132 delete_insn (label
);
3139 /* Tidy the CFG by deleting unreachable code and whatnot. */
3142 cleanup_cfg (int mode
)
3144 bool changed
= false;
3146 /* Set the cfglayout mode flag here. We could update all the callers
3147 but that is just inconvenient, especially given that we eventually
3148 want to have cfglayout mode as the default. */
3149 if (current_ir_type () == IR_RTL_CFGLAYOUT
)
3150 mode
|= CLEANUP_CFGLAYOUT
;
3152 timevar_push (TV_CLEANUP_CFG
);
3153 if (delete_unreachable_blocks ())
3156 /* We've possibly created trivially dead code. Cleanup it right
3157 now to introduce more opportunities for try_optimize_cfg. */
3158 if (!(mode
& (CLEANUP_NO_INSN_DEL
))
3159 && !reload_completed
)
3160 delete_trivially_dead_insns (get_insns (), max_reg_num ());
3165 /* To tail-merge blocks ending in the same noreturn function (e.g.
3166 a call to abort) we have to insert fake edges to exit. Do this
3167 here once. The fake edges do not interfere with any other CFG
3169 if (mode
& CLEANUP_CROSSJUMP
)
3170 add_noreturn_fake_exit_edges ();
3172 if (!dbg_cnt (cfg_cleanup
))
3175 while (try_optimize_cfg (mode
))
3177 delete_unreachable_blocks (), changed
= true;
3178 if (!(mode
& CLEANUP_NO_INSN_DEL
))
3180 /* Try to remove some trivially dead insns when doing an expensive
3181 cleanup. But delete_trivially_dead_insns doesn't work after
3182 reload (it only handles pseudos) and run_fast_dce is too costly
3183 to run in every iteration.
3185 For effective cross jumping, we really want to run a fast DCE to
3186 clean up any dead conditions, or they get in the way of performing
3189 Other transformations in cleanup_cfg are not so sensitive to dead
3190 code, so delete_trivially_dead_insns or even doing nothing at all
3192 if ((mode
& CLEANUP_EXPENSIVE
) && !reload_completed
3193 && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3195 if ((mode
& CLEANUP_CROSSJUMP
) && crossjumps_occurred
)
3198 mode
&= ~CLEANUP_FORCE_FAST_DCE
;
3205 if (mode
& CLEANUP_CROSSJUMP
)
3206 remove_fake_exit_edges ();
3208 if (mode
& CLEANUP_FORCE_FAST_DCE
)
3211 /* Don't call delete_dead_jumptables in cfglayout mode, because
3212 that function assumes that jump tables are in the insns stream.
3213 But we also don't _have_ to delete dead jumptables in cfglayout
3214 mode because we shouldn't even be looking at things that are
3215 not in a basic block. Dead jumptables are cleaned up when
3216 going out of cfglayout mode. */
3217 if (!(mode
& CLEANUP_CFGLAYOUT
))
3218 delete_dead_jumptables ();
3220 /* ??? We probably do this way too often. */
3223 || (mode
& CLEANUP_CFG_CHANGED
)))
3225 timevar_push (TV_REPAIR_LOOPS
);
3226 /* The above doesn't preserve dominance info if available. */
3227 gcc_assert (!dom_info_available_p (CDI_DOMINATORS
));
3228 calculate_dominance_info (CDI_DOMINATORS
);
3229 fix_loop_structure (NULL
);
3230 free_dominance_info (CDI_DOMINATORS
);
3231 timevar_pop (TV_REPAIR_LOOPS
);
3234 timevar_pop (TV_CLEANUP_CFG
);
3241 const pass_data pass_data_jump
=
3243 RTL_PASS
, /* type */
3245 OPTGROUP_NONE
, /* optinfo_flags */
3246 TV_JUMP
, /* tv_id */
3247 0, /* properties_required */
3248 0, /* properties_provided */
3249 0, /* properties_destroyed */
3250 0, /* todo_flags_start */
3251 0, /* todo_flags_finish */
3254 class pass_jump
: public rtl_opt_pass
3257 pass_jump (gcc::context
*ctxt
)
3258 : rtl_opt_pass (pass_data_jump
, ctxt
)
3261 /* opt_pass methods: */
3262 virtual unsigned int execute (function
*);
3264 }; // class pass_jump
3267 pass_jump::execute (function
*)
3269 delete_trivially_dead_insns (get_insns (), max_reg_num ());
3271 dump_flow_info (dump_file
, dump_flags
);
3272 cleanup_cfg ((optimize
? CLEANUP_EXPENSIVE
: 0)
3273 | (flag_thread_jumps
? CLEANUP_THREADING
: 0));
3280 make_pass_jump (gcc::context
*ctxt
)
3282 return new pass_jump (ctxt
);
3287 const pass_data pass_data_postreload_jump
=
3289 RTL_PASS
, /* type */
3290 "postreload_jump", /* name */
3291 OPTGROUP_NONE
, /* optinfo_flags */
3292 TV_JUMP
, /* tv_id */
3293 0, /* properties_required */
3294 0, /* properties_provided */
3295 0, /* properties_destroyed */
3296 0, /* todo_flags_start */
3297 0, /* todo_flags_finish */
3300 class pass_postreload_jump
: public rtl_opt_pass
3303 pass_postreload_jump (gcc::context
*ctxt
)
3304 : rtl_opt_pass (pass_data_postreload_jump
, ctxt
)
3307 /* opt_pass methods: */
3308 virtual unsigned int execute (function
*);
3310 }; // class pass_postreload_jump
3313 pass_postreload_jump::execute (function
*)
3315 cleanup_cfg (flag_thread_jumps
? CLEANUP_THREADING
: 0);
3322 make_pass_postreload_jump (gcc::context
*ctxt
)
3324 return new pass_postreload_jump (ctxt
);
3329 const pass_data pass_data_jump2
=
3331 RTL_PASS
, /* type */
3333 OPTGROUP_NONE
, /* optinfo_flags */
3334 TV_JUMP
, /* tv_id */
3335 0, /* properties_required */
3336 0, /* properties_provided */
3337 0, /* properties_destroyed */
3338 0, /* todo_flags_start */
3339 0, /* todo_flags_finish */
3342 class pass_jump2
: public rtl_opt_pass
3345 pass_jump2 (gcc::context
*ctxt
)
3346 : rtl_opt_pass (pass_data_jump2
, ctxt
)
3349 /* opt_pass methods: */
3350 virtual unsigned int execute (function
*)
3352 cleanup_cfg (flag_crossjumping
? CLEANUP_CROSSJUMP
: 0);
3356 }; // class pass_jump2
3361 make_pass_jump2 (gcc::context
*ctxt
)
3363 return new pass_jump2 (ctxt
);