/* Control flow graph manipulation code for GNU compiler.
- Copyright (C) 1987-2014 Free Software Foundation, Inc.
+ Copyright (C) 1987-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
+#include "target.h"
+#include "rtl.h"
#include "tree.h"
-#include "hard-reg-set.h"
-#include "basic-block.h"
+#include "cfghooks.h"
+#include "df.h"
+#include "insn-config.h"
+#include "memmodel.h"
+#include "emit-rtl.h"
+#include "cfgrtl.h"
+#include "cfganal.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
#include "bb-reorder.h"
-#include "regs.h"
-#include "flags.h"
-#include "function.h"
-#include "except.h"
#include "rtl-error.h"
-#include "tm_p.h"
-#include "obstack.h"
#include "insn-attr.h"
-#include "insn-config.h"
+#include "dojump.h"
#include "expr.h"
-#include "target.h"
-#include "common/common-target.h"
#include "cfgloop.h"
-#include "ggc.h"
#include "tree-pass.h"
-#include "df.h"
+#include "print-rtl.h"
+
+/* Disable warnings about missing quoting in GCC diagnostics. */
+#if __GNUC__ >= 10
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wformat-diag"
+#endif
/* Holds the interesting leading and trailing notes for the function.
Only applicable if the CFG is in cfglayout mode. */
static rtx_insn *skip_insns_after_block (basic_block);
static void record_effective_endpoints (void);
-static rtx label_for_bb (basic_block);
static void fixup_reorder_chain (void);
void verify_insn_chain (void);
static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
static edge rtl_redirect_edge_and_branch (edge, basic_block);
static basic_block rtl_split_block (basic_block, void *);
-static void rtl_dump_bb (FILE *, basic_block, int, int);
+static void rtl_dump_bb (FILE *, basic_block, int, dump_flags_t);
static int rtl_verify_flow_info_1 (void);
static void rtl_make_forwarder_block (edge);
\f
return (!LABEL_PRESERVE_P (label)
/* User declared labels must be preserved. */
&& LABEL_NAME (label) == 0
- && !in_expr_list_p (forced_labels, label));
+ && !vec_safe_contains<rtx_insn *> (forced_labels,
+ const_cast<rtx_code_label *> (label)));
}
/* Delete INSN by patching it out. */
void
-delete_insn (rtx uncast_insn)
+delete_insn (rtx_insn *insn)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
rtx note;
bool really_delete = true;
if (really_delete)
{
/* If this insn has already been deleted, something is very wrong. */
- gcc_assert (!INSN_DELETED_P (insn));
+ gcc_assert (!insn->deleted ());
if (INSN_P (insn))
df_insn_delete (insn);
remove_insn (insn);
- INSN_DELETED_P (insn) = 1;
+ insn->set_deleted ();
}
/* If deleting a jump, decrement the use count of the label. Deleting
}
}
-/* Like delete_insn but also purge dead edges from BB. */
+/* Like delete_insn but also purge dead edges from BB.
+ Return true if any edges are eliminated. */
-void
+bool
delete_insn_and_edges (rtx_insn *insn)
{
bool purge = false;
- if (INSN_P (insn)
- && BLOCK_FOR_INSN (insn)
- && BB_END (BLOCK_FOR_INSN (insn)) == insn)
- purge = true;
+ if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
+ {
+ basic_block bb = BLOCK_FOR_INSN (insn);
+ if (BB_END (bb) == insn)
+ purge = true;
+ else if (DEBUG_INSN_P (BB_END (bb)))
+ for (rtx_insn *dinsn = NEXT_INSN (insn);
+ DEBUG_INSN_P (dinsn); dinsn = NEXT_INSN (dinsn))
+ if (BB_END (bb) == dinsn)
+ {
+ purge = true;
+ break;
+ }
+ }
delete_insn (insn);
if (purge)
- purge_dead_edges (BLOCK_FOR_INSN (insn));
+ return purge_dead_edges (BLOCK_FOR_INSN (insn));
+ return false;
}
/* Unlink a chain of insns between START and FINISH, leaving notes
insns that cannot be removed to NULL. */
void
-delete_insn_chain (rtx start, rtx finish, bool clear_bb)
+delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb)
{
- rtx_insn *prev, *current;
-
/* Unchain the insns one by one. It would be quicker to delete all of these
with a single unchaining, rather than one at a time, but we need to keep
the NOTE's. */
- current = safe_as_a <rtx_insn *> (finish);
+ rtx_insn *current = finish;
while (1)
{
- prev = PREV_INSN (current);
+ rtx_insn *prev = PREV_INSN (current);
if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
;
else
delete_insn (current);
- if (clear_bb && !INSN_DELETED_P (current))
+ if (clear_bb && !current->deleted ())
set_block_for_insn (current, NULL);
if (current == start)
unsigned int
pass_free_cfg::execute (function *)
{
-#ifdef DELAY_SLOTS
/* The resource.c machinery uses DF but the CFG isn't guaranteed to be
valid at that point so it would be too late to call df_analyze. */
- if (optimize > 0 && flag_delayed_branch)
+ if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
{
df_note_add_problem ();
df_analyze ();
}
-#endif
if (crtl->has_bb_partition)
insert_section_boundary_note ();
}
\f
-/* Like active_insn_p, except keep the return value clobber around
+/* Like active_insn_p, except keep the return value use or clobber around
even after reload. */
static bool
programs that fail to return a value. Its effect is to
keep the return value from being live across the entire
function. If we allow it to be skipped, we introduce the
- possibility for register lifetime confusion. */
- if (GET_CODE (PATTERN (insn)) == CLOBBER
+ possibility for register lifetime confusion.
+ Similarly, keep a USE of the function return value, otherwise
+ the USE is dropped and we could fail to thread jump if USE
+ appears on some paths and not on others, see PR90257. */
+ if ((GET_CODE (PATTERN (insn)) == CLOBBER
+ || GET_CODE (PATTERN (insn)) == USE)
&& REG_P (XEXP (PATTERN (insn), 0))
&& REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
return true;
{
rtx_insn *insn;
- if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
- || !single_succ_p (bb))
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || !single_succ_p (bb)
+ || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0)
return false;
for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
return NEXT_INSN (insn);
}
-/* Creates a new basic block just after basic block B by splitting
- everything after specified instruction I. */
+/* Creates a new basic block just after basic block BB by splitting
+ everything after specified instruction INSNP. */
static basic_block
rtl_split_block (basic_block bb, void *insnp)
static void
rtl_merge_blocks (basic_block a, basic_block b)
{
+ /* If B is a forwarder block whose outgoing edge has no location, we'll
+ propagate the locus of the edge between A and B onto it. */
+ const bool forward_edge_locus
+ = (b->flags & BB_FORWARDER_BLOCK) != 0
+ && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
rtx_insn *del_first = NULL, *del_last = NULL;
rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
- bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
int b_empty = 0;
if (dump_file)
del_first = a_end;
-#ifdef HAVE_cc0
/* If this was a conditional jump, we need to also delete
the insn that set cc0. */
- if (only_sets_cc0_p (prev))
+ if (HAVE_cc0 && only_sets_cc0_p (prev))
{
rtx_insn *tmp = prev;
prev = BB_HEAD (a);
del_first = tmp;
}
-#endif
a_end = PREV_INSN (del_first);
}
BB_HEAD (b) = b_empty ? NULL : b_head;
delete_insn_chain (del_first, del_last, true);
- /* When not optimizing and the edge is the only place in RTL which holds
- some unique locus, emit a nop with that locus in between. */
- if (!optimize)
+ /* If not optimizing, preserve the locus of the single edge between
+ blocks A and B if necessary by emitting a nop. */
+ if (!optimize
+ && !forward_edge_locus
+ && !DECL_IGNORED_P (current_function_decl))
{
emit_nop_for_unique_locus_between (a, b);
a_end = BB_END (a);
df_bb_delete (b->index);
- /* If B was a forwarder block, propagate the locus on the edge. */
- if (forwarder_p
- && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
+ if (forward_edge_locus)
EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
if (dump_file)
/* Return the label in the head of basic block BLOCK. Create one if it doesn't
exist. */
-rtx
+rtx_code_label *
block_label (basic_block block)
{
if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
- return NULL_RTX;
+ return NULL;
if (!LABEL_P (BB_HEAD (block)))
{
BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
}
- return BB_HEAD (block);
+ return as_a <rtx_code_label *> (BB_HEAD (block));
+}
+
+/* Remove all barriers from BB_FOOTER of a BB. */
+
+static void
+remove_barriers_from_footer (basic_block bb)
+{
+ rtx_insn *insn = BB_FOOTER (bb);
+
+ /* Remove barriers but keep jumptables. */
+ while (insn)
+ {
+ if (BARRIER_P (insn))
+ {
+ if (PREV_INSN (insn))
+ SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
+ else
+ BB_FOOTER (bb) = NEXT_INSN (insn);
+ if (NEXT_INSN (insn))
+ SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
+ }
+ if (LABEL_P (insn))
+ return;
+ insn = NEXT_INSN (insn);
+ }
}
/* Attempt to perform edge redirection by replacing possibly complex jump
/* In case we zap a conditional jump, we'll need to kill
the cc0 setter too. */
kill_from = insn;
-#ifdef HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, PATTERN (insn))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
&& only_sets_cc0_p (PREV_INSN (insn)))
kill_from = PREV_INSN (insn);
-#endif
/* See if we can create the fallthru edge. */
if (in_cfglayout || can_fallthru (src, target))
/* Selectively unlink whole insn chain. */
if (in_cfglayout)
{
- rtx_insn *insn = BB_FOOTER (src);
-
delete_insn_chain (kill_from, BB_END (src), false);
-
- /* Remove barriers but keep jumptables. */
- while (insn)
- {
- if (BARRIER_P (insn))
- {
- if (PREV_INSN (insn))
- SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
- else
- BB_FOOTER (src) = NEXT_INSN (insn);
- if (NEXT_INSN (insn))
- SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
- }
- if (LABEL_P (insn))
- break;
- insn = NEXT_INSN (insn);
- }
+ remove_barriers_from_footer (src);
}
else
delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
if (dump_file)
fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
INSN_UID (insn), e->dest->index, target->index);
- if (!redirect_jump (insn, block_label (target), 0))
+ if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
+ block_label (target), 0))
{
gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
return NULL;
/* Or replace possibly complicated jump insn by simple jump insn. */
else
{
- rtx target_label = block_label (target);
+ rtx_code_label *target_label = block_label (target);
rtx_insn *barrier;
- rtx label;
+ rtx_insn *label;
rtx_jump_table_data *table;
- emit_jump_insn_after_noloc (gen_jump (target_label), insn);
+ emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
JUMP_LABEL (BB_END (src)) = target_label;
LABEL_NUSES (target_label)++;
if (dump_file)
if (tablejump_p (insn, &label, &table))
delete_insn_chain (label, table, false);
- barrier = next_nonnote_insn (BB_END (src));
+ barrier = next_nonnote_nondebug_insn (BB_END (src));
if (!barrier || !BARRIER_P (barrier))
emit_barrier_after (BB_END (src));
else
else
e->flags = 0;
- e->probability = REG_BR_PROB_BASE;
- e->count = src->count;
+ e->probability = profile_probability::always ();
if (e->dest != target)
redirect_edge_succ (e, target);
{
rtvec vec;
int j;
- rtx new_label = block_label (new_bb);
+ rtx_code_label *new_label = block_label (new_bb);
if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
}
/* Handle casesi dispatch insns. */
- if ((tmp = single_set (insn)) != NULL
- && SET_DEST (tmp) == pc_rtx
- && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
- && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
- && XEXP (XEXP (SET_SRC (tmp), 2), 0) == old_label)
+ if ((tmp = tablejump_casesi_pattern (insn)) != NULL_RTX
+ && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label)
{
XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
new_label);
else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
{
int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
- rtx new_label, note;
+ rtx note;
if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
- new_label = block_label (new_bb);
+ rtx_code_label *new_label = block_label (new_bb);
for (i = 0; i < n; ++i)
{
/* If the substitution doesn't succeed, die. This can happen
if the back end emitted unrecognizable instructions or if
- target is exit block on some arches. */
- if (!redirect_jump (insn, block_label (new_bb), 0))
+ target is exit block on some arches. Or for crossing
+ jumps. */
+ if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
+ block_label (new_bb), 0))
{
- gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun));
+ gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || CROSSING_JUMP_P (insn));
return false;
}
}
if (!currently_expanding_to_rtl)
{
- if (!patch_jump_insn (insn, old_label, target))
+ if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
return NULL;
}
else
jumps (i.e. not yet split by find_many_sub_basic_blocks).
Redirect all of those that match our label. */
FOR_BB_INSNS (src, insn)
- if (JUMP_P (insn) && !patch_jump_insn (insn, old_label, target))
+ if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
+ old_label, target))
return NULL;
if (dump_file)
if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
{
e->flags |= EDGE_CROSSING;
- if (JUMP_P (BB_END (e->src))
- && !CROSSING_JUMP_P (BB_END (e->src)))
+ if (JUMP_P (BB_END (e->src)))
CROSSING_JUMP_P (BB_END (e->src)) = 1;
}
else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
gcc_assert (current_ir_type () == IR_RTL_CFGRTL
|| current_ir_type () == IR_RTL_CFGLAYOUT);
if (current_ir_type () == IR_RTL_CFGLAYOUT)
- BB_FOOTER (bb) = unlink_insn_chain (barrier, barrier);
+ {
+ rtx_insn *insn = unlink_insn_chain (barrier, barrier);
+
+ if (BB_FOOTER (bb))
+ {
+ rtx_insn *footer_tail = BB_FOOTER (bb);
+
+ while (NEXT_INSN (footer_tail))
+ footer_tail = NEXT_INSN (footer_tail);
+ if (!BARRIER_P (footer_tail))
+ {
+ SET_NEXT_INSN (footer_tail) = insn;
+ SET_PREV_INSN (insn) = footer_tail;
+ }
+ }
+ else
+ BB_FOOTER (bb) = insn;
+ }
}
/* Like force_nonfallthru below, but additionally performs redirection
edge b = unchecked_make_edge (e->src, target, 0);
bool redirected;
- redirected = redirect_jump (BB_END (e->src), block_label (target), 0);
+ redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
+ block_label (target), 0);
gcc_assert (redirected);
note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
{
int prob = XINT (note, 0);
- b->probability = prob;
- /* Update this to use GCOV_COMPUTE_SCALE. */
- b->count = e->count * prob / REG_BR_PROB_BASE;
+ b->probability = profile_probability::from_reg_br_prob_note (prob);
e->probability -= e->probability;
- e->count -= b->count;
- if (e->probability < 0)
- e->probability = 0;
- if (e->count < 0)
- e->count = 0;
}
}
basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ bb->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
+
+ /* Make sure new block ends up in correct hot/cold section. */
+ BB_COPY_PARTITION (bb, e->dest);
/* Change the existing edge's source to be the new block, and add
a new edge from the entry block to the new block. */
if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
{
- gcov_type count = e->count;
- int probability = e->probability;
+ rtx_insn *new_head;
+ profile_count count = e->count ();
+ profile_probability probability = e->probability;
/* Create the new structures. */
/* If the old block ended with a tablejump, skip its table
forward from the last instruction of the old block. */
rtx_jump_table_data *table;
if (tablejump_p (BB_END (e->src), NULL, &table))
- note = table;
+ new_head = table;
else
- note = BB_END (e->src);
- note = NEXT_INSN (note);
+ new_head = BB_END (e->src);
+ new_head = NEXT_INSN (new_head);
- jump_block = create_basic_block (note, NULL, e->src);
+ jump_block = create_basic_block (new_head, NULL, e->src);
jump_block->count = count;
- jump_block->frequency = EDGE_FREQUENCY (e);
/* Make sure new block ends up in correct hot/cold section. */
/* Wire edge in. */
new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
new_edge->probability = probability;
- new_edge->count = count;
/* Redirect old edge. */
redirect_edge_pred (e, jump_block);
- e->probability = REG_BR_PROB_BASE;
+ e->probability = profile_probability::always ();
/* If e->src was previously region crossing, it no longer is
and the reg crossing note should be removed. */
add also edge from asm goto bb to target. */
if (asm_goto_edge)
{
- new_edge->probability /= 2;
- new_edge->count /= 2;
- jump_block->count /= 2;
- jump_block->frequency /= 2;
- new_edge = make_edge (new_edge->src, target,
- e->flags & ~EDGE_FALLTHRU);
- new_edge->probability = probability - probability / 2;
- new_edge->count = count - count / 2;
+ new_edge->probability = new_edge->probability.apply_scale (1, 2);
+ jump_block->count = jump_block->count.apply_scale (1, 2);
+ edge new_edge2 = make_edge (new_edge->src, target,
+ e->flags & ~EDGE_FALLTHRU);
+ new_edge2->probability = probability - new_edge->probability;
}
new_bb = jump_block;
if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
if (jump_label == ret_rtx)
- {
-#ifdef HAVE_return
- emit_jump_insn_after_setloc (gen_return (), BB_END (jump_block), loc);
-#else
- gcc_unreachable ();
-#endif
- }
+ emit_jump_insn_after_setloc (targetm.gen_return (),
+ BB_END (jump_block), loc);
else
{
gcc_assert (jump_label == simple_return_rtx);
-#ifdef HAVE_simple_return
- emit_jump_insn_after_setloc (gen_simple_return (),
+ emit_jump_insn_after_setloc (targetm.gen_simple_return (),
BB_END (jump_block), loc);
-#else
- gcc_unreachable ();
-#endif
}
set_return_jump_label (BB_END (jump_block));
}
else
{
- rtx label = block_label (target);
- emit_jump_insn_after_setloc (gen_jump (label), BB_END (jump_block), loc);
+ rtx_code_label *label = block_label (target);
+ emit_jump_insn_after_setloc (targetm.gen_jump (label),
+ BB_END (jump_block), loc);
JUMP_LABEL (BB_END (jump_block)) = label;
LABEL_NUSES (label)++;
}
the head of block C and assert that we really do fall through. */
for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
- if (INSN_P (q))
+ if (NONDEBUG_INSN_P (q))
return;
/* Remove what will soon cease being the jump insn from the source block.
&& (any_uncondjump_p (q)
|| single_succ_p (b)))
{
-#ifdef HAVE_cc0
+ rtx_insn *label;
+ rtx_jump_table_data *table;
+
+ if (tablejump_p (q, &label, &table))
+ {
+ /* The label is likely mentioned in some instruction before
+ the tablejump and might not be DCEd, so turn it into
+ a note instead and move before the tablejump that is going to
+ be deleted. */
+ const char *name = LABEL_NAME (label);
+ PUT_CODE (label, NOTE);
+ NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
+ NOTE_DELETED_LABEL_NAME (label) = name;
+ reorder_insns (label, label, PREV_INSN (q));
+ delete_insn (table);
+ }
+
/* If this was a conditional jump, we need to also delete
the insn that set cc0. */
- if (any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
+ if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
q = PREV_INSN (q);
-#endif
q = PREV_INSN (q);
}
+ /* Unconditional jumps with side-effects (i.e. which we can't just delete
+ together with the barrier) should never have a fallthru edge. */
+ else if (JUMP_P (q) && any_uncondjump_p (q))
+ return;
/* Selectively unlink the sequence. */
if (q != PREV_INSN (BB_HEAD (c)))
&& (edge_in->flags & EDGE_CROSSING))
{
after = last_bb_in_partition (edge_in->src);
- before = NEXT_INSN (BB_END (after));
+ before = get_last_bb_insn (after);
/* The instruction following the last bb in partition should
be a barrier, since it cannot end in a fall-through. */
gcc_checking_assert (BARRIER_P (before));
if (last
&& JUMP_P (last)
&& edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
- && extract_asm_operands (PATTERN (last)) != NULL_RTX
+ && (extract_asm_operands (PATTERN (last))
+ || JUMP_LABEL (last) == before)
&& patch_jump_insn (last, before, bb))
df_set_bb_dirty (edge_in->src);
}
which will be done by fixup_partitions. */
fixup_partitions ();
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ if (!currently_expanding_to_rtl)
+ checking_verify_flow_info ();
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
FOR_EACH_EDGE (e, ei, bb->succs)
if (e->insns.r)
- commit_one_edge_insertion (e);
+ {
+ if (currently_expanding_to_rtl)
+ rebuild_jump_labels_chain (e->insns.r);
+ commit_one_edge_insertion (e);
+ }
}
}
\f
documented in dumpfile.h. */
static void
-rtl_dump_bb (FILE *outf, basic_block bb, int indent, int flags)
+rtl_dump_bb (FILE *outf, basic_block bb, int indent, dump_flags_t flags)
{
- rtx_insn *insn;
- rtx_insn *last;
char *s_indent;
s_indent = (char *) alloca ((size_t) indent + 1);
}
if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
- for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last;
- insn = NEXT_INSN (insn))
- {
- if (flags & TDF_DETAILS)
- df_dump_insn_top (insn, outf);
- if (! (flags & TDF_SLIM))
- print_rtl_single (outf, insn);
- else
- dump_insn_slim (outf, insn);
- if (flags & TDF_DETAILS)
- df_dump_insn_bottom (insn, outf);
- }
+ {
+ rtx_insn *last = BB_END (bb);
+ if (last)
+ last = NEXT_INSN (last);
+ for (rtx_insn *insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
+ {
+ if (flags & TDF_DETAILS)
+ df_dump_insn_top (insn, outf);
+ if (! (flags & TDF_SLIM))
+ print_rtl_single (outf, insn);
+ else
+ dump_insn_slim (outf, insn);
+ if (flags & TDF_DETAILS)
+ df_dump_insn_bottom (insn, outf);
+ }
+ }
if (df && (flags & TDF_DETAILS))
{
in dumpfile.h. */
void
-print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, int flags)
+print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, dump_flags_t flags)
{
const rtx_insn *tmp_rtx;
if (rtx_first == 0)
if (df)
df_dump_start (outf);
- if (flags & TDF_BLOCKS)
+ if (cfun->curr_properties & PROP_cfg)
{
FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
start[INSN_UID (BB_HEAD (bb))] = bb;
end[INSN_UID (BB_END (bb))] = bb;
- for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
+ if (flags & TDF_BLOCKS)
{
- enum bb_state state = IN_MULTIPLE_BB;
+ for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
+ {
+ enum bb_state state = IN_MULTIPLE_BB;
- if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
- state = IN_ONE_BB;
- in_bb_p[INSN_UID (x)] = state;
+ if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
+ state = IN_ONE_BB;
+ in_bb_p[INSN_UID (x)] = state;
- if (x == BB_END (bb))
- break;
+ if (x == BB_END (bb))
+ break;
+ }
}
}
}
- for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
+ for (tmp_rtx = rtx_first; tmp_rtx != NULL; tmp_rtx = NEXT_INSN (tmp_rtx))
{
if (flags & TDF_BLOCKS)
{
bb = start[INSN_UID (tmp_rtx)];
if (bb != NULL)
{
- dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, true, false);
+ dump_bb_info (outf, bb, 0, dump_flags, true, false);
if (df && (flags & TDF_DETAILS))
df_dump_top (bb, outf);
}
if (flags & TDF_DETAILS)
df_dump_insn_bottom (tmp_rtx, outf);
- if (flags & TDF_BLOCKS)
+ bb = end[INSN_UID (tmp_rtx)];
+ if (bb != NULL)
{
- bb = end[INSN_UID (tmp_rtx)];
- if (bb != NULL)
+ if (flags & TDF_BLOCKS)
{
- dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, false, true);
+ dump_bb_info (outf, bb, 0, dump_flags, false, true);
if (df && (flags & TDF_DETAILS))
df_dump_bottom (bb, outf);
putc ('\n', outf);
}
+ /* Emit a hint if the fallthrough target of current basic block
+ isn't the one placed right next. */
+ else if (EDGE_COUNT (bb->succs) > 0)
+ {
+ gcc_assert (BB_END (bb) == tmp_rtx);
+ const rtx_insn *ninsn = NEXT_INSN (tmp_rtx);
+ /* Bypass intervening deleted-insn notes and debug insns. */
+ while (ninsn
+ && !NONDEBUG_INSN_P (ninsn)
+ && !start[INSN_UID (ninsn)])
+ ninsn = NEXT_INSN (ninsn);
+ edge e = find_fallthru_edge (bb->succs);
+ if (e && ninsn)
+ {
+ basic_block dest = e->dest;
+ if (start[INSN_UID (ninsn)] != dest)
+ fprintf (outf, "%s ; pc falls through to BB %d\n",
+ print_rtx_head, dest->index);
+ }
+ }
}
}
update_br_prob_note (basic_block bb)
{
rtx note;
- if (!JUMP_P (BB_END (bb)))
- return;
note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
- if (!note || XINT (note, 0) == BRANCH_EDGE (bb)->probability)
+ if (!JUMP_P (BB_END (bb)) || !BRANCH_EDGE (bb)->probability.initialized_p ())
+ {
+ if (note)
+ {
+ rtx *note_link, this_rtx;
+
+ note_link = ®_NOTES (BB_END (bb));
+ for (this_rtx = *note_link; this_rtx; this_rtx = XEXP (this_rtx, 1))
+ if (this_rtx == note)
+ {
+ *note_link = XEXP (this_rtx, 1);
+ break;
+ }
+ }
+ return;
+ }
+ if (!note
+ || XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ())
return;
- XINT (note, 0) = BRANCH_EDGE (bb)->probability;
+ XINT (note, 0) = BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ();
}
/* Get the last insn associated with block BB (that includes barriers and
end = table;
/* Include any barriers that may follow the basic block. */
- tmp = next_nonnote_insn_bb (end);
+ tmp = next_nonnote_nondebug_insn_bb (end);
while (tmp && BARRIER_P (tmp))
{
end = tmp;
- tmp = next_nonnote_insn_bb (end);
+ tmp = next_nonnote_nondebug_insn_bb (end);
}
return end;
}
+/* Add all BBs reachable from entry via hot paths into the SET. */
+
+void
+find_bbs_reachable_by_hot_paths (hash_set<basic_block> *set)
+{
+ auto_vec<basic_block, 64> worklist;
+
+ set->add (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+
+ while (worklist.length () > 0)
+ {
+ basic_block bb = worklist.pop ();
+ edge_iterator ei;
+ edge e;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (BB_PARTITION (e->dest) != BB_COLD_PARTITION
+ && !set->add (e->dest))
+ worklist.safe_push (e->dest);
+ }
+}
+
/* Sanity check partition hotness to ensure that basic blocks in
 the cold partition don't dominate basic blocks in the hot partition.
If FLAG_ONLY is true, report violations as errors. Otherwise
find_partition_fixes (bool flag_only)
{
basic_block bb;
- vec<basic_block> bbs_in_cold_partition = vNULL;
vec<basic_block> bbs_to_fix = vNULL;
+ hash_set<basic_block> set;
/* Callers check this. */
gcc_checking_assert (crtl->has_bb_partition);
- FOR_EACH_BB_FN (bb, cfun)
- if ((BB_PARTITION (bb) == BB_COLD_PARTITION))
- bbs_in_cold_partition.safe_push (bb);
-
- if (bbs_in_cold_partition.is_empty ())
- return vNULL;
-
- bool dom_calculated_here = !dom_info_available_p (CDI_DOMINATORS);
-
- if (dom_calculated_here)
- calculate_dominance_info (CDI_DOMINATORS);
-
- while (! bbs_in_cold_partition.is_empty ())
- {
- bb = bbs_in_cold_partition.pop ();
- /* Any blocks dominated by a block in the cold section
- must also be cold. */
- basic_block son;
- for (son = first_dom_son (CDI_DOMINATORS, bb);
- son;
- son = next_dom_son (CDI_DOMINATORS, son))
- {
- /* If son is not yet cold, then mark it cold here and
- enqueue it for further processing. */
- if ((BB_PARTITION (son) != BB_COLD_PARTITION))
- {
- if (flag_only)
- error ("non-cold basic block %d dominated "
- "by a block in the cold partition (%d)", son->index, bb->index);
- else
- BB_SET_PARTITION (son, BB_COLD_PARTITION);
- bbs_to_fix.safe_push (son);
- bbs_in_cold_partition.safe_push (son);
- }
- }
- }
+ find_bbs_reachable_by_hot_paths (&set);
- if (dom_calculated_here)
- free_dominance_info (CDI_DOMINATORS);
+ FOR_EACH_BB_FN (bb, cfun)
+ if (!set.contains (bb)
+ && BB_PARTITION (bb) != BB_COLD_PARTITION)
+ {
+ if (flag_only)
+ error ("non-cold basic block %d reachable only "
+ "by paths crossing the cold partition", bb->index);
+ else
+ BB_SET_PARTITION (bb, BB_COLD_PARTITION);
+ bbs_to_fix.safe_push (bb);
+ }
return bbs_to_fix;
}
&& EDGE_COUNT (bb->succs) >= 2
&& any_condjump_p (BB_END (bb)))
{
- if (XINT (note, 0) != BRANCH_EDGE (bb)->probability
- && profile_status_for_fn (cfun) != PROFILE_ABSENT)
+ if (!BRANCH_EDGE (bb)->probability.initialized_p ())
+ {
+ if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
+ {
+ error ("verify_flow_info: "
+ "REG_BR_PROB is set but cfg probability is not");
+ err = 1;
+ }
+ }
+ else if (XINT (note, 0)
+ != BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()
+ && profile_status_for_fn (cfun) != PROFILE_ABSENT)
{
error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
- XINT (note, 0), BRANCH_EDGE (bb)->probability);
+ XINT (note, 0),
+ BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ());
err = 1;
}
}
n_abnormal++;
}
- if (!has_crossing_edge
- && JUMP_P (BB_END (bb))
- && CROSSING_JUMP_P (BB_END (bb)))
- {
- print_rtl_with_bb (stderr, get_insns (), TDF_RTL | TDF_BLOCKS | TDF_DETAILS);
- error ("Region crossing jump across same section in bb %i",
- bb->index);
- err = 1;
- }
+ if (!has_crossing_edge
+ && JUMP_P (BB_END (bb))
+ && CROSSING_JUMP_P (BB_END (bb)))
+ {
+ print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
+ error ("Region crossing jump across same section in bb %i",
+ bb->index);
+ err = 1;
+ }
if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
{
error ("abnormal edges for no purpose in bb %i", bb->index);
err = 1;
}
+
+ int has_eh = -1;
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (has_eh == -1)
+ has_eh = (e->flags & EDGE_EH);
+ if ((e->flags & EDGE_EH) == has_eh)
+ continue;
+ error ("EH incoming edge mixed with non-EH incoming edges "
+ "in bb %i", bb->index);
+ err = 1;
+ break;
+ }
}
/* If there are partitions, do a sanity check on them: A basic block in
 a cold partition cannot dominate a basic block in a hot partition.  */
- if (crtl->has_bb_partition && !err)
+ if (crtl->has_bb_partition && !err
+ && current_ir_type () == IR_RTL_CFGLAYOUT)
{
vec<basic_block> bbs_to_fix = find_partition_fixes (true);
err = !bbs_to_fix.is_empty ();
else
for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
insn = NEXT_INSN (insn))
- if (BARRIER_P (insn) || INSN_P (insn))
+ if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
{
error ("verify_flow_info: Incorrect fallthru %i->%i",
e->src->index, e->dest->index);
{
basic_block bb;
int err = 0;
- rtx_insn *x;
+ rtx_insn *x, *y;
int num_bb_notes;
rtx_insn * const rtx_first = get_insns ();
basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
num_bb_notes = 0;
- last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun);
for (x = rtx_first; x; x = NEXT_INSN (x))
{
if (JUMP_P (x)
&& returnjump_p (x) && ! condjump_p (x)
- && ! (next_nonnote_insn (x) && BARRIER_P (next_nonnote_insn (x))))
+ && ! ((y = next_nonnote_nondebug_insn (x))
+ && BARRIER_P (y)))
fatal_insn ("return not followed by barrier", x);
if (curr_bb && x == BB_END (curr_bb))
bool found;
edge_iterator ei;
- if (DEBUG_INSN_P (insn) && insn != BB_HEAD (bb))
+ if ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb))
do
insn = PREV_INSN (insn);
while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
/* Redistribute probabilities. */
if (single_succ_p (bb))
{
- single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
- single_succ_edge (bb)->count = bb->count;
+ single_succ_edge (bb)->probability = profile_probability::always ();
}
else
{
b = BRANCH_EDGE (bb);
f = FALLTHRU_EDGE (bb);
- b->probability = XINT (note, 0);
- f->probability = REG_BR_PROB_BASE - b->probability;
- /* Update these to use GCOV_COMPUTE_SCALE. */
- b->count = bb->count * b->probability / REG_BR_PROB_BASE;
- f->count = bb->count * f->probability / REG_BR_PROB_BASE;
+ b->probability = profile_probability::from_reg_br_prob_note
+ (XINT (note, 0));
+ f->probability = b->probability.invert ();
}
return purged;
gcc_assert (single_succ_p (bb));
- single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
- single_succ_edge (bb)->count = bb->count;
+ single_succ_edge (bb)->probability = profile_probability::always ();
if (dump_file)
fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
If it's placed after a trapping call (i.e. that
call is the last insn anyway), we have no fallthru
edge. Simply delete this use and don't try to insert
- on the non-existent edge. */
- if (GET_CODE (PATTERN (insn)) != USE)
+ on the non-existent edge.
+ Similarly, sometimes a call that can throw is
+ followed in the source with __builtin_unreachable (),
+ meaning that there is UB if the call returns rather
+ than throws. If there weren't any instructions
+ following such calls before, supposedly even the ones
+ we've deleted aren't significant and can be
+ removed. */
+ if (e)
{
/* We're not deleting it, we're moving it. */
- INSN_DELETED_P (insn) = 0;
+ insn->set_undeleted ();
SET_PREV_INSN (insn) = NULL_RTX;
SET_NEXT_INSN (insn) = NULL_RTX;
/* Locate or create a label for a given basic block. */
-static rtx
+static rtx_insn *
label_for_bb (basic_block bb)
{
- rtx label = BB_HEAD (bb);
+ rtx_insn *label = BB_HEAD (bb);
if (!LABEL_P (label))
{
{
fprintf (dump_file, " %i ", index);
if (get_bb_original (bb))
- fprintf (dump_file, "duplicate of %i ",
+ fprintf (dump_file, "duplicate of %i\n",
get_bb_original (bb)->index);
else if (forwarder_block_p (bb)
&& !LABEL_P (BB_HEAD (bb)))
- fprintf (dump_file, "compensation ");
+ fprintf (dump_file, "compensation\n");
else
- fprintf (dump_file, "bb %i ", bb->index);
- fprintf (dump_file, " [%i]\n", bb->frequency);
+ fprintf (dump_file, "bb %i\n", bb->index);
}
}
/* Maybe reset the original copy tables, they are not valid anymore
when we renumber the basic blocks in compact_blocks. If we are
going out of cfglayout mode, don't re-allocate the tables. */
- free_original_copy_tables ();
+ if (original_copy_tables_initialized_p ())
+ free_original_copy_tables ();
if (stay_in_cfglayout_mode)
initialize_original_copy_tables ();
insn = NEXT_INSN (insn);
set_last_insn (insn);
-#ifdef ENABLE_CHECKING
- verify_insn_chain ();
-#endif
+ if (flag_checking)
+ verify_insn_chain ();
/* Now add jumps and labels as needed to match the blocks new
outgoing edges. */
e_taken = e;
bb_end_insn = BB_END (bb);
- if (JUMP_P (bb_end_insn))
+ if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
{
- ret_label = JUMP_LABEL (bb_end_insn);
- if (any_condjump_p (bb_end_insn))
+ ret_label = JUMP_LABEL (bb_end_jump);
+ if (any_condjump_p (bb_end_jump))
{
/* This might happen if the conditional jump has side
effects and could therefore not be optimized away.
to prevent rtl_verify_flow_info from complaining. */
if (!e_fall)
{
- gcc_assert (!onlyjump_p (bb_end_insn)
- || returnjump_p (bb_end_insn)
+ gcc_assert (!onlyjump_p (bb_end_jump)
+ || returnjump_p (bb_end_jump)
|| (e_taken->flags & EDGE_CROSSING));
- emit_barrier_after (bb_end_insn);
+ emit_barrier_after (bb_end_jump);
continue;
}
edge based on known or assumed probability. */
else if (bb->aux != e_taken->dest)
{
- rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0);
+ rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);
if (note
- && XINT (note, 0) < REG_BR_PROB_BASE / 2
- && invert_jump (bb_end_insn,
+ && profile_probability::from_reg_br_prob_note
+ (XINT (note, 0)) < profile_probability::even ()
+ && invert_jump (bb_end_jump,
(e_fall->dest
== EXIT_BLOCK_PTR_FOR_FN (cfun)
? NULL_RTX
/* Otherwise we can try to invert the jump. This will
basically never fail, however, keep up the pretense. */
- else if (invert_jump (bb_end_insn,
+ else if (invert_jump (bb_end_jump,
(e_fall->dest
== EXIT_BLOCK_PTR_FOR_FN (cfun)
? NULL_RTX
update_br_prob_note (bb);
if (LABEL_NUSES (ret_label) == 0
&& single_pred_p (e_taken->dest))
- delete_insn (ret_label);
+ delete_insn (as_a<rtx_insn *> (ret_label));
continue;
}
}
force_nonfallthru (e);
}
- /* Ensure goto_locus from edges has some instructions with that locus
- in RTL. */
- if (!optimize)
+ /* Ensure goto_locus from edges has some instructions with that locus in RTL
+ when not optimizing. */
+ if (!optimize && !DECL_IGNORED_P (current_function_decl))
FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge, we have to split that block. */
if (c == bb)
{
- bb = split_block (bb, NULL)->dest;
+ bb = split_block_after_labels (bb)->dest;
bb->aux = c->aux;
c->aux = bb;
BB_FOOTER (bb) = BB_FOOTER (c);
{
case DEBUG_INSN:
/* Don't duplicate label debug insns. */
- if (TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
+ if (DEBUG_BIND_INSN_P (insn)
+ && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
break;
/* FALLTHRU */
case INSN:
break;
case NOTE_INSN_EPILOGUE_BEG:
+ case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
emit_note_copy (as_a <rtx_note *> (insn));
break;
/* Create a duplicate of the basic block BB. */
static basic_block
-cfg_layout_duplicate_bb (basic_block bb)
+cfg_layout_duplicate_bb (basic_block bb, copy_bb_data *)
{
rtx_insn *insn;
basic_block new_bb;
FLAGS is a set of additional flags to pass to cleanup_cfg(). */
void
-cfg_layout_initialize (unsigned int flags)
+cfg_layout_initialize (int flags)
{
rtx_insn_list *x;
basic_block bb;
layout required moving a block from the hot to the cold
section. This would create an illegal partitioning unless some
manual fixup was performed. */
- gcc_assert (!(crtl->bb_reorder_complete
- && flag_reorder_blocks_and_partition));
+ gcc_assert (!crtl->bb_reorder_complete || !crtl->has_bb_partition);
initialize_original_copy_tables ();
void
break_superblocks (void)
{
- sbitmap superblocks;
bool need = false;
basic_block bb;
- superblocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
bitmap_clear (superblocks);
FOR_EACH_BB_FN (bb, cfun)
rebuild_jump_labels (get_insns ());
find_many_sub_basic_blocks (superblocks);
}
-
- free (superblocks);
}
/* Finalize the changes: reorder insn list according to the sequence specified
void
cfg_layout_finalize (void)
{
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
+ free_dominance_info (CDI_DOMINATORS);
force_one_exit_fallthru ();
rtl_register_cfg_hooks ();
- if (reload_completed
-#ifdef HAVE_epilogue
- && !HAVE_epilogue
-#endif
- )
+ if (reload_completed && !targetm.have_epilogue ())
fixup_fallthru_exit_predecessor ();
fixup_reorder_chain ();
rebuild_jump_labels (get_insns ());
delete_dead_jumptables ();
-#ifdef ENABLE_CHECKING
- verify_insn_chain ();
- verify_flow_info ();
-#endif
+ if (flag_checking)
+ verify_insn_chain ();
+ checking_verify_flow_info ();
}
if (e->dest == dest)
return e;
+ if (e->flags & EDGE_CROSSING
+ && BB_PARTITION (e->src) == BB_PARTITION (dest)
+ && simplejump_p (BB_END (src)))
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ "Removing crossing jump while redirecting edge form %i to %i\n",
+ e->src->index, dest->index);
+ delete_insn (BB_END (src));
+ remove_barriers_from_footer (src);
+ e->flags |= EDGE_FALLTHRU;
+ }
+
if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& (ret = try_redirect_by_replacing_jump (e, dest, true)))
{
else
ret = redirect_branch_edge (e, dest);
+ if (!ret)
+ return NULL;
+
+ fixup_partition_crossing (ret);
/* We don't want simplejumps in the insn stream during cfglayout. */
- gcc_assert (!simplejump_p (BB_END (src)));
+ gcc_assert (!simplejump_p (BB_END (src)) || CROSSING_JUMP_P (BB_END (src)));
df_set_bb_dirty (src);
return ret;
static void
cfg_layout_merge_blocks (basic_block a, basic_block b)
{
- bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
+ /* If B is a forwarder block whose outgoing edge has no location, we'll
+ propagate the locus of the edge between A and B onto it. */
+ const bool forward_edge_locus
+ = (b->flags & BB_FORWARDER_BLOCK) != 0
+ && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
rtx_insn *insn;
gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
gcc_assert (!JUMP_P (BB_END (a)));
- /* When not optimizing and the edge is the only place in RTL which holds
- some unique locus, emit a nop with that locus in between. */
- if (!optimize)
+ /* If not optimizing, preserve the locus of the single edge between
+ blocks A and B if necessary by emitting a nop. */
+ if (!optimize
+ && !forward_edge_locus
+ && !DECL_IGNORED_P (current_function_decl))
emit_nop_for_unique_locus_between (a, b);
/* Move things from b->footer after a->footer. */
df_bb_delete (b->index);
- /* If B was a forwarder block, propagate the locus on the edge. */
- if (forwarder_p
- && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
+ if (forward_edge_locus)
EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
if (dump_file)
block in CFG already. Calling make_edge in such case would
cause us to mark that edge as fake and remove it later. */
-#ifdef ENABLE_CHECKING
- if (split_at_insn == BB_END (bb))
+ if (flag_checking && split_at_insn == BB_END (bb))
{
e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (e == NULL);
}
-#endif
/* Note that the following may create a new basic block
and renumber the existing basic blocks. */
blocks_split++;
}
- make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
+ edge ne = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
+ ne->probability = profile_probability::guessed_never ();
}
if (insn == BB_HEAD (bb))
basic_block second_head ATTRIBUTE_UNUSED,
basic_block cond_bb, void *comp_rtx)
{
- rtx label;
+ rtx_code_label *label;
rtx_insn *seq, *jump;
rtx op0 = XEXP ((rtx)comp_rtx, 0);
rtx op1 = XEXP ((rtx)comp_rtx, 1);
enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
- enum machine_mode mode;
+ machine_mode mode;
label = block_label (first_head);
start_sequence ();
op0 = force_operand (op0, NULL_RTX);
op1 = force_operand (op1, NULL_RTX);
- do_compare_rtx_and_jump (op0, op1, comp, 0,
- mode, NULL_RTX, NULL_RTX, label, -1);
+ do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label,
+ profile_probability::uninitialized ());
jump = get_last_insn ();
JUMP_LABEL (jump) = label;
LABEL_NUSES (label)++;
seq = get_insns ();
end_sequence ();
- /* Add the new cond , in the new head. */
+ /* Add the new cond, in the new head. */
emit_insn_after (seq, BB_END (cond_bb));
}
}
static basic_block
-rtl_duplicate_bb (basic_block bb)
+rtl_duplicate_bb (basic_block bb, copy_bb_data *id)
{
- bb = cfg_layout_duplicate_bb (bb);
+ bb = cfg_layout_duplicate_bb (bb, id);
bb->aux = NULL;
return bb;
}
/* Do book-keeping of basic block BB for the profile consistency checker.
- If AFTER_PASS is 0, do pre-pass accounting, or if AFTER_PASS is 1
- then do post-pass accounting. Store the counting in RECORD. */
+ Store the counting in RECORD. */
static void
-rtl_account_profile_record (basic_block bb, int after_pass,
- struct profile_record *record)
+rtl_account_profile_record (basic_block bb, struct profile_record *record)
{
rtx_insn *insn;
FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
{
- record->size[after_pass]
- += insn_rtx_cost (PATTERN (insn), false);
- if (profile_status_for_fn (cfun) == PROFILE_READ)
- record->time[after_pass]
- += insn_rtx_cost (PATTERN (insn), true) * bb->count;
+ record->size += insn_cost (insn, false);
+ if (bb->count.initialized_p ())
+ record->time
+ += insn_cost (insn, true) * bb->count.to_gcov_type ();
else if (profile_status_for_fn (cfun) == PROFILE_GUESSED)
- record->time[after_pass]
- += insn_rtx_cost (PATTERN (insn), true) * bb->frequency;
+ record->time
+ += insn_cost (insn, true) * bb->count.to_frequency (cfun);
}
}
};
#include "gt-cfgrtl.h"
+
+#if __GNUC__ >= 10
+# pragma GCC diagnostic pop
+#endif