/* Instruction scheduling pass. Selective scheduler and pipeliner.
- Copyright (C) 2006-2017 Free Software Foundation, Inc.
+ Copyright (C) 2006-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
-#include "params.h"
#include "target.h"
#include "sched-int.h"
#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
static struct common_sched_info_def sel_common_sched_info;
/* The loop nest being pipelined. */
-struct loop *current_loop_nest;
+class loop *current_loop_nest;
/* LOOP_NESTS is a vector containing the corresponding loop nest for
each region. */
flist_remove (lp);
}
-/* Add ORIGINAL_INSN the def list DL honoring CROSSES_CALL. */
+/* Add ORIGINAL_INSN to the def list DL honoring CROSSED_CALL_ABIS. */
void
-def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
+def_list_add (def_list_t *dl, insn_t original_insn,
+ unsigned int crossed_call_abis)
{
def_t d;
d = DEF_LIST_DEF (*dl);
d->orig_insn = original_insn;
- d->crosses_call = crosses_call;
+ d->crossed_call_abis = crossed_call_abis;
}
\f
}
\f
/* Functions to work with dependence contexts.
- Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
+ Dc (aka deps context, aka deps_t, aka class deps_desc *) is short for dependence
context. It accumulates information about processed insns to decide if
current insn is dependent on the processed ones. */
static deps_t
alloc_deps_context (void)
{
- return XNEW (struct deps_desc);
+ return XNEW (class deps_desc);
}
/* Allocate and initialize dep context. */
else
if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
{
- /* Would be weird if same insn is successor of several fallthrough
- edges. */
- gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
- != BLOCK_FOR_INSN (last_scheduled_insn_old));
-
state_free (FENCE_STATE (f));
FENCE_STATE (f) = state;
if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
- if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
- EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
+ /* We merge sched-times half-way to the larger value to avoid the endless
+ pipelining of unneeded insns. The average seems to be a good compromise
+ between pipelining opportunities and avoiding extra work. */
+ if (EXPR_SCHED_TIMES (to) != EXPR_SCHED_TIMES (from))
+ EXPR_SCHED_TIMES (to) = ((EXPR_SCHED_TIMES (from) + EXPR_SCHED_TIMES (to)
+ + 1) / 2);
if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
EXPR_ORIG_BB_INDEX (to) = 0;
return;
HARD_REG_SET temp;
- unsigned regno;
- hard_reg_set_iterator hrsi;
get_implicit_reg_pending_clobbers (&temp, insn);
- EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi)
- SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
+ IOR_REG_SET_HRS (IDATA_REG_SETS (id), temp);
}
/* Setup register sets describing INSN in ID. */
static void
deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
{
- struct deps_desc _dc, *dc = &_dc;
+ class deps_desc _dc, *dc = &_dc;
deps_init_id_data.where = DEPS_IN_NOWHERE;
deps_init_id_data.id = id;
/* Note a dependence. */
static void
-has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
- ds_t ds ATTRIBUTE_UNUSED)
-{
- if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
- VINSN_INSN_RTX (has_dependence_data.con)))
+has_dependence_note_dep (insn_t pro, ds_t ds ATTRIBUTE_UNUSED)
+{
+ insn_t real_pro = has_dependence_data.pro;
+ insn_t real_con = VINSN_INSN_RTX (has_dependence_data.con);
+
+ /* We do not allow debug insns to move through others unless they
+ are at the start of bb. This movement may create bookkeeping copies
+ that later would not be able to move up, violating the invariant
+ that a bookkeeping copy should be movable as the original insn.
+ Detect that here and allow that movement if we allowed it before
+ in the first place. */
+ if (DEBUG_INSN_P (real_con) && !DEBUG_INSN_P (real_pro)
+ && INSN_UID (NEXT_INSN (pro)) == INSN_UID (real_con))
+ return;
+
+ if (!sched_insns_conditions_mutex_p (real_pro, real_con))
{
ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
{
int i;
ds_t ds;
- struct deps_desc *dc;
+ class deps_desc *dc;
if (INSN_SIMPLEJUMP_P (pred))
/* Unconditional jump is just a transfer of control flow.
if (!(e->flags & EDGE_FALLTHRU))
{
- /* We can not invalidate computed topological order by moving
+ /* We cannot invalidate computed topological order by moving
the edge destination block (E->SUCC) along a fallthru edge.
We will update dominators here only when we'll get
&& INSN_SCHED_TIMES (BB_END (xbb)) == 0
&& !IN_CURRENT_FENCE_P (BB_END (xbb)))
{
- if (sel_remove_insn (BB_END (xbb), false, false))
- return true;
+ /* We used to call sel_remove_insn here, which can trigger tidy_control_flow
+ before we fix up the fallthru edge. Correct that ordering by
+ explicitly doing the latter before the former. */
+ clear_expr (INSN_EXPR (BB_END (xbb)));
tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
+ if (tidy_control_flow (xbb, false))
+ return true;
}
first = sel_bb_head (xbb);
gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
+ /* We could have skipped some debug insns which did not get removed with the block,
+ and the seqnos could become incorrect. Fix them up here. */
+ if (MAY_HAVE_DEBUG_INSNS && (sel_bb_head (xbb) != first || sel_bb_end (xbb) != last))
+ {
+ if (!sel_bb_empty_p (xbb->prev_bb))
+ {
+ int prev_seqno = INSN_SEQNO (sel_bb_end (xbb->prev_bb));
+ if (prev_seqno > INSN_SEQNO (sel_bb_head (xbb)))
+ for (insn_t insn = sel_bb_head (xbb); insn != first; insn = NEXT_INSN (insn))
+ INSN_SEQNO (insn) = prev_seqno + 1;
+ }
+ }
+
/* It can turn out that after removing unused jump, basic block
that contained that jump, becomes empty too. In such case
remove it too. */
if (current_loop_nest)
{
- struct loop *loop;
+ class loop *loop;
for (loop = current_loop_nest; loop; loop = loop_outer (loop))
if (considered_for_pipelining_p (loop) && loop->latch == from)
recompute_dominator (CDI_DOMINATORS, to));
set_immediate_dominator (CDI_DOMINATORS, orig_dest,
recompute_dominator (CDI_DOMINATORS, orig_dest));
+ if (jump && sel_bb_head_p (jump))
+ compute_live (jump);
}
/* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by
set_immediate_dominator (CDI_DOMINATORS, orig_dest,
recompute_dominator (CDI_DOMINATORS, orig_dest));
}
+ if (jump && sel_bb_head_p (jump))
+ compute_live (jump);
return recompute_toporder_p;
}
/* Create a region for LOOP and return its number. If we don't want
to pipeline LOOP, return -1. */
static int
-make_region_from_loop (struct loop *loop)
+make_region_from_loop (class loop *loop)
{
unsigned int i;
int new_rgn_number = -1;
- struct loop *inner;
+ class loop *inner;
/* Basic block index, to be assigned to BLOCK_TO_BB. */
int bb_ord_index = 0;
basic_block preheader_block;
if (loop->num_nodes
- > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
+ > (unsigned) param_max_pipeline_region_blocks)
return -1;
/* Don't pipeline loops whose latch belongs to some of its inner loops. */
return -1;
loop->ninsns = num_loop_insns (loop);
- if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
+ if ((int) loop->ninsns > param_max_pipeline_region_insns)
return -1;
loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
pipelined before outer loops. Returns true when a region for LOOP
is created. */
static bool
-make_regions_from_loop_nest (struct loop *loop)
+make_regions_from_loop_nest (class loop *loop)
{
- struct loop *cur_loop;
+ class loop *cur_loop;
int rgn_number;
/* Traverse all inner nodes of the loop. */
recompute_rev_top_order ();
}
-/* Returns a struct loop for region RGN. */
+/* Returns a class loop for region RGN. */
loop_p
get_loop_nest_for_rgn (unsigned int rgn)
{
/* True when LOOP was included into pipelining regions. */
bool
-considered_for_pipelining_p (struct loop *loop)
+considered_for_pipelining_p (class loop *loop)
{
if (loop_depth (loop) == 0)
return false;
/* Free data structures used in pipelining of loops. */
void sel_finish_pipelining (void)
{
- struct loop *loop;
+ class loop *loop;
/* Release aux fields so we don't free them later by mistake. */
FOR_EACH_LOOP (loop, 0)
{
if (current_loop_nest)
{
- struct loop *outer;
+ class loop *outer;
if (preheader_removed)
return false;