/* Instruction scheduling pass. Selective scheduler and pipeliner.
- Copyright (C) 2006-2019 Free Software Foundation, Inc.
+ Copyright (C) 2006-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
-#include "params.h"
#include "target.h"
#include "sched-int.h"
#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
static struct common_sched_info_def sel_common_sched_info;
/* The loop nest being pipelined. */
-struct loop *current_loop_nest;
+class loop *current_loop_nest;
/* LOOP_NESTS is a vector containing the corresponding loop nest for
each region. */
flist_remove (lp);
}
-/* Add ORIGINAL_INSN the def list DL honoring CROSSES_CALL. */
+/* Add ORIGINAL_INSN to the def list DL honoring CROSSED_CALL_ABIS.  */
void
-def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
+def_list_add (def_list_t *dl, insn_t original_insn,
+ unsigned int crossed_call_abis)
{
def_t d;
d = DEF_LIST_DEF (*dl);
d->orig_insn = original_insn;
- d->crosses_call = crosses_call;
+ d->crossed_call_abis = crossed_call_abis;
}
\f
}
\f
/* Functions to work with dependence contexts.
- Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
+ Dc (aka deps context, aka deps_t, aka class deps_desc *) is short for dependence
context. It accumulates information about processed insns to decide if
current insn is dependent on the processed ones. */
static deps_t
alloc_deps_context (void)
{
- return XNEW (struct deps_desc);
+ return XNEW (class deps_desc);
}
/* Allocate and initialize dep context. */
return;
HARD_REG_SET temp;
- unsigned regno;
- hard_reg_set_iterator hrsi;
get_implicit_reg_pending_clobbers (&temp, insn);
- EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi)
- SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
+ IOR_REG_SET_HRS (IDATA_REG_SETS (id), temp);
}
/* Setup register sets describing INSN in ID. */
static void
deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
{
- struct deps_desc _dc, *dc = &_dc;
+ class deps_desc _dc, *dc = &_dc;
deps_init_id_data.where = DEPS_IN_NOWHERE;
deps_init_id_data.id = id;
{
int i;
ds_t ds;
- struct deps_desc *dc;
+ class deps_desc *dc;
if (INSN_SIMPLEJUMP_P (pred))
/* Unconditional jump is just a transfer of control flow.
if (current_loop_nest)
{
- struct loop *loop;
+ class loop *loop;
for (loop = current_loop_nest; loop; loop = loop_outer (loop))
if (considered_for_pipelining_p (loop) && loop->latch == from)
recompute_dominator (CDI_DOMINATORS, to));
set_immediate_dominator (CDI_DOMINATORS, orig_dest,
recompute_dominator (CDI_DOMINATORS, orig_dest));
+ if (jump && sel_bb_head_p (jump))
+ compute_live (jump);
}
/* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by
set_immediate_dominator (CDI_DOMINATORS, orig_dest,
recompute_dominator (CDI_DOMINATORS, orig_dest));
}
+ if (jump && sel_bb_head_p (jump))
+ compute_live (jump);
return recompute_toporder_p;
}
/* Create a region for LOOP and return its number. If we don't want
to pipeline LOOP, return -1. */
static int
-make_region_from_loop (struct loop *loop)
+make_region_from_loop (class loop *loop)
{
unsigned int i;
int new_rgn_number = -1;
- struct loop *inner;
+ class loop *inner;
/* Basic block index, to be assigned to BLOCK_TO_BB. */
int bb_ord_index = 0;
basic_block preheader_block;
if (loop->num_nodes
- > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
+ > (unsigned) param_max_pipeline_region_blocks)
return -1;
/* Don't pipeline loops whose latch belongs to some of its inner loops. */
return -1;
loop->ninsns = num_loop_insns (loop);
- if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
+ if ((int) loop->ninsns > param_max_pipeline_region_insns)
return -1;
loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
pipelined before outer loops. Returns true when a region for LOOP
is created. */
static bool
-make_regions_from_loop_nest (struct loop *loop)
+make_regions_from_loop_nest (class loop *loop)
{
- struct loop *cur_loop;
+ class loop *cur_loop;
int rgn_number;
/* Traverse all inner nodes of the loop. */
recompute_rev_top_order ();
}
-/* Returns a struct loop for region RGN. */
+/* Returns a class loop for region RGN. */
loop_p
get_loop_nest_for_rgn (unsigned int rgn)
{
/* True when LOOP was included into pipelining regions. */
bool
-considered_for_pipelining_p (struct loop *loop)
+considered_for_pipelining_p (class loop *loop)
{
if (loop_depth (loop) == 0)
return false;
/* Free data structures used in pipelining of loops. */
void sel_finish_pipelining (void)
{
- struct loop *loop;
+ class loop *loop;
/* Release aux fields so we don't free them later by mistake. */
FOR_EACH_LOOP (loop, 0)
{
if (current_loop_nest)
{
- struct loop *outer;
+ class loop *outer;
if (preheader_removed)
return false;