/* Instruction scheduling pass. This file computes dependencies between
instructions.
- Copyright (C) 1992-2015 Free Software Foundation, Inc.
+ Copyright (C) 1992-2020 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com).
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
-#include "diagnostic-core.h"
+#include "backend.h"
+#include "target.h"
#include "rtl.h"
-#include "hash-set.h"
-#include "machmode.h"
-#include "vec.h"
-#include "double-int.h"
-#include "input.h"
-#include "alias.h"
-#include "symtab.h"
-#include "wide-int.h"
-#include "inchash.h"
-#include "tree.h" /* FIXME: Used by call_may_noreturn_p. */
-#include "tm_p.h"
-#include "hard-reg-set.h"
-#include "regs.h"
-#include "input.h"
-#include "function.h"
-#include "flags.h"
+#include "tree.h"
+#include "df.h"
#include "insn-config.h"
+#include "regs.h"
+#include "memmodel.h"
+#include "ira.h"
+#include "ira-int.h"
#include "insn-attr.h"
-#include "except.h"
-#include "recog.h"
-#include "emit-rtl.h"
-#include "dominance.h"
-#include "cfg.h"
#include "cfgbuild.h"
-#include "predict.h"
-#include "basic-block.h"
#include "sched-int.h"
-#include "params.h"
#include "cselib.h"
-#include "ira.h"
-#include "target.h"
+#include "function-abi.h"
#ifdef INSN_SCHEDULING
-#ifdef ENABLE_CHECKING
-#define CHECK (true)
-#else
-#define CHECK (false)
-#endif
-
/* Holds current parameters for the dependency analyzer. */
struct sched_deps_info_def *sched_deps_info;
}
/* Pool to hold all dependency nodes (dep_node_t). */
-static alloc_pool dn_pool;
+static object_allocator<_dep_node> *dn_pool;
/* Number of dep_nodes out there. */
static int dn_pool_diff = 0;
static dep_node_t
create_dep_node (void)
{
- dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
+ dep_node_t n = dn_pool->allocate ();
dep_link_t back = DEP_NODE_BACK (n);
dep_link_t forw = DEP_NODE_FORW (n);
--dn_pool_diff;
- pool_free (dn_pool, n);
+ dn_pool->remove (n);
}
/* Pool to hold dependencies lists (deps_list_t). */
-static alloc_pool dl_pool;
+static object_allocator<_deps_list> *dl_pool;
/* Number of deps_lists out there. */
static int dl_pool_diff = 0;
static deps_list_t
create_deps_list (void)
{
- deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
+ deps_list_t l = dl_pool->allocate ();
DEPS_LIST_FIRST (l) = NULL;
DEPS_LIST_N_LINKS (l) = 0;
--dl_pool_diff;
- pool_free (dl_pool, l);
+ dl_pool->remove (l);
}
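/* A minimal sketch (not part of the patch) of the object_allocator
   lifecycle the two pools above rely on, assuming GCC's alloc-pool.h
   interface; the pool name string is arbitrary.  */

static object_allocator<_dep_node> example_pool ("example_dep_node");

static dep_node_t
example_create_dep_node (void)
{
  /* allocate () hands out one fixed-size object from the pool.  */
  dep_node_t n = example_pool.allocate ();
  return n;
}

static void
example_delete_dep_node (dep_node_t n)
{
  /* remove () returns the object to the pool; the pool's backing
     memory stays allocated for reuse.  */
  example_pool.remove (n);
}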
/* Return true if there are no dep_nodes and deps_lists out there.
   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  Once the bitmap for the true-dependency cache is
   allocated, the remaining caches are allocated as well. */
-static bitmap_head *true_dependency_cache = NULL;
-static bitmap_head *output_dependency_cache = NULL;
-static bitmap_head *anti_dependency_cache = NULL;
-static bitmap_head *control_dependency_cache = NULL;
-static bitmap_head *spec_dependency_cache = NULL;
+static bitmap true_dependency_cache = NULL;
+static bitmap output_dependency_cache = NULL;
+static bitmap anti_dependency_cache = NULL;
+static bitmap control_dependency_cache = NULL;
+static bitmap spec_dependency_cache = NULL;
static int cache_size;
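/* Sketch of how these caches are consulted, modeled on
   ask_dependency_caches: each cache is an array of bitmaps indexed by the
   consumer's LUID, with one bit per potential producer.  */

static bool
example_cached_true_dep_p (rtx_insn *pro, rtx_insn *con)
{
  int elem_luid = INSN_LUID (pro);
  int insn_luid = INSN_LUID (con);

  gcc_assert (true_dependency_cache != NULL);
  return bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid);
}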
/* True if we should mark added dependencies as non-register deps. */
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
enum reg_note, bool);
-static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
+static void add_dependence_list_and_free (class deps_desc *, rtx_insn *,
rtx_insn_list **, int, enum reg_note,
bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);
-static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
-static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
-static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
-static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);
+static void flush_pending_lists (class deps_desc *, rtx_insn *, int, int);
+static void sched_analyze_1 (class deps_desc *, rtx, rtx_insn *);
+static void sched_analyze_2 (class deps_desc *, rtx, rtx_insn *);
+static void sched_analyze_insn (class deps_desc *, rtx, rtx_insn *);
static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
                                                          rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
-#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
-#endif
+
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap. */
gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
&& DEP_PRO (new_dep) != DEP_CON (new_dep));
-#ifdef ENABLE_CHECKING
- check_dep (new_dep, mem1 != NULL);
-#endif
+ if (flag_checking)
+ check_dep (new_dep, mem1 != NULL);
if (true_dependency_cache != NULL)
{
add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
-#ifdef ENABLE_CHECKING
- check_dep (dep, false);
-#endif
+ if (flag_checking)
+ check_dep (dep, false);
add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
newly created dependencies. */
static void
-add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
+add_dependence_list_and_free (class deps_desc *deps, rtx_insn *insn,
rtx_insn_list **listp,
int uncond, enum reg_note dep_type, bool hard)
{
so that we can do memory aliasing on it. */
static void
-add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
+add_insn_mem_dependence (class deps_desc *deps, bool read_p,
rtx_insn *insn, rtx mem)
{
rtx_insn_list **insn_list;
dependencies for a read operation, similarly with FOR_WRITE. */
static void
-flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
+flush_pending_lists (class deps_desc *deps, rtx_insn *insn, int for_read,
int for_write)
{
if (for_write)
/* Set up insn register uses for INSN and dependency context DEPS. */
static void
-setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
+setup_insn_reg_uses (class deps_desc *deps, rtx_insn *insn)
{
unsigned i;
reg_set_iterator rsi;
reg_pressure_info[cl].change = 0;
}
- note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
+ note_stores (insn, mark_insn_reg_clobber, insn);
- note_stores (PATTERN (insn), mark_insn_reg_store, insn);
+ note_stores (insn, mark_insn_reg_store, insn);
-#ifdef AUTO_INC_DEC
- for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) == REG_INC)
- mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
-#endif
+ if (AUTO_INC_DEC)
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC)
+ mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
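/* Sketch of the callback contract behind the note_stores calls above:
   after this change note_stores takes the insn itself, and it invokes the
   callback once per SET/CLOBBER destination with the stored-to rtx, the
   SET/CLOBBER rtx, and the opaque data pointer (here the insn).  */

static void
example_store_callback (rtx reg, const_rtx setter ATTRIBUTE_UNUSED,
			void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  if (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER)
    fprintf (stderr, "insn %d stores hard reg %d\n",
	     INSN_UID (insn), REGNO (reg));
}

/* Used as: note_stores (insn, example_store_callback, insn);  */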
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_DEAD)
/* Extend reg info for the deps context DEPS given that
we have just generated a register numbered REGNO. */
static void
-extend_deps_reg_info (struct deps_desc *deps, int regno)
+extend_deps_reg_info (class deps_desc *deps, int regno)
{
int max_regno = regno + 1;
CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
static void
-sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
+sched_analyze_reg (class deps_desc *deps, int regno, machine_mode mode,
enum rtx_code ref, rtx_insn *insn)
{
/* We could emit new pseudos in renaming. Extend the reg structures. */
If so, mark all of them just like the first. */
if (regno < FIRST_PSEUDO_REGISTER)
{
- int i = hard_regno_nregs[regno][mode];
+ int i = hard_regno_nregs (regno, mode);
if (ref == SET)
{
while (--i >= 0)
destination of X, and reads of everything mentioned. */
static void
-sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
+sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
rtx dest = XEXP (x, 0);
enum rtx_code code = GET_CODE (x);
{
if (GET_CODE (dest) == STRICT_LOW_PART
|| GET_CODE (dest) == ZERO_EXTRACT
- || df_read_modify_subreg_p (dest))
+ || read_modify_subreg_p (dest))
{
/* These both read and modify the result. We must handle
them as writes to get proper dependencies for following
/* Pending lists can't get larger with a readonly context. */
if (!deps->readonly
&& ((deps->pending_read_list_length + deps->pending_write_list_length)
- >= MAX_PENDING_LIST_LENGTH))
+ >= param_max_pending_list_length))
{
/* Flush all pending reads and writes to prevent the pending lists
from getting any larger. Insn scheduling runs too slowly when
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
-sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
+sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
int i;
int j;
case MEM:
{
/* Reading memory. */
- rtx u;
+ rtx_insn_list *u;
rtx_insn_list *pending;
rtx_expr_list *pending_mem;
rtx t = x;
pending_mem = pending_mem->next ();
}
- for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)),
- REG_DEP_ANTI);
+ for (u = deps->last_pending_memory_flush; u; u = u->next ())
+ add_dependence (insn, u->insn (), REG_DEP_ANTI);
- for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
+ for (u = deps->pending_jump_insns; u; u = u->next ())
if (deps_may_trap_p (x))
{
if ((sched_deps_info->generate_spec_deps)
ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
MAX_DEP_WEAK);
- note_dep (as_a <rtx_insn *> (XEXP (u, 0)), ds);
+ note_dep (u->insn (), ds);
}
else
- add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)),
- REG_DEP_CONTROL);
+ add_dependence (insn, u->insn (), REG_DEP_CONTROL);
}
}
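/* Sketch of the typed list walk the hunks above convert to: rtx_insn_list
   provides insn () and next () accessors in place of the raw XEXP (x, 0)
   and XEXP (x, 1) offsets, so no as_a <rtx_insn *> cast is needed.  */

static void
example_walk_insn_list (rtx_insn_list *list, rtx_insn *insn)
{
  for (rtx_insn_list *u = list; u; u = u->next ())
    add_dependence (insn, u->insn (), REG_DEP_ANTI);
}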
{
if ((deps->pending_read_list_length
+ deps->pending_write_list_length)
- >= MAX_PENDING_LIST_LENGTH
+ >= param_max_pending_list_length
&& !DEBUG_INSN_P (insn))
flush_pending_lists (deps, insn, true, true);
add_insn_mem_dependence (deps, true, insn, x);
return;
}
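/* The list-length limit checked above is now a run-time --param rather
   than a compiled-in constant, so it can be tuned without rebuilding,
   e.g. (assuming the standard --param syntax):

     gcc -O2 --param max-pending-list-length=64 file.c  */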
- /* Force pending stores to memory in case a trap handler needs them. */
+ /* Force pending stores to memory in case a trap handler needs them.
+ Also force pending loads from memory; loads and stores can segfault,
+ and the signal handler won't be triggered if the trap insn was moved
+ above a load or store insn. */
case TRAP_IF:
- flush_pending_lists (deps, insn, true, false);
+ flush_pending_lists (deps, insn, true, true);
break;
case PREFETCH:
reg_pending_barrier = TRUE_BARRIER;
/* For all ASM_OPERANDS, we must traverse the vector of input operands.
- We can not just fall through here since then we would be confused
+ We cannot just fall through here since then we would be confused
by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
traditional asms unlike their normal usage. */
sched_deps_info->finish_rhs ();
}
-/* Try to group two fuseable insns together to prevent scheduler
+/* Try to group two fusible insns together to prevent scheduler
from scheduling them apart. */
static void
sched_macro_fuse_insns (rtx_insn *insn)
{
rtx_insn *prev;
+ /* No target hook would return true with a debug insn as either of its
+ operands, and with very long sequences consisting only of debug insns,
+ calling sched_macro_fuse_insns on each one would give quadratic
+ compile-time behavior. */
+ if (DEBUG_INSN_P (insn))
+ return;
+ prev = prev_nonnote_nondebug_insn (insn);
+ if (!prev)
+ return;
if (any_condjump_p (insn))
{
unsigned int condreg1, condreg2;
rtx cc_reg_1;
- targetm.fixed_condition_code_regs (&condreg1, &condreg2);
- cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
- prev = prev_nonnote_nondebug_insn (insn);
- if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
- || !prev
- || !modified_in_p (cc_reg_1, prev))
- return;
+ if (targetm.fixed_condition_code_regs (&condreg1, &condreg2))
+ {
+ cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
+ if (reg_referenced_p (cc_reg_1, PATTERN (insn))
+ && modified_in_p (cc_reg_1, prev))
+ {
+ if (targetm.sched.macro_fusion_pair_p (prev, insn))
+ SCHED_GROUP_P (insn) = 1;
+ return;
+ }
+ }
}
- else
- {
- rtx insn_set = single_set (insn);
-
- prev = prev_nonnote_nondebug_insn (insn);
- if (!prev
- || !insn_set
- || !single_set (prev))
- return;
+ if (single_set (insn) && single_set (prev))
+ {
+ if (targetm.sched.macro_fusion_pair_p (prev, insn))
+ SCHED_GROUP_P (insn) = 1;
}
+}
- if (targetm.sched.macro_fusion_pair_p (prev, insn))
- SCHED_GROUP_P (insn) = 1;
-
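/* Sketch of the target side of this interface: a hypothetical
   macro_fusion_pair_p hook that fuses a flags-setting compare with the
   conditional jump consuming it (modeled loosely on how x86-style targets
   wire this up; the function name is an assumption, not a real hook
   implementation).  Returning true makes the caller set SCHED_GROUP_P so
   the scheduler keeps the pair adjacent.  */

static bool
example_macro_fusion_pair_p (rtx_insn *condgen, rtx_insn *condjmp)
{
  if (!any_condjump_p (condjmp))
    return false;
  rtx set = single_set (condgen);
  /* Fuse only when CONDGEN sets a condition-code register that CONDJMP
     actually tests.  */
  return (set != NULL_RTX
	  && REG_P (SET_DEST (set))
	  && GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
	  && reg_referenced_p (SET_DEST (set), PATTERN (condjmp)));
}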
+/* Get the implicit reg pending clobbers for INSN and save them in TEMP. */
+void
+get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
+{
+ extract_insn (insn);
+ preprocess_constraints (insn);
+ alternative_mask preferred = get_preferred_alternatives (insn);
+ ira_implicitly_set_insn_hard_regs (temp, preferred);
+ *temp &= ~ira_no_alloc_regs;
}
/* Analyze an INSN with pattern X to find all dependencies. */
static void
-sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
+sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn)
{
RTX_CODE code = GET_CODE (x);
rtx link;
if (! reload_completed)
{
HARD_REG_SET temp;
-
- extract_insn (insn);
- preprocess_constraints (insn);
- ira_implicitly_set_insn_hard_regs (&temp);
- AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
- IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
+ get_implicit_reg_pending_clobbers (&temp, insn);
+ implicit_reg_pending_clobbers |= temp;
}
can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
&& code == SET);
/* Group compare and branch insns for macro-fusion. */
- if (targetm.sched.macro_fusion_p
+ if (!deps->readonly
+ && targetm.sched.macro_fusion_p
&& targetm.sched.macro_fusion_p ())
sched_macro_fuse_insns (insn);
= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
/* Make sure epilogue insn is scheduled after preceding jumps. */
+ add_dependence_list (insn, deps->last_pending_memory_flush, 1,
+ REG_DEP_ANTI, true);
add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
true);
}
sub = COND_EXEC_CODE (sub);
code = GET_CODE (sub);
}
- if (code == SET || code == CLOBBER)
+ else if (code == SET || code == CLOBBER)
sched_analyze_1 (deps, sub, insn);
else
sched_analyze_2 (deps, sub, insn);
if (JUMP_P (insn))
{
- rtx next;
- next = next_nonnote_nondebug_insn (insn);
+ rtx_insn *next = next_nonnote_nondebug_insn (insn);
+ /* ??? For tablejumps, the barrier may appear not immediately after
+ the jump, but after a label and a jump_table_data insn. */
+ if (next && LABEL_P (next) && NEXT_INSN (next)
+ && JUMP_TABLE_DATA_P (NEXT_INSN (next)))
+ next = NEXT_INSN (NEXT_INSN (next));
if (next && BARRIER_P (next))
reg_pending_barrier = MOVE_BARRIER;
else
if (DEBUG_INSN_P (insn))
{
rtx_insn *prev = deps->last_debug_insn;
- rtx u;
+ rtx_insn_list *u;
if (!deps->readonly)
deps->last_debug_insn = insn;
REG_DEP_ANTI, false);
if (!sel_sched_p ())
- for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)), REG_DEP_ANTI);
+ for (u = deps->last_pending_memory_flush; u; u = u->next ())
+ add_dependence (insn, u->insn (), REG_DEP_ANTI);
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
- || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
+ if (reg_last->uses_length >= param_max_pending_list_length
+ || reg_last->clobbers_length >= param_max_pending_list_length)
{
add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
REG_DEP_OUTPUT, false);
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
- || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
- SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ IOR_REG_SET_HRS (&deps->reg_last_in_use,
+ implicit_reg_pending_uses
+ | implicit_reg_pending_clobbers);
/* Set up the pending barrier found. */
deps->last_reg_pending_barrier = reg_pending_barrier;
{
if (deps->last_args_size)
add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
- deps->last_args_size = insn;
+ if (!deps->readonly)
+ deps->last_args_size = insn;
+ }
+
+ /* We must not mix prologue and epilogue insns. See PR78029. */
+ if (prologue_contains (insn))
+ {
+ add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true);
+ if (!deps->readonly)
+ {
+ if (deps->last_logue_was_epilogue)
+ free_INSN_LIST_list (&deps->last_prologue);
+ deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
+ deps->last_logue_was_epilogue = false;
+ }
+ }
+
+ if (epilogue_contains (insn))
+ {
+ add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true);
+ if (!deps->readonly)
+ {
+ if (!deps->last_logue_was_epilogue)
+ free_INSN_LIST_list (&deps->last_epilogue);
+ deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
+ deps->last_logue_was_epilogue = true;
+ }
}
}
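/* Sketch of the INSN_LIST idiom used by the prologue/epilogue tracking
   above, assuming the lists.c helpers: alloc_INSN_LIST pushes onto the
   head of a list, and free_INSN_LIST_list releases the whole chain and
   zeroes the pointer.  */

static void
example_track_insn (rtx_insn *insn, rtx_insn_list **listp)
{
  *listp = alloc_INSN_LIST (insn, *listp);	/* Prepend INSN.  */
}

static void
example_reset_list (rtx_insn_list **listp)
{
  free_INSN_LIST_list (listp);	/* Frees every node, sets *LISTP to 0.  */
}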
static bool
chain_to_prev_insn_p (rtx_insn *insn)
{
- rtx prev, x;
-
/* INSN forms a group with the previous instruction. */
if (SCHED_GROUP_P (insn))
return true;
part of R, the clobber was added specifically to help us track the
liveness of R. There's no point scheduling the clobber and leaving
INSN behind, especially if we move the clobber to another block. */
- prev = prev_nonnote_nondebug_insn (insn);
+ rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
if (prev
&& INSN_P (prev)
&& BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
&& GET_CODE (PATTERN (prev)) == CLOBBER)
{
- x = XEXP (PATTERN (prev), 0);
+ rtx x = XEXP (PATTERN (prev), 0);
if (set_of (x, insn))
return true;
}
/* Analyze INSN with DEPS as a context. */
void
-deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
+deps_analyze_insn (class deps_desc *deps, rtx_insn *insn)
{
if (sched_deps_info->start_insn)
sched_deps_info->start_insn (insn);
&& sel_insn_is_speculation_check (insn)))
{
/* Keep the list a reasonable size. */
- if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
- flush_pending_lists (deps, insn, true, true);
+ if (deps->pending_flush_length++ >= param_max_pending_list_length)
+ flush_pending_lists (deps, insn, true, true);
else
deps->pending_jump_insns
= alloc_INSN_LIST (insn, deps->pending_jump_insns);
}
else
{
+ function_abi callee_abi = insn_callee_abi (insn);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
/* A call may read and modify global register variables. */
if (global_regs[i])
Since we only have a choice between 'might be clobbered'
and 'definitely not clobbered', we must include all
partly call-clobbered registers here. */
- else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
- || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
+ else if (callee_abi.clobbers_at_least_part_of_reg_p (i))
SET_REGNO_REG_SET (reg_pending_clobbers, i);
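/* Sketch of the function_abi query pattern introduced above, assuming the
   function-abi.h interface: insn_callee_abi gives the ABI of this
   particular call site, which can be more precise than the target's
   default call-clobber set.  */

static void
example_call_clobbers (rtx_insn *call_insn, HARD_REG_SET *clobbered)
{
  function_abi abi = insn_callee_abi (call_insn);
  CLEAR_HARD_REG_SET (*clobbered);
  for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    /* Covers both fully and partly call-clobbered registers.  */
    if (abi.clobbers_at_least_part_of_reg_p (i))
      SET_HARD_REG_BIT (*clobbered, i);
}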
/* We don't know what set of fixed registers might be used
by the function, but it is certain that the stack pointer
/* Initialize DEPS for the new block beginning with HEAD. */
void
-deps_start_bb (struct deps_desc *deps, rtx_insn *head)
+deps_start_bb (class deps_desc *deps, rtx_insn *head)
{
gcc_assert (!deps->readonly);
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
dependencies for each insn. */
void
-sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
+sched_analyze (class deps_desc *deps, rtx_insn *head, rtx_insn *tail)
{
rtx_insn *insn;
\f
/* Initialize variables for region data dependence analysis.
When LAZY_REG_LAST is true, do not allocate reg_last array
- of struct deps_desc immediately. */
+ of class deps_desc immediately. */
void
-init_deps (struct deps_desc *deps, bool lazy_reg_last)
+init_deps (class deps_desc *deps, bool lazy_reg_last)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->in_post_call_group_p = not_post_call;
deps->last_debug_insn = 0;
deps->last_args_size = 0;
+ deps->last_prologue = 0;
+ deps->last_epilogue = 0;
+ deps->last_logue_was_epilogue = false;
deps->last_reg_pending_barrier = NOT_A_BARRIER;
deps->readonly = 0;
}
/* Init only reg_last field of DEPS, which was not allocated before as
we inited DEPS lazily. */
void
-init_deps_reg_last (struct deps_desc *deps)
+init_deps_reg_last (class deps_desc *deps)
{
gcc_assert (deps && deps->max_reg > 0);
gcc_assert (deps->reg_last == NULL);
/* Free insn lists found in DEPS. */
void
-free_deps (struct deps_desc *deps)
+free_deps (class deps_desc *deps)
{
unsigned i;
reg_set_iterator rsi;
/* Remove INSN from dependence contexts DEPS. */
void
-remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
+remove_from_deps (class deps_desc *deps, rtx_insn *insn)
{
int removed;
unsigned i;
removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
deps->pending_flush_length -= removed;
+ unsigned to_clear = -1U;
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
+ if (to_clear != -1U)
+ {
+ CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
+ to_clear = -1U;
+ }
struct deps_reg *reg_last = &deps->reg_last[i];
if (reg_last->uses)
remove_from_dependence_list (insn, &reg_last->uses);
remove_from_dependence_list (insn, &reg_last->clobbers);
if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
&& !reg_last->clobbers)
- CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ to_clear = i;
}
+ if (to_clear != -1U)
+ CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
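/* The deferred clearing above exists because EXECUTE_IF_SET_IN_REG_SET
   walks the underlying bitmap directly, and clearing the bit currently
   being visited could invalidate the iterator.  A minimal sketch of the
   same pattern, with the liveness test left abstract:  */

static void
example_prune_reg_set (regset set, bool (*dead_p) (unsigned int))
{
  unsigned int i, to_clear = -1U;
  reg_set_iterator rsi;
  EXECUTE_IF_SET_IN_REG_SET (set, 0, i, rsi)
    {
      if (to_clear != -1U)
	{
	  /* Safe: TO_CLEAR is no longer the iterator's current bit.  */
	  CLEAR_REGNO_REG_SET (set, to_clear);
	  to_clear = -1U;
	}
      if (dead_p (i))
	to_clear = i;	/* Defer clearing bit I until the next step.  */
    }
  if (to_clear != -1U)
    CLEAR_REGNO_REG_SET (set, to_clear);
}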
if (CALL_P (insn))
{
if (global_p)
{
- dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
- /* Allocate lists for one block at a time. */
- insns_in_block);
- dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
- /* Allocate nodes for one block at a time.
- We assume that average insn has
- 5 producers. */
- 5 * insns_in_block);
+ dl_pool = new object_allocator<_deps_list> ("deps_list");
+ /* Allocate lists for one block at a time. */
+ dn_pool = new object_allocator<_dep_node> ("dep_node");
+ /* Allocate nodes for one block at a time. */
}
}
sched_deps_finish (void)
{
gcc_assert (deps_pools_are_empty_p ());
- free_alloc_pool_if_empty (&dn_pool);
- free_alloc_pool_if_empty (&dl_pool);
- gcc_assert (dn_pool == NULL && dl_pool == NULL);
+ delete dn_pool;
+ delete dl_pool;
+ dn_pool = NULL;
+ dl_pool = NULL;
h_d_i_d.release ();
cache_size = 0;
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
- rtx r1, r2;
-
if (mem1 == mem2)
/* MEMs are the same - don't speculate. */
return MIN_DEP_WEAK;
- r1 = XEXP (mem1, 0);
- r2 = XEXP (mem2, 0);
+ rtx r1 = XEXP (mem1, 0);
+ rtx r2 = XEXP (mem2, 0);
+
+ if (sched_deps_info->use_cselib)
+ {
+ /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
+ dangling at this point, since we never preserve them. Instead we
+ canonicalize manually to get stable VALUEs out of hashing. */
+ if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
+ r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
+ if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
+ r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
+ }
if (r1 == r2
- || (REG_P (r1) && REG_P (r2)
- && REGNO (r1) == REGNO (r2)))
+ || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
/* Again, MEMs are the same. */
return MIN_DEP_WEAK;
- else if ((REG_P (r1) && !REG_P (r2))
- || (!REG_P (r1) && REG_P (r2)))
+ else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
/* Different addressing modes - reason to be more speculative,
than usual. */
return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
fprintf (stderr, "\n");
}
-#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
If RELAXED_P is true, then skip dep_weakness checks. */
static void
&& (ds & DEP_CONTROL)
&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
- /* HARD_DEP can not appear in dep_status of a link. */
+ /* HARD_DEP cannot appear in dep_status of a link. */
gcc_assert (!(ds & HARD_DEP));
/* Check that dependence status is set correctly when speculation is not
gcc_assert (ds & BEGIN_CONTROL);
}
}
-#endif /* ENABLE_CHECKING */
/* The following code discovers opportunities to switch a memory reference
and an increment by modifying the address. We ensure that this is done
if (RTX_FRAME_RELATED_P (insn) || !pat)
return false;
+ /* Do not allow breaking data dependencies for insns that are marked
+ with REG_STACK_CHECK. */
+ if (find_reg_note (insn, REG_STACK_CHECK, NULL))
+ return false;
+
/* Result must be single reg. */
if (!REG_P (SET_DEST (pat)))
return false;
if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
{
/* Note that the sign has already been reversed for !before_mem. */
-#ifdef STACK_GROWS_DOWNWARD
- return mii->inc_constant > 0;
-#else
- return mii->inc_constant < 0;
-#endif
+ if (STACK_GROWS_DOWNWARD)
+ return mii->inc_constant > 0;
+ else
+ return mii->inc_constant < 0;
}
return true;
}