/* Instruction scheduling pass. This file computes dependencies between
instructions.
- Copyright (C) 1992-2013 Free Software Foundation, Inc.
+ Copyright (C) 1992-2020 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
-#include "diagnostic-core.h"
+#include "backend.h"
+#include "target.h"
#include "rtl.h"
-#include "tree.h" /* FIXME: Used by call_may_noreturn_p. */
-#include "tm_p.h"
-#include "hard-reg-set.h"
-#include "regs.h"
-#include "function.h"
-#include "flags.h"
+#include "tree.h"
+#include "df.h"
#include "insn-config.h"
+#include "regs.h"
+#include "memmodel.h"
+#include "ira.h"
+#include "ira-int.h"
#include "insn-attr.h"
-#include "except.h"
-#include "recog.h"
-#include "emit-rtl.h"
+#include "cfgbuild.h"
#include "sched-int.h"
-#include "params.h"
#include "cselib.h"
-#include "ira.h"
-#include "target.h"
+#include "function-abi.h"
#ifdef INSN_SCHEDULING
-#ifdef ENABLE_CHECKING
-#define CHECK (true)
-#else
-#define CHECK (false)
-#endif
-
/* Holds current parameters for the dependency analyzer. */
struct sched_deps_info_def *sched_deps_info;
/* Init DEP with the arguments. */
void
-init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
+init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
DEP_PRO (dep) = pro;
DEP_CON (dep) = con;
While most of the scheduler (including targets) only need the major type
of the dependency, it is convenient to hide full dep_status from them. */
void
-init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
+init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
ds_t ds;
}
/* Pool to hold all dependency nodes (dep_node_t). */
-static alloc_pool dn_pool;
+static object_allocator<_dep_node> *dn_pool;
/* Number of dep_nodes out there. */
static int dn_pool_diff = 0;
static dep_node_t
create_dep_node (void)
{
- dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
+ dep_node_t n = dn_pool->allocate ();
dep_link_t back = DEP_NODE_BACK (n);
dep_link_t forw = DEP_NODE_FORW (n);
--dn_pool_diff;
- pool_free (dn_pool, n);
+ dn_pool->remove (n);
}
/* Pool to hold dependencies lists (deps_list_t). */
-static alloc_pool dl_pool;
+static object_allocator<_deps_list> *dl_pool;
/* Number of deps_lists out there. */
static int dl_pool_diff = 0;
static deps_list_t
create_deps_list (void)
{
- deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
+ deps_list_t l = dl_pool->allocate ();
DEPS_LIST_FIRST (l) = NULL;
DEPS_LIST_N_LINKS (l) = 0;
--dl_pool_diff;
- pool_free (dl_pool, l);
+ dl_pool->remove (l);
}
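/* Editor's note: a minimal sketch of the object_allocator pattern the two
   pools above now use (see gcc/alloc-pool.h).  The struct "foo" and the
   function below are hypothetical and not part of sched-deps.c.  */
#if 0
struct foo { int x; };
static object_allocator<foo> *foo_pool;

static void
foo_pool_example (void)
{
  foo_pool = new object_allocator<foo> ("foo pool");  /* replaces create_alloc_pool */
  foo *f = foo_pool->allocate ();                     /* replaces pool_alloc */
  f->x = 42;
  foo_pool->remove (f);                               /* replaces pool_free */
  delete foo_pool;                                    /* replaces free_alloc_pool */
  foo_pool = NULL;
}
#endif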
/* Return true if there is no dep_nodes and deps_lists out there.
has enough entries to represent a dependency on any other insn in
the insn chain. All bitmap for true dependencies cache is
allocated then the rest two ones are also allocated. */
-static bitmap_head *true_dependency_cache = NULL;
-static bitmap_head *output_dependency_cache = NULL;
-static bitmap_head *anti_dependency_cache = NULL;
-static bitmap_head *control_dependency_cache = NULL;
-static bitmap_head *spec_dependency_cache = NULL;
+static bitmap true_dependency_cache = NULL;
+static bitmap output_dependency_cache = NULL;
+static bitmap anti_dependency_cache = NULL;
+static bitmap control_dependency_cache = NULL;
+static bitmap spec_dependency_cache = NULL;
static int cache_size;
/* True if we should mark added dependencies as a non-register deps. */
static bool mark_as_hard;
static int deps_may_trap_p (const_rtx);
-static void add_dependence_1 (rtx, rtx, enum reg_note);
-static void add_dependence_list (rtx, rtx, int, enum reg_note, bool);
-static void add_dependence_list_and_free (struct deps_desc *, rtx,
- rtx *, int, enum reg_note, bool);
-static void delete_all_dependences (rtx);
-static void chain_to_prev_insn (rtx);
-
-static void flush_pending_lists (struct deps_desc *, rtx, int, int);
-static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
-static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
-static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
-
-static bool sched_has_condition_p (const_rtx);
+static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
+static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
+ enum reg_note, bool);
+static void add_dependence_list_and_free (class deps_desc *, rtx_insn *,
+ rtx_insn_list **, int, enum reg_note,
+ bool);
+static void delete_all_dependences (rtx_insn *);
+static void chain_to_prev_insn (rtx_insn *);
+
+static void flush_pending_lists (class deps_desc *, rtx_insn *, int, int);
+static void sched_analyze_1 (class deps_desc *, rtx, rtx_insn *);
+static void sched_analyze_2 (class deps_desc *, rtx, rtx_insn *);
+static void sched_analyze_insn (class deps_desc *, rtx, rtx_insn *);
+
+static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
-#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
-#endif
+
\f
/* Return nonzero if a load of the memory reference MEM can cause a trap. */
it is set to TRUE when the returned comparison should be reversed
to get the actual condition. */
static rtx
-sched_get_condition_with_rev_uncached (const_rtx insn, bool *rev)
+sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
rtx pat = PATTERN (insn);
rtx src;
find such a condition. The caller should make a copy of the condition
before using it. */
rtx
-sched_get_reverse_condition_uncached (const_rtx insn)
+sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
bool rev;
rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
We only do actual work the first time we come here for an insn; the
results are cached in INSN_CACHED_COND and INSN_REVERSE_COND. */
static rtx
-sched_get_condition_with_rev (const_rtx insn, bool *rev)
+sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
bool tmp;
/* True when we can find a condition under which INSN is executed. */
static bool
-sched_has_condition_p (const_rtx insn)
+sched_has_condition_p (const rtx_insn *insn)
{
return !! sched_get_condition_with_rev (insn, NULL);
}
/* Return true if insn1 and insn2 can never depend on one another because
the conditions under which they are executed are mutually exclusive. */
bool
-sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
+sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
rtx cond1, cond2;
bool rev1 = false, rev2 = false;
/* Return true if INSN can potentially be speculated with type DS. */
bool
-sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
+sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
if (HAS_INTERNAL_DEP (insn))
return false;
if (SCHED_GROUP_P (insn))
return false;
- if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
+ if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
return false;
if (side_effects_p (PATTERN (insn)))
/* Initialize data for INSN. */
void
-sd_init_insn (rtx insn)
+sd_init_insn (rtx_insn *insn)
{
INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
/* Free data for INSN. */
void
-sd_finish_insn (rtx insn)
+sd_finish_insn (rtx_insn *insn)
{
/* ??? It would be nice to deallocate dependency caches here. */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
- rtx elem = DEP_PRO (dep);
- rtx insn = DEP_CON (dep);
+ rtx_insn *elem = DEP_PRO (dep);
+ rtx_insn *insn = DEP_CON (dep);
gcc_assert (INSN_P (insn) && INSN_P (elem));
dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
dep_link_t link = DEP_NODE_BACK (node);
dep_t dep = DEP_NODE_DEP (node);
- rtx elem = DEP_PRO (dep);
- rtx insn = DEP_CON (dep);
+ rtx_insn *elem = DEP_PRO (dep);
+ rtx_insn *insn = DEP_CON (dep);
move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
&& DEP_PRO (new_dep) != DEP_CON (new_dep));
-#ifdef ENABLE_CHECKING
- check_dep (new_dep, mem1 != NULL);
-#endif
+ if (flag_checking)
+ check_dep (new_dep, mem1 != NULL);
if (true_dependency_cache != NULL)
{
switch (ask_dependency_caches (new_dep))
{
case DEP_PRESENT:
+ dep_t present_dep;
+ sd_iterator_def sd_it;
+
+ present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
+ DEP_CON (new_dep),
+ resolved_p, &sd_it);
+ DEP_MULTIPLE (present_dep) = 1;
return DEP_PRESENT;
case DEP_CHANGED:
deps_list_t *back_list_ptr,
deps_list_t *forw_list_ptr)
{
- rtx con = DEP_CON (dep);
+ rtx_insn *con = DEP_CON (dep);
if (!resolved_p)
{
dep_node_t n = create_dep_node ();
deps_list_t con_back_deps;
deps_list_t pro_forw_deps;
- rtx elem = DEP_PRO (dep);
- rtx insn = DEP_CON (dep);
+ rtx_insn *elem = DEP_PRO (dep);
+ rtx_insn *insn = DEP_CON (dep);
gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
-#ifdef ENABLE_CHECKING
- check_dep (dep, false);
-#endif
+ if (flag_checking)
+ check_dep (dep, false);
add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
{
dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
dep_t dep = DEP_NODE_DEP (node);
- rtx pro = DEP_PRO (dep);
- rtx con = DEP_CON (dep);
+ rtx_insn *pro = DEP_PRO (dep);
+ rtx_insn *con = DEP_CON (dep);
if (dep_spec_p (dep))
move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
{
dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
dep_t dep = DEP_NODE_DEP (node);
- rtx pro = DEP_PRO (dep);
- rtx con = DEP_CON (dep);
+ rtx_insn *pro = DEP_PRO (dep);
+ rtx_insn *con = DEP_CON (dep);
if (dep_spec_p (dep))
move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
/* Make TO depend on all the FROM's producers.
If RESOLVED_P is true add dependencies to the resolved lists. */
void
-sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
+sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
sd_list_types_def list_type;
sd_iterator_def sd_it;
{
dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
dep_t dep = DEP_NODE_DEP (n);
- rtx pro = DEP_PRO (dep);
- rtx con = DEP_CON (dep);
+ rtx_insn *pro = DEP_PRO (dep);
+ rtx_insn *con = DEP_CON (dep);
deps_list_t con_back_deps;
deps_list_t pro_forw_deps;
impossible; otherwise we add additional true dependencies on the
INSN_COND_DEPS list of the jump (which PRO must be). */
void
-add_dependence (rtx con, rtx pro, enum reg_note dep_type)
+add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
if (dep_type == REG_DEP_CONTROL
&& !(current_sched_info->flags & DO_PREDICATION))
condition. */
if (dep_type == REG_DEP_CONTROL)
{
- rtx real_pro = pro;
- rtx other = real_insn_for_shadow (real_pro);
+ rtx_insn *real_pro = pro;
+ rtx_insn *other = real_insn_for_shadow (real_pro);
rtx cond;
if (other != NULL_RTX)
true if DEP_NONREG should be set on newly created dependencies. */
static void
-add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type,
- bool hard)
+add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
+ enum reg_note dep_type, bool hard)
{
mark_as_hard = hard;
- for (; list; list = XEXP (list, 1))
+ for (; list; list = list->next ())
{
- if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
- add_dependence (insn, XEXP (list, 0), dep_type);
+ if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
+ add_dependence (insn, list->insn (), dep_type);
}
mark_as_hard = false;
}
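/* Editor's note: a minimal sketch of the rtx_insn_list walk used in
   add_dependence_list above.  The function "count_list_insns" is
   hypothetical; insn () and next () are the rtl.h accessors that replace
   XEXP (list, 0) and XEXP (list, 1) on these lists.  */
#if 0
static int
count_list_insns (rtx_insn_list *list)
{
  int n = 0;
  for (; list; list = list->next ())	/* formerly XEXP (list, 1) */
    if (INSN_P (list->insn ()))		/* formerly XEXP (list, 0) */
      n++;
  return n;
}
#endif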
newly created dependencies. */
static void
-add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
+add_dependence_list_and_free (class deps_desc *deps, rtx_insn *insn,
+ rtx_insn_list **listp,
int uncond, enum reg_note dep_type, bool hard)
{
add_dependence_list (insn, *listp, uncond, dep_type, hard);
occurrences removed. */
static int
-remove_from_dependence_list (rtx insn, rtx* listp)
+remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
int removed = 0;
while (*listp)
{
- if (XEXP (*listp, 0) == insn)
+ if ((*listp)->insn () == insn)
{
remove_free_INSN_LIST_node (listp);
removed++;
continue;
}
- listp = &XEXP (*listp, 1);
+ listp = (rtx_insn_list **)&XEXP (*listp, 1);
}
return removed;
/* Same as above, but process two lists at once. */
static int
-remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
+remove_from_both_dependence_lists (rtx_insn *insn,
+ rtx_insn_list **listp,
+ rtx_expr_list **exprp)
{
int removed = 0;
continue;
}
- listp = &XEXP (*listp, 1);
- exprp = &XEXP (*exprp, 1);
+ listp = (rtx_insn_list **)&XEXP (*listp, 1);
+ exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
}
return removed;
/* Clear all dependencies for an insn. */
static void
-delete_all_dependences (rtx insn)
+delete_all_dependences (rtx_insn *insn)
{
sd_iterator_def sd_it;
dep_t dep;
the previous nonnote insn. */
static void
-chain_to_prev_insn (rtx insn)
+chain_to_prev_insn (rtx_insn *insn)
{
sd_iterator_def sd_it;
dep_t dep;
- rtx prev_nonnote;
+ rtx_insn *prev_nonnote;
FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
{
- rtx i = insn;
- rtx pro = DEP_PRO (dep);
+ rtx_insn *i = insn;
+ rtx_insn *pro = DEP_PRO (dep);
do
{
so that we can do memory aliasing on it. */
static void
-add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
- rtx insn, rtx mem)
+add_insn_mem_dependence (class deps_desc *deps, bool read_p,
+ rtx_insn *insn, rtx mem)
{
- rtx *insn_list;
- rtx *mem_list;
- rtx link;
+ rtx_insn_list **insn_list;
+ rtx_insn_list *insn_node;
+ rtx_expr_list **mem_list;
+ rtx_expr_list *mem_node;
gcc_assert (!deps->readonly);
if (read_p)
deps->pending_write_list_length++;
}
- link = alloc_INSN_LIST (insn, *insn_list);
- *insn_list = link;
+ insn_node = alloc_INSN_LIST (insn, *insn_list);
+ *insn_list = insn_node;
if (sched_deps_info->use_cselib)
{
XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
GET_MODE (mem), insn);
}
- link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
- *mem_list = link;
+ mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
+ *mem_list = mem_node;
}
/* Make a dependency between every memory reference on the pending lists
dependencies for a read operation, similarly with FOR_WRITE. */
static void
-flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
+flush_pending_lists (class deps_desc *deps, rtx_insn *insn, int for_read,
int for_write)
{
if (for_write)
}
\f
/* Instruction which dependencies we are analyzing. */
-static rtx cur_insn = NULL_RTX;
+static rtx_insn *cur_insn = NULL;
/* Implement hooks for haifa scheduler. */
static void
-haifa_start_insn (rtx insn)
+haifa_start_insn (rtx_insn *insn)
{
gcc_assert (insn && !cur_insn);
}
static void
-haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
+haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
{
if (!(ds & SPECULATIVE))
{
}
static void
-haifa_note_dep (rtx elem, ds_t ds)
+haifa_note_dep (rtx_insn *elem, ds_t ds)
{
dep_def _dep;
dep_t dep = &_dep;
}
static void
-note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
+note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
{
if (sched_deps_info->note_mem_dep)
sched_deps_info->note_mem_dep (m1, m2, e, ds);
}
static void
-note_dep (rtx e, ds_t ds)
+note_dep (rtx_insn *e, ds_t ds)
{
if (sched_deps_info->note_dep)
sched_deps_info->note_dep (e, ds);
/* Allocate and return reg_use_data structure for REGNO and INSN. */
static struct reg_use_data *
-create_insn_reg_use (int regno, rtx insn)
+create_insn_reg_use (int regno, rtx_insn *insn)
{
struct reg_use_data *use;
/* Set up insn register uses for INSN and dependency context DEPS. */
static void
-setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
+setup_insn_reg_uses (class deps_desc *deps, rtx_insn *insn)
{
unsigned i;
reg_set_iterator rsi;
- rtx list;
struct reg_use_data *use, *use2, *next;
struct deps_reg *reg_last;
reg_last = &deps->reg_last[i];
/* Create the cycle list of uses. */
- for (list = reg_last->uses; list; list = XEXP (list, 1))
+ for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
{
- use2 = create_insn_reg_use (i, XEXP (list, 0));
+ use2 = create_insn_reg_use (i, list->insn ());
next = use->next_regno_use;
use->next_regno_use = use2;
use2->next_regno_use = next;
regno = REGNO (reg);
if (regno < FIRST_PSEUDO_REGISTER)
- mark_insn_hard_regno_birth (insn, regno,
- hard_regno_nregs[regno][GET_MODE (reg)],
+ mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
clobber_p, unused_p);
else
mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
regno = REGNO (reg);
if (regno < FIRST_PSEUDO_REGISTER)
- mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
+ mark_hard_regno_death (regno, REG_NREGS (reg));
else
mark_pseudo_death (regno);
}
/* Set up reg pressure info related to INSN. */
void
-init_insn_reg_pressure_info (rtx insn)
+init_insn_reg_pressure_info (rtx_insn *insn)
{
int i, len;
enum reg_class cl;
reg_pressure_info[cl].change = 0;
}
- note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
+ note_stores (insn, mark_insn_reg_clobber, insn);
- note_stores (PATTERN (insn), mark_insn_reg_store, insn);
+ note_stores (insn, mark_insn_reg_store, insn);
-#ifdef AUTO_INC_DEC
- for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
- if (REG_NOTE_KIND (link) == REG_INC)
- mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
-#endif
+ if (AUTO_INC_DEC)
+ for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+ if (REG_NOTE_KIND (link) == REG_INC)
+ mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
if (REG_NOTE_KIND (link) == REG_DEAD)
/* Extend reg info for the deps context DEPS given that
we have just generated a register numbered REGNO. */
static void
-extend_deps_reg_info (struct deps_desc *deps, int regno)
+extend_deps_reg_info (class deps_desc *deps, int regno)
{
int max_regno = regno + 1;
CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
static void
-sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
- enum rtx_code ref, rtx insn)
+sched_analyze_reg (class deps_desc *deps, int regno, machine_mode mode,
+ enum rtx_code ref, rtx_insn *insn)
{
/* We could emit new pseudos in renaming. Extend the reg structures. */
if (!reload_completed && sel_sched_p ()
If so, mark all of them just like the first. */
if (regno < FIRST_PSEUDO_REGISTER)
{
- int i = hard_regno_nregs[regno][mode];
+ int i = hard_regno_nregs (regno, mode);
if (ref == SET)
{
while (--i >= 0)
destination of X, and reads of everything mentioned. */
static void
-sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
+sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
rtx dest = XEXP (x, 0);
enum rtx_code code = GET_CODE (x);
{
if (GET_CODE (dest) == STRICT_LOW_PART
|| GET_CODE (dest) == ZERO_EXTRACT
- || df_read_modify_subreg_p (dest))
+ || read_modify_subreg_p (dest))
{
/* These both read and modify the result. We must handle
them as writes to get proper dependencies for following
if (REG_P (dest))
{
int regno = REGNO (dest);
- enum machine_mode mode = GET_MODE (dest);
+ machine_mode mode = GET_MODE (dest);
sched_analyze_reg (deps, regno, mode, code, insn);
if (sched_deps_info->use_cselib)
{
- enum machine_mode address_mode = get_address_mode (dest);
+ machine_mode address_mode = get_address_mode (dest);
t = shallow_copy_rtx (dest);
cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
/* Pending lists can't get larger with a readonly context. */
if (!deps->readonly
&& ((deps->pending_read_list_length + deps->pending_write_list_length)
- > MAX_PENDING_LIST_LENGTH))
+ >= param_max_pending_list_length))
{
/* Flush all pending reads and writes to prevent the pending lists
from getting any larger. Insn scheduling runs too slowly when
}
else
{
- rtx pending, pending_mem;
+ rtx_insn_list *pending;
+ rtx_expr_list *pending_mem;
pending = deps->pending_read_insns;
pending_mem = deps->pending_read_mems;
while (pending)
{
- if (anti_dependence (XEXP (pending_mem, 0), t)
- && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ if (anti_dependence (pending_mem->element (), t)
+ && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
+ note_mem_dep (t, pending_mem->element (), pending->insn (),
DEP_ANTI);
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
+ pending = pending->next ();
+ pending_mem = pending_mem->next ();
}
pending = deps->pending_write_insns;
pending_mem = deps->pending_write_mems;
while (pending)
{
- if (output_dependence (XEXP (pending_mem, 0), t)
- && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ if (output_dependence (pending_mem->element (), t)
+ && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
+ note_mem_dep (t, pending_mem->element (),
+ pending->insn (),
DEP_OUTPUT);
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
+ pending = pending->next ();
+ pending_mem = pending_mem->next ();
}
add_dependence_list (insn, deps->last_pending_memory_flush, 1,
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
-sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
+sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
int i;
int j;
return;
-#ifdef HAVE_cc0
case CC0:
+ if (!HAVE_cc0)
+ gcc_unreachable ();
+
/* User of CC0 depends on immediately preceding insn. */
SCHED_GROUP_P (insn) = 1;
/* Don't move CC0 setter to another block (it can set up the
sched_deps_info->finish_rhs ();
return;
-#endif
case REG:
{
int regno = REGNO (x);
- enum machine_mode mode = GET_MODE (x);
+ machine_mode mode = GET_MODE (x);
sched_analyze_reg (deps, regno, mode, USE, insn);
case MEM:
{
/* Reading memory. */
- rtx u;
- rtx pending, pending_mem;
+ rtx_insn_list *u;
+ rtx_insn_list *pending;
+ rtx_expr_list *pending_mem;
rtx t = x;
if (sched_deps_info->use_cselib)
{
- enum machine_mode address_mode = get_address_mode (t);
+ machine_mode address_mode = get_address_mode (t);
t = shallow_copy_rtx (t);
cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
pending_mem = deps->pending_read_mems;
while (pending)
{
- if (read_dependence (XEXP (pending_mem, 0), t)
+ if (read_dependence (pending_mem->element (), t)
&& ! sched_insns_conditions_mutex_p (insn,
- XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ pending->insn ()))
+ note_mem_dep (t, pending_mem->element (),
+ pending->insn (),
DEP_ANTI);
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
+ pending = pending->next ();
+ pending_mem = pending_mem->next ();
}
pending = deps->pending_write_insns;
pending_mem = deps->pending_write_mems;
while (pending)
{
- if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
+ if (true_dependence (pending_mem->element (), VOIDmode, t)
&& ! sched_insns_conditions_mutex_p (insn,
- XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ pending->insn ()))
+ note_mem_dep (t, pending_mem->element (),
+ pending->insn (),
sched_deps_info->generate_spec_deps
? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
+ pending = pending->next ();
+ pending_mem = pending_mem->next ();
}
- for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ for (u = deps->last_pending_memory_flush; u; u = u->next ())
+ add_dependence (insn, u->insn (), REG_DEP_ANTI);
- for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
+ for (u = deps->pending_jump_insns; u; u = u->next ())
if (deps_may_trap_p (x))
{
if ((sched_deps_info->generate_spec_deps)
ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
MAX_DEP_WEAK);
- note_dep (XEXP (u, 0), ds);
+ note_dep (u->insn (), ds);
}
else
- add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
+ add_dependence (insn, u->insn (), REG_DEP_CONTROL);
}
}
{
if ((deps->pending_read_list_length
+ deps->pending_write_list_length)
- > MAX_PENDING_LIST_LENGTH
+ >= param_max_pending_list_length
&& !DEBUG_INSN_P (insn))
flush_pending_lists (deps, insn, true, true);
add_insn_mem_dependence (deps, true, insn, x);
return;
}
- /* Force pending stores to memory in case a trap handler needs them. */
+ /* Force pending stores to memory in case a trap handler needs them.
+ Also force pending loads from memory; loads and stores can segfault
+ and the signal handler won't be triggered if the trap insn was moved
+ above load or store insn. */
case TRAP_IF:
- flush_pending_lists (deps, insn, true, false);
+ flush_pending_lists (deps, insn, true, true);
break;
case PREFETCH:
Consider for instance a volatile asm that changes the fpu rounding
mode. An insn should not be moved across this even if it only uses
pseudo-regs because it might give an incorrectly rounded result. */
- if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
+ if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
+ && !DEBUG_INSN_P (insn))
reg_pending_barrier = TRUE_BARRIER;
/* For all ASM_OPERANDS, we must traverse the vector of input operands.
- We can not just fall through here since then we would be confused
+ We cannot just fall through here since then we would be confused
by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
traditional asms unlike their normal usage. */
sched_deps_info->finish_rhs ();
}
+/* Try to group two fusible insns together to prevent scheduler
+ from scheduling them apart. */
+
+static void
+sched_macro_fuse_insns (rtx_insn *insn)
+{
+ rtx_insn *prev;
+ /* No target hook would return true for debug insn as any of the
+ hook operand, and with very large sequences of only debug insns
+ where on each we call sched_macro_fuse_insns it has quadratic
+ compile time complexity. */
+ if (DEBUG_INSN_P (insn))
+ return;
+ prev = prev_nonnote_nondebug_insn (insn);
+ if (!prev)
+ return;
+
+ if (any_condjump_p (insn))
+ {
+ unsigned int condreg1, condreg2;
+ rtx cc_reg_1;
+ if (targetm.fixed_condition_code_regs (&condreg1, &condreg2))
+ {
+ cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
+ if (reg_referenced_p (cc_reg_1, PATTERN (insn))
+ && modified_in_p (cc_reg_1, prev))
+ {
+ if (targetm.sched.macro_fusion_pair_p (prev, insn))
+ SCHED_GROUP_P (insn) = 1;
+ return;
+ }
+ }
+ }
+
+ if (single_set (insn) && single_set (prev))
+ {
+ if (targetm.sched.macro_fusion_pair_p (prev, insn))
+ SCHED_GROUP_P (insn) = 1;
+ }
+}
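/* Editor's note: an illustrative, target-dependent case of the fusion
   handled above, assuming a target whose sched.macro_fusion_pair_p hook
   accepts compare-and-branch pairs:

     insn A:  (set (reg flags) (compare (reg r1) (const_int 0)))
     insn B:  (jump_insn ... (if_then_else (eq (reg flags) (const_int 0)) ...))

   Setting SCHED_GROUP_P on B keeps A and B adjacent in the schedule, so the
   hardware can fuse them into a single macro-op.  */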
+
+/* Get the implicit reg pending clobbers for INSN and save them in TEMP. */
+void
+get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
+{
+ extract_insn (insn);
+ preprocess_constraints (insn);
+ alternative_mask preferred = get_preferred_alternatives (insn);
+ ira_implicitly_set_insn_hard_regs (temp, preferred);
+ *temp &= ~ira_no_alloc_regs;
+}
+
/* Analyze an INSN with pattern X to find all dependencies. */
static void
-sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
+sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn)
{
RTX_CODE code = GET_CODE (x);
rtx link;
if (! reload_completed)
{
HARD_REG_SET temp;
-
- extract_insn (insn);
- preprocess_constraints ();
- ira_implicitly_set_insn_hard_regs (&temp);
- AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
- IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
+ get_implicit_reg_pending_clobbers (&temp, insn);
+ implicit_reg_pending_clobbers |= temp;
}
can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
&& code == SET);
+ /* Group compare and branch insns for macro-fusion. */
+ if (!deps->readonly
+ && targetm.sched.macro_fusion_p
+ && targetm.sched.macro_fusion_p ())
+ sched_macro_fuse_insns (insn);
+
if (may_trap_p (x))
/* Avoid moving trapping instructions across function calls that might
not always return. */
= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
/* Make sure epilogue insn is scheduled after preceding jumps. */
+ add_dependence_list (insn, deps->last_pending_memory_flush, 1,
+ REG_DEP_ANTI, true);
add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
true);
}
sub = COND_EXEC_CODE (sub);
code = GET_CODE (sub);
}
 if (code == SET || code == CLOBBER)
sched_analyze_1 (deps, sub, insn);
else
sched_analyze_2 (deps, sub, insn);
if (JUMP_P (insn))
{
- rtx next;
- next = next_nonnote_nondebug_insn (insn);
+ rtx_insn *next = next_nonnote_nondebug_insn (insn);
+ /* ??? For tablejumps, the barrier may appear not immediately after
+ the jump, but after a label and a jump_table_data insn. */
+ if (next && LABEL_P (next) && NEXT_INSN (next)
+ && JUMP_TABLE_DATA_P (NEXT_INSN (next)))
+ next = NEXT_INSN (NEXT_INSN (next));
if (next && BARRIER_P (next))
reg_pending_barrier = MOVE_BARRIER;
else
{
- rtx pending, pending_mem;
+ rtx_insn_list *pending;
+ rtx_expr_list *pending_mem;
if (sched_deps_info->compute_jump_reg_dependencies)
{
pending_mem = deps->pending_write_mems;
while (pending)
{
- if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
+ if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
+ add_dependence (insn, pending->insn (),
+ REG_DEP_OUTPUT);
+ pending = pending->next ();
+ pending_mem = pending_mem->next ();
}
pending = deps->pending_read_insns;
pending_mem = deps->pending_read_mems;
while (pending)
{
- if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
- && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
- pending = XEXP (pending, 1);
- pending_mem = XEXP (pending_mem, 1);
+ if (MEM_VOLATILE_P (pending_mem->element ())
+ && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
+ add_dependence (insn, pending->insn (),
+ REG_DEP_OUTPUT);
+ pending = pending->next ();
+ pending_mem = pending_mem->next ();
}
add_dependence_list (insn, deps->last_pending_memory_flush, 1,
/* Add register dependencies for insn. */
if (DEBUG_INSN_P (insn))
{
- rtx prev = deps->last_debug_insn;
- rtx u;
+ rtx_insn *prev = deps->last_debug_insn;
+ rtx_insn_list *u;
if (!deps->readonly)
deps->last_debug_insn = insn;
REG_DEP_ANTI, false);
if (!sel_sched_p ())
- for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ for (u = deps->last_pending_memory_flush; u; u = u->next ())
+ add_dependence (insn, u->insn (), REG_DEP_ANTI);
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
{
rtx other = XEXP (list, 0);
if (INSN_CACHED_COND (other) != const_true_rtx
- && refers_to_regno_p (i, i + 1, INSN_CACHED_COND (other), NULL))
+ && refers_to_regno_p (i, INSN_CACHED_COND (other)))
INSN_CACHED_COND (other) = const_true_rtx;
}
}
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
- || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
+ if (reg_last->uses_length >= param_max_pending_list_length
+ || reg_last->clobbers_length >= param_max_pending_list_length)
{
add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
REG_DEP_OUTPUT, false);
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
- || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
- SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ IOR_REG_SET_HRS (&deps->reg_last_in_use,
+ implicit_reg_pending_uses
+ | implicit_reg_pending_clobbers);
/* Set up the pending barrier found. */
deps->last_reg_pending_barrier = reg_pending_barrier;
change_spec_dep_to_hard (sd_it);
}
}
+
+ /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
+ honor their original ordering. */
+ if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
+ {
+ if (deps->last_args_size)
+ add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
+ if (!deps->readonly)
+ deps->last_args_size = insn;
+ }
+
+ /* We must not mix prologue and epilogue insns. See PR78029. */
+ if (prologue_contains (insn))
+ {
+ add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true);
+ if (!deps->readonly)
+ {
+ if (deps->last_logue_was_epilogue)
+ free_INSN_LIST_list (&deps->last_prologue);
+ deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
+ deps->last_logue_was_epilogue = false;
+ }
+ }
+
+ if (epilogue_contains (insn))
+ {
+ add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true);
+ if (!deps->readonly)
+ {
+ if (!deps->last_logue_was_epilogue)
+ free_INSN_LIST_list (&deps->last_epilogue);
+ deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
+ deps->last_logue_was_epilogue = true;
+ }
+ }
}
/* Return TRUE if INSN might not always return normally (e.g. call exit,
/* FIXME: Why can't this function just use flags_from_decl_or_type and
test for ECF_NORETURN? */
static bool
-call_may_noreturn_p (rtx insn)
+call_may_noreturn_p (rtx_insn *insn)
{
rtx call;
instruction of that group. */
static bool
-chain_to_prev_insn_p (rtx insn)
+chain_to_prev_insn_p (rtx_insn *insn)
{
- rtx prev, x;
-
/* INSN forms a group with the previous instruction. */
if (SCHED_GROUP_P (insn))
return true;
part of R, the clobber was added specifically to help us track the
liveness of R. There's no point scheduling the clobber and leaving
INSN behind, especially if we move the clobber to another block. */
- prev = prev_nonnote_nondebug_insn (insn);
+ rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
if (prev
&& INSN_P (prev)
&& BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
&& GET_CODE (PATTERN (prev)) == CLOBBER)
{
- x = XEXP (PATTERN (prev), 0);
+ rtx x = XEXP (PATTERN (prev), 0);
if (set_of (x, insn))
return true;
}
/* Analyze INSN with DEPS as a context. */
void
-deps_analyze_insn (struct deps_desc *deps, rtx insn)
+deps_analyze_insn (class deps_desc *deps, rtx_insn *insn)
{
if (sched_deps_info->start_insn)
sched_deps_info->start_insn (insn);
rtx t;
sched_get_condition_with_rev (insn, NULL);
t = INSN_CACHED_COND (insn);
- INSN_COND_DEPS (insn) = NULL_RTX;
+ INSN_COND_DEPS (insn) = NULL;
if (reload_completed
&& (current_sched_info->flags & DO_PREDICATION)
&& COMPARISON_P (t)
{
unsigned int regno;
int nregs;
+ rtx_insn_list *cond_deps = NULL;
t = XEXP (t, 0);
regno = REGNO (t);
- nregs = hard_regno_nregs[regno][GET_MODE (t)];
- t = NULL_RTX;
+ nregs = REG_NREGS (t);
while (nregs-- > 0)
{
struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
- t = concat_INSN_LIST (reg_last->sets, t);
- t = concat_INSN_LIST (reg_last->clobbers, t);
- t = concat_INSN_LIST (reg_last->implicit_sets, t);
+ cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
+ cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
+ cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
}
- INSN_COND_DEPS (insn) = t;
+ INSN_COND_DEPS (insn) = cond_deps;
}
}
&& sel_insn_is_speculation_check (insn)))
{
/* Keep the list a reasonable size. */
- if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
- flush_pending_lists (deps, insn, true, true);
+ if (deps->pending_flush_length++ >= param_max_pending_list_length)
+ flush_pending_lists (deps, insn, true, true);
else
deps->pending_jump_insns
= alloc_INSN_LIST (insn, deps->pending_jump_insns);
}
else
{
+ function_abi callee_abi = insn_callee_abi (insn);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
/* A call may read and modify global register variables. */
if (global_regs[i])
Since we only have a choice between 'might be clobbered'
and 'definitely not clobbered', we must include all
partly call-clobbered registers here. */
- else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
- || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
+ else if (callee_abi.clobbers_at_least_part_of_reg_p (i))
SET_REGNO_REG_SET (reg_pending_clobbers, i);
/* We don't know what set of fixed registers might be used
by the function, but it is certain that the stack pointer
/* Initialize DEPS for the new block beginning with HEAD. */
void
-deps_start_bb (struct deps_desc *deps, rtx head)
+deps_start_bb (class deps_desc *deps, rtx_insn *head)
{
gcc_assert (!deps->readonly);
hard registers correct. */
if (! reload_completed && !LABEL_P (head))
{
- rtx insn = prev_nonnote_nondebug_insn (head);
+ rtx_insn *insn = prev_nonnote_nondebug_insn (head);
if (insn && CALL_P (insn))
deps->in_post_call_group_p = post_call_initial;
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
dependencies for each insn. */
void
-sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
+sched_analyze (class deps_desc *deps, rtx_insn *head, rtx_insn *tail)
{
- rtx insn;
+ rtx_insn *insn;
if (sched_deps_info->use_cselib)
cselib_init (CSELIB_RECORD_MEMORY);
{
/* And initialize deps_lists. */
sd_init_insn (insn);
+ /* Clean up SCHED_GROUP_P which may be set by last
+ scheduler pass. */
+ if (SCHED_GROUP_P (insn))
+ SCHED_GROUP_P (insn) = 0;
}
deps_analyze_insn (deps, insn);
/* Helper for sched_free_deps ().
Delete INSN's (RESOLVED_P) backward dependencies. */
static void
-delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
+delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
{
sd_iterator_def sd_it;
dep_t dep;
/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
deps_lists. */
void
-sched_free_deps (rtx head, rtx tail, bool resolved_p)
+sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
{
- rtx insn;
- rtx next_tail = NEXT_INSN (tail);
+ rtx_insn *insn;
+ rtx_insn *next_tail = NEXT_INSN (tail);
/* We make two passes since some insns may be scheduled before their
dependencies are resolved. */
\f
/* Initialize variables for region data dependence analysis.
When LAZY_REG_LAST is true, do not allocate reg_last array
- of struct deps_desc immediately. */
+ of class deps_desc immediately. */
void
-init_deps (struct deps_desc *deps, bool lazy_reg_last)
+init_deps (class deps_desc *deps, bool lazy_reg_last)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->sched_before_next_jump = 0;
deps->in_post_call_group_p = not_post_call;
deps->last_debug_insn = 0;
+ deps->last_args_size = 0;
+ deps->last_prologue = 0;
+ deps->last_epilogue = 0;
+ deps->last_logue_was_epilogue = false;
deps->last_reg_pending_barrier = NOT_A_BARRIER;
deps->readonly = 0;
}
/* Init only reg_last field of DEPS, which was not allocated before as
we inited DEPS lazily. */
void
-init_deps_reg_last (struct deps_desc *deps)
+init_deps_reg_last (class deps_desc *deps)
{
gcc_assert (deps && deps->max_reg > 0);
gcc_assert (deps->reg_last == NULL);
/* Free insn lists found in DEPS. */
void
-free_deps (struct deps_desc *deps)
+free_deps (class deps_desc *deps)
{
unsigned i;
reg_set_iterator rsi;
/* Remove INSN from dependence contexts DEPS. */
void
-remove_from_deps (struct deps_desc *deps, rtx insn)
+remove_from_deps (class deps_desc *deps, rtx_insn *insn)
{
int removed;
unsigned i;
removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
deps->pending_flush_length -= removed;
+ unsigned to_clear = -1U;
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
+ if (to_clear != -1U)
+ {
+ CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
+ to_clear = -1U;
+ }
struct deps_reg *reg_last = &deps->reg_last[i];
if (reg_last->uses)
remove_from_dependence_list (insn, &reg_last->uses);
remove_from_dependence_list (insn, &reg_last->clobbers);
if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
&& !reg_last->clobbers)
- CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ to_clear = i;
}
+ if (to_clear != -1U)
+ CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
if (CALL_P (insn))
{
if (global_p)
{
- dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
- /* Allocate lists for one block at a time. */
- insns_in_block);
- dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
- /* Allocate nodes for one block at a time.
- We assume that average insn has
- 5 producers. */
- 5 * insns_in_block);
+ dl_pool = new object_allocator<_deps_list> ("deps_list");
+ /* Allocate lists for one block at a time. */
+ dn_pool = new object_allocator<_dep_node> ("dep_node");
+ /* Allocate nodes for one block at a time. */
}
}
sched_deps_finish (void)
{
gcc_assert (deps_pools_are_empty_p ());
- free_alloc_pool_if_empty (&dn_pool);
- free_alloc_pool_if_empty (&dl_pool);
- gcc_assert (dn_pool == NULL && dl_pool == NULL);
+ delete dn_pool;
+ delete dl_pool;
+ dn_pool = NULL;
+ dl_pool = NULL;
h_d_i_d.release ();
cache_size = 0;
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
- rtx r1, r2;
-
if (mem1 == mem2)
/* MEMs are the same - don't speculate. */
return MIN_DEP_WEAK;
- r1 = XEXP (mem1, 0);
- r2 = XEXP (mem2, 0);
+ rtx r1 = XEXP (mem1, 0);
+ rtx r2 = XEXP (mem2, 0);
+
+ if (sched_deps_info->use_cselib)
+ {
+ /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
+ dangling at this point, since we never preserve them. Instead we
+ canonicalize manually to get stable VALUEs out of hashing. */
+ if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
+ r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
+ if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
+ r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
+ }
if (r1 == r2
- || (REG_P (r1) && REG_P (r2)
- && REGNO (r1) == REGNO (r2)))
+ || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
/* Again, MEMs are the same. */
return MIN_DEP_WEAK;
- else if ((REG_P (r1) && !REG_P (r2))
- || (!REG_P (r1) && REG_P (r2)))
+ else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
/* Different addressing modes - reason to be more speculative,
than usual. */
return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
This function can handle same INSN and ELEM (INSN == ELEM).
It is a convenience wrapper. */
static void
-add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
+add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
{
ds_t ds;
bool internal;
fprintf (stderr, "\n");
}
-#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
If RELAXED_P is true, then skip dep_weakness checks. */
static void
&& (ds & DEP_CONTROL)
&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
- /* HARD_DEP can not appear in dep_status of a link. */
+ /* HARD_DEP cannot appear in dep_status of a link. */
gcc_assert (!(ds & HARD_DEP));
/* Check that dependence status is set correctly when speculation is not
gcc_assert (ds & BEGIN_CONTROL);
}
}
-#endif /* ENABLE_CHECKING */
/* The following code discovers opportunities to switch a memory reference
and an increment by modifying the address. We ensure that this is done
insns which depend on each other, but could possibly be interchanged. */
struct mem_inc_info
{
- rtx inc_insn;
- rtx mem_insn;
+ rtx_insn *inc_insn;
+ rtx_insn *mem_insn;
rtx *mem_loc;
/* A register occurring in the memory address for which we wish to break
a corresponding memory reference. */
static bool
-parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
+parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
{
rtx pat = single_set (insn);
rtx src, cst;
if (RTX_FRAME_RELATED_P (insn) || !pat)
return false;
+ /* Do not allow breaking data dependencies for insns that are marked
+ with REG_STACK_CHECK. */
+ if (find_reg_note (insn, REG_STACK_CHECK, NULL))
+ return false;
+
/* Result must be single reg. */
if (!REG_P (SET_DEST (pat)))
return false;
if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
{
/* Note that the sign has already been reversed for !before_mem. */
-#ifdef STACK_GROWS_DOWNWARD
- return mii->inc_constant > 0;
-#else
- return mii->inc_constant < 0;
-#endif
+ if (STACK_GROWS_DOWNWARD)
+ return mii->inc_constant > 0;
+ else
+ return mii->inc_constant < 0;
}
return true;
}
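/* Editor's note: an illustrative sketch (hypothetical registers and
   constants) of the rewrite that parse_add_or_inc/find_inc set up.  Given
   an increment that precedes a memory reference:

     insn A:  (set (reg r1) (plus (reg r2) (const_int 4)))
     insn B:  ... (mem (reg r1)) ...

   B's address can be recomputed as (plus (reg r2) (const_int 4)), which no
   longer uses r1, so the register dependence of B on A is recorded as
   replaceable and the scheduler may interchange the two insns.  */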
while (sd_iterator_cond (&sd_it, &dep))
{
dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
- rtx pro = DEP_PRO (dep);
- rtx con = DEP_CON (dep);
- rtx inc_cand = backwards ? pro : con;
+ rtx_insn *pro = DEP_PRO (dep);
+ rtx_insn *con = DEP_CON (dep);
+ rtx_insn *inc_cand = backwards ? pro : con;
if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
goto next;
if (parse_add_or_inc (mii, inc_cand, backwards))
{
struct dep_replacement *desc;
- df_ref *def_rec;
+ df_ref def;
rtx newaddr, newmem;
if (sched_verbose >= 5)
/* Need to assure that none of the operands of the inc
instruction are assigned to by the mem insn. */
- for (def_rec = DF_INSN_DEFS (mii->mem_insn); *def_rec; def_rec++)
- {
- df_ref def = *def_rec;
- if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
- || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
- {
- if (sched_verbose >= 5)
- fprintf (sched_dump,
- "inc conflicts with store failure.\n");
- goto next;
- }
- }
+ FOR_EACH_INSN_DEF (def, mii->mem_insn)
+ if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
+ || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
+ {
+ if (sched_verbose >= 5)
+ fprintf (sched_dump,
+ "inc conflicts with store failure.\n");
+ goto next;
+ }
+
newaddr = mii->inc_input;
if (mii->mem_index != NULL_RTX)
newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
}
if (REG_P (reg0))
{
- df_ref *def_rec;
+ df_ref use;
int occurrences = 0;
/* Make sure this reg appears only once in this insn. Can't use
count_occurrences since that only works for pseudos. */
- for (def_rec = DF_INSN_USES (mii->mem_insn); *def_rec; def_rec++)
- {
- df_ref def = *def_rec;
- if (reg_overlap_mentioned_p (reg0, DF_REF_REG (def)))
- if (++occurrences > 1)
- {
- if (sched_verbose >= 5)
- fprintf (sched_dump, "mem count failure\n");
- return false;
- }
- }
+ FOR_EACH_INSN_USE (use, mii->mem_insn)
+ if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
+ if (++occurrences > 1)
+ {
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "mem count failure\n");
+ return false;
+ }
mii->mem_reg0 = reg0;
return find_inc (mii, true) || find_inc (mii, false);
dependencies that can be broken by modifying one of the patterns. */
void
-find_modifiable_mems (rtx head, rtx tail)
+find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
{
- rtx insn, next_tail = NEXT_INSN (tail);
+ rtx_insn *insn, *next_tail = NEXT_INSN (tail);
int success_in_block = 0;
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))