/* Instruction scheduling pass.
- Copyright (C) 1992-2015 Free Software Foundation, Inc.
+ Copyright (C) 1992-2020 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
-#include "diagnostic-core.h"
-#include "hard-reg-set.h"
+#include "backend.h"
+#include "target.h"
#include "rtl.h"
+#include "cfghooks.h"
+#include "df.h"
+#include "memmodel.h"
#include "tm_p.h"
-#include "regs.h"
-#include "hashtab.h"
-#include "hash-set.h"
-#include "vec.h"
-#include "machmode.h"
-#include "input.h"
-#include "function.h"
-#include "flags.h"
#include "insn-config.h"
-#include "insn-attr.h"
-#include "except.h"
+#include "regs.h"
+#include "ira.h"
#include "recog.h"
-#include "dominance.h"
-#include "cfg.h"
+#include "insn-attr.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
-#include "predict.h"
-#include "basic-block.h"
#include "sched-int.h"
-#include "target.h"
#include "common/common-target.h"
-#include "params.h"
#include "dbgcnt.h"
#include "cfgloop.h"
-#include "ira.h"
-#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
-#include "hash-table.h"
#include "dumpfile.h"
+#include "print-rtl.h"
+#include "function-abi.h"
#ifdef INSN_SCHEDULING
/* sched-verbose controls the amount of debugging output the
scheduler prints. It is controlled by -fsched-verbose=N:
- N>0 and no -DSR : the output is directed to stderr.
- N>=10 will direct the printouts to stderr (regardless of -dSR).
- N=1: same as -dSR.
+ N=0: no debugging output.
+ N=1: default value.
N=2: bb's probabilities, detailed ready list info, unit/insn info.
N=3: rtl at abort point, control-flow, regions info.
N=5: dependences info. */
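+/* For example (an illustrative invocation, not mandated by this pass):
+   -fsched-verbose=2 combined with -fdump-rtl-sched2 sends the detailed
+   ready-list information to the .sched2 dump file; as setup_sched_dump
+   below shows, the setting has no effect when no dump file is active.  */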
-
int sched_verbose = 0;
-/* Debugging file. All printouts are sent to dump, which is always set,
- either to stderr, or to the dump listing file (-dRS). */
+/* Debugging file.  All printouts are sent to sched_dump.  */
FILE *sched_dump = 0;
/* This is a placeholder for the scheduler parameters common
#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
-/* Cached cost of the instruction. Use insn_cost to get cost of the
+/* Cached cost of the instruction. Use insn_sched_cost to get cost of the
insn. -1 here means that the field is not initialized. */
#define INSN_COST(INSN) (HID (INSN)->cost)
};
/* Mapping from instruction UID to its Logical UID. */
-vec<int> sched_luids = vNULL;
+vec<int> sched_luids;
/* Next LUID to assign to an instruction. */
int sched_max_luid = 1;
/* Haifa Instruction Data. */
-vec<haifa_insn_data_def> h_i_d = vNULL;
+vec<haifa_insn_data_def> h_i_d;
void (* sched_init_only_bb) (basic_block, basic_block);
modulo_max_stages = max_stages;
modulo_n_insns = insns;
modulo_iter0_max_uid = max_uid;
- modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
+ modulo_backtracks_left = param_max_modulo_backtrack_attempts;
}
/* A structure to record a pair of insns where the first one is a real
/* Helpers for delay hashing. */
-struct delay_i1_hasher : typed_noop_remove <delay_pair>
+struct delay_i1_hasher : nofree_ptr_hash <delay_pair>
{
- typedef delay_pair *value_type;
typedef void *compare_type;
static inline hashval_t hash (const delay_pair *);
static inline bool equal (const delay_pair *, const void *);
return x->i1 == y;
}
-struct delay_i2_hasher : typed_free_remove <delay_pair>
+struct delay_i2_hasher : free_ptr_hash <delay_pair>
{
- typedef delay_pair *value_type;
typedef void *compare_type;
static inline hashval_t hash (const delay_pair *);
static inline bool equal (const delay_pair *, const void *);
\f
/* Forward declarations. */
-static int priority (rtx_insn *);
+static int priority (rtx_insn *, bool force_recompute = false);
static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx_insn **, int);
/* The following functions are used to implement multi-pass scheduling
on the first cycle. */
static rtx_insn *ready_remove (struct ready_list *, int);
-static void ready_remove_insn (rtx);
+static void ready_remove_insn (rtx_insn *);
static void fix_inter_tick (rtx_insn *, rtx_insn *);
static int fix_tick_ready (rtx_insn *);
static void init_h_i_d (rtx_insn *);
static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
static void generate_recovery_code (rtx_insn *);
-static void process_insn_forw_deps_be_in_spec (rtx, rtx_insn *, ds_t);
+static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
static void begin_speculative_block (rtx_insn *);
static void add_to_speculative_block (rtx_insn *);
static void init_before_recovery (basic_block *);
/* Registers mentioned in the current region. */
static bitmap region_ref_regs;
+/* Temporary bitmap used for SCHED_PRESSURE_MODEL. */
+static bitmap tmp_bitmap;
+
/* Effective number of available registers of a given class (see comment
in sched_pressure_start_bb). */
static int sched_class_regs_num[N_REG_CLASSES];
-/* Number of call_used_regs. This is a helper for calculating of
+/* The number of registers that the function would need to save before it
+   uses them, and the number of fixed_regs.  Helpers for calculating
sched_class_regs_num. */
-static int call_used_regs_num[N_REG_CLASSES];
+static int call_saved_regs_num[N_REG_CLASSES];
+static int fixed_regs_num[N_REG_CLASSES];
/* Initiate register pressure relative info for scheduling the current
region. Currently it is only clearing register mentioned in the
static void
setup_ref_regs (rtx x)
{
- int i, j, regno;
+ int i, j;
const RTX_CODE code = GET_CODE (x);
const char *fmt;
if (REG_P (x))
{
- regno = REGNO (x);
- if (HARD_REGISTER_NUM_P (regno))
- bitmap_set_range (region_ref_regs, regno,
- hard_regno_nregs[regno][GET_MODE (x)]);
- else
- bitmap_set_bit (region_ref_regs, REGNO (x));
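+      /* REG_NREGS is 1 for a pseudo register, so a single call covers
+         both of the branches that were here before.  */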
+ bitmap_set_range (region_ref_regs, REGNO (x), REG_NREGS (x));
return;
}
fmt = GET_RTX_FORMAT (code);
if (NONDEBUG_INSN_P (insn))
setup_ref_regs (PATTERN (insn));
initiate_reg_pressure_info (df_get_live_in (bb));
-#ifdef EH_RETURN_DATA_REGNO
if (bb_has_eh_pred (bb))
for (i = 0; ; ++i)
{
mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
regno, true);
}
-#endif
}
/* Save current register pressure related info. */
block, or the prev_head of the scheduling block. Used by
rank_for_schedule, so that insns independent of the last scheduled
insn will be preferred over dependent instructions. */
-static rtx last_nondebug_scheduled_insn;
+static rtx_insn *last_nondebug_scheduled_insn;
/* Pointer that iterates through the list of unscheduled insns if we
have a dbg_cnt enabled. It always points at an insn prior to the
This is the number of cycles between instruction issue and
instruction results. */
int
-insn_cost (rtx_insn *insn)
+insn_sched_cost (rtx_insn *insn)
{
int cost;
{
enum reg_note dep_type = DEP_TYPE (link);
- cost = insn_cost (insn);
+ cost = insn_sched_cost (insn);
if (INSN_CODE (insn) >= 0)
{
}
- if (targetm.sched.adjust_cost_2)
- cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
- dw);
- else if (targetm.sched.adjust_cost != NULL)
- {
- /* This variable is used for backward compatibility with the
- targets. */
- rtx_insn_list *dep_cost_rtx_link =
- alloc_INSN_LIST (NULL_RTX, NULL);
-
- /* Make it self-cycled, so that if some tries to walk over this
- incomplete list he/she will be caught in an endless loop. */
- XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
-
- /* Targets use only REG_NOTE_KIND of the link. */
- PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
-
- cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
- insn, cost);
-
- free_INSN_LIST_node (dep_cost_rtx_link);
- }
+ if (targetm.sched.adjust_cost)
+ cost = targetm.sched.adjust_cost (used, (int) dep_type, insn, cost,
+ dw);
if (cost < 0)
cost = 0;
/* Compute the number of nondebug deps in list LIST for INSN. */
static int
-dep_list_size (rtx insn, sd_list_types_def list)
+dep_list_size (rtx_insn *insn, sd_list_types_def list)
{
sd_iterator_def sd_it;
dep_t dep;
/* Compute the priority number for INSN. */
static int
-priority (rtx_insn *insn)
+priority (rtx_insn *insn, bool force_recompute)
{
if (! INSN_P (insn))
return 0;
/* We should not be interested in priority of an already scheduled insn. */
gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
- if (!INSN_PRIORITY_KNOWN (insn))
+ if (force_recompute || !INSN_PRIORITY_KNOWN (insn))
{
int this_priority = -1;
INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
}
else if (dep_list_size (insn, SD_LIST_FORW) == 0)
- /* ??? We should set INSN_PRIORITY to insn_cost when and insn has
- some forward deps but all of them are ignored by
+  /* ??? We should set INSN_PRIORITY to insn_sched_cost when an insn
+ has some forward deps but all of them are ignored by
contributes_to_priority hook. At the moment we set priority of
such insn to 0. */
- this_priority = insn_cost (insn);
+ this_priority = insn_sched_cost (insn);
else
{
rtx_insn *prev_first, *twin;
{
gcc_assert (this_priority == -1);
- this_priority = insn_cost (insn);
+ this_priority = insn_sched_cost (insn);
}
INSN_PRIORITY (insn) = this_priority;
registers that will be born in the range [model_curr_point, POINT). */
num_uses = 0;
num_pending_births = 0;
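+  /* TMP_BITMAP tracks which registers have been processed already, so
+     that a register used more than once by INSN is only counted once.  */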
+ bitmap_clear (tmp_bitmap);
for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
{
new_last = model_last_use_except (use);
- if (new_last < point)
+ if (new_last < point && bitmap_set_bit (tmp_bitmap, use->regno))
{
gcc_assert (num_uses < ARRAY_SIZE (uses));
uses[num_uses].last_use = new_last;
RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
- RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_N };
+ RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_COST, RFS_N };
/* Corresponding strings for print outs. */
static const char *rfs_str[RFS_N] = {
"RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
"RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
"RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
- "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION" };
+ "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION", "RFS_COST" };
/* Statistical breakdown of rank_for_schedule decisions. */
-typedef struct { unsigned stats[RFS_N]; } rank_for_schedule_stats_t;
+struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; };
static rank_for_schedule_stats_t rank_for_schedule_stats;
/* Return the result of comparing insns TMP and TMP2 and update
if (flag_sched_critical_path_heuristic && priority_val)
return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
- if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
+ if (param_sched_autopref_queue_depth >= 0)
{
int autopref = autopref_rank_for_schedule (tmp, tmp2);
if (autopref != 0)
{
dep_t dep1;
dep_t dep2;
- rtx last = last_nondebug_scheduled_insn;
+ rtx_insn *last = last_nondebug_scheduled_insn;
/* Classify the instructions into three classes:
1) Data dependent on last schedule insn.
}
/* Prefer instructions that occur earlier in the model schedule. */
- if (sched_pressure == SCHED_PRESSURE_MODEL
- && INSN_BB (tmp) == target_bb && INSN_BB (tmp2) == target_bb)
+ if (sched_pressure == SCHED_PRESSURE_MODEL)
{
diff = model_index (tmp) - model_index (tmp2);
- gcc_assert (diff != 0);
- return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
+ if (diff != 0)
+ return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
}
/* Prefer the insn which has more later insns that depend on it.
if (flag_sched_dep_count_heuristic && val != 0)
return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
+ /* Sort by INSN_COST rather than INSN_LUID. This means that instructions
+     which take longer to execute are prioritised, which leads to more
+ dual-issue opportunities on in-order cores which have this feature. */
+
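+  /* For instance (an illustrative pairing): with all of the heuristics
+     above tied, a four-cycle multiply is sorted ahead of a one-cycle
+     add.  */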
+ if (INSN_COST (tmp) != INSN_COST (tmp2))
+ return rfs_result (RFS_COST, INSN_COST (tmp2) - INSN_COST (tmp),
+ tmp, tmp2);
+
/* If insns are equally good, sort by INSN_LUID (original insn order),
so that we make the sort stable. This minimizes instruction movement,
thus minimizing sched's effect on debugging and cross-jumping. */
/* Remove INSN from the ready list. */
static void
-ready_remove_insn (rtx insn)
+ready_remove_insn (rtx_insn *insn)
{
int i;
only be scheduled once their control dependency is resolved. */
static void
-check_clobbered_conditions (rtx insn)
+check_clobbered_conditions (rtx_insn *insn)
{
HARD_REG_SET t;
int i;
}
/* Add INSN to the model worklist. Start looking for a suitable position
- between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
+ between neighbors PREV and NEXT, testing at most param_max_sched_ready_insns
insns either side. A null PREV indicates the beginning of the list and
a null NEXT indicates the end. */
{
int count;
- count = MAX_SCHED_READY_INSNS;
+ count = param_max_sched_ready_insns;
if (count > 0 && prev && model_order_p (insn, prev))
do
{
int count;
prev = insn->prev;
- count = MAX_SCHED_READY_INSNS;
+ count = param_max_sched_ready_insns;
while (count > 0 && prev && model_order_p (insn, prev))
{
count--;
{
fprintf (sched_dump, ";;\t+--- worklist:\n");
insn = model_worklist;
- count = MAX_SCHED_READY_INSNS;
+ count = param_max_sched_ready_insns;
while (count > 0 && insn)
{
fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
Failing that, just pick the highest-priority instruction in the
worklist. */
- count = MAX_SCHED_READY_INSNS;
+ count = param_max_sched_ready_insns;
insn = model_worklist;
fallback = 0;
for (;;)
* If the basic block executes much more often than the prologue/epilogue
(e.g., inside a hot loop), then cost of spill in the prologue is close to
nil, so the effective number of available registers is
- (ira_class_hard_regs_num[cl] - 0).
+ (ira_class_hard_regs_num[cl] - fixed_regs_num[cl] - 0).
* If the basic block executes as often as the prologue/epilogue,
then spill in the block is as costly as in the prologue, so the effective
number of available registers is
- (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).
+ (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
+ - call_saved_regs_num[cl]).
Note that all-else-equal, we prefer to spill in the prologue, since that
allows "extra" registers for other basic blocks of the function.
* If the basic block is on the cold path of the function and executes
rarely, then we should always prefer to spill in the block, rather than
in the prologue/epilogue. The effective number of available register is
- (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]). */
+ (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
+ - call_saved_regs_num[cl]). */
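+
+/* As an illustrative example (numbers invented for this comment): with
+   ira_class_hard_regs_num[cl] == 16, fixed_regs_num[cl] == 2 and
+   call_saved_regs_num[cl] == 8, a block as frequent as the entry block
+   gets 16 - 2 - 8 = 6 effective registers, while a block four times
+   hotter gets 16 - 2 - 8/4 = 12.  */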
{
int i;
- int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
- int bb_freq = bb->frequency;
+ int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.to_frequency (cfun);
+ int bb_freq = bb->count.to_frequency (cfun);
if (bb_freq == 0)
{
for (i = 0; i < ira_pressure_classes_num; ++i)
{
enum reg_class cl = ira_pressure_classes[i];
- sched_class_regs_num[cl] = ira_class_hard_regs_num[cl];
+ sched_class_regs_num[cl] = ira_class_hard_regs_num[cl]
+ - fixed_regs_num[cl];
sched_class_regs_num[cl]
- -= (call_used_regs_num[cl] * entry_freq) / bb_freq;
+ -= (call_saved_regs_num[cl] * entry_freq) / bb_freq;
}
}
gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
/* Reset debug insns invalidated by moving this insn. */
- if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
+ if (MAY_HAVE_DEBUG_BIND_INSNS && !DEBUG_INSN_P (insn))
for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
sd_iterator_cond (&sd_it, &dep);)
{
continue;
}
- gcc_assert (DEBUG_INSN_P (dbg));
+ gcc_assert (DEBUG_BIND_INSN_P (dbg));
if (sched_verbose >= 6)
fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
state_t curr_state;
rtx_insn *last_scheduled_insn;
- rtx last_nondebug_scheduled_insn;
+ rtx_insn *last_nondebug_scheduled_insn;
rtx_insn *nonscheduled_insns_begin;
int cycle_issued_insns;
/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
to SET_P. */
static void
-mark_backtrack_feeds (rtx insn, int set_p)
+mark_backtrack_feeds (rtx_insn *insn, int set_p)
{
sd_iterator_def sd_it;
dep_t dep;
queued nowhere. */
static void
-unschedule_insns_until (rtx insn)
+unschedule_insns_until (rtx_insn *insn)
{
auto_vec<rtx_insn *> recompute_vec;
success = validate_change (desc->insn, desc->loc, desc->newval, 0);
gcc_assert (success);
+ rtx_insn *insn = DEP_PRO (dep);
+
+ /* Recompute priority since dependent priorities may have changed. */
+ priority (insn, true);
update_insn_after_change (desc->insn);
+
if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
fix_tick_ready (desc->insn);
success = validate_change (desc->insn, desc->loc, desc->orig, 0);
gcc_assert (success);
+
+ rtx_insn *insn = DEP_PRO (dep);
+
+ if (QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
+ {
+ /* Recompute priority since dependent priorities may have changed. */
+ priority (insn, true);
+ }
+
update_insn_after_change (desc->insn);
+
if (backtrack_queue != NULL)
{
backtrack_queue->replacement_deps.safe_push (dep);
static int
estimate_shadow_tick (struct delay_pair *p)
{
- bitmap_head processed;
+ auto_bitmap processed;
int t;
bool cutoff;
- bitmap_initialize (&processed, 0);
- cutoff = !estimate_insn_tick (&processed, p->i2,
+ cutoff = !estimate_insn_tick (processed, p->i2,
max_insn_queue_index + pair_delay (p));
- bitmap_clear (&processed);
if (cutoff)
return max_insn_queue_index;
t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
{
rtx_insn *insn;
rtx_insn_list *link;
- rtx skip_insn;
+ rtx_insn *skip_insn;
q_ptr = NEXT_Q (q_ptr);
nonscheduled insn. */
skip_insn = first_nonscheduled_insn ();
else
- skip_insn = NULL_RTX;
+ skip_insn = NULL;
/* Add all pending insns that can be scheduled without stalls to the
ready list. */
/* If the ready list is full, delay the insn for 1 cycle.
See the comment in schedule_block for the rationale. */
if (!reload_completed
- && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
+ && (ready->n_ready - ready->n_debug > param_max_sched_ready_insns
|| (sched_pressure == SCHED_PRESSURE_MODEL
- /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
- instructions too. */
+ /* Limit pressure recalculations to
+ param_max_sched_ready_insns instructions too. */
&& model_index (insn) > (model_curr_point
- + MAX_SCHED_READY_INSNS)))
+ + param_max_sched_ready_insns)))
&& !(sched_pressure == SCHED_PRESSURE_MODEL
&& model_curr_point < model_num_insns
/* Always allow the next model instruction to issue. */
addition) depending on user flags and target hooks. */
static bool
-ok_for_early_queue_removal (rtx insn)
+ok_for_early_queue_removal (rtx_insn *insn)
{
if (targetm.sched.is_costly_dependence)
{
- rtx prev_insn;
int n_cycles;
int i = scheduled_insns.length ();
for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
{
int cost;
- prev_insn = scheduled_insns[i];
+ rtx_insn *prev_insn = scheduled_insns[i];
if (!NOTE_P (prev_insn))
{
last = emit_note_before (note_type, last);
remove_note (insn, note);
+ df_insn_create_insn_record (last);
}
}
}
return false;
}
+/* Helper for autopref_multipass_init. Given a SET in PAT and whether
+ we're expecting a memory WRITE or not, check that the insn is relevant to
+ the autoprefetcher modelling code. Return true iff that is the case.
+ If it is relevant, record the base register of the memory op in BASE and
+ the offset in OFFSET. */
+
+static bool
+analyze_set_insn_for_autopref (rtx pat, bool write, rtx *base, int *offset)
+{
+ if (GET_CODE (pat) != SET)
+ return false;
+
+ rtx mem = write ? SET_DEST (pat) : SET_SRC (pat);
+ if (!MEM_P (mem))
+ return false;
+
+ struct address_info info;
+ decompose_mem_address (&info, mem);
+
+ /* TODO: Currently only (base+const) addressing is supported. */
+ if (info.base == NULL || !REG_P (*info.base)
+ || (info.disp != NULL && !CONST_INT_P (*info.disp)))
+ return false;
+
+ *base = *info.base;
+ *offset = info.disp ? INTVAL (*info.disp) : 0;
+ return true;
+}
+
/* Functions to model cache auto-prefetcher.
Some of the CPUs have cache auto-prefetcher, which /seems/ to initiate
/* Set insn entry initialized, but not relevant for auto-prefetcher. */
data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
+ rtx pat = PATTERN (insn);
+
+ /* We have a multi-set insn like a load-multiple or store-multiple.
+ We care about these as long as all the memory ops inside the PARALLEL
+     have the same base register.  We care about the minimum offset from
+     that base, but don't check the order in which those memory ops appear
+     within the PARALLEL insn itself.  */
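+  /* For instance (an illustrative, target-neutral load-multiple):
+       (parallel [(set (reg:SI 0) (mem:SI (reg:SI 13)))
+                  (set (reg:SI 1) (mem:SI (plus:SI (reg:SI 13)
+                                                   (const_int 4))))])
+     uses register 13 as the common base, with a minimum offset of 0.  */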
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ int n_elems = XVECLEN (pat, 0);
+
+ int i, offset;
+ rtx base, prev_base = NULL_RTX;
+ int min_offset = INT_MAX;
+
+ for (i = 0; i < n_elems; i++)
+ {
+ rtx set = XVECEXP (pat, 0, i);
+ if (GET_CODE (set) != SET)
+ return;
+
+ if (!analyze_set_insn_for_autopref (set, write, &base, &offset))
+ return;
+
+ /* Ensure that all memory operations in the PARALLEL use the same
+ base register. */
+ if (i > 0 && REGNO (base) != REGNO (prev_base))
+ return;
+ prev_base = base;
+ min_offset = MIN (min_offset, offset);
+ }
+
+ /* If we reached here then we have a valid PARALLEL of multiple memory ops
+ with prev_base as the base and min_offset containing the offset. */
+ gcc_assert (prev_base);
+ data->base = prev_base;
+ data->offset = min_offset;
+ data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
+ return;
+ }
+
+ /* Otherwise this is a single set memory operation. */
rtx set = single_set (insn);
if (set == NULL_RTX)
return;
- rtx mem = write ? SET_DEST (set) : SET_SRC (set);
- if (!MEM_P (mem))
+ if (!analyze_set_insn_for_autopref (set, write, &data->base,
+ &data->offset))
return;
- struct address_info info;
- decompose_mem_address (&info, mem);
-
- /* TODO: Currently only (base+const) addressing is supported. */
- if (info.base == NULL || !REG_P (*info.base)
- || (info.disp != NULL && !CONST_INT_P (*info.disp)))
- return;
-
- /* This insn is relevant for auto-prefetcher. */
- data->base = *info.base;
- data->offset = info.disp ? INTVAL (*info.disp) : 0;
+ /* This insn is relevant for the auto-prefetcher.
+ The base and offset fields will have been filled in the
+ analyze_set_insn_for_autopref call above. */
data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
}
static int
autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
{
- for (int write = 0; write < 2; ++write)
+ int r = 0;
+ for (int write = 0; write < 2 && !r; ++write)
{
autopref_multipass_data_t data1
= &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
autopref_multipass_init (insn1, write);
- if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
- continue;
if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
autopref_multipass_init (insn2, write);
- if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
- continue;
- if (!rtx_equal_p (data1->base, data2->base))
- continue;
+ int irrel1 = data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
+ int irrel2 = data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
- return data1->offset - data2->offset;
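+      /* Compare the offsets when both insns are relevant to the
+         auto-prefetcher; otherwise let the relevance bits decide.  */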
+ if (!irrel1 && !irrel2)
+ r = data1->offset - data2->offset;
+ else
+ r = irrel2 - irrel1;
}
- return 0;
+ return r;
}
/* True if header of debug dump was printed. */
{
int r = 0;
- if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) <= 0)
+ /* Exit early if the param forbids this or if we're not entering here through
+ normal haifa scheduling. This can happen if selective scheduling is
+ explicitly enabled. */
+ if (!insn_queue || param_sched_autopref_queue_depth <= 0)
return 0;
if (sched_verbose >= 2 && ready_index == 0)
}
}
- if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) == 1)
+ if (param_sched_autopref_queue_depth == 1)
continue;
/* Everything from the current queue slot should have been moved to
the ready list. */
gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
- int n_stalls = PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) - 1;
+ int n_stalls = param_sched_autopref_queue_depth - 1;
if (n_stalls > max_insn_queue_index)
n_stalls = max_insn_queue_index;
{
int i, pass;
bool sched_group_found = false;
- int min_cost_group = 1;
+ int min_cost_group = 0;
if (sched_fusion)
return;
}
/* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
- such an insn first and note its cost, then schedule all other insns
- for one cycle later. */
+ such an insn first and note its cost. If at least one SCHED_GROUP_P insn
+ gets queued, then all other insns get queued for one cycle later. */
for (pass = sched_group_found ? 0 : 1; pass < 2; )
{
int n = ready.n_ready;
if (DEBUG_INSN_P (insn))
continue;
- if (sched_group_found && !SCHED_GROUP_P (insn))
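+      /* Per the comment above this loop: apply the one-cycle delay only
+         when at least one SCHED_GROUP_P insn has been queued, i.e. when
+         min_cost_group has become nonzero.  */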
+ if (sched_group_found && !SCHED_GROUP_P (insn)
+ && ((pass == 0) || (min_cost_group >= 1)))
{
if (pass == 0)
continue;
if (sched_verbose >= 4)
{
- if (NOTE_P (insn) || recog_memoized (insn) < 0)
+ if (NOTE_P (insn) || LABEL_P (insn) || recog_memoized (insn) < 0)
fprintf (sched_dump, "nothing");
else
print_reservation (sched_dump, insn);
/* We start inserting insns after PREV_HEAD. */
last_scheduled_insn = prev_head;
- last_nondebug_scheduled_insn = NULL_RTX;
+ last_nondebug_scheduled_insn = NULL;
nonscheduled_insns_begin = NULL;
gcc_assert ((NOTE_P (last_scheduled_insn)
time in the worst case. Before reload we are more likely to have
big lists so truncate them to a reasonable size. */
if (!reload_completed
- && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
+ && ready.n_ready - ready.n_debug > param_max_sched_ready_insns)
{
ready_sort_debug (&ready);
ready_sort_real (&ready);
- /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
+ /* Find first free-standing insn past param_max_sched_ready_insns.
If there are debug insns, we know they're first. */
- for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
+ for (i = param_max_sched_ready_insns + ready.n_debug; i < ready.n_ready;
+ i++)
if (!SCHED_GROUP_P (ready_element (&ready, i)))
break;
return n_insn;
}
-/* Set dump and sched_verbose for the desired debugging output. If no
- dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
- For -fsched-verbose=N, N>=10, print everything to stderr. */
+/* Set sched_dump and sched_verbose for the desired debugging output. */
void
setup_sched_dump (void)
{
sched_verbose = sched_verbose_param;
- if (sched_verbose_param == 0 && dump_file)
- sched_verbose = 1;
- sched_dump = ((sched_verbose_param >= 10 || !dump_file)
- ? stderr : dump_file);
+ sched_dump = dump_file;
+ if (!dump_file)
+ sched_verbose = 0;
}
/* Allocate data for register pressure sensitive scheduling. */
saved_reg_live = BITMAP_ALLOC (NULL);
region_ref_regs = BITMAP_ALLOC (NULL);
}
+ if (sched_pressure == SCHED_PRESSURE_MODEL)
+ tmp_bitmap = BITMAP_ALLOC (NULL);
- /* Calculate number of CALL_USED_REGS in register classes that
- we calculate register pressure for. */
+ /* Calculate number of CALL_SAVED_REGS and FIXED_REGS in register classes
+ that we calculate register pressure for. */
for (int c = 0; c < ira_pressure_classes_num; ++c)
{
enum reg_class cl = ira_pressure_classes[c];
- call_used_regs_num[cl] = 0;
+ call_saved_regs_num[cl] = 0;
+ fixed_regs_num[cl] = 0;
for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
- if (call_used_regs[ira_class_hard_regs[cl][i]])
- ++call_used_regs_num[cl];
+ {
+ unsigned int regno = ira_class_hard_regs[cl][i];
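+          /* Fixed registers are never available for allocation, and
+             registers that calls preserve under the current function's
+             ABI must be saved before this function can use them.  */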
+ if (fixed_regs[regno])
+ ++fixed_regs_num[cl];
+ else if (!crtl->abi->clobbers_full_reg_p (regno))
+ ++call_saved_regs_num[cl];
+ }
}
}
}
BITMAP_FREE (region_ref_regs);
BITMAP_FREE (saved_reg_live);
}
+ if (sched_pressure == SCHED_PRESSURE_MODEL)
+ BITMAP_FREE (tmp_bitmap);
BITMAP_FREE (curr_reg_live);
free (sched_regno_pressure_class);
}
sched_init (void)
{
/* Disable speculative loads in their presence if cc0 defined. */
-#ifdef HAVE_cc0
+ if (HAVE_cc0)
flag_schedule_speculative_load = 0;
-#endif
if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
&& !reload_completed
&& common_sched_info->sched_pass_id == SCHED_RGN_PASS)
sched_pressure = ((enum sched_pressure_algorithm)
- PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
+ param_sched_pressure_algorithm);
else
sched_pressure = SCHED_PRESSURE_NONE;
if (spec_info->mask != 0)
{
- spec_info->data_weakness_cutoff =
- (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
- spec_info->control_weakness_cutoff =
- (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
- * REG_BR_PROB_BASE) / 100;
+ spec_info->data_weakness_cutoff
+ = (param_sched_spec_prob_cutoff * MAX_DEP_WEAK) / 100;
+ spec_info->control_weakness_cutoff
+ = (param_sched_spec_prob_cutoff * REG_BR_PROB_BASE) / 100;
}
else
/* So we won't read anything accidentally. */
/* Initialize luids, dependency caches, target and h_i_d for the
whole function. */
{
- bb_vec_t bbs;
- bbs.create (n_basic_blocks_for_fn (cfun));
- basic_block bb;
-
sched_init_bbs ();
+ auto_vec<basic_block> bbs (n_basic_blocks_for_fn (cfun));
+ basic_block bb;
FOR_EACH_BB_FN (bb, cfun)
bbs.quick_push (bb);
sched_init_luids (bbs);
sched_deps_init (true);
sched_extend_target ();
haifa_init_h_i_d (bbs);
-
- bbs.release ();
}
sched_init_only_bb = haifa_init_only_bb;
sched_deps_finish ();
sched_finish_luids ();
current_sched_info = NULL;
+ insn_queue = NULL;
sched_finish ();
}
fix_inter_tick (rtx_insn *head, rtx_insn *tail)
{
/* Set of instructions with corrected INSN_TICK. */
- bitmap_head processed;
+ auto_bitmap processed;
/* ??? It is doubtful if we should assume that cycle advance happens on
basic block boundaries. Basically insns that are unconditionally ready
on the start of the block are more preferable then those which have
a one cycle dependency over insn from the previous block. */
int next_clock = clock_var + 1;
- bitmap_initialize (&processed, 0);
-
/* Iterates over scheduled instructions and fix their INSN_TICKs and
INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
across different blocks. */
gcc_assert (tick >= MIN_TICK);
/* Fix INSN_TICK of instruction from just scheduled block. */
- if (bitmap_set_bit (&processed, INSN_LUID (head)))
+ if (bitmap_set_bit (processed, INSN_LUID (head)))
{
tick -= next_clock;
/* If NEXT has its INSN_TICK calculated, fix it.
If not - it will be properly calculated from
scratch later in fix_tick_ready. */
- && bitmap_set_bit (&processed, INSN_LUID (next)))
+ && bitmap_set_bit (processed, INSN_LUID (next)))
{
tick -= next_clock;
}
}
}
- bitmap_clear (&processed);
}
/* Check if NEXT is ready to be added to the ready or queue list.
Tries to add speculative dependencies of type FS between instructions
in deps_list L and TWIN. */
static void
-process_insn_forw_deps_be_in_spec (rtx insn, rtx_insn *twin, ds_t fs)
+process_insn_forw_deps_be_in_spec (rtx_insn *insn, rtx_insn *twin, ds_t fs)
{
sd_iterator_def sd_it;
dep_t dep;
ds_t ts;
sd_iterator_def sd_it;
dep_t dep;
- rtx_insn_list *twins = NULL;
- rtx_vec_t priorities_roots;
+ auto_vec<rtx_insn *, 10> twins;
ts = TODO_SPEC (insn);
gcc_assert (!(ts & ~BE_IN_SPEC));
sd_iterator_next (&sd_it);
}
- priorities_roots.create (0);
+ auto_vec<rtx_insn *> priorities_roots;
clear_priorities (insn, &priorities_roots);
while (1)
fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
INSN_UID (twin), rec->index);
- twins = alloc_INSN_LIST (twin, twins);
+ twins.safe_push (twin);
/* Add dependences between TWIN and all appropriate
instructions from REC. */
/* We couldn't have added the dependencies between INSN and TWINS earlier
because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
- while (twins)
+ unsigned int i;
+ rtx_insn *twin;
+ FOR_EACH_VEC_ELT_REVERSE (twins, i, twin)
{
- rtx_insn *twin;
- rtx_insn_list *next_node;
-
- twin = twins->insn ();
-
- {
- dep_def _new_dep, *new_dep = &_new_dep;
+ dep_def _new_dep, *new_dep = &_new_dep;
- init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
- sd_add_dep (new_dep, false);
- }
-
- next_node = twins->next ();
- free_INSN_LIST_node (twins);
- twins = next_node;
+ init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
+ sd_add_dep (new_dep, false);
}
calc_priorities (priorities_roots);
- priorities_roots.release ();
}
/* Extends and fills with zeros (only the new part) array pointed to by P. */
if (e)
{
- gcc_assert (e->dest == succ);
+ gcc_assert (e->dest == succ || e->dest->index == EXIT_BLOCK);
return e;
}
}
|| (!NOTE_P (insn)
&& !LABEL_P (insn)
/* Don't emit a NOTE if it would end up before a BARRIER. */
- && !BARRIER_P (NEXT_INSN (end))))
+ && !BARRIER_P (next_nondebug_insn (end))))
{
rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
/* Make note appear outside BB. */
Between these two blocks recovery blocks will be emitted. */
basic_block single, empty;
- rtx_insn *x;
- rtx label;
/* If the fallthrough edge to exit we've found is from the block we've
created before, don't do anything more. */
single->count = last->count;
empty->count = last->count;
- single->frequency = last->frequency;
- empty->frequency = last->frequency;
BB_COPY_PARTITION (single, last);
BB_COPY_PARTITION (empty, last);
make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
EDGE_FALLTHRU);
- label = block_label (empty);
- x = emit_jump_insn_after (gen_jump (label), BB_END (single));
+ rtx_code_label *label = block_label (empty);
+ rtx_jump_insn *x = emit_jump_insn_after (targetm.gen_jump (label),
+ BB_END (single));
JUMP_LABEL (x) = label;
LABEL_NUSES (label)++;
haifa_init_insn (x);
basic_block
sched_create_recovery_block (basic_block *before_recovery_ptr)
{
- rtx label;
rtx_insn *barrier;
basic_block rec;
barrier = get_last_bb_insn (before_recovery);
gcc_assert (BARRIER_P (barrier));
- label = emit_label_after (gen_label_rtx (), barrier);
+ rtx_insn *label = emit_label_after (gen_label_rtx (), barrier);
rec = create_basic_block (label, label, before_recovery);
sched_create_recovery_edges (basic_block first_bb, basic_block rec,
basic_block second_bb)
{
- rtx label;
- rtx jump;
int edge_flags;
/* This is fixing of incoming edge. */
else
edge_flags = 0;
- make_edge (first_bb, rec, edge_flags);
- label = block_label (second_bb);
- jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
+ edge e2 = single_succ_edge (first_bb);
+ edge e = make_edge (first_bb, rec, edge_flags);
+
+ /* TODO: The actual probability can be determined and is computed as
+ 'todo_spec' variable in create_check_block_twin and
+ in sel-sched.c `check_ds' in create_speculation_check. */
+ e->probability = profile_probability::very_unlikely ();
+ rec->count = e->count ();
+ e2->probability = e->probability.invert ();
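+  /* The recovery path is assumed to be cold: edge E gets a very-unlikely
+     probability and the original successor edge E2 its complement, keeping
+     the outgoing probabilities of FIRST_BB summing to 1.  */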
+
+ rtx_code_label *label = block_label (second_bb);
+ rtx_jump_insn *jump = emit_jump_insn_after (targetm.gen_jump (label),
+ BB_END (rec));
JUMP_LABEL (jump) = label;
LABEL_NUSES (label)++;
/* Partition type is the same, if it is "unpartitioned". */
{
/* Rewritten from cfgrtl.c. */
- if (flag_reorder_blocks_and_partition
- && targetm_common.have_named_sections)
+ if (crtl->has_bb_partition && targetm_common.have_named_sections)
{
/* We don't need the same note for the check because
any_condjump_p (check) == true. */
/* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
because it'll be done later in add_to_speculative_block. */
{
- rtx_vec_t priorities_roots = rtx_vec_t ();
+ auto_vec<rtx_insn *> priorities_roots;
clear_priorities (twin, &priorities_roots);
calc_priorities (priorities_roots);
- priorities_roots.release ();
}
}
fix_recovery_deps (basic_block rec)
{
rtx_insn *note, *insn, *jump;
- rtx_insn_list *ready_list = 0;
- bitmap_head in_ready;
- rtx_insn_list *link;
-
- bitmap_initialize (&in_ready, 0);
+ auto_vec<rtx_insn *, 10> ready_list;
+ auto_bitmap in_ready;
/* NOTE - a basic block note. */
note = NEXT_INSN (BB_HEAD (rec));
{
sd_delete_dep (sd_it);
- if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
- ready_list = alloc_INSN_LIST (consumer, ready_list);
+ if (bitmap_set_bit (in_ready, INSN_LUID (consumer)))
+ ready_list.safe_push (consumer);
}
else
{
}
while (insn != note);
- bitmap_clear (&in_ready);
-
/* Try to add instructions to the ready or queue list. */
- for (link = ready_list; link; link = link->next ())
- try_ready (link->insn ());
- free_INSN_LIST_list (&ready_list);
+ unsigned int i;
+ rtx_insn *temp;
+ FOR_EACH_VEC_ELT_REVERSE (ready_list, i, temp)
+ try_ready (temp);
/* Fixing jump's dependences. */
insn = BB_HEAD (rec);
{
int i;
haifa_insn_data_t data;
- struct reg_use_data *use, *next;
+ reg_use_data *use, *next_use;
+ reg_set_data *set, *next_set;
FOR_EACH_VEC_ELT (h_i_d, i, data)
{
free (data->max_reg_pressure);
free (data->reg_pressure);
- for (use = data->reg_use_list; use != NULL; use = next)
+ for (use = data->reg_use_list; use != NULL; use = next_use)
{
- next = use->next_insn_use;
+ next_use = use->next_insn_use;
free (use);
}
+ for (set = data->reg_set_list; set != NULL; set = next_set)
+ {
+ next_set = set->next_insn_set;
+ free (set);
+ }
+
}
h_i_d.release ();
}