#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
+/* Cached cost of the instruction. Use insn_cost to get cost of the
+ insn. -1 here means that the field is not initialized. */
+#define INSN_COST(INSN) (HID (INSN)->cost)
/* If INSN_TICK of an instruction is equal to INVALID_TICK,
then it should be recalculated from scratch. */
return false;
}
+/* This function should be called after modifying the pattern of INSN,
+ to update scheduler data structures as needed. */
+static void
+update_insn_after_change (rtx insn)
+{
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ dfa_clear_single_insn_cache (insn);
+
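+ /* The new pattern can change the latencies of INSN's dependencies, so
+ forget the cached cost of every dependence of INSN, in both directions
+ (including resolved back deps). */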
+ sd_it = sd_iterator_start (insn,
+ SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
+ while (sd_iterator_cond (&sd_it, &dep))
+ {
+ DEP_COST (dep) = UNKNOWN_DEP_COST;
+ sd_iterator_next (&sd_it);
+ }
+
+ /* Invalidate INSN_COST, so it'll be recalculated. */
+ INSN_COST (insn) = -1;
+ /* Invalidate INSN_TICK, so it'll be recalculated. */
+ INSN_TICK (insn) = INVALID_TICK;
+}
+
+DEF_VEC_P(dep_t);
+DEF_VEC_ALLOC_P(dep_t, heap);
+
+/* Two VECs, one to hold dependencies for which pattern replacements
+ need to be applied or restored at the start of the next cycle, and
+ another to hold an integer that is either one, to apply the
+ corresponding replacement, or zero to restore it. */
+static VEC(dep_t, heap) *next_cycle_replace_deps;
+static VEC(int, heap) *next_cycle_apply;
+
+static void apply_replacement (dep_t, bool);
+static void restore_pattern (dep_t, bool);
+
/* Look at the remaining dependencies for insn NEXT, and compute and return
the TODO_SPEC value we should use for it. This is called after one of
- NEXT's dependencies has been resolved. */
+ NEXT's dependencies has been resolved.
+ We also perform pattern replacements for predication, and for broken
+ replacement dependencies. The latter is only done if FOR_BACKTRACK is
+ false. */
static ds_t
-recompute_todo_spec (rtx next)
+recompute_todo_spec (rtx next, bool for_backtrack)
{
ds_t new_ds;
sd_iterator_def sd_it;
- dep_t dep, control_dep = NULL;
+ dep_t dep, modify_dep = NULL;
int n_spec = 0;
int n_control = 0;
+ int n_replace = 0;
bool first_p = true;
if (sd_lists_empty_p (next, SD_LIST_BACK))
FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
{
+ rtx pro = DEP_PRO (dep);
ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
- if (DEBUG_INSN_P (DEP_PRO (dep)) && !DEBUG_INSN_P (next))
+ if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
continue;
if (ds)
else
new_ds = ds_merge (new_ds, ds);
}
- if (DEP_TYPE (dep) == REG_DEP_CONTROL)
+ else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
{
- n_control++;
- control_dep = dep;
+ if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
+ {
+ n_control++;
+ modify_dep = dep;
+ }
+ DEP_STATUS (dep) &= ~DEP_CANCELLED;
+ }
+ else if (DEP_REPLACE (dep) != NULL)
+ {
+ if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
+ {
+ n_replace++;
+ modify_dep = dep;
+ }
DEP_STATUS (dep) &= ~DEP_CANCELLED;
}
}
- if (n_control == 1 && n_spec == 0)
+ if (n_replace > 0 && n_control == 0 && n_spec == 0)
+ {
+ if (!dbg_cnt (sched_breakdep))
+ return HARD_DEP;
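+ /* Every remaining dependence from an unscheduled producer can be broken
+ by a pattern replacement. Cancel them all; if the replacement modifies
+ NEXT itself and we are not backtracking, apply it now. */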
+ FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
+ {
+ struct dep_replacement *desc = DEP_REPLACE (dep);
+ if (desc != NULL)
+ {
+ if (desc->insn == next && !for_backtrack)
+ {
+ gcc_assert (n_replace == 1);
+ apply_replacement (dep, true);
+ }
+ DEP_STATUS (dep) |= DEP_CANCELLED;
+ }
+ }
+ return 0;
+ }
+
+ else if (n_control == 1 && n_replace == 0 && n_spec == 0)
{
rtx pro, other, new_pat;
rtx cond = NULL_RTX;
&& PREDICATED_PAT (next) == NULL_RTX))
return HARD_DEP;
- pro = DEP_PRO (control_dep);
+ pro = DEP_PRO (modify_dep);
other = real_insn_for_shadow (pro);
if (other != NULL_RTX)
pro = other;
PREDICATED_PAT (next));
gcc_assert (success);
}
- DEP_STATUS (control_dep) |= DEP_CANCELLED;
+ DEP_STATUS (modify_dep) |= DEP_CANCELLED;
return DEP_CONTROL;
}
dependencies, so we return HARD_DEP in such a case. Also fail if
we have speculative dependencies with not enough points, or more than
one control dependency. */
- if ((n_spec > 0 && n_control > 0)
+ if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
|| (n_spec > 0
/* Too few points? */
&& ds_weak (new_ds) < spec_info->data_weakness_cutoff)
- || (n_control > 1))
+ || n_control > 0
+ || n_replace > 0)
return HARD_DEP;
return new_ds;
first unscheduled one. */
static rtx nonscheduled_insns_begin;
-/* Cached cost of the instruction. Use below function to get cost of the
- insn. -1 here means that the field is not initialized. */
-#define INSN_COST(INSN) (HID (INSN)->cost)
-
/* Compute cost of executing INSN.
This is the number of cycles between instruction issue and
instruction results. */
DEP_PRO (dep)))
return false;
+ if (DEP_REPLACE (dep) != NULL)
+ return false;
+
/* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
then speculative instructions will less likely be
scheduled. That is because the priority of
if (print_p)
fprintf (sched_dump, MODEL_BAR);
}
+
+/* After DEP, which was cancelled, has been resolved for insn NEXT,
+ check whether the insn's pattern needs restoring. */
+static bool
+must_restore_pattern_p (rtx next, dep_t dep)
+{
+ if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
+ return false;
+
+ if (DEP_TYPE (dep) == REG_DEP_CONTROL)
+ {
+ gcc_assert (ORIG_PAT (next) != NULL_RTX);
+ gcc_assert (next == DEP_CON (dep));
+ }
+ else
+ {
+ struct dep_replacement *desc = DEP_REPLACE (dep);
+ if (desc->insn != next)
+ {
+ gcc_assert (*desc->loc == desc->orig);
+ return false;
+ }
+ }
+ return true;
+}
\f
/* model_spill_cost (CL, P, P') returns the cost of increasing the
pressure on CL from P to P'. We use this to calculate a "base ECC",
check_clobbered_conditions (insn);
- /* Update dependent instructions. */
+ /* Update dependent instructions. First, see if by scheduling this insn
+ now we broke a dependence in a way that requires us to change another
+ insn. */
+ for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
+ sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
+ {
+ struct dep_replacement *desc = DEP_REPLACE (dep);
+ rtx pro = DEP_PRO (dep);
+ if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
+ && desc != NULL && desc->insn == pro)
+ apply_replacement (dep, false);
+ }
+
+ /* Go through and resolve forward dependencies. */
for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
sd_iterator_cond (&sd_it, &dep);)
{
if (cancelled)
{
- if (QUEUE_INDEX (next) != QUEUE_SCHEDULED)
- {
- int tick = INSN_TICK (next);
- gcc_assert (ORIG_PAT (next) != NULL_RTX);
- haifa_change_pattern (next, ORIG_PAT (next));
- INSN_TICK (next) = tick;
- if (sd_lists_empty_p (next, SD_LIST_BACK))
- TODO_SPEC (next) = 0;
- else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
- TODO_SPEC (next) = HARD_DEP;
- }
+ if (must_restore_pattern_p (next, dep))
+ restore_pattern (dep, false);
continue;
}
to 0 when restoring. */
int q_size;
rtx *insn_queue;
+
+ /* Describe pattern replacements that occurred since this backtrack point
+ was queued. */
+ VEC (dep_t, heap) *replacement_deps;
+ VEC (int, heap) *replace_apply;
+
+ /* A copy of the next-cycle replacement vectors at the time of the backtrack
+ point. */
+ VEC (dep_t, heap) *next_cycle_deps;
+ VEC (int, heap) *next_cycle_apply;
};
/* A record, in reverse order, of all scheduled insns which have delay slots
save->sched_block = sched_block;
+ save->replacement_deps = NULL;
+ save->replace_apply = NULL;
+ save->next_cycle_deps = VEC_copy (dep_t, heap, next_cycle_replace_deps);
+ save->next_cycle_apply = VEC_copy (int, heap, next_cycle_apply);
+
if (current_sched_info->save_state)
save->fe_saved_data = (*current_sched_info->save_state) ();
}
}
+/* Undo the replacements that have occurred after backtrack point SAVE
+ was placed. */
+static void
+undo_replacements_for_backtrack (struct haifa_saved_data *save)
+{
+ while (!VEC_empty (dep_t, save->replacement_deps))
+ {
+ dep_t dep = VEC_pop (dep_t, save->replacement_deps);
+ int apply_p = VEC_pop (int, save->replace_apply);
+
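+ /* Undo the recorded action by doing the opposite: a recorded apply is
+ undone by restoring the pattern, and a recorded restore by reapplying
+ the replacement. */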
+ if (apply_p)
+ restore_pattern (dep, true);
+ else
+ apply_replacement (dep, true);
+ }
+ VEC_free (dep_t, heap, save->replacement_deps);
+ VEC_free (int, heap, save->replace_apply);
+}
+
/* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
Restore their dependencies to an unresolved state, and mark them as
queued nowhere. */
haifa_change_pattern (con, ORIG_PAT (con));
}
else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
- TODO_SPEC (con) = recompute_todo_spec (con);
+ TODO_SPEC (con) = recompute_todo_spec (con, true);
}
VEC_free (rtx, heap, recompute_vec);
}
targetm.sched.free_sched_context (save->be_saved_data);
}
+ /* Do this first since it clobbers INSN_TICK of the involved
+ instructions. */
+ undo_replacements_for_backtrack (save);
+
/* Clear the QUEUE_INDEX of everything in the ready list or one
of the queues. */
if (ready.n_ready > 0)
{
rtx insn = first[i];
QUEUE_INDEX (insn) = QUEUE_READY;
- TODO_SPEC (insn) = recompute_todo_spec (insn);
+ TODO_SPEC (insn) = recompute_todo_spec (insn, true);
INSN_TICK (insn) = save->clock_var;
}
}
{
rtx x = XEXP (link, 0);
QUEUE_INDEX (x) = i;
- TODO_SPEC (x) = recompute_todo_spec (x);
+ TODO_SPEC (x) = recompute_todo_spec (x, true);
INSN_TICK (x) = save->clock_var + i;
}
}
mark_backtrack_feeds (save->delay_pair->i2, 0);
+ gcc_assert (VEC_empty (dep_t, next_cycle_replace_deps));
+ next_cycle_replace_deps = VEC_copy (dep_t, heap, save->next_cycle_deps);
+ next_cycle_apply = VEC_copy (int, heap, save->next_cycle_apply);
+
free (save);
for (save = backtrack_queue; save; save = save->next)
INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
pair = pair->next_same_i1;
}
+ undo_replacements_for_backtrack (save);
+ }
+ else
+ {
+ VEC_free (dep_t, heap, save->replacement_deps);
+ VEC_free (int, heap, save->replace_apply);
}
+
if (targetm.sched.free_sched_context)
targetm.sched.free_sched_context (save->be_saved_data);
if (current_sched_info->restore_state)
free_topmost_backtrack_point (false);
}
+/* Apply the replacement described by the DEP_REPLACE descriptor of DEP. If
+ IMMEDIATELY is false, we may have to postpone the replacement until the
+ start of the next cycle, at which point we will be called again with
+ IMMEDIATELY true. Postponing only happens for machines whose instruction
+ packets have explicit parallelism. */
+static void
+apply_replacement (dep_t dep, bool immediately)
+{
+ struct dep_replacement *desc = DEP_REPLACE (dep);
+ if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
+ {
+ VEC_safe_push (dep_t, heap, next_cycle_replace_deps, dep);
+ VEC_safe_push (int, heap, next_cycle_apply, 1);
+ }
+ else
+ {
+ bool success;
+
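+ /* If the insn to be modified has already been issued, leave its
+ pattern alone. */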
+ if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
+ return;
+
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "applying replacement for insn %d\n",
+ INSN_UID (desc->insn));
+
+ success = validate_change (desc->insn, desc->loc, desc->newval, 0);
+ gcc_assert (success);
+
+ update_insn_after_change (desc->insn);
+ if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
+ fix_tick_ready (desc->insn);
+
+ if (backtrack_queue != NULL)
+ {
+ VEC_safe_push (dep_t, heap, backtrack_queue->replacement_deps, dep);
+ VEC_safe_push (int, heap, backtrack_queue->replace_apply, 1);
+ }
+ }
+}
+
+/* We have determined that a pattern involved in DEP must be restored.
+ If IMMEDIATELY is false, we may have to postpone the replacement
+ until the start of the next cycle, at which point we will be called
+ again with IMMEDIATELY true. */
+static void
+restore_pattern (dep_t dep, bool immediately)
+{
+ rtx next = DEP_CON (dep);
+ int tick = INSN_TICK (next);
+
+ /* If we already scheduled the insn, the modified version is
+ correct. */
+ if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
+ return;
+
+ if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
+ {
+ VEC_safe_push (dep_t, heap, next_cycle_replace_deps, dep);
+ VEC_safe_push (int, heap, next_cycle_apply, 0);
+ return;
+ }
+
+ if (DEP_TYPE (dep) == REG_DEP_CONTROL)
+ {
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "restoring pattern for insn %d\n",
+ INSN_UID (next));
+ haifa_change_pattern (next, ORIG_PAT (next));
+ }
+ else
+ {
+ struct dep_replacement *desc = DEP_REPLACE (dep);
+ bool success;
+
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "restoring pattern for insn %d\n",
+ INSN_UID (desc->insn));
+ tick = INSN_TICK (desc->insn);
+
+ success = validate_change (desc->insn, desc->loc, desc->orig, 0);
+ gcc_assert (success);
+ update_insn_after_change (desc->insn);
+ if (backtrack_queue != NULL)
+ {
+ VEC_safe_push (dep_t, heap, backtrack_queue->replacement_deps, dep);
+ VEC_safe_push (int, heap, backtrack_queue->replace_apply, 0);
+ }
+ }
+ INSN_TICK (next) = tick;
+ if (TODO_SPEC (next) == DEP_POSTPONED)
+ return;
+
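+ /* The original pattern is back in place; recompute NEXT's status from
+ its remaining backward dependencies. */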
+ if (sd_lists_empty_p (next, SD_LIST_BACK))
+ TODO_SPEC (next) = 0;
+ else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
+ TODO_SPEC (next) = HARD_DEP;
+}
+
+/* Perform pattern replacements that were queued up until the next
+ cycle. */
+static void
+perform_replacements_new_cycle (void)
+{
+ int i;
+ dep_t dep;
+ FOR_EACH_VEC_ELT (dep_t, next_cycle_replace_deps, i, dep)
+ {
+ int apply_p = VEC_index (int, next_cycle_apply, i);
+ if (apply_p)
+ apply_replacement (dep, true);
+ else
+ restore_pattern (dep, true);
+ }
+ VEC_truncate (dep_t, next_cycle_replace_deps, 0);
+ VEC_truncate (int, next_cycle_apply, 0);
+}
+
/* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
instructions we've previously encountered, a set bit prevents
recursion. BUDGET is a limit on how far ahead we look, it is
return head;
}
+/* When we know we are going to discard the schedule due to a failed attempt
+ at modulo scheduling, undo all replacements. */
+static void
+undo_all_replacements (void)
+{
+ rtx insn;
+ int i;
+
+ FOR_EACH_VEC_ELT (rtx, scheduled_insns, i, insn)
+ {
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ /* See if we must undo a replacement. */
+ for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
+ sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
+ {
+ struct dep_replacement *desc = DEP_REPLACE (dep);
+ if (desc != NULL)
+ validate_change (desc->insn, desc->loc, desc->orig, 0);
+ }
+ }
+}
+
/* Move insns that became ready to fire from queue to ready list. */
static void
rtx head = NEXT_INSN (prev_head);
rtx tail = PREV_INSN (next_tail);
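+ /* Look for dependencies between a memory access and an increment of one
+ of its address registers that can be broken by rewriting the address,
+ unless the current pass has set DONT_BREAK_DEPENDENCIES. */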
+ if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0)
+ find_modifiable_mems (head, tail);
+
/* We used to have code to avoid getting parameters moved from hard
argument registers into pseudos.
/* Loop until all the insns in BB are scheduled. */
while ((*current_sched_info->schedule_more_p) ())
{
+ perform_replacements_new_cycle ();
do
{
start_clock_var = clock_var;
/* We normally get here only if we don't want to move
insn from the split block. */
{
- TODO_SPEC (insn) = HARD_DEP;
+ TODO_SPEC (insn) = DEP_POSTPONED;
goto restart_choose_ready;
}
gcc_assert (failed);
failed_insn = failed->delay_pair->i1;
+ /* Clear these queues. */
+ perform_replacements_new_cycle ();
toggle_cancelled_flags (false);
unschedule_insns_until (failed_insn);
while (failed != backtrack_queue)
if (ls.modulo_epilogue)
success = true;
end_schedule:
+ perform_replacements_new_cycle ();
if (modulo_ii > 0)
{
/* Once again, debug insn suckiness: they can be on the ready list
}
}
+ if (!success)
+ undo_all_replacements ();
+
/* Debug info. */
if (sched_verbose)
{
old_ts = TODO_SPEC (next);
- gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL))
- && ((old_ts & HARD_DEP)
+ gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
+ && (old_ts == HARD_DEP
+ || old_ts == DEP_POSTPONED
|| (old_ts & SPECULATIVE)
- || (old_ts & DEP_CONTROL)));
+ || old_ts == DEP_CONTROL));
- new_ts = recompute_todo_spec (next);
+ new_ts = recompute_todo_spec (next, false);
- if (new_ts & HARD_DEP)
+ if (new_ts & (HARD_DEP | DEP_POSTPONED))
gcc_assert (new_ts == old_ts
&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
else if (current_sched_info->new_ready)
TODO_SPEC (next) = new_ts;
- if (new_ts & HARD_DEP)
+ if (new_ts & (HARD_DEP | DEP_POSTPONED))
{
/* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
control-speculative NEXT could have been discarded by sched-rgn.c
static bool
haifa_change_pattern (rtx insn, rtx new_pat)
{
- sd_iterator_def sd_it;
- dep_t dep;
int t;
t = validate_change (insn, &PATTERN (insn), new_pat, 0);
if (!t)
return false;
- dfa_clear_single_insn_cache (insn);
-
- sd_it = sd_iterator_start (insn,
- SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
- while (sd_iterator_cond (&sd_it, &dep))
- {
- DEP_COST (dep) = UNKNOWN_DEP_COST;
- sd_iterator_next (&sd_it);
- }
- /* Invalidate INSN_COST, so it'll be recalculated. */
- INSN_COST (insn) = -1;
- /* Invalidate INSN_TICK, so it'll be recalculated. */
- INSN_TICK (insn) = INVALID_TICK;
+ update_insn_after_change (insn);
return true;
}
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
+#include "emit-rtl.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
DEP_TYPE (dep) = type;
DEP_STATUS (dep) = ds;
DEP_COST (dep) = UNKNOWN_DEP_COST;
+ DEP_NONREG (dep) = 0;
+ DEP_MULTIPLE (dep) = 0;
+ DEP_REPLACE (dep) = NULL;
}
/* Init DEP with the arguments.
if (DEP_TYPE (dep) == REG_DEP_CONTROL)
return true;
}
+ if (DEP_REPLACE (dep) != NULL)
+ return true;
return false;
}
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
+/* True if we should mark added dependencies as non-register deps. */
+static bool mark_as_hard;
+
static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx, rtx, enum reg_note);
-static void add_dependence_list (rtx, rtx, int, enum reg_note);
+static void add_dependence_list (rtx, rtx, int, enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx,
- rtx *, int, enum reg_note);
+ rtx *, int, enum reg_note, bool);
static void delete_all_dependences (rtx);
static void chain_to_prev_insn (rtx);
enum reg_note old_type = DEP_TYPE (dep);
bool was_spec = dep_spec_p (dep);
+ DEP_NONREG (dep) |= DEP_NONREG (new_dep);
+ DEP_MULTIPLE (dep) = 1;
+
/* If this is a more restrictive type of dependence than the
existing one, then change the existing dependence to this
type. */
fprintf (sched_dump, "making DEP_CONTROL for %d\n",
INSN_UID (real_pro));
add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
- REG_DEP_TRUE);
+ REG_DEP_TRUE, false);
}
}
add_dependence_1 (con, pro, dep_type);
}
-/* A convenience wrapper to operate on an entire list. */
+/* A convenience wrapper to operate on an entire list. HARD should be
+ true if DEP_NONREG should be set on newly created dependencies. */
static void
-add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
+add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type,
+ bool hard)
{
+ mark_as_hard = hard;
for (; list; list = XEXP (list, 1))
{
if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
add_dependence (insn, XEXP (list, 0), dep_type);
}
+ mark_as_hard = false;
}
/* Similar, but free *LISTP at the same time, when the context
- is not readonly. */
+ is not readonly. HARD should be true if DEP_NONREG should be set on
+ newly created dependencies. */
static void
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
- int uncond, enum reg_note dep_type)
+ int uncond, enum reg_note dep_type, bool hard)
{
- add_dependence_list (insn, *listp, uncond, dep_type);
+ add_dependence_list (insn, *listp, uncond, dep_type, hard);
/* We don't want to short-circuit dependencies involving debug
insns, because they may cause actual dependencies to be
if (for_write)
{
add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
- 1, REG_DEP_ANTI);
+ 1, REG_DEP_ANTI, true);
if (!deps->readonly)
{
free_EXPR_LIST_list (&deps->pending_read_mems);
}
add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
- for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
+ for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
+ true);
add_dependence_list_and_free (deps, insn,
&deps->last_pending_memory_flush, 1,
- for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
+ for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
+ true);
add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
if (DEBUG_INSN_P (insn))
{
deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
deps->pending_flush_length = 1;
}
+ mark_as_hard = false;
}
\f
/* Instruction which dependencies we are analyzing. */
init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
+ DEP_NONREG (dep) = 1;
maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
}
dep_t dep = &_dep;
init_dep (dep, elem, cur_insn, ds_to_dt (ds));
+ if (mark_as_hard)
+ DEP_NONREG (dep) = 1;
maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
= alloc_INSN_LIST (insn, deps->sched_before_next_call);
else
add_dependence_list (insn, deps->last_function_call, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
}
}
}
}
add_dependence_list (insn, deps->last_pending_memory_flush, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
add_dependence_list (insn, deps->pending_jump_insns, 1,
- REG_DEP_CONTROL);
+ REG_DEP_CONTROL, true);
if (!deps->readonly)
add_insn_mem_dependence (deps, false, insn, dest);
/* Avoid moving trapping instructions across function calls that might
not always return. */
add_dependence_list (insn, deps->last_function_call_may_noreturn,
- 1, REG_DEP_ANTI);
+ 1, REG_DEP_ANTI, true);
/* We must avoid creating a situation in which two successors of the
current block have different unwind info after scheduling. If at any
= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
/* Make sure epilogue insn is scheduled after preceding jumps. */
- add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI);
+ add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
+ true);
}
if (code == COND_EXEC)
instruction so that reg-stack won't get confused. */
if (code == CLOBBER)
add_dependence_list (insn, deps->last_function_call, 1,
- REG_DEP_OUTPUT);
+ REG_DEP_OUTPUT, true);
}
else if (code == PARALLEL)
{
EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
+ false);
add_dependence_list (insn, reg_last->implicit_sets,
- 0, REG_DEP_ANTI);
+ 0, REG_DEP_ANTI, false);
add_dependence_list (insn, reg_last->clobbers, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
}
}
}
add_dependence_list (insn, deps->last_pending_memory_flush, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
add_dependence_list (insn, deps->pending_jump_insns, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
}
}
add_dependence (insn, prev, REG_DEP_ANTI);
add_dependence_list (insn, deps->last_function_call, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
- for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- if (!sel_sched_p ())
+ if (!sel_sched_p ())
+ for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
/* There's no point in making REG_DEP_CONTROL dependencies for
debug insns. */
- add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
+ false);
if (!deps->readonly)
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
- add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
+ add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
+ false);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
+ false);
if (!deps->readonly)
{
if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
add_dependence_list (insn, reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
+ REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
+ false);
if (!deps->readonly)
{
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
+ false);
add_dependence_list (insn, reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
+ false);
add_dependence_list (insn, reg_last->control_uses, 0,
- REG_DEP_CONTROL);
+ REG_DEP_CONTROL, false);
if (!deps->readonly)
{
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
+ false);
add_dependence_list (insn, reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
+ false);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
+ false);
add_dependence_list (insn, reg_last->control_uses, 0,
- REG_DEP_CONTROL);
+ REG_DEP_CONTROL, false);
if (!deps->readonly)
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
|| reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
{
add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
- REG_DEP_OUTPUT);
+ REG_DEP_OUTPUT, false);
add_dependence_list_and_free (deps, insn,
&reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
add_dependence_list_and_free (deps, insn,
&reg_last->control_uses, 0,
- REG_DEP_ANTI);
- add_dependence_list_and_free
- (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
+ REG_DEP_ANTI, false);
+ add_dependence_list_and_free (deps, insn,
+ &reg_last->clobbers, 0,
+ REG_DEP_OUTPUT, false);
if (!deps->readonly)
{
}
else
{
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
+ false);
add_dependence_list (insn, reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
+ false);
add_dependence_list (insn, reg_last->control_uses, 0,
- REG_DEP_CONTROL);
+ REG_DEP_CONTROL, false);
}
if (!deps->readonly)
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
- REG_DEP_OUTPUT);
+ REG_DEP_OUTPUT, false);
add_dependence_list_and_free (deps, insn,
&reg_last->implicit_sets,
- 0, REG_DEP_ANTI);
+ 0, REG_DEP_ANTI, false);
add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
- REG_DEP_OUTPUT);
+ REG_DEP_OUTPUT, false);
add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, false);
add_dependence_list (insn, reg_last->control_uses, 0,
- REG_DEP_CONTROL);
+ REG_DEP_CONTROL, false);
if (!deps->readonly)
{
if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
- add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
+ add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
+ false);
if (!deps->readonly)
reg_last->implicit_sets
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
- add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
+ add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
+ true);
add_dependence_list (insn, reg_last->sets, 0,
reg_pending_barrier == TRUE_BARRIER
- ? REG_DEP_TRUE : REG_DEP_ANTI);
+ ? REG_DEP_TRUE : REG_DEP_ANTI, true);
add_dependence_list (insn, reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
add_dependence_list (insn, reg_last->clobbers, 0,
reg_pending_barrier == TRUE_BARRIER
- ? REG_DEP_TRUE : REG_DEP_ANTI);
+ ? REG_DEP_TRUE : REG_DEP_ANTI, true);
}
}
else
{
struct deps_reg *reg_last = &deps->reg_last[i];
add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
add_dependence_list_and_free (deps, insn,
&reg_last->control_uses, 0,
- REG_DEP_CONTROL);
+ REG_DEP_CONTROL, true);
add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
reg_pending_barrier == TRUE_BARRIER
- ? REG_DEP_TRUE : REG_DEP_ANTI);
+ ? REG_DEP_TRUE : REG_DEP_ANTI,
+ true);
add_dependence_list_and_free (deps, insn,
&reg_last->implicit_sets, 0,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
reg_pending_barrier == TRUE_BARRIER
- ? REG_DEP_TRUE : REG_DEP_ANTI);
+ ? REG_DEP_TRUE : REG_DEP_ANTI,
+ true);
if (!deps->readonly)
{
/* For each insn which shouldn't cross a jump, add a dependence. */
add_dependence_list_and_free (deps, insn,
&deps->sched_before_next_jump, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
sched_analyze_insn (deps, PATTERN (insn), insn);
}
between that insn and this call insn. */
add_dependence_list_and_free (deps, insn,
&deps->sched_before_next_call, 1,
- REG_DEP_ANTI);
+ REG_DEP_ANTI, true);
sched_analyze_insn (deps, PATTERN (insn), insn);
}
#endif /* ENABLE_CHECKING */
+/* The following code discovers opportunities to switch a memory reference
+ and an increment by modifying the address. We ensure that this is done
+ only for dependencies that do nothing more than express a single register
+ dependence (tracked via DEP_NONREG and DEP_MULTIPLE), and that every memory
+ instruction involved is subject to at most one dep that can cause a pattern
+ change.
+
+ When we discover a suitable dependency, we fill in the dep_replacement
+ structure to show how to modify the memory reference. */
+
+/* Holds information about a pair of memory reference and register increment
+ insns which depend on each other, but could possibly be interchanged. */
+struct mem_inc_info
+{
+ rtx inc_insn;
+ rtx mem_insn;
+
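+ /* The location of the MEM inside mem_insn's pattern; this is where a
+ replacement will be applied. */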
+ rtx *mem_loc;
+ /* A register occurring in the memory address for which we wish to break
+ the dependence. This must be identical to the destination register of
+ the increment. */
+ rtx mem_reg0;
+ /* Any kind of index that is added to that register. */
+ rtx mem_index;
+ /* The constant offset used in the memory address. */
+ HOST_WIDE_INT mem_constant;
+ /* The constant added in the increment insn. Negated if the increment is
+ after the memory address. */
+ HOST_WIDE_INT inc_constant;
+ /* The source register used in the increment. May be different from mem_reg0
+ if the increment occurs before the memory address. */
+ rtx inc_input;
+};
+
+/* Verify that the memory location described in MII can be replaced with
+ one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
+ insn remains unchanged by this function. */
+
+static rtx
+attempt_change (struct mem_inc_info *mii, rtx new_addr)
+{
+ rtx mem = *mii->mem_loc;
+ rtx new_mem;
+
+ /* Jump through a lot of hoops to keep the attributes up to date. We
+ do not want to call one of the change address variants that take
+ an offset even though we know the offset in many cases. These
+ assume you are changing where the address is pointing by the
+ offset. */
+ new_mem = replace_equiv_address_nv (mem, new_addr);
+ if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
+ {
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "validation failure\n");
+ return NULL_RTX;
+ }
+
+ /* Put back the old one. */
+ validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
+
+ return new_mem;
+}
+
+/* Return true if INSN is of a form "a = b op c" where a and b are
+ regs. op is + if c is a reg and +|- if c is a const. Fill in
+ information in MII about what is found.
+ BEFORE_MEM indicates whether the increment is found before or after
+ a corresponding memory reference. */
+
+static bool
+parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
+{
+ rtx pat = single_set (insn);
+ rtx src, cst;
+ bool regs_equal;
+
+ if (RTX_FRAME_RELATED_P (insn) || !pat)
+ return false;
+
+ /* Result must be single reg. */
+ if (!REG_P (SET_DEST (pat)))
+ return false;
+
+ if (GET_CODE (SET_SRC (pat)) != PLUS
+ && GET_CODE (SET_SRC (pat)) != MINUS)
+ return false;
+
+ mii->inc_insn = insn;
+ src = SET_SRC (pat);
+ mii->inc_input = XEXP (src, 0);
+
+ if (!REG_P (XEXP (src, 0)))
+ return false;
+
+ if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
+ return false;
+
+ cst = XEXP (src, 1);
+ if (!CONST_INT_P (cst))
+ return false;
+ mii->inc_constant = INTVAL (cst);
+
+ regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
+
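+ /* If the increment comes after the memory reference, breaking the
+ dependence means the memory insn will execute before the increment,
+ so its constant must be subtracted; this is only valid if the
+ increment reads and writes the same register. */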
+ if (!before_mem)
+ {
+ mii->inc_constant = -mii->inc_constant;
+ if (!regs_equal)
+ return false;
+ }
+
+ if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
+ /* Note that the sign has already been reversed for !before_mem. */
+ return mii->inc_constant > 0;
+
+ return true;
+}
+
+/* Once a suitable mem reference has been found and the corresponding data
+ in MII has been filled in, this function is called to find a suitable
+ add or inc insn involving the register we found in the memory
+ reference. */
+
+static bool
+find_inc (struct mem_inc_info *mii, bool backwards)
+{
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ sd_it = sd_iterator_start (mii->mem_insn,
+ backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
+ while (sd_iterator_cond (&sd_it, &dep))
+ {
+ dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
+ rtx pro = DEP_PRO (dep);
+ rtx con = DEP_CON (dep);
+ rtx inc_cand = backwards ? pro : con;
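+ /* Only a dependence that expresses nothing more than a single register
+ dependence can be broken by rewriting the address. */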
+ if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
+ goto next;
+ if (parse_add_or_inc (mii, inc_cand, backwards))
+ {
+ struct dep_replacement *desc;
+ df_ref *def_rec;
+ rtx newaddr, newmem;
+
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
+ INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
+
+ /* Need to ensure that none of the operands of the inc
+ instruction are assigned to by the mem insn. */
+ for (def_rec = DF_INSN_DEFS (mii->mem_insn); *def_rec; def_rec++)
+ {
+ df_ref def = *def_rec;
+ if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
+ || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
+ {
+ if (sched_verbose >= 5)
+ fprintf (sched_dump,
+ "inc conflicts with store failure.\n");
+ goto next;
+ }
+ }
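+ /* Build the address the memory insn needs on the other side of the
+ increment: the increment's input register, plus any index, plus the
+ sum of the two constants. */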
+ newaddr = mii->inc_input;
+ if (mii->mem_index != NULL_RTX)
+ newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
+ mii->mem_index);
+ newaddr = plus_constant (GET_MODE (newaddr), newaddr,
+ mii->mem_constant + mii->inc_constant);
+ newmem = attempt_change (mii, newaddr);
+ if (newmem == NULL_RTX)
+ goto next;
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "successful address replacement\n");
+ desc = XCNEW (struct dep_replacement);
+ DEP_REPLACE (dep) = desc;
+ desc->loc = mii->mem_loc;
+ desc->newval = newmem;
+ desc->orig = *desc->loc;
+ desc->insn = mii->mem_insn;
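+ /* Move the dependence from CON's hard back-deps list to its
+ speculative back-deps list, so the scheduler may treat it as
+ breakable. */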
+ move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
+ INSN_SPEC_BACK_DEPS (con));
+ if (backwards)
+ {
+ FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
+ if (modified_in_p (mii->inc_input, DEP_PRO (dep)))
+ add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
+ REG_DEP_TRUE);
+ }
+ else
+ {
+ FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
+ if (modified_in_p (mii->inc_input, DEP_CON (dep)))
+ add_dependence_1 (DEP_CON (dep), mii->mem_insn,
+ REG_DEP_ANTI);
+ }
+ return true;
+ }
+ next:
+ sd_iterator_next (&sd_it);
+ }
+ return false;
+}
+
+/* A recursive function that walks ADDRESS_OF_X to find memory references
+ which could be modified during scheduling. We call find_inc for each
+ one we find that has a recognizable form. MII holds information about
+ the pair of memory/increment instructions.
+ We ensure that every instruction with a memory reference (which will be
+ the location of the replacement) is assigned at most one breakable
+ dependency. */
+
+static bool
+find_mem (struct mem_inc_info *mii, rtx *address_of_x)
+{
+ rtx x = *address_of_x;
+ enum rtx_code code = GET_CODE (x);
+ const char *const fmt = GET_RTX_FORMAT (code);
+ int i;
+
+ if (code == MEM)
+ {
+ rtx reg0 = XEXP (x, 0);
+
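+ /* Split the address into a base register, an optional index, and an
+ optional constant offset. */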
+ mii->mem_loc = address_of_x;
+ mii->mem_index = NULL_RTX;
+ mii->mem_constant = 0;
+ if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
+ {
+ mii->mem_constant = INTVAL (XEXP (reg0, 1));
+ reg0 = XEXP (reg0, 0);
+ }
+ if (GET_CODE (reg0) == PLUS)
+ {
+ mii->mem_index = XEXP (reg0, 1);
+ reg0 = XEXP (reg0, 0);
+ }
+ if (REG_P (reg0))
+ {
+ df_ref *def_rec;
+ int occurrences = 0;
+
+ /* Make sure this reg appears only once in this insn. Can't use
+ count_occurrences since that only works for pseudos. */
+ for (def_rec = DF_INSN_USES (mii->mem_insn); *def_rec; def_rec++)
+ {
+ df_ref def = *def_rec;
+ if (reg_overlap_mentioned_p (reg0, DF_REF_REG (def)))
+ if (++occurrences > 1)
+ {
+ if (sched_verbose >= 5)
+ fprintf (sched_dump, "mem count failure\n");
+ return false;
+ }
+ }
+
+ mii->mem_reg0 = reg0;
+ return find_inc (mii, true) || find_inc (mii, false);
+ }
+ return false;
+ }
+
+ if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
+ {
+ /* If REG occurs inside a MEM used in a bit-field reference,
+ that is unacceptable. */
+ return false;
+ }
+
+ /* Time for some deep diving. */
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (find_mem (mii, &XEXP (x, i)))
+ return true;
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (find_mem (mii, &XVECEXP (x, i, j)))
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Examine the instructions between HEAD and TAIL and try to find
+ dependencies that can be broken by modifying one of the patterns. */
+
+void
+find_modifiable_mems (rtx head, rtx tail)
+{
+ rtx insn;
+ int success_in_block = 0;
+
+ for (insn = head; insn != tail; insn = NEXT_INSN (insn))
+ {
+ struct mem_inc_info mii;
+
+ if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
+ continue;
+
+ mii.mem_insn = insn;
+ if (find_mem (&mii, &PATTERN (insn)))
+ success_in_block++;
+ }
+ if (success_in_block && sched_verbose >= 5)
+ fprintf (sched_dump, "%d candidates for address modification found.\n",
+ success_in_block);
+}
+
#endif /* INSN_SCHEDULING */