+2009-11-13 Andrey Belevantsev <abel@ispras.ru>
+
+ * sched-deps.c (init_deps): New parameter lazy_reg_last. Don't
+ allocate reg_last when lazy_reg_last is true.
+ (init_deps_reg_last): New.
+ (free_deps): When max_reg is 0, this context is already freed.
+ * sched-int.h (init_deps_reg_last): Export.
+ (init_deps): Update prototype.
+ * sched-ebb.c (schedule_ebb): Update the call to init_deps.
+ * sched-rgn.c (sched_rgn_compute_dependencies): Likewise.
+ * ddg.c (build_intra_loop_deps): Likewise.
+ * sel-sched-ir.c (copy_deps_context, create_deps_context,
+ reset_deps_context, deps_init_id): Likewise.
+ (init_first_time_insn_data): Lazy allocate INSN_DEPS_CONTEXT.
+ (free_data_for_scheduled_insn): New, broken out from ...
+ (free_first_time_insn_data): ... here.
+ (has_dependence_p): Allocate reg_last now, when it is needed.
+ (extend_insn_data): When maximal LUID is big enough, allocate
+ per-insn data in smaller chunks.
+ * sel-sched-ir.h (free_data_for_scheduled_insn): Export.
+ * sel-sched.c (update_seqnos_and_stage): Free INSN_DEPS_CONTEXT
+ in scheduled insn.
+
2009-11-13 Uros Bizjak <ubizjak@gmail.com>
* config/i386/i386.md (call_value): Fix comment.
/* Build the dependence information, using the sched_analyze function. */
init_deps_global ();
- init_deps (&tmp_deps);
+ init_deps (&tmp_deps, false);
/* Do the intra-block data dependence analysis for the given block. */
get_ebb_head_tail (g->bb, g->bb, &head, &tail);
}
\f
/* Initialize variables for region data dependence analysis.
- n_bbs is the number of region blocks. */
+ When LAZY_REG_LAST is true, do not allocate reg_last array
+ of struct deps immediately. */
void
-init_deps (struct deps *deps)
+init_deps (struct deps *deps, bool lazy_reg_last)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->max_reg = max_reg;
- deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
+ if (lazy_reg_last)
+ deps->reg_last = NULL;
+ else
+ deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
INIT_REG_SET (&deps->reg_last_in_use);
INIT_REG_SET (&deps->reg_conditional_sets);
deps->readonly = 0;
}
+/* Init only the reg_last field of DEPS, which was not allocated
+   before because we initialized DEPS lazily.  */
+void
+init_deps_reg_last (struct deps *deps)
+{
+ gcc_assert (deps && deps->max_reg > 0);
+ gcc_assert (deps->reg_last == NULL);
+
+ deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
+}
+
+
/* Free insn lists found in DEPS. */
void
unsigned i;
reg_set_iterator rsi;
+ /* We set max_reg to 0 when this context was already freed. */
+ if (deps->max_reg == 0)
+ {
+ gcc_assert (deps->reg_last == NULL);
+ return;
+ }
+ deps->max_reg = 0;
+
free_INSN_LIST_list (&deps->pending_read_insns);
free_EXPR_LIST_list (&deps->pending_read_mems);
free_INSN_LIST_list (&deps->pending_write_insns);
CLEAR_REG_SET (&deps->reg_last_in_use);
CLEAR_REG_SET (&deps->reg_conditional_sets);
- free (deps->reg_last);
+ /* As we initialize reg_last lazily, it is possible that we didn't allocate
+ it at all. */
+ if (deps->reg_last)
+ free (deps->reg_last);
deps->reg_last = NULL;
deps = NULL;
init_deps_global ();
/* Compute dependencies. */
- init_deps (&tmp_deps);
+ init_deps (&tmp_deps, false);
sched_analyze (&tmp_deps, head, tail);
free_deps (&tmp_deps);
extern bool sched_insn_is_legitimate_for_speculation_p (const_rtx, ds_t);
extern void add_dependence (rtx, rtx, enum reg_note);
extern void sched_analyze (struct deps *, rtx, rtx);
-extern void init_deps (struct deps *);
+extern void init_deps (struct deps *, bool);
+extern void init_deps_reg_last (struct deps *);
extern void free_deps (struct deps *);
extern void init_deps_global (void);
extern void finish_deps_global (void);
/* Initializations for region data dependence analysis. */
bb_deps = XNEWVEC (struct deps, current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
- init_deps (bb_deps + bb);
+ init_deps (bb_deps + bb, false);
/* Initialize bitmap used in add_branch_dependences. */
insn_referenced = sbitmap_alloc (sched_max_luid);
static void
copy_deps_context (deps_t to, deps_t from)
{
- init_deps (to);
+ init_deps (to, false);
deps_join (to, from);
}
{
deps_t dc = alloc_deps_context ();
- init_deps (dc);
+ init_deps (dc, false);
return dc;
}
reset_deps_context (deps_t dc)
{
clear_deps_context (dc);
- init_deps (dc);
+ init_deps (dc, false);
}
/* This structure describes the dependence analysis hooks for advancing
deps_init_id_data.force_unique_p = force_unique_p;
deps_init_id_data.force_use_p = false;
- init_deps (dc);
+ init_deps (dc, false);
memcpy (&deps_init_id_sched_deps_info,
&const_deps_init_id_sched_deps_info,
/* These are needed for nops too. */
INSN_LIVE (insn) = get_regset_from_pool ();
INSN_LIVE_VALID_P (insn) = false;
-
+
if (!INSN_NOP_P (insn))
{
INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
INSN_TRANSFORMED_INSNS (insn)
= htab_create (16, hash_transformed_insns,
eq_transformed_insns, free_transformed_insns);
- init_deps (&INSN_DEPS_CONTEXT (insn));
+ init_deps (&INSN_DEPS_CONTEXT (insn), true);
}
}
-/* Free the same data as above for INSN. */
-static void
-free_first_time_insn_data (insn_t insn)
+/* Free almost all above data for INSN that is scheduled already.
+ Used for extra-large basic blocks. */
+void
+free_data_for_scheduled_insn (insn_t insn)
{
gcc_assert (! first_time_insn_init (insn));
-
+
+ if (! INSN_ANALYZED_DEPS (insn))
+ return;
+
BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
BITMAP_FREE (INSN_FOUND_DEPS (insn));
htab_delete (INSN_TRANSFORMED_INSNS (insn));
- return_regset_to_pool (INSN_LIVE (insn));
- INSN_LIVE (insn) = NULL;
- INSN_LIVE_VALID_P (insn) = false;
-
+
/* This is allocated only for bookkeeping insns. */
if (INSN_ORIGINATORS (insn))
BITMAP_FREE (INSN_ORIGINATORS (insn));
free_deps (&INSN_DEPS_CONTEXT (insn));
+
+ INSN_ANALYZED_DEPS (insn) = NULL;
+
+ /* Clear the readonly flag so we would ICE when trying to recalculate
+ the deps context (as we believe that it should not happen). */
+ (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
+}
+
+/* Free the same data as above for INSN. */
+static void
+free_first_time_insn_data (insn_t insn)
+{
+ gcc_assert (! first_time_insn_init (insn));
+
+ free_data_for_scheduled_insn (insn);
+ return_regset_to_pool (INSN_LIVE (insn));
+ INSN_LIVE (insn) = NULL;
+ INSN_LIVE_VALID_P (insn) = false;
}
/* Initialize region-scope data structures for basic blocks. */
return false;
dc = &INSN_DEPS_CONTEXT (pred);
+
+ /* We init this field lazily. */
+ if (dc->reg_last == NULL)
+ init_deps_reg_last (dc);
+
if (!dc->readonly)
{
has_dependence_data.pro = NULL;
- VEC_length (sel_insn_data_def, s_i_d));
if (reserve > 0
&& ! VEC_space (sel_insn_data_def, s_i_d, reserve))
- VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d,
- 3 * sched_max_luid / 2);
+ {
+ int size;
+
+ if (sched_max_luid / 2 > 1024)
+ size = sched_max_luid + 1024;
+ else
+ size = 3 * sched_max_luid / 2;
+
+
+ VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
+ }
}
/* Finalize data structures for insns from current region. */
extern void free_lv_sets (void);
extern void setup_nop_and_exit_insns (void);
extern void free_nop_and_exit_insns (void);
+extern void free_data_for_scheduled_insn (insn_t);
extern void setup_nop_vinsn (void);
extern void free_nop_vinsn (void);
extern void sel_set_sched_flags (void);
gcc_assert (INSN_SEQNO (insn) < 0);
INSN_SEQNO (insn) += highest_seqno_in_use + max_seqno - min_seqno + 2;
gcc_assert (INSN_SEQNO (insn) <= new_hs);
+
+ /* When not pipelining, purge unneeded insn info on the scheduled insns.
+ For example, having reg_last array of INSN_DEPS_CONTEXT in memory may
+ require > 1GB of memory e.g. on limit-fnargs.c. */
+ if (! pipelining_p)
+ free_data_for_scheduled_insn (insn);
}
ilist_clear (pscheduled_insns);