/* Allocation for dataflow support routines.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
- 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+ Copyright (C) 1999-2021 Free Software Foundation, Inc.
Originally contributed by Michael P. Hayes
(m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
next call to df_analyze or df_process_deferred_rescans.
This mode is also used by a few passes that still rely on note_uses,
- note_stores and for_each_rtx instead of using the DF data. This
+ note_stores and rtx iterators instead of using the DF data. This
can be said to fall under case 1c.
To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
Artificial defs and uses occur both at the beginning and ends of blocks.
- For blocks that area at the destination of eh edges, the
+ For blocks that are at the destination of eh edges, the
artificial uses and defs occur at the beginning. The defs relate
to the registers specified in EH_RETURN_DATA_REGNO and the uses
- relate to the registers specified in ED_USES. Logically these
+ relate to the registers specified in EH_USES. Logically these
defs and uses should really occur along the eh edge, but there is
- no convenient way to do this. Artificial edges that occur at the
+ no convenient way to do this. Artificial defs that occur at the
beginning of the block have the DF_REF_AT_TOP flag set.
Artificial uses occur at the end of all blocks. These arise from
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
#include "rtl.h"
-#include "tm_p.h"
-#include "insn-config.h"
-#include "recog.h"
-#include "function.h"
-#include "regs.h"
-#include "alloc-pool.h"
-#include "flags.h"
-#include "hard-reg-set.h"
-#include "basic-block.h"
-#include "sbitmap.h"
-#include "bitmap.h"
-#include "timevar.h"
#include "df.h"
+#include "memmodel.h"
+#include "emit-rtl.h"
+#include "cfganal.h"
#include "tree-pass.h"
-#include "params.h"
+#include "cfgloop.h"
static void *df_get_bb_info (struct dataflow *, unsigned int);
static void df_set_bb_info (struct dataflow *, unsigned int, void *);
static void df_set_clean_cfg (void);
#endif
+/* The obstack on which regsets are allocated. */
+struct bitmap_obstack reg_obstack;
+
/* An obstack for bitmap not related to specific dataflow problems.
This obstack should e.g. be used for bitmaps with a short life time
such as temporary bitmaps. */
Functions to create, destroy and manipulate an instance of df.
----------------------------------------------------------------------------*/
-struct df_d *df;
+class df_d *df;
/* Add PROBLEM (and any dependent problems) to the DF instance. */
void
-df_add_problem (struct df_problem *problem)
+df_add_problem (const struct df_problem *problem)
{
struct dataflow *dflow;
int i;
/* This block is called to change the focus from one subset
to another. */
int p;
- bitmap_head diff;
- bitmap_initialize (&diff, &df_bitmap_obstack);
- bitmap_and_compl (&diff, df->blocks_to_analyze, blocks);
+ auto_bitmap diff (&df_bitmap_obstack);
+ bitmap_and_compl (diff, df->blocks_to_analyze, blocks);
for (p = 0; p < df->num_problems_defined; p++)
{
struct dataflow *dflow = df->problems_in_order[p];
bitmap_iterator bi;
unsigned int bb_index;
- EXECUTE_IF_SET_IN_BITMAP (&diff, 0, bb_index, bi)
+ EXECUTE_IF_SET_IN_BITMAP (diff, 0, bb_index, bi)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
if (bb)
{
void *bb_info = df_get_bb_info (dflow, bb_index);
}
}
}
-
- bitmap_clear (&diff);
}
else
{
{
basic_block bb;
bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack);
- FOR_ALL_BB(bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
bitmap_set_bit (&blocks_to_reset, bb->index);
}
void
df_remove_problem (struct dataflow *dflow)
{
- struct df_problem *problem;
+ const struct df_problem *problem;
int i;
if (!dflow)
df_finish_pass (bool verify ATTRIBUTE_UNUSED)
{
int i;
- int removed = 0;
#ifdef ENABLE_DF_CHECKING
int saved_flags;
saved_flags = df->changeable_flags;
#endif
- for (i = 0; i < df->num_problems_defined; i++)
+  /* We iterate over problems by index, as each problem removed will
+     cause problems_in_order to be reordered.  */
+ for (i = 0; i < DF_LAST_PROBLEM_PLUS1; i++)
{
- struct dataflow *dflow = df->problems_in_order[i];
- struct df_problem *problem = dflow->problem;
+ struct dataflow *dflow = df->problems_by_index[i];
- if (dflow->optional_p)
- {
- gcc_assert (problem->remove_problem_fun);
- (problem->remove_problem_fun) ();
- df->problems_in_order[i] = NULL;
- df->problems_by_index[problem->id] = NULL;
- removed++;
- }
+ if (dflow && dflow->optional_p)
+ df_remove_problem (dflow);
}
- df->num_problems_defined -= removed;
/* Clear all of the flags. */
df->changeable_flags = 0;
#endif
#endif
-#ifdef ENABLE_CHECKING
- if (verify)
+ if (flag_checking && verify)
df->changeable_flags |= DF_VERIFY_SCHEDULED;
-#endif
}
rest_of_handle_df_initialize (void)
{
gcc_assert (!df);
- df = XCNEW (struct df_d);
+ df = XCNEW (class df_d);
df->changeable_flags = 0;
bitmap_obstack_initialize (&df_bitmap_obstack);
if (optimize > 1)
df_live_add_problem ();
- df->postorder = XNEWVEC (int, last_basic_block);
- df->postorder_inverted = XNEWVEC (int, last_basic_block);
+ df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
df->n_blocks = post_order_compute (df->postorder, true, true);
- df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
- gcc_assert (df->n_blocks == df->n_blocks_inverted);
+ inverted_post_order_compute (&df->postorder_inverted);
+ gcc_assert ((unsigned) df->n_blocks == df->postorder_inverted.length ());
- df->hard_regs_live_count = XNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
- memset (df->hard_regs_live_count, 0,
- sizeof (unsigned int) * FIRST_PSEUDO_REGISTER);
+ df->hard_regs_live_count = XCNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
df_hard_reg_init ();
/* After reload, some ports add certain bits to regs_ever_live so
}
-static bool
-gate_opt (void)
-{
- return optimize > 0;
-}
+namespace {
-
-struct rtl_opt_pass pass_df_initialize_opt =
+const pass_data pass_data_df_initialize_opt =
{
- {
- RTL_PASS,
- "dfinit", /* name */
- gate_opt, /* gate */
- rest_of_handle_df_initialize, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_DF_SCAN, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
+ RTL_PASS, /* type */
+ "dfinit", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_DF_SCAN, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
+class pass_df_initialize_opt : public rtl_opt_pass
+{
+public:
+ pass_df_initialize_opt (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_df_initialize_opt, ctxt)
+ {}
-static bool
-gate_no_opt (void)
+ /* opt_pass methods: */
+ virtual bool gate (function *) { return optimize > 0; }
+ virtual unsigned int execute (function *)
+ {
+ return rest_of_handle_df_initialize ();
+ }
+
+}; // class pass_df_initialize_opt
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_df_initialize_opt (gcc::context *ctxt)
{
- return optimize == 0;
+ return new pass_df_initialize_opt (ctxt);
}
-struct rtl_opt_pass pass_df_initialize_no_opt =
+namespace {
+
+const pass_data pass_data_df_initialize_no_opt =
{
- {
- RTL_PASS,
- "no-opt dfinit", /* name */
- gate_no_opt, /* gate */
- rest_of_handle_df_initialize, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_DF_SCAN, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
+ RTL_PASS, /* type */
+ "no-opt dfinit", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_DF_SCAN, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
+class pass_df_initialize_no_opt : public rtl_opt_pass
+{
+public:
+ pass_df_initialize_no_opt (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_df_initialize_no_opt, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *) { return optimize == 0; }
+ virtual unsigned int execute (function *)
+ {
+ return rest_of_handle_df_initialize ();
+ }
+
+}; // class pass_df_initialize_no_opt
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_df_initialize_no_opt (gcc::context *ctxt)
+{
+ return new pass_df_initialize_no_opt (ctxt);
+}
+
/* Free all the dataflow info and the DF structure. This should be
called from the df_finish macro which also NULLs the parm. */
}
free (df->postorder);
- free (df->postorder_inverted);
+ df->postorder_inverted.release ();
free (df->hard_regs_live_count);
free (df);
df = NULL;
}
-struct rtl_opt_pass pass_df_finish =
-{
- {
- RTL_PASS,
- "dfinish", /* name */
- NULL, /* gate */
- rest_of_handle_df_finish, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
+namespace {
+
+const pass_data pass_data_df_finish =
+{
+ RTL_PASS, /* type */
+ "dfinish", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
};
+class pass_df_finish : public rtl_opt_pass
+{
+public:
+ pass_df_finish (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_df_finish, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual unsigned int execute (function *)
+ {
+ return rest_of_handle_df_finish ();
+ }
+
+}; // class pass_df_finish
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_df_finish (gcc::context *ctxt)
+{
+ return new pass_df_finish (ctxt);
+}
+
The general data flow analysis engine.
----------------------------------------------------------------------------*/
-/* Return time BB when it was visited for last time. */
-#define BB_LAST_CHANGE_AGE(bb) ((ptrdiff_t)(bb)->aux)
-
/* Helper function for df_worklist_dataflow.
Propagate the dataflow forward.
Given a BB_INDEX, do the dataflow propagation
unsigned *bbindex_to_postorder,
bitmap pending,
sbitmap considered,
- ptrdiff_t age)
+ vec<int> &last_change_age,
+ int age)
{
edge e;
edge_iterator ei;
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
bool changed = !age;
/* Calculate <conf_op> of incoming edges. */
if (EDGE_COUNT (bb->preds) > 0)
FOR_EACH_EDGE (e, ei, bb->preds)
{
- if (age <= BB_LAST_CHANGE_AGE (e->src)
- && TEST_BIT (considered, e->src->index))
+ if (bbindex_to_postorder[e->src->index] < last_change_age.length ()
+ && age <= last_change_age[bbindex_to_postorder[e->src->index]]
+ && bitmap_bit_p (considered, e->src->index))
changed |= dataflow->problem->con_fun_n (e);
}
else if (dataflow->problem->con_fun_0)
{
unsigned ob_index = e->dest->index;
- if (TEST_BIT (considered, ob_index))
+ if (bitmap_bit_p (considered, ob_index))
bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
}
return true;
unsigned *bbindex_to_postorder,
bitmap pending,
sbitmap considered,
- ptrdiff_t age)
+ vec<int> &last_change_age,
+ int age)
{
edge e;
edge_iterator ei;
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
bool changed = !age;
/* Calculate <conf_op> of incoming edges. */
if (EDGE_COUNT (bb->succs) > 0)
FOR_EACH_EDGE (e, ei, bb->succs)
{
- if (age <= BB_LAST_CHANGE_AGE (e->dest)
- && TEST_BIT (considered, e->dest->index))
+ if (bbindex_to_postorder[e->dest->index] < last_change_age.length ()
+ && age <= last_change_age[bbindex_to_postorder[e->dest->index]]
+ && bitmap_bit_p (considered, e->dest->index))
changed |= dataflow->problem->con_fun_n (e);
}
else if (dataflow->problem->con_fun_0)
{
unsigned ob_index = e->src->index;
- if (TEST_BIT (considered, ob_index))
+ if (bitmap_bit_p (considered, ob_index))
bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
}
return true;
DATAFLOW is problem we are solving, PENDING is worklist of basic blocks we
need to visit.
BLOCK_IN_POSTORDER is array of size N_BLOCKS specifying postorder in BBs and
- BBINDEX_TO_POSTORDER is array mapping back BB->index to postorder possition.
+ BBINDEX_TO_POSTORDER is array mapping back BB->index to postorder position.
PENDING will be freed.
The worklists are bitmaps indexed by postorder positions.
worklists (we are processing WORKLIST and storing new BBs to visit in
PENDING).
- As an optimization we maintain ages when BB was changed (stored in bb->aux)
- and when it was last visited (stored in last_visit_age). This avoids need
- to re-do confluence function for edges to basic blocks whose source
- did not change since destination was visited last time. */
+   As an optimization we maintain ages when BB was changed (stored in
+   last_change_age) and when it was last visited (stored in last_visit_age).
+   This avoids the need to re-do the confluence function for edges to basic
+   blocks whose source did not change since the destination was last visited.  */
static void
df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
int age = 0;
bool changed;
- VEC(int, heap) *last_visit_age = NULL;
+ vec<int> last_visit_age = vNULL;
+ vec<int> last_change_age = vNULL;
int prev_age;
- basic_block bb;
- int i;
- VEC_safe_grow_cleared (int, heap, last_visit_age, n_blocks);
+ last_visit_age.safe_grow_cleared (n_blocks, true);
+ last_change_age.safe_grow_cleared (n_blocks, true);
/* Double-queueing. Worklist is for the current iteration,
and pending is for the next. */
bitmap_iterator bi;
unsigned int index;
- /* Swap pending and worklist. */
- bitmap temp = worklist;
- worklist = pending;
- pending = temp;
+ std::swap (pending, worklist);
EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi)
{
bitmap_clear_bit (pending, index);
bb_index = blocks_in_postorder[index];
- bb = BASIC_BLOCK (bb_index);
- prev_age = VEC_index (int, last_visit_age, index);
+ prev_age = last_visit_age[index];
if (dir == DF_FORWARD)
changed = df_worklist_propagate_forward (dataflow, bb_index,
bbindex_to_postorder,
pending, considered,
+ last_change_age,
prev_age);
else
changed = df_worklist_propagate_backward (dataflow, bb_index,
bbindex_to_postorder,
pending, considered,
+ last_change_age,
prev_age);
- VEC_replace (int, last_visit_age, index, ++age);
+ last_visit_age[index] = ++age;
if (changed)
- bb->aux = (void *)(ptrdiff_t)age;
+ last_change_age[index] = age;
}
bitmap_clear (worklist);
}
- for (i = 0; i < n_blocks; i++)
- BASIC_BLOCK (blocks_in_postorder[i])->aux = NULL;
BITMAP_FREE (worklist);
BITMAP_FREE (pending);
- VEC_free (int, heap, last_visit_age);
+ last_visit_age.release ();
+ last_change_age.release ();
/* Dump statistics. */
if (dump_file)
fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
- "n_basic_blocks %d n_edges %d"
+ " n_basic_blocks %d n_edges %d"
" count %d (%5.2g)\n",
- n_basic_blocks, n_edges,
- dcount, dcount / (float)n_basic_blocks);
+ n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
+ dcount, dcount / (double)n_basic_blocks_for_fn (cfun));
}
/* Worklist-based dataflow solver. It uses sbitmap as a worklist,
int n_blocks)
{
bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
- sbitmap considered = sbitmap_alloc (last_basic_block);
bitmap_iterator bi;
unsigned int *bbindex_to_postorder;
int i;
gcc_assert (dir != DF_NONE);
/* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder. */
- bbindex_to_postorder =
- (unsigned int *)xmalloc (last_basic_block * sizeof (unsigned int));
+ bbindex_to_postorder = XNEWVEC (unsigned int,
+ last_basic_block_for_fn (cfun));
/* Initialize the array to an out-of-bound value. */
- for (i = 0; i < last_basic_block; i++)
- bbindex_to_postorder[i] = last_basic_block;
+ for (i = 0; i < last_basic_block_for_fn (cfun); i++)
+ bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
/* Initialize the considered map. */
- sbitmap_zero (considered);
+ auto_sbitmap considered (last_basic_block_for_fn (cfun));
+ bitmap_clear (considered);
EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
{
- SET_BIT (considered, index);
+ bitmap_set_bit (considered, index);
}
/* Initialize the mapping of block index to postorder. */
blocks_in_postorder,
bbindex_to_postorder,
n_blocks);
- sbitmap_free (considered);
free (bbindex_to_postorder);
}
}
-/* Analyze dataflow info for the basic blocks specified by the bitmap
- BLOCKS, or for the whole CFG if BLOCKS is zero. */
+/* Analyze dataflow info. */
-void
-df_analyze (void)
+static void
+df_analyze_1 (void)
{
- bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
- bool everything;
int i;
- free (df->postorder);
- free (df->postorder_inverted);
- df->postorder = XNEWVEC (int, last_basic_block);
- df->postorder_inverted = XNEWVEC (int, last_basic_block);
- df->n_blocks = post_order_compute (df->postorder, true, true);
- df->n_blocks_inverted = inverted_post_order_compute (df->postorder_inverted);
-
/* These should be the same. */
- gcc_assert (df->n_blocks == df->n_blocks_inverted);
+ gcc_assert ((unsigned) df->n_blocks == df->postorder_inverted.length ());
/* We need to do this before the df_verify_all because this is
not kept incrementally up to date. */
#endif
df_verify ();
- for (i = 0; i < df->n_blocks; i++)
- bitmap_set_bit (current_all_blocks, df->postorder[i]);
-
-#ifdef ENABLE_CHECKING
- /* Verify that POSTORDER_INVERTED only contains blocks reachable from
- the ENTRY block. */
- for (i = 0; i < df->n_blocks_inverted; i++)
- gcc_assert (bitmap_bit_p (current_all_blocks, df->postorder_inverted[i]));
-#endif
-
- /* Make sure that we have pruned any unreachable blocks from these
- sets. */
- if (df->analyze_subset)
- {
- everything = false;
- bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
- df->n_blocks = df_prune_to_subcfg (df->postorder,
- df->n_blocks, df->blocks_to_analyze);
- df->n_blocks_inverted = df_prune_to_subcfg (df->postorder_inverted,
- df->n_blocks_inverted,
- df->blocks_to_analyze);
- BITMAP_FREE (current_all_blocks);
- }
- else
- {
- everything = true;
- df->blocks_to_analyze = current_all_blocks;
- current_all_blocks = NULL;
- }
-
/* Skip over the DF_SCAN problem. */
for (i = 1; i < df->num_problems_defined; i++)
{
if (dflow->problem->dir == DF_FORWARD)
df_analyze_problem (dflow,
df->blocks_to_analyze,
- df->postorder_inverted,
- df->n_blocks_inverted);
+ df->postorder_inverted.address (),
+ df->postorder_inverted.length ());
else
df_analyze_problem (dflow,
df->blocks_to_analyze,
}
}
- if (everything)
+ if (!df->analyze_subset)
{
BITMAP_FREE (df->blocks_to_analyze);
df->blocks_to_analyze = NULL;
#endif
}
+/* Analyze dataflow info. */
+
+void
+df_analyze (void)
+{
+ bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
+
+ free (df->postorder);
+ df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ df->n_blocks = post_order_compute (df->postorder, true, true);
+ df->postorder_inverted.truncate (0);
+ inverted_post_order_compute (&df->postorder_inverted);
+
+ for (int i = 0; i < df->n_blocks; i++)
+ bitmap_set_bit (current_all_blocks, df->postorder[i]);
+
+ if (flag_checking)
+ {
+ /* Verify that POSTORDER_INVERTED only contains blocks reachable from
+ the ENTRY block. */
+ for (unsigned int i = 0; i < df->postorder_inverted.length (); i++)
+ gcc_assert (bitmap_bit_p (current_all_blocks,
+ df->postorder_inverted[i]));
+ }
+
+ /* Make sure that we have pruned any unreachable blocks from these
+ sets. */
+ if (df->analyze_subset)
+ {
+ bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
+ df->n_blocks = df_prune_to_subcfg (df->postorder,
+ df->n_blocks, df->blocks_to_analyze);
+ unsigned int newlen = df_prune_to_subcfg (df->postorder_inverted.address (),
+ df->postorder_inverted.length (),
+ df->blocks_to_analyze);
+ df->postorder_inverted.truncate (newlen);
+ BITMAP_FREE (current_all_blocks);
+ }
+ else
+ {
+ df->blocks_to_analyze = current_all_blocks;
+ current_all_blocks = NULL;
+ }
+
+ df_analyze_1 ();
+}
+
+/* Compute the reverse top sort order of the sub-CFG specified by LOOP.
+ Returns the number of blocks which is always loop->num_nodes. */
+
+static int
+loop_post_order_compute (int *post_order, class loop *loop)
+{
+ edge_iterator *stack;
+ int sp;
+ int post_order_num = 0;
+
+ /* Allocate stack for back-tracking up CFG. */
+ stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
+ sp = 0;
+
+ /* Allocate bitmap to track nodes that have been visited. */
+ auto_bitmap visited;
+
+ /* Push the first edge on to the stack. */
+ stack[sp++] = ei_start (loop_preheader_edge (loop)->src->succs);
+
+ while (sp)
+ {
+ edge_iterator ei;
+ basic_block src;
+ basic_block dest;
+
+ /* Look at the edge on the top of the stack. */
+ ei = stack[sp - 1];
+ src = ei_edge (ei)->src;
+ dest = ei_edge (ei)->dest;
+
+ /* Check if the edge destination has been visited yet and mark it
+ if not so. */
+ if (flow_bb_inside_loop_p (loop, dest)
+ && bitmap_set_bit (visited, dest->index))
+ {
+ if (EDGE_COUNT (dest->succs) > 0)
+ /* Since the DEST node has been visited for the first
+ time, check its successors. */
+ stack[sp++] = ei_start (dest->succs);
+ else
+ post_order[post_order_num++] = dest->index;
+ }
+ else
+ {
+ if (ei_one_before_end_p (ei)
+ && src != loop_preheader_edge (loop)->src)
+ post_order[post_order_num++] = src->index;
+
+ if (!ei_one_before_end_p (ei))
+ ei_next (&stack[sp - 1]);
+ else
+ sp--;
+ }
+ }
+
+ free (stack);
+
+ return post_order_num;
+}
+
+/* Compute the reverse top sort order of the inverted sub-CFG specified
+   by LOOP and store it in *POST_ORDER; the result always contains
+   loop->num_nodes entries.  */
+
+static void
+loop_inverted_post_order_compute (vec<int> *post_order, class loop *loop)
+{
+ basic_block bb;
+ edge_iterator *stack;
+ int sp;
+
+ post_order->reserve_exact (loop->num_nodes);
+
+ /* Allocate stack for back-tracking up CFG. */
+ stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
+ sp = 0;
+
+ /* Allocate bitmap to track nodes that have been visited. */
+ auto_bitmap visited;
+
+ /* Put all latches into the initial work list. In theory we'd want
+ to start from loop exits but then we'd have the special case of
+ endless loops. It doesn't really matter for DF iteration order and
+ handling latches last is probably even better. */
+ stack[sp++] = ei_start (loop->header->preds);
+ bitmap_set_bit (visited, loop->header->index);
+
+ /* The inverted traversal loop. */
+ while (sp)
+ {
+ edge_iterator ei;
+ basic_block pred;
+
+ /* Look at the edge on the top of the stack. */
+ ei = stack[sp - 1];
+ bb = ei_edge (ei)->dest;
+ pred = ei_edge (ei)->src;
+
+ /* Check if the predecessor has been visited yet and mark it
+ if not so. */
+ if (flow_bb_inside_loop_p (loop, pred)
+ && bitmap_set_bit (visited, pred->index))
+ {
+ if (EDGE_COUNT (pred->preds) > 0)
+ /* Since the predecessor node has been visited for the first
+ time, check its predecessors. */
+ stack[sp++] = ei_start (pred->preds);
+ else
+ post_order->quick_push (pred->index);
+ }
+ else
+ {
+ if (flow_bb_inside_loop_p (loop, bb)
+ && ei_one_before_end_p (ei))
+ post_order->quick_push (bb->index);
+
+ if (!ei_one_before_end_p (ei))
+ ei_next (&stack[sp - 1]);
+ else
+ sp--;
+ }
+ }
+
+ free (stack);
+}
+
+
+/* Analyze dataflow info for the basic blocks contained in LOOP. */
+
+void
+df_analyze_loop (class loop *loop)
+{
+ free (df->postorder);
+
+ df->postorder = XNEWVEC (int, loop->num_nodes);
+ df->postorder_inverted.truncate (0);
+ df->n_blocks = loop_post_order_compute (df->postorder, loop);
+ loop_inverted_post_order_compute (&df->postorder_inverted, loop);
+ gcc_assert ((unsigned) df->n_blocks == loop->num_nodes);
+ gcc_assert (df->postorder_inverted.length () == loop->num_nodes);
+
+ bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
+ for (int i = 0; i < df->n_blocks; ++i)
+ bitmap_set_bit (blocks, df->postorder[i]);
+ df_set_blocks (blocks);
+ BITMAP_FREE (blocks);
+
+ df_analyze_1 ();
+}
+
/* Return the number of basic blocks from the last call to df_analyze. */
if (dir == DF_FORWARD)
{
- gcc_assert (df->postorder_inverted);
- return df->n_blocks_inverted;
+ gcc_assert (df->postorder_inverted.length ());
+ return df->postorder_inverted.length ();
}
gcc_assert (df->postorder);
if (dir == DF_FORWARD)
{
- gcc_assert (df->postorder_inverted);
- return df->postorder_inverted;
+ gcc_assert (df->postorder_inverted.length ());
+ return df->postorder_inverted.address ();
}
gcc_assert (df->postorder);
return df->postorder;
void
df_grow_bb_info (struct dataflow *dflow)
{
- unsigned int new_size = last_basic_block + 1;
+ unsigned int new_size = last_basic_block_for_fn (cfun) + 1;
if (dflow->block_info_size < new_size)
{
new_size += new_size / 4;
int i, p;
basic_block bb;
void *problem_temps;
- bitmap_head tmp;
- bitmap_initialize (&tmp, &df_bitmap_obstack);
+ auto_bitmap tmp (&df_bitmap_obstack);
for (p = 0; p < df->num_problems_defined; p++)
{
struct dataflow *dflow = df->problems_in_order[p];
dflow problem. */
if (dflow->out_of_date_transfer_functions)
{
- bitmap_copy (&tmp, dflow->out_of_date_transfer_functions);
+ bitmap_copy (tmp, dflow->out_of_date_transfer_functions);
bitmap_clear (dflow->out_of_date_transfer_functions);
- if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
+ if (bitmap_bit_p (tmp, ENTRY_BLOCK))
bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
- if (bitmap_bit_p (&tmp, EXIT_BLOCK))
+ if (bitmap_bit_p (tmp, EXIT_BLOCK))
bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
- if (bitmap_bit_p (&tmp, bb->index))
+ if (bitmap_bit_p (tmp, bb->index))
bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
i++;
}
/* Now shuffle the block info for the problem. */
if (dflow->problem->free_bb_fun)
{
- int size = last_basic_block * dflow->problem->block_info_elt_size;
+ int size = (last_basic_block_for_fn (cfun)
+ * dflow->problem->block_info_elt_size);
problem_temps = XNEWVAR (char, size);
df_grow_bb_info (dflow);
memcpy (problem_temps, dflow->block_info, size);
place in the block_info vector. Null out the copied
item. The entry and exit blocks never move. */
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
df_set_bb_info (dflow, i,
(char *)problem_temps
}
memset ((char *)dflow->block_info
+ i * dflow->problem->block_info_elt_size, 0,
- (last_basic_block - i)
+ (last_basic_block_for_fn (cfun) - i)
* dflow->problem->block_info_elt_size);
free (problem_temps);
}
if (df->blocks_to_analyze)
{
- if (bitmap_bit_p (&tmp, ENTRY_BLOCK))
+ if (bitmap_bit_p (tmp, ENTRY_BLOCK))
bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
- if (bitmap_bit_p (&tmp, EXIT_BLOCK))
+ if (bitmap_bit_p (tmp, EXIT_BLOCK))
bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
- bitmap_copy (&tmp, df->blocks_to_analyze);
+ bitmap_copy (tmp, df->blocks_to_analyze);
bitmap_clear (df->blocks_to_analyze);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
- if (bitmap_bit_p (&tmp, bb->index))
+ if (bitmap_bit_p (tmp, bb->index))
bitmap_set_bit (df->blocks_to_analyze, i);
i++;
}
}
- bitmap_clear (&tmp);
-
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
- SET_BASIC_BLOCK (i, bb);
+ SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
bb->index = i;
i++;
}
- gcc_assert (i == n_basic_blocks);
+ gcc_assert (i == n_basic_blocks_for_fn (cfun));
- for (; i < last_basic_block; i++)
- SET_BASIC_BLOCK (i, NULL);
+ for (; i < last_basic_block_for_fn (cfun); i++)
+ SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
#ifdef DF_DEBUG_CFG
if (!df_lr->solutions_dirty)
fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
gcc_assert (df);
- gcc_assert (BASIC_BLOCK (old_index) == NULL);
+ gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL);
for (p = 0; p < df->num_problems_defined; p++)
{
}
df_clear_bb_dirty (new_block);
- SET_BASIC_BLOCK (old_index, new_block);
+ SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block);
new_block->index = old_index;
- df_set_bb_dirty (BASIC_BLOCK (old_index));
- SET_BASIC_BLOCK (new_block_index, NULL);
+ df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index));
+ SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL);
}
void
df_bb_delete (int bb_index)
{
- basic_block bb = BASIC_BLOCK (bb_index);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
int i;
if (!df)
if (df_live)
df_live_verify_transfer_functions ();
#endif
+ df->changeable_flags &= ~DF_VERIFY_SCHEDULED;
}
#ifdef DF_DEBUG_CFG
df_compute_cfg_image (void)
{
basic_block bb;
- int size = 2 + (2 * n_basic_blocks);
+ int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
int i;
int * map;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
size += EDGE_COUNT (bb->succs);
}
map = XNEWVEC (int, size);
map[0] = size;
i = 1;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
edge_iterator ei;
edge e;
df_ref
df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
{
- rtx insn;
- df_ref *def_rec;
- unsigned int uid;
+ rtx_insn *insn;
+ df_ref def;
FOR_BB_INSNS (bb, insn)
{
if (!INSN_P (insn))
continue;
- uid = INSN_UID (insn);
- for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
- {
- df_ref def = *def_rec;
- if (DF_REF_REGNO (def) == regno)
- return def;
- }
+ FOR_EACH_INSN_DEF (def, insn)
+ if (DF_REF_REGNO (def) == regno)
+ return def;
}
return NULL;
}
df_ref
df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
{
- rtx insn;
- df_ref *def_rec;
- unsigned int uid;
+ rtx_insn *insn;
+ df_ref def;
FOR_BB_INSNS_REVERSE (bb, insn)
{
if (!INSN_P (insn))
continue;
- uid = INSN_UID (insn);
- for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
- {
- df_ref def = *def_rec;
- if (DF_REF_REGNO (def) == regno)
- return def;
- }
+ FOR_EACH_INSN_DEF (def, insn)
+ if (DF_REF_REGNO (def) == regno)
+ return def;
}
return NULL;
DF is the dataflow object. */
df_ref
-df_find_def (rtx insn, rtx reg)
+df_find_def (rtx_insn *insn, rtx reg)
{
- unsigned int uid;
- df_ref *def_rec;
+ df_ref def;
if (GET_CODE (reg) == SUBREG)
reg = SUBREG_REG (reg);
gcc_assert (REG_P (reg));
- uid = INSN_UID (insn);
- for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
- {
- df_ref def = *def_rec;
- if (rtx_equal_p (DF_REF_REAL_REG (def), reg))
- return def;
- }
+ FOR_EACH_INSN_DEF (def, insn)
+ if (DF_REF_REGNO (def) == REGNO (reg))
+ return def;
return NULL;
}
/* Return true if REG is defined in INSN, zero otherwise. */
bool
-df_reg_defined (rtx insn, rtx reg)
+df_reg_defined (rtx_insn *insn, rtx reg)
{
return df_find_def (insn, reg) != NULL;
}
DF is the dataflow object. */
df_ref
-df_find_use (rtx insn, rtx reg)
+df_find_use (rtx_insn *insn, rtx reg)
{
- unsigned int uid;
- df_ref *use_rec;
+ df_ref use;
if (GET_CODE (reg) == SUBREG)
reg = SUBREG_REG (reg);
gcc_assert (REG_P (reg));
- uid = INSN_UID (insn);
- for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
- {
- df_ref use = *use_rec;
- if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
- return use;
- }
+ df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
+ FOR_EACH_INSN_INFO_USE (use, insn_info)
+ if (DF_REF_REGNO (use) == REGNO (reg))
+ return use;
if (df->changeable_flags & DF_EQ_NOTES)
- for (use_rec = DF_INSN_UID_EQ_USES (uid); *use_rec; use_rec++)
- {
- df_ref use = *use_rec;
- if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
- return use;
- }
+ FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
+ if (DF_REF_REGNO (use) == REGNO (reg))
+ return use;
return NULL;
}
/* Return true if REG is referenced in INSN, zero otherwise. */
bool
-df_reg_used (rtx insn, rtx reg)
+df_reg_used (rtx_insn *insn, rtx reg)
{
return df_find_use (insn, reg) != NULL;
}
Debugging and printing functions.
----------------------------------------------------------------------------*/
+/* Write the registers contained in regset R into OUTF.
+   This is part of making a debugging dump.  */
+
+void
+dump_regset (regset r, FILE *outf)
+{
+ unsigned i;
+ reg_set_iterator rsi;
+
+ if (r == NULL)
+ {
+ fputs (" (nil)", outf);
+ return;
+ }
+
+ EXECUTE_IF_SET_IN_REG_SET (r, 0, i, rsi)
+ {
+ fprintf (outf, " %d", i);
+ if (i < FIRST_PSEUDO_REGISTER)
+ fprintf (outf, " [%s]",
+ reg_names[i]);
+ }
+}
+
+/* Print a human-readable representation of R on the standard error
+ stream. This function is designed to be used from within the
+ debugger. */
+extern void debug_regset (regset);
+DEBUG_FUNCTION void
+debug_regset (regset r)
+{
+ dump_regset (r, stderr);
+ putc ('\n', stderr);
+}
/* Write information about registers and basic blocks into FILE.
This is part of making a debugging dump. */
void
-df_print_regset (FILE *file, bitmap r)
+df_print_regset (FILE *file, const_bitmap r)
{
unsigned int i;
bitmap_iterator bi;
debugging dump. */
void
-df_print_word_regset (FILE *file, bitmap r)
+df_print_word_regset (FILE *file, const_bitmap r)
{
unsigned int max_reg = max_reg_num ();
basic_block bb;
df_dump_start (file);
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
{
df_print_bb_index (bb, file);
df_dump_top (bb, file);
EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
{
- basic_block bb = BASIC_BLOCK (bb_index);
-
- df_print_bb_index (bb, file);
- df_dump_top (bb, file);
- df_dump_bottom (bb, file);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
+ dump_bb (file, bb, 0, TDF_DETAILS);
}
fprintf (file, "\n");
}
{
df_dump_problem_function fun = dflow->problem->dump_start_fun;
if (fun)
- fun(file);
+ fun (file);
}
}
}
-/* Dump the top of the block information for BB. */
-
-void
-df_dump_top (basic_block bb, FILE *file)
+/* Dump the top or bottom of the block information for BB. */
+static void
+df_dump_bb_problem_data (basic_block bb, FILE *file, bool top)
{
int i;
struct dataflow *dflow = df->problems_in_order[i];
if (dflow->computed)
{
- df_dump_bb_problem_function bbfun = dflow->problem->dump_top_fun;
+ df_dump_bb_problem_function bbfun;
+
+ if (top)
+ bbfun = dflow->problem->dump_top_fun;
+ else
+ bbfun = dflow->problem->dump_bottom_fun;
+
if (bbfun)
bbfun (bb, file);
}
}
}
+/* Dump the top of the block information for BB.  */
+
+void
+df_dump_top (basic_block bb, FILE *file)
+{
+  /* Thin wrapper: the shared worker selects each computed problem's
+     dump_top_fun when TOP is true.  */
+  df_dump_bb_problem_data (bb, file, /*top=*/true);
+}
/* Dump the bottom of the block information for BB. */
void
df_dump_bottom (basic_block bb, FILE *file)
+{
+ df_dump_bb_problem_data (bb, file, /*top=*/false);
+}
+
+
+/* Dump information about INSN just before or after dumping INSN itself. */
+static void
+df_dump_insn_problem_data (const rtx_insn *insn, FILE *file, bool top)
{
int i;
struct dataflow *dflow = df->problems_in_order[i];
if (dflow->computed)
{
- df_dump_bb_problem_function bbfun = dflow->problem->dump_bottom_fun;
- if (bbfun)
- bbfun (bb, file);
+ df_dump_insn_problem_function insnfun;
+
+ if (top)
+ insnfun = dflow->problem->dump_insn_top_fun;
+ else
+ insnfun = dflow->problem->dump_insn_bottom_fun;
+
+ if (insnfun)
+ insnfun (insn, file);
}
}
}
+/* Dump information about INSN before dumping INSN itself.  */
+
+void
+df_dump_insn_top (const rtx_insn *insn, FILE *file)
+{
+  /* Thin wrapper: the shared worker selects each computed problem's
+     dump_insn_top_fun when TOP is true.  */
+  df_dump_insn_problem_data (insn, file, /*top=*/true);
+}
+
+/* Dump information about INSN after dumping INSN itself.  */
+
+void
+df_dump_insn_bottom (const rtx_insn *insn, FILE *file)
+{
+  /* Thin wrapper: the shared worker selects each computed problem's
+     dump_insn_bottom_fun when TOP is false.  */
+  df_dump_insn_problem_data (insn, file, /*top=*/false);
+}
+
static void
df_ref_dump (df_ref ref, FILE *file)
}
void
-df_refs_chain_dump (df_ref *ref_rec, bool follow_chain, FILE *file)
+df_refs_chain_dump (df_ref ref, bool follow_chain, FILE *file)
{
fprintf (file, "{ ");
+  /* REF heads a NULL-terminated list linked by DF_REF_NEXT_LOC; dump
+     each ref and, when FOLLOW_CHAIN, the chain recorded for it.  */
-  while (*ref_rec)
+  for (; ref; ref = DF_REF_NEXT_LOC (ref))
{
-      df_ref ref = *ref_rec;
df_ref_dump (ref, file);
if (follow_chain)
df_chain_dump (DF_REF_CHAIN (ref), file);
-      ref_rec++;
}
fprintf (file, "}");
}
static void
-df_mws_dump (struct df_mw_hardreg **mws, FILE *file)
+df_mws_dump (struct df_mw_hardreg *mws, FILE *file)
{
+  /* Each entry describes a multiword hard-register reference covering
+     hard regs [start_regno, end_regno]; 'd' marks a def, 'u' a use.  */
-  while (*mws)
-    {
-      fprintf (file, "mw %c r[%d..%d]\n",
-	       (DF_MWS_REG_DEF_P (*mws)) ? 'd' : 'u',
-	       (*mws)->start_regno, (*mws)->end_regno);
-      mws++;
-    }
+  for (; mws; mws = DF_MWS_NEXT (mws))
+    fprintf (file, "mw %c r[%d..%d]\n",
+	     DF_MWS_REG_DEF_P (mws) ? 'd' : 'u',
+	     mws->start_regno, mws->end_regno);
}
DEBUG_FUNCTION void
-df_insn_debug (rtx insn, bool follow_chain, FILE *file)
+df_insn_debug (rtx_insn *insn, bool follow_chain, FILE *file)
{
+  /* Delegate to the UID-indexed dumper.  */
df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
}
DEBUG_FUNCTION void
-df_insn_debug_regno (rtx insn, FILE *file)
+df_insn_debug_regno (rtx_insn *insn, FILE *file)
{
struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
/* Functions for debugging from GDB. */
DEBUG_FUNCTION void
-debug_df_insn (rtx insn)
+debug_df_insn (rtx_insn *insn)
{
df_insn_debug (insn, true, stderr);
debug_rtx (insn);