/* Generic partial redundancy elimination with lazy code motion support.
- Copyright (C) 1998-2013 Free Software Foundation, Inc.
+ Copyright (C) 1998-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
-#include "rtl.h"
-#include "regs.h"
-#include "hard-reg-set.h"
-#include "flags.h"
-#include "insn-config.h"
-#include "recog.h"
-#include "basic-block.h"
-#include "tm_p.h"
-#include "function.h"
-#include "sbitmap.h"
-#include "dumpfile.h"
+#include "backend.h"
+#include "cfganal.h"
+#include "lcm.h"
/* Edge based LCM routines. */
static void compute_antinout_edge (sbitmap *, sbitmap *, sbitmap *, sbitmap *);
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of ANTIN above. */
- FOR_EACH_BB_REVERSE (bb)
+ int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+ int postorder_num = post_order_compute (postorder, false, false);
+ for (int i = 0; i < postorder_num; ++i)
{
+ bb = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
*qin++ = bb;
bb->aux = bb;
}
+ free (postorder);
qin = worklist;
qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
sbitmap *antout, sbitmap *avout, sbitmap *kill,
sbitmap *earliest)
{
- sbitmap difference, temp_bitmap;
int x, num_edges;
basic_block pred, succ;
num_edges = NUM_EDGES (edge_list);
- difference = sbitmap_alloc (n_exprs);
- temp_bitmap = sbitmap_alloc (n_exprs);
-
+ auto_sbitmap difference (n_exprs), temp_bitmap (n_exprs);
for (x = 0; x < num_edges; x++)
{
pred = INDEX_EDGE_PRED_BB (edge_list, x);
}
}
}
-
- sbitmap_free (temp_bitmap);
- sbitmap_free (difference);
}
/* later(p,s) is dependent on the calculation of laterin(p).
/* Add all the blocks to the worklist. This prevents an early exit from
the loop given our optimistic initialization of LATER above. */
- FOR_EACH_BB (bb)
+ auto_vec<int, 20> postorder;
+ inverted_post_order_compute (&postorder);
+ for (unsigned int i = 0; i < postorder.length (); ++i)
{
+ bb = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ continue;
*qin++ = bb;
bb->aux = bb;
}
bitmap_ones (laterin[bb->index]);
FOR_EACH_EDGE (e, ei, bb->preds)
bitmap_and (laterin[bb->index], laterin[bb->index],
- later[(size_t)e->aux]);
+ later[(size_t)e->aux]);
/* Calculate LATER for all outgoing edges. */
FOR_EACH_EDGE (e, ei, bb->succs)
if (bitmap_ior_and_compl (later[(size_t) e->aux],
- earliest[(size_t) e->aux],
- laterin[e->src->index],
- antloc[e->src->index])
+ earliest[(size_t) e->aux],
+ laterin[bb->index],
+ antloc[bb->index])
/* If LATER for an outgoing edge was changed, then we need
to add the target of the outgoing edge to the worklist. */
&& e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest->aux == 0)
bitmap_ones (laterin[last_basic_block_for_fn (cfun)]);
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
bitmap_and (laterin[last_basic_block_for_fn (cfun)],
- laterin[last_basic_block_for_fn (cfun)],
- later[(size_t) e->aux]);
+ laterin[last_basic_block_for_fn (cfun)],
+ later[(size_t) e->aux]);
clear_aux_for_edges ();
free (worklist);
int x;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_and_compl (del[bb->index], antloc[bb->index],
laterin[bb->index]);
}
}
-/* Given local properties TRANSP, ANTLOC, AVOUT, KILL return the insert and
- delete vectors for edge based LCM. Returns an edgelist which is used to
+/* Given local properties TRANSP, ANTLOC, AVLOC, KILL return the insert and
+ delete vectors for edge based LCM and return the AVIN, AVOUT bitmap.
map the insert vector to what edge an expression should be inserted on. */
struct edge_list *
-pre_edge_lcm (int n_exprs, sbitmap *transp,
+pre_edge_lcm_avs (int n_exprs, sbitmap *transp,
sbitmap *avloc, sbitmap *antloc, sbitmap *kill,
+ sbitmap *avin, sbitmap *avout,
sbitmap **insert, sbitmap **del)
{
sbitmap *antin, *antout, *earliest;
- sbitmap *avin, *avout;
sbitmap *later, *laterin;
struct edge_list *edge_list;
int num_edges;
#endif
/* Compute global availability. */
- avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
- avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
compute_available (avloc, kill, avout, avin);
- sbitmap_vector_free (avin);
/* Compute global anticipatability. */
antin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
sbitmap_vector_free (antout);
sbitmap_vector_free (antin);
- sbitmap_vector_free (avout);
later = sbitmap_vector_alloc (num_edges, n_exprs);
return edge_list;
}
+/* Wrapper to allocate avin/avout and call pre_edge_lcm_avs.
+
+   Convenience entry point for callers that do not need the global
+   availability (AVIN/AVOUT) solution afterwards: the vectors are
+   allocated here, handed to pre_edge_lcm_avs, and freed before
+   returning.  N_EXPRS is the number of expressions tracked per
+   bitmap; TRANSP/AVLOC/ANTLOC/KILL are the per-block local
+   properties; *INSERT and *DEL receive the edge-based LCM insert
+   and delete vectors.  Returns the edge list mapping insert-vector
+   indices to edges (ownership passes to the caller).  */
+
+struct edge_list *
+pre_edge_lcm (int n_exprs, sbitmap *transp,
+	      sbitmap *avloc, sbitmap *antloc, sbitmap *kill,
+	      sbitmap **insert, sbitmap **del)
+{
+  struct edge_list *edge_list;
+  sbitmap *avin, *avout;
+
+  /* One bitmap of N_EXPRS bits per basic block in the current
+     function, for both the availability-in and -out solutions.  */
+  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
+  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), n_exprs);
+
+  edge_list = pre_edge_lcm_avs (n_exprs, transp, avloc, antloc, kill,
+				avin, avout, insert, del);
+
+  /* The caller only wanted INSERT/DEL; discard the availability
+     solution computed as a by-product.  */
+  sbitmap_vector_free (avout);
+  sbitmap_vector_free (avin);
+
+  return edge_list;
+}
+
/* Compute the AVIN and AVOUT vectors from the AVLOC and KILL vectors.
Return the number of passes we performed to iterate to a solution. */
bitmap_vector_ones (avout, last_basic_block_for_fn (cfun));
/* Put every block on the worklist; this is necessary because of the
- optimistic initialization of AVOUT above. */
- FOR_EACH_BB (bb)
+ optimistic initialization of AVOUT above. Use inverted postorder
+ to make the dataflow problem require less iterations. */
+ auto_vec<int, 20> postorder;
+ inverted_post_order_compute (&postorder);
+ for (unsigned int i = 0; i < postorder.length (); ++i)
{
+ bb = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ continue;
*qin++ = bb;
bb->aux = bb;
}
sbitmap *st_avout, sbitmap *st_avin, sbitmap *st_antin,
sbitmap *kill, sbitmap *farthest)
{
- sbitmap difference, temp_bitmap;
int x, num_edges;
basic_block pred, succ;
num_edges = NUM_EDGES (edge_list);
- difference = sbitmap_alloc (n_exprs);
- temp_bitmap = sbitmap_alloc (n_exprs);
-
+ auto_sbitmap difference (n_exprs), temp_bitmap (n_exprs);
for (x = 0; x < num_edges; x++)
{
pred = INDEX_EDGE_PRED_BB (edge_list, x);
}
}
}
-
- sbitmap_free (temp_bitmap);
- sbitmap_free (difference);
}
/* Compute nearer and nearerout vectors for edge based lcm.
/* Add all the blocks to the worklist. This prevents an early exit
from the loop given our optimistic initialization of NEARER. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
*tos++ = bb;
bb->aux = bb;
int x;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_and_compl (del[bb->index], st_avloc[bb->index],
nearerout[bb->index]);