/* Natural loop discovery code for GNU compiler.
- Copyright (C) 2000-2013 Free Software Foundation, Inc.
+ Copyright (C) 2000-2019 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
#include "rtl.h"
-#include "function.h"
-#include "basic-block.h"
-#include "cfgloop.h"
-#include "diagnostic-core.h"
-#include "flags.h"
#include "tree.h"
#include "gimple.h"
-#include "gimple-iterator.h"
+#include "cfghooks.h"
#include "gimple-ssa.h"
-#include "pointer-set.h"
-#include "ggc.h"
+#include "diagnostic-core.h"
+#include "cfganal.h"
+#include "cfgloop.h"
+#include "gimple-iterator.h"
#include "dumpfile.h"
static void flow_loops_cfg_dump (FILE *);
if (!file)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge succ;
edge_iterator ei;
loop_depth (loop), (long) (loop_outer (loop)
? loop_outer (loop)->num : -1));
+ if (loop->latch)
+ {
+ bool read_profile_p;
+ gcov_type nit = expected_loop_iterations_unbounded (loop, &read_profile_p);
+ if (read_profile_p && !loop->any_estimate)
+ fprintf (file, ";; profile-based iteration count: %" PRIu64 "\n",
+ (uint64_t) nit);
+ }
+
fprintf (file, ";; nodes:");
bbs = get_loop_body (loop);
for (i = 0; i < loop->num_nodes; i++)
void
flow_loops_dump (FILE *file,
                 void (*loop_dump_aux) (const struct loop *, FILE *, int),
                 int verbose)
{
- loop_iterator li;
struct loop *loop;
if (!current_loops || ! file)
fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
- FOR_EACH_LOOP (li, loop, LI_INCLUDE_ROOT)
+ FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
{
flow_loop_dump (loop, file, loop_dump_aux, verbose);
}
/* Add LOOP to the loop hierarchy tree where FATHER is father of the
added loop. If LOOP has some children, take care that their
- pred field will be initialized correctly. */
+ pred field will be initialized correctly. If AFTER is non-NULL,
+ it is expected to point into FATHER's inner sibling list, and LOOP
+ is added right after AFTER; otherwise LOOP is added at the front
+ of FATHER's sibling list. */
void
-flow_loop_tree_node_add (struct loop *father, struct loop *loop)
+flow_loop_tree_node_add (struct loop *father, struct loop *loop,
+ struct loop *after)
{
- loop->next = father->inner;
- father->inner = loop;
+ if (after)
+ {
+ loop->next = after->next;
+ after->next = loop;
+ }
+ else
+ {
+ loop->next = father->inner;
+ father->inner = loop;
+ }
establish_preds (loop, father);
}
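/* Illustrative sketch only (OUTER, CHILD_A and CHILD_B are hypothetical
   loops, not part of this change):

     flow_loop_tree_node_add (outer, child_a, NULL);
     flow_loop_tree_node_add (outer, child_b, child_a);

   leaves OUTER->inner == CHILD_A and CHILD_A->next == CHILD_B; passing the
   previously inserted sibling as AFTER preserves insertion order, whereas
   AFTER == NULL pushes each new loop to the front of the sibling list. */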
struct loop *
alloc_loop (void)
{
- struct loop *loop = ggc_alloc_cleared_loop ();
+ struct loop *loop = ggc_cleared_alloc<struct loop> ();
- loop->exits = ggc_alloc_cleared_loop_exit ();
+ loop->exits = ggc_cleared_alloc<loop_exit> ();
loop->exits->next = loop->exits->prev = loop->exits;
loop->can_be_parallel = false;
-
+ loop->constraints = 0;
+ loop->nb_iterations_upper_bound = 0;
+ loop->nb_iterations_likely_upper_bound = 0;
+ loop->nb_iterations_estimate = 0;
return loop;
}
/* Dummy loop containing whole function. */
root = alloc_loop ();
- root->num_nodes = n_basic_blocks_for_function (fn);
- root->latch = EXIT_BLOCK_PTR_FOR_FUNCTION (fn);
- root->header = ENTRY_BLOCK_PTR_FOR_FUNCTION (fn);
- ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;
- EXIT_BLOCK_PTR_FOR_FUNCTION (fn)->loop_father = root;
+ root->num_nodes = n_basic_blocks_for_fn (fn);
+ root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
+ root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
+ ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
+ EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
loops->larray->quick_push (root);
loops->tree_root = root;
FOR_EACH_EDGE (e, ei, header->preds)
{
basic_block latch = e->src;
- if (latch != ENTRY_BLOCK_PTR
+ if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& dominated_by_p (CDI_DOMINATORS, latch, header))
return true;
}
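/* For example (illustrative CFG only): with ENTRY -> A, A -> B, B -> A and
   A -> EXIT, block A dominates B, so the edge B -> A is a back edge and A is
   reported as a loop header, B being its latch. */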
int *rc_order;
int b;
unsigned i;
- vec<loop_p> larray;
/* Ensure that the dominators are computed. */
calculate_dominance_info (CDI_DOMINATORS);
if (!loops)
{
- loops = ggc_alloc_cleared_loops ();
+ loops = ggc_cleared_alloc<struct loops> ();
init_loops_structure (cfun, loops, 1);
}
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
return loops;
/* The root loop node contains all basic-blocks. */
- loops->tree_root->num_nodes = n_basic_blocks;
+ loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
/* Compute depth first search order of the CFG so that outer
natural loops will be found before inner natural loops. */
- rc_order = XNEWVEC (int, n_basic_blocks);
+ rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
pre_and_rev_post_order_compute (NULL, rc_order, false);
/* Gather all loop headers in reverse completion order and allocate
loop structures for loops that are not already present. */
- larray.create (loops->larray->length ());
- for (b = 0; b < n_basic_blocks - NUM_FIXED_BLOCKS; b++)
+ auto_vec<loop_p> larray (loops->larray->length ());
+ for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
{
- basic_block header = BASIC_BLOCK (rc_order[b]);
+ basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
if (bb_loop_header_p (header))
{
struct loop *loop;
}
}
- larray.release ();
-
return loops;
}
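/* Typical use (sketch only): passing NULL allocates a fresh loop tree for
   the current function,

     struct loops *loops = flow_loops_find (NULL);

   whereas passing an existing structure, as loop fixup does, only records
   newly discovered headers into it. */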
+/* qsort helper for sort_sibling_loops. */
+
+static int *sort_sibling_loops_cmp_rpo;
+static int
+sort_sibling_loops_cmp (const void *la_, const void *lb_)
+{
+ const struct loop *la = *(const struct loop * const *)la_;
+ const struct loop *lb = *(const struct loop * const *)lb_;
+ return (sort_sibling_loops_cmp_rpo[la->header->index]
+ - sort_sibling_loops_cmp_rpo[lb->header->index]);
+}
+
+/* Sort sibling loops in RPO order. */
+
+void
+sort_sibling_loops (function *fn)
+{
+ /* Match flow_loops_find in the order we sort sibling loops. */
+ sort_sibling_loops_cmp_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ int *rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+ pre_and_rev_post_order_compute_fn (fn, NULL, rc_order, false);
+ for (int i = 0; i < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; ++i)
+ sort_sibling_loops_cmp_rpo[rc_order[i]] = i;
+ free (rc_order);
+
+ auto_vec<loop_p, 3> siblings;
+ loop_p loop;
+ FOR_EACH_LOOP_FN (fn, loop, LI_INCLUDE_ROOT)
+ if (loop->inner && loop->inner->next)
+ {
+ loop_p sibling = loop->inner;
+ do
+ {
+ siblings.safe_push (sibling);
+ sibling = sibling->next;
+ }
+ while (sibling);
+ siblings.qsort (sort_sibling_loops_cmp);
+ loop_p *siblingp = &loop->inner;
+ for (unsigned i = 0; i < siblings.length (); ++i)
+ {
+ *siblingp = siblings[i];
+ siblingp = &(*siblingp)->next;
+ }
+ *siblingp = NULL;
+ siblings.truncate (0);
+ }
+
+ free (sort_sibling_loops_cmp_rpo);
+ sort_sibling_loops_cmp_rpo = NULL;
+}
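/* Design note: vec::qsort comparators receive only the two elements being
   compared, so the header-index -> RPO mapping is handed to
   sort_sibling_loops_cmp through the file-scope pointer above.  Because
   flow_loop_tree_node_add pushes new siblings to the front of the list,
   sorting siblings by the RPO position of their headers restores the order
   flow_loops_find would have produced. */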
+
/* Ratio of frequencies of edges so that one or more latch edges are
considered to belong to the inner loop with the same header. */
#define HEAVY_EDGE_RATIO 8
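/* Worked example: with HEAVY_EDGE_RATIO 8 the test
   (tcount - mcount) * 8 > tcount rejects a candidate unless the heaviest
   latch edge carries at least 7/8 of the total latch count; e.g. for
   tcount = 800 an edge with mcount = 700 qualifies, one with mcount = 690
   does not. */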
{
unsigned i;
edge e, me = NULL;
- gcov_type mcount = 0, tcount = 0;
+ profile_count mcount = profile_count::zero (), tcount = profile_count::zero ();
FOR_EACH_VEC_ELT (latches, i, e)
{
- if (e->count > mcount)
+ if (e->count () > mcount)
{
me = e;
- mcount = e->count;
+ mcount = e->count ();
}
- tcount += e->count;
+ tcount += e->count ();
}
- if (tcount < HEAVY_EDGE_MIN_SAMPLES
- || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
+ if (!tcount.initialized_p () || !(tcount.ipa () > HEAVY_EDGE_MIN_SAMPLES)
+ || (tcount - mcount).apply_scale (HEAVY_EDGE_RATIO, 1) > tcount)
return NULL;
if (dump_file)
{
edge e, latch = latches[0];
unsigned i;
- gimple phi;
- gimple_stmt_iterator psi;
+ gphi *phi;
+ gphi_iterator psi;
tree lop;
basic_block bb;
a subloop. */
for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
{
- phi = gsi_stmt (psi);
+ phi = psi.phi ();
lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);
/* Ignore the values that are not changed inside the subloop. */
/* Callback for make_forwarder_block. Returns true if the edge E is marked
in the set MFB_REIS_SET. */
-static struct pointer_set_t *mfb_reis_set;
+static hash_set<edge> *mfb_reis_set;
static bool
mfb_redirect_edges_in_set (edge e)
{
- return pointer_set_contains (mfb_reis_set, e);
+ return mfb_reis_set->contains (e);
}
/* Creates a subloop of LOOP with latch edge LATCH. */
edge e, new_entry;
struct loop *new_loop;
- mfb_reis_set = pointer_set_create ();
+ mfb_reis_set = new hash_set<edge>;
FOR_EACH_EDGE (e, ei, loop->header->preds)
{
if (e != latch)
- pointer_set_insert (mfb_reis_set, e);
+ mfb_reis_set->add (e);
}
new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
NULL);
- pointer_set_destroy (mfb_reis_set);
+ delete mfb_reis_set;
loop->header = new_entry->src;
if (dump_file)
fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
- mfb_reis_set = pointer_set_create ();
+ mfb_reis_set = new hash_set<edge>;
FOR_EACH_VEC_ELT (latches, i, e)
- pointer_set_insert (mfb_reis_set, e);
+ mfb_reis_set->add (e);
latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
NULL);
- pointer_set_destroy (mfb_reis_set);
+ delete mfb_reis_set;
loop->header = latch->dest;
loop->latch = latch->src;
block. This would cause problems if the entry edge was the one from the
entry block. To avoid having to handle this case specially, split
such an entry edge. */
- e = find_edge (ENTRY_BLOCK_PTR, loop->header);
+ e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
if (e)
split_edge (e);
void
disambiguate_loops_with_multiple_latches (void)
{
- loop_iterator li;
struct loop *loop;
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
if (!loop->latch)
disambiguate_multiple_latches (loop);
{
struct loop *source_loop;
- if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
+ if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return 0;
source_loop = bb->loop_father;
body = XNEWVEC (basic_block, loop->num_nodes);
- if (loop->latch == EXIT_BLOCK_PTR)
+ if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
/* There may be blocks unreachable from EXIT_BLOCK, hence we need to
special-case the fake loop that contains the whole function. */
- gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks);
+ gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
body[tv++] = loop->header;
- body[tv++] = EXIT_BLOCK_PTR;
- FOR_EACH_BB (bb)
+ body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
+ FOR_EACH_BB_FN (bb, cfun)
body[tv++] = bb;
}
else
tovisit = XNEWVEC (basic_block, loop->num_nodes);
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
tv = 0;
fill_sons_in_loop (loop, loop->header, tovisit, &tv);
{
basic_block *blocks;
basic_block bb;
- bitmap visited;
- unsigned int i = 0;
- unsigned int vc = 1;
+ unsigned int i = 1;
+ unsigned int vc = 0;
gcc_assert (loop->num_nodes);
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
blocks = XNEWVEC (basic_block, loop->num_nodes);
- visited = BITMAP_ALLOC (NULL);
-
- bb = loop->header;
+ auto_bitmap visited;
+ blocks[0] = loop->header;
+ bitmap_set_bit (visited, loop->header->index);
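  /* BLOCKS doubles as the BFS worklist and as the result: I counts blocks
     discovered so far, VC counts blocks whose successors have already been
     scanned, so BLOCKS[VC .. I-1] is the pending queue. */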
while (i < loop->num_nodes)
{
edge e;
edge_iterator ei;
-
- if (bitmap_set_bit (visited, bb->index))
- /* This basic block is now visited */
- blocks[i++] = bb;
+ gcc_assert (i > vc);
+ bb = blocks[vc++];
FOR_EACH_EDGE (e, ei, bb->succs)
{
if (flow_bb_inside_loop_p (loop, e->dest))
{
+ /* This bb is now visited. */
if (bitmap_set_bit (visited, e->dest->index))
blocks[i++] = e->dest;
}
}
-
- gcc_assert (i >= vc);
-
- bb = blocks[vc++];
}
- BITMAP_FREE (visited);
return blocks;
}
/* Hash function for struct loop_exit. */
-static hashval_t
-loop_exit_hash (const void *ex)
+hashval_t
+loop_exit_hasher::hash (loop_exit *exit)
{
- const struct loop_exit *const exit = (const struct loop_exit *) ex;
-
return htab_hash_pointer (exit->e);
}
/* Equality function for struct loop_exit. Compares with edge. */
-static int
-loop_exit_eq (const void *ex, const void *e)
+bool
+loop_exit_hasher::equal (loop_exit *exit, edge e)
{
- const struct loop_exit *const exit = (const struct loop_exit *) ex;
-
return exit->e == e;
}
/* Frees the list of loop exit descriptions EX. */
-static void
-loop_exit_free (void *ex)
+void
+loop_exit_hasher::remove (loop_exit *exit)
{
- struct loop_exit *exit = (struct loop_exit *) ex, *next;
-
+ loop_exit *next;
for (; exit; exit = next)
{
next = exit->next_e;
static struct loop_exit *
get_exit_descriptions (edge e)
{
- return (struct loop_exit *) htab_find_with_hash (current_loops->exits, e,
- htab_hash_pointer (e));
+ return current_loops->exits->find_with_hash (e, htab_hash_pointer (e));
}
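/* Note on the conversion: loop_exit_hasher supplies the hash, equality and
   removal hooks that were previously passed to htab_create_ggc as C
   callbacks.  Its comparison type is an edge, so lookups like the one above
   can be keyed directly by the exit edge without building a loop_exit. */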
/* Updates the lists of loop exits in which E appears.
void
rescan_loop_exit (edge e, bool new_edge, bool removed)
{
- void **slot;
struct loop_exit *exits = NULL, *exit;
struct loop *aloop, *cloop;
aloop != cloop;
aloop = loop_outer (aloop))
{
- exit = ggc_alloc_loop_exit ();
+ exit = ggc_alloc<loop_exit> ();
exit->e = e;
exit->next = aloop->exits->next;
if (!exits && new_edge)
return;
- slot = htab_find_slot_with_hash (current_loops->exits, e,
- htab_hash_pointer (e),
- exits ? INSERT : NO_INSERT);
+ loop_exit **slot
+ = current_loops->exits->find_slot_with_hash (e, htab_hash_pointer (e),
+ exits ? INSERT : NO_INSERT);
if (!slot)
return;
if (exits)
{
if (*slot)
- loop_exit_free (*slot);
+ loop_exit_hasher::remove (*slot);
*slot = exits;
}
else
- htab_clear_slot (current_loops->exits, slot);
+ current_loops->exits->clear_slot (slot);
}
/* For each loop, record list of exit edges, and start maintaining these
loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
gcc_assert (current_loops->exits == NULL);
- current_loops->exits = htab_create_ggc (2 * number_of_loops (cfun),
- loop_exit_hash, loop_exit_eq,
- loop_exit_free);
+ current_loops->exits
+ = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->succs)
{
/* Dumps information about the exit in *SLOT to FILE.
Callback for hash_table::traverse. */
-static int
-dump_recorded_exit (void **slot, void *file)
+int
+dump_recorded_exit (loop_exit **slot, FILE *file)
{
- struct loop_exit *exit = (struct loop_exit *) *slot;
+ struct loop_exit *exit = *slot;
unsigned n = 0;
edge e = exit->e;
for (; exit != NULL; exit = exit->next_e)
n++;
- fprintf ((FILE*) file, "Edge %d->%d exits %u loops\n",
+ fprintf (file, "Edge %d->%d exits %u loops\n",
e->src->index, e->dest->index, n);
return 1;
{
if (!current_loops->exits)
return;
- htab_traverse (current_loops->exits, dump_recorded_exit, file);
+ current_loops->exits->traverse<FILE *, dump_recorded_exit> (file);
}
/* Releases lists of loop exits. */
void
-release_recorded_exits (void)
+release_recorded_exits (function *fn)
{
- gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS));
- htab_delete (current_loops->exits);
- current_loops->exits = NULL;
- loops_state_clear (LOOPS_HAVE_RECORDED_EXITS);
+ gcc_assert (loops_state_satisfies_p (fn, LOOPS_HAVE_RECORDED_EXITS));
+ loops_for_fn (fn)->exits->empty ();
+ loops_for_fn (fn)->exits = NULL;
+ loops_state_clear (fn, LOOPS_HAVE_RECORDED_EXITS);
}
/* Returns the list of the exit edges of a LOOP. */
edge_iterator ei;
struct loop_exit *exit;
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
/* If we maintain the lists of exits, use them. Otherwise we must
scan the body of the loop. */
unsigned i, n;
basic_block * body;
- gcc_assert (loop->latch != EXIT_BLOCK_PTR);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
body = get_loop_body (loop);
n = 0;
verify_loop_structure (void)
{
unsigned *sizes, i, j;
- sbitmap irreds;
basic_block bb, *bbs;
struct loop *loop;
int err = 0;
edge e;
unsigned num = number_of_loops (cfun);
- loop_iterator li;
struct loop_exit *exit, *mexit;
bool dom_available = dom_info_available_p (CDI_DOMINATORS);
- sbitmap visited;
if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
{
else
verify_dominators (CDI_DOMINATORS);
+ /* Check the loop tree root. */
+ if (current_loops->tree_root->header != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || current_loops->tree_root->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ || (current_loops->tree_root->num_nodes
+ != (unsigned) n_basic_blocks_for_fn (cfun)))
+ {
+ error ("corrupt loop tree root");
+ err = 1;
+ }
+
/* Check the headers. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb_loop_header_p (bb))
{
if (bb->loop_father->header == NULL)
}
/* Check the recorded loop father and sizes of loops. */
- visited = sbitmap_alloc (last_basic_block);
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
bitmap_clear (visited);
- bbs = XNEWVEC (basic_block, n_basic_blocks);
- FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+ bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
+ FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
unsigned n;
continue;
}
- n = get_loop_body_with_size (loop, bbs, n_basic_blocks);
+ n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
if (loop->num_nodes != n)
{
error ("size of loop %d should be %d, not %d",
}
}
free (bbs);
- sbitmap_free (visited);
/* Check headers and latches. */
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
i = loop->num;
if (loop->header == NULL)
/* Check irreducible loops. */
if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
{
+ auto_edge_flag saved_irr_mask (cfun);
/* Record old info. */
- irreds = sbitmap_alloc (last_basic_block);
- FOR_EACH_BB (bb)
+ auto_sbitmap irreds (last_basic_block_for_fn (cfun));
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
if (bb->flags & BB_IRREDUCIBLE_LOOP)
bitmap_clear_bit (irreds, bb->index);
FOR_EACH_EDGE (e, ei, bb->succs)
if (e->flags & EDGE_IRREDUCIBLE_LOOP)
- e->flags |= EDGE_ALL_FLAGS + 1;
+ e->flags |= saved_irr_mask;
}
/* Recount it. */
mark_irreducible_loops ();
/* Compare. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
{
if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
- && !(e->flags & (EDGE_ALL_FLAGS + 1)))
+ && !(e->flags & saved_irr_mask))
{
error ("edge from %d to %d should be marked irreducible",
e->src->index, e->dest->index);
err = 1;
}
else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
- && (e->flags & (EDGE_ALL_FLAGS + 1)))
+ && (e->flags & saved_irr_mask))
{
error ("edge from %d to %d should not be marked irreducible",
e->src->index, e->dest->index);
err = 1;
}
- e->flags &= ~(EDGE_ALL_FLAGS + 1);
+ e->flags &= ~saved_irr_mask;
}
}
- free (irreds);
}
/* Check the recorded loop exits. */
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
if (!loop->exits || loop->exits->e != NULL)
{
sizes = XCNEWVEC (unsigned, num);
memset (sizes, 0, sizeof (unsigned) * num);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
if (bb->loop_father == current_loops->tree_root)
}
}
- if (n_exits != htab_elements (current_loops->exits))
+ if (n_exits != current_loops->exits->elements ())
{
error ("too many loop exits recorded");
err = 1;
}
- FOR_EACH_LOOP (li, loop, 0)
+ FOR_EACH_LOOP (loop, 0)
{
eloops = 0;
for (exit = loop->exits->next; exit->e; exit = exit->next)
edge e;
edge_iterator ei;
- gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
+ gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
+ && ! loops_state_satisfies_p (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
FOR_EACH_EDGE (e, ei, loop->header->preds)
if (e->src != loop->latch)
break;
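  /* With preheaders in place a header always has a non-latch predecessor,
     except for the loop tree root, whose header is the ENTRY block; fall
     back to ENTRY's single successor edge in that case. */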
+ if (! e)
+ {
+ gcc_assert (! loop_outer (loop));
+ return single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ }
+
return e;
}
/* Return location corresponding to the loop control condition if possible. */
-location_t
+dump_user_location_t
get_loop_location (struct loop *loop)
{
- rtx insn = NULL;
+ rtx_insn *insn = NULL;
struct niter_desc *desc = NULL;
edge exit;
FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
{
if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
- return INSN_LOCATION (insn);
+ return insn;
}
}
/* If loop has a single exit, then the loop control branch
FOR_BB_INSNS_REVERSE (exit->src, insn)
{
if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
- return INSN_LOCATION (insn);
+ return insn;
}
}
/* Next check the latch, to see if it is non-empty. */
FOR_BB_INSNS_REVERSE (loop->latch, insn)
{
if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
- return INSN_LOCATION (insn);
+ return insn;
}
/* Finally, if none of the above identifies the loop control branch,
return the first location in the loop header. */
FOR_BB_INSNS (loop->header, insn)
{
if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
- return INSN_LOCATION (insn);
+ return insn;
}
/* If all else fails, simply return the current function location. */
- return DECL_SOURCE_LOCATION (current_function_decl);
+ return dump_user_location_t::from_function_decl (current_function_decl);
}
/* Records that every statement in LOOP is executed I_BOUND times.
I_BOUND times. */
void
-record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
- bool upper)
+record_niter_bound (struct loop *loop, const widest_int &i_bound,
+ bool realistic, bool upper)
{
/* Update the bounds only when there is no previous estimation, or when the
current estimation is smaller. */
if (upper
&& (!loop->any_upper_bound
- || i_bound.ult (loop->nb_iterations_upper_bound)))
+ || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
{
loop->any_upper_bound = true;
loop->nb_iterations_upper_bound = i_bound;
+ if (!loop->any_likely_upper_bound)
+ {
+ loop->any_likely_upper_bound = true;
+ loop->nb_iterations_likely_upper_bound = i_bound;
+ }
}
if (realistic
&& (!loop->any_estimate
- || i_bound.ult (loop->nb_iterations_estimate)))
+ || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
{
loop->any_estimate = true;
loop->nb_iterations_estimate = i_bound;
}
+ if (!realistic
+ && (!loop->any_likely_upper_bound
+ || wi::ltu_p (i_bound, loop->nb_iterations_likely_upper_bound)))
+ {
+ loop->any_likely_upper_bound = true;
+ loop->nb_iterations_likely_upper_bound = i_bound;
+ }
/* If an upper bound is smaller than the realistic estimate of the
number of iterations, use the upper bound instead. */
if (loop->any_upper_bound
&& loop->any_estimate
- && loop->nb_iterations_upper_bound.ult (loop->nb_iterations_estimate))
+ && wi::ltu_p (loop->nb_iterations_upper_bound,
+ loop->nb_iterations_estimate))
loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
+ if (loop->any_upper_bound
+ && loop->any_likely_upper_bound
+ && wi::ltu_p (loop->nb_iterations_upper_bound,
+ loop->nb_iterations_likely_upper_bound))
+ loop->nb_iterations_likely_upper_bound = loop->nb_iterations_upper_bound;
}
/* Similar to get_estimated_loop_iterations, but returns the estimate only
HOST_WIDE_INT
get_estimated_loop_iterations_int (struct loop *loop)
{
- double_int nit;
+ widest_int nit;
HOST_WIDE_INT hwi_nit;
if (!get_estimated_loop_iterations (loop, &nit))
return -1;
- if (!nit.fits_shwi ())
+ if (!wi::fits_shwi_p (nit))
return -1;
hwi_nit = nit.to_shwi ();
return snit < 0 ? -1 : snit;
}
+/* Returns a likely upper bound on the number of executions of statements
+ in the LOOP. For statements before the loop exit, this exceeds
+ the number of executions of the latch by one. */
+
+HOST_WIDE_INT
+likely_max_stmt_executions_int (struct loop *loop)
+{
+ HOST_WIDE_INT nit = get_likely_max_loop_iterations_int (loop);
+ HOST_WIDE_INT snit;
+
+ if (nit == -1)
+ return -1;
+
+ snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
+
+ /* If the computation overflows, return -1. */
+ return snit < 0 ? -1 : snit;
+}
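/* For instance, if the recorded likely upper bound on the latch already
   equals the largest HOST_WIDE_INT, adding one wraps to a negative value and
   the function deliberately reports -1 (unknown) instead. */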
+
/* Sets NIT to the estimated number of executions of the latch of the
LOOP. If we have no reliable estimate, the function returns false, otherwise
returns true. */
bool
-get_estimated_loop_iterations (struct loop *loop, double_int *nit)
+get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
{
/* Even if the bound is not recorded, we may be able to derive one from
the profile. */
if (!loop->any_estimate)
{
- if (loop->header->count)
+ if (loop->header->count.reliable_p ())
{
- *nit = gcov_type_to_double_int
+ *nit = gcov_type_to_wide_int
(expected_loop_iterations_unbounded (loop) + 1);
return true;
}
false, otherwise returns true. */
bool
-get_max_loop_iterations (struct loop *loop, double_int *nit)
+get_max_loop_iterations (const struct loop *loop, widest_int *nit)
{
if (!loop->any_upper_bound)
return false;
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-get_max_loop_iterations_int (struct loop *loop)
+get_max_loop_iterations_int (const struct loop *loop)
{
- double_int nit;
+ widest_int nit;
HOST_WIDE_INT hwi_nit;
if (!get_max_loop_iterations (loop, &nit))
return -1;
- if (!nit.fits_shwi ())
+ if (!wi::fits_shwi_p (nit))
+ return -1;
+ hwi_nit = nit.to_shwi ();
+
+ return hwi_nit < 0 ? -1 : hwi_nit;
+}
+
+/* Sets NIT to an upper bound for the maximum number of executions of the
+ latch of the LOOP. If we have no reliable estimate, the function returns
+ false, otherwise returns true. */
+
+bool
+get_likely_max_loop_iterations (struct loop *loop, widest_int *nit)
+{
+ if (!loop->any_likely_upper_bound)
+ return false;
+
+ *nit = loop->nb_iterations_likely_upper_bound;
+ return true;
+}
+
+/* Similar to get_likely_max_loop_iterations, but returns the estimate only
+ if it fits in a HOST_WIDE_INT. If this is not the case, or the estimate
+ on the number of iterations of LOOP could not be derived, returns -1. */
+
+HOST_WIDE_INT
+get_likely_max_loop_iterations_int (struct loop *loop)
+{
+ widest_int nit;
+ HOST_WIDE_INT hwi_nit;
+
+ if (!get_likely_max_loop_iterations (loop, &nit))
+ return -1;
+
+ if (!wi::fits_shwi_p (nit))
return -1;
hwi_nit = nit.to_shwi ();
{
return bb->loop_father ? loop_depth (bb->loop_father) : 0;
}
+
+/* Marks LOOP for removal and sets LOOPS_NEED_FIXUP. */
+
+void
+mark_loop_for_removal (loop_p loop)
+{
+ if (loop->header == NULL)
+ return;
+ loop->former_header = loop->header;
+ loop->header = NULL;
+ loop->latch = NULL;
+ loops_state_set (LOOPS_NEED_FIXUP);
+}
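/* Sketch of intended use (illustrative only): a pass that destroys the body
   of LOOP calls mark_loop_for_removal (loop) instead of unlinking the loop
   itself; the old header is stashed in former_header, header and latch are
   cleared, and LOOPS_NEED_FIXUP makes a later loop fixup dispose of (or
   re-discover) the now headerless loop. */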