final gcond. */
static gcond *
-vect_set_loop_condition_partial_vectors (class loop *loop,
+vect_set_loop_condition_partial_vectors (class loop *loop, edge exit_edge,
loop_vec_info loop_vinfo, tree niters,
tree final_iv, bool niters_maybe_zero,
gimple_stmt_iterator loop_cond_gsi)
add_header_seq (loop, header_seq);
/* Get a boolean result that tells us whether to iterate. */
- edge exit_edge = single_exit (loop);
gcond *cond_stmt;
if (LOOP_VINFO_USING_DECREMENTING_IV_P (loop_vinfo)
&& !LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo))
if (final_iv)
{
gassign *assign = gimple_build_assign (final_iv, orig_niters);
- gsi_insert_on_edge_immediate (single_exit (loop), assign);
+ gsi_insert_on_edge_immediate (exit_edge, assign);
}
return cond_stmt;
static gcond *
vect_set_loop_condition_partial_vectors_avx512 (class loop *loop,
+ edge exit_edge,
loop_vec_info loop_vinfo, tree niters,
tree final_iv,
bool niters_maybe_zero,
add_preheader_seq (loop, preheader_seq);
/* Adjust the exit test using the decrementing IV. */
- edge exit_edge = single_exit (loop);
tree_code code = (exit_edge->flags & EDGE_TRUE_VALUE) ? LE_EXPR : GT_EXPR;
/* When we peel for alignment with niter_skip != 0 this can
cause niter + niter_skip to wrap and since we are comparing the
loop handles exactly VF scalars per iteration. */
static gcond *
-vect_set_loop_condition_normal (class loop *loop, tree niters, tree step,
+vect_set_loop_condition_normal (loop_vec_info /* loop_vinfo */, edge exit_edge,
+ class loop *loop, tree niters, tree step,
tree final_iv, bool niters_maybe_zero,
gimple_stmt_iterator loop_cond_gsi)
{
gcond *cond_stmt;
gcond *orig_cond;
edge pe = loop_preheader_edge (loop);
- edge exit_edge = single_exit (loop);
gimple_stmt_iterator incr_gsi;
bool insert_after;
enum tree_code code;
tree niters_type = TREE_TYPE (niters);
- orig_cond = get_loop_exit_condition (loop);
+ orig_cond = get_loop_exit_condition (exit_edge);
gcc_assert (orig_cond);
loop_cond_gsi = gsi_for_stmt (orig_cond);
if (final_iv)
{
gassign *assign;
- edge exit = single_exit (loop);
- gcc_assert (single_pred_p (exit->dest));
+ gcc_assert (single_pred_p (exit_edge->dest));
tree phi_dest
= integer_zerop (init) ? final_iv : copy_ssa_name (indx_after_incr);
/* Make sure to maintain LC SSA form here and elide the subtraction
if the value is zero. */
- gphi *phi = create_phi_node (phi_dest, exit->dest);
- add_phi_arg (phi, indx_after_incr, exit, UNKNOWN_LOCATION);
+ gphi *phi = create_phi_node (phi_dest, exit_edge->dest);
+ add_phi_arg (phi, indx_after_incr, exit_edge, UNKNOWN_LOCATION);
if (!integer_zerop (init))
{
assign = gimple_build_assign (final_iv, MINUS_EXPR,
phi_dest, init);
- gimple_stmt_iterator gsi = gsi_after_labels (exit->dest);
+ gimple_stmt_iterator gsi = gsi_after_labels (exit_edge->dest);
gsi_insert_before (&gsi, assign, GSI_SAME_STMT);
}
}
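
To make the FINAL_IV handling above concrete, here is a sketch of the exit block after the transform, with invented SSA names and INIT assumed to be 3 (when INIT is zero the PHI result is FINAL_IV itself and no subtraction is emitted):

  /* Sketch only, not part of the patch:

       <exit_edge->dest>  (single predecessor is the loop exit)
         # indx_lcssa = PHI <indx_after_incr(exit_edge)>
         final_iv = indx_lcssa - 3;
  */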
Assumption: the exit-condition of LOOP is the last stmt in the loop. */
void
-vect_set_loop_condition (class loop *loop, loop_vec_info loop_vinfo,
+vect_set_loop_condition (class loop *loop, edge loop_e, loop_vec_info loop_vinfo,
tree niters, tree step, tree final_iv,
bool niters_maybe_zero)
{
gcond *cond_stmt;
- gcond *orig_cond = get_loop_exit_condition (loop);
+ gcond *orig_cond = get_loop_exit_condition (loop_e);
gimple_stmt_iterator loop_cond_gsi = gsi_for_stmt (orig_cond);
if (loop_vinfo && LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo))
{
if (LOOP_VINFO_PARTIAL_VECTORS_STYLE (loop_vinfo) == vect_partial_vectors_avx512)
- cond_stmt = vect_set_loop_condition_partial_vectors_avx512 (loop, loop_vinfo,
+ cond_stmt = vect_set_loop_condition_partial_vectors_avx512 (loop, loop_e,
+ loop_vinfo,
niters, final_iv,
niters_maybe_zero,
loop_cond_gsi);
else
- cond_stmt = vect_set_loop_condition_partial_vectors (loop, loop_vinfo,
+ cond_stmt = vect_set_loop_condition_partial_vectors (loop, loop_e,
+ loop_vinfo,
niters, final_iv,
niters_maybe_zero,
loop_cond_gsi);
}
else
- cond_stmt = vect_set_loop_condition_normal (loop, niters, step, final_iv,
+ cond_stmt = vect_set_loop_condition_normal (loop_vinfo, loop_e, loop,
+ niters,
+ step, final_iv,
niters_maybe_zero,
loop_cond_gsi);
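
With the extra edge parameter, the caller now names the exit whose condition gets rewritten instead of vect_set_loop_condition recomputing single_exit internally. A usage sketch, mirroring a later hunk of this patch in vect_transform_loop:

  edge exit_e = LOOP_VINFO_IV_EXIT (loop_vinfo);
  vect_set_loop_condition (loop, exit_e, loop_vinfo, niters_vector,
                           step_vector, niters_vector_mult_vf,
                           !niters_no_overflow);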
get_current_def (PHI_ARG_DEF_FROM_EDGE (from_phi, from)));
}
-
/* Given LOOP this function generates a new copy of it and puts it
on E which is either the entry or exit of LOOP. If SCALAR_LOOP is
non-NULL, assume LOOP and SCALAR_LOOP are equivalent and copy the
basic blocks from SCALAR_LOOP instead of LOOP, but to either the
entry or exit of LOOP. */
class loop *
-slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop,
- class loop *scalar_loop, edge e)
+slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop, edge loop_exit,
+ class loop *scalar_loop,
+ edge scalar_exit, edge e, edge *new_e)
{
class loop *new_loop;
basic_block *new_bbs, *bbs, *pbbs;
edge exit, new_exit;
bool duplicate_outer_loop = false;
- exit = single_exit (loop);
+ exit = loop_exit;
at_exit = (e == exit);
if (!at_exit && e != loop_preheader_edge (loop))
return NULL;
if (scalar_loop == NULL)
- scalar_loop = loop;
+ {
+ scalar_loop = loop;
+ scalar_exit = loop_exit;
+ }
bbs = XNEWVEC (basic_block, scalar_loop->num_nodes + 1);
pbbs = bbs + 1;
bbs[0] = preheader;
new_bbs = XNEWVEC (basic_block, scalar_loop->num_nodes + 1);
- exit = single_exit (scalar_loop);
copy_bbs (bbs, scalar_loop->num_nodes + 1, new_bbs,
- &exit, 1, &new_exit, NULL,
+ &scalar_exit, 1, &new_exit, NULL,
at_exit ? loop->latch : e->src, true);
- exit = single_exit (loop);
+ exit = loop_exit;
basic_block new_preheader = new_bbs[0];
+ if (new_e)
+ *new_e = new_exit;
+
/* Before installing PHI arguments make sure that the edges
into them match that of the scalar loop we analyzed. This
makes sure the SLP tree matches up between the main vectorized
but LOOP will not. slpeel_update_phi_nodes_for_guard{1,2} expects
the LOOP SSA_NAMEs (on the exit edge and edge from latch to
header) to have current_def set, so copy them over. */
- slpeel_duplicate_current_defs_from_edges (single_exit (scalar_loop),
- exit);
+ slpeel_duplicate_current_defs_from_edges (scalar_exit, exit);
slpeel_duplicate_current_defs_from_edges (EDGE_SUCC (scalar_loop->latch,
0),
EDGE_SUCC (loop->latch, 0));
*/
bool
-slpeel_can_duplicate_loop_p (const class loop *loop, const_edge e)
+slpeel_can_duplicate_loop_p (const class loop *loop, const_edge exit_e,
+ const_edge e)
{
- edge exit_e = single_exit (loop);
edge entry_e = loop_preheader_edge (loop);
- gcond *orig_cond = get_loop_exit_condition (loop);
+ gcond *orig_cond = get_loop_exit_condition (exit_e);
gimple_stmt_iterator loop_exit_gsi = gsi_last_bb (exit_e->src);
unsigned int num_bb = loop->inner? 5 : 2;
if (!loop_outer (loop)
|| loop->num_nodes != num_bb
|| !empty_block_p (loop->latch)
- || !single_exit (loop)
+ || !exit_e
/* Verify that new loop exit condition can be trivially modified. */
|| (!orig_cond || orig_cond != gsi_stmt (loop_exit_gsi))
|| (e != exit_e && e != entry_e))
return ret;
}
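
Correspondingly, callers of slpeel_can_duplicate_loop_p now pass the exit they intend to peel around rather than relying on single_exit. A usage sketch, mirroring the prolog-peeling hunk in vect_do_peeling below:

  edge entry_e = loop_preheader_edge (loop);
  edge exit_e = LOOP_VINFO_IV_EXIT (loop_vinfo);
  gcc_checking_assert (slpeel_can_duplicate_loop_p (loop, exit_e, entry_e));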
-/* Function vect_get_loop_location.
+/* Function find_loop_location.
Extract the location of the loop in the source code.
If the loop is not well formed for vectorization, an estimated
if (!loop)
return dump_user_location_t ();
- stmt = get_loop_exit_condition (loop);
+ if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
+ {
+ /* We only care about the loop location, so use any exit with location
+ information. */
+ for (edge e : get_loop_exit_edges (loop))
+ {
+ stmt = get_loop_exit_condition (e);
- if (stmt
- && LOCATION_LOCUS (gimple_location (stmt)) > BUILTINS_LOCATION)
- return stmt;
+ if (stmt
+ && LOCATION_LOCUS (gimple_location (stmt)) > BUILTINS_LOCATION)
+ return stmt;
+ }
+ }
/* If we got here the loop is probably not "well formed",
try to estimate the loop location */
gphi_iterator gsi, gsi1;
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block update_bb = update_e->dest;
- basic_block exit_bb = single_exit (loop)->dest;
+
+ basic_block exit_bb = LOOP_VINFO_IV_EXIT (loop_vinfo)->dest;
/* Make sure there exists a single-predecessor exit bb: */
gcc_assert (single_pred_p (exit_bb));
{
/* We should be using a step_vector of VF if VF is variable. */
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo).to_constant ();
- class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree type = TREE_TYPE (niters_vector);
tree log_vf = build_int_cst (type, exact_log2 (vf));
- basic_block exit_bb = single_exit (loop)->dest;
+ basic_block exit_bb = LOOP_VINFO_IV_EXIT (loop_vinfo)->dest;
gcc_assert (niters_vector_mult_vf_ptr != NULL);
tree niters_vector_mult_vf = fold_build2 (LSHIFT_EXPR, type,
NULL. */
static tree
-find_guard_arg (class loop *loop, class loop *epilog ATTRIBUTE_UNUSED,
- gphi *lcssa_phi)
+find_guard_arg (class loop *loop ATTRIBUTE_UNUSED, const_edge loop_e,
+ class loop *epilog ATTRIBUTE_UNUSED, gphi *lcssa_phi)
{
gphi_iterator gsi;
- edge e = single_exit (loop);
- gcc_assert (single_pred_p (e->dest));
- for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
+ gcc_assert (single_pred_p (loop_e->dest));
+ for (gsi = gsi_start_phis (loop_e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
gphi *phi = gsi.phi ();
if (operand_equal_p (PHI_ARG_DEF (phi, 0),
static void
slpeel_update_phi_nodes_for_loops (loop_vec_info loop_vinfo,
- class loop *first, class loop *second,
+ class loop *first, edge first_loop_e,
+ class loop *second, edge second_loop_e,
bool create_lcssa_for_iv_phis)
{
gphi_iterator gsi_update, gsi_orig;
edge first_latch_e = EDGE_SUCC (first->latch, 0);
edge second_preheader_e = loop_preheader_edge (second);
- basic_block between_bb = single_exit (first)->dest;
+ basic_block between_bb = first_loop_e->dest;
gcc_assert (between_bb == second_preheader_e->src);
gcc_assert (single_pred_p (between_bb) && single_succ_p (between_bb));
{
tree new_res = copy_ssa_name (PHI_RESULT (orig_phi));
gphi *lcssa_phi = create_phi_node (new_res, between_bb);
- add_phi_arg (lcssa_phi, arg, single_exit (first), UNKNOWN_LOCATION);
+ add_phi_arg (lcssa_phi, arg, first_loop_e, UNKNOWN_LOCATION);
arg = new_res;
}
for correct vectorization of live stmts. */
if (loop == first)
{
- basic_block orig_exit = single_exit (second)->dest;
+ basic_block orig_exit = second_loop_e->dest;
for (gsi_orig = gsi_start_phis (orig_exit);
!gsi_end_p (gsi_orig); gsi_next (&gsi_orig))
{
if (TREE_CODE (orig_arg) != SSA_NAME || virtual_operand_p (orig_arg))
continue;
+ const_edge exit_e = LOOP_VINFO_IV_EXIT (loop_vinfo);
/* Already created in the above loop. */
- if (find_guard_arg (first, second, orig_phi))
+ if (find_guard_arg (first, exit_e, second, orig_phi))
continue;
tree new_res = copy_ssa_name (orig_arg);
gphi *lcphi = create_phi_node (new_res, between_bb);
- add_phi_arg (lcphi, orig_arg, single_exit (first), UNKNOWN_LOCATION);
+ add_phi_arg (lcphi, orig_arg, first_loop_e, UNKNOWN_LOCATION);
}
}
}
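
For reference, the intermediate block between the two loops now carries LC-SSA PHIs keyed off FIRST_LOOP_E rather than single_exit (first); schematically (a sketch with invented names):

  /* Sketch only:

       <between_bb>  (== first_loop_e->dest == preheader src of SECOND)
         # x_lcssa = PHI <x_latest(first_loop_e)>

     SECOND's header PHIs then pick up x_lcssa on the preheader edge, keeping
     the region in loop-closed SSA.  */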
if (!merge_arg)
merge_arg = old_arg;
- tree guard_arg = find_guard_arg (loop, epilog, update_phi);
+ tree guard_arg
+ = find_guard_arg (loop, single_exit (loop), epilog, update_phi);
/* If the var is live after loop but not a reduction, we simply
use the old arg. */
if (!guard_arg)
}
if (vect_epilogues)
- /* Make sure to set the epilogue's epilogue scalar loop, such that we can
- use the original scalar loop as remaining epilogue if necessary. */
- LOOP_VINFO_SCALAR_LOOP (epilogue_vinfo)
- = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
+ {
+ /* Make sure to set the epilogue's epilogue scalar loop, such that we can
+ use the original scalar loop as remaining epilogue if necessary. */
+ LOOP_VINFO_SCALAR_LOOP (epilogue_vinfo)
+ = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
+ LOOP_VINFO_SCALAR_IV_EXIT (epilogue_vinfo)
+ = LOOP_VINFO_SCALAR_IV_EXIT (loop_vinfo);
+ }
if (prolog_peeling)
{
e = loop_preheader_edge (loop);
- gcc_checking_assert (slpeel_can_duplicate_loop_p (loop, e));
+ edge exit_e = LOOP_VINFO_IV_EXIT (loop_vinfo);
+ gcc_checking_assert (slpeel_can_duplicate_loop_p (loop, exit_e, e));
/* Peel prolog and put it on preheader edge of loop. */
- prolog = slpeel_tree_duplicate_loop_to_edge_cfg (loop, scalar_loop, e);
+ edge scalar_e = LOOP_VINFO_SCALAR_IV_EXIT (loop_vinfo);
+ edge prolog_e = NULL;
+ prolog = slpeel_tree_duplicate_loop_to_edge_cfg (loop, exit_e,
+ scalar_loop, scalar_e,
+ e, &prolog_e);
gcc_assert (prolog);
prolog->force_vectorize = false;
- slpeel_update_phi_nodes_for_loops (loop_vinfo, prolog, loop, true);
+ slpeel_update_phi_nodes_for_loops (loop_vinfo, prolog, prolog_e, loop,
+ exit_e, true);
first_loop = prolog;
reset_original_copy_tables ();
/* Update the number of iterations for prolog loop. */
tree step_prolog = build_one_cst (TREE_TYPE (niters_prolog));
- vect_set_loop_condition (prolog, NULL, niters_prolog,
+ vect_set_loop_condition (prolog, prolog_e, loop_vinfo, niters_prolog,
step_prolog, NULL_TREE, false);
/* Skip the prolog loop. */
if (epilog_peeling)
{
- e = single_exit (loop);
- gcc_checking_assert (slpeel_can_duplicate_loop_p (loop, e));
+ e = LOOP_VINFO_IV_EXIT (loop_vinfo);
+ gcc_checking_assert (slpeel_can_duplicate_loop_p (loop, e, e));
/* Peel epilog and put it on exit edge of loop. If we are vectorizing
said epilog then we should use a copy of the main loop as a starting
If we are not vectorizing the epilog then we should use the scalar loop
as the transformations mentioned above make less or no sense when not
vectorizing. */
+ edge scalar_e = LOOP_VINFO_SCALAR_IV_EXIT (loop_vinfo);
epilog = vect_epilogues ? get_loop_copy (loop) : scalar_loop;
- epilog = slpeel_tree_duplicate_loop_to_edge_cfg (loop, epilog, e);
+ edge epilog_e = vect_epilogues ? e : scalar_e;
+ edge new_epilog_e = NULL;
+ epilog = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e, epilog,
+ epilog_e, e,
+ &new_epilog_e);
+ LOOP_VINFO_EPILOGUE_IV_EXIT (loop_vinfo) = new_epilog_e;
gcc_assert (epilog);
-
epilog->force_vectorize = false;
- slpeel_update_phi_nodes_for_loops (loop_vinfo, loop, epilog, false);
+ slpeel_update_phi_nodes_for_loops (loop_vinfo, loop, e, epilog,
+ new_epilog_e, false);
bb_before_epilog = loop_preheader_edge (epilog)->src;
/* Scalar version loop may be preferred. In this case, add guard
{
guard_cond = fold_build2 (EQ_EXPR, boolean_type_node,
niters, niters_vector_mult_vf);
- guard_bb = single_exit (loop)->dest;
- guard_to = split_edge (single_exit (epilog));
+ guard_bb = LOOP_VINFO_IV_EXIT (loop_vinfo)->dest;
+ edge epilog_e = LOOP_VINFO_EPILOGUE_IV_EXIT (loop_vinfo);
+ guard_to = split_edge (epilog_e);
guard_e = slpeel_add_loop_guard (guard_bb, guard_cond, guard_to,
skip_vector ? anchor : guard_bb,
prob_epilog.invert (),
irred_flag);
if (vect_epilogues)
epilogue_vinfo->skip_this_loop_edge = guard_e;
- slpeel_update_phi_nodes_for_guard2 (loop, epilog, guard_e,
- single_exit (epilog));
+ slpeel_update_phi_nodes_for_guard2 (loop, epilog, guard_e, epilog_e);
/* Only need to handle basic block before epilog loop if it's not
the guard_bb, which is the case when skip_vector is true. */
if (guard_bb != bb_before_epilog)
{
epilog->aux = epilogue_vinfo;
LOOP_VINFO_LOOP (epilogue_vinfo) = epilog;
+ LOOP_VINFO_IV_EXIT (epilogue_vinfo)
+ = LOOP_VINFO_EPILOGUE_IV_EXIT (loop_vinfo);
loop_constraint_clear (epilog, LOOP_C_INFINITE);
static gcond *
-vect_get_loop_niters (class loop *loop, tree *assumptions,
+vect_get_loop_niters (class loop *loop, edge exit, tree *assumptions,
tree *number_of_iterations, tree *number_of_iterationsm1)
{
- edge exit = single_exit (loop);
class tree_niter_desc niter_desc;
tree niter_assumptions, niter, may_be_zero;
- gcond *cond = get_loop_exit_condition (loop);
+ gcond *cond = get_loop_exit_condition (exit);
return cond;
}
+/* Determine the main loop exit for the vectorizer. */
+
+edge
+vec_init_loop_exit_info (class loop *loop)
+{
+ /* Before we begin, we must first determine which exit is the main one and
+ which are auxiliary exits. */
+ auto_vec<edge> exits = get_loop_exit_edges (loop);
+ if (exits.length () == 1)
+ return exits[0];
+ else
+ return NULL;
+}
+
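
For now only single-exit loops get a main exit; anything else is rejected in vect_analyze_loop_form below. As a hedged sketch of how the selection could later be generalized (not part of this patch; it reuses the existing number_of_iterations_exit analysis), the body could prefer an exit whose iteration count is computable:

  /* Hypothetical extension: pick the first countable exit as the main exit.  */
  class tree_niter_desc niter_desc;
  for (edge exit : get_loop_exit_edges (loop))
    if (number_of_iterations_exit (loop, exit, &niter_desc, false))
      return exit;
  return NULL;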
/* Function bb_in_loop_p
Used as predicate for dfs order traversal of the loop bbs. */
has_mask_store (false),
scalar_loop_scaling (profile_probability::uninitialized ()),
scalar_loop (NULL),
- orig_loop_info (NULL)
+ orig_loop_info (NULL),
+ vec_loop_iv_exit (NULL),
+ vec_epilogue_loop_iv_exit (NULL),
+ scalar_loop_iv_exit (NULL)
{
/* CHECKME: We want to visit all BBs before their successors (except for
latch blocks, for which this assertion wouldn't hold). In the simple
{
DUMP_VECT_SCOPE ("vect_analyze_loop_form");
+ edge exit_e = vec_init_loop_exit_info (loop);
+ if (!exit_e)
+ return opt_result::failure_at (vect_location,
+ "not vectorized:"
+ " could not determine main exit from"
+ " loop with multiple exits.\n");
+ info->loop_exit = exit_e;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location,
+ "using as main loop exit: %d -> %d [AUX: %p]\n",
+ exit_e->src->index, exit_e->dest->index, exit_e->aux);
+
/* Different restrictions apply when we are considering an inner-most loop,
vs. an outer (nested) loop.
(FORNOW. May want to relax some of these restrictions in the future). */
" abnormal loop exit edge.\n");
info->loop_cond
- = vect_get_loop_niters (loop, &info->assumptions,
+ = vect_get_loop_niters (loop, e, &info->assumptions,
&info->number_of_iterations,
&info->number_of_iterationsm1);
if (!info->loop_cond)
stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (info->loop_cond);
STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
+
+ LOOP_VINFO_IV_EXIT (loop_vinfo) = info->loop_exit;
+
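
The accessors used throughout (LOOP_VINFO_IV_EXIT, LOOP_VINFO_EPILOGUE_IV_EXIT, LOOP_VINFO_SCALAR_IV_EXIT) are assumed to map onto the edge fields initialized in the _loop_vec_info constructor above; the tree-vectorizer.h side is not in this excerpt, but it would look roughly like:

  /* Sketch of the assumed header additions; field names taken from the
     constructor initializer list in this patch.  */
  /* The controlling IV exit of the loop being vectorized.  */
  edge vec_loop_iv_exit;
  /* The controlling IV exit of the peeled vector epilogue loop.  */
  edge vec_epilogue_loop_iv_exit;
  /* The controlling IV exit of the original scalar loop.  */
  edge scalar_loop_iv_exit;

  #define LOOP_VINFO_IV_EXIT(L)           (L)->vec_loop_iv_exit
  #define LOOP_VINFO_EPILOGUE_IV_EXIT(L)  (L)->vec_epilogue_loop_iv_exit
  #define LOOP_VINFO_SCALAR_IV_EXIT(L)    (L)->scalar_loop_iv_exit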
if (info->inner_loop_cond)
{
stmt_vec_info inner_loop_cond_info
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
if (!vect_can_advance_ivs_p (loop_vinfo)
- || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
- single_exit (LOOP_VINFO_LOOP
- (loop_vinfo))))
+ || !slpeel_can_duplicate_loop_p (loop,
+ LOOP_VINFO_IV_EXIT (loop_vinfo),
+ LOOP_VINFO_IV_EXIT (loop_vinfo)))
{
ok = opt_result::failure_at (vect_location,
"not vectorized: can't create required "
Store them in NEW_PHIS. */
if (double_reduc)
loop = outer_loop;
- exit_bb = single_exit (loop)->dest;
+ exit_bb = LOOP_VINFO_IV_EXIT (loop_vinfo)->dest;
exit_gsi = gsi_after_labels (exit_bb);
reduc_inputs.create (slp_node ? vec_num : ncopies);
for (unsigned i = 0; i < vec_num; i++)
phi = create_phi_node (new_def, exit_bb);
if (j)
def = gimple_get_lhs (STMT_VINFO_VEC_STMTS (rdef_info)[j]);
- SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
+ SET_PHI_ARG_DEF (phi, LOOP_VINFO_IV_EXIT (loop_vinfo)->dest_idx, def);
new_def = gimple_convert (&stmts, vectype, new_def);
reduc_inputs.quick_push (new_def);
}
lhs' = new_tree; */
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- basic_block exit_bb = single_exit (loop)->dest;
+ basic_block exit_bb = LOOP_VINFO_IV_EXIT (loop_vinfo)->dest;
gcc_assert (single_pred_p (exit_bb));
tree vec_lhs_phi = copy_ssa_name (vec_lhs);
gimple *phi = create_phi_node (vec_lhs_phi, exit_bb);
- SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, vec_lhs);
+ SET_PHI_ARG_DEF (phi, LOOP_VINFO_IV_EXIT (loop_vinfo)->dest_idx, vec_lhs);
gimple_seq stmts = NULL;
tree new_tree;
profile. */
static void
-scale_profile_for_vect_loop (class loop *loop, unsigned vf, bool flat)
+scale_profile_for_vect_loop (class loop *loop, edge exit_e, unsigned vf, bool flat)
{
/* For flat profiles do not scale down proportionally by VF and only
cap by known iteration count bounds. */
return;
}
/* Loop body executes VF fewer times and exit increases VF times. */
- edge exit_e = single_exit (loop);
profile_count entry_count = loop_preheader_edge (loop)->count ();
/* If we have unreliable loop profile avoid dropping entry
/* Make sure there exists a single-predecessor exit bb. Do this before
versioning. */
- edge e = single_exit (loop);
+ edge e = LOOP_VINFO_IV_EXIT (loop_vinfo);
if (! single_pred_p (e->dest))
{
split_loop_exit_edge (e, true);
loop closed PHI nodes on the exit. */
if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
{
- e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
+ e = LOOP_VINFO_SCALAR_IV_EXIT (loop_vinfo);
if (! single_pred_p (e->dest))
{
split_loop_exit_edge (e, true);
a zero NITERS becomes a nonzero NITERS_VECTOR. */
if (integer_onep (step_vector))
niters_no_overflow = true;
- vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
- niters_vector_mult_vf, !niters_no_overflow);
+ vect_set_loop_condition (loop, LOOP_VINFO_IV_EXIT (loop_vinfo), loop_vinfo,
+ niters_vector, step_vector, niters_vector_mult_vf,
+ !niters_no_overflow);
unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
assumed_vf) - 1
: wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
assumed_vf) - 1);
- scale_profile_for_vect_loop (loop, assumed_vf, flat);
+ scale_profile_for_vect_loop (loop, LOOP_VINFO_IV_EXIT (loop_vinfo),
+ assumed_vf, flat);
if (dump_enabled_p ())
{