int ncopies;
if (slp_node)
{
- vec_num = SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis).length ();
+ vec_num = SLP_TREE_VEC_DEFS (slp_node_instance->reduc_phis).length ();
ncopies = 1;
}
else
new_stmt, gsi);
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
{
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else if (single_defuse_cycle
&& i < ncopies - 1)
{
/* The loop-latch arg is set in epilogue processing. */
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi);
+ slp_node->push_vec_def (new_phi);
else
{
if (j == 0)
gphi *new_phi = create_phi_node (vec_dest, bb);
add_phi_arg (new_phi, vec_oprnds[i], e, UNKNOWN_LOCATION);
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi);
+ slp_node->push_vec_def (new_phi);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_phi);
}
/* Skip not yet vectorized defs. */
if (SLP_TREE_DEF_TYPE (child) == vect_internal_def
- && SLP_TREE_VEC_STMTS (child).is_empty ())
+ && SLP_TREE_VEC_DEFS (child).is_empty ())
continue;
auto_vec<tree> vec_oprnds;
{
/* Create the vectorized LC PHI node. */
new_phis.quick_push (create_phi_node (vec_dest, bb));
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phis[j]);
+ slp_node->push_vec_def (new_phis[j]);
}
}
edge e = gimple_phi_arg_edge (as_a <gphi *> (stmt_info->stmt), i);
vect_finish_stmt_generation (loop_vinfo, stmt_info, vperm, &gsi2);
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (vperm);
+ slp_node->push_vec_def (vperm);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (vperm);
}
/* Set the arguments of the phi node: */
add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
- SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi);
+ slp_node->push_vec_def (induction_phi);
}
if (!nested_in_vect_loop)
{
vec_steps.reserve (nivs-ivn);
for (; ivn < nivs; ++ivn)
{
- SLP_TREE_VEC_STMTS (slp_node)
- .quick_push (SLP_TREE_VEC_STMTS (slp_node)[0]);
+ slp_node->push_vec_def (SLP_TREE_VEC_DEFS (slp_node)[0]);
vec_steps.quick_push (vec_steps[0]);
}
}
: build_int_cstu (stept, vfp));
for (; ivn < nvects; ++ivn)
{
- gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs];
+ gimple *iv
+ = SSA_NAME_DEF_STMT (SLP_TREE_VEC_DEFS (slp_node)[ivn - nivs]);
tree def = gimple_get_lhs (iv);
if (ivn < 2*nivs)
vec_steps[ivn - nivs]
gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
gsi_insert_seq_after (&tgsi, stmts, GSI_CONTINUE_LINKING);
}
- SLP_TREE_VEC_STMTS (slp_node)
- .quick_push (SSA_NAME_DEF_STMT (def));
+ slp_node->push_vec_def (def);
}
}
gcc_assert (!loop_vinfo || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
/* Get the correct slp vectorized stmt. */
- vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry];
- vec_lhs = gimple_get_lhs (vec_stmt);
+ vec_lhs = SLP_TREE_VEC_DEFS (slp_node)[vec_entry];
+ vec_stmt = SSA_NAME_DEF_STMT (vec_lhs);
/* Get entry to use. */
bitstart = bitsize_int (vec_index);
slp_first_node = this;
SLP_TREE_SCALAR_STMTS (this) = vNULL;
SLP_TREE_SCALAR_OPS (this) = vNULL;
- SLP_TREE_VEC_STMTS (this) = vNULL;
SLP_TREE_VEC_DEFS (this) = vNULL;
SLP_TREE_NUMBER_OF_VEC_STMTS (this) = 0;
SLP_TREE_CHILDREN (this) = vNULL;
SLP_TREE_CHILDREN (this).release ();
SLP_TREE_SCALAR_STMTS (this).release ();
SLP_TREE_SCALAR_OPS (this).release ();
- SLP_TREE_VEC_STMTS (this).release ();
SLP_TREE_VEC_DEFS (this).release ();
SLP_TREE_LOAD_PERMUTATION (this).release ();
SLP_TREE_LANE_PERMUTATION (this).release ();
free (failed);
}
+/* Push the single SSA definition in DEF to the vector of vector defs. */
+
+void
+_slp_tree::push_vec_def (gimple *def)
+{
+ if (gphi *phi = dyn_cast <gphi *> (def))
+ vec_defs.quick_push (gimple_phi_result (phi));
+ else
+ {
+ def_operand_p defop = single_ssa_def_operand (def, SSA_OP_ALL_DEFS);
+ vec_defs.quick_push (get_def_from_ptr (defop));
+ }
+}
+
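Note on the new interface: push_vec_def records the SSA name a statement defines rather than the statement itself, and the tree overload (declared in the tree-vectorizer.h hunk further below) stores a pre-existing def unchanged. A minimal standalone sketch of that contract, with toy_stmt, toy_slp_tree and std::string standing in for gimple *, _slp_tree and tree (illustrative stand-ins, not GCC API):

// Toy model of the two push_vec_def overloads: the statement overload
// stores the single SSA name the statement defines; the def overload
// stores an already-existing SSA name as-is.
#include <cassert>
#include <string>
#include <vector>

struct toy_stmt { std::string lhs; };      // stand-in for gimple *

struct toy_slp_tree                        // stand-in for _slp_tree
{
  std::vector<std::string> vec_defs;       // stand-in for vec<tree> vec_defs

  void push_vec_def (const toy_stmt *def)       // gimple * overload
  { vec_defs.push_back (def->lhs); }
  void push_vec_def (const std::string &def)    // tree overload
  { vec_defs.push_back (def); }
};

int main ()
{
  toy_slp_tree node;
  toy_stmt vstmt { "vect__4.7" };
  node.push_vec_def (&vstmt);              // newly emitted vector statement
  node.push_vec_def ("vect_cst__12");      // pre-existing def, stored as-is
  assert (node.vec_defs[0] == "vect__4.7"
	  && node.vec_defs[1] == "vect_cst__12");
  return 0;
}

Storing the def rather than the statement makes internal, external and constant defs uniform, which is what lets the accessors below collapse.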
/* Recursively free the memory allocated for the SLP tree rooted at NODE. */
void
tree
vect_get_slp_vect_def (slp_tree slp_node, unsigned i)
{
- if (SLP_TREE_VEC_STMTS (slp_node).exists ())
- return gimple_get_lhs (SLP_TREE_VEC_STMTS (slp_node)[i]);
- else
- return SLP_TREE_VEC_DEFS (slp_node)[i];
+ return SLP_TREE_VEC_DEFS (slp_node)[i];
}
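With internal defs now stored as trees as well, the accessor reduces to a plain index into SLP_TREE_VEC_DEFS; callers that still need the defining statement recover it via SSA_NAME_DEF_STMT, as several hunks above do. A toy sketch of that def-to-statement direction, where the def_stmt_of map stands in for the link a real SSA name carries intrinsically (stand-in names, not GCC API):

// Toy sketch: store only defs, recover the defining statement on demand.
#include <cassert>
#include <map>
#include <string>
#include <vector>

struct toy_stmt { std::string lhs; };

// Stand-in for SSA_NAME_DEF_STMT; real SSA names carry this link directly.
std::map<std::string, toy_stmt *> def_stmt_of;

int main ()
{
  toy_stmt s { "vect__4.7" };
  def_stmt_of[s.lhs] = &s;

  std::vector<std::string> vec_defs { s.lhs };  // SLP_TREE_VEC_DEFS
  std::string def = vec_defs[0];                // vect_get_slp_vect_def
  toy_stmt *stmt = def_stmt_of.at (def);        // SSA_NAME_DEF_STMT
  assert (stmt == &s);
  return 0;
}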
/* Get the vectorized definitions of SLP_NODE in *VEC_DEFS. */
vect_get_slp_defs (slp_tree slp_node, vec<tree> *vec_defs)
{
vec_defs->create (SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node));
- if (SLP_TREE_DEF_TYPE (slp_node) == vect_internal_def)
- {
- unsigned j;
- gimple *vec_def_stmt;
- FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), j, vec_def_stmt)
- vec_defs->quick_push (gimple_get_lhs (vec_def_stmt));
- }
- else
- vec_defs->splice (SLP_TREE_VEC_DEFS (slp_node));
+ vec_defs->splice (SLP_TREE_VEC_DEFS (slp_node));
}
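Since both branches of the old function now hold the same representation, the body becomes a single bulk copy. A standalone std::vector analogue of the create-plus-splice pair (get_slp_defs here is an illustrative stand-in, not the GCC function):

// Toy sketch of the simplified vect_get_slp_defs: one bulk append
// replaces the per-statement gimple_get_lhs loop.
#include <cassert>
#include <string>
#include <vector>

void get_slp_defs (const std::vector<std::string> &node_defs,
		   std::vector<std::string> *vec_defs)
{
  vec_defs->reserve (node_defs.size ());           // vec::create
  vec_defs->insert (vec_defs->end (),              // vec::splice
		    node_defs.begin (), node_defs.end ());
}

int main ()
{
  std::vector<std::string> node_defs { "vect__1.3", "vect__2.4" };
  std::vector<std::string> out;
  get_slp_defs (node_defs, &out);
  assert (out == node_defs);
  return 0;
}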
/* Get N vectorized definitions for SLP_NODE. */
- /* Initialize the vect stmts of NODE to properly insert the generated
-    stmts later. */
+ /* Initialize the vect defs of NODE to properly store the generated
+    defs later. */
if (! analyze_only)
- for (unsigned i = SLP_TREE_VEC_STMTS (node).length (); i < nstmts; i++)
- SLP_TREE_VEC_STMTS (node).quick_push (NULL);
+ for (unsigned i = SLP_TREE_VEC_DEFS (node).length (); i < nstmts; i++)
+ SLP_TREE_VEC_DEFS (node).quick_push (NULL_TREE);
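The loop above pre-fills the defs vector with NULL_TREE placeholders so the permutation code below can store each result at its slot index (vect_stmts_counter) in whatever order it is produced. A standalone sketch of the scheme, with empty strings standing in for NULL_TREE:

// Toy sketch of the pre-size-then-index-store scheme used for permutes.
#include <cassert>
#include <string>
#include <vector>

int main ()
{
  const unsigned nstmts = 4;
  std::vector<std::string> vec_defs;            // SLP_TREE_VEC_DEFS (node)
  for (unsigned i = vec_defs.size (); i < nstmts; i++)
    vec_defs.push_back ("");                    // NULL_TREE placeholder

  unsigned vect_stmts_counter = 0;
  vec_defs[vect_stmts_counter++] = "vect_perm__9";  // new permute def
  vec_defs[vect_stmts_counter++] = "vect__3.5";     // identity: existing def
  assert (vec_defs[0] == "vect_perm__9" && vec_defs[3].empty ());
  return 0;
}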
/* Generate permutation masks for every NODE. Number of masks for each NODE
is equal to GROUP_SIZE.
}
- /* Store the vector statement in NODE. */
- SLP_TREE_VEC_STMTS (node)[vect_stmts_counter++] = perm_stmt;
+ /* Store the vector def in NODE. */
+ SLP_TREE_VEC_DEFS (node)[vect_stmts_counter++] = perm_dest;
}
}
else if (!analyze_only)
tree first_vec = dr_chain[first_vec_index + ri];
/* If mask was NULL_TREE generate the requested
identity transform. */
- gimple *perm_stmt = SSA_NAME_DEF_STMT (first_vec);
if (dce_chain)
bitmap_set_bit (used_defs, first_vec_index + ri);
- /* Store the vector statement in NODE. */
- SLP_TREE_VEC_STMTS (node)[vect_stmts_counter++] = perm_stmt;
+ /* Store the vector def in NODE. */
+ SLP_TREE_VEC_DEFS (node)[vect_stmts_counter++] = first_vec;
}
}
}
vect_finish_stmt_generation (vinfo, NULL, perm_stmt, gsi);
- /* Store the vector statement in NODE. */
- SLP_TREE_VEC_STMTS (node).quick_push (perm_stmt);
+ /* Store the vector def in NODE. */
+ node->push_vec_def (perm_stmt);
}
/* Subroutine of vectorizable_slp_permutation. Check whether the target
slp_tree child;
/* For existing vectors there's nothing to do. */
- if (SLP_TREE_VEC_DEFS (node).exists ())
+ if (SLP_TREE_DEF_TYPE (node) == vect_external_def
+ && SLP_TREE_VEC_DEFS (node).exists ())
return;
- gcc_assert (SLP_TREE_VEC_STMTS (node).is_empty ());
+ gcc_assert (SLP_TREE_VEC_DEFS (node).is_empty ());
/* Vectorize externals and constants. */
if (SLP_TREE_DEF_TYPE (node) == vect_constant_def
stmt_vec_info stmt_info = SLP_TREE_REPRESENTATIVE (node);
gcc_assert (SLP_TREE_NUMBER_OF_VEC_STMTS (node) != 0);
- SLP_TREE_VEC_STMTS (node).create (SLP_TREE_NUMBER_OF_VEC_STMTS (node));
+ SLP_TREE_VEC_DEFS (node).create (SLP_TREE_NUMBER_OF_VEC_STMTS (node));
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
- reduction PHI but we still have SLP_TREE_NUM_VEC_STMTS
+ reduction PHI but we still have SLP_TREE_NUMBER_OF_VEC_STMTS
set so the representation isn't perfect. Resort to the
last scalar def here. */
- if (SLP_TREE_VEC_STMTS (child).is_empty ())
+ if (SLP_TREE_VEC_DEFS (child).is_empty ())
{
gcc_assert (STMT_VINFO_TYPE (SLP_TREE_REPRESENTATIVE (child))
== cycle_phi_info_type);
??? Unless we have a load permutation applied and that
figures to re-use an earlier generated load. */
unsigned j;
- gimple *vstmt;
- FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (child), j, vstmt)
- if (!last_stmt
- || vect_stmt_dominates_stmt_p (last_stmt, vstmt))
- last_stmt = vstmt;
+ tree vdef;
+ FOR_EACH_VEC_ELT (SLP_TREE_VEC_DEFS (child), j, vdef)
+ {
+ gimple *vstmt = SSA_NAME_DEF_STMT (vdef);
+ if (!last_stmt
+ || vect_stmt_dominates_stmt_p (last_stmt, vstmt))
+ last_stmt = vstmt;
+ }
}
else if (!SLP_TREE_VECTYPE (child))
{
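The rewritten scan above has to map each stored def back to its defining statement before the dominance test, since the node no longer holds statements. A toy version in which statement order within one block stands in for vect_stmt_dominates_stmt_p (toy types and def_stmt_of are illustrative stand-ins):

// Toy sketch of the last_stmt scan: map each stored def back to its
// defining statement, keep the one emitted last (dominated by no other).
#include <cassert>
#include <map>
#include <string>
#include <vector>

struct toy_stmt { std::string lhs; int order; };  // order: position in block

std::map<std::string, toy_stmt *> def_stmt_of;    // SSA_NAME_DEF_STMT

int main ()
{
  toy_stmt a { "vect__1.3", 0 }, b { "vect__2.4", 5 };
  def_stmt_of[a.lhs] = &a;
  def_stmt_of[b.lhs] = &b;

  std::vector<std::string> child_defs { a.lhs, b.lhs };
  toy_stmt *last_stmt = nullptr;
  for (const std::string &vdef : child_defs)
    {
      toy_stmt *vstmt = def_stmt_of.at (vdef);
      if (!last_stmt || last_stmt->order < vstmt->order)  // dominance test
	last_stmt = vstmt;
    }
  assert (last_stmt == &b);
  return 0;
}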
{
if (SLP_TREE_NUMBER_OF_VEC_STMTS (node) == 1)
{
- gimple *child_stmt = SLP_TREE_VEC_STMTS (node)[0];
- tree vect_lhs = gimple_get_lhs (child_stmt);
+ tree vect_lhs = SLP_TREE_VEC_DEFS (node)[0];
tree root_lhs = gimple_get_lhs (instance->root_stmts[0]->stmt);
if (!useless_type_conversion_p (TREE_TYPE (root_lhs),
TREE_TYPE (vect_lhs)))
else if (SLP_TREE_NUMBER_OF_VEC_STMTS (node) > 1)
{
int nelts = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
- gimple *child_stmt;
+ tree child_def;
int j;
vec<constructor_elt, va_gc> *v;
vec_alloc (v, nelts);
/* A CTOR can handle V16HI composition from VNx8HI so we
do not need to convert vector elements if the types
do not match. */
- FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (node), j, child_stmt)
- CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
- gimple_get_lhs (child_stmt));
+ FOR_EACH_VEC_ELT (SLP_TREE_VEC_DEFS (node), j, child_def)
+ CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, child_def);
tree lhs = gimple_get_lhs (instance->root_stmts[0]->stmt);
tree rtype
= TREE_TYPE (gimple_assign_rhs1 (instance->root_stmts[0]->stmt));
child = SLP_TREE_CHILDREN (phi_node)[dest_idx];
if (!child || SLP_TREE_DEF_TYPE (child) != vect_internal_def)
continue;
- unsigned n = SLP_TREE_VEC_STMTS (phi_node).length ();
+ unsigned n = SLP_TREE_VEC_DEFS (phi_node).length ();
/* Simply fill all args. */
if (STMT_VINFO_DEF_TYPE (SLP_TREE_REPRESENTATIVE (phi_node))
!= vect_first_order_recurrence)
for (unsigned i = 0; i < n; ++i)
- add_phi_arg (as_a <gphi *> (SLP_TREE_VEC_STMTS (phi_node)[i]),
- vect_get_slp_vect_def (child, i),
- e, gimple_phi_arg_location (phi, dest_idx));
+ {
+ tree phidef = SLP_TREE_VEC_DEFS (phi_node)[i];
+ gphi *phi = as_a <gphi *> (SSA_NAME_DEF_STMT (phidef));
+ add_phi_arg (phi, vect_get_slp_vect_def (child, i),
+ e, gimple_phi_arg_location (phi, dest_idx));
+ }
else
{
/* Unless it is a first order recurrence which needs
args filled in for both the PHI node and the permutes. */
- gimple *perm = SLP_TREE_VEC_STMTS (phi_node)[0];
+ gimple *perm
+ = SSA_NAME_DEF_STMT (SLP_TREE_VEC_DEFS (phi_node)[0]);
gimple *rphi = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (perm));
add_phi_arg (as_a <gphi *> (rphi),
vect_get_slp_vect_def (child, n - 1),
e, gimple_phi_arg_location (phi, dest_idx));
for (unsigned i = 0; i < n; ++i)
{
- gimple *perm = SLP_TREE_VEC_STMTS (phi_node)[i];
+ gimple *perm
+ = SSA_NAME_DEF_STMT (SLP_TREE_VEC_DEFS (phi_node)[i]);
if (i > 0)
gimple_assign_set_rhs1 (perm,
vect_get_slp_vect_def (child, i - 1));
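In the first-order recurrence case the node's defs name the permute statements, so each permute is first recovered from its def and then has its first operand rewired to the previous copy's def. A toy sketch of that fix-up pass under the same stand-in types (illustrative, not GCC API):

// Toy sketch of the recurrence fix-up: recover each permute statement
// from its stored def, then chain rhs1 to the previous copy's def.
#include <cassert>
#include <map>
#include <string>
#include <vector>

struct toy_stmt { std::string lhs, rhs1; };     // permute: lhs = perm (rhs1)

std::map<std::string, toy_stmt *> def_stmt_of;  // SSA_NAME_DEF_STMT

int main ()
{
  toy_stmt p0 { "vect_perm__7", "" }, p1 { "vect_perm__8", "" };
  def_stmt_of[p0.lhs] = &p0;
  def_stmt_of[p1.lhs] = &p1;

  std::vector<std::string> phi_defs { p0.lhs, p1.lhs };  // SLP_TREE_VEC_DEFS
  std::vector<std::string> child_defs { "vect__1.3", "vect__2.4" };

  for (unsigned i = 0; i < phi_defs.size (); ++i)
    {
      toy_stmt *perm = def_stmt_of.at (phi_defs[i]);     // def -> stmt
      if (i > 0)                                         // chain the copies
	perm->rhs1 = child_defs[i - 1];
    }
  assert (p1.rhs1 == "vect__1.3");
  return 0;
}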
vectype, tem2));
vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
new_stmt = call;
}
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
}
continue;
}
gimple_call_set_lhs (call, new_temp);
gimple_call_set_nothrow (call, true);
vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
- SLP_TREE_VEC_STMTS (slp_node).quick_push (call);
+ slp_node->push_vec_def (call);
}
continue;
}
vectors in SLP_NODE or in vector info of the scalar statement
(or in STMT_VINFO_RELATED_STMT chain). */
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
new_stmt = SSA_NAME_DEF_STMT (vop0);
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
vectors in SLP_NODE or in vector info of the scalar statement
(or in STMT_VINFO_RELATED_STMT chain). */
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
gimple_assign_set_lhs (new_stmt, new_temp);
vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
}
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp);
if (slp)
for (j = 0; j < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); ++j)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
{
for (j = 0; j < ncopies; ++j)
if (slp_perm)
dr_chain.quick_push (gimple_assign_lhs (new_stmt));
else
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
}
else
{
/* Check if the chain of loads is already vectorized. */
if (STMT_VINFO_VEC_STMTS (first_stmt_info).exists ()
- /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
+ /* For SLP we would need to copy over SLP_TREE_VEC_DEFS.
??? But we can only do so if there is exactly one
as we have no way to get at the rest. Leave the CSE
opportunity alone.
/* Store vector loads in the corresponding SLP_NODE. */
if (!costing_p && slp && !slp_perm)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
/* With SLP permutation we load the gaps as well, without
we need to skip the gaps after we manage to fully load
vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
}
}
if (slp_node)
- SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
else
STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
_slp_tree ();
~_slp_tree ();
+ void push_vec_def (gimple *def);
+ void push_vec_def (tree def) { vec_defs.quick_push (def); }
+
/* Nodes that contain def-stmts of this node statements operands. */
vec<slp_tree> children;
lane_permutation_t lane_permutation;
tree vectype;
- /* Vectorized stmt/s. */
- vec<gimple *> vec_stmts;
+ /* Vectorized defs. */
vec<tree> vec_defs;
/* Number of vector stmts that are created to replace the group of scalar
stmts. It is calculated during the transformation phase as the number of
#define SLP_TREE_SCALAR_STMTS(S) (S)->stmts
#define SLP_TREE_SCALAR_OPS(S) (S)->ops
#define SLP_TREE_REF_COUNT(S) (S)->refcnt
-#define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts
#define SLP_TREE_VEC_DEFS(S) (S)->vec_defs
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation