}
else if (vls_type != VLS_STORE_INVARIANT)
return;
- *prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, stmt_info,
+ *prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
slp_node, 0, vect_prologue);
};
if (nstores > 1)
inside_cost
+= record_stmt_cost (cost_vec, n_adjacent_stores,
- vec_to_scalar, stmt_info, slp_node,
- 0, vect_body);
+ vec_to_scalar, slp_node, 0, vect_body);
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
                 "vect_model_store_cost: inside_cost = %d, "
                 "prologue_cost = %d .\n", inside_cost, prologue_cost);
{
if (costing_p && vls_type == VLS_STORE_INVARIANT)
prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
- stmt_info, slp_node, 0,
- vect_prologue);
+ slp_node, 0, vect_prologue);
else if (!costing_p)
{
/* Since the store is not grouped, DR_GROUP_SIZE is 1, and
unsigned int cnunits = vect_nunits_for_cost (vectype);
inside_cost
+= record_stmt_cost (cost_vec, cnunits, scalar_store,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
continue;
}
unsigned int cnunits = vect_nunits_for_cost (vectype);
inside_cost
+= record_stmt_cost (cost_vec, cnunits, scalar_store,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
continue;
}
/* For emulated scatter N offset vector element extracts
   (we assume the scalar offset add is consumed by the load). */
inside_cost
+= record_stmt_cost (cost_vec, cnunits, vec_to_scalar,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
/* N scalar stores plus extracting the elements. */
inside_cost
+= record_stmt_cost (cost_vec, cnunits, vec_to_scalar,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
inside_cost
+= record_stmt_cost (cost_vec, cnunits, scalar_store,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
continue;
}
int group_size = DR_GROUP_SIZE (first_stmt_info);
int nstmts = ceil_log2 (group_size) * group_size;
inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: "
{
if (costing_p)
inside_cost += record_stmt_cost (cost_vec, 1, vec_perm,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
else
{
tree perm_mask = perm_mask_for_reverse (vectype);
/* Spill. */
prologue_cost
+= record_stmt_cost (cost_vec, ncopies, vector_store,
- stmt_info, slp_node, 0, vect_epilogue);
+ slp_node, 0, vect_epilogue);
/* Loads. */
prologue_cost
+= record_stmt_cost (cost_vec, ncopies * nregs, scalar_load,
- stmt_info, slp_node, 0, vect_epilogue);
+ slp_node, 0, vect_epilogue);
}
}
}
enum vect_cost_model_location cost_loc
= hoist_p ? vect_prologue : vect_body;
unsigned int cost = record_stmt_cost (cost_vec, 1, scalar_load,
- stmt_info, slp_node, 0,
- cost_loc);
- cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, stmt_info,
+ slp_node, 0, cost_loc);
+ cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
slp_node, 0, cost_loc);
unsigned int prologue_cost = hoist_p ? cost : 0;
unsigned int inside_cost = hoist_p ? 0 : cost;
n_adjacent_loads++;
else
inside_cost += record_stmt_cost (cost_vec, 1, scalar_load,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
continue;
}
tree this_off = build_int_cst (TREE_TYPE (alias_off),
                               group_el * elsz + cst_offset);
{
if (costing_p)
inside_cost += record_stmt_cost (cost_vec, 1, vec_construct,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
else
{
tree vec_inv = build_constructor (lvectype, v);
vect_transform_slp_perm_load (vinfo, slp_node, vNULL, NULL, vf,
true, &n_perms, &n_loads);
inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
- first_stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
}
else
vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, gsi, vf,
                              false, &n_perms);
unsigned int cnunits = vect_nunits_for_cost (vectype);
inside_cost
= record_stmt_cost (cost_vec, cnunits, scalar_load,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
continue;
}
if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
unsigned int cnunits = vect_nunits_for_cost (vectype);
inside_cost
= record_stmt_cost (cost_vec, cnunits, scalar_load,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
continue;
}
poly_uint64 offset_nunits
  = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
/* For emulated gathers N offset vector element
   extracts (we assume the scalar offset add is
   consumed by the load). */
inside_cost = record_stmt_cost (cost_vec, const_nunits,
- vec_to_scalar, stmt_info,
+ vec_to_scalar,
slp_node, 0, vect_body);
/* N scalar loads plus gathering them into a
vector. */
inside_cost
= record_stmt_cost (cost_vec, const_nunits, scalar_load,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
inside_cost
= record_stmt_cost (cost_vec, 1, vec_construct,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
continue;
}
unsigned HOST_WIDE_INT const_offset_nunits
  = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype).to_constant ();
{
if (costing_p)
inside_cost = record_stmt_cost (cost_vec, 1, vec_perm,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
else
{
tree perm_mask = perm_mask_for_reverse (vectype);
vect_transform_slp_perm_load (vinfo, slp_node, vNULL, nullptr, vf,
true, &n_perms, nullptr);
inside_cost = record_stmt_cost (cost_vec, n_perms, vec_perm,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
}
else
{
int group_size = DR_GROUP_SIZE (first_stmt_info);
int nstmts = ceil_log2 (group_size) * group_size;
inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
                 "vect_model_load_cost: "
                 "strided group_size = %d .\n", group_size);