return;
}
- if (memory_access_type == VMAT_GATHER_SCATTER)
+ if (mat_gather_scatter_p (memory_access_type))
{
internal_fn ifn = (is_load
? IFN_MASK_GATHER_LOAD
gs_info->offset_vectype,
gs_info->scale,
elsvals)
- || gs_info->decl != NULL_TREE)
+ || memory_access_type == VMAT_GATHER_SCATTER_LEGACY)
vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
scalar_mask);
else
*memory_access_type = VMAT_STRIDED_SLP;
else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
{
- *memory_access_type = VMAT_GATHER_SCATTER;
slp_tree offset_node = SLP_TREE_CHILDREN (slp_node)[0];
tree offset_vectype = SLP_TREE_VECTYPE (offset_node);
memset (gs_info, 0, sizeof (gather_scatter_info));
offset_vectype, gs_info->scale,
&gs_info->ifn, &tem,
elsvals))
- /* GATHER_SCATTER_IFN_P. */;
+ *memory_access_type = VMAT_GATHER_SCATTER_IFN;
else if (vls_type == VLS_LOAD
? (targetm.vectorize.builtin_gather
&& (gs_info->decl
TREE_TYPE
(offset_vectype),
gs_info->scale))))
- /* GATHER_SCATTER_LEGACY_P. */;
+ *memory_access_type = VMAT_GATHER_SCATTER_LEGACY;
else
{
/* GATHER_SCATTER_EMULATED_P. */
"gather.\n");
return false;
}
+ *memory_access_type = VMAT_GATHER_SCATTER_EMULATED;
}
}
else
&& vect_use_strided_gather_scatters_p (stmt_info, vectype, loop_vinfo,
masked_p, gs_info, elsvals,
group_size, single_element_p))
- *memory_access_type = VMAT_GATHER_SCATTER;
+ *memory_access_type = VMAT_GATHER_SCATTER_IFN;
if (*memory_access_type == VMAT_CONTIGUOUS_DOWN
|| *memory_access_type == VMAT_CONTIGUOUS_REVERSE)
*poffset = neg_ldst_offset;
if (*memory_access_type == VMAT_ELEMENTWISE
- || (*memory_access_type == VMAT_GATHER_SCATTER
- && GATHER_SCATTER_LEGACY_P (*gs_info))
+ || *memory_access_type == VMAT_GATHER_SCATTER_LEGACY
|| *memory_access_type == VMAT_STRIDED_SLP
|| *memory_access_type == VMAT_INVARIANT)
{
}
else
{
- if (*memory_access_type == VMAT_GATHER_SCATTER
+ if (mat_gather_scatter_p (*memory_access_type)
&& !first_dr_info)
*misalignment = DR_MISALIGNMENT_UNKNOWN;
else
*alignment_support_scheme
= vect_supportable_dr_alignment
(vinfo, first_dr_info, vectype, *misalignment,
- *memory_access_type == VMAT_GATHER_SCATTER);
+ mat_gather_scatter_p (*memory_access_type));
}
if (overrun_p)
if (loop_vinfo
&& dr_safe_speculative_read_required (stmt_info)
&& LOOP_VINFO_EARLY_BREAKS (loop_vinfo)
- && (*memory_access_type == VMAT_GATHER_SCATTER
+ && (mat_gather_scatter_p (*memory_access_type)
|| *memory_access_type == VMAT_STRIDED_SLP))
{
if (dump_enabled_p ())
vector iteration or force masking. */
if (dr_safe_speculative_read_required (stmt_info)
&& (*alignment_support_scheme == dr_aligned
- && *memory_access_type != VMAT_GATHER_SCATTER))
+ && !mat_gather_scatter_p (*memory_access_type)))
{
/* We can only peel for loops, of course. */
gcc_checking_assert (loop_vinfo);
tree step = vect_dr_behavior (vinfo, dr_info)->step;
/* gather/scatter never reach here. */
- gcc_assert (memory_access_type != VMAT_GATHER_SCATTER);
+ gcc_assert (!mat_gather_scatter_p (memory_access_type));
/* When we support the SELECT_VL pattern, we dynamically adjust
   the memory address by the .SELECT_VL result.
return false;
}
else if (memory_access_type != VMAT_LOAD_STORE_LANES
- && (memory_access_type != VMAT_GATHER_SCATTER
- || (GATHER_SCATTER_LEGACY_P (gs_info)
+ && (!mat_gather_scatter_p (memory_access_type)
+ || (memory_access_type == VMAT_GATHER_SCATTER_LEGACY
&& !VECTOR_BOOLEAN_TYPE_P (mask_vectype))))
{
if (dump_enabled_p ())
"unsupported access type for masked store.\n");
return false;
}
- else if (memory_access_type == VMAT_GATHER_SCATTER
- && GATHER_SCATTER_EMULATED_P (gs_info))
+ else if (memory_access_type == VMAT_GATHER_SCATTER_EMULATED)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && memory_access_type != VMAT_GATHER_SCATTER);
+ && !mat_gather_scatter_p (memory_access_type));
if (grouped_store)
{
first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
aggr_type = NULL_TREE;
bump = NULL_TREE;
}
- else if (memory_access_type == VMAT_GATHER_SCATTER)
+ else if (mat_gather_scatter_p (memory_access_type))
{
aggr_type = elem_type;
if (!costing_p)
return true;
}
- if (memory_access_type == VMAT_GATHER_SCATTER)
+ if (mat_gather_scatter_p (memory_access_type))
{
gcc_assert (!grouped_store);
auto_vec<tree> vec_offsets;
unsigned align = get_object_alignment (DR_REF (first_dr_info->dr));
tree alias_align_ptr = build_int_cst (ref_type, align);
- if (GATHER_SCATTER_IFN_P (gs_info))
+ if (memory_access_type == VMAT_GATHER_SCATTER_IFN)
{
if (costing_p)
{
vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
new_stmt = call;
}
- else if (GATHER_SCATTER_LEGACY_P (gs_info))
+ else if (memory_access_type == VMAT_GATHER_SCATTER_LEGACY)
{
/* The builtin decls path for scatter is legacy, x86 only. */
gcc_assert (nunits.is_constant ()
get_load_store_type. */
if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
&& !((memory_access_type == VMAT_ELEMENTWISE
- || memory_access_type == VMAT_GATHER_SCATTER)
+ || mat_gather_scatter_p (memory_access_type))
&& SLP_TREE_LANES (slp_node) == 1))
{
slp_perm = true;
return false;
}
else if (memory_access_type != VMAT_LOAD_STORE_LANES
- && memory_access_type != VMAT_GATHER_SCATTER)
+ && !mat_gather_scatter_p (memory_access_type))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported access type for masked load.\n");
return false;
}
- else if (memory_access_type == VMAT_GATHER_SCATTER
- && GATHER_SCATTER_EMULATED_P (gs_info))
+ else if (memory_access_type == VMAT_GATHER_SCATTER_EMULATED)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
if (dump_enabled_p ()
&& memory_access_type != VMAT_ELEMENTWISE
- && memory_access_type != VMAT_GATHER_SCATTER
+ && !mat_gather_scatter_p (memory_access_type)
&& memory_access_type != VMAT_STRIDED_SLP
&& memory_access_type != VMAT_INVARIANT
&& alignment_support_scheme != dr_aligned)
return true;
}
- if (memory_access_type == VMAT_GATHER_SCATTER)
+ if (mat_gather_scatter_p (memory_access_type))
grouped_load = false;
if (grouped_load
gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
&& !mask_node
&& !loop_masks)
- || memory_access_type == VMAT_GATHER_SCATTER
+ || mat_gather_scatter_p (memory_access_type)
|| alignment_support_scheme == dr_aligned
|| alignment_support_scheme == dr_unaligned_supported);
return true;
}
- if (memory_access_type == VMAT_GATHER_SCATTER)
+ if (mat_gather_scatter_p (memory_access_type))
{
gcc_assert (!grouped_load && !slp_perm);
/* 2. Create the vector-load in the loop. */
unsigned align = get_object_alignment (DR_REF (first_dr_info->dr));
tree alias_align_ptr = build_int_cst (ref_type, align);
- if (GATHER_SCATTER_IFN_P (gs_info))
+ if (memory_access_type == VMAT_GATHER_SCATTER_IFN)
{
if (costing_p)
{
new_stmt = call;
data_ref = NULL_TREE;
}
- else if (GATHER_SCATTER_LEGACY_P (gs_info))
+ else if (memory_access_type == VMAT_GATHER_SCATTER_LEGACY)
{
/* The builtin decls path for gather is legacy, x86 only. */
gcc_assert (!final_len && nunits.is_constant ());
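
For readers of this excerpt: the hunks above rely on VMAT_GATHER_SCATTER having been split into three enumerators plus a helper predicate that covers all of them. Below is a minimal sketch of the assumed shape; the enumerator list is limited to members referenced in the hunks, and the ordering and explicit-comparison body of mat_gather_scatter_p are illustrative assumptions, not the actual definitions in tree-vectorizer.h.

/* Illustrative sketch only: enumerators restricted to those seen in the
   hunks above; the predicate body is an assumed implementation.  */

enum vect_memory_access_type {
  VMAT_INVARIANT,
  VMAT_CONTIGUOUS_DOWN,
  VMAT_CONTIGUOUS_REVERSE,
  VMAT_LOAD_STORE_LANES,
  VMAT_ELEMENTWISE,
  VMAT_STRIDED_SLP,
  /* The former VMAT_GATHER_SCATTER, split by implementation strategy.  */
  VMAT_GATHER_SCATTER_LEGACY,
  VMAT_GATHER_SCATTER_IFN,
  VMAT_GATHER_SCATTER_EMULATED
};

/* Return true if TYPE is any of the gather/scatter access types.  */
static inline bool
mat_gather_scatter_p (vect_memory_access_type type)
{
  return (type == VMAT_GATHER_SCATTER_LEGACY
	  || type == VMAT_GATHER_SCATTER_IFN
	  || type == VMAT_GATHER_SCATTER_EMULATED);
}

Call sites that only care whether an access is any kind of gather/scatter use the predicate (as in the alignment and grouped-access checks above), while sites that depend on how the access is implemented compare against the specific IFN, LEGACY, or EMULATED enumerator, replacing the old GATHER_SCATTER_*_P checks on gs_info.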