if (STMT_VINFO_DATA_REF (stmt_info)
&& DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
{
- if (gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt))
- gcc_assert (gimple_call_internal_p (stmt, IFN_MASK_LOAD)
- || gimple_call_internal_p (stmt, IFN_GATHER_LOAD)
- || gimple_call_internal_p (stmt, IFN_MASK_GATHER_LOAD)
- || gimple_call_internal_p (stmt, IFN_MASK_LEN_GATHER_LOAD));
- else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+ if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
gcc_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
else
{
load_permutation.create (group_size);
stmt_vec_info first_stmt_info
= DR_GROUP_FIRST_ELEMENT (SLP_TREE_SCALAR_STMTS (node)[0]);
+ bool any_permute = false;
FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load_info)
{
int load_place;
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
load_place = vect_get_place_in_interleaving_chain
		      (load_info, first_stmt_info);
else
load_place = 0;
gcc_assert (load_place != -1);
- load_permutation.safe_push (load_place);
+ any_permute |= load_place != j;
+ load_permutation.quick_push (load_place);
+ }
+
+ if (gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt))
+ {
+ gcc_assert (gimple_call_internal_p (stmt, IFN_MASK_LOAD)
+ || gimple_call_internal_p (stmt, IFN_GATHER_LOAD)
+ || gimple_call_internal_p (stmt, IFN_MASK_GATHER_LOAD)
+ || gimple_call_internal_p (stmt,
+ IFN_MASK_LEN_GATHER_LOAD));
+ load_permutation.release ();
+		  /* We cannot handle permuted masked loads, see PR114375.  */
+ if (any_permute
+ || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
+ && DR_GROUP_SIZE (first_stmt_info) != group_size)
+ || STMT_VINFO_STRIDED_P (stmt_info))
+ {
+ matches[0] = false;
+ return NULL;
+ }
+ }
+ else
+ {
+ SLP_TREE_LOAD_PERMUTATION (node) = load_permutation;
+ return node;
}
- SLP_TREE_LOAD_PERMUTATION (node) = load_permutation;
- return node;
}
}
else if (gimple_assign_single_p (stmt_info->stmt)