if (compress_point < 0)
return false;
- /* It must be series increasing from compress point. */
- if (!d->perm.series_p (compress_point, 1, d->perm[compress_point], 1))
- return false;
-
/* We can only apply compress approach when all index values from 0 to
compress point are increasing. */
for (int i = 1; i < compress_point; i++)
- if (known_le (d->perm[i], d->perm[i - 1]))
+ if (maybe_le (d->perm[i], d->perm[i - 1]))
+ return false;
+
+  /* It must be a series increasing by 1 from the compress point.  */
+ for (int i = 1 + compress_point; i < vlen; i++)
+ if (maybe_ne (d->perm[i], d->perm[i - 1] + 1))
return false;
/* Success! */
if (need_slideup_p)
{
int slideup_cnt = vlen - (d->perm[vlen - 1].to_constant () % vlen) - 1;
- rtx ops[] = {d->target, d->op1, gen_int_mode (slideup_cnt, Pmode)};
+ merge = gen_reg_rtx (vmode);
+ rtx ops[] = {merge, d->op1, gen_int_mode (slideup_cnt, Pmode)};
insn_code icode = code_for_pred_slide (UNSPEC_VSLIDEUP, vmode);
emit_vlmax_insn (icode, BINARY_OP, ops);
- merge = d->target;
}
insn_code icode = code_for_pred_compress (vmode);
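
For readers outside the GCC tree, the predicate swap is the heart of the fix: the permutation indices here are poly_int64 values, and in gcc/poly-int.h known_le (a, b) holds only when a <= b is provable for every runtime vector length, while maybe_le (a, b) holds when it is true for at least one length. A bail-out guard therefore has to reject on maybe_le (the ordering *might* fail); rejecting only on known_le (the ordering *provably* fails) lets length-dependent selectors slip through. Below is a minimal constant-index model of the two corrected guards; compress_applicable_p is a hypothetical helper written for illustration, not GCC API:

#include <cassert>

/* Hypothetical constant-index model of the two guards above; perm,
   vlen and compress_point mirror d->perm, vlen and compress_point
   in the hunk.  Not part of riscv-v.cc.  */
static bool
compress_applicable_p (const int *perm, int vlen, int compress_point)
{
  /* Indices before the compress point must be strictly increasing;
     with poly_ints the maybe_le guard rejects anything that is not
     provably increasing.  */
  for (int i = 1; i < compress_point; i++)
    if (perm[i] <= perm[i - 1])
      return false;

  /* From the compress point on, indices must step by exactly 1.  */
  for (int i = 1 + compress_point; i < vlen; i++)
    if (perm[i] != perm[i - 1] + 1)
      return false;

  return true;
}

int
main ()
{
  /* First out-of-vector index at position 2; tail 4, 5 steps by 1.  */
  static const int ok[] = {0, 2, 4, 5};
  /* Interleave-low selector; tail 4, 1, 5 is not a series.  */
  static const int bad[] = {0, 4, 1, 5};

  assert (compress_applicable_p (ok, 4, 2));
  assert (!compress_applicable_p (bad, 4, 1));
  return 0;
}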
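
The second change in the hunk is independent: the slide-up used to write straight into d->target and then reuse that register as the merge operand of the final compress. Since d->target may overlap d->op0/d->op1, emitting an intermediate result into it can clobber a source the compress still needs to read; writing the slide-up into a fresh pseudo from gen_reg_rtx (vmode) sidesteps any overlap. That rationale is inferred from the shape of the change, not stated in the hunk itself.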
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv_zvfh_zfh_zvl512b -mabi=ilp32d -O3 -ftree-vectorize -std=c99 -fno-vect-cost-model" } */
+
+#include <stdint-gcc.h>
+#define TYPE uint64_t
+#define ITYPE int64_t
+
+void __attribute__ ((noinline, noclone))
+foo (TYPE *__restrict a, TYPE *__restrict b, TYPE *__restrict c,
+ TYPE *__restrict d, ITYPE n)
+{
+ for (ITYPE i = 0; i < n; ++i)
+ {
+ d[i * 3] = a[i];
+ d[i * 3 + 1] = b[i];
+ d[i * 3 + 2] = c[i];
+ }
+}
+
+/* We don't want vcompress.vv. */
+/* { dg-final { scan-assembler-not {vcompress\.vv} } } */
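
A note on what this test checks: the loop interleaves three streams into d with stride 3, so vectorizing the stores requires interleaving permutations, and such selectors alternate between input vectors rather than increasing monotonically up to a single compress point (the {0, 4, 1, 5} example above already fails the series check at the tail 4, 1, 5). Under the tightened guards the compress pattern should no longer match, which is why the test asserts that no vcompress.vv appears. The exact selectors the vectorizer builds here depend on its costing and the zvl512b target, but the dg-final directive pins down the intended outcome either way.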