1, /* fp_stmt_cost */
1, /* gather_load_cost */
1, /* scatter_store_cost */
- 1, /* vec_to_scalar_cost */
+ 2, /* vec_to_scalar_cost */
1, /* scalar_to_vec_cost */
- 1, /* permute_cost */
- 3, /* align_load_cost */
- 3, /* align_store_cost */
- 3, /* unalign_load_cost */
- 3, /* unalign_store_cost */
+ 2, /* permute_cost */
+ 1, /* align_load_cost */
+ 1, /* align_store_cost */
+ 1, /* unalign_load_cost */
+ 1, /* unalign_store_cost */
};
/* Generic costs for VLA vector operations. */
1, /* fp_stmt_cost */
1, /* gather_load_cost */
1, /* scatter_store_cost */
- 1, /* vec_to_scalar_cost */
+ 2, /* vec_to_scalar_cost */
1, /* scalar_to_vec_cost */
- 1, /* permute_cost */
- 3, /* align_load_cost */
- 3, /* align_store_cost */
- 3, /* unalign_load_cost */
- 3, /* unalign_store_cost */
+ 2, /* permute_cost */
+ 1, /* align_load_cost */
+ 1, /* align_store_cost */
+ 1, /* unalign_load_cost */
+ 1, /* unalign_store_cost */
},
};
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -mtune=generic-ooo -O3 -ftree-vectorize -fdump-tree-vect-details" } */
+
+#include <stdint-gcc.h>
+
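+/* The interleaved accesses in this loop should be vectorized with SLP
+   rather than with vlseg/vsseg segment load/store instructions (see the
+   dg-final checks below).  */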
+void
+f2 (uint64_t *__restrict y, uint64_t *__restrict x,
+    uint64_t *__restrict indices, uint64_t n)
+{
+  for (int64_t i = 0; i < n; ++i)
+    {
+      y[i * 2] = x[indices[i * 2]] + 1;
+      y[i * 2 + 1] = x[indices[i * 2 + 1]] + 2;
+    }
+}
+
+/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" vect } } */
+/* { dg-final { scan-assembler-not "vlseg" } } */
+/* { dg-final { scan-assembler-not "vsseg" } } */