This enables the new AARCH64_EXTRA_TUNE_AVOID_PRED_RMW tuning flag for
Neoverse V1, Neoverse V2, and Neoverse N2.  It is kept off for generic codegen.
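For context, this is a minimal sketch of how the backend typically tests an
extra tuning flag; the helper name below is hypothetical and for illustration
only, since the real consumer of the flag is introduced separately:

  /* Hypothetical helper (illustration only): checks whether the current
     tuning asks us to avoid predicate read-modify-write operations,
     using the usual aarch64_tune_params idiom.  */
  static bool
  aarch64_avoid_pred_rmw_p (void)
  {
    return (aarch64_tune_params.extra_tuning_flags
	    & AARCH64_EXTRA_TUNE_AVOID_PRED_RMW) != 0;
  }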
Note: the tests use "+sve" even though they live in aarch64-sve.exp because,
if the testsuite is run with SVE forced off (e.g. -march=armv8-a+nosve), the
intrinsics end up disabled: -march takes precedence over -mcpu even when
-mcpu comes later on the command line.  The pragma prevents the tests from
failing in such runs.
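Such a run can be reproduced with something like the following (illustrative
invocation; the exact board syntax varies between setups):

  make check-gcc RUNTESTFLAGS="aarch64-sve.exp=pred_clobber_*.c \
    --target_board=unix/-march=armv8-a+nosve"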
gcc/ChangeLog:
* config/aarch64/tuning_models/neoversen2.h (neoversen2_tunings): Add
AARCH64_EXTRA_TUNE_AVOID_PRED_RMW.
* config/aarch64/tuning_models/neoversev1.h (neoversev1_tunings): Add
AARCH64_EXTRA_TUNE_AVOID_PRED_RMW.
* config/aarch64/tuning_models/neoversev2.h (neoversev2_tunings): Add
AARCH64_EXTRA_TUNE_AVOID_PRED_RMW.
gcc/testsuite/ChangeLog:
* gcc.target/aarch64/sve/pred_clobber_1.c: New test.
* gcc.target/aarch64/sve/pred_clobber_2.c: New test.
* gcc.target/aarch64/sve/pred_clobber_3.c: New test.
* gcc.target/aarch64/sve/pred_clobber_4.c: New test.
(AARCH64_EXTRA_TUNE_CHEAP_SHIFT_EXTEND
| AARCH64_EXTRA_TUNE_CSE_SVE_VL_CONSTANTS
| AARCH64_EXTRA_TUNE_USE_NEW_VECTOR_COSTS
- | AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT), /* tune_flags. */
+ | AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT
+ | AARCH64_EXTRA_TUNE_AVOID_PRED_RMW), /* tune_flags. */
&generic_prefetch_tune,
AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
(AARCH64_EXTRA_TUNE_CSE_SVE_VL_CONSTANTS
| AARCH64_EXTRA_TUNE_USE_NEW_VECTOR_COSTS
| AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT
- | AARCH64_EXTRA_TUNE_CHEAP_SHIFT_EXTEND), /* tune_flags. */
+ | AARCH64_EXTRA_TUNE_CHEAP_SHIFT_EXTEND
+ | AARCH64_EXTRA_TUNE_AVOID_PRED_RMW), /* tune_flags. */
&generic_prefetch_tune,
AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
(AARCH64_EXTRA_TUNE_CHEAP_SHIFT_EXTEND
| AARCH64_EXTRA_TUNE_CSE_SVE_VL_CONSTANTS
| AARCH64_EXTRA_TUNE_USE_NEW_VECTOR_COSTS
- | AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT), /* tune_flags. */
+ | AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT
+ | AARCH64_EXTRA_TUNE_AVOID_PRED_RMW), /* tune_flags. */
&generic_prefetch_tune,
AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model. */
AARCH64_LDP_STP_POLICY_ALWAYS /* stp_policy_model. */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=neoverse-n2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#pragma GCC target "+sve"
+
+#include <arm_sve.h>
+
+extern void use(svbool_t);
+
+/*
+** foo:
+** ...
+** ptrue p([1-3]).b, all
+** cmplo p0.h, p\1/z, z0.h, z[0-9]+.h
+** ...
+*/
+void foo (svuint16_t a, uint16_t b)
+{
+ svbool_t p0 = svcmplt_n_u16 (svptrue_b16 (), a, b);
+ use (p0);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=neoverse-v2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#pragma GCC target "+sve"
+
+#include <arm_sve.h>
+
+extern void use(svbool_t);
+
+/*
+** foo:
+** ...
+** ptrue p([1-9][0-9]?).b, all
+** cmplo p0.h, p\1/z, z0.h, z[0-9]+.h
+** ...
+*/
+void foo (svuint16_t a, uint16_t b)
+{
+ svbool_t p0 = svcmplt_n_u16 (svptrue_b16 (), a, b);
+ use (p0);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -mcpu=neoverse-v1" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#pragma GCC target "+sve"
+
+#include <arm_sve.h>
+
+extern void use(svbool_t);
+
+/*
+** foo:
+** ...
+** ptrue p([1-9][0-9]?).b, all
+** cmplo p0.h, p\1/z, z0.h, z[0-9]+.h
+** ...
+*/
+void foo (svuint16_t a, uint16_t b)
+{
+ svbool_t p0 = svcmplt_n_u16 (svptrue_b16 (), a, b);
+ use (p0);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#pragma GCC target "+sve"
+
+#include <arm_sve.h>
+
+extern void use(svbool_t);
+
+/*
+** foo:
+** ...
+** ptrue p0.b, all
+** cmplo p0.h, p0/z, z0.h, z[0-9]+.h
+** ...
+*/
+void foo (svuint16_t a, uint16_t b)
+{
+ svbool_t p0 = svcmplt_n_u16 (svptrue_b16 (), a, b);
+ use (p0);
+}