AARCH64_FMV_FEATURE("sve2-sm4", SVE_SM4, (SVE2_SM4))
+/* SVE2.1 ("+sve2p1"): depends on SVE2.  The final field is the feature
+   string used for native CPU detection (empty here) — NOTE(review):
+   confirm whether a /proc/cpuinfo string should be supplied.  */
+AARCH64_OPT_EXTENSION("sve2p1", SVE2p1, (SVE2), (), (), "")
+
AARCH64_OPT_FMV_EXTENSION("sme", SME, (BF16, SVE2), (), (), "sme")
AARCH64_OPT_EXTENSION("memtag", MEMTAG, (), (), (), "")
DEF_SVE_FUNCTION (svsm4ekey, binary, s_unsigned, none)
#undef REQUIRED_EXTENSIONS
-#define REQUIRED_EXTENSIONS streaming_only (0)
+/* svclamp, svpsel_lane and svrevd were previously streaming-only (SME);
+   with +sve2p1 they are also available in non-streaming code.  */
+#define REQUIRED_EXTENSIONS sve_and_sme (AARCH64_FL_SVE2p1, 0)
DEF_SVE_FUNCTION (svclamp, clamp, all_integer, none)
DEF_SVE_FUNCTION (svpsel_lane, select_pred, all_pred_count, none)
DEF_SVE_FUNCTION (svrevd, unary, all_data, mxz)
(match_operand:SI 3 "register_operand" "Ucj")
(const_int BHSD_BITS)]
UNSPEC_PSEL))]
- "TARGET_STREAMING"
+ "TARGET_SVE2p1_OR_SME"
"psel\t%0, %1, %2.<bits_etype>[%w3, 0]"
)
(match_operand:SI 4 "const_int_operand"))
(const_int BHSD_BITS)]
UNSPEC_PSEL))]
- "TARGET_STREAMING
+ "TARGET_SVE2p1_OR_SME
&& UINTVAL (operands[4]) < 128 / <BHSD_BITS>"
"psel\t%0, %1, %2.<bits_etype>[%w3, %4]"
)
(match_operand:SVE_FULL_I 1 "register_operand")
(match_operand:SVE_FULL_I 2 "register_operand"))
(match_operand:SVE_FULL_I 3 "register_operand")))]
- "TARGET_STREAMING"
+ "TARGET_SVE2p1_OR_SME"
{@ [cons: =0, 1, 2, 3; attrs: movprfx]
[ w, %0, w, w; * ] <su>clamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
[ ?&w, w, w, w; yes ] movprfx\t%0, %1\;<su>clamp\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>
UNSPEC_PRED_X)
(match_operand:SVE_FULL_I 3 "register_operand"))]
UNSPEC_PRED_X))]
- "TARGET_STREAMING"
+ "TARGET_SVE2p1_OR_SME"
{@ [cons: =0, 1, 2, 3; attrs: movprfx]
[ w, %0, w, w; * ] #
[ ?&w, w, w, w; yes ] #
[(match_operand:SVE_FULL 2 "register_operand")]
UNSPEC_REVD_ONLY)]
UNSPEC_PRED_X))]
- "TARGET_STREAMING"
+ "TARGET_SVE2p1_OR_SME"
{@ [ cons: =0 , 1 , 2 ; attrs: movprfx ]
[ w , Upl , 0 ; * ] revd\t%0.q, %1/m, %2.q
[ ?&w , Upl , w ; yes ] movprfx\t%0, %2\;revd\t%0.q, %1/m, %2.q
UNSPEC_REVD_ONLY)
(match_operand:SVE_FULL 3 "register_operand")]
UNSPEC_SEL))]
- "TARGET_STREAMING"
+ "TARGET_SVE2p1_OR_SME"
{@ [ cons: =0 , 1 , 2 , 3 ; attrs: movprfx ]
[ w , Upl , w , 0 ; * ] revd\t%0.q, %1/m, %2.q
[ ?&w , Upl , w , w ; yes ] movprfx\t%0, %3\;revd\t%0.q, %1/m, %2.q
/* SVE2 SM4 instructions, enabled through +sve2-sm4. */
#define TARGET_SVE2_SM4 (AARCH64_HAVE_ISA (SVE2_SM4) && TARGET_NON_STREAMING)
+/* SVE2p1 instructions, enabled through +sve2p1.  Unlike the SVE2 crypto
+   macros above, this does not require TARGET_NON_STREAMING.  */
+#define TARGET_SVE2p1 AARCH64_HAVE_ISA (SVE2p1)
+
/* SME instructions, enabled through +sme.  Note that this does not
   imply anything about the state of PSTATE.SM; instructions that require
   SME and streaming mode should use TARGET_STREAMING instead.  */
/* fp8 instructions are enabled through +fp8. */
#define TARGET_FP8 AARCH64_HAVE_ISA (FP8)
+/* Combinatorial tests. */
+
+/* True if instructions shared by SVE2.1 and SME (e.g. PSEL, CLAMP, REVD)
+   are available.  There's no need to check TARGET_SME for streaming or
+   streaming-compatible functions, since streaming mode itself implies SME. */
+#define TARGET_SVE2p1_OR_SME (TARGET_SVE2p1 || TARGET_STREAMING)
+
/* Standard register usage. */
/* 31 64-bit general purpose registers R0-R30:
/* Standard register usage. */
/* 31 64-bit general purpose registers R0-R30:
Enable SVE2 aes instructions. This also enables SVE2 instructions.
@item sve2-sha3
Enable SVE2 sha3 instructions. This also enables SVE2 instructions.
+@item sve2p1
+Enable SVE2.1 instructions. This also enables SVE2 instructions.
@item tme
Enable the Transactional Memory Extension.
@item i8mm
--- /dev/null
+// { dg-options "-O" }
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sve2p1"
+
+/* min(max(a, b), c) should fold to a single [su]clamp whose first data
+   operand is tied to the destination, so no movprfx is expected.  */
+
+#define TEST(TYPE) \
+ TYPE \
+ tied1_##TYPE(TYPE a, TYPE b, TYPE c) \
+ { \
+ return svmin_x(svptrue_b8(), svmax_x(svptrue_b8(), a, b), c); \
+ } \
+ \
+ TYPE \
+ tied2_##TYPE(TYPE a, TYPE b, TYPE c) \
+ { \
+ return svmin_x(svptrue_b8(), svmax_x(svptrue_b8(), b, a), c); \
+ }
+
+TEST(svint8_t)
+TEST(svint16_t)
+TEST(svint32_t)
+TEST(svint64_t)
+
+TEST(svuint8_t)
+TEST(svuint16_t)
+TEST(svuint32_t)
+TEST(svuint64_t)
+
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.b, z1\.b, z2\.b\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.h, z1\.h, z2\.h\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.s, z1\.s, z2\.s\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.d, z1\.d, z2\.d\n} 2 } } */
+
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.b, z1\.b, z2\.b\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.h, z1\.h, z2\.h\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.s, z1\.s, z2\.s\n} 2 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.d, z1\.d, z2\.d\n} 2 } } */
+
+/* { dg-final { scan-assembler-not {\tmovprfx\t} } } */
--- /dev/null
+// { dg-options "-O" }
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sve2p1"
+
+/* Untied case: the clamp result must land in z0, so the register allocator
+   needs one movprfx z0, z1 per function.  */
+
+#define TEST(TYPE) \
+ TYPE \
+ untied_##TYPE(TYPE a, TYPE b, TYPE c, TYPE d) \
+ { \
+ return svmin_x(svptrue_b8(), svmax_x(svptrue_b8(), b, c), d); \
+ }
+
+TEST(svint8_t)
+TEST(svint16_t)
+TEST(svint32_t)
+TEST(svint64_t)
+
+TEST(svuint8_t)
+TEST(svuint16_t)
+TEST(svuint32_t)
+TEST(svuint64_t)
+
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.b, z2\.b, z3\.b\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.h, z2\.h, z3\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.s, z2\.s, z3\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tsclamp\tz0\.d, z2\.d, z3\.d\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.b, z2\.b, z3\.b\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.h, z2\.h, z3\.h\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.s, z2\.s, z3\.s\n} 1 } } */
+/* { dg-final { scan-assembler-times {\tuclamp\tz0\.d, z2\.d, z3\.d\n} 1 } } */
+
+/* { dg-final { scan-assembler-times {\tmovprfx\tz0, z1\n} 8 } } */
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* Register-allocation tests for svclamp_s16: sclamp ties its first data
+   operand to the destination.  */
+
+/*
+** clamp_s16_tied1:
+** sclamp z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s16_tied1, svint16_t,
+ z0 = svclamp_s16 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_s16_tied2:
+** sclamp z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s16_tied2, svint16_t,
+ z0 = svclamp_s16 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_s16_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** sclamp z0\.h, z2\.h, \1\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s16_tied3, svint16_t,
+ z0 = svclamp_s16 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_s16_untied:
+** movprfx z0, z1
+** sclamp z0\.h, z2\.h, z3\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s16_untied, svint16_t,
+ z0 = svclamp_s16 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* Register-allocation tests for svclamp_s32: sclamp ties its first data
+   operand to the destination.  */
+
+/*
+** clamp_s32_tied1:
+** sclamp z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s32_tied1, svint32_t,
+ z0 = svclamp_s32 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_s32_tied2:
+** sclamp z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s32_tied2, svint32_t,
+ z0 = svclamp_s32 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_s32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** sclamp z0\.s, z2\.s, \1\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s32_tied3, svint32_t,
+ z0 = svclamp_s32 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_s32_untied:
+** movprfx z0, z1
+** sclamp z0\.s, z2\.s, z3\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s32_untied, svint32_t,
+ z0 = svclamp_s32 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* Register-allocation tests for svclamp_s64: sclamp ties its first data
+   operand to the destination.  */
+
+/*
+** clamp_s64_tied1:
+** sclamp z0\.d, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s64_tied1, svint64_t,
+ z0 = svclamp_s64 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_s64_tied2:
+** sclamp z0\.d, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s64_tied2, svint64_t,
+ z0 = svclamp_s64 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_s64_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** sclamp z0\.d, z2\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s64_tied3, svint64_t,
+ z0 = svclamp_s64 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_s64_untied:
+** movprfx z0, z1
+** sclamp z0\.d, z2\.d, z3\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s64_untied, svint64_t,
+ z0 = svclamp_s64 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* Register-allocation tests for svclamp_s8: sclamp ties its first data
+   operand to the destination.  */
+
+/*
+** clamp_s8_tied1:
+** sclamp z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s8_tied1, svint8_t,
+ z0 = svclamp_s8 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_s8_tied2:
+** sclamp z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s8_tied2, svint8_t,
+ z0 = svclamp_s8 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_s8_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** sclamp z0\.b, z2\.b, \1\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s8_tied3, svint8_t,
+ z0 = svclamp_s8 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_s8_untied:
+** movprfx z0, z1
+** sclamp z0\.b, z2\.b, z3\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_s8_untied, svint8_t,
+ z0 = svclamp_s8 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* Register-allocation tests for svclamp_u16: uclamp ties its first data
+   operand to the destination.  */
+
+/*
+** clamp_u16_tied1:
+** uclamp z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u16_tied1, svuint16_t,
+ z0 = svclamp_u16 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_u16_tied2:
+** uclamp z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u16_tied2, svuint16_t,
+ z0 = svclamp_u16 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_u16_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** uclamp z0\.h, z2\.h, \1\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u16_tied3, svuint16_t,
+ z0 = svclamp_u16 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_u16_untied:
+** movprfx z0, z1
+** uclamp z0\.h, z2\.h, z3\.h
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u16_untied, svuint16_t,
+ z0 = svclamp_u16 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* Register-allocation tests for svclamp_u32: uclamp ties its first data
+   operand to the destination.  */
+
+/*
+** clamp_u32_tied1:
+** uclamp z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u32_tied1, svuint32_t,
+ z0 = svclamp_u32 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_u32_tied2:
+** uclamp z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u32_tied2, svuint32_t,
+ z0 = svclamp_u32 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_u32_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** uclamp z0\.s, z2\.s, \1\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u32_tied3, svuint32_t,
+ z0 = svclamp_u32 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_u32_untied:
+** movprfx z0, z1
+** uclamp z0\.s, z2\.s, z3\.s
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u32_untied, svuint32_t,
+ z0 = svclamp_u32 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* Register-allocation tests for svclamp_u64: uclamp ties its first data
+   operand to the destination.  */
+
+/*
+** clamp_u64_tied1:
+** uclamp z0\.d, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u64_tied1, svuint64_t,
+ z0 = svclamp_u64 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_u64_tied2:
+** uclamp z0\.d, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u64_tied2, svuint64_t,
+ z0 = svclamp_u64 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_u64_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** uclamp z0\.d, z2\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u64_tied3, svuint64_t,
+ z0 = svclamp_u64 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_u64_untied:
+** movprfx z0, z1
+** uclamp z0\.d, z2\.d, z3\.d
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u64_untied, svuint64_t,
+ z0 = svclamp_u64 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* Register-allocation tests for svclamp_u8: uclamp ties its first data
+   operand to the destination.  */
+
+/*
+** clamp_u8_tied1:
+** uclamp z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u8_tied1, svuint8_t,
+ z0 = svclamp_u8 (z0, z1, z2),
+ z0 = svclamp (z0, z1, z2))
+
+/*
+** clamp_u8_tied2:
+** uclamp z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u8_tied2, svuint8_t,
+ z0 = svclamp_u8 (z1, z0, z2),
+ z0 = svclamp (z1, z0, z2))
+
+/*
+** clamp_u8_tied3:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** uclamp z0\.b, z2\.b, \1\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u8_tied3, svuint8_t,
+ z0 = svclamp_u8 (z1, z2, z0),
+ z0 = svclamp (z1, z2, z0))
+
+/*
+** clamp_u8_untied:
+** movprfx z0, z1
+** uclamp z0\.b, z2\.b, z3\.b
+** ret
+*/
+TEST_UNIFORM_Z (clamp_u8_untied, svuint8_t,
+ z0 = svclamp_u8 (z1, z2, z3),
+ z0 = svclamp (z1, z2, z3))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* svpsel_lane_b16: the index register must be one of w12-w15 and the
+   immediate lane offset must be in [0, 7]; out-of-range offsets are
+   folded into the index register.  */
+
+/*
+** psel_lane_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p2_p7_0, svbool_t,
+ p0 = svpsel_lane_b16 (p2, p7, 0),
+ p0 = svpsel_lane_b16 (p2, p7, 0))
+
+/*
+** psel_lane_p2_p7_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p7, p8\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p7_p8_w11, svbool_t,
+ p2 = svpsel_lane_b16 (p7, p8, w11),
+ p2 = svpsel_lane_b16 (p7, p8, w11))
+
+/*
+** psel_lane_p7_p8_p13_w12:
+** psel p7, p8, p13\.h\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p7_p8_p13_w12, svbool_t,
+ p7 = svpsel_lane_b16 (p8, p13, w12),
+ p7 = svpsel_lane_b16 (p8, p13, w12))
+
+/*
+** psel_lane_p8_p13_p15_w15:
+** psel p8, p13, p15\.h\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p8_p13_p15_w15, svbool_t,
+ p8 = svpsel_lane_b16 (p13, p15, w15),
+ p8 = svpsel_lane_b16 (p13, p15, w15))
+
+/*
+** psel_lane_p13_p15_p0_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p15, p0\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p15_p0_w16, svbool_t,
+ p13 = svpsel_lane_b16 (p15, p0, w16),
+ p13 = svpsel_lane_b16 (p15, p0, w16))
+
+/*
+** psel_lane_p15_p13_p8_w12p1:
+** psel p15, p13, p8\.h\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p15_p13_p8_w12p1, svbool_t,
+ p15 = svpsel_lane_b16 (p13, p8, w12 + 1),
+ p15 = svpsel_lane_b16 (p13, p8, w12 + 1))
+
+/*
+** psel_lane_p13_p8_p7_w12p7:
+** psel p13, p8, p7\.h\[w12, 7\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p8_p7_w12p7, svbool_t,
+ p13 = svpsel_lane_b16 (p8, p7, w12 + 7),
+ p13 = svpsel_lane_b16 (p8, p7, w12 + 7))
+
+/*
+** psel_lane_p0_p0_p0_w12p8:
+** add (w[0-9]+), w12, #?8
+** psel p0, p0, p0\.h\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p0_p0_w12p8, svbool_t,
+ p0 = svpsel_lane_b16 (p0, p0, w12 + 8),
+ p0 = svpsel_lane_b16 (p0, p0, w12 + 8))
+
+/*
+** psel_lane_p15_p15_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p15, p15, p15\.h\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p15_p15_p15_w12m1, svbool_t,
+ p15 = svpsel_lane_b16 (p15, p15, w12 - 1),
+ p15 = svpsel_lane_b16 (p15, p15, w12 - 1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* svpsel_lane_b32: the index register must be one of w12-w15 and the
+   immediate lane offset must be in [0, 3]; out-of-range offsets are
+   folded into the index register.  */
+
+/*
+** psel_lane_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p2_p7_0, svbool_t,
+ p0 = svpsel_lane_b32 (p2, p7, 0),
+ p0 = svpsel_lane_b32 (p2, p7, 0))
+
+/*
+** psel_lane_p2_p7_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p7, p8\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p7_p8_w11, svbool_t,
+ p2 = svpsel_lane_b32 (p7, p8, w11),
+ p2 = svpsel_lane_b32 (p7, p8, w11))
+
+/*
+** psel_lane_p7_p8_p13_w12:
+** psel p7, p8, p13\.s\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p7_p8_p13_w12, svbool_t,
+ p7 = svpsel_lane_b32 (p8, p13, w12),
+ p7 = svpsel_lane_b32 (p8, p13, w12))
+
+/*
+** psel_lane_p8_p13_p15_w15:
+** psel p8, p13, p15\.s\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p8_p13_p15_w15, svbool_t,
+ p8 = svpsel_lane_b32 (p13, p15, w15),
+ p8 = svpsel_lane_b32 (p13, p15, w15))
+
+/*
+** psel_lane_p13_p15_p0_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p15, p0\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p15_p0_w16, svbool_t,
+ p13 = svpsel_lane_b32 (p15, p0, w16),
+ p13 = svpsel_lane_b32 (p15, p0, w16))
+
+/*
+** psel_lane_p15_p13_p8_w12p1:
+** psel p15, p13, p8\.s\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p15_p13_p8_w12p1, svbool_t,
+ p15 = svpsel_lane_b32 (p13, p8, w12 + 1),
+ p15 = svpsel_lane_b32 (p13, p8, w12 + 1))
+
+/*
+** psel_lane_p13_p8_p7_w12p3:
+** psel p13, p8, p7\.s\[w12, 3\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p8_p7_w12p3, svbool_t,
+ p13 = svpsel_lane_b32 (p8, p7, w12 + 3),
+ p13 = svpsel_lane_b32 (p8, p7, w12 + 3))
+
+/*
+** psel_lane_p0_p0_p0_w12p4:
+** add (w[0-9]+), w12, #?4
+** psel p0, p0, p0\.s\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p0_p0_w12p4, svbool_t,
+ p0 = svpsel_lane_b32 (p0, p0, w12 + 4),
+ p0 = svpsel_lane_b32 (p0, p0, w12 + 4))
+
+/*
+** psel_lane_p15_p15_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p15, p15, p15\.s\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p15_p15_p15_w12m1, svbool_t,
+ p15 = svpsel_lane_b32 (p15, p15, w12 - 1),
+ p15 = svpsel_lane_b32 (p15, p15, w12 - 1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* svpsel_lane_b64: the index register must be one of w12-w15 and the
+   immediate lane offset must be in [0, 1]; out-of-range offsets are
+   folded into the index register.  */
+
+/*
+** psel_lane_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p2_p7_0, svbool_t,
+ p0 = svpsel_lane_b64 (p2, p7, 0),
+ p0 = svpsel_lane_b64 (p2, p7, 0))
+
+/*
+** psel_lane_p2_p7_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p7, p8\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p7_p8_w11, svbool_t,
+ p2 = svpsel_lane_b64 (p7, p8, w11),
+ p2 = svpsel_lane_b64 (p7, p8, w11))
+
+/*
+** psel_lane_p7_p8_p13_w12:
+** psel p7, p8, p13\.d\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p7_p8_p13_w12, svbool_t,
+ p7 = svpsel_lane_b64 (p8, p13, w12),
+ p7 = svpsel_lane_b64 (p8, p13, w12))
+
+/*
+** psel_lane_p8_p13_p15_w15:
+** psel p8, p13, p15\.d\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p8_p13_p15_w15, svbool_t,
+ p8 = svpsel_lane_b64 (p13, p15, w15),
+ p8 = svpsel_lane_b64 (p13, p15, w15))
+
+/*
+** psel_lane_p13_p15_p0_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p15, p0\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p15_p0_w16, svbool_t,
+ p13 = svpsel_lane_b64 (p15, p0, w16),
+ p13 = svpsel_lane_b64 (p15, p0, w16))
+
+/*
+** psel_lane_p15_p13_p8_w12p1:
+** psel p15, p13, p8\.d\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p15_p13_p8_w12p1, svbool_t,
+ p15 = svpsel_lane_b64 (p13, p8, w12 + 1),
+ p15 = svpsel_lane_b64 (p13, p8, w12 + 1))
+
+/*
+** psel_lane_p0_p0_p0_w12p2:
+** add (w[0-9]+), w12, #?2
+** psel p0, p0, p0\.d\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p0_p0_w12p2, svbool_t,
+ p0 = svpsel_lane_b64 (p0, p0, w12 + 2),
+ p0 = svpsel_lane_b64 (p0, p0, w12 + 2))
+
+/*
+** psel_lane_p15_p15_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p15, p15, p15\.d\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p15_p15_p15_w12m1, svbool_t,
+ p15 = svpsel_lane_b64 (p15, p15, w12 - 1),
+ p15 = svpsel_lane_b64 (p15, p15, w12 - 1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* svpsel_lane_b8: the index register must be one of w12-w15 and the
+   immediate lane offset must be in [0, 15]; out-of-range offsets are
+   folded into the index register.  */
+
+/*
+** psel_lane_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p2_p7_0, svbool_t,
+ p0 = svpsel_lane_b8 (p2, p7, 0),
+ p0 = svpsel_lane_b8 (p2, p7, 0))
+
+/*
+** psel_lane_p2_p7_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p7, p8\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p7_p8_w11, svbool_t,
+ p2 = svpsel_lane_b8 (p7, p8, w11),
+ p2 = svpsel_lane_b8 (p7, p8, w11))
+
+/*
+** psel_lane_p7_p8_p13_w12:
+** psel p7, p8, p13\.b\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p7_p8_p13_w12, svbool_t,
+ p7 = svpsel_lane_b8 (p8, p13, w12),
+ p7 = svpsel_lane_b8 (p8, p13, w12))
+
+/*
+** psel_lane_p8_p13_p15_w15:
+** psel p8, p13, p15\.b\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p8_p13_p15_w15, svbool_t,
+ p8 = svpsel_lane_b8 (p13, p15, w15),
+ p8 = svpsel_lane_b8 (p13, p15, w15))
+
+/*
+** psel_lane_p13_p15_p0_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p15, p0\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p15_p0_w16, svbool_t,
+ p13 = svpsel_lane_b8 (p15, p0, w16),
+ p13 = svpsel_lane_b8 (p15, p0, w16))
+
+/*
+** psel_lane_p15_p13_p8_w12p1:
+** psel p15, p13, p8\.b\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p15_p13_p8_w12p1, svbool_t,
+ p15 = svpsel_lane_b8 (p13, p8, w12 + 1),
+ p15 = svpsel_lane_b8 (p13, p8, w12 + 1))
+
+/*
+** psel_lane_p13_p8_p7_w12p15:
+** psel p13, p8, p7\.b\[w12, 15\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p8_p7_w12p15, svbool_t,
+ p13 = svpsel_lane_b8 (p8, p7, w12 + 15),
+ p13 = svpsel_lane_b8 (p8, p7, w12 + 15))
+
+/*
+** psel_lane_p0_p0_p0_w12p16:
+** add (w[0-9]+), w12, #?16
+** psel p0, p0, p0\.b\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p0_p0_w12p16, svbool_t,
+ p0 = svpsel_lane_b8 (p0, p0, w12 + 16),
+ p0 = svpsel_lane_b8 (p0, p0, w12 + 16))
+
+/*
+** psel_lane_p15_p15_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p15, p15, p15\.b\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p15_p15_p15_w12m1, svbool_t,
+ p15 = svpsel_lane_b8 (p15, p15, w12 - 1),
+ p15 = svpsel_lane_b8 (p15, p15, w12 - 1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* svpsel_lane_c16 (svcount_t result): same index/immediate constraints
+   as the b16 form — index in w12-w15, offset in [0, 7].  */
+
+/*
+** psel_lane_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p2_p7_0, svcount_t,
+ p0 = svpsel_lane_c16 (p2, p7, 0),
+ p0 = svpsel_lane_c16 (p2, p7, 0))
+
+/*
+** psel_lane_p2_p0_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p0, p8\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p0_p8_w11, svcount_t,
+ p2 = svpsel_lane_c16 (p0, p8, w11),
+ p2 = svpsel_lane_c16 (p0, p8, w11))
+
+/*
+** psel_lane_p2_p13_p15_w12:
+** psel p2, p13, p15\.h\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p13_p15_w12, svcount_t,
+ p2 = svpsel_lane_c16 (p13, p15, w12),
+ p2 = svpsel_lane_c16 (p13, p15, w12))
+
+/*
+** psel_lane_p0_p13_p15_w15:
+** psel p0, p13, p15\.h\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p13_p15_w15, svcount_t,
+ p0 = svpsel_lane_c16 (p13, p15, w15),
+ p0 = svpsel_lane_c16 (p13, p15, w15))
+
+/*
+** psel_lane_p13_p0_p15_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p0, p15\.h\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p0_p15_w16, svcount_t,
+ p13 = svpsel_lane_c16 (p0, p15, w16),
+ p13 = svpsel_lane_c16 (p0, p15, w16))
+
+/*
+** psel_lane_p2_p13_p8_w12p1:
+** psel p2, p13, p8\.h\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p13_p8_w12p1, svcount_t,
+ p2 = svpsel_lane_c16 (p13, p8, w12 + 1),
+ p2 = svpsel_lane_c16 (p13, p8, w12 + 1))
+
+/*
+** psel_lane_p13_p0_p7_w12p7:
+** psel p13, p0, p7\.h\[w12, 7\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p0_p7_w12p7, svcount_t,
+ p13 = svpsel_lane_c16 (p0, p7, w12 + 7),
+ p13 = svpsel_lane_c16 (p0, p7, w12 + 7))
+
+/*
+** psel_lane_p0_p0_p15_w12p8:
+** add (w[0-9]+), w12, #?8
+** psel p0, p0, p15\.h\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p0_p15_w12p8, svcount_t,
+ p0 = svpsel_lane_c16 (p0, p15, w12 + 8),
+ p0 = svpsel_lane_c16 (p0, p15, w12 + 8))
+
+/*
+** psel_lane_p13_p13_p7_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p13, p13, p7\.h\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p13_p7_w12m1, svcount_t,
+ p13 = svpsel_lane_c16 (p13, p7, w12 - 1),
+ p13 = svpsel_lane_c16 (p13, p7, w12 - 1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* svpsel_lane_c32 (svcount_t result): same index/immediate constraints
+   as the b32 form — index in w12-w15, offset in [0, 3].  */
+
+/*
+** psel_lane_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p2_p7_0, svcount_t,
+ p0 = svpsel_lane_c32 (p2, p7, 0),
+ p0 = svpsel_lane_c32 (p2, p7, 0))
+
+/*
+** psel_lane_p2_p13_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p13, p8\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p13_p8_w11, svcount_t,
+ p2 = svpsel_lane_c32 (p13, p8, w11),
+ p2 = svpsel_lane_c32 (p13, p8, w11))
+
+/*
+** psel_lane_p0_p13_p15_w12:
+** psel p0, p13, p15\.s\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p13_p15_w12, svcount_t,
+ p0 = svpsel_lane_c32 (p13, p15, w12),
+ p0 = svpsel_lane_c32 (p13, p15, w12))
+
+/*
+** psel_lane_p2_p0_p15_w15:
+** psel p2, p0, p15\.s\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p0_p15_w15, svcount_t,
+ p2 = svpsel_lane_c32 (p0, p15, w15),
+ p2 = svpsel_lane_c32 (p0, p15, w15))
+
+/*
+** psel_lane_p13_p0_p7_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p0, p7\.s\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p0_p7_w16, svcount_t,
+ p13 = svpsel_lane_c32 (p0, p7, w16),
+ p13 = svpsel_lane_c32 (p0, p7, w16))
+
+/*
+** psel_lane_p2_p13_p8_w12p1:
+** psel p2, p13, p8\.s\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p13_p8_w12p1, svcount_t,
+ p2 = svpsel_lane_c32 (p13, p8, w12 + 1),
+ p2 = svpsel_lane_c32 (p13, p8, w12 + 1))
+
+/*
+** psel_lane_p13_p0_p7_w12p3:
+** psel p13, p0, p7\.s\[w12, 3\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p0_p7_w12p3, svcount_t,
+ p13 = svpsel_lane_c32 (p0, p7, w12 + 3),
+ p13 = svpsel_lane_c32 (p0, p7, w12 + 3))
+
+/*
+** psel_lane_p0_p0_p7_w12p4:
+** add (w[0-9]+), w12, #?4
+** psel p0, p0, p7\.s\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p0_p7_w12p4, svcount_t,
+ p0 = svpsel_lane_c32 (p0, p7, w12 + 4),
+ p0 = svpsel_lane_c32 (p0, p7, w12 + 4))
+
+/*
+** psel_lane_p13_p13_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p13, p13, p15\.s\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p13_p15_w12m1, svcount_t,
+ p13 = svpsel_lane_c32 (p13, p15, w12 - 1),
+ p13 = svpsel_lane_c32 (p13, p15, w12 - 1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* svpsel_lane_c64 (svcount_t result): same index/immediate constraints
+   as the b64 form — index in w12-w15, offset in [0, 1].  */
+
+/*
+** psel_lane_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p2_p7_0, svcount_t,
+ p0 = svpsel_lane_c64 (p2, p7, 0),
+ p0 = svpsel_lane_c64 (p2, p7, 0))
+
+/*
+** psel_lane_p2_p13_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p13, p8\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p13_p8_w11, svcount_t,
+ p2 = svpsel_lane_c64 (p13, p8, w11),
+ p2 = svpsel_lane_c64 (p13, p8, w11))
+
+/*
+** psel_lane_p2_p0_p15_w12:
+** psel p2, p0, p15\.d\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p0_p15_w12, svcount_t,
+ p2 = svpsel_lane_c64 (p0, p15, w12),
+ p2 = svpsel_lane_c64 (p0, p15, w12))
+
+/*
+** psel_lane_p0_p13_p15_w15:
+** psel p0, p13, p15\.d\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p13_p15_w15, svcount_t,
+ p0 = svpsel_lane_c64 (p13, p15, w15),
+ p0 = svpsel_lane_c64 (p13, p15, w15))
+
+/*
+** psel_lane_p13_p0_p15_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p13, p0, p15\.d\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p0_p15_w16, svcount_t,
+ p13 = svpsel_lane_c64 (p0, p15, w16),
+ p13 = svpsel_lane_c64 (p0, p15, w16))
+
+/*
+** psel_lane_p2_p13_p8_w12p1:
+** psel p2, p13, p8\.d\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p13_p8_w12p1, svcount_t,
+ p2 = svpsel_lane_c64 (p13, p8, w12 + 1),
+ p2 = svpsel_lane_c64 (p13, p8, w12 + 1))
+
+/*
+** psel_lane_p0_p0_p8_w12p2:
+** add (w[0-9]+), w12, #?2
+** psel p0, p0, p8\.d\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p0_p8_w12p2, svcount_t,
+ p0 = svpsel_lane_c64 (p0, p8, w12 + 2),
+ p0 = svpsel_lane_c64 (p0, p8, w12 + 2))
+
+/*
+** psel_lane_p13_p13_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p13, p13, p15\.d\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p13_p15_w12m1, svcount_t,
+ p13 = svpsel_lane_c64 (p13, p15, w12 - 1),
+ p13 = svpsel_lane_c64 (p13, p15, w12 - 1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/* svpsel_lane_c8 (svcount_t result): same index/immediate constraints
+   as the b8 form — index in w12-w15, offset in [0, 15].  */
+
+/*
+** psel_lane_p0_p2_p7_0:
+** mov [wx](1[2-5]), #?0
+** psel p0, p2, p7\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p2_p7_0, svcount_t,
+ p0 = svpsel_lane_c8 (p2, p7, 0),
+ p0 = svpsel_lane_c8 (p2, p7, 0))
+
+/*
+** psel_lane_p2_p0_p8_w11:
+** mov [wx](1[2-5]), [wx]11
+** psel p2, p0, p8\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p0_p8_w11, svcount_t,
+ p2 = svpsel_lane_c8 (p0, p8, w11),
+ p2 = svpsel_lane_c8 (p0, p8, w11))
+
+/*
+** psel_lane_p0_p13_p15_w12:
+** psel p0, p13, p15\.b\[w12, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p13_p15_w12, svcount_t,
+ p0 = svpsel_lane_c8 (p13, p15, w12),
+ p0 = svpsel_lane_c8 (p13, p15, w12))
+
+/*
+** psel_lane_p13_p0_p8_w15:
+** psel p13, p0, p8\.b\[w15, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p0_p8_w15, svcount_t,
+ p13 = svpsel_lane_c8 (p0, p8, w15),
+ p13 = svpsel_lane_c8 (p0, p8, w15))
+
+/*
+** psel_lane_p2_p13_p7_w16:
+** mov [wx](1[2-5]), [wx]16
+** psel p2, p13, p7\.b\[w\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p2_p13_p7_w16, svcount_t,
+ p2 = svpsel_lane_c8 (p13, p7, w16),
+ p2 = svpsel_lane_c8 (p13, p7, w16))
+
+/*
+** psel_lane_p0_p13_p8_w12p1:
+** psel p0, p13, p8\.b\[w12, 1\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p13_p8_w12p1, svcount_t,
+ p0 = svpsel_lane_c8 (p13, p8, w12 + 1),
+ p0 = svpsel_lane_c8 (p13, p8, w12 + 1))
+
+/*
+** psel_lane_p13_p2_p7_w12p15:
+** psel p13, p2, p7\.b\[w12, 15\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p2_p7_w12p15, svcount_t,
+ p13 = svpsel_lane_c8 (p2, p7, w12 + 15),
+ p13 = svpsel_lane_c8 (p2, p7, w12 + 15))
+
+/*
+** psel_lane_p0_p0_p15_w12p16:
+** add (w[0-9]+), w12, #?16
+** psel p0, p0, p15\.b\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p0_p0_p15_w12p16, svcount_t,
+ p0 = svpsel_lane_c8 (p0, p15, w12 + 16),
+ p0 = svpsel_lane_c8 (p0, p15, w12 + 16))
+
+/*
+** psel_lane_p13_p13_p15_w12m1:
+** sub (w[0-9]+), w12, #?1
+** psel p13, p13, p15\.b\[\1, 0\]
+** ret
+*/
+TEST_SELECT_P (psel_lane_p13_p13_p15_w12m1, svcount_t,
+ p13 = svpsel_lane_c8 (p13, p15, w12 - 1),
+ p13 = svpsel_lane_c8 (p13, p15, w12 - 1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_bf16_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_m_tied12, svbfloat16_t,
+ z0 = svrevd_bf16_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_bf16_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_m_tied1, svbfloat16_t,
+ z0 = svrevd_bf16_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_bf16_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_m_tied2, svbfloat16_t,
+ z0 = svrevd_bf16_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_bf16_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_m_untied, svbfloat16_t,
+ z0 = svrevd_bf16_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_bf16_z_tied1, svbfloat16_t,
+ z0 = svrevd_bf16_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_bf16_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_z_untied, svbfloat16_t,
+ z0 = svrevd_bf16_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_bf16_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_x_tied1, svbfloat16_t,
+ z0 = svrevd_bf16_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_bf16_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_bf16_x_untied, svbfloat16_t,
+ z0 = svrevd_bf16_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_f16_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_m_tied12, svfloat16_t,
+ z0 = svrevd_f16_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_f16_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_m_tied1, svfloat16_t,
+ z0 = svrevd_f16_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_f16_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_m_tied2, svfloat16_t,
+ z0 = svrevd_f16_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_f16_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_m_untied, svfloat16_t,
+ z0 = svrevd_f16_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_f16_z_tied1, svfloat16_t,
+ z0 = svrevd_f16_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_f16_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_z_untied, svfloat16_t,
+ z0 = svrevd_f16_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_f16_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_x_tied1, svfloat16_t,
+ z0 = svrevd_f16_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_f16_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f16_x_untied, svfloat16_t,
+ z0 = svrevd_f16_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_f32_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_m_tied12, svfloat32_t,
+ z0 = svrevd_f32_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_f32_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_m_tied1, svfloat32_t,
+ z0 = svrevd_f32_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_f32_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_m_tied2, svfloat32_t,
+ z0 = svrevd_f32_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_f32_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_m_untied, svfloat32_t,
+ z0 = svrevd_f32_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_f32_z_tied1, svfloat32_t,
+ z0 = svrevd_f32_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_f32_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_z_untied, svfloat32_t,
+ z0 = svrevd_f32_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_f32_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_x_tied1, svfloat32_t,
+ z0 = svrevd_f32_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_f32_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f32_x_untied, svfloat32_t,
+ z0 = svrevd_f32_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_f64_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_m_tied12, svfloat64_t,
+ z0 = svrevd_f64_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_f64_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_m_tied1, svfloat64_t,
+ z0 = svrevd_f64_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_f64_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_m_tied2, svfloat64_t,
+ z0 = svrevd_f64_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_f64_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_m_untied, svfloat64_t,
+ z0 = svrevd_f64_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_f64_z_tied1, svfloat64_t,
+ z0 = svrevd_f64_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_f64_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_z_untied, svfloat64_t,
+ z0 = svrevd_f64_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_f64_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_x_tied1, svfloat64_t,
+ z0 = svrevd_f64_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_f64_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_f64_x_untied, svfloat64_t,
+ z0 = svrevd_f64_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_s16_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_m_tied12, svint16_t,
+ z0 = svrevd_s16_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_s16_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_m_tied1, svint16_t,
+ z0 = svrevd_s16_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_s16_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_m_tied2, svint16_t,
+ z0 = svrevd_s16_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_s16_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_m_untied, svint16_t,
+ z0 = svrevd_s16_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_s16_z_tied1, svint16_t,
+ z0 = svrevd_s16_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_s16_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_z_untied, svint16_t,
+ z0 = svrevd_s16_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_s16_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_x_tied1, svint16_t,
+ z0 = svrevd_s16_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_s16_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s16_x_untied, svint16_t,
+ z0 = svrevd_s16_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_s32_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_m_tied12, svint32_t,
+ z0 = svrevd_s32_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_s32_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_m_tied1, svint32_t,
+ z0 = svrevd_s32_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_s32_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_m_tied2, svint32_t,
+ z0 = svrevd_s32_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_s32_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_m_untied, svint32_t,
+ z0 = svrevd_s32_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_s32_z_tied1, svint32_t,
+ z0 = svrevd_s32_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_s32_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_z_untied, svint32_t,
+ z0 = svrevd_s32_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_s32_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_x_tied1, svint32_t,
+ z0 = svrevd_s32_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_s32_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s32_x_untied, svint32_t,
+ z0 = svrevd_s32_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_s64_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_m_tied12, svint64_t,
+ z0 = svrevd_s64_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_s64_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_m_tied1, svint64_t,
+ z0 = svrevd_s64_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_s64_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_m_tied2, svint64_t,
+ z0 = svrevd_s64_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_s64_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_m_untied, svint64_t,
+ z0 = svrevd_s64_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_s64_z_tied1, svint64_t,
+ z0 = svrevd_s64_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_s64_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_z_untied, svint64_t,
+ z0 = svrevd_s64_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_s64_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_x_tied1, svint64_t,
+ z0 = svrevd_s64_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_s64_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s64_x_untied, svint64_t,
+ z0 = svrevd_s64_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_s8_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_m_tied12, svint8_t,
+ z0 = svrevd_s8_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_s8_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_m_tied1, svint8_t,
+ z0 = svrevd_s8_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_s8_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_m_tied2, svint8_t,
+ z0 = svrevd_s8_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_s8_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_m_untied, svint8_t,
+ z0 = svrevd_s8_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_s8_z_tied1, svint8_t,
+ z0 = svrevd_s8_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_s8_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_z_untied, svint8_t,
+ z0 = svrevd_s8_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_s8_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_x_tied1, svint8_t,
+ z0 = svrevd_s8_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_s8_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_s8_x_untied, svint8_t,
+ z0 = svrevd_s8_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_u16_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_m_tied12, svuint16_t,
+ z0 = svrevd_u16_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_u16_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_m_tied1, svuint16_t,
+ z0 = svrevd_u16_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_u16_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_m_tied2, svuint16_t,
+ z0 = svrevd_u16_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_u16_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_m_untied, svuint16_t,
+ z0 = svrevd_u16_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_u16_z_tied1, svuint16_t,
+ z0 = svrevd_u16_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_u16_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_z_untied, svuint16_t,
+ z0 = svrevd_u16_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_u16_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_x_tied1, svuint16_t,
+ z0 = svrevd_u16_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_u16_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u16_x_untied, svuint16_t,
+ z0 = svrevd_u16_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_u32_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_m_tied12, svuint32_t,
+ z0 = svrevd_u32_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_u32_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_m_tied1, svuint32_t,
+ z0 = svrevd_u32_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_u32_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_m_tied2, svuint32_t,
+ z0 = svrevd_u32_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_u32_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_m_untied, svuint32_t,
+ z0 = svrevd_u32_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_u32_z_tied1, svuint32_t,
+ z0 = svrevd_u32_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_u32_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_z_untied, svuint32_t,
+ z0 = svrevd_u32_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_u32_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_x_tied1, svuint32_t,
+ z0 = svrevd_u32_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_u32_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u32_x_untied, svuint32_t,
+ z0 = svrevd_u32_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_u64_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_m_tied12, svuint64_t,
+ z0 = svrevd_u64_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_u64_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_m_tied1, svuint64_t,
+ z0 = svrevd_u64_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_u64_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_m_tied2, svuint64_t,
+ z0 = svrevd_u64_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_u64_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_m_untied, svuint64_t,
+ z0 = svrevd_u64_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_u64_z_tied1, svuint64_t,
+ z0 = svrevd_u64_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_u64_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_z_untied, svuint64_t,
+ z0 = svrevd_u64_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_u64_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_x_tied1, svuint64_t,
+ z0 = svrevd_u64_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_u64_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u64_x_untied, svuint64_t,
+ z0 = svrevd_u64_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
--- /dev/null
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** revd_u8_m_tied12:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_m_tied12, svuint8_t,
+ z0 = svrevd_u8_m (z0, p0, z0),
+ z0 = svrevd_m (z0, p0, z0))
+
+/*
+** revd_u8_m_tied1:
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_m_tied1, svuint8_t,
+ z0 = svrevd_u8_m (z0, p0, z1),
+ z0 = svrevd_m (z0, p0, z1))
+
+/*
+** revd_u8_m_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** revd z0\.q, p0/m, \1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_m_tied2, svuint8_t,
+ z0 = svrevd_u8_m (z1, p0, z0),
+ z0 = svrevd_m (z1, p0, z0))
+
+/*
+** revd_u8_m_untied:
+** movprfx z0, z2
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_m_untied, svuint8_t,
+ z0 = svrevd_u8_m (z2, p0, z1),
+ z0 = svrevd_m (z2, p0, z1))
+
+/* Awkward register allocation (destination tied to the input); don't require specific output.  */
+TEST_UNIFORM_Z (revd_u8_z_tied1, svuint8_t,
+ z0 = svrevd_u8_z (p0, z0),
+ z0 = svrevd_z (p0, z0))
+
+/*
+** revd_u8_z_untied:
+** movi? [vdz]0\.?(?:[0-9]*[bhsd])?, #?0
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_z_untied, svuint8_t,
+ z0 = svrevd_u8_z (p0, z1),
+ z0 = svrevd_z (p0, z1))
+
+/*
+** revd_u8_x_tied1:
+** revd z0\.q, p0/m, z0\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_x_tied1, svuint8_t,
+ z0 = svrevd_u8_x (p0, z0),
+ z0 = svrevd_x (p0, z0))
+
+/*
+** revd_u8_x_untied:
+** movprfx z0, z1
+** revd z0\.q, p0/m, z1\.q
+** ret
+*/
+TEST_UNIFORM_Z (revd_u8_x_untied, svuint8_t,
+ z0 = svrevd_u8_x (p0, z1),
+ z0 = svrevd_x (p0, z1))
}]
}
+# Return 1 if this is an AArch64 target whose assembler accepts the
+# Armv8-A SVE2.1 extension, verified by assembling an SVE2p1-only
+# instruction form (LD1W with a .q destination).  The SVE2p1 ACLE tests
+# use this to choose between dg-do assemble and dg-do compile.
+proc check_effective_target_aarch64_asm_sve2p1_ok { } {
+    if { [istarget aarch64*-*-*] } {
+        return [check_no_compiler_messages aarch64_sve2p1_assembler object {
+            __asm__ (".arch_extension sve2p1; ld1w {z0.q},p7/z,[x0]");
+        } "-march=armv8-a+sve2p1"]
+    } else {
+        return 0
+    }
+}
+
proc check_effective_target_aarch64_small { } {
if { [istarget aarch64*-*-*] } {
return [check_no_compiler_messages aarch64_small object {