AArch64: Add tests to ensure rev is produced
author    Chris Bazley <Chris.Bazley@arm.com>
          Wed, 11 Feb 2026 15:53:22 +0000 (15:53 +0000)
committer Tamar Christina <tamar.christina@arm.com>
          Wed, 11 Feb 2026 15:54:11 +0000 (15:54 +0000)
If the compiler mistakenly vectorizes a byte-order reversal,
the resulting code is inevitably less efficient than a single
rev instruction.  This kind of error becomes more likely if
SVE predication is ever used to vectorize groups smaller than
those that ASIMD instructions can handle.  Add tests to
guard against future regressions.
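
For context (not part of the commit): the store-direction pattern checked by
rev_32_1.c is the open-coded equivalent of __builtin_bswap32, which GCC
lowers to a single rev on AArch64.  A minimal sketch of that canonical form,
assuming -O2 and using the purely illustrative name rev_builtin:

/* Sketch only, not committed: the same store-direction byte swap expressed
   with __builtin_bswap32 and memcpy.  At -O2 on AArch64 this is expected
   to compile to the same rev/str pair that rev_32_1.c checks for.  */
#include <string.h>

typedef unsigned int __u32;
typedef unsigned char __u8;

void
rev_builtin (__u8 (*dst)[4], __u32 src)
{
  __u32 swapped = __builtin_bswap32 (src);   /* rev  w1, w1    */
  memcpy (*dst, &swapped, sizeof swapped);   /* str  w1, [x0]  */
}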

gcc/testsuite/ChangeLog:

* gcc.target/aarch64/rev_32_1.c: New test.
* gcc.target/aarch64/rev_32_2.c: New test.
	* gcc.target/aarch64/rev_32_3.c: New test.

gcc/testsuite/gcc.target/aarch64/rev_32_1.c [new file with mode: 0644]
gcc/testsuite/gcc.target/aarch64/rev_32_2.c [new file with mode: 0644]
gcc/testsuite/gcc.target/aarch64/rev_32_3.c [new file with mode: 0644]

diff --git a/gcc/testsuite/gcc.target/aarch64/rev_32_1.c b/gcc/testsuite/gcc.target/aarch64/rev_32_1.c
new file mode 100644 (file)
index 0000000..31765f0
--- /dev/null
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+/* If the compiler mistakenly vectorizes byte order reversal
+ * then the resultant code is inevitably less efficient than a
+ * rev instruction.  Guard against such regressions.
+ */
+typedef unsigned int __u32;
+typedef unsigned char __u8;
+
+/*
+** rev:
+**     rev     w1, w1
+**     str     w1, \[x0\]
+**     ret
+*/
+void
+rev (__u8 (*dst)[4], __u32 src)
+{
+  (*dst)[0] = src >> 24;
+  (*dst)[1] = src >> 16;
+  (*dst)[2] = src >> 8;
+  (*dst)[3] = src >> 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/rev_32_2.c b/gcc/testsuite/gcc.target/aarch64/rev_32_2.c
new file mode 100644 (file)
index 0000000..08bfc2a
--- /dev/null
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+/* If the compiler mistakenly vectorizes byte order reversal
+ * then the resultant code is inevitably less efficient than a
+ * rev instruction.  Guard against such regressions.
+ */
+typedef unsigned int __u32;
+typedef unsigned char __u8;
+
+/*
+** rev2:
+**     ldr     w0, \[x0\]
+**     rev     w0, w0
+**     ret
+*/
+__u32
+rev2 (const __u8 (*src)[4])
+{
+  __u32 dst = 0;
+
+  dst |= (__u32) (*src)[3] << 0;
+  dst |= (__u32) (*src)[2] << 8;
+  dst |= (__u32) (*src)[1] << 16;
+  dst |= (__u32) (*src)[0] << 24;
+
+  return dst;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/rev_32_3.c b/gcc/testsuite/gcc.target/aarch64/rev_32_3.c
new file mode 100644 (file)
index 0000000..d80b1f0
--- /dev/null
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+/* If the compiler mistakenly vectorizes byte order reversal
+ * then the resultant code is inevitably less efficient than a
+ * rev instruction.  Guard against such regressions.
+ */
+typedef unsigned char __u8;
+
+/*
+** rev3:
+**     ldr     w1, \[x1\]
+**     rev     w1, w1
+**     str     w1, \[x0\]
+**     ret
+*/
+void
+rev3 (unsigned char (*__restrict dst)[4],
+      const unsigned char (*__restrict src)[4])
+{
+  (*dst)[0] = (*src)[3];
+  (*dst)[1] = (*src)[2];
+  (*dst)[2] = (*src)[1];
+  (*dst)[3] = (*src)[0];
+}