git.ipfire.org Git - thirdparty/gcc.git/commitdiff
[AArch64] Logical vector shift right conformance
authorAlex Velenko <Alex.Velenko@arm.com>
Mon, 24 Mar 2014 12:05:38 +0000 (12:05 +0000)
committerJames Greenhalgh <jgreenhalgh@gcc.gnu.org>
Mon, 24 Mar 2014 12:05:38 +0000 (12:05 +0000)
gcc/

* config/aarch64/aarch64-simd-builtins.def (lshr): DI mode excluded.
(lshr_simd): DI mode added.
* config/aarch64/aarch64-simd.md (aarch64_lshr_simddi): New pattern.
(aarch64_ushr_simddi): Likewise.
* config/aarch64/aarch64.md (UNSPEC_USHR64): New unspec.
* config/aarch64/arm_neon.h (vshr_n_u64): Intrinsic fixed.
(vshrd_n_u64): Likewise.

gcc/testsuite/

* gcc.target/aarch64/ushr64_1.c: New testcase.

From-SVN: r208789

gcc/ChangeLog
gcc/config/aarch64/aarch64-builtins.c
gcc/config/aarch64/aarch64-simd-builtins.def
gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/aarch64.md
gcc/config/aarch64/arm_neon.h
gcc/testsuite/ChangeLog
gcc/testsuite/gcc.target/aarch64/ushr64_1.c [new file with mode: 0644]

index fd8cd6c486a586131a325221cdbfd237d9b5c079..a0e21f28616eb862214a68524a20f4991a0473da 100644 (file)
@@ -1,3 +1,13 @@
+2014-03-24  Alex Velenko  <Alex.Velenko@arm.com>
+
+       * config/aarch64/aarch64-simd-builtins.def (lshr): DI mode excluded.
+       (lshr_simd): DI mode added.
+       * config/aarch64/aarch64-simd.md (aarch64_lshr_simddi): New pattern.
+       (aarch64_ushr_simddi): Likewise.
+       * config/aarch64/aarch64.md (UNSPEC_USHR64): New unspec.
+       * config/aarch64/arm_neon.h (vshr_n_u64): Intrinsic fixed.
+       (vshrd_n_u64): Likewise.
+
 2014-03-24  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
 
        * Makefile.in (s-macro_list): Depend on cc1.
index 9dfe0b6e1a5db9a3049e2d59596d09ada433cd66..55cfe0ab225b39c4ac96ad686fd8f2e76b0980c8 100644 (file)
@@ -183,6 +183,10 @@ aarch64_types_getlane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
 #define TYPES_GETLANE (aarch64_types_getlane_qualifiers)
 #define TYPES_SHIFTIMM (aarch64_types_getlane_qualifiers)
 static enum aarch64_type_qualifiers
+aarch64_types_unsigned_shift_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_unsigned, qualifier_unsigned, qualifier_immediate };
+#define TYPES_USHIFTIMM (aarch64_types_unsigned_shift_qualifiers)
+static enum aarch64_type_qualifiers
 aarch64_types_setlane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_none, qualifier_none, qualifier_none, qualifier_immediate };
 #define TYPES_SETLANE (aarch64_types_setlane_qualifiers)
index e5f71b479ccfd1a9cbf84aed0f96b49762053f59..c9b7570e565979cb454d594c84e625380419d0e6 100644 (file)
 
   BUILTIN_VDQ_I (SHIFTIMM, ashr, 3)
   VAR1 (SHIFTIMM, ashr_simd, 0, di)
-  BUILTIN_VSDQ_I_DI (SHIFTIMM, lshr, 3)
+  BUILTIN_VDQ_I (SHIFTIMM, lshr, 3)
+  VAR1 (USHIFTIMM, lshr_simd, 0, di)
   /* Implemented by aarch64_<sur>shr_n<mode>.  */
   BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n, 0)
   BUILTIN_VSDQ_I_DI (SHIFTIMM, urshr_n, 0)
index 4dffb59e856aeaafb79007255d3b91a73ef1ef13..6048d605c72e6a43b9a004a8bc89dbfa89f3ed5b 100644 (file)
   DONE;
 })
 
+(define_expand "aarch64_lshr_simddi"
+  [(match_operand:DI 0 "register_operand" "=w")
+   (match_operand:DI 1 "register_operand" "w")
+   (match_operand:SI 2 "aarch64_shift_imm64_di" "")]
+  "TARGET_SIMD"
+  {
+    if (INTVAL (operands[2]) == 64)
+      emit_insn (gen_aarch64_ushr_simddi (operands[0], operands[1]));
+    else
+      emit_insn (gen_lshrdi3 (operands[0], operands[1], operands[2]));
+    DONE;
+  }
+)
+
+;; SIMD shift by 64.  This pattern is a special case as standard pattern does
+;; not handle NEON shifts by 64.
+(define_insn "aarch64_ushr_simddi"
+  [(set (match_operand:DI 0 "register_operand" "=w")
+        (unspec:DI
+          [(match_operand:DI 1 "register_operand" "w")] UNSPEC_USHR64))]
+  "TARGET_SIMD"
+  "ushr\t%d0, %d1, 64"
+  [(set_attr "type" "neon_shift_imm")]
+)
+
 (define_expand "vec_set<mode>"
   [(match_operand:VQ_S 0 "register_operand")
    (match_operand:<VEL> 1 "register_operand")
index 99a6ac8fcbdcd24a0ea18cc037bef9cf72070281..c86a29d8e7f8df21f25e14d22df1c3e8c37c907f 100644 (file)
     UNSPEC_TLS
     UNSPEC_TLSDESC
     UNSPEC_USHL_2S
+    UNSPEC_USHR64
     UNSPEC_VSTRUCTDUMMY
 ])
 
index 8272a843c410cea4196c648f78cdd61368895921..747a292ba9b2260e74566c946fe57afaea267969 100644 (file)
@@ -23364,7 +23364,7 @@ vshr_n_u32 (uint32x2_t __a, const int __b)
 __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
 vshr_n_u64 (uint64x1_t __a, const int __b)
 {
-  return (uint64x1_t) __builtin_aarch64_lshrdi ((int64x1_t) __a, __b);
+  return __builtin_aarch64_lshr_simddi_uus ( __a, __b);
 }
 
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
@@ -23421,10 +23421,10 @@ vshrd_n_s64 (int64x1_t __a, const int __b)
   return (int64x1_t) __builtin_aarch64_ashr_simddi (__a, __b);
 }
 
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vshrd_n_u64 (uint64x1_t __a, const int __b)
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vshrd_n_u64 (uint64_t __a, const int __b)
 {
-  return (uint64x1_t) __builtin_aarch64_lshrdi (__a, __b);
+  return __builtin_aarch64_lshr_simddi_uus (__a, __b);
 }
 
 /* vsli */
index 99801b51438b524dcc9c2b7e517f665328ba7e59..a07de067040141dcbf03c215fa6426f6b2273e21 100644 (file)
@@ -1,3 +1,7 @@
+2014-03-24  Alex Velenko  <Alex.Velenko@arm.com>
+
+       * gcc.target/aarch64/ushr64_1.c: New.
+
 2014-03-24  James Greenhalgh  <james.greenhalgh@arm.com>
 
        * gcc.target/aarch64/vect-abs.c (dg-options): Add -std=c99.
diff --git a/gcc/testsuite/gcc.target/aarch64/ushr64_1.c b/gcc/testsuite/gcc.target/aarch64/ushr64_1.c
new file mode 100644 (file)
index 0000000..b1c741d
--- /dev/null
@@ -0,0 +1,84 @@
+/* Test logical SIMD shift works correctly.  */
+/* { dg-do run } */
+/* { dg-options "--save-temps" } */
+
+#include "arm_neon.h"
+
+extern void abort (void);
+
+int __attribute__ ((noinline))
+test_vshr_n_u64_64 (uint64x1_t passed, uint64_t expected)
+{
+  return vget_lane_u64 (vshr_n_u64 (passed, 64), 0) != expected;
+}
+
+int __attribute__ ((noinline))
+test_vshr_n_u64_4 (uint64x1_t passed, uint64_t expected)
+{
+  return vget_lane_u64 (vshr_n_u64 (passed, 4), 0) != expected;
+}
+
+int __attribute__ ((noinline))
+test_vshr_n_u64_0 (uint64x1_t passed, uint64_t expected)
+{
+  return vget_lane_u64 (vshr_n_u64 (passed, 0), 0) != expected;
+}
+
+int __attribute__ ((noinline))
+test_vshrd_n_u64_64 (uint64_t passed, uint64_t expected)
+{
+  return vshrd_n_u64 (passed, 64) != expected;
+}
+
+int __attribute__ ((noinline))
+test_vshrd_n_u64_4 (uint64_t passed, uint64_t expected)
+{
+  return vshrd_n_u64 (passed, 4) != expected;
+}
+
+int __attribute__ ((noinline))
+test_vshrd_n_u64_0 (uint64_t passed, uint64_t expected)
+{
+  return vshrd_n_u64 (passed, 0) != expected;
+}
+
+/* { dg-final { scan-assembler-times "ushr\\td\[0-9\]+, d\[0-9\]+, 64" 2 } } */
+/* { dg-final { (scan-assembler-times "ushr\\td\[0-9\]+, d\[0-9\]+, 4" 2)  || \
+   (scan-assembler-times "lsr\\tx\[0-9\]+, x\[0-9\]+, 4" 2) } } */
+/* { dg-final { scan-assembler-not "ushr\\td\[0-9\]+, d\[0-9\]+, 0" } } */
+
+int
+main (int argc, char *argv[])
+{
+  /* Testing vshr_n_u64.  */
+  if (test_vshr_n_u64_64 (vcreate_u64 (0x0000000080000000), 0))
+    abort ();
+  if (test_vshr_n_u64_64 (vcreate_u64 (0xffffffff80000000), 0))
+    abort ();
+
+  if (test_vshr_n_u64_4 (vcreate_u64 (0x0000000080000000), 0x0000000008000000))
+    abort ();
+  if (test_vshr_n_u64_4 (vcreate_u64 (0xffffffff80000000), 0x0ffffffff8000000))
+    abort ();
+
+  if (test_vshr_n_u64_0 (vcreate_u64 (0x0000000080000000), 0x0000000080000000))
+    abort ();
+
+  /* Testing vshrd_n_u64.  */
+  if (test_vshrd_n_u64_64 (0x0000000080000000, 0))
+    abort ();
+  if (test_vshrd_n_u64_64 (0xffffffff80000000, 0))
+    abort ();
+
+  if (test_vshrd_n_u64_4 (0x0000000080000000, 0x0000000008000000))
+    abort ();
+  if (test_vshrd_n_u64_4 (0xffffffff80000000, 0x0ffffffff8000000))
+    abort ();
+
+  if (test_vshrd_n_u64_0 (0x0000000080000000, 0x0000000080000000))
+    abort ();
+
+  return 0;
+}
+
+/* { dg-final { cleanup-saved-temps } } */