+2014-03-24 Alex Velenko <Alex.Velenko@arm.com>
+
+ * config/aarch64/aarch64-simd-builtins.def (lshr): DI mode excluded.
+ (lshr_simd): DI mode added.
+ * config/aarch64/aarch64-simd.md (aarch64_lshr_simddi): New pattern.
+ (aarch64_ushr_simddi): Likewise.
+ * config/aarch64/aarch64.md (UNSPEC_USHR64): New unspec.
+ * config/aarch64/arm_neon.h (vshr_n_u64): Intrinsic fixed.
+ (vshrd_n_u64): Likewise.
+
2014-03-24 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
* Makefile.in (s-macro_list): Depend on cc1.
#define TYPES_GETLANE (aarch64_types_getlane_qualifiers)
#define TYPES_SHIFTIMM (aarch64_types_getlane_qualifiers)
static enum aarch64_type_qualifiers
+aarch64_types_unsigned_shift_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_unsigned, qualifier_unsigned, qualifier_immediate };
+#define TYPES_USHIFTIMM (aarch64_types_unsigned_shift_qualifiers)
+static enum aarch64_type_qualifiers
aarch64_types_setlane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
= { qualifier_none, qualifier_none, qualifier_none, qualifier_immediate };
#define TYPES_SETLANE (aarch64_types_setlane_qualifiers)
BUILTIN_VDQ_I (SHIFTIMM, ashr, 3)
VAR1 (SHIFTIMM, ashr_simd, 0, di)
- BUILTIN_VSDQ_I_DI (SHIFTIMM, lshr, 3)
+ BUILTIN_VDQ_I (SHIFTIMM, lshr, 3)
+ VAR1 (USHIFTIMM, lshr_simd, 0, di)
/* Implemented by aarch64_<sur>shr_n<mode>. */
BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n, 0)
BUILTIN_VSDQ_I_DI (SHIFTIMM, urshr_n, 0)
DONE;
})
+(define_expand "aarch64_lshr_simddi"
+ [(match_operand:DI 0 "register_operand" "=w")
+ (match_operand:DI 1 "register_operand" "w")
+ (match_operand:SI 2 "aarch64_shift_imm64_di" "")]
+ "TARGET_SIMD"
+ {
+ if (INTVAL (operands[2]) == 64)
+ emit_insn (gen_aarch64_ushr_simddi (operands[0], operands[1]));
+ else
+ emit_insn (gen_lshrdi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+)
+
+;; SIMD shift by 64.  This pattern is a special case, as the standard
+;; pattern does not handle NEON shifts by 64.
+(define_insn "aarch64_ushr_simddi"
+ [(set (match_operand:DI 0 "register_operand" "=w")
+ (unspec:DI
+ [(match_operand:DI 1 "register_operand" "w")] UNSPEC_USHR64))]
+ "TARGET_SIMD"
+ "ushr\t%d0, %d1, 64"
+ [(set_attr "type" "neon_shift_imm")]
+)
+
(define_expand "vec_set<mode>"
[(match_operand:VQ_S 0 "register_operand")
(match_operand:<VEL> 1 "register_operand")
UNSPEC_TLS
UNSPEC_TLSDESC
UNSPEC_USHL_2S
+ UNSPEC_USHR64
UNSPEC_VSTRUCTDUMMY
])
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vshr_n_u64 (uint64x1_t __a, const int __b)
{
- return (uint64x1_t) __builtin_aarch64_lshrdi ((int64x1_t) __a, __b);
+ return __builtin_aarch64_lshr_simddi_uus ( __a, __b);
}
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
return (int64x1_t) __builtin_aarch64_ashr_simddi (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vshrd_n_u64 (uint64x1_t __a, const int __b)
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vshrd_n_u64 (uint64_t __a, const int __b)
{
- return (uint64x1_t) __builtin_aarch64_lshrdi (__a, __b);
+ return __builtin_aarch64_lshr_simddi_uus (__a, __b);
}
/* vsli */
+2014-03-24 Alex Velenko <Alex.Velenko@arm.com>
+
+ * gcc.target/aarch64/ushr64_1.c: New.
+
2014-03-24 James Greenhalgh <james.greenhalgh@arm.com>
* gcc.target/aarch64/vect-abs.c (dg-options): Add -std=c99.
--- /dev/null
+/* Test that the logical SIMD shift works correctly.  */
+/* { dg-do run } */
+/* { dg-options "--save-temps" } */
+
+#include "arm_neon.h"
+
+extern void abort (void);
+
+int __attribute__ ((noinline))
+test_vshr_n_u64_64 (uint64x1_t passed, uint64_t expected)
+{
+ return vget_lane_u64 (vshr_n_u64 (passed, 64), 0) != expected;
+}
+
+int __attribute__ ((noinline))
+test_vshr_n_u64_4 (uint64x1_t passed, uint64_t expected)
+{
+ return vget_lane_u64 (vshr_n_u64 (passed, 4), 0) != expected;
+}
+
+int __attribute__ ((noinline))
+test_vshr_n_u64_0 (uint64x1_t passed, uint64_t expected)
+{
+ return vget_lane_u64 (vshr_n_u64 (passed, 0), 0) != expected;
+}
+
+int __attribute__ ((noinline))
+test_vshrd_n_u64_64 (uint64_t passed, uint64_t expected)
+{
+ return vshrd_n_u64 (passed, 64) != expected;
+}
+
+int __attribute__ ((noinline))
+test_vshrd_n_u64_4 (uint64_t passed, uint64_t expected)
+{
+ return vshrd_n_u64 (passed, 4) != expected;
+}
+
+int __attribute__ ((noinline))
+test_vshrd_n_u64_0 (uint64_t passed, uint64_t expected)
+{
+ return vshrd_n_u64 (passed, 0) != expected;
+}
+
+/* { dg-final { scan-assembler-times "ushr\\td\[0-9\]+, d\[0-9\]+, 64" 2 } } */
+/* { dg-final { (scan-assembler-times "ushr\\td\[0-9\]+, d\[0-9\]+, 4" 2) || \
+ (scan-assembler-times "lsr\\tx\[0-9\]+, x\[0-9\]+, 4" 2) } } */
+/* { dg-final { scan-assembler-not "ushr\\td\[0-9\]+, d\[0-9\]+, 0" } } */
+
+int
+main (int argc, char *argv[])
+{
+ /* Testing vshr_n_u64. */
+ if (test_vshr_n_u64_64 (vcreate_u64 (0x0000000080000000), 0))
+ abort ();
+ if (test_vshr_n_u64_64 (vcreate_u64 (0xffffffff80000000), 0))
+ abort ();
+
+ if (test_vshr_n_u64_4 (vcreate_u64 (0x0000000080000000), 0x0000000008000000))
+ abort ();
+ if (test_vshr_n_u64_4 (vcreate_u64 (0xffffffff80000000), 0x0ffffffff8000000))
+ abort ();
+
+ if (test_vshr_n_u64_0 (vcreate_u64 (0x0000000080000000), 0x0000000080000000))
+ abort ();
+
+ /* Testing vshrd_n_u64. */
+ if (test_vshrd_n_u64_64 (0x0000000080000000, 0))
+ abort ();
+ if (test_vshrd_n_u64_64 (0xffffffff80000000, 0))
+ abort ();
+
+ if (test_vshrd_n_u64_4 (0x0000000080000000, 0x0000000008000000))
+ abort ();
+ if (test_vshrd_n_u64_4 (0xffffffff80000000, 0x0ffffffff8000000))
+ abort ();
+
+ if (test_vshrd_n_u64_0 (0x0000000080000000, 0x0000000080000000))
+ abort ();
+
+ return 0;
+}
+
+/* { dg-final { cleanup-saved-temps } } */