From: Jonathan Wright
Date: Mon, 8 Feb 2021 21:23:48 +0000 (+0000)
Subject: aarch64: Use RTL builtins for [su]paddl[q] intrinsics
X-Git-Tag: basepoints/gcc-13~8081
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fa18085a32df06be6e7d899fd804d537c0149baf;p=thirdparty%2Fgcc.git

aarch64: Use RTL builtins for [su]paddl[q] intrinsics

Rewrite [su]paddl[q] Neon intrinsics to use RTL builtins rather than
inline assembly code, allowing for better scheduling and optimization.

gcc/ChangeLog:

2021-02-08  Jonathan Wright

	* config/aarch64/aarch64-simd-builtins.def: Add [su]addlp
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_<su>addlp<mode>):
	Define.
	* config/aarch64/arm_neon.h (vpaddl_s8): Use RTL builtin
	instead of inline asm.
	(vpaddl_s16): Likewise.
	(vpaddl_s32): Likewise.
	(vpaddl_u8): Likewise.
	(vpaddl_u16): Likewise.
	(vpaddl_u32): Likewise.
	(vpaddlq_s8): Likewise.
	(vpaddlq_s16): Likewise.
	(vpaddlq_s32): Likewise.
	(vpaddlq_u8): Likewise.
	(vpaddlq_u16): Likewise.
	(vpaddlq_u32): Likewise.
	* config/aarch64/iterators.md: Define [SU]ADDLP unspecs with
	appropriate attributes.
---
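For reference, the [SU]ADDLP instructions add each adjacent pair of
vector elements and widen the sums to twice the source element width.
A scalar sketch of what vpaddl_s8 computes (illustrative only; this
reference function is not part of the patch):

  #include <stdint.h>

  /* Illustrative scalar model of vpaddl_s8 / SADDLP: each adjacent
     pair of 8-bit lanes is summed into one widened 16-bit lane.  */
  static void
  vpaddl_s8_ref (const int8_t a[8], int16_t out[4])
  {
    for (int i = 0; i < 4; i++)
      out[i] = (int16_t) a[2 * i] + (int16_t) a[2 * i + 1];
  }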
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 92804e08e35f..ecf80197f8e8 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -154,6 +154,10 @@
   BUILTIN_VDQ_BHSI (BINOP, srhadd, 0, NONE)
   BUILTIN_VDQ_BHSI (BINOP, urhadd, 0, NONE)
 
+  /* Implemented by aarch64_<su>addlp<mode>.  */
+  BUILTIN_VDQV_L (UNOP, saddlp, 0, NONE)
+  BUILTIN_VDQV_L (UNOPU, uaddlp, 0, NONE)
+
   /* Implemented by aarch64_<su>addlv<mode>.  */
   BUILTIN_VDQV_L (UNOP, saddlv, 0, NONE)
   BUILTIN_VDQV_L (UNOPU, uaddlv, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 60e11c6a073d..8aae6a649624 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -3149,6 +3149,15 @@
   [(set_attr "type" "neon_reduc_add<q>")]
 )
 
+(define_insn "aarch64_<su>addlp<mode>"
+  [(set (match_operand:<VDBLW> 0 "register_operand" "=w")
+	(unspec:<VDBLW> [(match_operand:VDQV_L 1 "register_operand" "w")]
+		    USADDLP))]
+  "TARGET_SIMD"
+  "<su>addlp\\t%0.<Vwhalf>, %1.<Vtype>"
+  [(set_attr "type" "neon_reduc_add<q>")]
+)
+
 ;; ADDV with result zero-extended to SI/DImode (for popcount).
 (define_insn "aarch64_zero_extend<GPI:mode>_reduc_plus_<VDQV_E:mode>"
   [(set (match_operand:GPI 0 "register_operand" "=w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 52f3714b5eba..7eed6c6362ff 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8529,144 +8529,84 @@
 __extension__ extern __inline int16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_s8 (int8x8_t __a)
 {
-  int16x4_t __result;
-  __asm__ ("saddlp %0.4h,%1.8b"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_saddlpv8qi (__a);
 }
 
 __extension__ extern __inline int32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_s16 (int16x4_t __a)
 {
-  int32x2_t __result;
-  __asm__ ("saddlp %0.2s,%1.4h"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_saddlpv4hi (__a);
 }
 
 __extension__ extern __inline int64x1_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_s32 (int32x2_t __a)
 {
-  int64x1_t __result;
-  __asm__ ("saddlp %0.1d,%1.2s"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return (int64x1_t) __builtin_aarch64_saddlpv2si (__a);
 }
 
 __extension__ extern __inline uint16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_u8 (uint8x8_t __a)
 {
-  uint16x4_t __result;
-  __asm__ ("uaddlp %0.4h,%1.8b"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uaddlpv8qi_uu (__a);
 }
 
 __extension__ extern __inline uint32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_u16 (uint16x4_t __a)
 {
-  uint32x2_t __result;
-  __asm__ ("uaddlp %0.2s,%1.4h"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uaddlpv4hi_uu (__a);
 }
 
 __extension__ extern __inline uint64x1_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_u32 (uint32x2_t __a)
 {
-  uint64x1_t __result;
-  __asm__ ("uaddlp %0.1d,%1.2s"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return (uint64x1_t) __builtin_aarch64_uaddlpv2si_uu (__a);
 }
 
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_s8 (int8x16_t __a)
 {
-  int16x8_t __result;
-  __asm__ ("saddlp %0.8h,%1.16b"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_saddlpv16qi (__a);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_s16 (int16x8_t __a)
 {
-  int32x4_t __result;
-  __asm__ ("saddlp %0.4s,%1.8h"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_saddlpv8hi (__a);
 }
 
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_s32 (int32x4_t __a)
 {
-  int64x2_t __result;
-  __asm__ ("saddlp %0.2d,%1.4s"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_saddlpv4si (__a);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_u8 (uint8x16_t __a)
 {
-  uint16x8_t __result;
-  __asm__ ("uaddlp %0.8h,%1.16b"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uaddlpv16qi_uu (__a);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_u16 (uint16x8_t __a)
 {
-  uint32x4_t __result;
-  __asm__ ("uaddlp %0.4s,%1.8h"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uaddlpv8hi_uu (__a);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_u32 (uint32x4_t __a)
 {
-  uint64x2_t __result;
-  __asm__ ("uaddlp %0.2d,%1.4s"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uaddlpv4si_uu (__a);
 }
 
 __extension__ extern __inline int8x16_t
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index a3d895a67fb2..8a765ea8a32b 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -550,6 +550,8 @@
     UNSPEC_SSHLL	; Used in aarch64-simd.md.
     UNSPEC_USHLL	; Used in aarch64-simd.md.
     UNSPEC_ADDP		; Used in aarch64-simd.md.
+    UNSPEC_SADDLP	; Used in aarch64-simd.md.
+    UNSPEC_UADDLP	; Used in aarch64-simd.md.
     UNSPEC_TBL		; Used in vector permute patterns.
     UNSPEC_TBX		; Used in vector permute patterns.
     UNSPEC_CONCAT	; Used in vector permute patterns.
@@ -2209,6 +2211,8 @@
 (define_int_iterator SVE_INT_ADDV [UNSPEC_SADDV UNSPEC_UADDV])
 
+(define_int_iterator USADDLP [UNSPEC_SADDLP UNSPEC_UADDLP])
+
 (define_int_iterator USADDLV [UNSPEC_SADDLV UNSPEC_UADDLV])
 
 (define_int_iterator LOGICALF [UNSPEC_ANDF UNSPEC_IORF UNSPEC_XORF])
@@ -2961,6 +2965,8 @@
 ;; "s" for signed operations and "u" for unsigned ones.
 (define_int_attr su [(UNSPEC_SADDV "s")
		     (UNSPEC_UADDV "u")
+		     (UNSPEC_SADDLP "s")
+		     (UNSPEC_UADDLP "u")
		     (UNSPEC_SADDLV "s")
		     (UNSPEC_UADDLV "u")
		     (UNSPEC_UNPACKSHI "s")
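As a quick sanity check of the new builtin path (an assumed example, the
function below is not part of the patch), a small translation unit like
this should compile to a single saddlp instruction at -O2 on an aarch64
target, which the compiler can now schedule and optimize like any other
RTL rather than treating it as opaque inline asm:

  #include <arm_neon.h>

  /* Expected codegen at -O2: saddlp v0.4h, v0.8b (plus the return).  */
  int16x4_t
  sum_adjacent_pairs (int8x8_t x)
  {
    return vpaddl_s8 (x);
  }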