From: Jonathan Wright
Date: Tue, 9 Nov 2021 14:23:45 +0000 (+0000)
Subject: aarch64: Use type-qualified builtins for ADDP Neon intrinsics
X-Git-Tag: basepoints/gcc-13~3142
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f341c03203d8e8ca9965b73c5709337687d32cc2;p=thirdparty%2Fgcc.git

aarch64: Use type-qualified builtins for ADDP Neon intrinsics

Declare unsigned type-qualified builtins and use them to implement the
pairwise addition Neon intrinsics. This removes the need for many casts
in arm_neon.h.

gcc/ChangeLog:

2021-11-09  Jonathan Wright

	* config/aarch64/aarch64-simd-builtins.def: Declare unsigned
	builtins for pairwise addition.
	* config/aarch64/arm_neon.h (vpaddq_u8): Use type-qualified
	builtin and remove casts.
	(vpaddq_u16): Likewise.
	(vpaddq_u32): Likewise.
	(vpaddq_u64): Likewise.
	(vpadd_u8): Likewise.
	(vpadd_u16): Likewise.
	(vpadd_u32): Likewise.
	(vpaddd_u64): Likewise.
---

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 6f632f6ba00f..f09ff8dbe37f 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -51,7 +51,9 @@
   BUILTIN_VHSDF_HSDF (BINOP, fmulx, 0, FP)
   BUILTIN_VHSDF_DF (UNOP, sqrt, 2, FP)
   BUILTIN_VDQ_I (BINOP, addp, 0, NONE)
+  BUILTIN_VDQ_I (BINOPU, addp, 0, NONE)
   VAR1 (UNOP, addp, 0, NONE, di)
+  VAR1 (UNOPU, addp, 0, NONE, di)
   BUILTIN_VDQ_BHSI (UNOP, clrsb, 2, NONE)
   BUILTIN_VDQ_BHSI (UNOP, clz, 2, NONE)
   BUILTIN_VS (UNOP, ctz, 2, NONE)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 288f0176baeb..ce0162343e6b 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8011,32 +8011,28 @@ __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddq_u8 (uint8x16_t __a, uint8x16_t __b)
 {
-  return (uint8x16_t) __builtin_aarch64_addpv16qi ((int8x16_t) __a,
-						    (int8x16_t) __b);
+  return __builtin_aarch64_addpv16qi_uuu (__a, __b);
 }

 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddq_u16 (uint16x8_t __a, uint16x8_t __b)
 {
-  return (uint16x8_t) __builtin_aarch64_addpv8hi ((int16x8_t) __a,
-						   (int16x8_t) __b);
+  return __builtin_aarch64_addpv8hi_uuu (__a, __b);
 }

 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddq_u32 (uint32x4_t __a, uint32x4_t __b)
 {
-  return (uint32x4_t) __builtin_aarch64_addpv4si ((int32x4_t) __a,
-						   (int32x4_t) __b);
+  return __builtin_aarch64_addpv4si_uuu (__a, __b);
 }

 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddq_u64 (uint64x2_t __a, uint64x2_t __b)
 {
-  return (uint64x2_t) __builtin_aarch64_addpv2di ((int64x2_t) __a,
-						   (int64x2_t) __b);
+  return __builtin_aarch64_addpv2di_uuu (__a, __b);
 }

 __extension__ extern __inline int16x4_t
@@ -20293,24 +20289,21 @@ __extension__ extern __inline uint8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
 {
-  return (uint8x8_t) __builtin_aarch64_addpv8qi ((int8x8_t) __a,
-						  (int8x8_t) __b);
+  return __builtin_aarch64_addpv8qi_uuu (__a, __b);
 }

 __extension__ extern __inline uint16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
 {
-  return (uint16x4_t) __builtin_aarch64_addpv4hi ((int16x4_t) __a,
-						   (int16x4_t) __b);
+  return __builtin_aarch64_addpv4hi_uuu (__a, __b);
 }

 __extension__ extern __inline uint32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
 {
-  return (uint32x2_t) __builtin_aarch64_addpv2si ((int32x2_t) __a,
-						   (int32x2_t) __b);
+  return __builtin_aarch64_addpv2si_uuu (__a, __b);
 }

 __extension__ extern __inline float32_t
@@ -20338,7 +20331,7 @@ __extension__ extern __inline uint64_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddd_u64 (uint64x2_t __a)
 {
-  return __builtin_aarch64_addpdi ((int64x2_t) __a);
+  return __builtin_aarch64_addpdi_uu (__a);
 }

 /* vqabs */
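
For context, below is a minimal usage sketch (not part of the patch) of two of the
unsigned pairwise-addition intrinsics touched here. The file name, input values and
expected lane results are illustrative assumptions; the intrinsics' observable
behaviour is unchanged by this change, which only switches them to unsigned
type-qualified builtins so the header no longer needs sign-converting casts.

/* padd-example.c: illustrative sketch only, not part of the commit.
   Build for AArch64, e.g.: gcc -O2 padd-example.c -o padd-example  */
#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  uint32_t a_init[2] = { 1, 2 };
  uint32_t b_init[2] = { 10, 20 };
  uint32x2_t a = vld1_u32 (a_init);
  uint32x2_t b = vld1_u32 (b_init);

  /* vpadd_u32 performs a pairwise add (ADDP): adjacent elements are
     summed, the sums from the first operand filling the low lane and
     the sums from the second operand the high lane.
     Expected result: { 1 + 2, 10 + 20 } = { 3, 30 }.  */
  uint32x2_t r = vpadd_u32 (a, b);
  printf ("vpadd_u32:  { %u, %u }\n",
	  vget_lane_u32 (r, 0), vget_lane_u32 (r, 1));

  /* vpaddd_u64 reduces the two lanes of a uint64x2_t: 5 + 7 = 12.  */
  uint64_t c_init[2] = { 5, 7 };
  uint64x2_t c = vld1q_u64 (c_init);
  printf ("vpaddd_u64: %llu\n", (unsigned long long) vpaddd_u64 (c));

  return 0;
}

The example behaves identically before and after the patch; only the builtin
plumbing behind the intrinsics differs.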