From: Jonathan Wright
Date: Thu, 18 Feb 2021 23:27:00 +0000 (+0000)
Subject: aarch64: Use RTL builtins for vcvtx intrinsics
X-Git-Tag: basepoints/gcc-13~8075
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8456a4cd96823704beec0b863010cd0dcc8dd591;p=thirdparty%2Fgcc.git

aarch64: Use RTL builtins for vcvtx intrinsics

Rewrite vcvtx Neon intrinsics to use RTL builtins rather than inline
assembly code, allowing for better scheduling and optimization.

gcc/ChangeLog:

2021-02-18  Jonathan Wright

        * config/aarch64/aarch64-simd-builtins.def: Add float_trunc_rodd
        builtin generator macros.
        * config/aarch64/aarch64-simd.md (aarch64_float_trunc_rodd_df):
        Define.
        (aarch64_float_trunc_rodd_lo_v2sf): Define.
        (aarch64_float_trunc_rodd_hi_v4sf_le): Define.
        (aarch64_float_trunc_rodd_hi_v4sf_be): Define.
        (aarch64_float_trunc_rodd_hi_v4sf): Define.
        * config/aarch64/arm_neon.h (vcvtx_f32_f64): Use RTL builtin
        instead of inline asm.
        (vcvtx_high_f32_f64): Likewise.
        (vcvtxd_f32_f64): Likewise.
        * config/aarch64/iterators.md: Add FCVTXN unspec.
---
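[ Note, illustrative only and not part of the commit: the three intrinsics
  touched below perform the FCVTXN round-to-odd narrowing conversion from
  double to single precision.  A minimal usage sketch, assuming only
  <arm_neon.h> and an AArch64 target (the function names here are made up):

    #include <arm_neon.h>

    float32x2_t
    narrow_lo (float64x2_t a)
    {
      return vcvtx_f32_f64 (a);          /* fcvtxn: 2 x f64 -> 2 x f32  */
    }

    float32x4_t
    narrow_hi (float32x2_t lo, float64x2_t b)
    {
      return vcvtx_high_f32_f64 (lo, b); /* fcvtxn2: fills the high half  */
    }

    float32_t
    narrow_scalar (float64_t a)
    {
      return vcvtxd_f32_f64 (a);         /* fcvtxn (scalar): f64 -> f32  */
    }

  Before this patch each of these expanded to a hand-written __asm__ block;
  with it they go through the __builtin_aarch64_float_trunc_rodd_* builtins
  and the RTL patterns added below, so the compiler can schedule and
  optimise around them. ]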
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index a7d4f2b94b58..f01a1b439e1f 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -632,6 +632,10 @@
   VAR1 (UNOP, float_extend_lo_, 0, FP, v4sf)
   BUILTIN_VDF (UNOP, float_truncate_lo_, 0, FP)
 
+  VAR1 (UNOP, float_trunc_rodd_, 0, FP, df)
+  VAR1 (UNOP, float_trunc_rodd_lo_, 0, FP, v2sf)
+  VAR1 (BINOP, float_trunc_rodd_hi_, 0, FP, v4sf)
+
   /* Implemented by aarch64_ld1.  */
   BUILTIN_VALL_F16 (LOAD1, ld1, 0, LOAD)
   VAR1(STORE1P, ld1, 0, ALL, v2di)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 299d911aff69..72f429c75150 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2981,6 +2981,60 @@
 
 ;; Float narrowing operations.
 
+(define_insn "aarch64_float_trunc_rodd_df"
+  [(set (match_operand:SF 0 "register_operand" "=w")
+        (unspec:SF [(match_operand:DF 1 "register_operand" "w")]
+                   UNSPEC_FCVTXN))]
+  "TARGET_SIMD"
+  "fcvtxn\\t%s0, %d1"
+  [(set_attr "type" "neon_fp_cvt_narrow_d_q")]
+)
+
+(define_insn "aarch64_float_trunc_rodd_lo_v2sf"
+  [(set (match_operand:V2SF 0 "register_operand" "=w")
+        (unspec:V2SF [(match_operand:V2DF 1 "register_operand" "w")]
+                     UNSPEC_FCVTXN))]
+  "TARGET_SIMD"
+  "fcvtxn\\t%0.2s, %1.2d"
+  [(set_attr "type" "neon_fp_cvt_narrow_d_q")]
+)
+
+(define_insn "aarch64_float_trunc_rodd_hi_v4sf_le"
+  [(set (match_operand:V4SF 0 "register_operand" "=w")
+        (vec_concat:V4SF
+          (match_operand:V2SF 1 "register_operand" "0")
+          (unspec:V2SF [(match_operand:V2DF 2 "register_operand" "w")]
+                       UNSPEC_FCVTXN)))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "fcvtxn2\\t%0.4s, %2.2d"
+  [(set_attr "type" "neon_fp_cvt_narrow_d_q")]
+)
+
+(define_insn "aarch64_float_trunc_rodd_hi_v4sf_be"
+  [(set (match_operand:V4SF 0 "register_operand" "=w")
+        (vec_concat:V4SF
+          (unspec:V2SF [(match_operand:V2DF 2 "register_operand" "w")]
+                       UNSPEC_FCVTXN)
+          (match_operand:V2SF 1 "register_operand" "0")))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "fcvtxn2\\t%0.4s, %2.2d"
+  [(set_attr "type" "neon_fp_cvt_narrow_d_q")]
+)
+
+(define_expand "aarch64_float_trunc_rodd_hi_v4sf"
+  [(match_operand:V4SF 0 "register_operand")
+   (match_operand:V2SF 1 "register_operand")
+   (match_operand:V2DF 2 "register_operand")]
+  "TARGET_SIMD"
+{
+  rtx (*gen) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN
+                               ? gen_aarch64_float_trunc_rodd_hi_v4sf_be
+                               : gen_aarch64_float_trunc_rodd_hi_v4sf_le;
+  emit_insn (gen (operands[0], operands[1], operands[2]));
+  DONE;
+}
+)
+
 (define_insn "aarch64_float_truncate_lo_<mode>"
   [(set (match_operand:VDF 0 "register_operand" "=w")
       (float_truncate:VDF
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index ead2bd0990d4..4b8ec529f198 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7014,36 +7014,21 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vcvtx_f32_f64 (float64x2_t __a)
 {
-  float32x2_t __result;
-  __asm__ ("fcvtxn %0.2s,%1.2d"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_trunc_rodd_lo_v2sf (__a);
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vcvtx_high_f32_f64 (float32x2_t __a, float64x2_t __b)
 {
-  float32x4_t __result;
-  __asm__ ("fcvtxn2 %0.4s,%1.2d"
-           : "=w"(__result)
-           : "w" (__b), "0"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_trunc_rodd_hi_v4sf (__a, __b);
 }
 
 __extension__ extern __inline float32_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vcvtxd_f32_f64 (float64_t __a)
 {
-  float32_t __result;
-  __asm__ ("fcvtxn %s0,%d1"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_trunc_rodd_df (__a);
 }
 
 __extension__ extern __inline float32x2_t
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index fe2c51cebf13..3d66e6384f35 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -861,6 +861,7 @@
     UNSPEC_BFCVTN       ; Used in aarch64-simd.md.
     UNSPEC_BFCVTN2      ; Used in aarch64-simd.md.
     UNSPEC_BFCVT        ; Used in aarch64-simd.md.
+    UNSPEC_FCVTXN       ; Used in aarch64-simd.md.
 ])
 
 ;; ------------------------------------------------------------------
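[ Illustrative check, not part of the commit: compiling a small file at
  -O2 with -S and looking for the mnemonics emitted by the new patterns is
  a quick way to confirm the expansion.  The file name and function below
  are hypothetical:

    /* vcvtx-check.c  */
    #include <arm_neon.h>

    float32x4_t
    combine_narrow (float32x2_t lo, float64x2_t hi)
    {
      /* Expands via __builtin_aarch64_float_trunc_rodd_hi_v4sf; given the
         "fcvtxn2\t%0.4s, %2.2d" template above, an fcvtxn2 instruction is
         expected in the generated assembly.  */
      return vcvtx_high_f32_f64 (lo, hi);
    }
  ]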