From: Jonathan Wright
Date: Tue, 16 Feb 2021 15:42:36 +0000 (+0000)
Subject: aarch64: Use RTL builtins for FP ml[as] intrinsics
X-Git-Tag: basepoints/gcc-13~7983
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b0d9aac8992c1f8c3198d9528a9867c653623dfb;p=thirdparty%2Fgcc.git

aarch64: Use RTL builtins for FP ml[as] intrinsics

Rewrite the floating-point vml[as][q] Neon intrinsics to use RTL
builtins rather than relying on the GCC vector extensions. Using RTL
builtins allows control over the emission of fmla/fmls instructions
(which we don't want here).

With this commit, the code generated by these intrinsics changes from
a fused multiply-add/subtract instruction to an fmul followed by an
fadd/fsub instruction. If the programmer really wants fmla/fmls
instructions, they can use the vfm[as] intrinsics.

gcc/ChangeLog:

2021-02-16  Jonathan Wright

	* config/aarch64/aarch64-simd-builtins.def: Add float_ml[as]
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_float_mla<mode>):
	Define.
	(aarch64_float_mls<mode>): Define.
	* config/aarch64/arm_neon.h (vmla_f32): Use RTL builtin instead
	of relying on GCC vector extensions.
	(vmla_f64): Likewise.
	(vmlaq_f32): Likewise.
	(vmlaq_f64): Likewise.
	(vmls_f32): Likewise.
	(vmls_f64): Likewise.
	(vmlsq_f32): Likewise.
	(vmlsq_f64): Likewise.
	* config/aarch64/iterators.md: Define VDQF_DF mode iterator.
---

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 3b5e88443a8f..2a2fc2076b11 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -668,6 +668,8 @@
   BUILTIN_VHSDF (TERNOP, fnma, 4, FP)
   VAR1 (TERNOP, fnma, 4, FP, hf)
 
+  BUILTIN_VDQF_DF (TERNOP, float_mla, 0, FP)
+  BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
   BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
   BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
 
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 6edfd2d637ef..0f96cd0bd512 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2636,6 +2636,38 @@
   [(set_attr "type" "neon_fp_abs_<stype><q>")]
 )
 
+(define_expand "aarch64_float_mla<mode>"
+  [(set (match_operand:VDQF_DF 0 "register_operand")
+	(plus:VDQF_DF
+	  (mult:VDQF_DF
+	    (match_operand:VDQF_DF 2 "register_operand")
+	    (match_operand:VDQF_DF 3 "register_operand"))
+	  (match_operand:VDQF_DF 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul<mode>3 (scratch, operands[2], operands[3]));
+    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
+(define_expand "aarch64_float_mls<mode>"
+  [(set (match_operand:VDQF_DF 0 "register_operand")
+	(minus:VDQF_DF
+	  (match_operand:VDQF_DF 1 "register_operand")
+	  (mult:VDQF_DF
+	    (match_operand:VDQF_DF 2 "register_operand")
+	    (match_operand:VDQF_DF 3 "register_operand"))))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul<mode>3 (scratch, operands[2], operands[3]));
+    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
 (define_expand "aarch64_float_mla_n"
   [(set (match_operand:VDQSF 0 "register_operand")
 	(plus:VDQSF
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index f1e1e0ee7914..0227cadb7e86 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -20347,28 +20347,28 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
 {
-  return __a + __b * __c;
+  return __builtin_aarch64_float_mlav2sf (__a, __b, __c);
 }
 
 __extension__ extern __inline float64x1_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
 {
-  return __a + __b * __c;
+  return (float64x1_t) {__builtin_aarch64_float_mladf (__a[0], __b[0], __c[0])};
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
 {
-  return __a + __b * __c;
+  return __builtin_aarch64_float_mlav4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline float64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
 {
-  return __a + __b * __c;
+  return __builtin_aarch64_float_mlav2df (__a, __b, __c);
 }
 
 /* vmla_lane */
@@ -20545,28 +20545,28 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
 {
-  return __a - __b * __c;
+  return __builtin_aarch64_float_mlsv2sf (__a, __b, __c);
 }
 
 __extension__ extern __inline float64x1_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
 {
-  return __a - __b * __c;
+  return (float64x1_t) {__builtin_aarch64_float_mlsdf (__a[0], __b[0], __c[0])};
 }
 
 __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
 {
-  return __a - __b * __c;
+  return __builtin_aarch64_float_mlsv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline float64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
 {
-  return __a - __b * __c;
+  return __builtin_aarch64_float_mlsv2df (__a, __b, __c);
 }
 
 /* vmls_lane */
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 634c44e225c2..c57aa6bf2f44 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -149,6 +149,7 @@
 			     V2SF V4SF V2DF])
 
 ;; Advanced SIMD Float modes, and DF.
+(define_mode_iterator VDQF_DF [V2SF V4SF V2DF DF])
 (define_mode_iterator VHSDF_DF [(V4HF "TARGET_SIMD_F16INST")
 				(V8HF "TARGET_SIMD_F16INST")
 				V2SF V4SF V2DF DF])
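---

Usage note (an illustrative sketch, not part of the patch): the
trade-off the commit message describes can be seen from user code such
as the fragment below. Compiled at -O2 for aarch64 after this change,
mla_example is expected to emit fmul + fadd, while fma_example keeps a
single fused fmla. The function names here are hypothetical.

#include <arm_neon.h>

/* vmla_f32 computes acc + x * y; after this patch it is expected to
   lower to an unfused fmul followed by fadd.  */
float32x2_t
mla_example (float32x2_t acc, float32x2_t x, float32x2_t y)
{
  return vmla_f32 (acc, x, y);
}

/* vfma_f32 requests an explicit fused multiply-add and still maps to
   a single fmla instruction.  */
float32x2_t
fma_example (float32x2_t acc, float32x2_t x, float32x2_t y)
{
  return vfma_f32 (acc, x, y);
}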