aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics
author     Jonathan Wright <jonathan.wright@arm.com>
           Tue, 16 Feb 2021 23:59:22 +0000 (23:59 +0000)
committer  Jonathan Wright <jonathan.wright@arm.com>
           Fri, 30 Apr 2021 17:41:11 +0000 (18:41 +0100)
Rewrite floating-point vml[as][q]_lane Neon intrinsics to use RTL
builtins rather than relying on the GCC vector extensions. Using RTL
builtins allows control over the emission of fmla/fmls instructions
(which we don't want here).
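
For illustration only (not part of the patch): with the old vector-extension
form, the separate multiply and add were eligible for contraction into a
fused fmla under GCC's default floating-point contraction rules, which is
the behaviour being removed here. A sketch, with the lane index fixed at 0
for brevity:

    #include <arm_neon.h>

    /* Roughly the pre-patch body of vmla_lane_f32, written with GCC
       vector extensions: the compiler may contract the multiply and
       add into a single fmla instruction.  */
    float32x2_t
    old_vmla_lane (float32x2_t a, float32x2_t b, float32x2_t c)
    {
      return a + b * vdup_n_f32 (vget_lane_f32 (c, 0));
    }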

With this commit, the code generated by these intrinsics changes from
a fused multiply-add/subtract instruction to an fmul followed by an
fadd/fsub instruction. If the programmer really wants fmla/fmls
instructions, they can use the vfm[as] intrinsics.
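
A minimal sketch of the choice this leaves the programmer (both intrinsics
are standard arm_neon.h; lane index 0 for brevity):

    #include <arm_neon.h>

    float32x2_t
    unfused (float32x2_t acc, float32x2_t x, float32x2_t y)
    {
      return vmla_lane_f32 (acc, x, y, 0);  /* now fmul + fadd */
    }

    float32x2_t
    fused (float32x2_t acc, float32x2_t x, float32x2_t y)
    {
      return vfma_lane_f32 (acc, x, y, 0);  /* fmla */
    }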

gcc/ChangeLog:

2021-02-16  Jonathan Wright  <jonathan.wright@arm.com>

* config/aarch64/aarch64-simd-builtins.def: Add
float_ml[as]_lane builtin generator macros.
* config/aarch64/aarch64-simd.md (*aarch64_mul3_elt<mode>):
Rename to...
(mul_lane<mode>3): This, and re-order arguments.
(aarch64_float_mla_lane<mode>): Define.
(aarch64_float_mls_lane<mode>): Define.
* config/aarch64/arm_neon.h (vmla_lane_f32): Use RTL builtin
instead of GCC vector extensions.
(vmlaq_lane_f32): Likewise.
(vmls_lane_f32): Likewise.
(vmlsq_lane_f32): Likewise.

gcc/config/aarch64/aarch64-simd-builtins.def
gcc/config/aarch64/aarch64-simd.md
gcc/config/aarch64/arm_neon.h

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 2a2fc2076b11a83c1de0b9a7847488df73d312be..8e4b4edc8a46ffba777a42058f06ce7204152824 100644
   BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
   BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
   BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
+  BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
+  BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
 
   /* Implemented by aarch64_simd_bsl<mode>.  */
   BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 0f96cd0bd512eb8437b6f16f45618f29e1d1526c..bdee49f74f4725409d33af733bb55be290b3f0e7 100644
 }
 )
 
-(define_insn "*aarch64_mul3_elt<mode>"
+(define_insn "mul_lane<mode>3"
  [(set (match_operand:VMUL 0 "register_operand" "=w")
-    (mult:VMUL
-      (vec_duplicate:VMUL
-         (vec_select:<VEL>
-           (match_operand:VMUL 1 "register_operand" "<h_con>")
-           (parallel [(match_operand:SI 2 "immediate_operand")])))
-      (match_operand:VMUL 3 "register_operand" "w")))]
+       (mult:VMUL
+        (vec_duplicate:VMUL
+          (vec_select:<VEL>
+            (match_operand:VMUL 2 "register_operand" "<h_con>")
+            (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+        (match_operand:VMUL 1 "register_operand" "w")))]
   "TARGET_SIMD"
   {
-    operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
-    return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
+    operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
+    return "<f>mul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
   }
   [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
 )
   }
 )
 
+(define_expand "aarch64_float_mla_lane<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+       (plus:VDQSF
+         (mult:VDQSF
+           (vec_duplicate:VDQSF
+             (vec_select:<VEL>
+               (match_operand:V2SF 3 "register_operand")
+               (parallel [(match_operand:SI 4 "immediate_operand")])))
+           (match_operand:VDQSF 2 "register_operand"))
+         (match_operand:VDQSF 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+                                   operands[3], operands[4]));
+    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
+(define_expand "aarch64_float_mls_lane<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+       (minus:VDQSF
+         (match_operand:VDQSF 1 "register_operand")
+         (mult:VDQSF
+           (vec_duplicate:VDQSF
+             (vec_select:<VEL>
+               (match_operand:V2SF 3 "register_operand")
+               (parallel [(match_operand:SI 4 "immediate_operand")])))
+           (match_operand:VDQSF 2 "register_operand"))))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+                                   operands[3], operands[4]));
+    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
 (define_insn "fma<mode>4"
   [(set (match_operand:VHSDF 0 "register_operand" "=w")
        (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 0227cadb7e869ee23dddd7abb71f169093f3cd05..5328d447a424fdf4ce1941abf3c1218d4fe8f42a 100644
@@ -20378,7 +20378,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
               float32x2_t __c, const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20462,7 +20462,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
                float32x2_t __c, const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
@@ -20576,7 +20576,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
               float32x2_t __c, const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20660,7 +20660,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
                float32x2_t __c, const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t