BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
/* Implemented by aarch64_simd_bsl<mode>. */
BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)

--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
 }
)
-(define_insn "*aarch64_mul3_elt<mode>"
+(define_insn "mul_lane<mode>3"
[(set (match_operand:VMUL 0 "register_operand" "=w")
- (mult:VMUL
- (vec_duplicate:VMUL
- (vec_select:<VEL>
- (match_operand:VMUL 1 "register_operand" "<h_con>")
- (parallel [(match_operand:SI 2 "immediate_operand")])))
- (match_operand:VMUL 3 "register_operand" "w")))]
+ (mult:VMUL
+ (vec_duplicate:VMUL
+ (vec_select:<VEL>
+ (match_operand:VMUL 2 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+ (match_operand:VMUL 1 "register_operand" "w")))]
"TARGET_SIMD"
{
- operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
- return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
+ operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
+ return "<f>mul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
}
[(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)
}
)
+(define_expand "aarch64_float_mla_lane<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (plus:VDQSF
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V2SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))
+ (match_operand:VDQSF 1 "register_operand")))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
+(define_expand "aarch64_float_mls_lane<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (minus:VDQSF
+ (match_operand:VDQSF 1 "register_operand")
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V2SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
(define_insn "fma<mode>4"
[(set (match_operand:VHSDF 0 "register_operand" "=w")
(fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")

--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
               float32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
}
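
For reference, a minimal usage sketch (not part of the patch). With the new expanders, each of these intrinsics goes through the float_ml[as]_lane builtins added above and is expanded as a lane-indexed multiply into a scratch register followed by a separate add or subtract, exactly as aarch64_float_mla_lane<mode> and aarch64_float_mls_lane<mode> emit. The function names and lane choices below are illustrative only.

#include <arm_neon.h>

/* acc + (x * coeffs[1]): expands via __builtin_aarch64_float_mla_lanev2sf,
   i.e. a lane-indexed fmul into a temporary, then an fadd.  */
float32x2_t
mla_lane_example (float32x2_t acc, float32x2_t x, float32x2_t coeffs)
{
  return vmla_lane_f32 (acc, x, coeffs, 1);
}

/* acc - (x * coeffs[0]): expands via __builtin_aarch64_float_mls_lanev4sf,
   i.e. a lane-indexed fmul into a temporary, then an fsub.  */
float32x4_t
mlsq_lane_example (float32x4_t acc, float32x4_t x, float32x2_t coeffs)
{
  return vmlsq_lane_f32 (acc, x, coeffs, 0);
}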