BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mla_laneq, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mls_laneq, 0, FP)
/* Implemented by aarch64_simd_bsl<mode>. */
BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
[(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)
+(define_insn "mul_laneq<mode>3"
+ [(set (match_operand:VDQSF 0 "register_operand" "=w")
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V4SF 2 "register_operand" "w")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+ (match_operand:VDQSF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ {
+ operands[3] = aarch64_endian_lane_rtx (V4SFmode, INTVAL (operands[3]));
+ return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
+ }
+ [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
+)
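The new mul_laneq<mode>3 pattern matches a vector multiply by a single lane of a 128-bit V4SF operand and emits fmul by element, canonicalizing the lane number for big-endian via aarch64_endian_lane_rtx. A short sketch of C code this pattern should cover, using the existing vmulq_laneq_f32 intrinsic (the register allocation in the comment is illustrative):

#include <arm_neon.h>

float32x4_t
mul_by_lane1 (float32x4_t a, float32x4_t b)
{
  /* Expected to match mul_laneq<mode>3 and emit something like
     "fmul v0.4s, v0.4s, v1.s[1]".  */
  return vmulq_laneq_f32 (a, b, 1);
}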
+
(define_insn "*aarch64_mul3_elt_<vswap_width_name><mode>"
[(set (match_operand:VMUL_CHANGE_NLANES 0 "register_operand" "=w")
(mult:VMUL_CHANGE_NLANES
}
)
+(define_expand "aarch64_float_mla_laneq<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (plus:VDQSF
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V4SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))
+ (match_operand:VDQSF 1 "register_operand")))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
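The expander splits the builtin into two named patterns: a lane multiply into a scratch register followed by a plain vector add, i.e. an unfused fmul + fadd pair. Reference semantics as a C sketch (not part of the patch; lane 1 is arbitrary since the lane must be an immediate):

#include <arm_neon.h>

float32x2_t
mla_laneq_ref (float32x2_t a, float32x2_t b, float32x4_t c)
{
  /* a + b * c[1]: multiply by the selected lane, then add.  */
  return vadd_f32 (a, vmul_laneq_f32 (b, c, 1));
}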
+
+(define_expand "aarch64_float_mls_laneq<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (minus:VDQSF
+ (match_operand:VDQSF 1 "register_operand")
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V4SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
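The mls expander is the mirror image: the lane product goes into the scratch register and is subtracted from operand 1 via gen_sub<mode>3. A matching C sketch (again illustrative, not part of the patch):

#include <arm_neon.h>

float32x2_t
mls_laneq_ref (float32x2_t a, float32x2_t b, float32x4_t c)
{
  /* a - b * c[1], matching the minus RTL in the expander above.  */
  return vsub_f32 (a, vmul_laneq_f32 (b, c, 1));
}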
+
(define_insn "fma<mode>4"
[(set (match_operand:VHSDF 0 "register_operand" "=w")
(fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
		float32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_laneqv2sf (__a, __b, __c, __lane);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
		 float32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_laneqv4sf (__a, __b, __c, __lane);
}
__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_laneq_f32 (float32x2_t __a, float32x2_t __b,
		float32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_laneqv2sf (__a, __b, __c, __lane);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
		 float32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_laneqv4sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x8_t
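With the arm_neon.h changes above, the four float laneq multiply-accumulate intrinsics route through the new builtins instead of open-coded GNU vector arithmetic. An end-to-end sketch (the instruction sequence in the comment is an expectation based on the expanders, which split the operation into a lane multiply and an add):

#include <arm_neon.h>

float32x4_t
fir_tap (float32x4_t acc, float32x4_t x, float32x4_t coeffs)
{
  /* Expected lowering:
       fmul  vT.4s, vX.4s, vC.s[2]
       fadd  vD.4s, vA.4s, vT.4s  */
  return vmlaq_laneq_f32 (acc, x, coeffs, 2);
}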