VAR1 (UNOP, float_extend_lo_, 0, FP, v4sf)
BUILTIN_VDF (UNOP, float_truncate_lo_, 0, FP)
+ VAR1 (UNOP, float_trunc_rodd_, 0, FP, df)
+ VAR1 (UNOP, float_trunc_rodd_lo_, 0, FP, v2sf)
+ VAR1 (BINOP, float_trunc_rodd_hi_, 0, FP, v4sf)
+
/* Implemented by aarch64_ld1<VALL_F16:mode>. */
BUILTIN_VALL_F16 (LOAD1, ld1, 0, LOAD)
VAR1 (LOAD1P, ld1, 0, ALL, v2di)
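For reference, each new entry above generates a __builtin_aarch64_* function named after the pattern plus the trailing mode suffix. Inferred from the call sites in the arm_neon.h hunk below, the generated prototypes are effectively:

  float32_t   __builtin_aarch64_float_trunc_rodd_df (float64_t);
  float32x2_t __builtin_aarch64_float_trunc_rodd_lo_v2sf (float64x2_t);
  float32x4_t __builtin_aarch64_float_trunc_rodd_hi_v4sf (float32x2_t,
                                                          float64x2_t);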
;; Float narrowing operations.
+(define_insn "aarch64_float_trunc_rodd_df"
+ [(set (match_operand:SF 0 "register_operand" "=w")
+ (unspec:SF [(match_operand:DF 1 "register_operand" "w")]
+ UNSPEC_FCVTXN))]
+ "TARGET_SIMD"
+ "fcvtxn\\t%s0, %d1"
+ [(set_attr "type" "neon_fp_cvt_narrow_d_q")]
+)
+
+(define_insn "aarch64_float_trunc_rodd_lo_v2sf"
+ [(set (match_operand:V2SF 0 "register_operand" "=w")
+ (unspec:V2SF [(match_operand:V2DF 1 "register_operand" "w")]
+ UNSPEC_FCVTXN))]
+ "TARGET_SIMD"
+ "fcvtxn\\t%0.2s, %1.2d"
+ [(set_attr "type" "neon_fp_cvt_narrow_d_q")]
+)
+
+(define_insn "aarch64_float_trunc_rodd_hi_v4sf_le"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+ (vec_concat:V4SF
+ (match_operand:V2SF 1 "register_operand" "0")
+ (unspec:V2SF [(match_operand:V2DF 2 "register_operand" "w")]
+ UNSPEC_FCVTXN)))]
+ "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+ "fcvtxn2\\t%0.4s, %2.2d"
+ [(set_attr "type" "neon_fp_cvt_narrow_d_q")]
+)
+
+(define_insn "aarch64_float_trunc_rodd_hi_v4sf_be"
+ [(set (match_operand:V4SF 0 "register_operand" "=w")
+ (vec_concat:V4SF
+ (unspec:V2SF [(match_operand:V2DF 2 "register_operand" "w")]
+ UNSPEC_FCVTXN)
+ (match_operand:V2SF 1 "register_operand" "0")))]
+ "TARGET_SIMD && BYTES_BIG_ENDIAN"
+ "fcvtxn2\\t%0.4s, %2.2d"
+ [(set_attr "type" "neon_fp_cvt_narrow_d_q")]
+)
+
+(define_expand "aarch64_float_trunc_rodd_hi_v4sf"
+ [(match_operand:V4SF 0 "register_operand")
+ (match_operand:V2SF 1 "register_operand")
+ (match_operand:V2DF 2 "register_operand")]
+ "TARGET_SIMD"
+{
+ rtx (*gen) (rtx, rtx, rtx) = BYTES_BIG_ENDIAN
+ ? gen_aarch64_float_trunc_rodd_hi_v4sf_be
+ : gen_aarch64_float_trunc_rodd_hi_v4sf_le;
+ emit_insn (gen (operands[0], operands[1], operands[2]));
+ DONE;
+}
+)
+
(define_insn "aarch64_float_truncate_lo_<mode>"
[(set (match_operand:VDF 0 "register_operand" "=w")
(float_truncate:VDF
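The _le/_be pair above is needed because the operand order of vec_concat follows the in-memory layout of the V4SF result, which flips with endianness; the expander selects the right pattern so callers never see the split. Either variant assembles to the same instruction, roughly as in this sketch (the function name is hypothetical):

  float32x4_t
  narrow_high (float32x2_t lo, float64x2_t wide)
  {
    /* Same output on both endiannesses: fcvtxn2 v0.4s, v1.2d,
       with the result register tied to lo by the "0" constraint.  */
    return vcvtx_high_f32_f64 (lo, wide);
  }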
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtx_f32_f64 (float64x2_t __a)
{
-  float32x2_t __result;
-  __asm__ ("fcvtxn %0.2s,%1.2d"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_trunc_rodd_lo_v2sf (__a);
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtx_high_f32_f64 (float32x2_t __a, float64x2_t __b)
{
-  float32x4_t __result;
-  __asm__ ("fcvtxn2 %0.4s,%1.2d"
-           : "=w"(__result)
-           : "w" (__b), "0"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_trunc_rodd_hi_v4sf (__a, __b);
}
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtxd_f32_f64 (float64_t __a)
{
-  float32_t __result;
-  __asm__ ("fcvtxn %s0,%d1"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_trunc_rodd_df (__a);
}
__extension__ extern __inline float32x2_t
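With the asm blocks gone, these conversions are ordinary RTL that the compiler can schedule and combine like any other insn. A minimal usage sketch of the rewritten intrinsics (the function name is illustrative):

  #include <arm_neon.h>

  /* Narrow four doubles to four floats using the round-to-odd
     conversions above.  */
  float32x4_t
  narrow_pair (float64x2_t lo, float64x2_t hi)
  {
    float32x2_t l = vcvtx_f32_f64 (lo);   /* fcvtxn  v0.2s, v1.2d  */
    return vcvtx_high_f32_f64 (l, hi);    /* fcvtxn2 v0.4s, v2.2d  */
  }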
UNSPEC_BFCVTN ; Used in aarch64-simd.md.
UNSPEC_BFCVTN2 ; Used in aarch64-simd.md.
UNSPEC_BFCVT ; Used in aarch64-simd.md.
+ UNSPEC_FCVTXN ; Used in aarch64-simd.md.
])
;; ------------------------------------------------------------------