DEF_HELPER_FLAGS_5(sme2_umlsll_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sme2_usmlall_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sme2_sumlall_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
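+/* SME2 multi-vector narrowing floating-point conversions.  */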
+DEF_HELPER_FLAGS_4(sme2_bfcvt, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sme2_bfcvtn, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sme2_fcvt_n, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sme2_fcvtn, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
SUB_aaz_s 11000001 101 000010 .. 111 ...00 11 ... @az_4x4_o3
SUB_aaz_d 11000001 111 000000 .. 111 ....0 11 ... @az_2x2_o3
SUB_aaz_d 11000001 111 000010 .. 111 ...00 11 ... @az_4x4_o3
+
+### SME2 Multi-vector SVE Constructive Unary
+
+&zz_n zd zn n
+@zz_1x2 ........ ... ..... ...... ..... zd:5 \
+ &zz_n n=1 zn=%zn_ax2
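+# The single source operand names a pair of consecutive vector registers;
+# insn bit 5 selects the compressing (0) or interleaving (1) form.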
+
+BFCVT 11000001 011 00000 111000 ....0 ..... @zz_1x2
+BFCVTN 11000001 011 00000 111000 ....1 ..... @zz_1x2
+
+FCVT_n 11000001 001 00000 111000 ....0 ..... @zz_1x2
+FCVTN 11000001 001 00000 111000 ....1 ..... @zz_1x2
DO_MLALL_IDX(sme2_sumlall_idx_s, uint32_t, int8_t, uint8_t, H4, H1, +)
#undef DO_MLALL_IDX
+
+/* Convert and compress */
+void HELPER(sme2_bfcvt)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ ARMVectorReg scratch;
+ size_t oprsz = simd_oprsz(desc);
+ size_t i, n = oprsz / 4;
+ float32 *s0 = vs;
+ float32 *s1 = vs + sizeof(ARMVectorReg);
+ bfloat16 *d = vd;
+
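+    /*
+     * vd may alias the second source register, which the stores of
+     * the first loop would clobber; copy it to a scratch buffer first.
+     */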
+ if (vd == s1) {
+ s1 = memcpy(&scratch, s1, oprsz);
+ }
+
+ for (i = 0; i < n; ++i) {
+ d[H2(i)] = float32_to_bfloat16(s0[H4(i)], fpst);
+ }
+ for (i = 0; i < n; ++i) {
+ d[H2(i) + n] = float32_to_bfloat16(s1[H4(i)], fpst);
+ }
+}
+
+void HELPER(sme2_fcvt_n)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ ARMVectorReg scratch;
+ size_t oprsz = simd_oprsz(desc);
+ size_t i, n = oprsz / 4;
+ float32 *s0 = vs;
+ float32 *s1 = vs + sizeof(ARMVectorReg);
+ float16 *d = vd;
+
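+    /* As for BFCVT, guard against vd aliasing the second source register. */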
+ if (vd == s1) {
+ s1 = memcpy(&scratch, s1, oprsz);
+ }
+
+ for (i = 0; i < n; ++i) {
+ d[H2(i)] = sve_f32_to_f16(s0[H4(i)], fpst);
+ }
+ for (i = 0; i < n; ++i) {
+ d[H2(i) + n] = sve_f32_to_f16(s1[H4(i)], fpst);
+ }
+}
+
+/* Convert and interleave */
+void HELPER(sme2_bfcvtn)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ size_t i, n = simd_oprsz(desc) / 4;
+ float32 *s0 = vs;
+ float32 *s1 = vs + sizeof(ARMVectorReg);
+ bfloat16 *d = vd;
+
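+    /*
+     * No scratch copy is needed: both sources are read before the
+     * stores, and the pair of 16-bit results written at element 2*i
+     * exactly covers the 32-bit source elements just consumed.
+     */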
+ for (i = 0; i < n; ++i) {
+ bfloat16 d0 = float32_to_bfloat16(s0[H4(i)], fpst);
+ bfloat16 d1 = float32_to_bfloat16(s1[H4(i)], fpst);
+ d[H2(i * 2 + 0)] = d0;
+ d[H2(i * 2 + 1)] = d1;
+ }
+}
+
+void HELPER(sme2_fcvtn)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ size_t i, n = simd_oprsz(desc) / 4;
+ float32 *s0 = vs;
+ float32 *s1 = vs + sizeof(ARMVectorReg);
+    float16 *d = vd;
+
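+    /* As for BFCVTN, in-place operation is safe without a scratch copy. */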
+ for (i = 0; i < n; ++i) {
+        float16 d0 = sve_f32_to_f16(s0[H4(i)], fpst);
+        float16 d1 = sve_f32_to_f16(s1[H4(i)], fpst);
+ d[H2(i * 2 + 0)] = d0;
+ d[H2(i * 2 + 1)] = d1;
+ }
+}
return ret;
}
-static inline float16 sve_f32_to_f16(float32 f, float_status *fpst)
+float16 sve_f32_to_f16(float32 f, float_status *fpst)
{
bool save = get_flush_to_zero(fpst);
float16 ret;
TRANS_FEAT(SMLSLL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_smlsll_idx_d)
TRANS_FEAT(UMLALL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_umlall_idx_d)
TRANS_FEAT(UMLSLL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_umlsll_idx_d)
+
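+/*
+ * Expand a multi-vector unary operation taking a float_status pointer,
+ * with one gvec expansion per (zd, zn) register pair in the group.
+ */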
+static bool do_zz_fpst(DisasContext *s, arg_zz_n *a, int data,
+ ARMFPStatusFlavour type, gen_helper_gvec_2_ptr *fn)
+{
+ if (sme_sm_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ TCGv_ptr fpst = fpstatus_ptr(type);
+
+ for (int i = 0, n = a->n; i < n; ++i) {
+ tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->zd + i),
+ vec_full_reg_offset(s, a->zn + i),
+ fpst, svl, svl, data, fn);
+ }
+ }
+ return true;
+}
+
+TRANS_FEAT(BFCVT, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_sme2_bfcvt)
+TRANS_FEAT(BFCVTN, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_sme2_bfcvtn)
+TRANS_FEAT(FCVT_n, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_sme2_fcvt_n)
+TRANS_FEAT(FCVTN, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_sme2_fcvtn)
bfloat16 helper_sme2_ah_fmax_b16(bfloat16 a, bfloat16 b, float_status *fpst);
bfloat16 helper_sme2_ah_fmin_b16(bfloat16 a, bfloat16 b, float_status *fpst);
+float16 sve_f32_to_f16(float32 f, float_status *fpst);
+
#endif /* TARGET_ARM_VEC_INTERNAL_H */