void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_FLAGS_6(sme2_fdot_idx_h, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(sme2_fvdot_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_FLAGS_6(gvec_bfdot_idx, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(sme2_bfvdot_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_FLAGS_6(gvec_bfmmla, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, env, i32)
BFDOT_nx 11000001 0101 .... 0 .. 1 .. ....0 11 ... @azx_2x1_i2_o3
BFDOT_nx 11000001 0101 .... 1 .. 1 .. ...00 11 ... @azx_4x1_i2_o3
+
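+# FVDOT/BFVDOT take two consecutive Zn source vectors, a Zm element
+# selected by a 2-bit index, and a 3-bit ZA offset: the same
+# @azx_2x1_i2_o3 format used by BFDOT_nx above.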
+FVDOT 11000001 0101 .... 0 .. 0 .. ....0 01 ... @azx_2x1_i2_o3
+BFVDOT 11000001 0101 .... 0 .. 0 .. ....0 11 ... @azx_2x1_i2_o3
}
}
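+
+/*
+ * FVDOT (fp16 indexed, accumulating into fp32 ZA): each fp32 result
+ * element sums two fp16 products, taking one element from each
+ * register of the Zn pair (the "vertical" arrangement) against the
+ * indexed fp16 pair from the matching 128-bit segment of Zm.
+ */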
+void HELPER(sme2_fvdot_idx_h)(void *vd, void *vn, void *vm, void *va,
+ CPUARMState *env, uint32_t desc)
+{
+    intptr_t i, j, oprsz = simd_oprsz(desc);
+ intptr_t elements = oprsz / sizeof(float32);
+ intptr_t eltspersegment = MIN(4, elements);
+ int idx = extract32(desc, SIMD_DATA_SHIFT, 2);
+ int sel = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ float_status fpst_odd, *fpst_std, *fpst_f16;
+ float32 *d = vd, *a = va;
+ uint16_t *n0 = vn;
+ uint16_t *n1 = vn + sizeof(ARMVectorReg);
+ uint32_t *m = (uint32_t *)vm + H4(idx);
+
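+    /*
+     * ZA accumulation uses the SME ZA float_status set.  fpst_odd is
+     * a round-to-odd copy of the standard status: f16_dotadd uses it
+     * for the widening intermediate so that double rounding cannot
+     * perturb the final single-precision result.
+     */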
+ fpst_std = &env->vfp.fp_status[FPST_ZA];
+ fpst_f16 = &env->vfp.fp_status[FPST_ZA_F16];
+ fpst_odd = *fpst_std;
+ set_float_rounding_mode(float_round_to_odd, &fpst_odd);
+
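+    /*
+     * The indexed Zm pair is constant within each 128-bit segment,
+     * so load it once per segment; SEL chooses the even or odd fp16
+     * column of the Zn register pair.
+     */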
+ for (i = 0; i < elements; i += eltspersegment) {
+ uint32_t mm = m[i];
+ for (j = 0; j < eltspersegment; ++j) {
+ uint32_t nn = (n0[H2(2 * (i + j) + sel)])
+ | (n1[H2(2 * (i + j) + sel)] << 16);
+ d[i + H4(j)] = f16_dotadd(a[i + H4(j)], nn, mm,
+ fpst_f16, fpst_std, &fpst_odd);
+ }
+ }
+}
+
void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm,
void *vpn, void *vpm, CPUARMState *env, uint32_t desc)
{
}
TRANS_FEAT(BFDOT_nx, aa64_sme2, do_bfdot_nx, a)
+
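+/*
+ * The vertical dot products write a VGx2 vector group: two ZA slices
+ * vstride rows apart, one in each half of ZA.  Invoke the helper once
+ * per slice, with the slice number feeding the helper's SEL field via
+ * bit 2 of the descriptor data.
+ */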
+static bool do_vdot(DisasContext *s, arg_azx_n *a, gen_helper_gvec_4_ptr *fn)
+{
+ if (sme_smza_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ int vstride = svl / 2;
+ TCGv_ptr t_za = get_zarray(s, a->rv, a->off, 2, 1);
+ TCGv_ptr t_zn = vec_full_reg_ptr(s, a->zn);
+ TCGv_ptr t_zm = vec_full_reg_ptr(s, a->zm);
+ TCGv_ptr t = tcg_temp_new_ptr();
+
+ for (int i = 0; i < 2; ++i) {
+ int o_za = i * vstride * sizeof(ARMVectorReg);
+ int desc = simd_desc(svl, svl, a->idx | (i << 2));
+
+ tcg_gen_addi_ptr(t, t_za, o_za);
+ fn(t, t_zn, t_zm, t, tcg_env, tcg_constant_i32(desc));
+ }
+ }
+ return true;
+}
+
+TRANS_FEAT(FVDOT, aa64_sme2, do_vdot, a, gen_helper_sme2_fvdot_idx_h)
+TRANS_FEAT(BFVDOT, aa64_sme2, do_vdot, a, gen_helper_sme2_bfvdot_idx)
clear_tail(d, opr_sz, simd_maxsz(desc));
}
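+
+/*
+ * BFVDOT: the bfloat16 counterpart of sme2_fvdot_idx_h, with the
+ * arithmetic following the BFDOT rules selected by FPCR.EBF.
+ */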
+void HELPER(sme2_bfvdot_idx)(void *vd, void *vn, void *vm,
+ void *va, CPUARMState *env, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT, 2);
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ intptr_t elements = opr_sz / 4;
+ intptr_t eltspersegment = MIN(16 / 4, elements);
+ float32 *d = vd, *a = va;
+ uint16_t *n0 = vn;
+ uint16_t *n1 = vn + sizeof(ARMVectorReg);
+ uint32_t *m = vm;
+ float_status fpst, fpst_odd;
+
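+    /*
+     * FPCR.EBF selects between the two architected BFDOT behaviours:
+     * with EBF set, products are computed in higher precision and
+     * summed with a round-to-odd intermediate (bfdotadd_ebf); with
+     * EBF clear, each product and sum is rounded individually
+     * (bfdotadd).
+     */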
+ if (is_ebf(env, &fpst, &fpst_odd)) {
+ for (i = 0; i < elements; i += eltspersegment) {
+ uint32_t m_idx = m[i + H4(idx)];
+
+ for (j = 0; j < eltspersegment; j++) {
+ uint32_t nn = (n0[H2(2 * (i + j) + sel)])
+ | (n1[H2(2 * (i + j) + sel)] << 16);
+ d[i + H4(j)] = bfdotadd_ebf(a[i + H4(j)], nn, m_idx,
+ &fpst, &fpst_odd);
+ }
+ }
+ } else {
+ for (i = 0; i < elements; i += eltspersegment) {
+ uint32_t m_idx = m[i + H4(idx)];
+
+ for (j = 0; j < eltspersegment; j++) {
+ uint32_t nn = (n0[H2(2 * (i + j) + sel)])
+ | (n1[H2(2 * (i + j) + sel)] << 16);
+ d[i + H4(j)] = bfdotadd(a[i + H4(j)], nn, m_idx, &fpst);
+ }
+ }
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va,
CPUARMState *env, uint32_t desc)
{