DEF_HELPER_FLAGS_3(sme2_uunpk4_bh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sme2_uunpk4_hs, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sme2_uunpk4_sd, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_zip4_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_zip4_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_zip4_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_zip4_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_zip4_q, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_uzp4_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uzp4_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uzp4_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uzp4_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uzp4_q, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
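Each `DEF_HELPER_FLAGS_3` line above registers an out-of-line TCG helper. As a point of reference, a sketch of the prototype this expands to for one of the new helpers, assuming QEMU's usual `helper-head` expansion (`ptr` becomes `void *`, `i32` becomes `uint32_t`):

```c
/* Sketch of the declaration generated for sme2_zip4_b (not the literal
 * output).  TCG_CALL_NO_RWG tells the optimizer that the helper neither
 * reads nor writes TCG globals, so cached guest state need not be
 * synced around the call. */
void helper_sme2_zip4_b(void *vd, void *vs, uint32_t desc);
```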
### SME2 Multi-vector SVE Constructive Unary
+&zz_e zd zn esz
&zz_n zd zn n
@zz_1x2 ........ ... ..... ...... ..... zd:5 \
&zz_n n=1 zn=%zn_ax2
UUNPK_4bh 11000001 011 10101 111000 ....0 ...01 @zz_4x2_n1
UUNPK_4hs 11000001 101 10101 111000 ....0 ...01 @zz_4x2_n1
UUNPK_4sd 11000001 111 10101 111000 ....0 ...01 @zz_4x2_n1
+
+ZIP_4 11000001 esz:2 1 10110 111000 ...00 ...00 \
+ &zz_e zd=%zd_ax4 zn=%zn_ax4
+ZIP_4 11000001 001 10111 111000 ...00 ...00 \
+ &zz_e esz=4 zd=%zd_ax4 zn=%zn_ax4
+
+UZP_4 11000001 esz:2 1 10110 111000 ...00 ...10 \
+ &zz_e zd=%zd_ax4 zn=%zn_ax4
+UZP_4 11000001 001 10111 111000 ...00 ...10 \
+ &zz_e esz=4 zd=%zd_ax4 zn=%zn_ax4
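Each instruction gets two decode rows because the 128-bit form has its own fixed encoding with no `esz` field, so the second row pins `esz=4`. The `%zd_ax4`/`%zn_ax4` fields are defined earlier in `sme.decode` (not shown in this hunk); judging by the names and the zeroed low bits in the patterns, they presumably extract a 3-bit register number scaled by 4. A hypothetical hand-decode under that assumption, with `field` and `struct zz_e` as local stand-ins for QEMU's `extract32` and the generated `arg_zz_e`:

```c
#include <stdint.h>

struct zz_e { int zd, zn, esz; };

static inline uint32_t field(uint32_t insn, int pos, int len)
{
    return (insn >> pos) & ((1u << len) - 1);
}

/* Illustrates the ZIP_4/UZP_4 field layout only; decodetree
 * generates the real extraction code. */
static void sketch_decode_zipuzp4(uint32_t insn, struct zz_e *a)
{
    a->esz = field(insn, 22, 2);      /* element size, bits [23:22]     */
    a->zn  = field(insn, 7, 3) * 4;   /* first of four source registers */
    a->zd  = field(insn, 2, 3) * 4;   /* first of four dest registers   */
}
```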
d[i] = uint32_to_float32(s[i], fpst);
}
}
+
+#define ZIP4(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch[4]; \
+ size_t oprsz = simd_oprsz(desc); \
+ size_t quads = oprsz / (sizeof(TYPE) * 4); \
+ TYPE *s0, *s1, *s2, *s3; \
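+    /* Zd and Zn are 4-register aligned: overlap is all or nothing. */ \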
+ if (vs == vd) { \
+ vs = memcpy(scratch, vs, sizeof(scratch)); \
+ } \
+ s0 = vs; \
+ s1 = vs + sizeof(ARMVectorReg); \
+ s2 = vs + 2 * sizeof(ARMVectorReg); \
+ s3 = vs + 3 * sizeof(ARMVectorReg); \
+ for (size_t r = 0; r < 4; ++r) { \
+ TYPE *d = vd + r * sizeof(ARMVectorReg); \
+ size_t base = r * quads; \
+ for (size_t q = 0; q < quads; ++q) { \
+            d[H(4 * q + 0)] = s0[H(base + q)]; \
+            d[H(4 * q + 1)] = s1[H(base + q)]; \
+            d[H(4 * q + 2)] = s2[H(base + q)]; \
+            d[H(4 * q + 3)] = s3[H(base + q)]; \
+ } \
+ } \
+}
+
+ZIP4(sme2_zip4_b, uint8_t, H1)
+ZIP4(sme2_zip4_h, uint16_t, H2)
+ZIP4(sme2_zip4_s, uint32_t, H4)
+ZIP4(sme2_zip4_d, uint64_t, )
+ZIP4(sme2_zip4_q, Int128, )
+
+#undef ZIP4
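To make the index math concrete: destination register `r` gathers element `r * quads + q` from each of the four sources and interleaves them four-wide. A self-contained toy version on plain arrays (sizes and values are made up for the demo; this is not QEMU code):

```c
#include <stdio.h>
#include <stdint.h>

/* Toy ZIP4 over four 8-byte "registers":
 * d[r][4*q + k] = s[k][r*quads + q], as in the helper above. */
enum { N = 8, QUADS = N / 4 };

int main(void)
{
    uint8_t s[4][N], d[4][N];

    for (int k = 0; k < 4; k++) {
        for (int i = 0; i < N; i++) {
            s[k][i] = k * 0x10 + i;          /* register k holds k0..k7 */
        }
    }
    for (int r = 0; r < 4; r++) {
        for (int q = 0; q < QUADS; q++) {
            for (int k = 0; k < 4; k++) {
                d[r][4 * q + k] = s[k][r * QUADS + q];
            }
        }
    }
    for (int r = 0; r < 4; r++) {
        for (int i = 0; i < N; i++) {
            printf("%02x ", d[r][i]);
        }
        printf("\n");
    }
    return 0;
}
```

The first output row is `00 10 20 30 01 11 21 31`: one element from each source register in turn. UZP4 below is the inverse permutation, so applying it to this output reproduces the four source registers.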
+
+#define UZP4(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch[4]; \
+ size_t oprsz = simd_oprsz(desc); \
+ size_t quads = oprsz / (sizeof(TYPE) * 4); \
+ TYPE *d0, *d1, *d2, *d3; \
+ if (vs == vd) { \
+ vs = memcpy(scratch, vs, sizeof(scratch)); \
+ } \
+ d0 = vd; \
+ d1 = vd + sizeof(ARMVectorReg); \
+ d2 = vd + 2 * sizeof(ARMVectorReg); \
+ d3 = vd + 3 * sizeof(ARMVectorReg); \
+ for (size_t r = 0; r < 4; ++r) { \
+ TYPE *s = vs + r * sizeof(ARMVectorReg); \
+ size_t base = r * quads; \
+ for (size_t q = 0; q < quads; ++q) { \
+            d0[H(base + q)] = s[H(4 * q + 0)]; \
+            d1[H(base + q)] = s[H(4 * q + 1)]; \
+            d2[H(base + q)] = s[H(4 * q + 2)]; \
+            d3[H(base + q)] = s[H(4 * q + 3)]; \
+ } \
+ } \
+}
+
+UZP4(sme2_uzp4_b, uint8_t, H1)
+UZP4(sme2_uzp4_h, uint16_t, H2)
+UZP4(sme2_uzp4_s, uint32_t, H4)
+UZP4(sme2_uzp4_d, uint64_t, )
+UZP4(sme2_uzp4_q, Int128, )
+
+#undef UZP4
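The `H1`/`H2`/`H4` arguments deal with host endianness: ARM vector registers are stored as arrays of host-order `uint64_t`, so on a big-endian host a sub-64-bit element index must be flipped within its 8-byte granule. For reference, the macros are defined alongside the other ARM vector helpers essentially as follows (paraphrased):

```c
#if HOST_BIG_ENDIAN
#define H1(x)  ((x) ^ 7)   /* eight 1-byte elements per 64-bit granule */
#define H2(x)  ((x) ^ 3)   /* four 2-byte elements */
#define H4(x)  ((x) ^ 1)   /* two 4-byte elements */
#else
#define H1(x)  (x)
#define H2(x)  (x)
#define H4(x)  (x)
#endif
```

The `_d` and `_q` instantiations pass an empty `H` because 8- and 16-byte elements need no adjustment. Note also that `H()` must wrap the whole element index, as in `H(base + q)` above: the adjustment only permutes within a granule, so it cannot be split across a `base` that is not granule-aligned.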
TRANS_FEAT(UUNPK_4bh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk4_bh)
TRANS_FEAT(UUNPK_4hs, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk4_hs)
TRANS_FEAT(UUNPK_4sd, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk4_sd)
+
+static bool do_zipuzp_4(DisasContext *s, arg_zz_e *a,
+ gen_helper_gvec_2 * const fn[5])
+{
+ int bytes_per_op = 4 << a->esz;
+
+ /* Both MO_64 and MO_128 can fail the size test. */
+ if (s->max_svl < bytes_per_op) {
+ unallocated_encoding(s);
+ } else if (sme_sm_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ if (svl < bytes_per_op) {
+ unallocated_encoding(s);
+ } else {
+ tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->zd),
+ vec_full_reg_offset(s, a->zn),
+ svl, svl, 0, fn[a->esz]);
+ }
+ }
+ return true;
+}
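The early size test is pure arithmetic on the element size: the four-register form consumes four elements per output register, so `bytes_per_op = 4 << esz` is the smallest vector length for which the operation is well-formed. A quick self-contained check of the claim in the comment, assuming the architectural minimum SVL of 128 bits:

```c
#include <assert.h>

int main(void)
{
    const int min_svl = 16;              /* 128-bit minimum SVL, in bytes */
    for (int esz = 0; esz <= 4; esz++) {
        int bytes_per_op = 4 << esz;     /* 4, 8, 16, 32, 64 */
        /* Only MO_64 (esz=3) and MO_128 (esz=4) can exceed the minimum. */
        assert((bytes_per_op > min_svl) == (esz >= 3));
    }
    return 0;
}
```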
+
+static gen_helper_gvec_2 * const zip4_fns[] = {
+ gen_helper_sme2_zip4_b,
+ gen_helper_sme2_zip4_h,
+ gen_helper_sme2_zip4_s,
+ gen_helper_sme2_zip4_d,
+ gen_helper_sme2_zip4_q,
+};
+TRANS_FEAT(ZIP_4, aa64_sme2, do_zipuzp_4, a, zip4_fns)
+
+static gen_helper_gvec_2 * const uzp4_fns[] = {
+ gen_helper_sme2_uzp4_b,
+ gen_helper_sme2_uzp4_h,
+ gen_helper_sme2_uzp4_s,
+ gen_helper_sme2_uzp4_d,
+ gen_helper_sme2_uzp4_q,
+};
+TRANS_FEAT(UZP_4, aa64_sme2, do_zipuzp_4, a, uzp4_fns)