DEF_HELPER_6(vslidedown_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vslidedown_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vslidedown_vx_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1up_vx_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1up_vx_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1up_vx_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1up_vx_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_b, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfslide1up_vf_h, void, ptr, ptr, i64, ptr, env, i32)
DEF_HELPER_6(vfslide1up_vf_w, void, ptr, ptr, i64, ptr, env, i32)
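The integer slide1 helpers' scalar operand changes from tl (target_ulong) to i64, presumably so an SEW=64 element receives the sign-extended scalar even on a 32-bit target, where a target_ulong would widen by zero-extension; the spec sign-extends a scalar narrower than SEW. A minimal illustration with made-up values, not code from the patch:

    #include <stdint.h>

    int32_t  s1_xlen32 = -1;                  /* x[rs1] on an XLEN=32 target */
    uint64_t elem_old = (uint32_t)s1_xlen32;  /* 0x00000000ffffffff: zero-extended */
    uint64_t elem_new = (int64_t)s1_xlen32;   /* 0xffffffffffffffff: sign-extended */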
}
GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
-GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)
static bool slidedown_check(DisasContext *s, arg_rmrr *a)
}
GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
-GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
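+/*
+ * The slide1 helpers take the scalar as a full 64-bit value rather than
+ * a target_ulong, so the generic GEN_OPIVX_TRANS path (whose helper
+ * typedef passes TCGv) cannot be reused; this dedicated variant widens
+ * rs1 to i64 before the call.
+ */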
+typedef void gen_helper_vslide1_vx(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
+ TCGv_env, TCGv_i32);
+
+#define GEN_OPIVX_VSLIDE1_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ static gen_helper_vslide1_vx * const fns[4] = { \
+ gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
+ }; \
+ \
+ TCGv_ptr dest, src2, mask; \
+ TCGv_i64 src1; \
+ TCGv_i32 desc; \
+ uint32_t data = 0; \
+ \
+ dest = tcg_temp_new_ptr(); \
+ mask = tcg_temp_new_ptr(); \
+ src2 = tcg_temp_new_ptr(); \
+ src1 = tcg_temp_new_i64(); \
+ \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
+ desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, \
+ s->cfg_ptr->vlenb, data)); \
+ \
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd)); \
+ tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2)); \
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); \
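+        /* sign-extends on a 32-bit target, plain move on a 64-bit one */ \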
+ tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN)); \
+ \
+ fns[s->sew](dest, mask, src1, src2, tcg_env, desc); \
+ \
+ tcg_gen_movi_tl(cpu_vstart, 0); \
+ finalize_rvv_inst(s); \
+ \
+ return true; \
+ } \
+ return false; \
+}
+
+GEN_OPIVX_VSLIDE1_TRANS(vslide1up_vx, slideup_check)
+GEN_OPIVX_VSLIDE1_TRANS(vslide1down_vx, slidedown_check)
+
/* Vector Floating-Point Slide Instructions */
static bool fslideup_check(DisasContext *s, arg_rmrr *a)
{
GEN_VEXT_VSLIE1UP(32, H4)
GEN_VEXT_VSLIE1UP(64, H8)
-#define GEN_VEXT_VSLIDE1UP_VX(NAME, BITWIDTH) \
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vslide1up_##BITWIDTH(vd, v0, s1, vs2, env, desc); \
+#define GEN_VEXT_VSLIDE1UP_VX(NAME, BITWIDTH) \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vslide1up_##BITWIDTH(vd, v0, s1, vs2, env, desc); \
}
/* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
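A minimal sketch of the element movement behind the vslide1up helpers, assuming plain uint64_t arrays and ignoring masking, vstart and tail handling; the names (slide1up_sketch, vd, vs2, s1, vl) are illustrative, not the helper's own:

    #include <stddef.h>
    #include <stdint.h>

    /* vd[0] = x[rs1], vd[i+1] = vs2[i] */
    static void slide1up_sketch(uint64_t *vd, const uint64_t *vs2,
                                uint64_t s1, size_t vl)
    {
        for (size_t i = vl; i-- > 1; ) {
            vd[i] = vs2[i - 1];
        }
        if (vl > 0) {
            vd[0] = s1;
        }
    }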
GEN_VEXT_VSLIDE1DOWN(32, H4)
GEN_VEXT_VSLIDE1DOWN(64, H8)
-#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, BITWIDTH) \
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
- CPURISCVState *env, uint32_t desc) \
-{ \
- vslide1down_##BITWIDTH(vd, v0, s1, vs2, env, desc); \
+#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, BITWIDTH) \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vslide1down_##BITWIDTH(vd, v0, s1, vs2, env, desc); \
}
/* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
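And the corresponding sketch for the vslide1down helpers, under the same assumptions (illustrative names, no masking, vstart or tail handling):

    #include <stddef.h>
    #include <stdint.h>

    /* vd[i] = vs2[i+1] for i < vl-1, vd[vl-1] = x[rs1] */
    static void slide1down_sketch(uint64_t *vd, const uint64_t *vs2,
                                  uint64_t s1, size_t vl)
    {
        if (vl == 0) {
            return;
        }
        for (size_t i = 0; i + 1 < vl; i++) {
            vd[i] = vs2[i + 1];
        }
        vd[vl - 1] = s1;
    }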