From: Tom Musta
Date: Wed, 15 Jan 2014 14:10:40 +0000 (-0600)
Subject: target-ppc: VSX Stage 4: Add xscvsxdsp and xscvuxdsp
X-Git-Tag: v2.0.0-rc0~37^2~84
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=74698350ca1bc95eda751f8c5a93268f20f7214e;p=thirdparty%2Fqemu.git

target-ppc: VSX Stage 4: Add xscvsxdsp and xscvuxdsp

This patch adds the VSX Scalar Convert Unsigned Integer Doubleword to
Floating Point Format and Round to Single Precision (xscvuxdsp) and the
VSX Scalar Convert Signed Integer Doubleword to Floating Point Format
and Round to Single Precision (xscvsxdsp) instructions.

The existing integer to floating point conversion macro
(VSX_CVT_INT_TO_FP) is modified to support rounding of the intermediate
floating point result to single precision.

Signed-off-by: Tom Musta
Reviewed-by: Richard Henderson
Signed-off-by: Alexander Graf
---

diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index 7e5003a5701..1dfb3c02729 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -2558,7 +2558,7 @@ VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, f32[j], u32[i], i, 0)
  * jdef  - definition of the j index (i or 2*i)
  * sfprf - set FPRF
  */
-#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, jdef, sfprf)       \
+#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, jdef, sfprf, r2sp) \
 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
 {                                                                            \
     ppc_vsr_t xt, xb;                                                        \
@@ -2570,6 +2570,9 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
     for (i = 0; i < nels; i++) {                                             \
         int j = jdef;                                                        \
         xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                  \
+        if (r2sp) {                                                          \
+            xt.tfld = helper_frsp(env, xt.tfld);                             \
+        }                                                                    \
         if (sfprf) {                                                         \
             helper_compute_fprf(env, xt.tfld, sfprf);                        \
         }                                                                    \
@@ -2579,20 +2582,22 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
     helper_float_check_status(env);                                          \
 }
 
-VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, u64[j], f64[i], i, 1)
-VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, u64[j], f64[i], i, 1)
-VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, u64[j], f64[i], i, 0)
-VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, u64[j], f64[i], i, 0)
+VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, u64[j], f64[i], i, 1, 0)
+VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, u64[j], f64[i], i, 1, 0)
+VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, u64[j], f64[i], i, 1, 1)
+VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, u64[j], f64[i], i, 1, 1)
+VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, u64[j], f64[i], i, 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, u64[j], f64[i], i, 0, 0)
 VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, u32[j], f64[i], \
-                  2*i + JOFFSET, 0)
+                  2*i + JOFFSET, 0, 0)
 VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, u32[j], f64[i], \
-                  2*i + JOFFSET, 0)
+                  2*i + JOFFSET, 0, 0)
 VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, u64[i], f32[j], \
-                  2*i + JOFFSET, 0)
+                  2*i + JOFFSET, 0, 0)
 VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, u64[i], f32[j], \
-                  2*i + JOFFSET, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, u32[j], f32[i], i, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, u32[j], f32[i], i, 0)
+                  2*i + JOFFSET, 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, u32[j], f32[i], i, 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, u32[j], f32[i], i, 0, 0)
 
 /* For "use current rounding mode", define a value that will not be one of
  * the existing rounding model enums.
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 655b670fef6..6250ebab3e3 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -279,6 +279,8 @@ DEF_HELPER_2(xscvdpsxws, void, env, i32)
 DEF_HELPER_2(xscvdpuxds, void, env, i32)
 DEF_HELPER_2(xscvdpuxws, void, env, i32)
 DEF_HELPER_2(xscvsxddp, void, env, i32)
+DEF_HELPER_2(xscvuxdsp, void, env, i32)
+DEF_HELPER_2(xscvsxdsp, void, env, i32)
 DEF_HELPER_2(xscvuxddp, void, env, i32)
 DEF_HELPER_2(xsrdpi, void, env, i32)
 DEF_HELPER_2(xsrdpic, void, env, i32)
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index b2a610c87c0..61271e143fb 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -7373,6 +7373,8 @@ GEN_VSX_HELPER_2(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207)
 GEN_VSX_HELPER_2(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207)
 GEN_VSX_HELPER_2(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207)
 GEN_VSX_HELPER_2(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
 
 GEN_VSX_HELPER_2(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
@@ -10195,6 +10197,8 @@ GEN_XX3FORM(xsnmaddasp, 0x04, 0x10, PPC2_VSX207),
 GEN_XX3FORM(xsnmaddmsp, 0x04, 0x11, PPC2_VSX207),
 GEN_XX3FORM(xsnmsubasp, 0x04, 0x12, PPC2_VSX207),
 GEN_XX3FORM(xsnmsubmsp, 0x04, 0x13, PPC2_VSX207),
+GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
+GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
 
 GEN_XX3FORM(xvadddp, 0x00, 0x0C, PPC2_VSX),
 GEN_XX3FORM(xvsubdp, 0x00, 0x0D, PPC2_VSX),
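
For readers unfamiliar with the r2sp path added above, the sketch below is a minimal, hypothetical standalone illustration (plain C using host IEEE-754 arithmetic rather than QEMU's softfloat, so FPRF updates and exception flags are not modeled, and the names and the value in main() are examples only) of the two-step conversion xscvsxdsp performs: convert the signed doubleword to double precision, then round that intermediate result to single precision the way helper_frsp() does, while still storing the value in double-precision format.

/* Hypothetical standalone sketch, not QEMU code: host IEEE-754 arithmetic
 * stands in for softfloat, and FPRF/exception handling is omitted. */
#include <stdint.h>
#include <stdio.h>

/* Mimic the r2sp path of VSX_CVT_INT_TO_FP for xscvsxdsp:
 * int64 -> float64, then round the intermediate result to single
 * precision, keeping the result in a double-precision slot. */
static double cvt_sxd_to_sp(int64_t src)
{
    double intermediate = (double)src;       /* int64_to_float64 */
    float rounded = (float)intermediate;     /* helper_frsp: round to single */
    return (double)rounded;                  /* stored back in a float64 field */
}

int main(void)
{
    int64_t v = 16777217;   /* 2^24 + 1: exact in double, not in single */
    printf("int64 -> double          : %.1f\n", (double)v);
    printf("int64 -> double -> single: %.1f\n", cvt_sxd_to_sp(v));
    return 0;
}

With this input the double-precision conversion preserves 16777217 exactly, while the round-to-single step yields 16777216, which is the extra rounding xscvsxdsp performs relative to xscvsxddp.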