From: Carl Love Date: Fri, 22 Mar 2019 16:56:38 +0000 (-0500) Subject: PPC64, fix implementation of xvcvsxdsp and xvcvuxdsp instructions. X-Git-Tag: VALGRIND_3_15_0~45 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=886b0a1cf49412cfa597253a122dcd8bbc89fc49;p=thirdparty%2Fvalgrind.git PPC64, fix implementation of xvcvsxdsp and xvcvuxdsp instructions. The instructions need to write the result to the upper and lower 32-bit halves of each 64-bit doubleword of the result. This is a fix for Valgrind bug 405356. --- diff --git a/NEWS b/NEWS index a460d8eae5..a2c055ed31 100644 --- a/NEWS +++ b/NEWS @@ -113,6 +113,8 @@ where XXXXXX is the bug number as listed below. 405403 s390x disassembler cannot be used on x86 405458 MIPS mkFormVEC arguments swapped? 405716 drd: Fix an integer overflow in the stack margin calculation +405356 PPC64, xvcvsxdsp, xvcvuxdsp are supposed to write the 32-bit result to + the upper and lower 32-bits of the 64-bit result n-i-bz add syswrap for PTRACE_GET|SET_THREAD_AREA on amd64. 
n-i-bz Fix callgrind_annotate non deterministic order for equal total diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c index 00ae6df2d6..d6c671abab 100644 --- a/VEX/priv/guest_ppc_toIR.c +++ b/VEX/priv/guest_ppc_toIR.c @@ -16312,52 +16312,71 @@ dis_vx_conv ( UInt theInstr, UInt opc2 ) break; case 0x370: // xvcvsxdsp (VSX Vector Convert and round Signed Integer Doubleword // to Single-Precision format) - DIP("xvcvsxddp v%u,v%u\n", XT, XB); + { + IRTemp result32hi = newTemp(Ity_I32); + IRTemp result32lo = newTemp(Ity_I32); + + DIP("xvcvsxdsp v%u,v%u\n", XT, XB); + assign( result32hi, + unop( Iop_ReinterpF32asI32, + unop( Iop_TruncF64asF32, + binop( Iop_RoundF64toF32, + get_IR_roundingmode(), + binop( Iop_I64StoF64, + get_IR_roundingmode(), + mkexpr( xB ) ) ) ) ) ); + assign( result32lo, + unop( Iop_ReinterpF32asI32, + unop( Iop_TruncF64asF32, + binop( Iop_RoundF64toF32, + get_IR_roundingmode(), + binop( Iop_I64StoF64, + get_IR_roundingmode(), + mkexpr( xB2 ) ) ) ) ) ); + putVSReg( XT, binop( Iop_64HLtoV128, binop( Iop_32HLto64, - unop( Iop_ReinterpF32asI32, - unop( Iop_TruncF64asF32, - binop( Iop_RoundF64toF32, - get_IR_roundingmode(), - binop( Iop_I64StoF64, - get_IR_roundingmode(), - mkexpr( xB ) ) ) ) ), - mkU32( 0 ) ), + mkexpr( result32hi ), + mkexpr( result32hi ) ), binop( Iop_32HLto64, - unop( Iop_ReinterpF32asI32, - unop( Iop_TruncF64asF32, - binop( Iop_RoundF64toF32, - get_IR_roundingmode(), - binop( Iop_I64StoF64, - get_IR_roundingmode(), - mkexpr( xB2 ) ) ) ) ), - mkU32( 0 ) ) ) ); - break; + mkexpr( result32lo ), + mkexpr( result32lo ) ) ) ); + } + break; case 0x350: // xvcvuxdsp (VSX Vector Convert and round Unsigned Integer Doubleword // to Single-Precision format) - DIP("xvcvuxddp v%u,v%u\n", XT, XB); + { + IRTemp result32hi = newTemp(Ity_I32); + IRTemp result32lo = newTemp(Ity_I32); + + DIP("xvcvuxdsp v%u,v%u\n", XT, XB); + assign( result32hi, + unop( Iop_ReinterpF32asI32, + unop( Iop_TruncF64asF32, + binop( Iop_RoundF64toF32, + 
get_IR_roundingmode(), + binop( Iop_I64UtoF64, + get_IR_roundingmode(), + mkexpr( xB ) ) ) ) ) ); + assign( result32lo, + unop( Iop_ReinterpF32asI32, + unop( Iop_TruncF64asF32, + binop( Iop_RoundF64toF32, + get_IR_roundingmode(), + binop( Iop_I64UtoF64, + get_IR_roundingmode(), + mkexpr( xB2 ) ) ) ) ) ); putVSReg( XT, binop( Iop_64HLtoV128, binop( Iop_32HLto64, - unop( Iop_ReinterpF32asI32, - unop( Iop_TruncF64asF32, - binop( Iop_RoundF64toF32, - get_IR_roundingmode(), - binop( Iop_I64UtoF64, - get_IR_roundingmode(), - mkexpr( xB ) ) ) ) ), - mkU32( 0 ) ), + mkexpr( result32hi ), + mkexpr( result32hi ) ), binop( Iop_32HLto64, - unop( Iop_ReinterpF32asI32, - unop( Iop_TruncF64asF32, - binop( Iop_RoundF64toF32, - get_IR_roundingmode(), - binop( Iop_I64UtoF64, - get_IR_roundingmode(), - mkexpr( xB2 ) ) ) ) ), - mkU32( 0 ) ) ) ); - break; + mkexpr( result32lo ), + mkexpr( result32lo ) ) ) ); + } + break; case 0x1f0: // xvcvsxwdp (VSX Vector Convert Signed Integer Word to Double-Precision format) DIP("xvcvsxwdp v%u,v%u\n", XT, XB);