From: Petar Jovanovic
Date: Thu, 1 Feb 2018 17:09:56 +0000 (+0100)
Subject: add Iops Iop_Rotx32 and Iop_Rotx64
X-Git-Tag: VALGRIND_3_14_0~170
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2671481a5f88329d5ad31545513fe925629fb973;p=thirdparty%2Fvalgrind.git

add Iops Iop_Rotx32 and Iop_Rotx64

Part of the MIPS32/64 Revision 6 changes.
Contributed by: Tamara Vlahovic, Aleksandar Rikalo and Aleksandra Karadzic.
Related BZ issue: #387410.
---
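Note for reviewers: both new ops are quaternary -- the I32/I64 value to be
transformed plus three I8 control operands, matching the QUATERNARY type
signatures below. A frontend would build them with the IRExpr_Qop
constructor from libvex_ir.h. A minimal sketch, assuming libvex_ir.h is
included; mk_rotx32 and its parameter names are illustrative placeholders,
not identifiers from this patch:

   /* Sketch only: wrap Iop_Rotx32 as a quaternary IR expression.
      `value' must be an expression of type Ity_I32; the three
      control operands are encoded as Ity_I8 constants. */
   static IRExpr* mk_rotx32 ( IRExpr* value,
                              UChar shift, UChar shiftx, UChar stripe )
   {
      return IRExpr_Qop(Iop_Rotx32,
                        value,                              /* Ity_I32 */
                        IRExpr_Const(IRConst_U8(shift)),    /* Ity_I8  */
                        IRExpr_Const(IRConst_U8(shiftx)),   /* Ity_I8  */
                        IRExpr_Const(IRConst_U8(stripe)));  /* Ity_I8  */
   }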
diff --git a/VEX/priv/ir_defs.c b/VEX/priv/ir_defs.c
index 8627beb545..2cffd0b571 100644
--- a/VEX/priv/ir_defs.c
+++ b/VEX/priv/ir_defs.c
@@ -1297,6 +1297,8 @@ void ppIROp ( IROp op )
       case Iop_BCDSub: vex_printf("BCDSub"); return;
       case Iop_I128StoBCD128: vex_printf("bcdcfsq."); return;
       case Iop_BCD128toI128S: vex_printf("bcdctsq."); return;
+      case Iop_Rotx32: vex_printf("bitswap"); return;
+      case Iop_Rotx64: vex_printf("dbitswap"); return;
 
       case Iop_PwBitMtxXpose64x2: vex_printf("BitMatrixTranspose64x2"); return;
 
@@ -3578,6 +3580,10 @@ void typeOfPrimop ( IROp op,
       case Iop_ShrN64x4:
       case Iop_SarN16x16: case Iop_SarN32x8:
          BINARY(Ity_V256,Ity_I8, Ity_V256);
+      case Iop_Rotx32:
+         QUATERNARY(Ity_I32, Ity_I8, Ity_I8, Ity_I8, Ity_I32);
+      case Iop_Rotx64:
+         QUATERNARY(Ity_I64, Ity_I8, Ity_I8, Ity_I8, Ity_I64);
 
       default:
          ppIROp(op);
diff --git a/VEX/pub/libvex_ir.h b/VEX/pub/libvex_ir.h
index ed7d52b273..2b07afd512 100644
--- a/VEX/pub/libvex_ir.h
+++ b/VEX/pub/libvex_ir.h
@@ -1936,6 +1936,7 @@ typedef
       Iop_Max32Fx8, Iop_Min32Fx8,
       Iop_Max64Fx4, Iop_Min64Fx4,
 
+      Iop_Rotx32, Iop_Rotx64,
       Iop_LAST      /* must be the last enumerator */
    }
    IROp;
diff --git a/memcheck/mc_translate.c b/memcheck/mc_translate.c
index ae7e472d13..6667191600 100644
--- a/memcheck/mc_translate.c
+++ b/memcheck/mc_translate.c
@@ -2012,6 +2012,30 @@ IRAtom* mkLazy4 ( MCEnv* mce, IRType finalVty,
       return at;
    }
 
+   if (t1 == Ity_I32 && t2 == Ity_I8 && t3 == Ity_I8 && t4 == Ity_I8
+       && finalVty == Ity_I32) {
+      if (0) VG_(printf)("mkLazy4: I32 x I8 x I8 x I8 -> I32\n");
+      at = mkPCastTo(mce, Ity_I8, va1);
+      /* Now fold in 2nd, 3rd, 4th args. */
+      at = mkUifU(mce, Ity_I8, at, va2);
+      at = mkUifU(mce, Ity_I8, at, va3);
+      at = mkUifU(mce, Ity_I8, at, va4);
+      at = mkPCastTo(mce, Ity_I32, at);
+      return at;
+   }
+
+   if (t1 == Ity_I64 && t2 == Ity_I8 && t3 == Ity_I8 && t4 == Ity_I8
+       && finalVty == Ity_I64) {
+      if (0) VG_(printf)("mkLazy4: I64 x I8 x I8 x I8 -> I64\n");
+      at = mkPCastTo(mce, Ity_I8, va1);
+      /* Now fold in 2nd, 3rd, 4th args. */
+      at = mkUifU(mce, Ity_I8, at, va2);
+      at = mkUifU(mce, Ity_I8, at, va3);
+      at = mkUifU(mce, Ity_I8, at, va4);
+      at = mkPCastTo(mce, Ity_I64, at);
+      return at;
+   }
+
    if (1) {
       VG_(printf)("mkLazy4: ");
       ppIRType(t1);
@@ -3020,6 +3044,11 @@ IRAtom* expr2vbits_Qop ( MCEnv* mce,
          return assignNew('V', mce, Ity_V256,
                           IRExpr_Qop(op, vatom1, vatom2, vatom3, vatom4));
 
+      /* I32/I64 x I8 x I8 x I8 -> I32/I64 */
+      case Iop_Rotx32:
+         return mkLazy4(mce, Ity_I32, vatom1, vatom2, vatom3, vatom4);
+      case Iop_Rotx64:
+         return mkLazy4(mce, Ity_I64, vatom1, vatom2, vatom3, vatom4);
       default:
          ppIROp(op);
          VG_(tool_panic)("memcheck:expr2vbits_Qop");
diff --git a/memcheck/tests/vbit-test/irops.c b/memcheck/tests/vbit-test/irops.c
index a969a14b26..adc7845aee 100644
--- a/memcheck/tests/vbit-test/irops.c
+++ b/memcheck/tests/vbit-test/irops.c
@@ -1128,6 +1128,8 @@ static irop_t irops[] = {
   { DEFOP(Iop_NCipherLV128, UNDEF_ALL_64x2), .ppc64 = 1, .ppc32 = 1 },
   { DEFOP(Iop_SHA512, UNDEF_SOME), .ppc64 = 1, .ppc32 = 1 },
   { DEFOP(Iop_SHA256, UNDEF_SOME), .ppc64 = 1, .ppc32 = 1 },
+  { DEFOP(Iop_Rotx32, UNDEF_ALL), },
+  { DEFOP(Iop_Rotx64, UNDEF_ALL), },
   { DEFOP(Iop_PwBitMtxXpose64x2, UNDEF_64x2_TRANSPOSE), .ppc64 = 1, .ppc32 = 1 },
 };
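A note on the memcheck rule: the mkLazy4 cases added above follow memcheck's
standard lazy shadow scheme. Each operand's V bits are pessimistically cast
(mkPCastTo) down to Ity_I8, the four narrowed shadows are unioned (mkUifU),
and the union is pessimistically widened to the result type. One undefined
bit in any operand therefore marks the whole result as undefined, which is
why the vbit-test entries use UNDEF_ALL. The same semantics on concrete
values, as a self-contained sketch (hypothetical helper names; the real
mkPCastTo/mkUifU emit IR atoms rather than compute values):

   #include <stdint.h>

   /* Shadow (V-bit) convention: a 1 bit means "undefined". */

   /* PCast: any undefined bit poisons the whole value. */
   static uint8_t  pcast_to_8 (uint32_t v) { return v ? 0xFF : 0x00; }
   static uint32_t pcast_to_32(uint8_t v)  { return v ? 0xFFFFFFFFu : 0; }

   /* Shadow rule for Iop_Rotx32(arg1:I32, arg2:I8, arg3:I8, arg4:I8). */
   static uint32_t shadow_rotx32(uint32_t v1, uint8_t v2,
                                 uint8_t v3, uint8_t v4)
   {
      uint8_t at = pcast_to_8(v1);  /* narrow 1st arg's shadow to I8 */
      at |= v2;                     /* UifU == bitwise OR of V bits  */
      at |= v3;
      at |= v4;
      return pcast_to_32(at);       /* widen to the result width     */
   }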