return at;
}
+ /* I32 x I8 x I64 -> I64 */
+ /* NOTE(review): presumably the "rm x small-int operand x DFP value"
+    idiom (e.g. a 64-bit significance-rounding op) — confirm against
+    the instrumentation call sites that reach mkLazy3 with this
+    signature. */
+ if (t1 == Ity_I32 && t2 == Ity_I8 && t3 == Ity_I64
+ && finalVty == Ity_I64) {
+ if (0) VG_(printf)("mkLazy3: I32 x I8 x I64 -> I64\n");
+ /* Widen 1st and 2nd args to I64. Since 1st arg is typically a
+ * rounding mode indication which is fully defined, this should
+ * get folded out later.
+ */
+ IRAtom* at1 = mkPCastTo(mce, Ity_I64, va1);
+ IRAtom* at2 = mkPCastTo(mce, Ity_I64, va2);
+ at = mkUifU(mce, Ity_I64, at1, at2); // UifU(PCast(va1), PCast(va2))
+ at = mkUifU(mce, Ity_I64, at, va3);
+ /* and PCast once again. */
+ at = mkPCastTo(mce, Ity_I64, at);
+ return at;
+ }
+
/* I32 x I64 x I64 -> I32 */
if (t1 == Ity_I32 && t2 == Ity_I64 && t3 == Ity_I64
&& finalVty == Ity_I32) {
at = mkPCastTo(mce, Ity_I128, at);
return at;
}
+
+ /* I32 x I8 x I128 -> I128 */
+ /* Standard FP idiom: rm x FParg1 x FParg2 -> FPresult */
+ if (t1 == Ity_I32 && t2 == Ity_I8 && t3 == Ity_I128
+ && finalVty == Ity_I128) {
+ if (0) VG_(printf)("mkLazy3: I32 x I8 x I128 -> I128\n");
+ /* Widen 1st and 2nd args to I128. Since 1st arg is typically a rounding
+ mode indication which is fully defined, this should get
+ folded out later.
+ All shadow values are kept at I128 so every mkUifU below
+ combines operands of one and the same width. */
+ IRAtom* at1 = mkPCastTo(mce, Ity_I128, va1);
+ IRAtom* at2 = mkPCastTo(mce, Ity_I128, va2);
+ /* Now fold in 2nd and 3rd args. */
+ at = mkUifU(mce, Ity_I128, at1, at2); // UifU(PCast(va1), PCast(va2))
+ at = mkUifU(mce, Ity_I128, at, va3);
+ /* and PCast once again. */
+ at = mkPCastTo(mce, Ity_I128, at);
+ return at;
+ }
if (1) {
VG_(printf)("mkLazy3: ");
ppIRType(t1);
UNARY(Ity_D32, Ity_D64);
case Iop_ExtractExpD64:
- UNARY(Ity_D64, Ity_D64);
+ /* The extracted exponent is delivered as a 64-bit integer,
+    not as a D64 value — hence Ity_I64, matching ExtractSigD64. */
+ UNARY(Ity_D64, Ity_I64);
case Iop_ExtractSigD64:
UNARY(Ity_D64, Ity_I64);
case Iop_InsertExpD64:
- BINARY(Ity_D64,Ity_D64, Ity_D64);
+ /* The exponent to insert is supplied as a 64-bit integer. */
+ BINARY(Ity_I64,Ity_D64, Ity_D64);
case Iop_ExtractExpD128:
- UNARY(Ity_D128, Ity_D64);
+ /* Same as ExtractExpD64: integer exponent out. */
+ UNARY(Ity_D128, Ity_I64);
case Iop_ExtractSigD128:
UNARY(Ity_D128, Ity_I64);
case Iop_InsertExpD128:
- BINARY(Ity_D64,Ity_D128, Ity_D128);
+ /* Integer exponent in, D128 value out. */
+ BINARY(Ity_I64,Ity_D128, Ity_D128);
case Iop_D64toD128:
UNARY(Ity_D64, Ity_D128);
case Iop_I32UtoD128:
UNARY(Ity_I32, Ity_D128);
- case Iop_I64StoD128: /* I64 bit pattern stored in Float register */
- UNARY(Ity_D64, Ity_D128);
+ /* The source really is an I64, so type it as such (parallel to
+    I64UtoD128 below). */
+ case Iop_I64StoD128:
+ UNARY(Ity_I64, Ity_D128);
case Iop_I64UtoD128:
UNARY(Ity_I64, Ity_D128);
BINARY(ity_RMode, Ity_D128, Ity_I32);
case Iop_D128toI64S:
- BINARY(ity_RMode, Ity_D128, Ity_D64);
+ /* Conversion to a 64-bit integer yields Ity_I64, as for
+    D128toI64U below. */
+ BINARY(ity_RMode, Ity_D128, Ity_I64);
case Iop_D128toI64U:
BINARY(ity_RMode, Ity_D128, Ity_I64);
case Iop_ShrD64:
BINARY(Ity_D64, Ity_I8, Ity_D64 );
- case Iop_D64toD32:
+ case Iop_D64toD32:
BINARY(ity_RMode, Ity_D64, Ity_D32);
case Iop_D64toI32S:
BINARY(ity_RMode, Ity_D64, Ity_I32);
case Iop_D64toI64S:
- BINARY(ity_RMode, Ity_D64, Ity_D64);
-
+ /* D64toI64S now falls through: it shares D64toI64U's
+    (rm, D64) -> I64 signature. */
case Iop_D64toI64U:
BINARY(ity_RMode, Ity_D64, Ity_I64);
case Iop_I32UtoD64:
UNARY(Ity_I32, Ity_D64);
- case Iop_I64StoD64: /* I64 bit pattern stored in Float register */
- BINARY(ity_RMode, Ity_D64, Ity_D64);
-
+ /* Likewise I64StoD64 shares I64UtoD64's (rm, I64) -> D64
+    signature. */
+ case Iop_I64StoD64:
case Iop_I64UtoD64:
BINARY(ity_RMode, Ity_I64, Ity_D64);
BINARY(Ity_D128,Ity_D128, Ity_I32);
case Iop_QuantizeD64:
- case Iop_SignificanceRoundD64:
TERNARY(ity_RMode,Ity_D64,Ity_D64, Ity_D64);
+ /* Significance rounding takes a small-integer digit count as its
+    2nd operand, not a D64, so it can no longer share QuantizeD64's
+    signature. */
+ case Iop_SignificanceRoundD64:
+ TERNARY(ity_RMode,Ity_I8,Ity_D64, Ity_D64);
+
case Iop_QuantizeD128:
- case Iop_SignificanceRoundD128:
TERNARY(ity_RMode,Ity_D128,Ity_D128, Ity_D128);
+ /* Same split as for the D64 variants above. */
+ case Iop_SignificanceRoundD128:
+ TERNARY(ity_RMode,Ity_I8,Ity_D128, Ity_D128);
+
case Iop_ShlD128:
case Iop_ShrD128:
BINARY(Ity_D128, Ity_I8, Ity_D128 );