vpanic("setFlags_DEP1_DEP2_shift(amd64)");
}
+ /* guard :: Ity_I8. We need to convert it to I1. */
+ IRTemp guardB = newTemp(Ity_I1);
+ assign( guardB, binop(Iop_CmpNE8, mkexpr(guard), mkU8(0)) );
+
/* DEP1 contains the result, DEP2 contains the undershifted value. */
stmt( IRStmt_Put( OFFB_CC_OP,
- IRExpr_Mux0X( mkexpr(guard),
+ IRExpr_Mux0X( mkexpr(guardB),
IRExpr_Get(OFFB_CC_OP,Ity_I64),
mkU64(ccOp))) );
stmt( IRStmt_Put( OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(guard),
+ IRExpr_Mux0X( mkexpr(guardB),
IRExpr_Get(OFFB_CC_DEP1,Ity_I64),
widenUto64(mkexpr(res)))) );
stmt( IRStmt_Put( OFFB_CC_DEP2,
- IRExpr_Mux0X( mkexpr(guard),
+ IRExpr_Mux0X( mkexpr(guardB),
IRExpr_Get(OFFB_CC_DEP2,Ity_I64),
widenUto64(mkexpr(resUS)))) );
}
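/* Informal sketch (not part of the patch) of what the guarded
   thunk update above computes, taking Mux0X(c, e0, eX) to mean
   "c ? eX : e0":

      CC_OP   = guard ? ccOp          : CC_OP;    // unchanged if !guard
      CC_DEP1 = guard ? (ULong)res    : CC_DEP1;
      CC_DEP2 = guard ? (ULong)resUS  : CC_DEP2;

   The only change in this hunk is that 'guard' is narrowed from
   Ity_I8 to an Ity_I1 bool (guardB) before use as a Mux0X
   condition. */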
assign(oldFlags, mk_amd64g_calculate_rflags_all());
+ /* rot_amt64 :: Ity_I8. We need to convert it to I1. */
+ IRTemp rot_amt64b = newTemp(Ity_I1);
+ assign(rot_amt64b, binop(Iop_CmpNE8, mkexpr(rot_amt64), mkU8(0)) );
+
/* CC_DEP1 is the rotated value. CC_NDEP is flags before. */
stmt( IRStmt_Put( OFFB_CC_OP,
- IRExpr_Mux0X( mkexpr(rot_amt64),
+ IRExpr_Mux0X( mkexpr(rot_amt64b),
IRExpr_Get(OFFB_CC_OP,Ity_I64),
mkU64(ccOp))) );
stmt( IRStmt_Put( OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(rot_amt64),
+ IRExpr_Mux0X( mkexpr(rot_amt64b),
IRExpr_Get(OFFB_CC_DEP1,Ity_I64),
widenUto64(mkexpr(dst1)))) );
stmt( IRStmt_Put( OFFB_CC_DEP2,
- IRExpr_Mux0X( mkexpr(rot_amt64),
+ IRExpr_Mux0X( mkexpr(rot_amt64b),
IRExpr_Get(OFFB_CC_DEP2,Ity_I64),
mkU64(0))) );
stmt( IRStmt_Put( OFFB_CC_NDEP,
- IRExpr_Mux0X( mkexpr(rot_amt64),
+ IRExpr_Mux0X( mkexpr(rot_amt64b),
IRExpr_Get(OFFB_CC_NDEP,Ity_I64),
mkexpr(oldFlags))) );
} /* if (isRotate) */
IRTemp res64 = newTemp(Ity_I64);
assign(res64,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpEQ64, mkexpr(src64x), mkU64(0))),
+ binop(Iop_CmpEQ64, mkexpr(src64x), mkU64(0)),
unop(Iop_Clz64, mkexpr(src64x)),
mkU64(8 * sizeofIRType(ty))
));
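/* Illustration only: since Iop_Clz64 is undefined for a zero
   argument, the Mux0X above amounts to

      res64 = (src64x == 0) ? 8 * sizeofIRType(ty)   // operand width
                            : clz64(src64x);

   The change is just that the CmpEQ64 result now feeds Mux0X
   directly as an I1, without the old 1Uto8 widening. */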
static void put_ST ( Int i, IRExpr* value )
{
- put_ST_UNCHECKED( i,
- IRExpr_Mux0X( get_ST_TAG(i),
- /* 0 means empty */
- value,
- /* non-0 means full */
- mkQNaN64()
- )
+ put_ST_UNCHECKED(
+ i,
+ IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
+ /* 0 means empty */
+ value,
+ /* non-0 means full */
+ mkQNaN64()
+ )
);
}
static IRExpr* get_ST ( Int i )
{
return
- IRExpr_Mux0X( get_ST_TAG(i),
+ IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
/* 0 means empty */
mkQNaN64(),
/* non-0 means full */
assign( t32, e32 );
return
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpLT64U,
- unop(Iop_32Uto64,
- binop(Iop_Add32, mkexpr(t32), mkU32(32768))),
- mkU64(65536))),
+ binop(Iop_CmpLT64U,
+ unop(Iop_32Uto64,
+ binop(Iop_Add32, mkexpr(t32), mkU32(32768))),
+ mkU64(65536)),
mkU16( 0x8000 ),
unop(Iop_32to16, mkexpr(t32)));
}
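/* Illustrative C for the expression above (a sketch, not part of
   the patch): narrow a 32-bit value to 16 bits, saturating to
   0x8000 when it is outside the signed-16-bit range:

      res = ((ULong)(UInt)(t32 + 32768) < 65536)  // in [-32768,32767]?
               ? (UShort)t32                      // yes: truncate
               : 0x8000;                          // no: saturate

   Mux0X yields its expr0 operand (0x8000 here) when the condition
   is false. */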
DIP("fcmovb %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_amd64g_calculate_condition(AMD64CondB)),
+ mk_amd64g_calculate_condition(AMD64CondB),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovz %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_amd64g_calculate_condition(AMD64CondZ)),
+ mk_amd64g_calculate_condition(AMD64CondZ),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovbe %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_amd64g_calculate_condition(AMD64CondBE)),
+ mk_amd64g_calculate_condition(AMD64CondBE),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovu %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_amd64g_calculate_condition(AMD64CondP)),
+ mk_amd64g_calculate_condition(AMD64CondP),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovnb %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_amd64g_calculate_condition(AMD64CondNB)),
+ mk_amd64g_calculate_condition(AMD64CondNB),
get_ST(0), get_ST(r_src)) );
break;
put_ST_UNCHECKED(
0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_amd64g_calculate_condition(AMD64CondNZ)),
+ mk_amd64g_calculate_condition(AMD64CondNZ),
get_ST(0),
get_ST(r_src)
)
put_ST_UNCHECKED(
0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_amd64g_calculate_condition(AMD64CondNBE)),
+ mk_amd64g_calculate_condition(AMD64CondNBE),
get_ST(0),
get_ST(r_src)
)
put_ST_UNCHECKED(
0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_amd64g_calculate_condition(AMD64CondNP)),
+ mk_amd64g_calculate_condition(AMD64CondNP),
get_ST(0),
get_ST(r_src)
)
assign(
g1,
IRExpr_Mux0X(
- unop(Iop_1Uto8,binop(Iop_CmpLT64U,mkexpr(amt),mkU64(size))),
+ binop(Iop_CmpLT64U,mkexpr(amt),mkU64(size)),
mkU64(0),
binop(op, mkexpr(g0), mkexpr(amt8))
)
assign(
g1,
IRExpr_Mux0X(
- unop(Iop_1Uto8,binop(Iop_CmpLT64U,mkexpr(amt),mkU64(size))),
+ binop(Iop_CmpLT64U,mkexpr(amt),mkU64(size)),
binop(op, mkexpr(g0), mkU8(size-1)),
binop(op, mkexpr(g0), mkexpr(amt8))
)
*/
return
IRExpr_Mux0X(
- mkexpr(amt),
+ binop(Iop_CmpNE8, mkexpr(amt), mkU8(0)),
mkexpr(base),
binop(Iop_Or64,
binop(Iop_Shl64, mkexpr(base), mkexpr(amt)),
*/
return
IRExpr_Mux0X(
- mkexpr(amt),
+ binop(Iop_CmpNE8, mkexpr(amt), mkU8(0)),
mkexpr(base),
binop(Iop_Or64,
binop(Iop_Shr64, mkexpr(base), mkexpr(amt)),
IRTemp dst = newTemp(ty);
IRTemp src64 = newTemp(Ity_I64);
IRTemp dst64 = newTemp(Ity_I64);
- IRTemp src8 = newTemp(Ity_I8);
+ IRTemp srcB = newTemp(Ity_I1);
vassert(sz == 8 || sz == 4 || sz == 2);
/* First, widen src to 64 bits if it is not already. */
assign( src64, widenUto64(mkexpr(src)) );
- /* Generate an 8-bit expression which is zero iff the original is
+ /* Generate a Bool (Ity_I1) expression which is zero iff the original is
zero, and nonzero otherwise. Ask for a CmpNE version which, if
instrumented by Memcheck, is instrumented expensively, since
this may be used on the output of a preceding movmskb insn,
which has been known to be partially defined, and in need of
careful handling. */
- assign( src8,
- unop(Iop_1Uto8,
- binop(Iop_ExpCmpNE64,
- mkexpr(src64), mkU64(0))) );
+ assign( srcB, binop(Iop_ExpCmpNE64, mkexpr(src64), mkU64(0)) );
/* Flags: Z is 1 iff source value is zero. All others
are undefined -- we force them to zero. */
stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
stmt( IRStmt_Put(
OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(src8),
+ IRExpr_Mux0X( mkexpr(srcB),
/* src==0 */
mkU64(AMD64G_CC_MASK_Z),
/* src!=0 */
/* The main computation, guarding against zero. */
assign( dst64,
IRExpr_Mux0X(
- mkexpr(src8),
+ mkexpr(srcB),
/* src == 0 -- leave dst unchanged */
widenUto64( getIRegG( sz, pfx, modrm ) ),
/* src != 0 */
IRTemp dest = newTemp(ty);
IRTemp dest2 = newTemp(ty);
IRTemp acc2 = newTemp(ty);
- IRTemp cond8 = newTemp(Ity_I8);
+ IRTemp cond = newTemp(Ity_I1);
IRTemp addr = IRTemp_INVALID;
UChar rm = getUChar(delta0);
assign( src, getIRegG(size, pfx, rm) );
assign( acc, getIRegRAX(size) );
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
- assign( cond8, unop(Iop_1Uto8, mk_amd64g_calculate_condition(AMD64CondZ)) );
- assign( dest2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(src)) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(acc)) );
+ assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) );
+ assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) );
+ assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
putIRegRAX(size, mkexpr(acc2));
putIRegE(size, pfx, rm, mkexpr(dest2));
DIP("cmpxchg%c %s,%s\n", nameISize(size),
assign( src, getIRegG(size, pfx, rm) );
assign( acc, getIRegRAX(size) );
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
- assign( cond8, unop(Iop_1Uto8, mk_amd64g_calculate_condition(AMD64CondZ)) );
- assign( dest2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(src)) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(acc)) );
+ assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) );
+ assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) );
+ assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
putIRegRAX(size, mkexpr(acc2));
storeLE( mkexpr(addr), mkexpr(dest2) );
DIP("cmpxchg%c %s,%s\n", nameISize(size),
NULL, mkexpr(acc), NULL, mkexpr(src) )
));
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
- assign( cond8, unop(Iop_1Uto8, mk_amd64g_calculate_condition(AMD64CondZ)) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(acc)) );
+ assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) );
+ assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
putIRegRAX(size, mkexpr(acc2));
DIP("cmpxchg%c %s,%s\n", nameISize(size),
nameIRegG(size,pfx,rm), dis_buf);
assign( tmpd, getIRegG(sz, pfx, rm) );
putIRegG( sz, pfx, rm,
- IRExpr_Mux0X( unop(Iop_1Uto8,
- mk_amd64g_calculate_condition(cond)),
+ IRExpr_Mux0X( mk_amd64g_calculate_condition(cond),
mkexpr(tmpd),
mkexpr(tmps) )
);
assign( tmpd, getIRegG(sz, pfx, rm) );
putIRegG( sz, pfx, rm,
- IRExpr_Mux0X( unop(Iop_1Uto8,
- mk_amd64g_calculate_condition(cond)),
+ IRExpr_Mux0X( mk_amd64g_calculate_condition(cond),
mkexpr(tmpd),
mkexpr(tmps) )
);
assign(
g1,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size))),
+ binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
mkV128(0x0000),
binop(op, mkexpr(g0), mkexpr(amt8))
)
assign(
g1,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size))),
+ binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
binop(op, mkexpr(g0), mkU8(size-1)),
binop(op, mkexpr(g0), mkexpr(amt8))
)
return delta;
}
-/* Returns Ity_I32 */
-static IRExpr *IRExpr_getMSBs16x8(IRExpr *exp)
-{
- IRTemp lo = newTemp(Ity_I64);
- IRTemp hi = newTemp(Ity_I64);
- assign(lo, unop(Iop_V128to64, exp));
- assign(hi, unop(Iop_V128HIto64, exp));
- return unop(Iop_16Uto32,
- binop(Iop_8HLto16,
- unop(Iop_GetMSBs8x8, mkexpr(hi)),
- unop(Iop_GetMSBs8x8, mkexpr(lo))));
-}
-
-static IRExpr *IRExpr_ctz32(IRExpr *exp)
+static IRExpr* math_CTZ32(IRExpr *exp)
{
- /* Iop_Ctz32 appears to be broken, so use Iop_Ctz64. */
+ /* Iop_Ctz32 isn't implemented by the amd64 back end, so use Iop_Ctz64. */
return unop(Iop_64to32, unop(Iop_Ctz64, unop(Iop_32Uto64, exp)));
}
-/* For expression representing x, return !!x */
-static IRExpr* IRExpr_notnot(IRExpr *exp)
-{
- /* Iop_ExpCmpNE32 appears broken, so use Iop_ExpCmpNE64. */
- return unop(Iop_1Uto32, binop(Iop_ExpCmpNE64, unop(Iop_32Uto64, exp),
- mkU64(0)));
-}
-
static Long dis_PCMPISTRI_3A ( UChar modrm, UInt regNoL, UInt regNoR,
Long delta, UChar opc, UChar imm,
HChar dis_buf[])
assign(argR, getXMMReg(regNoR));
IRTemp zmaskL = newTemp(Ity_I32);
- assign(zmaskL, IRExpr_getMSBs16x8(binop(Iop_CmpEQ8x16, mkexpr(argL),
- mkV128(0))));
+ assign(zmaskL, unop(Iop_16Uto32,
+ unop(Iop_GetMSBs8x16,
+ binop(Iop_CmpEQ8x16, mkexpr(argL), mkV128(0)))));
IRTemp zmaskR = newTemp(Ity_I32);
- assign(zmaskR, IRExpr_getMSBs16x8(binop(Iop_CmpEQ8x16, mkexpr(argR),
- mkV128(0))));
+ assign(zmaskR, unop(Iop_16Uto32,
+ unop(Iop_GetMSBs8x16,
+ binop(Iop_CmpEQ8x16, mkexpr(argR), mkV128(0)))));
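/* What the two assignments above compute, informally: bit i of
   zmaskL (resp. zmaskR) is set iff byte i of argL (resp. argR) is
   zero.  The change simply replaces the hand-rolled
   IRExpr_getMSBs16x8 helper with the single Iop_GetMSBs8x16 primop,
   which gathers the per-byte MSBs of the CmpEQ8x16 result in one
   operation. */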
/* We want validL = ~(zmaskL | -zmaskL); equivalently, in a form
   through which Memcheck can propagate definedness:
      validL = (zmaskL ? (1 << ctz(zmaskL)) : 0) - 1
*/
- IRExpr *ctzL = unop(Iop_32to8, IRExpr_ctz32(mkexpr(zmaskL)));
+ IRExpr *ctzL = unop(Iop_32to8, math_CTZ32(mkexpr(zmaskL)));
- /* Generate an 8-bit expression which is zero iff the original is
+ /* Generate a Bool (Ity_I1) expression which is zero iff the original is
zero. Do this carefully so memcheck can propagate validity bits
correctly.
*/
- IRTemp zmaskL_zero = newTemp(Ity_I32);
- assign(zmaskL_zero, IRExpr_notnot(mkexpr(zmaskL)));
+ IRTemp zmaskL_zero = newTemp(Ity_I1);
+ assign(zmaskL_zero, binop(Iop_ExpCmpNE32, mkexpr(zmaskL), mkU32(0)));
IRTemp validL = newTemp(Ity_I32);
assign(validL, binop(Iop_Sub32,
- IRExpr_Mux0X(unop(Iop_32to8, mkexpr(zmaskL_zero)),
+ IRExpr_Mux0X(mkexpr(zmaskL_zero),
mkU32(0),
binop(Iop_Shl32, mkU32(1), ctzL)),
mkU32(1)));
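/* Worked example, for illustration: if zmaskL == 0x0004 (byte 2 of
   argL is the first zero byte), then ctz(zmaskL) == 2 and
   validL == (1 << 2) - 1 == 0x0003, marking exactly the bytes
   before the terminator as valid.  If zmaskL == 0 (no zero byte),
   validL == 0 - 1 == 0xFFFFFFFF, marking all bytes valid. */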
/* And similarly for validR. */
- IRExpr *ctzR = unop(Iop_32to8, IRExpr_ctz32(mkexpr(zmaskR)));
- IRTemp zmaskR_zero = newTemp(Ity_I32);
- assign(zmaskR_zero, IRExpr_notnot(mkexpr(zmaskR)));
+ IRExpr *ctzR = unop(Iop_32to8, math_CTZ32(mkexpr(zmaskR)));
+ IRTemp zmaskR_zero = newTemp(Ity_I1);
+ assign(zmaskR_zero, binop(Iop_ExpCmpNE32, mkexpr(zmaskR), mkU32(0)));
IRTemp validR = newTemp(Ity_I32);
assign(validR, binop(Iop_Sub32,
- IRExpr_Mux0X(unop(Iop_32to8, mkexpr(zmaskR_zero)),
+ IRExpr_Mux0X(mkexpr(zmaskR_zero),
mkU32(0),
binop(Iop_Shl32, mkU32(1), ctzR)),
mkU32(1)));
/* Do the actual comparison. */
- IRExpr *boolResII = IRExpr_getMSBs16x8(binop(Iop_CmpEQ8x16,
- mkexpr(argL),
- mkexpr(argR)));
+ IRExpr *boolResII = unop(Iop_16Uto32,
+ unop(Iop_GetMSBs8x16,
+ binop(Iop_CmpEQ8x16, mkexpr(argL),
+ mkexpr(argR))));
/* Compute boolresII & validL & validR (i.e., if both valid, use
comparison result) */
/* If the 0x40 bit were set in imm=0x3A, we would return the index
of the msb. Since it is clear, we return the index of the
lsb. */
- IRExpr *newECX = IRExpr_ctz32(binop(Iop_Or32,
- mkexpr(intRes2), mkU32(0x10000)));
+ IRExpr *newECX = math_CTZ32(binop(Iop_Or32,
+ mkexpr(intRes2), mkU32(0x10000)));
/* And that's our rcx. */
putIReg32(R_RCX, newECX);
/* Now for the condition codes... */
/* C == 0 iff intRes2 == 0 */
- IRExpr *c_bit = binop(Iop_Shl32, IRExpr_notnot(mkexpr(intRes2)),
- mkU8(AMD64G_CC_SHIFT_C));
+ IRExpr *c_bit = IRExpr_Mux0X( binop(Iop_ExpCmpNE32, mkexpr(intRes2),
+ mkU32(0)),
+ mkU32(0),
+ mkU32(1 << AMD64G_CC_SHIFT_C) );
/* Z == 1 iff any in argL is 0 */
- IRExpr *z_bit = binop(Iop_Shl32, mkexpr(zmaskL_zero),
- mkU8(AMD64G_CC_SHIFT_Z));
+ IRExpr *z_bit = IRExpr_Mux0X( mkexpr(zmaskL_zero),
+ mkU32(0),
+ mkU32(1 << AMD64G_CC_SHIFT_Z) );
/* S == 1 iff any in argR is 0 */
- IRExpr *s_bit = binop(Iop_Shl32, mkexpr(zmaskR_zero),
- mkU8(AMD64G_CC_SHIFT_S));
+ IRExpr *s_bit = IRExpr_Mux0X( mkexpr(zmaskR_zero),
+ mkU32(0),
+ mkU32(1 << AMD64G_CC_SHIFT_S) );
/* O == IntRes2[0] */
IRExpr *o_bit = binop(Iop_Shl32, binop(Iop_And32, mkexpr(intRes2),
mkU32(0x01)),
delta += alen+1;
}
+ /* Print the insn here, since dis_PCMPISTRI_3A doesn't do so
+ itself. */
+ if (regNoL == 16) {
+ DIP("%spcmp%cstr%c $%x,%s,%s\n",
+ isAvx ? "v" : "", isISTRx ? 'i' : 'e', isxSTRM ? 'm' : 'i',
+ (UInt)imm, dis_buf, nameXMMReg(regNoR));
+ } else {
+ DIP("%spcmp%cstr%c $%x,%s,%s\n",
+ isAvx ? "v" : "", isISTRx ? 'i' : 'e', isxSTRM ? 'm' : 'i',
+ (UInt)imm, nameXMMReg(regNoL), nameXMMReg(regNoR));
+ }
+
+ /* Handle special case(s). */
if (imm == 0x3A && isISTRx && !isxSTRM) {
return dis_PCMPISTRI_3A ( modrm, regNoL, regNoR, delta,
opc, imm, dis_buf);
stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
- if (regNoL == 16) {
- DIP("%spcmp%cstr%c $%x,%s,%s\n",
- isAvx ? "v" : "", isISTRx ? 'i' : 'e', isxSTRM ? 'm' : 'i',
- (UInt)imm, dis_buf, nameXMMReg(regNoR));
- } else {
- DIP("%spcmp%cstr%c $%x,%s,%s\n",
- isAvx ? "v" : "", isISTRx ? 'i' : 'e', isxSTRM ? 'm' : 'i',
- (UInt)imm, nameXMMReg(regNoL), nameXMMReg(regNoR));
- }
-
return delta;
}
stmt( IRStmt_Put(
OFFB_DFLAG,
IRExpr_Mux0X(
- unop(Iop_32to8,
- unop(Iop_64to32,
+ unop(Iop_64to1,
binop(Iop_And64,
binop(Iop_Shr64, mkexpr(t1), mkU8(10)),
- mkU64(1)))),
+ mkU64(1))),
mkU64(1),
mkU64(0xFFFFFFFFFFFFFFFFULL)))
);
stmt( IRStmt_Put(
OFFB_IDFLAG,
IRExpr_Mux0X(
- unop(Iop_32to8,
- unop(Iop_64to32,
+ unop(Iop_64to1,
binop(Iop_And64,
binop(Iop_Shr64, mkexpr(t1), mkU8(21)),
- mkU64(1)))),
+ mkU64(1))),
mkU64(0),
mkU64(1)))
);
stmt( IRStmt_Put(
OFFB_ACFLAG,
IRExpr_Mux0X(
- unop(Iop_32to8,
- unop(Iop_64to32,
+ unop(Iop_64to1,
binop(Iop_And64,
binop(Iop_Shr64, mkexpr(t1), mkU8(18)),
- mkU64(1)))),
+ mkU64(1))),
mkU64(0),
mkU64(1)))
);
expdHi64:expdLo64, even if we're doing a cmpxchg8b. */
/* It's just _so_ much fun ... */
putIRegRDX( 8,
- IRExpr_Mux0X( unop(Iop_1Uto8, mkexpr(success)),
+ IRExpr_Mux0X( mkexpr(success),
sz == 4 ? unop(Iop_32Uto64, mkexpr(oldHi))
: mkexpr(oldHi),
mkexpr(expdHi64)
));
putIRegRAX( 8,
- IRExpr_Mux0X( unop(Iop_1Uto8, mkexpr(success)),
+ IRExpr_Mux0X( mkexpr(success),
sz == 4 ? unop(Iop_32Uto64, mkexpr(oldLo))
: mkexpr(oldLo),
mkexpr(expdLo64)
assign(
g1,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size))),
+ binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
mkV128(0x0000),
binop(op, mkexpr(g0), mkexpr(amt8))
)
assign(
g1,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size))),
+ binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
binop(op, mkexpr(g0), mkU8(size-1)),
binop(op, mkexpr(g0), mkexpr(amt8))
)
breakupV128to64s( dataV, &dHi, &dLo );
breakupV128to64s( ctrlV, &cHi, &cLo );
IRExpr* rHi
- = IRExpr_Mux0X( unop(Iop_64to8,
- binop(Iop_And64, mkexpr(cHi), mkU64(2))),
+ = IRExpr_Mux0X( unop(Iop_64to1,
+ binop(Iop_Shr64, mkexpr(cHi), mkU8(1))),
mkexpr(dLo), mkexpr(dHi) );
IRExpr* rLo
- = IRExpr_Mux0X( unop(Iop_64to8,
- binop(Iop_And64, mkexpr(cLo), mkU64(2))),
+ = IRExpr_Mux0X( unop(Iop_64to1,
+ binop(Iop_Shr64, mkexpr(cLo), mkU8(1))),
mkexpr(dLo), mkexpr(dHi) );
IRTemp res = newTemp(Ity_V128);
assign(res, binop(Iop_64HLtoV128, rHi, rLo));
*/
return
IRExpr_Mux0X(
- unop(Iop_32to8, cc_ndep),
+ binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
/* case oldC == 0 */
unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1)),
/* case oldC != 0 */
llPutIReg( iregNo, e );
} else {
llPutIReg( iregNo,
- IRExpr_Mux0X( unop(Iop_32to8, mkexpr(guardT)),
+ IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
llGetIReg(iregNo),
e ));
}
llPutIReg( iregNo, e );
} else {
llPutIReg( iregNo,
- IRExpr_Mux0X( unop(Iop_32to8, mkexpr(guardT)),
+ IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
llGetIReg(iregNo),
e ));
}
llPutDReg( dregNo, e );
} else {
llPutDReg( dregNo,
- IRExpr_Mux0X( unop(Iop_32to8, mkexpr(guardT)),
+ IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
llGetDReg(dregNo),
e ));
}
llPutDRegI64( dregNo, e );
} else {
llPutDRegI64( dregNo,
- IRExpr_Mux0X( unop(Iop_32to8, mkexpr(guardT)),
+ IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
llGetDRegI64(dregNo),
e ));
}
llPutQReg( qregNo, e );
} else {
llPutQReg( qregNo,
- IRExpr_Mux0X( unop(Iop_32to8, mkexpr(guardT)),
+ IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
llGetQReg(qregNo),
e ));
}
llPutFReg( fregNo, e );
} else {
llPutFReg( fregNo,
- IRExpr_Mux0X( unop(Iop_32to8, mkexpr(guardT)),
+ IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
llGetFReg(fregNo),
e ));
}
} else {
stmt(IRStmt_Put(
gsoffset,
- IRExpr_Mux0X( unop(Iop_32to8, mkexpr(guardT)),
+ IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
IRExpr_Get(gsoffset, Ity_I32),
e
)
IRTemp t_dep2, IRTemp t_ndep,
IRTemp guardT /* :: Ity_I32, 0 or 1 */ )
{
- IRTemp c8;
vassert(typeOfIRTemp(irsb->tyenv, t_dep1) == Ity_I32);
vassert(typeOfIRTemp(irsb->tyenv, t_dep2) == Ity_I32);
vassert(typeOfIRTemp(irsb->tyenv, t_ndep) == Ity_I32);
stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(t_ndep) ));
} else {
/* conditional */
- c8 = newTemp(Ity_I8);
- assign( c8, unop(Iop_32to8, mkexpr(guardT)) );
+ IRTemp c1 = newTemp(Ity_I1);
+ assign( c1, binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)) );
stmt( IRStmt_Put(
OFFB_CC_OP,
- IRExpr_Mux0X( mkexpr(c8),
+ IRExpr_Mux0X( mkexpr(c1),
IRExpr_Get(OFFB_CC_OP, Ity_I32),
mkU32(cc_op) )));
stmt( IRStmt_Put(
OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(c8),
+ IRExpr_Mux0X( mkexpr(c1),
IRExpr_Get(OFFB_CC_DEP1, Ity_I32),
mkexpr(t_dep1) )));
stmt( IRStmt_Put(
OFFB_CC_DEP2,
- IRExpr_Mux0X( mkexpr(c8),
+ IRExpr_Mux0X( mkexpr(c1),
IRExpr_Get(OFFB_CC_DEP2, Ity_I32),
mkexpr(t_dep2) )));
stmt( IRStmt_Put(
OFFB_CC_NDEP,
- IRExpr_Mux0X( mkexpr(c8),
+ IRExpr_Mux0X( mkexpr(c1),
IRExpr_Get(OFFB_CC_NDEP, Ity_I32),
mkexpr(t_ndep) )));
}
UInt ceil = (1 << imm5) - 1; // (2^imm5)-1
UInt floor = 0;
- IRTemp node0 = newTemp(Ity_I32);
- IRTemp node1 = newTemp(Ity_I32);
- IRTemp node2 = newTemp(Ity_I1);
- IRTemp node3 = newTemp(Ity_I32);
- IRTemp node4 = newTemp(Ity_I32);
- IRTemp node5 = newTemp(Ity_I1);
- IRTemp node6 = newTemp(Ity_I32);
-
- assign( node0, mkexpr(regT) );
- assign( node1, mkU32(ceil) );
- assign( node2, binop( Iop_CmpLT32S, mkexpr(node1), mkexpr(node0) ) );
- assign( node3, IRExpr_Mux0X( unop(Iop_1Uto8, mkexpr(node2)),
- mkexpr(node0),
- mkexpr(node1) ) );
- assign( node4, mkU32(floor) );
- assign( node5, binop( Iop_CmpLT32S, mkexpr(node3), mkexpr(node4) ) );
- assign( node6, IRExpr_Mux0X( unop(Iop_1Uto8, mkexpr(node5)),
- mkexpr(node3),
- mkexpr(node4) ) );
- assign( *res, mkexpr(node6) );
+ IRTemp nd0 = newTemp(Ity_I32);
+ IRTemp nd1 = newTemp(Ity_I32);
+ IRTemp nd2 = newTemp(Ity_I1);
+ IRTemp nd3 = newTemp(Ity_I32);
+ IRTemp nd4 = newTemp(Ity_I32);
+ IRTemp nd5 = newTemp(Ity_I1);
+ IRTemp nd6 = newTemp(Ity_I32);
+
+ assign( nd0, mkexpr(regT) );
+ assign( nd1, mkU32(ceil) );
+ assign( nd2, binop( Iop_CmpLT32S, mkexpr(nd1), mkexpr(nd0) ) );
+ assign( nd3, IRExpr_Mux0X(mkexpr(nd2), mkexpr(nd0), mkexpr(nd1)) );
+ assign( nd4, mkU32(floor) );
+ assign( nd5, binop( Iop_CmpLT32S, mkexpr(nd3), mkexpr(nd4) ) );
+ assign( nd6, IRExpr_Mux0X(mkexpr(nd5), mkexpr(nd3), mkexpr(nd4)) );
+ assign( *res, mkexpr(nd6) );
/* If saturation occurred, then resQ is set to some nonzero value;
   if sat did not occur, resQ is guaranteed to be zero. */
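/* In plain C, the nd0..nd6 chain above is just an unsigned
   saturating clamp (sketch only):

      res = ((Int)regT > (Int)ceil) ? ceil : regT;   // nd2/nd3
      res = ((Int)res  < 0)         ? 0    : res;    // nd5/nd6 (floor == 0)

   The hunk merely renames the temporaries and drops the 1Uto8
   widenings that the Mux0X conditions no longer need. */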
Int ceil = (1 << (imm5-1)) - 1; // (2^(imm5-1))-1
Int floor = -(1 << (imm5-1)); // -(2^(imm5-1))
- IRTemp node0 = newTemp(Ity_I32);
- IRTemp node1 = newTemp(Ity_I32);
- IRTemp node2 = newTemp(Ity_I1);
- IRTemp node3 = newTemp(Ity_I32);
- IRTemp node4 = newTemp(Ity_I32);
- IRTemp node5 = newTemp(Ity_I1);
- IRTemp node6 = newTemp(Ity_I32);
-
- assign( node0, mkexpr(regT) );
- assign( node1, mkU32(ceil) );
- assign( node2, binop( Iop_CmpLT32S, mkexpr(node1), mkexpr(node0) ) );
- assign( node3, IRExpr_Mux0X( unop(Iop_1Uto8, mkexpr(node2)),
- mkexpr(node0), mkexpr(node1) ) );
- assign( node4, mkU32(floor) );
- assign( node5, binop( Iop_CmpLT32S, mkexpr(node3), mkexpr(node4) ) );
- assign( node6, IRExpr_Mux0X( unop(Iop_1Uto8, mkexpr(node5)),
- mkexpr(node3), mkexpr(node4) ) );
- assign( *res, mkexpr(node6) );
+ IRTemp nd0 = newTemp(Ity_I32);
+ IRTemp nd1 = newTemp(Ity_I32);
+ IRTemp nd2 = newTemp(Ity_I1);
+ IRTemp nd3 = newTemp(Ity_I32);
+ IRTemp nd4 = newTemp(Ity_I32);
+ IRTemp nd5 = newTemp(Ity_I1);
+ IRTemp nd6 = newTemp(Ity_I32);
+
+ assign( nd0, mkexpr(regT) );
+ assign( nd1, mkU32(ceil) );
+ assign( nd2, binop( Iop_CmpLT32S, mkexpr(nd1), mkexpr(nd0) ) );
+ assign( nd3, IRExpr_Mux0X( mkexpr(nd2), mkexpr(nd0), mkexpr(nd1) ) );
+ assign( nd4, mkU32(floor) );
+ assign( nd5, binop( Iop_CmpLT32S, mkexpr(nd3), mkexpr(nd4) ) );
+ assign( nd6, IRExpr_Mux0X( mkexpr(nd5), mkexpr(nd3), mkexpr(nd4) ) );
+ assign( *res, mkexpr(nd6) );
/* If saturation occurred, then resQ is set to some nonzero value;
   if sat did not occur, resQ is guaranteed to be zero. */
assign(
*newC,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0))),
+ binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0)),
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32))),
+ binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32)),
mkU32(0),
binop(Iop_And32,
binop(Iop_Shr32,
assign(
*newC,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0))),
+ binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0)),
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32))),
+ binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32)),
mkU32(0),
binop(Iop_And32,
binop(Iop_Shr32,
assign(
*newC,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0))),
+ binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0)),
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32))),
+ binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32)),
binop(Iop_And32,
binop(Iop_Shr32,
mkexpr(rMt),
unop(
Iop_32to8,
IRExpr_Mux0X(
- unop(
- Iop_1Uto8,
- binop(Iop_CmpLT32U, mkexpr(amtT), mkU32(32))),
+ binop(Iop_CmpLT32U, mkexpr(amtT), mkU32(32)),
mkU32(31),
mkexpr(amtT)))));
DIS(buf, "r%u, ASR r%u", rM, rS);
assign(
*newC,
IRExpr_Mux0X(
- unop(Iop_32to8, mkexpr(amtT)),
+ binop(Iop_CmpNE32, mkexpr(amtT), mkU32(0)),
mkexpr(oldC),
binop(Iop_And32,
binop(Iop_Shr32,
assign(
*res,
IRExpr_Mux0X(
- unop(Iop_32to8, mkexpr(amt5T)), mkexpr(rMt),
+ binop(Iop_CmpNE32, mkexpr(amt5T), mkU32(0)),
+ mkexpr(rMt),
binop(Iop_Or32,
binop(Iop_Shr32,
mkexpr(rMt),
IRTemp res = newTemp(Ity_I32);
assign(arg, getIRegA(rM));
assign(res, IRExpr_Mux0X(
- unop(Iop_1Uto8,binop(Iop_CmpEQ32, mkexpr(arg),
- mkU32(0))),
+ binop(Iop_CmpEQ32, mkexpr(arg), mkU32(0)),
unop(Iop_Clz32, mkexpr(arg)),
mkU32(32)
));
false errors from Memcheck. */
condT = newTemp(Ity_I32);
assign(condT, IRExpr_Mux0X(
- unop(Iop_32to8, binop(Iop_And32,
- mkexpr(old_itstate),
- mkU32(0xF0))),
+ binop(Iop_CmpNE32, binop(Iop_And32,
+ mkexpr(old_itstate),
+ mkU32(0xF0)),
+ mkU32(0)),
mkU32(1),
mkexpr(condT1)
));
/* ------ NOP ------ */
if (INSN0(15,0) == 0xBF00) {
- DIP("nop");
+ DIP("nop\n");
goto decode_success;
}
IRTemp res = newTemp(Ity_I32);
assign(arg, getIRegT(rM1));
assign(res, IRExpr_Mux0X(
- unop(Iop_1Uto8,binop(Iop_CmpEQ32,
- mkexpr(arg),
- mkU32(0))),
+ binop(Iop_CmpEQ32, mkexpr(arg), mkU32(0)),
unop(Iop_Clz32, mkexpr(arg)),
mkU32(32)
));
return IRExpr_Mux0X(
/* if (hi32 == (lo32 >>s 31)) */
- unop(Iop_1Uto8,
- binop(Iop_CmpEQ32, mkexpr(hi32),
- binop( Iop_Sar32, mkexpr(lo32), mkU8(31)))),
+ binop(Iop_CmpEQ32, mkexpr(hi32),
+ binop( Iop_Sar32, mkexpr(lo32), mkU8(31))),
/* else: sign dep saturate: 1->0x80000000, 0->0x7FFFFFFF */
binop(Iop_Add32, mkU32(0x7FFFFFFF),
binop(Iop_Shr32, mkexpr(hi32), mkU8(31))),
return IRExpr_Mux0X(
/* if (top 32 bits of t64 are 0) */
- unop(Iop_1Uto8, binop(Iop_CmpEQ32, mkexpr(hi32), mkU32(0))),
+ binop(Iop_CmpEQ32, mkexpr(hi32), mkU32(0)),
/* else: positive saturate -> 0xFFFFFFFF */
mkU32(0xFFFFFFFF),
/* then: within unsigned-32 range: lo half good enough */
because otherwise the Shr is a shift by the word size when
mask denotes zero. For rotates by immediates, a lot of
this junk gets folded out. */
- return IRExpr_Mux0X( mask, /* zero rotate */ src,
- /* non-zero rotate */ rot );
+ return IRExpr_Mux0X( binop(Iop_CmpNE8, mask, mkU8(0)),
+ /* zero rotate */ src,
+ /* non-zero rotate */ rot );
}
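/* The helper above encodes, roughly (illustration only):

      result = (mask == 0) ? src : rot;

   where 'rot' is the shifted-and-ORed rotate computed earlier.  The
   mask == 0 case must be split out because, as noted above, the Shr
   would otherwise be by the full word size, which IR leaves
   undefined. */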
/* Standard effective address calc: (rA + rB) */
xer_ca
= IRExpr_Mux0X(
/* shift amt > 31 ? */
- unop(Iop_1Uto8, binop(Iop_CmpLT32U, mkU32(31), argR)),
+ binop(Iop_CmpLT32U, mkU32(31), argR),
/* no -- be like srawi */
unop(Iop_1Uto32, binop(Iop_CmpNE32, xer_ca, mkU32(0))),
/* yes -- get sign bit of argL */
xer_ca
= IRExpr_Mux0X(
/* shift amt > 31 ? */
- unop(Iop_1Uto8, binop(Iop_CmpLT64U, mkU64(31), argR)),
+ binop(Iop_CmpLT64U, mkU64(31), argR),
/* no -- be like srawi */
unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0))),
/* yes -- get sign bit of argL */
xer_ca
= IRExpr_Mux0X(
/* shift amt > 63 ? */
- unop(Iop_1Uto8, binop(Iop_CmpLT64U, mkU64(63), argR)),
+ binop(Iop_CmpLT64U, mkU64(63), argR),
/* no -- be like sradi */
unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0))),
/* yes -- get sign bit of argL */
// Iop_Clz32 undefined for arg==0, so deal with that case:
irx = binop(Iop_CmpNE32, lo32, mkU32(0));
assign(rA, mkWidenFrom32(ty,
- IRExpr_Mux0X( unop(Iop_1Uto8, irx),
+ IRExpr_Mux0X( irx,
mkU32(32),
unop(Iop_Clz32, lo32)),
False));
flag_rC ? ".":"", rA_addr, rS_addr);
// Iop_Clz64 undefined for arg==0, so deal with that case:
irx = binop(Iop_CmpNE64, mkexpr(rS), mkU64(0));
- assign(rA, IRExpr_Mux0X( unop(Iop_1Uto8, irx),
+ assign(rA, IRExpr_Mux0X( irx,
mkU64(64),
unop(Iop_Clz64, mkexpr(rS)) ));
// TODO: alternatively: assign(rA, verbose_Clz64(rS));
IRTemp rA = newTemp(ty);
IRTemp rS = newTemp(ty);
IRTemp rB = newTemp(ty);
- IRTemp outofrange = newTemp(Ity_I8);
+ IRTemp outofrange = newTemp(Ity_I1);
IRTemp rS_lo32 = newTemp(Ity_I32);
IRTemp rB_lo32 = newTemp(Ity_I32);
IRExpr* e_tmp;
assign( sh_amt, binop(Iop_And32, mkU32(0x3F),
mkexpr(rB_lo32)) );
assign( outofrange,
- unop( Iop_1Uto8,
- binop(Iop_CmpLT32U, mkU32(31),
- mkexpr(sh_amt)) ));
+ binop(Iop_CmpLT32U, mkU32(31), mkexpr(sh_amt)) );
e_tmp = binop( Iop_Sar32,
mkexpr(rS_lo32),
unop( Iop_32to8,
*/
assign( sh_amt, binop(Iop_And64, mkU64(0x7F), mkexpr(rB)) );
assign( outofrange,
- unop( Iop_1Uto8,
- binop(Iop_CmpLT64U, mkU64(63),
- mkexpr(sh_amt)) ));
+ binop(Iop_CmpLT64U, mkU64(63), mkexpr(sh_amt)) );
assign( rA,
binop( Iop_Sar64,
mkexpr(rS),
// = (cc_b0 == 0) ? frC : frB
assign( frD,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpEQ32, mkexpr(cc_b0), mkU32(0))),
+ binop(Iop_CmpEQ32, mkexpr(cc_b0), mkU32(0)),
mkexpr(frB),
mkexpr(frC) ));
/* need to preserve sign of zero */
/* frD = (fabs(frB) > 9e18) ? frB :
(sign(frB)) ? -fabs((double)r_tmp64) : (double)r_tmp64 */
- assign(frD, IRExpr_Mux0X( unop(Iop_32to8,
- binop(Iop_CmpF64,
- IRExpr_Const(IRConst_F64(9e18)),
- unop(Iop_AbsF64, mkexpr(frB)))),
- IRExpr_Mux0X(unop(Iop_32to8,
- binop(Iop_Shr32,
- unop(Iop_64HIto32,
- unop(Iop_ReinterpF64asI64,
- mkexpr(frB))), mkU8(31))),
- binop(Iop_I64StoF64, mkU32(0), mkexpr(r_tmp64) ),
- unop(Iop_NegF64,
- unop( Iop_AbsF64,
- binop(Iop_I64StoF64, mkU32(0),
- mkexpr(r_tmp64)) )) ),
- mkexpr(frB)));
+ assign(frD, IRExpr_Mux0X(
+ binop(Iop_CmpNE8,
+ unop(Iop_32to8,
+ binop(Iop_CmpF64,
+ IRExpr_Const(IRConst_F64(9e18)),
+ unop(Iop_AbsF64, mkexpr(frB)))),
+ mkU8(0)),
+ IRExpr_Mux0X(
+ binop(Iop_CmpNE32,
+ binop(Iop_Shr32,
+ unop(Iop_64HIto32,
+ unop(Iop_ReinterpF64asI64,
+ mkexpr(frB))),
+ mkU8(31)),
+ mkU32(0)),
+ binop(Iop_I64StoF64, mkU32(0), mkexpr(r_tmp64) ),
+ unop(Iop_NegF64,
+ unop( Iop_AbsF64,
+ binop(Iop_I64StoF64, mkU32(0),
+ mkexpr(r_tmp64)) ))
+ ),
+ mkexpr(frB)
+ ));
break;
default:
assign( res1, unop(Iop_64HIto32, mkexpr(lo64)) );
assign( res0, unop(Iop_64to32, mkexpr(lo64)) );
- b3_result = IRExpr_Mux0X(unop(Iop_1Uto8, is_NaN_32(b3)),
+ b3_result = IRExpr_Mux0X(is_NaN_32(b3),
// else: result is from the Iop_QFtoI32{s|u}x4_RZ
mkexpr(res3),
// then: result is 0x{8|0}80000000
mkU32(un_signed ? 0x00000000 : 0x80000000));
- b2_result = IRExpr_Mux0X(unop(Iop_1Uto8, is_NaN_32(b2)),
+ b2_result = IRExpr_Mux0X(is_NaN_32(b2),
// else: result is from the Iop_QFtoI32{s|u}x4_RZ
mkexpr(res2),
// then: result is 0x{8|0}80000000
mkU32(un_signed ? 0x00000000 : 0x80000000));
- b1_result = IRExpr_Mux0X(unop(Iop_1Uto8, is_NaN_32(b1)),
+ b1_result = IRExpr_Mux0X(is_NaN_32(b1),
// else: result is from the Iop_QFtoI32{s|u}x4_RZ
mkexpr(res1),
// then: result is 0x{8|0}80000000
mkU32(un_signed ? 0x00000000 : 0x80000000));
- b0_result = IRExpr_Mux0X(unop(Iop_1Uto8, is_NaN_32(b0)),
+ b0_result = IRExpr_Mux0X(is_NaN_32(b0),
// else: result is from the Iop_QFtoI32{s|u}x4_RZ
mkexpr(res0),
// then: result is 0x{8|0}80000000
#define SNAN_MASK 0x0008000000000000ULL
return
- IRExpr_Mux0X(unop(Iop_1Uto8, mkexpr(frA_isSNaN)),
+ IRExpr_Mux0X(mkexpr(frA_isSNaN),
/* else: if frB is a SNaN */
- IRExpr_Mux0X(unop(Iop_1Uto8, mkexpr(frB_isSNaN)),
+ IRExpr_Mux0X(mkexpr(frB_isSNaN),
/* else: if frB is a QNaN */
- IRExpr_Mux0X(unop(Iop_1Uto8, mkexpr(frB_isQNaN)),
+ IRExpr_Mux0X(mkexpr(frB_isQNaN),
/* else: frA is a QNaN, so result = frB */
mkexpr(frB_I64),
/* then: result = frA */
unop( Iop_ReinterpI64asF64,
mkexpr( src2 ) ) ) );
- return IRExpr_Mux0X( unop( Iop_1Uto8,
- binop( Iop_CmpEQ32,
- mkexpr( src1cmpsrc2 ),
- mkU32( isMin ? PPC_CMP_LT : PPC_CMP_GT ) ) ),
+ return IRExpr_Mux0X( binop( Iop_CmpEQ32,
+ mkexpr( src1cmpsrc2 ),
+ mkU32( isMin ? PPC_CMP_LT : PPC_CMP_GT ) ),
/* else: use src2 */
mkexpr( src2 ),
/* then: use src1 */
assign(anyNaN, mkOR1(is_NaN(frA_I64), is_NaN(frB_I64)));
#define MINUS_ZERO 0x8000000000000000ULL
- return IRExpr_Mux0X( unop( Iop_1Uto8,
- /* If both arguments are zero . . . */
- mkAND1( mkexpr( frA_isZero ), mkexpr( frB_isZero ) ) ),
+ return IRExpr_Mux0X( /* If both arguments are zero . . . */
+ mkAND1( mkexpr( frA_isZero ), mkexpr( frB_isZero ) ),
/* else: check if either input is a NaN*/
- IRExpr_Mux0X( unop( Iop_1Uto8, mkexpr( anyNaN ) ),
+ IRExpr_Mux0X( mkexpr( anyNaN ),
/* else: use "comparison helper" */
_get_maxmin_fp_cmp( frB_I64, frA_I64, isMin ),
/* then: use "NaN helper" */
/* then: if frA is -0 and isMin==True, return -0;
* else if frA is +0 and isMin==False; return +0;
* otherwise, simply return frB. */
- IRExpr_Mux0X( unop( Iop_1Uto8,
- binop( Iop_CmpEQ32,
- unop( Iop_64HIto32,
- mkexpr( frA_I64 ) ),
- mkU32( isMin ? 0x80000000 : 0 ) ) ),
+ IRExpr_Mux0X( binop( Iop_CmpEQ32,
+ unop( Iop_64HIto32,
+ mkexpr( frA_I64 ) ),
+ mkU32( isMin ? 0x80000000 : 0 ) ),
mkexpr( frB_I64 ),
mkU64( isMin ? MINUS_ZERO : 0ULL ) ) );
}
/* frD = (fabs(frB) > 9e18) ? frB :
(sign(frB)) ? -fabs((double)intermediateResult) : (double)intermediateResult */
assign( frD,
- IRExpr_Mux0X( unop( Iop_32to8,
- binop( Iop_CmpF64,
- IRExpr_Const( IRConst_F64( 9e18 ) ),
- unop( Iop_AbsF64, mkexpr( frB ) ) ) ),
- IRExpr_Mux0X( unop( Iop_32to8,
- binop( Iop_Shr32,
- unop( Iop_64HIto32,
- mkexpr( frB_I64 ) ),
- mkU8( 31 ) ) ),
- binop( Iop_I64StoF64,
- mkU32( 0 ),
- mkexpr( intermediateResult ) ),
- unop( Iop_NegF64,
- unop( Iop_AbsF64,
- binop( Iop_I64StoF64,
- mkU32( 0 ),
- mkexpr( intermediateResult ) ) ) ) ),
- mkexpr( frB ) ) );
+ IRExpr_Mux0X(
+ binop( Iop_CmpNE8,
+ unop( Iop_32to8,
+ binop( Iop_CmpF64,
+ IRExpr_Const( IRConst_F64( 9e18 ) ),
+ unop( Iop_AbsF64, mkexpr( frB ) ) ) ),
+ mkU8(0) ),
+ IRExpr_Mux0X(
+ binop( Iop_CmpNE32,
+ binop( Iop_Shr32,
+ unop( Iop_64HIto32,
+ mkexpr( frB_I64 ) ),
+ mkU8( 31 ) ),
+ mkU32(0) ),
+ binop( Iop_I64StoF64,
+ mkU32( 0 ),
+ mkexpr( intermediateResult ) ),
+ unop( Iop_NegF64,
+ unop( Iop_AbsF64,
+ binop( Iop_I64StoF64,
+ mkU32( 0 ),
+ mkexpr( intermediateResult ) ) ) )
+ ),
+ mkexpr( frB )
+ )
+ );
/* See Appendix "Floating-Point Round to Integer Model" in ISA doc.
* If frB is a SNAN, then frD <- frB, with bit 12 set to '1'.
binop( Iop_And32, hi32, mkU32( 0x00080000 ) ),
mkU32( 0 ) ) ) );
- return IRExpr_Mux0X( unop( Iop_1Uto8, mkexpr( is_SNAN ) ),
+ return IRExpr_Mux0X( mkexpr( is_SNAN ),
mkexpr( frD ),
unop( Iop_ReinterpI64asF64,
binop( Iop_Xor64,
UInt bi = ifieldRegC( theInstr );
putIReg(
rT,
- IRExpr_Mux0X( unop(Iop_32to8,getCRbit( bi )),
+ IRExpr_Mux0X( binop(Iop_CmpNE32, getCRbit( bi ), mkU32(0)),
getIReg(rB),
rA == 0 ? (mode64 ? mkU64(0) : mkU32(0))
: getIReg(rA) )
{
vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
- return IRExpr_Mux0X(unop(Iop_1Uto8, condition), iffalse, iftrue);
+ return IRExpr_Mux0X(condition, iffalse, iftrue);
}
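/* Note on argument order: IRExpr_Mux0X(cond, expr0, exprX) yields
   expr0 when cond is false and exprX otherwise, so 'iffalse' comes
   first.  A C analogue of the helper, as a sketch:

      result = condition ? iftrue : iffalse;
*/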
/* Add a statement that stores DATA at ADDR. This is a big-endian machine. */
vpanic("setFlags_DEP1_DEP2_shift(x86)");
}
+ /* guard :: Ity_I8. We need to convert it to I1. */
+ IRTemp guardB = newTemp(Ity_I1);
+ assign( guardB, binop(Iop_CmpNE8, mkexpr(guard), mkU8(0)) );
+
/* DEP1 contains the result, DEP2 contains the undershifted value. */
stmt( IRStmt_Put( OFFB_CC_OP,
- IRExpr_Mux0X( mkexpr(guard),
+ IRExpr_Mux0X( mkexpr(guardB),
IRExpr_Get(OFFB_CC_OP,Ity_I32),
mkU32(ccOp))) );
stmt( IRStmt_Put( OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(guard),
+ IRExpr_Mux0X( mkexpr(guardB),
IRExpr_Get(OFFB_CC_DEP1,Ity_I32),
widenUto32(mkexpr(res)))) );
stmt( IRStmt_Put( OFFB_CC_DEP2,
- IRExpr_Mux0X( mkexpr(guard),
+ IRExpr_Mux0X( mkexpr(guardB),
IRExpr_Get(OFFB_CC_DEP2,Ity_I32),
widenUto32(mkexpr(resUS)))) );
/* Set NDEP even though it isn't used. This makes redundant-PUT
elimination of previous stores to this field work better. */
stmt( IRStmt_Put( OFFB_CC_NDEP,
- IRExpr_Mux0X( mkexpr(guard),
+ IRExpr_Mux0X( mkexpr(guardB),
IRExpr_Get(OFFB_CC_NDEP,Ity_I32),
mkU32(0) )));
}
assign(oldFlags, mk_x86g_calculate_eflags_all());
+ /* rot_amt32 :: Ity_I8. We need to convert it to I1. */
+ IRTemp rot_amt32b = newTemp(Ity_I1);
+ assign(rot_amt32b, binop(Iop_CmpNE8, mkexpr(rot_amt32), mkU8(0)) );
+
/* CC_DEP1 is the rotated value. CC_NDEP is flags before. */
stmt( IRStmt_Put( OFFB_CC_OP,
- IRExpr_Mux0X( mkexpr(rot_amt32),
+ IRExpr_Mux0X( mkexpr(rot_amt32b),
IRExpr_Get(OFFB_CC_OP,Ity_I32),
mkU32(ccOp))) );
stmt( IRStmt_Put( OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(rot_amt32),
+ IRExpr_Mux0X( mkexpr(rot_amt32b),
IRExpr_Get(OFFB_CC_DEP1,Ity_I32),
widenUto32(mkexpr(dst1)))) );
stmt( IRStmt_Put( OFFB_CC_DEP2,
- IRExpr_Mux0X( mkexpr(rot_amt32),
+ IRExpr_Mux0X( mkexpr(rot_amt32b),
IRExpr_Get(OFFB_CC_DEP2,Ity_I32),
mkU32(0))) );
stmt( IRStmt_Put( OFFB_CC_NDEP,
- IRExpr_Mux0X( mkexpr(rot_amt32),
+ IRExpr_Mux0X( mkexpr(rot_amt32b),
IRExpr_Get(OFFB_CC_NDEP,Ity_I32),
mkexpr(oldFlags))) );
} /* if (isRotate) */
IRTemp res32 = newTemp(Ity_I32);
assign(res32,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- binop(Iop_CmpEQ32, mkexpr(src32x), mkU32(0))),
+ binop(Iop_CmpEQ32, mkexpr(src32x), mkU32(0)),
unop(Iop_Clz32, mkexpr(src32x)),
mkU32(8 * sizeofIRType(ty))
));
static void put_ST ( Int i, IRExpr* value )
{
- put_ST_UNCHECKED( i,
- IRExpr_Mux0X( get_ST_TAG(i),
- /* 0 means empty */
- value,
- /* non-0 means full */
- mkQNaN64()
- )
+ put_ST_UNCHECKED(
+ i,
+ IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
+ /* 0 means empty */
+ value,
+ /* non-0 means full */
+ mkQNaN64()
+ )
);
}
static IRExpr* get_ST ( Int i )
{
return
- IRExpr_Mux0X( get_ST_TAG(i),
+ IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
/* 0 means empty */
mkQNaN64(),
/* non-0 means full */
DIP("fcmovb %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_x86g_calculate_condition(X86CondB)),
+ mk_x86g_calculate_condition(X86CondB),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovz %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_x86g_calculate_condition(X86CondZ)),
+ mk_x86g_calculate_condition(X86CondZ),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovbe %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_x86g_calculate_condition(X86CondBE)),
+ mk_x86g_calculate_condition(X86CondBE),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovu %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_x86g_calculate_condition(X86CondP)),
+ mk_x86g_calculate_condition(X86CondP),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovnb %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_x86g_calculate_condition(X86CondNB)),
+ mk_x86g_calculate_condition(X86CondNB),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovnz %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_x86g_calculate_condition(X86CondNZ)),
+ mk_x86g_calculate_condition(X86CondNZ),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovnbe %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_x86g_calculate_condition(X86CondNBE)),
+ mk_x86g_calculate_condition(X86CondNBE),
get_ST(0), get_ST(r_src)) );
break;
DIP("fcmovnu %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
IRExpr_Mux0X(
- unop(Iop_1Uto8,
- mk_x86g_calculate_condition(X86CondNP)),
+ mk_x86g_calculate_condition(X86CondNP),
get_ST(0), get_ST(r_src)) );
break;
assign(
g1,
IRExpr_Mux0X(
- unop(Iop_1Uto8,binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size))),
+ binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
mkU64(0),
binop(op, mkexpr(g0), mkexpr(amt8))
)
assign(
g1,
IRExpr_Mux0X(
- unop(Iop_1Uto8,binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size))),
+ binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
binop(op, mkexpr(g0), mkU8(size-1)),
binop(op, mkexpr(g0), mkexpr(amt8))
)
IRTemp src32 = newTemp(Ity_I32);
IRTemp dst32 = newTemp(Ity_I32);
- IRTemp src8 = newTemp(Ity_I8);
+ IRTemp srcB = newTemp(Ity_I1);
vassert(sz == 4 || sz == 2);
( isReg ? nameIReg(sz, eregOfRM(modrm)) : dis_buf ),
nameIReg(sz, gregOfRM(modrm)));
- /* Generate an 8-bit expression which is zero iff the original is
+ /* Generate a Bool (Ity_I1) expression which is zero iff the original is
zero, and nonzero otherwise. Ask for a CmpNE version which, if
instrumented by Memcheck, is instrumented expensively, since
this may be used on the output of a preceding movmskb insn,
which has been known to be partially defined, and in need of
careful handling. */
- assign( src8,
- unop(Iop_1Uto8, binop(mkSizedOp(ty,Iop_ExpCmpNE8),
- mkexpr(src), mkU(ty,0))) );
+ assign( srcB, binop(mkSizedOp(ty,Iop_ExpCmpNE8),
+ mkexpr(src), mkU(ty,0)) );
/* Flags: Z is 1 iff source value is zero. All others
are undefined -- we force them to zero. */
stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
stmt( IRStmt_Put(
OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(src8),
+ IRExpr_Mux0X( mkexpr(srcB),
/* src==0 */
mkU32(X86G_CC_MASK_Z),
/* src!=0 */
/* The main computation, guarding against zero. */
assign( dst32,
IRExpr_Mux0X(
- mkexpr(src8),
+ mkexpr(srcB),
/* src == 0 -- leave dst unchanged */
widenUto32( getIReg( sz, gregOfRM(modrm) ) ),
/* src != 0 */
IRTemp dest = newTemp(ty);
IRTemp dest2 = newTemp(ty);
IRTemp acc2 = newTemp(ty);
- IRTemp cond8 = newTemp(Ity_I8);
+ IRTemp cond = newTemp(Ity_I1);
IRTemp addr = IRTemp_INVALID;
UChar rm = getUChar(delta0);
assign( src, getIReg(size, gregOfRM(rm)) );
assign( acc, getIReg(size, R_EAX) );
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
- assign( cond8, unop(Iop_1Uto8, mk_x86g_calculate_condition(X86CondZ)) );
- assign( dest2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(src)) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(acc)) );
+ assign( cond, mk_x86g_calculate_condition(X86CondZ) );
+ assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) );
+ assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
putIReg(size, R_EAX, mkexpr(acc2));
putIReg(size, eregOfRM(rm), mkexpr(dest2));
DIP("cmpxchg%c %s,%s\n", nameISize(size),
assign( src, getIReg(size, gregOfRM(rm)) );
assign( acc, getIReg(size, R_EAX) );
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
- assign( cond8, unop(Iop_1Uto8, mk_x86g_calculate_condition(X86CondZ)) );
- assign( dest2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(src)) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(acc)) );
+ assign( cond, mk_x86g_calculate_condition(X86CondZ) );
+ assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) );
+ assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
putIReg(size, R_EAX, mkexpr(acc2));
storeLE( mkexpr(addr), mkexpr(dest2) );
DIP("cmpxchg%c %s,%s\n", nameISize(size),
NULL, mkexpr(acc), NULL, mkexpr(src) )
));
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
- assign( cond8, unop(Iop_1Uto8, mk_x86g_calculate_condition(X86CondZ)) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond8), mkexpr(dest), mkexpr(acc)) );
+ assign( cond, mk_x86g_calculate_condition(X86CondZ) );
+ assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
putIReg(size, R_EAX, mkexpr(acc2));
DIP("cmpxchg%c %s,%s\n", nameISize(size),
nameIReg(size,gregOfRM(rm)), dis_buf);
assign( tmpd, getIReg(sz, gregOfRM(rm)) );
putIReg(sz, gregOfRM(rm),
- IRExpr_Mux0X( unop(Iop_1Uto8,
- mk_x86g_calculate_condition(cond)),
+ IRExpr_Mux0X( mk_x86g_calculate_condition(cond),
mkexpr(tmpd),
mkexpr(tmps) )
);
assign( tmpd, getIReg(sz, gregOfRM(rm)) );
putIReg(sz, gregOfRM(rm),
- IRExpr_Mux0X( unop(Iop_1Uto8,
- mk_x86g_calculate_condition(cond)),
+ IRExpr_Mux0X( mk_x86g_calculate_condition(cond),
mkexpr(tmpd),
mkexpr(tmps) )
);
assign(
g1,
IRExpr_Mux0X(
- unop(Iop_1Uto8,binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size))),
+ binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
mkV128(0x0000),
binop(op, mkexpr(g0), mkexpr(amt8))
)
assign(
g1,
IRExpr_Mux0X(
- unop(Iop_1Uto8,binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size))),
+ binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
binop(op, mkexpr(g0), mkU8(size-1)),
binop(op, mkexpr(g0), mkexpr(amt8))
)
stmt( IRStmt_Put(
OFFB_DFLAG,
IRExpr_Mux0X(
- unop(Iop_32to8,
+ unop(Iop_32to1,
binop(Iop_And32,
binop(Iop_Shr32, mkexpr(t1), mkU8(10)),
mkU32(1))),
stmt( IRStmt_Put(
OFFB_IDFLAG,
IRExpr_Mux0X(
- unop(Iop_32to8,
+ unop(Iop_32to1,
binop(Iop_And32,
binop(Iop_Shr32, mkexpr(t1), mkU8(21)),
mkU32(1))),
stmt( IRStmt_Put(
OFFB_ACFLAG,
IRExpr_Mux0X(
- unop(Iop_32to8,
+ unop(Iop_32to1,
binop(Iop_And32,
binop(Iop_Shr32, mkexpr(t1), mkU8(18)),
mkU32(1))),
unchanged. If the DCAS fails then we're putting into
EDX:EAX the value seen in memory. */
putIReg(4, R_EDX,
- IRExpr_Mux0X( unop(Iop_1Uto8, mkexpr(success)),
- mkexpr(oldHi),
- mkexpr(expdHi)
+ IRExpr_Mux0X( mkexpr(success),
+ mkexpr(oldHi), mkexpr(expdHi)
));
putIReg(4, R_EAX,
- IRExpr_Mux0X( unop(Iop_1Uto8, mkexpr(success)),
- mkexpr(oldLo),
- mkexpr(expdLo)
+ IRExpr_Mux0X( mkexpr(success),
+ mkexpr(oldLo), mkexpr(expdLo)
));
/* Copy the success bit into the Z flag and leave the others
return IRExpr_Binder(binder);
}
+static Bool isZeroU8 ( IRExpr* e )
+{
+ return e->tag == Iex_Const
+ && e->Iex.Const.con->tag == Ico_U8
+ && e->Iex.Const.con->Ico.U8 == 0;
+}
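/* This small predicate lets the instruction selector below spot
   CmpEQ8/CmpNE8 against a literal zero and select a single TEST of
   the low byte instead of the general XOR/AND sequence.  Roughly:

      CmpNE8(x, 0:I8)  -->  test $0xFF, x  ; condition NZ
*/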
+
/*---------------------------------------------------------*/
/*--- ISelEnv ---*/
}
/* --------- MULTIPLEX --------- */
- case Iex_Mux0X: {
- if ((ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
- HReg r8;
- HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
- AMD64RM* r0 = iselIntExpr_RM(env, e->Iex.Mux0X.expr0);
- HReg dst = newVRegI(env);
- addInstr(env, mk_iMOVsd_RR(rX,dst));
- r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
- addInstr(env, AMD64Instr_Test64(0xFF, r8));
- addInstr(env, AMD64Instr_CMov64(Acc_Z,r0,dst));
- return dst;
+ case Iex_Mux0X: { // VFD
+ if ((ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
+ && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
+ HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
+ AMD64RM* r0 = iselIntExpr_RM(env, e->Iex.Mux0X.expr0);
+ HReg dst = newVRegI(env);
+ addInstr(env, mk_iMOVsd_RR(rX,dst));
+ AMD64CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, AMD64Instr_CMov64(cc ^ 1, r0, dst));
+ return dst;
}
break;
}
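/* Why 'cc ^ 1' above is a negation: AMD64CondCode values follow the
   processor's condition encoding, in which a condition and its
   complement differ only in the bottom bit (e.g. Acc_Z vs Acc_NZ),
   so XOR-ing with 1 inverts the condition.  Hence: move exprX into
   dst, then conditionally overwrite with expr0 when the negated
   condition holds -- the same effect as the old Test64 +
   CMov64(Acc_Z) pair, without needing the condition value in a
   register. */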
|| e->Iex.Binop.op == Iop_CmpNE8
|| e->Iex.Binop.op == Iop_CasCmpEQ8
|| e->Iex.Binop.op == Iop_CasCmpNE8)) {
- HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
- AMD64RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
- HReg r = newVRegI(env);
- addInstr(env, mk_iMOVsd_RR(r1,r));
- addInstr(env, AMD64Instr_Alu64R(Aalu_XOR,rmi2,r));
- addInstr(env, AMD64Instr_Alu64R(Aalu_AND,AMD64RMI_Imm(0xFF),r));
- switch (e->Iex.Binop.op) {
- case Iop_CmpEQ8: case Iop_CasCmpEQ8: return Acc_Z;
- case Iop_CmpNE8: case Iop_CasCmpNE8: return Acc_NZ;
- default: vpanic("iselCondCode(amd64): CmpXX8");
+ if (isZeroU8(e->Iex.Binop.arg2)) {
+ HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ addInstr(env, AMD64Instr_Test64(0xFF,r1));
+ switch (e->Iex.Binop.op) {
+ case Iop_CmpEQ8: case Iop_CasCmpEQ8: return Acc_Z;
+ case Iop_CmpNE8: case Iop_CasCmpNE8: return Acc_NZ;
+ default: vpanic("iselCondCode(amd64): CmpXX8(expr,0:I8)");
+ }
+ } else {
+ HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+ AMD64RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+ HReg r = newVRegI(env);
+ addInstr(env, mk_iMOVsd_RR(r1,r));
+ addInstr(env, AMD64Instr_Alu64R(Aalu_XOR,rmi2,r));
+ addInstr(env, AMD64Instr_Alu64R(Aalu_AND,AMD64RMI_Imm(0xFF),r));
+ switch (e->Iex.Binop.op) {
+ case Iop_CmpEQ8: case Iop_CasCmpEQ8: return Acc_Z;
+ case Iop_CmpNE8: case Iop_CasCmpNE8: return Acc_NZ;
+ default: vpanic("iselCondCode(amd64): CmpXX8(expr,expr)");
+ }
}
}
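/* Sketch (illustration only) of the code now selected for the
   common CmpNE8(x, 0:I8) shape:

      test $0xFF, r_x     // sets ZF from the low byte of x
      // use Acc_NZ       // (Acc_Z for the CmpEQ8 variants)

   whereas the general two-argument path still materialises x ^ y
   and masks it with 0xFF before testing. */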
|| e->Iex.Binop.op == Iop_CmpLE32S
|| e->Iex.Binop.op == Iop_CmpLE32U
|| e->Iex.Binop.op == Iop_CasCmpEQ32
- || e->Iex.Binop.op == Iop_CasCmpNE32)) {
+ || e->Iex.Binop.op == Iop_CasCmpNE32
+ || e->Iex.Binop.op == Iop_ExpCmpNE32)) {
HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
AMD64RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
addInstr(env, AMD64Instr_Alu32R(Aalu_CMP,rmi2,r1));
switch (e->Iex.Binop.op) {
case Iop_CmpEQ32: case Iop_CasCmpEQ32: return Acc_Z;
- case Iop_CmpNE32: case Iop_CasCmpNE32: return Acc_NZ;
+ case Iop_CmpNE32:
+ case Iop_CasCmpNE32: case Iop_ExpCmpNE32: return Acc_NZ;
case Iop_CmpLT32S: return Acc_L;
case Iop_CmpLT32U: return Acc_B;
case Iop_CmpLE32S: return Acc_LE;
}
/* --------- MULTIPLEX --------- */
- if (e->tag == Iex_Mux0X) {
- HReg r8, rX, r0, dst;
+ if (e->tag == Iex_Mux0X) { // VFD
+ HReg rX, r0, dst;
vassert(ty == Ity_F64);
- vassert(typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8);
- r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
+ vassert(typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1);
rX = iselDblExpr(env, e->Iex.Mux0X.exprX);
r0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
dst = newVRegV(env);
addInstr(env, mk_vMOVsd_RR(rX,dst));
- addInstr(env, AMD64Instr_Test64(0xFF, r8));
- addInstr(env, AMD64Instr_SseCMov(Acc_Z,r0,dst));
+ AMD64CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, AMD64Instr_SseCMov(cc ^ 1, r0, dst));
return dst;
}
} /* switch (e->Iex.Binop.op) */
} /* if (e->tag == Iex_Binop) */
- if (e->tag == Iex_Mux0X) {
- HReg r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
+ if (e->tag == Iex_Mux0X) { // VFD
HReg rX = iselVecExpr(env, e->Iex.Mux0X.exprX);
HReg r0 = iselVecExpr(env, e->Iex.Mux0X.expr0);
HReg dst = newVRegV(env);
addInstr(env, mk_vMOVsd_RR(rX,dst));
- addInstr(env, AMD64Instr_Test64(0xFF, r8));
- addInstr(env, AMD64Instr_SseCMov(Acc_Z,r0,dst));
+     AMD64CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, AMD64Instr_SseCMov(cc ^ 1, r0, dst));
return dst;
}
}
}
+ // JRS 2013-Jan-03: this seems completely nonsensical
/* --- CasCmpEQ* --- */
/* Ist_Cas has a dummy argument to compare with, so comparison is
always true. */
- if (e->tag == Iex_Binop
- && (e->Iex.Binop.op == Iop_CasCmpEQ32
- || e->Iex.Binop.op == Iop_CasCmpEQ16
- || e->Iex.Binop.op == Iop_CasCmpEQ8)) {
- return ARMcc_AL;
- }
+ //if (e->tag == Iex_Binop
+ // && (e->Iex.Binop.op == Iop_CasCmpEQ32
+ // || e->Iex.Binop.op == Iop_CasCmpEQ16
+ // || e->Iex.Binop.op == Iop_CasCmpEQ8)) {
+ // return ARMcc_AL;
+ //}
ppIRExpr(e);
vpanic("iselCondCode");
}
return rLo;
}
-//zz case Iop_16HIto8:
-//zz case Iop_32HIto16: {
-//zz HReg dst = newVRegI(env);
-//zz HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
-//zz Int shift = e->Iex.Unop.op == Iop_16HIto8 ? 8 : 16;
-//zz addInstr(env, mk_iMOVsd_RR(src,dst) );
-//zz addInstr(env, X86Instr_Sh32(Xsh_SHR, shift, dst));
-//zz return dst;
-//zz }
+
case Iop_1Uto32:
+      /* 1Uto32(tmp).  Since I1 values generated into registers
+         are guaranteed to be either zero or one, we can simply
+         return the value of the register holding 'tmp' in this
+         case. */
+ if (e->Iex.Unop.arg->tag == Iex_RdTmp) {
+ HReg dst = lookupIRTemp(env, e->Iex.Unop.arg->Iex.RdTmp.tmp);
+ return dst;
+ }
+ /* else fall through */
case Iop_1Uto8: {
HReg dst = newVRegI(env);
ARMCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
/* --------- MULTIPLEX --------- */
case Iex_Mux0X: {
- IRExpr* cond = e->Iex.Mux0X.cond;
-
- /* Mux0X( 32to8(1Uto32(ccexpr)), expr0, exprX ) */
- if (ty == Ity_I32
- && cond->tag == Iex_Unop
- && cond->Iex.Unop.op == Iop_32to8
- && cond->Iex.Unop.arg->tag == Iex_Unop
- && cond->Iex.Unop.arg->Iex.Unop.op == Iop_1Uto32) {
+ /* Mux0X(ccexpr, expr0, exprX) */
+ if (ty == Ity_I32) {
ARMCondCode cc;
HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
ARMRI84* r0 = iselIntExpr_RI84(NULL, False, env, e->Iex.Mux0X.expr0);
HReg dst = newVRegI(env);
addInstr(env, mk_iMOVds_RR(dst, rX));
- cc = iselCondCode(env, cond->Iex.Unop.arg->Iex.Unop.arg);
+ cc = iselCondCode(env, e->Iex.Mux0X.cond);
addInstr(env, ARMInstr_CMov(cc ^ 1, dst, r0));
return dst;
}
-
- /* Mux0X(cond, expr0, exprX) (general case) */
- if (ty == Ity_I32) {
- HReg r8;
- HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
- ARMRI84* r0 = iselIntExpr_RI84(NULL, False, env, e->Iex.Mux0X.expr0);
- HReg dst = newVRegI(env);
- addInstr(env, mk_iMOVds_RR(dst, rX));
- r8 = iselIntExpr_R(env, cond);
- addInstr(env, ARMInstr_CmpOrTst(False/*!isCmp*/, r8,
- ARMRI84_I84(0xFF,0)));
- addInstr(env, ARMInstr_CMov(ARMcc_EQ, dst, r0));
- return dst;
- }
break;
}
/* --------- MULTIPLEX --------- */
if (e->tag == Iex_Mux0X) {
- IRType ty8;
- HReg r8, rXhi, rXlo, r0hi, r0lo, dstHi, dstLo;
- ty8 = typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond);
- vassert(ty8 == Ity_I8);
+ IRType tyC;
+ HReg rXhi, rXlo, r0hi, r0lo, dstHi, dstLo;
+ ARMCondCode cc;
+ tyC = typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond);
+ vassert(tyC == Ity_I1);
iselInt64Expr(&rXhi, &rXlo, env, e->Iex.Mux0X.exprX);
iselInt64Expr(&r0hi, &r0lo, env, e->Iex.Mux0X.expr0);
dstHi = newVRegI(env);
dstLo = newVRegI(env);
addInstr(env, mk_iMOVds_RR(dstHi, rXhi));
addInstr(env, mk_iMOVds_RR(dstLo, rXlo));
- r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
- addInstr(env, ARMInstr_CmpOrTst(False/*!isCmp*/, r8,
- ARMRI84_I84(0xFF,0)));
- addInstr(env, ARMInstr_CMov(ARMcc_EQ, dstHi, ARMRI84_R(r0hi)));
- addInstr(env, ARMInstr_CMov(ARMcc_EQ, dstLo, ARMRI84_R(r0lo)));
+ cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, ARMInstr_CMov(cc ^ 1, dstHi, ARMRI84_R(r0hi)));
+ addInstr(env, ARMInstr_CMov(cc ^ 1, dstLo, ARMRI84_R(r0lo)));
*rHi = dstHi;
*rLo = dstLo;
return;
}
if (e->tag == Iex_Mux0X) {
- HReg r8;
+ ARMCondCode cc;
HReg rX = iselNeonExpr(env, e->Iex.Mux0X.exprX);
HReg r0 = iselNeonExpr(env, e->Iex.Mux0X.expr0);
HReg dst = newVRegV(env);
addInstr(env, ARMInstr_NUnary(ARMneon_COPY, dst, rX, 4, True));
- r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
- addInstr(env, ARMInstr_CmpOrTst(False/*!isCmp*/, r8,
- ARMRI84_I84(0xFF,0)));
- addInstr(env, ARMInstr_NCMovQ(ARMcc_EQ, dst, r0));
+ cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, ARMInstr_NCMovQ(cc ^ 1, dst, r0));
return dst;
}
/*--- ISEL: Floating point expressions (32 bit) ---*/
/*---------------------------------------------------------*/
-/* Compute a 64-bit floating point value into a register, the identity
+/* Compute a 32-bit floating point value into a register, the identity
of which is returned. As with iselIntExpr_R, the reg may be either
real or virtual; in any case it must not be changed by subsequent
code emitted by the caller. */
if (e->tag == Iex_Mux0X) {
if (ty == Ity_F32
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
- HReg r8;
+ && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
+ ARMCondCode cc;
HReg rX = iselFltExpr(env, e->Iex.Mux0X.exprX);
HReg r0 = iselFltExpr(env, e->Iex.Mux0X.expr0);
HReg dst = newVRegF(env);
addInstr(env, ARMInstr_VUnaryS(ARMvfpu_COPY, dst, rX));
- r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
- addInstr(env, ARMInstr_CmpOrTst(False/*!isCmp*/, r8,
- ARMRI84_I84(0xFF,0)));
- addInstr(env, ARMInstr_VCMovS(ARMcc_EQ, dst, r0));
+ cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, ARMInstr_VCMovS(cc ^ 1, dst, r0));
return dst;
}
}
return;
}
if (ty == Ity_I1) {
/* Here, we are generating an I1 value into a 32 bit register.
   Make sure the value in the register is either zero or one,
   and nothing else.  This allows optimisation of the
   1Uto32(tmp:I1) case, by making it simply a copy of the
   register holding 'tmp'; the point being that the value in
   the register holding 'tmp' can only have been created
   here. */
HReg dst = lookupIRTemp(env, tmp);
ARMCondCode cond = iselCondCode(env, stmt->Ist.WrTmp.data);
addInstr(env, ARMInstr_Mov(dst, ARMRI84_I84(0,0)));
return IRExpr_Binder(binder);
}
+static Bool isZeroU8 ( IRExpr* e )
+{
+ return e->tag == Iex_Const
+ && e->Iex.Const.con->tag == Ico_U8
+ && e->Iex.Const.con->Ico.U8 == 0;
+}
+
/*---------------------------------------------------------*/
/*--- ISelEnv ---*/
case Iex_Mux0X: {
if ((ty == Ity_I8 || ty == Ity_I16 ||
ty == Ity_I32 || ((ty == Ity_I64) && mode64)) &&
- typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
- PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
- HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond);
- HReg rX = iselWordExpr_R(env, e->Iex.Mux0X.exprX);
- PPCRI* r0 = iselWordExpr_RI(env, e->Iex.Mux0X.expr0);
- HReg r_dst = newVRegI(env);
- HReg r_tmp = newVRegI(env);
- addInstr(env, mk_iMOVds_RR(r_dst,rX));
- addInstr(env, PPCInstr_Alu(Palu_AND, r_tmp,
- r_cond, PPCRH_Imm(False,0xFF)));
- addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
- 7/*cr*/, r_tmp, PPCRH_Imm(False,0)));
- addInstr(env, PPCInstr_CMov(cc,r_dst,r0));
+ typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
+ PPCRI* rX = iselWordExpr_RI(env, e->Iex.Mux0X.exprX);
+ HReg r0 = iselWordExpr_R(env, e->Iex.Mux0X.expr0);
+ HReg r_dst = newVRegI(env);
+ addInstr(env, mk_iMOVds_RR(r_dst,r0));
+ PPCCondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, PPCInstr_CMov(cc, r_dst, rX));
return r_dst;
}
break;
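/* Note the selected-code pattern has flipped relative to the old
   version: expr0 is moved into r_dst first, and exprX is CMov'd in
   when the (un-negated) condition holds.  The FpCMov-based Mux0X
   for Ity_F64 further down uses the same shape. */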
/* --- patterns rooted at: CmpNEZ8 --- */
/* CmpNEZ8(x) */
+ /* Note this cloned as CmpNE8(x,0) below. */
/* could do better -- andi. */
if (e->tag == Iex_Unop
&& e->Iex.Unop.op == Iop_CmpNEZ8) {
}
}
+ /* --- patterns rooted at: CmpNE8 --- */
+
+ /* CmpNE8(x,0) */
+ /* Note this is a direct copy of CmpNEZ8 above. */
+ /* could do better -- andi. */
+ if (e->tag == Iex_Binop
+ && e->Iex.Binop.op == Iop_CmpNE8
+ && isZeroU8(e->Iex.Binop.arg2)) {
+ HReg arg = iselWordExpr_R(env, e->Iex.Binop.arg1);
+ HReg tmp = newVRegI(env);
+ addInstr(env, PPCInstr_Alu(Palu_AND, tmp, arg,
+ PPCRH_Imm(False,0xFF)));
+ addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+ 7/*cr*/, tmp, PPCRH_Imm(False,0)));
+ return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+ }
+
/* var */
if (e->tag == Iex_RdTmp) {
HReg r_src = lookupIRTemp(env, e->Iex.RdTmp.tmp);
/* --------- MULTIPLEX --------- */
if (e->tag == Iex_Mux0X) {
if (ty == Ity_F64
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
- PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
- HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond);
+ && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
HReg frX = iselDblExpr(env, e->Iex.Mux0X.exprX);
HReg fr0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
HReg fr_dst = newVRegF(env);
- HReg r_tmp = newVRegI(env);
- addInstr(env, PPCInstr_Alu(Palu_AND, r_tmp,
- r_cond, PPCRH_Imm(False,0xFF)));
- addInstr(env, PPCInstr_FpUnary( Pfp_MOV, fr_dst, frX ));
- addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
- 7/*cr*/, r_tmp, PPCRH_Imm(False,0)));
- addInstr(env, PPCInstr_FpCMov( cc, fr_dst, fr0 ));
+ addInstr(env, PPCInstr_FpUnary( Pfp_MOV, fr_dst, fr0 ));
+ PPCCondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, PPCInstr_FpCMov( cc, fr_dst, frX ));
return fr_dst;
}
}
/* --------- MULTIPLEX --------- */
case Iex_Mux0X: {
IRExpr *cond_expr;
- HReg dst, tmp, rX;
- s390_opnd_RMI cond, r0, zero;
+ HReg dst, rX;
+ s390_opnd_RMI r0;
cond_expr = expr->Iex.Mux0X.cond;
+ vassert(typeOfIRExpr(env->type_env, cond_expr) == Ity_I1);
+
dst = newVRegI(env);
r0 = s390_isel_int_expr_RMI(env, expr->Iex.Mux0X.expr0);
rX = s390_isel_int_expr(env, expr->Iex.Mux0X.exprX);
size = sizeofIRType(typeOfIRExpr(env->type_env, expr->Iex.Mux0X.exprX));
- if (cond_expr->tag == Iex_Unop && cond_expr->Iex.Unop.op == Iop_1Uto8) {
- s390_cc_t cc = s390_isel_cc(env, cond_expr->Iex.Unop.arg);
+ s390_cc_t cc = s390_isel_cc(env, cond_expr);
- addInstr(env, s390_insn_move(size, dst, rX));
- addInstr(env, s390_insn_cond_move(size, s390_cc_invert(cc), dst, r0));
- return dst;
- }
-
- /* Assume the condition is true and move rX to the destination reg. */
addInstr(env, s390_insn_move(size, dst, rX));
-
- /* Compute the condition ... */
- cond = s390_isel_int_expr_RMI(env, cond_expr);
-
- /* tmp = cond & 0xFF */
- tmp = newVRegI(env);
- addInstr(env, s390_insn_load_immediate(4, tmp, 0xFF));
- addInstr(env, s390_insn_alu(4, S390_ALU_AND, tmp, cond));
-
- /* ... and compare it with zero */
- zero = s390_opnd_imm(0);
- addInstr(env, s390_insn_compare(4, tmp, zero, 0 /* signed */));
-
- /* ... and if it compared equal move r0 to the destination reg. */
- size = sizeofIRType(typeOfIRExpr(env->type_env, expr->Iex.Mux0X.expr0));
- addInstr(env, s390_insn_cond_move(size, S390_CC_E, dst, r0));
-
+ addInstr(env, s390_insn_cond_move(size, s390_cc_invert(cc), dst, r0));
return dst;
}
&& e->Iex.Const.con->Ico.U32 == 0;
}
-static Bool isZeroU64 ( IRExpr* e )
-{
- return e->tag == Iex_Const
- && e->Iex.Const.con->tag == Ico_U64
- && e->Iex.Const.con->Ico.U64 == 0ULL;
-}
+/* Currently unused, now that the 64-bit Mux0X(g, expr, 0:I64)
+   special cases in iselInt64Expr have been removed. */
+//static Bool isZeroU64 ( IRExpr* e )
+//{
+//   return e->tag == Iex_Const
+//          && e->Iex.Const.con->tag == Ico_U64
+//          && e->Iex.Const.con->Ico.U64 == 0ULL;
+//}
/*---------------------------------------------------------*/
}
/* --------- MULTIPLEX --------- */
- case Iex_Mux0X: {
+ case Iex_Mux0X: { // VFD
if ((ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
- X86RM* r8;
+ && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
X86RM* r0 = iselIntExpr_RM(env, e->Iex.Mux0X.expr0);
HReg dst = newVRegI(env);
addInstr(env, mk_iMOVsd_RR(rX,dst));
- r8 = iselIntExpr_RM(env, e->Iex.Mux0X.cond);
- addInstr(env, X86Instr_Test32(0xFF, r8));
- addInstr(env, X86Instr_CMov32(Xcc_Z,r0,dst));
+ X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, X86Instr_CMov32(cc ^ 1, r0, dst));
return dst;
}
break;
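/* As in the ARM code earlier, 'cc ^ 1' negates the condition.  This
   assumes X86CondCode mirrors the hardware encoding, in which the
   low bit of a condition selects its complement -- as the Xcc_Z /
   Xcc_NZ pair used by the old code suggests.  A sketch making the
   assumption explicit: */
static X86CondCode x86_negate_cc_model ( X86CondCode cc )
{
   return (X86CondCode)(cc ^ 1);   /* flips Z<->NZ, B<->NB, etc. */
}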
return;
}
- /* 64-bit Mux0X: Mux0X(g, expr, 0:I64) */
- if (e->tag == Iex_Mux0X && isZeroU64(e->Iex.Mux0X.exprX)) {
- X86RM* r8;
- HReg e0Lo, e0Hi;
- HReg tLo = newVRegI(env);
- HReg tHi = newVRegI(env);
- X86AMode* zero_esp = X86AMode_IR(0, hregX86_ESP());
- iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.Mux0X.expr0);
- r8 = iselIntExpr_RM(env, e->Iex.Mux0X.cond);
- addInstr(env, mk_iMOVsd_RR( e0Hi, tHi ) );
- addInstr(env, mk_iMOVsd_RR( e0Lo, tLo ) );
- addInstr(env, X86Instr_Push(X86RMI_Imm(0)));
- addInstr(env, X86Instr_Test32(0xFF, r8));
- addInstr(env, X86Instr_CMov32(Xcc_NZ,X86RM_Mem(zero_esp),tHi));
- addInstr(env, X86Instr_CMov32(Xcc_NZ,X86RM_Mem(zero_esp),tLo));
- add_to_esp(env, 4);
- *rHi = tHi;
- *rLo = tLo;
- return;
- }
- /* 64-bit Mux0X: Mux0X(g, 0:I64, expr) */
- if (e->tag == Iex_Mux0X && isZeroU64(e->Iex.Mux0X.expr0)) {
- X86RM* r8;
- HReg e0Lo, e0Hi;
- HReg tLo = newVRegI(env);
- HReg tHi = newVRegI(env);
- X86AMode* zero_esp = X86AMode_IR(0, hregX86_ESP());
- iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.Mux0X.exprX);
- r8 = iselIntExpr_RM(env, e->Iex.Mux0X.cond);
- addInstr(env, mk_iMOVsd_RR( e0Hi, tHi ) );
- addInstr(env, mk_iMOVsd_RR( e0Lo, tLo ) );
- addInstr(env, X86Instr_Push(X86RMI_Imm(0)));
- addInstr(env, X86Instr_Test32(0xFF, r8));
- addInstr(env, X86Instr_CMov32(Xcc_Z,X86RM_Mem(zero_esp),tHi));
- addInstr(env, X86Instr_CMov32(Xcc_Z,X86RM_Mem(zero_esp),tLo));
- add_to_esp(env, 4);
- *rHi = tHi;
- *rLo = tLo;
- return;
- }
-
- /* 64-bit Mux0X: Mux0X(g, expr, expr) */
+ /* 64-bit Mux0X: Mux0X(g, expr, expr) */ // VFD
if (e->tag == Iex_Mux0X) {
- X86RM* r8;
HReg e0Lo, e0Hi, eXLo, eXHi;
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
iselInt64Expr(&eXHi, &eXLo, env, e->Iex.Mux0X.exprX);
addInstr(env, mk_iMOVsd_RR(eXHi, tHi));
addInstr(env, mk_iMOVsd_RR(eXLo, tLo));
- r8 = iselIntExpr_RM(env, e->Iex.Mux0X.cond);
- addInstr(env, X86Instr_Test32(0xFF, r8));
+ X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
/* This assumes the first cmov32 doesn't trash the condition
codes, so they are still available for the second cmov32 */
- addInstr(env, X86Instr_CMov32(Xcc_Z,X86RM_Reg(e0Hi),tHi));
- addInstr(env, X86Instr_CMov32(Xcc_Z,X86RM_Reg(e0Lo),tLo));
+ addInstr(env, X86Instr_CMov32(cc ^ 1, X86RM_Reg(e0Hi), tHi));
+ addInstr(env, X86Instr_CMov32(cc ^ 1, X86RM_Reg(e0Lo), tLo));
*rHi = tHi;
*rLo = tLo;
return;
}
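/* The 64-bit case uses the condition twice, once per 32-bit half;
   the comment's assumption that CMov32 leaves the flags intact is
   what makes reusing 'cc ^ 1' for the second half legal.  A C model
   of the paired selection (sketch, not VEX code): */
static void mux0x_64_model ( int cond,
                             unsigned e0Hi, unsigned e0Lo,
                             unsigned eXHi, unsigned eXLo,
                             unsigned* rHi, unsigned* rLo )
{
   unsigned tHi = eXHi, tLo = eXLo;
   if (!cond) {                 /* two CMov32s under cc ^ 1 */
      tHi = e0Hi;
      tLo = e0Lo;
   }
   *rHi = tHi;
   *rLo = tLo;
}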
/* --------- MULTIPLEX --------- */
- if (e->tag == Iex_Mux0X) {
+ if (e->tag == Iex_Mux0X) { // VFD
if (ty == Ity_F64
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
- X86RM* r8 = iselIntExpr_RM(env, e->Iex.Mux0X.cond);
+ && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
HReg rX = iselDblExpr(env, e->Iex.Mux0X.exprX);
HReg r0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
HReg dst = newVRegF(env);
addInstr(env, X86Instr_FpUnary(Xfp_MOV,rX,dst));
- addInstr(env, X86Instr_Test32(0xFF, r8));
- addInstr(env, X86Instr_FpCMov(Xcc_Z,r0,dst));
+ X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, X86Instr_FpCMov(cc ^ 1, r0, dst));
return dst;
}
}
} /* switch (e->Iex.Binop.op) */
} /* if (e->tag == Iex_Binop) */
- if (e->tag == Iex_Mux0X) {
- X86RM* r8 = iselIntExpr_RM(env, e->Iex.Mux0X.cond);
+ if (e->tag == Iex_Mux0X) { // VFD
HReg rX = iselVecExpr(env, e->Iex.Mux0X.exprX);
HReg r0 = iselVecExpr(env, e->Iex.Mux0X.expr0);
HReg dst = newVRegV(env);
addInstr(env, mk_vMOVsd_RR(rX,dst));
- addInstr(env, X86Instr_Test32(0xFF, r8));
- addInstr(env, X86Instr_SseCMov(Xcc_Z,r0,dst));
+ X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, X86Instr_SseCMov(cc ^ 1, r0, dst));
return dst;
}
tcExpr(bb,stmt, expr->Iex.Mux0X.cond, gWordTy);
tcExpr(bb,stmt, expr->Iex.Mux0X.expr0, gWordTy);
tcExpr(bb,stmt, expr->Iex.Mux0X.exprX, gWordTy);
- if (typeOfIRExpr(tyenv, expr->Iex.Mux0X.cond) != Ity_I8)
- sanityCheckFail(bb,stmt,"Iex.Mux0X.cond: cond :: Ity_I8");
+ if (typeOfIRExpr(tyenv, expr->Iex.Mux0X.cond) != Ity_I1)
+ sanityCheckFail(bb,stmt,"Iex.Mux0X.cond: cond :: Ity_I1");
if (typeOfIRExpr(tyenv, expr->Iex.Mux0X.expr0)
!= typeOfIRExpr(tyenv, expr->Iex.Mux0X.exprX))
sanityCheckFail(bb,stmt,"Iex.Mux0X: expr0/exprX mismatch");
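/* With this check in place, IR that still builds a Mux0X over a
   byte-typed guard is rejected at sanity-check time.  A hypothetical
   helper (using the front ends' construction helpers binop and mkU8,
   which are assumed to be in scope) showing the now-required shape:
   the byte guard must be narrowed to I1 before it can steer the
   Mux0X. */
static IRExpr* mk_guarded_mux ( IRExpr* guard8 /* :: Ity_I8 */,
                                IRExpr* expr0, IRExpr* exprX )
{
   /* IRExpr_Mux0X(guard8, expr0, exprX) would now fail the check
      above; compare the byte against zero to get an I1 first. */
   return IRExpr_Mux0X( binop(Iop_CmpNE8, guard8, mkU8(0)),
                        expr0, exprX );
}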
// return r;
//}
+/* Helper for arbitrary expression pattern matching in flat IR. If
+ 'e' is a reference to a tmp, look it up in env -- repeatedly, if
+ necessary -- until it resolves to a non-tmp. Note that this can
+ return NULL if it can't resolve 'e' to a new expression, which will
+ be the case if 'e' is instead defined by an IRStmt (IRDirty or
+ LLSC). */
+static IRExpr* chase ( IRExpr** env, IRExpr* e )
+{
+ /* Why is this loop guaranteed to terminate? Because all tmps must
+ have definitions before use, hence a tmp cannot be bound
+ (directly or indirectly) to itself. */
+ while (e->tag == Iex_RdTmp) {
+ if (0) { vex_printf("chase "); ppIRExpr(e); vex_printf("\n"); }
+ e = env[(Int)e->Iex.RdTmp.tmp];
+ if (e == NULL) break;
+ }
+ return e;
+}
+
static IRExpr* fold_Expr ( IRExpr** env, IRExpr* e )
{
Int shift;
e2 = mkZeroOfPrimopResultType(e->Iex.Binop.op);
break;
}
+ /* CmpNE32(1Uto32(b), 0) ==> b */
+ if (isZeroU32(e->Iex.Binop.arg2)) {
+ IRExpr* a1 = chase(env, e->Iex.Binop.arg1);
+ if (a1 && a1->tag == Iex_Unop
+ && a1->Iex.Unop.op == Iop_1Uto32) {
+ e2 = a1->Iex.Unop.arg;
+ break;
+ }
+ }
break;
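/* A C model of the identity behind the fold above (sketch): widening
   an I1 to 32 bits and testing the result against zero yields the
   original bit, so the 1Uto32/CmpNE32 round trip -- common now that
   Mux0X conditions are I1 -- folds away. */
static int fold_cmpne32_model ( int b /* 0 or 1 */ )
{
   unsigned w = (unsigned)b;   /* 1Uto32(b)      */
   return w != 0;              /* CmpNE32(w, 0)  */
}  /* always equals b */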
case Iop_CmpEQ32:
case Iex_Mux0X:
/* Mux0X */
-
/* is the discriminant a constant? */
if (e->Iex.Mux0X.cond->tag == Iex_Const) {
- Bool zero;
/* assured us by the IR type rules */
- vassert(e->Iex.Mux0X.cond->Iex.Const.con->tag == Ico_U8);
- zero = toBool(0 == (0xFF & e->Iex.Mux0X.cond
- ->Iex.Const.con->Ico.U8));
- e2 = zero ? e->Iex.Mux0X.expr0 : e->Iex.Mux0X.exprX;
+ vassert(e->Iex.Mux0X.cond->Iex.Const.con->tag == Ico_U1);
+ e2 = e->Iex.Mux0X.cond->Iex.Const.con->Ico.U1
+ ? e->Iex.Mux0X.exprX : e->Iex.Mux0X.expr0;
}
else
/* are the arms identical? (pretty weedy test) */
if (sameIRExprs(env, e->Iex.Mux0X.expr0,
- e->Iex.Mux0X.exprX)) {
+ e->Iex.Mux0X.exprX)) {
e2 = e->Iex.Mux0X.expr0;
}
break;