/* DEP1 contains the result, DEP2 contains the undershifted value. */
stmt( IRStmt_Put( OFFB_CC_OP,
- IRExpr_Mux0X( mkexpr(guardB),
- IRExpr_Get(OFFB_CC_OP,Ity_I64),
- mkU64(ccOp))) );
+ IRExpr_ITE( mkexpr(guardB),
+ mkU64(ccOp),
+ IRExpr_Get(OFFB_CC_OP,Ity_I64) ) ));
stmt( IRStmt_Put( OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(guardB),
- IRExpr_Get(OFFB_CC_DEP1,Ity_I64),
- widenUto64(mkexpr(res)))) );
+ IRExpr_ITE( mkexpr(guardB),
+ widenUto64(mkexpr(res)),
+ IRExpr_Get(OFFB_CC_DEP1,Ity_I64) ) ));
stmt( IRStmt_Put( OFFB_CC_DEP2,
- IRExpr_Mux0X( mkexpr(guardB),
- IRExpr_Get(OFFB_CC_DEP2,Ity_I64),
- widenUto64(mkexpr(resUS)))) );
+ IRExpr_ITE( mkexpr(guardB),
+ widenUto64(mkexpr(resUS)),
+ IRExpr_Get(OFFB_CC_DEP2,Ity_I64) ) ));
}
/* CC_DEP1 is the rotated value. CC_NDEP is flags before. */
stmt( IRStmt_Put( OFFB_CC_OP,
- IRExpr_Mux0X( mkexpr(rot_amt64b),
- IRExpr_Get(OFFB_CC_OP,Ity_I64),
- mkU64(ccOp))) );
+ IRExpr_ITE( mkexpr(rot_amt64b),
+ mkU64(ccOp),
+ IRExpr_Get(OFFB_CC_OP,Ity_I64) ) ));
stmt( IRStmt_Put( OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(rot_amt64b),
- IRExpr_Get(OFFB_CC_DEP1,Ity_I64),
- widenUto64(mkexpr(dst1)))) );
+ IRExpr_ITE( mkexpr(rot_amt64b),
+ widenUto64(mkexpr(dst1)),
+ IRExpr_Get(OFFB_CC_DEP1,Ity_I64) ) ));
stmt( IRStmt_Put( OFFB_CC_DEP2,
- IRExpr_Mux0X( mkexpr(rot_amt64b),
- IRExpr_Get(OFFB_CC_DEP2,Ity_I64),
- mkU64(0))) );
+ IRExpr_ITE( mkexpr(rot_amt64b),
+ mkU64(0),
+ IRExpr_Get(OFFB_CC_DEP2,Ity_I64) ) ));
stmt( IRStmt_Put( OFFB_CC_NDEP,
- IRExpr_Mux0X( mkexpr(rot_amt64b),
- IRExpr_Get(OFFB_CC_NDEP,Ity_I64),
- mkexpr(oldFlags))) );
+ IRExpr_ITE( mkexpr(rot_amt64b),
+ mkexpr(oldFlags),
+ IRExpr_Get(OFFB_CC_NDEP,Ity_I64) ) ));
} /* if (isRotate) */
/* Save result, and finish up. */
// special-case around that.
IRTemp res64 = newTemp(Ity_I64);
assign(res64,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpEQ64, mkexpr(src64x), mkU64(0)),
- unop(Iop_Clz64, mkexpr(src64x)),
- mkU64(8 * sizeofIRType(ty))
+ mkU64(8 * sizeofIRType(ty)),
+ unop(Iop_Clz64, mkexpr(src64x))
));
IRTemp res = newTemp(ty);
{
put_ST_UNCHECKED(
i,
- IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
- /* 0 means empty */
- value,
- /* non-0 means full */
- mkQNaN64()
+ IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
+ /* non-0 means full */
+ mkQNaN64(),
+ /* 0 means empty */
+ value
)
);
}
static IRExpr* get_ST ( Int i )
{
return
- IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
- /* 0 means empty */
- mkQNaN64(),
- /* non-0 means full */
- get_ST_UNCHECKED(i));
+ IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
+ /* non-0 means full */
+ get_ST_UNCHECKED(i),
+ /* 0 means empty */
+ mkQNaN64());
}
IRTemp t32 = newTemp(Ity_I32);
assign( t32, e32 );
return
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT64U,
unop(Iop_32Uto64,
binop(Iop_Add32, mkexpr(t32), mkU32(32768))),
mkU64(65536)),
- mkU16( 0x8000 ),
- unop(Iop_32to16, mkexpr(t32)));
+ unop(Iop_32to16, mkexpr(t32)),
+ mkU16( 0x8000 ) );
}
r_src = (UInt)modrm - 0xC0;
DIP("fcmovb %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_amd64g_calculate_condition(AMD64CondB),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xC8 ... 0xCF: /* FCMOVE(Z) ST(i), ST(0) */
r_src = (UInt)modrm - 0xC8;
DIP("fcmovz %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_amd64g_calculate_condition(AMD64CondZ),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xD0 ... 0xD7: /* FCMOVBE ST(i), ST(0) */
r_src = (UInt)modrm - 0xD0;
DIP("fcmovbe %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_amd64g_calculate_condition(AMD64CondBE),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xD8 ... 0xDF: /* FCMOVU ST(i), ST(0) */
r_src = (UInt)modrm - 0xD8;
DIP("fcmovu %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_amd64g_calculate_condition(AMD64CondP),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xE9: /* FUCOMPP %st(0),%st(1) */
r_src = (UInt)modrm - 0xC0;
DIP("fcmovnb %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_amd64g_calculate_condition(AMD64CondNB),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xC8 ... 0xCF: /* FCMOVNE(NZ) ST(i), ST(0) */
DIP("fcmovnz %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(
0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_amd64g_calculate_condition(AMD64CondNZ),
- get_ST(0),
- get_ST(r_src)
+ get_ST(r_src),
+ get_ST(0)
)
);
break;
DIP("fcmovnbe %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(
0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_amd64g_calculate_condition(AMD64CondNBE),
- get_ST(0),
- get_ST(r_src)
+ get_ST(r_src),
+ get_ST(0)
)
);
break;
DIP("fcmovnu %%st(%u), %%st(0)\n", r_src);
put_ST_UNCHECKED(
0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_amd64g_calculate_condition(AMD64CondNP),
- get_ST(0),
- get_ST(r_src)
+ get_ST(r_src),
+ get_ST(0)
)
);
break;
if (shl || shr) {
assign(
g1,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT64U,mkexpr(amt),mkU64(size)),
- mkU64(0),
- binop(op, mkexpr(g0), mkexpr(amt8))
+ binop(op, mkexpr(g0), mkexpr(amt8)),
+ mkU64(0)
)
);
} else
if (sar) {
assign(
g1,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT64U,mkexpr(amt),mkU64(size)),
- binop(op, mkexpr(g0), mkU8(size-1)),
- binop(op, mkexpr(g0), mkexpr(amt8))
+ binop(op, mkexpr(g0), mkexpr(amt8)),
+ binop(op, mkexpr(g0), mkU8(size-1))
)
);
} else {
else (base << amt) | (xtra >>u (64-amt))
*/
return
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpNE8, mkexpr(amt), mkU8(0)),
- mkexpr(base),
binop(Iop_Or64,
binop(Iop_Shl64, mkexpr(base), mkexpr(amt)),
binop(Iop_Shr64, mkexpr(xtra),
binop(Iop_Sub8, mkU8(64), mkexpr(amt)))
- )
+ ),
+ mkexpr(base)
);
}
else (base >>u amt) | (xtra << (64-amt))
*/
return
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpNE8, mkexpr(amt), mkU8(0)),
- mkexpr(base),
binop(Iop_Or64,
binop(Iop_Shr64, mkexpr(base), mkexpr(amt)),
binop(Iop_Shl64, mkexpr(xtra),
binop(Iop_Sub8, mkU8(64), mkexpr(amt)))
- )
+ ),
+ mkexpr(base)
);
}
stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
stmt( IRStmt_Put(
OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(srcB),
- /* src==0 */
- mkU64(AMD64G_CC_MASK_Z),
- /* src!=0 */
- mkU64(0)
+ IRExpr_ITE( mkexpr(srcB),
+ /* src!=0 */
+ mkU64(0),
+ /* src==0 */
+ mkU64(AMD64G_CC_MASK_Z)
)
));
/* Set NDEP even though it isn't used. This makes redundant-PUT
/* The main computation, guarding against zero. */
assign( dst64,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mkexpr(srcB),
- /* src == 0 -- leave dst unchanged */
- widenUto64( getIRegG( sz, pfx, modrm ) ),
/* src != 0 */
fwds ? unop(Iop_Ctz64, mkexpr(src64))
: binop(Iop_Sub64,
mkU64(63),
- unop(Iop_Clz64, mkexpr(src64)))
+ unop(Iop_Clz64, mkexpr(src64))),
+ /* src == 0 -- leave dst unchanged */
+ widenUto64( getIRegG( sz, pfx, modrm ) )
)
);
/* There are 3 cases to consider:
reg-reg: ignore any lock prefix, generate sequence based
- on Mux0X
+ on ITE
reg-mem, not locked: ignore any lock prefix, generate sequence
- based on Mux0X
+ based on ITE
reg-mem, locked: use IRCAS
*/
assign( acc, getIRegRAX(size) );
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) );
- assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
+ assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) );
+ assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
putIRegRAX(size, mkexpr(acc2));
putIRegE(size, pfx, rm, mkexpr(dest2));
DIP("cmpxchg%c %s,%s\n", nameISize(size),
assign( acc, getIRegRAX(size) );
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) );
- assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
+ assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) );
+ assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
putIRegRAX(size, mkexpr(acc2));
storeLE( mkexpr(addr), mkexpr(dest2) );
DIP("cmpxchg%c %s,%s\n", nameISize(size),
));
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
+ assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
putIRegRAX(size, mkexpr(acc2));
DIP("cmpxchg%c %s,%s\n", nameISize(size),
nameIRegG(size,pfx,rm), dis_buf);
assign( tmpd, getIRegG(sz, pfx, rm) );
putIRegG( sz, pfx, rm,
- IRExpr_Mux0X( mk_amd64g_calculate_condition(cond),
- mkexpr(tmpd),
- mkexpr(tmps) )
+ IRExpr_ITE( mk_amd64g_calculate_condition(cond),
+ mkexpr(tmps),
+ mkexpr(tmpd) )
);
DIP("cmov%s %s,%s\n", name_AMD64Condcode(cond),
nameIRegE(sz,pfx,rm),
assign( tmpd, getIRegG(sz, pfx, rm) );
putIRegG( sz, pfx, rm,
- IRExpr_Mux0X( mk_amd64g_calculate_condition(cond),
- mkexpr(tmpd),
- mkexpr(tmps) )
+ IRExpr_ITE( mk_amd64g_calculate_condition(cond),
+ mkexpr(tmps),
+ mkexpr(tmpd) )
);
DIP("cmov%s %s,%s\n", name_AMD64Condcode(cond),
if (shl || shr) {
assign(
g1,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
- mkV128(0x0000),
- binop(op, mkexpr(g0), mkexpr(amt8))
+ binop(op, mkexpr(g0), mkexpr(amt8)),
+ mkV128(0x0000)
)
);
} else
if (sar) {
assign(
g1,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
- binop(op, mkexpr(g0), mkU8(size-1)),
- binop(op, mkexpr(g0), mkexpr(amt8))
+ binop(op, mkexpr(g0), mkexpr(amt8)),
+ binop(op, mkexpr(g0), mkU8(size-1))
)
);
} else {
IRTemp validL = newTemp(Ity_I32);
assign(validL, binop(Iop_Sub32,
- IRExpr_Mux0X(mkexpr(zmaskL_zero),
- mkU32(0),
- binop(Iop_Shl32, mkU32(1), ctzL)),
+ IRExpr_ITE(mkexpr(zmaskL_zero),
+ binop(Iop_Shl32, mkU32(1), ctzL),
+ mkU32(0)),
mkU32(1)));
/* And similarly for validR. */
assign(zmaskR_zero, binop(Iop_ExpCmpNE32, mkexpr(zmaskR), mkU32(0)));
IRTemp validR = newTemp(Ity_I32);
assign(validR, binop(Iop_Sub32,
- IRExpr_Mux0X(mkexpr(zmaskR_zero),
- mkU32(0),
- binop(Iop_Shl32, mkU32(1), ctzR)),
+ IRExpr_ITE(mkexpr(zmaskR_zero),
+ binop(Iop_Shl32, mkU32(1), ctzR),
+ mkU32(0)),
mkU32(1)));
/* Do the actual comparison. */
/* Now for the condition codes... */
/* C == 0 iff intRes2 == 0 */
- IRExpr *c_bit = IRExpr_Mux0X( binop(Iop_ExpCmpNE32, mkexpr(intRes2),
- mkU32(0)),
- mkU32(0),
- mkU32(1 << AMD64G_CC_SHIFT_C) );
+ IRExpr *c_bit = IRExpr_ITE( binop(Iop_ExpCmpNE32, mkexpr(intRes2),
+ mkU32(0)),
+ mkU32(1 << AMD64G_CC_SHIFT_C),
+ mkU32(0));
/* Z == 1 iff any in argL is 0 */
- IRExpr *z_bit = IRExpr_Mux0X( mkexpr(zmaskL_zero),
- mkU32(0),
- mkU32(1 << AMD64G_CC_SHIFT_Z) );
+ IRExpr *z_bit = IRExpr_ITE( mkexpr(zmaskL_zero),
+ mkU32(1 << AMD64G_CC_SHIFT_Z),
+ mkU32(0));
/* S == 1 iff any in argR is 0 */
- IRExpr *s_bit = IRExpr_Mux0X( mkexpr(zmaskR_zero),
- mkU32(0),
- mkU32(1 << AMD64G_CC_SHIFT_S) );
+ IRExpr *s_bit = IRExpr_ITE( mkexpr(zmaskR_zero),
+ mkU32(1 << AMD64G_CC_SHIFT_S),
+ mkU32(0));
/* O == IntRes2[0] */
IRExpr *o_bit = binop(Iop_Shl32, binop(Iop_And32, mkexpr(intRes2),
mkU32(0x01)),
If zero, put 1 in OFFB_DFLAG, else -1 in OFFB_DFLAG. */
stmt( IRStmt_Put(
OFFB_DFLAG,
- IRExpr_Mux0X(
+ IRExpr_ITE(
unop(Iop_64to1,
binop(Iop_And64,
binop(Iop_Shr64, mkexpr(t1), mkU8(10)),
mkU64(1))),
- mkU64(1),
- mkU64(0xFFFFFFFFFFFFFFFFULL)))
+ mkU64(0xFFFFFFFFFFFFFFFFULL),
+ mkU64(1)))
);
/* And set the ID flag */
stmt( IRStmt_Put(
OFFB_IDFLAG,
- IRExpr_Mux0X(
+ IRExpr_ITE(
unop(Iop_64to1,
binop(Iop_And64,
binop(Iop_Shr64, mkexpr(t1), mkU8(21)),
mkU64(1))),
- mkU64(0),
- mkU64(1)))
+ mkU64(1),
+ mkU64(0)))
);
/* And set the AC flag too */
stmt( IRStmt_Put(
OFFB_ACFLAG,
- IRExpr_Mux0X(
+ IRExpr_ITE(
unop(Iop_64to1,
binop(Iop_And64,
binop(Iop_Shr64, mkexpr(t1), mkU8(18)),
mkU64(1))),
- mkU64(0),
- mkU64(1)))
+ mkU64(1),
+ mkU64(0)))
);
DIP("popf%c\n", nameISize(sz));
expdHi64:expdLo64, even if we're doing a cmpxchg8b. */
/* It's just _so_ much fun ... */
putIRegRDX( 8,
- IRExpr_Mux0X( mkexpr(success),
- sz == 4 ? unop(Iop_32Uto64, mkexpr(oldHi))
- : mkexpr(oldHi),
- mkexpr(expdHi64)
+ IRExpr_ITE( mkexpr(success),
+ mkexpr(expdHi64),
+ sz == 4 ? unop(Iop_32Uto64, mkexpr(oldHi))
+ : mkexpr(oldHi)
));
putIRegRAX( 8,
- IRExpr_Mux0X( mkexpr(success),
- sz == 4 ? unop(Iop_32Uto64, mkexpr(oldLo))
- : mkexpr(oldLo),
- mkexpr(expdLo64)
+ IRExpr_ITE( mkexpr(success),
+ mkexpr(expdLo64),
+ sz == 4 ? unop(Iop_32Uto64, mkexpr(oldLo))
+ : mkexpr(oldLo)
));
/* Copy the success bit into the Z flag and leave the others
if (shl || shr) {
assign(
g1,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
- mkV128(0x0000),
- binop(op, mkexpr(g0), mkexpr(amt8))
+ binop(op, mkexpr(g0), mkexpr(amt8)),
+ mkV128(0x0000)
)
);
} else
if (sar) {
assign(
g1,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
- binop(op, mkexpr(g0), mkU8(size-1)),
- binop(op, mkexpr(g0), mkexpr(amt8))
+ binop(op, mkexpr(g0), mkexpr(amt8)),
+ binop(op, mkexpr(g0), mkU8(size-1))
)
);
} else {
breakupV128to64s( dataV, &dHi, &dLo );
breakupV128to64s( ctrlV, &cHi, &cLo );
IRExpr* rHi
- = IRExpr_Mux0X( unop(Iop_64to1,
- binop(Iop_Shr64, mkexpr(cHi), mkU8(1))),
- mkexpr(dLo), mkexpr(dHi) );
+ = IRExpr_ITE( unop(Iop_64to1,
+ binop(Iop_Shr64, mkexpr(cHi), mkU8(1))),
+ mkexpr(dHi), mkexpr(dLo) );
IRExpr* rLo
- = IRExpr_Mux0X( unop(Iop_64to1,
- binop(Iop_Shr64, mkexpr(cLo), mkU8(1))),
- mkexpr(dLo), mkexpr(dHi) );
+ = IRExpr_ITE( unop(Iop_64to1,
+ binop(Iop_Shr64, mkexpr(cLo), mkU8(1))),
+ mkexpr(dHi), mkexpr(dLo) );
IRTemp res = newTemp(Ity_V128);
assign(res, binop(Iop_64HLtoV128, rHi, rLo));
return res;
--> oldC ? (argR <=u argL) : (argR <u argL)
*/
return
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
- /* case oldC == 0 */
- unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1)),
/* case oldC != 0 */
- unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1))
+ unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1)),
+ /* case oldC == 0 */
+ unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1))
);
}
--> oldC ? (argR <=u argL) : (argR <u argL)
*/
return
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
- /* case oldC == 0 */
- unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1)),
/* case oldC != 0 */
- unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1))
+ unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1)),
+ /* case oldC == 0 */
+ unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1))
);
}
llPutIReg( iregNo, e );
} else {
llPutIReg( iregNo,
- IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
- llGetIReg(iregNo),
- e ));
+ IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+ e, llGetIReg(iregNo) ));
}
if (iregNo == 15) {
// assert against competing r15 updates. Shouldn't
llPutIReg( iregNo, e );
} else {
llPutIReg( iregNo,
- IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
- llGetIReg(iregNo),
- e ));
+ IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+ e, llGetIReg(iregNo) ));
}
}
llPutDReg( dregNo, e );
} else {
llPutDReg( dregNo,
- IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
- llGetDReg(dregNo),
- e ));
+ IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+ e, llGetDReg(dregNo) ));
}
}
llPutDRegI64( dregNo, e );
} else {
llPutDRegI64( dregNo,
- IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
- llGetDRegI64(dregNo),
- e ));
+ IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+ e, llGetDRegI64(dregNo) ));
}
}
llPutQReg( qregNo, e );
} else {
llPutQReg( qregNo,
- IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
- llGetQReg(qregNo),
- e ));
+ IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+ e, llGetQReg(qregNo) ));
}
}
llPutFReg( fregNo, e );
} else {
llPutFReg( fregNo,
- IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
- llGetFReg(fregNo),
- e ));
+ IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+ e, llGetFReg(fregNo) ));
}
}
} else {
stmt(IRStmt_Put(
gsoffset,
- IRExpr_Mux0X( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
- IRExpr_Get(gsoffset, Ity_I32),
- e
- )
+ IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+ e, IRExpr_Get(gsoffset, Ity_I32) )
));
}
}
assign( c1, binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)) );
stmt( IRStmt_Put(
OFFB_CC_OP,
- IRExpr_Mux0X( mkexpr(c1),
- IRExpr_Get(OFFB_CC_OP, Ity_I32),
- mkU32(cc_op) )));
+ IRExpr_ITE( mkexpr(c1),
+ mkU32(cc_op),
+ IRExpr_Get(OFFB_CC_OP, Ity_I32) ) ));
stmt( IRStmt_Put(
OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(c1),
- IRExpr_Get(OFFB_CC_DEP1, Ity_I32),
- mkexpr(t_dep1) )));
+ IRExpr_ITE( mkexpr(c1),
+ mkexpr(t_dep1),
+ IRExpr_Get(OFFB_CC_DEP1, Ity_I32) ) ));
stmt( IRStmt_Put(
OFFB_CC_DEP2,
- IRExpr_Mux0X( mkexpr(c1),
- IRExpr_Get(OFFB_CC_DEP2, Ity_I32),
- mkexpr(t_dep2) )));
+ IRExpr_ITE( mkexpr(c1),
+ mkexpr(t_dep2),
+ IRExpr_Get(OFFB_CC_DEP2, Ity_I32) ) ));
stmt( IRStmt_Put(
OFFB_CC_NDEP,
- IRExpr_Mux0X( mkexpr(c1),
- IRExpr_Get(OFFB_CC_NDEP, Ity_I32),
- mkexpr(t_ndep) )));
+ IRExpr_ITE( mkexpr(c1),
+ mkexpr(t_ndep),
+ IRExpr_Get(OFFB_CC_NDEP, Ity_I32) ) ));
}
}
assign( nd0, mkexpr(regT) );
assign( nd1, mkU32(ceil) );
assign( nd2, binop( Iop_CmpLT32S, mkexpr(nd1), mkexpr(nd0) ) );
- assign( nd3, IRExpr_Mux0X(mkexpr(nd2), mkexpr(nd0), mkexpr(nd1)) );
+ assign( nd3, IRExpr_ITE(mkexpr(nd2), mkexpr(nd1), mkexpr(nd0)) );
assign( nd4, mkU32(floor) );
assign( nd5, binop( Iop_CmpLT32S, mkexpr(nd3), mkexpr(nd4) ) );
- assign( nd6, IRExpr_Mux0X(mkexpr(nd5), mkexpr(nd3), mkexpr(nd4)) );
+ assign( nd6, IRExpr_ITE(mkexpr(nd5), mkexpr(nd4), mkexpr(nd3)) );
assign( *res, mkexpr(nd6) );
/* if saturation occurred, then resQ is set to some nonzero value
assign( nd0, mkexpr(regT) );
assign( nd1, mkU32(ceil) );
assign( nd2, binop( Iop_CmpLT32S, mkexpr(nd1), mkexpr(nd0) ) );
- assign( nd3, IRExpr_Mux0X( mkexpr(nd2), mkexpr(nd0), mkexpr(nd1) ) );
+ assign( nd3, IRExpr_ITE( mkexpr(nd2), mkexpr(nd1), mkexpr(nd0) ) );
assign( nd4, mkU32(floor) );
assign( nd5, binop( Iop_CmpLT32S, mkexpr(nd3), mkexpr(nd4) ) );
- assign( nd6, IRExpr_Mux0X( mkexpr(nd5), mkexpr(nd3), mkexpr(nd4) ) );
+ assign( nd6, IRExpr_ITE( mkexpr(nd5), mkexpr(nd4), mkexpr(nd3) ) );
assign( *res, mkexpr(nd6) );
/* if saturation occurred, then resQ is set to some nonzero value
assign(oldC, mk_armg_calculate_flag_c() );
assign(
*newC,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0)),
- IRExpr_Mux0X(
+ mkexpr(oldC),
+ IRExpr_ITE(
binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32)),
- mkU32(0),
binop(Iop_And32,
binop(Iop_Shr32,
mkexpr(rMt),
)
),
mkU32(1)
- )
- ),
- mkexpr(oldC)
+ ),
+ mkU32(0)
+ )
)
);
}
assign(oldC, mk_armg_calculate_flag_c() );
assign(
*newC,
- IRExpr_Mux0X(
- binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0)),
- IRExpr_Mux0X(
+ IRExpr_ITE(
+ binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0)),
+ mkexpr(oldC),
+ IRExpr_ITE(
binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32)),
- mkU32(0),
binop(Iop_And32,
binop(Iop_Shr32,
mkexpr(rMt),
)
),
mkU32(1)
- )
- ),
- mkexpr(oldC)
+ ),
+ mkU32(0)
+ )
)
);
}
assign(oldC, mk_armg_calculate_flag_c() );
assign(
*newC,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0)),
- IRExpr_Mux0X(
+ mkexpr(oldC),
+ IRExpr_ITE(
binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32)),
- binop(Iop_And32,
- binop(Iop_Shr32,
- mkexpr(rMt),
- mkU8(31)
- ),
- mkU32(1)
- ),
binop(Iop_And32,
binop(Iop_Shr32,
mkexpr(rMt),
)
),
mkU32(1)
+ ),
+ binop(Iop_And32,
+ binop(Iop_Shr32,
+ mkexpr(rMt),
+ mkU8(31)
+ ),
+ mkU32(1)
)
- ),
- mkexpr(oldC)
+ )
)
);
}
mkexpr(rMt),
unop(
Iop_32to8,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT32U, mkexpr(amtT), mkU32(32)),
- mkU32(31),
- mkexpr(amtT)))));
+ mkexpr(amtT),
+ mkU32(31)))));
DIS(buf, "r%u, ASR r%u", rM, rS);
}
if (newC) {
assign(
*newC,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpNE32, mkexpr(amtT), mkU32(0)),
- mkexpr(oldC),
binop(Iop_And32,
binop(Iop_Shr32,
mkexpr(rMt),
)
),
mkU32(1)
- )
+ ),
+ mkexpr(oldC)
)
);
}
assign(
*res,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpNE32, mkexpr(amt5T), mkU32(0)),
- mkexpr(rMt),
binop(Iop_Or32,
binop(Iop_Shr32,
mkexpr(rMt),
binop(Iop_Sub32, mkU32(32), mkexpr(amt5T))
)
)
- )
+ ),
+ mkexpr(rMt)
)
);
DIS(buf, "r%u, ROR r#%u", rM, rS);
IRTemp arg = newTemp(Ity_I32);
IRTemp res = newTemp(Ity_I32);
assign(arg, getIRegA(rM));
- assign(res, IRExpr_Mux0X(
+ assign(res, IRExpr_ITE(
binop(Iop_CmpEQ32, mkexpr(arg), mkU32(0)),
- unop(Iop_Clz32, mkexpr(arg)),
- mkU32(32)
+ mkU32(32),
+ unop(Iop_Clz32, mkexpr(arg))
));
putIRegA(rD, mkexpr(res), condT, Ijk_Boring);
DIP("clz%s r%u, r%u\n", nCC(INSN_COND), rD, rM);
// = 1
//
// condT = newTemp(Ity_I32);
- // assign(condT, IRExpr_Mux0X(
+ // assign(condT, IRExpr_ITE(
// unop(Iop_32to8, binop(Iop_And32,
// mkexpr(old_itstate),
// mkU32(0xF0))),
- // mkU32(1),
- // mkexpr(condT1)
+ // mkexpr(condT1),
+ // mkU32(1))
// ));
condT = newTemp(Ity_I32);
assign(condT, mkU32(1));
We test explicitly for old_itstate[7:4] == AL ^ 0xE, and in
that case set condT directly to 1. Else we use the results
of the helper. Since old_itstate is always defined and
- because Memcheck does lazy V-bit propagation through Mux0X,
+ because Memcheck does lazy V-bit propagation through ITE,
this will cause condT to always be a defined 1 if the
condition is 'AL'. From an execution semantics point of view
this is irrelevant since we're merely duplicating part of the
simulator still runs fine. It's just that we get loads of
false errors from Memcheck. */
condT = newTemp(Ity_I32);
- assign(condT, IRExpr_Mux0X(
+ assign(condT, IRExpr_ITE(
binop(Iop_CmpNE32, binop(Iop_And32,
mkexpr(old_itstate),
mkU32(0xF0)),
mkU32(0)),
- mkU32(1),
- mkexpr(condT1)
+ mkexpr(condT1),
+ mkU32(1)
));
/* Something we don't have in ARM: generate a 0 or 1 value
IRTemp arg = newTemp(Ity_I32);
IRTemp res = newTemp(Ity_I32);
assign(arg, getIRegT(rM1));
- assign(res, IRExpr_Mux0X(
+ assign(res, IRExpr_ITE(
binop(Iop_CmpEQ32, mkexpr(arg), mkU32(0)),
- unop(Iop_Clz32, mkexpr(arg)),
- mkU32(32)
+ mkU32(32),
+ unop(Iop_Clz32, mkexpr(arg))
));
putIRegT(rD, mkexpr(res), condT);
DIP("clz r%u, r%u\n", rD, rM1);
putIReg(rt, binop(op, getIReg(rs), mkU64(imm)));
#define FP_CONDITIONAL_CODE \
- t3 = newTemp(Ity_I32); \
- assign(t3, binop(Iop_And32, IRExpr_Mux0X( unop(Iop_1Uto8, \
- binop(Iop_CmpEQ32, mkU32(cc), mkU32(0))), \
- binop(Iop_Shr32, getFCSR(), mkU8(24+cc)), \
- binop(Iop_Shr32, getFCSR(), mkU8(23))), mkU32(0x1)));
+ t3 = newTemp(Ity_I32); \
+ assign(t3, binop(Iop_And32, \
+ IRExpr_ITE( unop(Iop_1Uto8, \
+ binop(Iop_CmpEQ32, mkU32(cc), mkU32(0))), \
+ binop(Iop_Shr32, getFCSR(), mkU8(23)), \
+ binop(Iop_Shr32, getFCSR(), mkU8(24+cc))), \
+ mkU32(0x1)));
/*------------------------------------------------------------*/
/*--- Field helpers ---*/
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(0),
mkU32(bc1_cc))));
- assign(t2, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t1)),
- binop(Iop_And32, binop(Iop_Shr32, getFCSR(),
- mkU8(24 + bc1_cc)), mkU32(0x1)), binop(Iop_And32,
- binop(Iop_Shr32, getFCSR(), mkU8(23)),
- mkU32(0x1))));
+ assign(t2, IRExpr_ITE(unop(Iop_32to8, mkexpr(t1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(23)),
+ mkU32(0x1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(24 + bc1_cc)),
+ mkU32(0x1))
+ ));
if (tf == 1 && nd == 0) {
//branch on true
assign(t3, unop(Iop_1Sto32, binop(Iop_CmpNE32, mkU32(0),
getIReg(rt))));
- assign(t4, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t3)),
- mkexpr(t2), mkexpr(t1)));
+ assign(t4, IRExpr_ITE(unop(Iop_32to8, mkexpr(t3)),
+ mkexpr(t1), mkexpr(t2)));
putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
mkexpr(t4)));
assign(t3, unop(Iop_1Sto32, binop(Iop_CmpNE32, mkU32(0),
getIReg(rt))));
- putDReg(fd, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t3)),
- getDReg(fd), getDReg(fs)));
+ putDReg(fd, IRExpr_ITE(unop(Iop_32to8, mkexpr(t3)),
+ getDReg(fs), getDReg(fd)));
break;
default:
goto decode_failure;
assign(t2, unop(Iop_F32toF64, getFReg(fd)));
assign(t3, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(0),
getIReg(rt))));
- assign(t4, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t3)),
- mkexpr(t2), mkexpr(t1)));
+ assign(t4, IRExpr_ITE(unop(Iop_32to8, mkexpr(t3)),
+ mkexpr(t1), mkexpr(t2)));
putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
mkexpr(t4)));
assign(t3, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(0),
getIReg(rt))));
- putDReg(fd, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t3)),
- getDReg(fd), getDReg(fs)));
+ putDReg(fd, IRExpr_ITE(unop(Iop_32to8, mkexpr(t3)),
+ getDReg(fs), getDReg(fd)));
break;
default:
goto decode_failure;
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(0),
mkU32(mov_cc))));
- assign(t2, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t1)),
- binop(Iop_And32, binop(Iop_Shr32, getFCSR(),
- mkU8(24 + mov_cc)), mkU32(0x1)),
- binop(Iop_And32, binop(Iop_Shr32, getFCSR(),
- mkU8(23)), mkU32(0x1))));
+ assign(t2, IRExpr_ITE(unop(Iop_32to8, mkexpr(t1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(23)),
+ mkU32(0x1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(24 + mov_cc)),
+ mkU32(0x1))
+ ));
assign(t3, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(1),
mkexpr(t2))));
- assign(t4, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t3)),
- getDReg(fs), getDReg(fd)));
+ assign(t4, IRExpr_ITE(unop(Iop_32to8, mkexpr(t3)),
+ getDReg(fd), getDReg(fs)));
putDReg(fd, mkexpr(t4));
break;
case 0x10: // S
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(0),
mkU32(mov_cc))));
- assign(t2, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t1)),
- binop(Iop_And32, binop(Iop_Shr32,
- getFCSR(), mkU8(24 + mov_cc)),
- mkU32(0x1)), binop(Iop_And32,
- binop(Iop_Shr32, getFCSR(),
- mkU8(23)), mkU32(0x1))));
+ assign(t2, IRExpr_ITE(unop(Iop_32to8, mkexpr(t1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(23)),
+ mkU32(0x1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(24 + mov_cc)),
+ mkU32(0x1))
+ ));
assign(t3, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(1),
mkexpr(t2))));
- assign(t4, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t3)),
- mkexpr(t5), mkexpr(t6)));
+ assign(t4, IRExpr_ITE(unop(Iop_32to8, mkexpr(t3)),
+ mkexpr(t6), mkexpr(t5)));
putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
mkexpr(t4)));
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32,
mkU32(0), mkU32(mov_cc))));
- assign(t2, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t1)),
- binop(Iop_And32, binop(Iop_Shr32, getFCSR(),
- mkU8(24 + mov_cc)), mkU32(0x1)),
- binop(Iop_And32, binop(Iop_Shr32, getFCSR(),
- mkU8(23)), mkU32(0x1))));
+ assign(t2, IRExpr_ITE(unop(Iop_32to8, mkexpr(t1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(23)),
+ mkU32(0x1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(24 + mov_cc)),
+ mkU32(0x1))
+ ));
assign(t3, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(1),
mkexpr(t2))));
- assign(t4, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t3)),
- getDReg(fd), getDReg(fs)));
+ assign(t4, IRExpr_ITE(unop(Iop_32to8, mkexpr(t3)),
+ getDReg(fs), getDReg(fd)));
putDReg(fd, mkexpr(t4));
break;
case 0x10: // S
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(0),
mkU32(mov_cc))));
- assign(t2, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t1)),
- binop(Iop_And32, binop(Iop_Shr32, getFCSR(),
- mkU8(24 + mov_cc)), mkU32(0x1)),
- binop(Iop_And32, binop(Iop_Shr32, getFCSR(),
- mkU8(23)), mkU32(0x1))));
+ assign(t2, IRExpr_ITE(unop(Iop_32to8, mkexpr(t1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(23)),
+ mkU32(0x1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(24 + mov_cc)),
+ mkU32(0x1))
+ ));
assign(t3, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(1),
mkexpr(t2))));
- assign(t4, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t3)),
- mkexpr(t6), mkexpr(t5)));
+ assign(t4, IRExpr_ITE(unop(Iop_32to8, mkexpr(t3)),
+ mkexpr(t5), mkexpr(t6)));
putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
mkexpr(t4)));
}
assign(t5, unop(Iop_1Sto32, binop(Iop_CmpLT32U, mkexpr(t2),
mkexpr(t4))));
- assign(t6, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t5)), mkexpr(t1),
- binop(Iop_Sub32, mkexpr(t1), mkU32(0x1))));
+ assign(t6, IRExpr_ITE(unop(Iop_32to8, mkexpr(t5)),
+ binop(Iop_Sub32, mkexpr(t1), mkU32(0x1)),
+ mkexpr(t1)));
putHI(binop(Iop_Sub32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3))));
putLO(binop(Iop_Sub32, mkexpr(t2), mkexpr(t4)));
assign(t5, unop(Iop_1Sto32, binop(Iop_CmpLT32U, mkexpr(t2),
mkexpr(t4))));
- assign(t6, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t5)),
- mkexpr(t1), binop(Iop_Sub32, mkexpr(t1), mkU32(0x1))));
+ assign(t6, IRExpr_ITE(unop(Iop_32to8, mkexpr(t5)),
+ binop(Iop_Sub32, mkexpr(t1), mkU32(0x1)),
+ mkexpr(t1)));
putHI(binop(Iop_Sub32, mkexpr(t6), unop(Iop_64HIto32, mkexpr(t3))));
putLO(binop(Iop_Sub32, mkexpr(t2), mkexpr(t4)));
t1 = newTemp(Ity_I32);
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, getIReg(rs),
mkU32(0))));
- putIReg(rd, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t1)),
- unop(Iop_Clz32, getIReg(rs)), mkU32(0x00000020)));
+ putIReg(rd, IRExpr_ITE(unop(Iop_32to8, mkexpr(t1)),
+ mkU32(0x00000020),
+ unop(Iop_Clz32, getIReg(rs))));
break;
}
t1 = newTemp(Ity_I32);
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, getIReg(rs),
mkU32(0xffffffff))));
- putIReg(rd, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t1)),
- unop(Iop_Clz32, unop(Iop_Not32, getIReg(rs))),
- mkU32(0x00000020)));
+ putIReg(rd, IRExpr_ITE(unop(Iop_32to8, mkexpr(t1)),
+ mkU32(0x00000020),
+ unop(Iop_Clz32, unop(Iop_Not32, getIReg(rs)))));
break;
}
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(0),
mkU32(mov_cc))));
- assign(t2, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t1)),
- binop(Iop_And32, binop(Iop_Shr32, getFCSR(),
- mkU8(24 + mov_cc)), mkU32(0x1)), binop(Iop_And32,
- binop(Iop_Shr32, getFCSR(), mkU8(23)),
- mkU32(0x1))));
+ assign(t2, IRExpr_ITE(unop(Iop_32to8, mkexpr(t1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(23)),
+ mkU32(0x1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(24 + mov_cc)),
+ mkU32(0x1))
+ ));
assign(t3, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(0),
mkexpr(t2))));
- assign(t4, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t3)),
- getIReg(rd), getIReg(rs)));
+ assign(t4, IRExpr_ITE(unop(Iop_32to8, mkexpr(t3)),
+ getIReg(rs), getIReg(rd)));
putIReg(rd, mkexpr(t4));
}
} else if (tf == 1) { /* MOVT */
assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(0),
mkU32(mov_cc))));
- assign(t2, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t1)),
- binop(Iop_And32, binop(Iop_Shr32, getFCSR(),
- mkU8(24 + mov_cc)), mkU32(0x1)), binop(Iop_And32,
- binop(Iop_Shr32, getFCSR(), mkU8(23)),
- mkU32(0x1))));
+ assign(t2, IRExpr_ITE(unop(Iop_32to8, mkexpr(t1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(23)),
+ mkU32(0x1)),
+ binop(Iop_And32,
+ binop(Iop_Shr32, getFCSR(),
+ mkU8(24 + mov_cc)),
+ mkU32(0x1))
+ ));
assign(t3, unop(Iop_1Sto32, binop(Iop_CmpEQ32, mkU32(1),
mkexpr(t2))));
- assign(t4, IRExpr_Mux0X(unop(Iop_32to8, mkexpr(t3)),
- getIReg(rd), getIReg(rs)));
+ assign(t4, IRExpr_ITE(unop(Iop_32to8, mkexpr(t3)),
+ getIReg(rs), getIReg(rd)));
putIReg(rd, mkexpr(t4));
}
}
assign( hi32, unop(Iop_64HIto32, t64));
assign( lo32, unop(Iop_64to32, t64));
- return IRExpr_Mux0X(
+ return IRExpr_ITE(
/* if (hi32 == (lo32 >>s 31)) */
binop(Iop_CmpEQ32, mkexpr(hi32),
binop( Iop_Sar32, mkexpr(lo32), mkU8(31))),
+ /* then: within signed-32 range: lo half good enough */
+ mkexpr(lo32),
/* else: sign dep saturate: 1->0x80000000, 0->0x7FFFFFFF */
binop(Iop_Add32, mkU32(0x7FFFFFFF),
- binop(Iop_Shr32, mkexpr(hi32), mkU8(31))),
- /* then: within signed-32 range: lo half good enough */
- mkexpr(lo32) );
+ binop(Iop_Shr32, mkexpr(hi32), mkU8(31))));
}
/* Unsigned saturating narrow 64S to 32 */
assign( hi32, unop(Iop_64HIto32, t64));
assign( lo32, unop(Iop_64to32, t64));
- return IRExpr_Mux0X(
+ return IRExpr_ITE(
/* if (top 32 bits of t64 are 0) */
binop(Iop_CmpEQ32, mkexpr(hi32), mkU32(0)),
- /* else: positive saturate -> 0xFFFFFFFF */
- mkU32(0xFFFFFFFF),
/* then: within unsigned-32 range: lo half good enough */
- mkexpr(lo32) );
+ mkexpr(lo32),
+ /* else: positive saturate -> 0xFFFFFFFF */
+ mkU32(0xFFFFFFFF));
}
/* Signed saturate narrow 64->32, combining to V128 */
binop(Iop_Shl32, src, mask),
binop(Iop_Shr32, src, binop(Iop_Sub8, mkU8(32), mask)));
}
- /* Note: the MuxOX is not merely an optimisation; it's needed
+ /* Note: the ITE is not merely an optimisation; it's needed
because otherwise the Shr is a shift by the word size when
mask denotes zero. For rotates by immediates, a lot of
this junk gets folded out. */
- return IRExpr_Mux0X( binop(Iop_CmpNE8, mask, mkU8(0)),
- /* zero rotate */ src,
- /* non-zero rotate */ rot );
+ return IRExpr_ITE( binop(Iop_CmpNE8, mask, mkU8(0)),
+ /* non-zero rotate */ rot,
+ /* zero rotate */ src);
}
/* Standard effective address calc: (rA + rB) */
)
);
xer_ca
- = IRExpr_Mux0X(
+ = IRExpr_ITE(
/* shift amt > 31 ? */
binop(Iop_CmpLT32U, mkU32(31), argR),
- /* no -- be like srawi */
- unop(Iop_1Uto32, binop(Iop_CmpNE32, xer_ca, mkU32(0))),
/* yes -- get sign bit of argL */
- binop(Iop_Shr32, argL, mkU8(31))
+ binop(Iop_Shr32, argL, mkU8(31)),
+ /* no -- be like srawi */
+ unop(Iop_1Uto32, binop(Iop_CmpNE32, xer_ca, mkU32(0)))
);
break;
)
);
xer_ca
- = IRExpr_Mux0X(
+ = IRExpr_ITE(
/* shift amt > 31 ? */
binop(Iop_CmpLT64U, mkU64(31), argR),
- /* no -- be like srawi */
- unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0))),
/* yes -- get sign bit of argL */
- unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63)))
- );
+ unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63))),
+ /* no -- be like srawi */
+ unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)))
+ );
break;
case /* 11 */ PPCG_FLAG_OP_SRAWI:
)
);
xer_ca
- = IRExpr_Mux0X(
+ = IRExpr_ITE(
/* shift amt > 63 ? */
binop(Iop_CmpLT64U, mkU64(63), argR),
- /* no -- be like sradi */
- unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0))),
/* yes -- get sign bit of argL */
- unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63)))
+ unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63))),
+ /* no -- be like sradi */
+ unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)))
);
break;
// Iop_Clz32 undefined for arg==0, so deal with that case:
irx = binop(Iop_CmpNE32, lo32, mkU32(0));
assign(rA, mkWidenFrom32(ty,
- IRExpr_Mux0X( irx,
- mkU32(32),
- unop(Iop_Clz32, lo32)),
+ IRExpr_ITE( irx,
+ unop(Iop_Clz32, lo32),
+ mkU32(32)),
False));
// TODO: alternatively: assign(rA, verbose_Clz32(rS));
flag_rC ? ".":"", rA_addr, rS_addr);
// Iop_Clz64 undefined for arg==0, so deal with that case:
irx = binop(Iop_CmpNE64, mkexpr(rS), mkU64(0));
- assign(rA, IRExpr_Mux0X( irx,
- mkU64(64),
- unop(Iop_Clz64, mkexpr(rS)) ));
+ assign(rA, IRExpr_ITE( irx,
+ unop(Iop_Clz64, mkexpr(rS)),
+ mkU64(64) ));
// TODO: alternatively: assign(rA, verbose_Clz64(rS));
break;
e_tmp = binop( Iop_Sar32,
mkexpr(rS_lo32),
unop( Iop_32to8,
- IRExpr_Mux0X( mkexpr(outofrange),
- mkexpr(sh_amt),
- mkU32(31)) ) );
+ IRExpr_ITE( mkexpr(outofrange),
+ mkU32(31),
+ mkexpr(sh_amt)) ) );
assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */True) );
set_XER_CA( ty, PPCG_FLAG_OP_SRAW,
binop( Iop_Sar64,
mkexpr(rS),
unop( Iop_64to8,
- IRExpr_Mux0X( mkexpr(outofrange),
- mkexpr(sh_amt),
- mkU64(63)) ))
+ IRExpr_ITE( mkexpr(outofrange),
+ mkU64(63),
+ mkexpr(sh_amt)) ))
);
set_XER_CA( ty, PPCG_FLAG_OP_SRAD,
mkexpr(rA), mkexpr(rS), mkexpr(sh_amt),
// frD = (frA >= 0.0) ? frC : frB
// = (cc_b0 == 0) ? frC : frB
assign( frD,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpEQ32, mkexpr(cc_b0), mkU32(0)),
- mkexpr(frB),
- mkexpr(frC) ));
+ mkexpr(frC),
+ mkexpr(frB) ));
/* One of the rare ones which don't mess with FPRF */
set_FPRF = False;
/* need to preserve sign of zero */
/* frD = (fabs(frB) > 9e18) ? frB :
(sign(frB)) ? -fabs((double)r_tmp64) : (double)r_tmp64 */
- assign(frD, IRExpr_Mux0X(
+ assign(frD, IRExpr_ITE(
binop(Iop_CmpNE8,
unop(Iop_32to8,
binop(Iop_CmpF64,
IRExpr_Const(IRConst_F64(9e18)),
unop(Iop_AbsF64, mkexpr(frB)))),
mkU8(0)),
- IRExpr_Mux0X(
+ mkexpr(frB),
+ IRExpr_ITE(
binop(Iop_CmpNE32,
binop(Iop_Shr32,
unop(Iop_64HIto32,
mkexpr(frB))),
mkU8(31)),
mkU32(0)),
- binop(Iop_I64StoF64, mkU32(0), mkexpr(r_tmp64) ),
unop(Iop_NegF64,
unop( Iop_AbsF64,
binop(Iop_I64StoF64, mkU32(0),
- mkexpr(r_tmp64)) ))
- ),
- mkexpr(frB)
+ mkexpr(r_tmp64)) )),
+ binop(Iop_I64StoF64, mkU32(0), mkexpr(r_tmp64) )
+ )
));
break;
assign( res1, unop(Iop_64HIto32, mkexpr(lo64)) );
assign( res0, unop(Iop_64to32, mkexpr(lo64)) );
- b3_result = IRExpr_Mux0X(is_NaN_32(b3),
- // else: result is from the Iop_QFtoI32{s|u}x4_RZ
- mkexpr(res3),
- // then: result is 0x{8|0}80000000
- mkU32(un_signed ? 0x00000000 : 0x80000000));
- b2_result = IRExpr_Mux0X(is_NaN_32(b2),
- // else: result is from the Iop_QFtoI32{s|u}x4_RZ
- mkexpr(res2),
- // then: result is 0x{8|0}80000000
- mkU32(un_signed ? 0x00000000 : 0x80000000));
- b1_result = IRExpr_Mux0X(is_NaN_32(b1),
- // else: result is from the Iop_QFtoI32{s|u}x4_RZ
- mkexpr(res1),
- // then: result is 0x{8|0}80000000
- mkU32(un_signed ? 0x00000000 : 0x80000000));
- b0_result = IRExpr_Mux0X(is_NaN_32(b0),
- // else: result is from the Iop_QFtoI32{s|u}x4_RZ
- mkexpr(res0),
- // then: result is 0x{8|0}80000000
- mkU32(un_signed ? 0x00000000 : 0x80000000));
+ b3_result = IRExpr_ITE(is_NaN_32(b3),
+ // then: result is 0x{8|0}80000000
+ mkU32(un_signed ? 0x00000000 : 0x80000000),
+ // else: result is from the Iop_QFtoI32{s|u}x4_RZ
+ mkexpr(res3));
+ b2_result = IRExpr_ITE(is_NaN_32(b2),
+ // then: result is 0x{8|0}80000000
+ mkU32(un_signed ? 0x00000000 : 0x80000000),
+ // else: result is from the Iop_QFtoI32{s|u}x4_RZ
+ mkexpr(res2));
+ b1_result = IRExpr_ITE(is_NaN_32(b1),
+ // then: result is 0x{8|0}80000000
+ mkU32(un_signed ? 0x00000000 : 0x80000000),
+ // else: result is from the Iop_QFtoI32{s|u}x4_RZ
+ mkexpr(res1));
+ b0_result = IRExpr_ITE(is_NaN_32(b0),
+ // then: result is 0x{8|0}80000000
+ mkU32(un_signed ? 0x00000000 : 0x80000000),
+ // else: result is from the Iop_QFtoI32{s|u}x4_RZ
+ mkexpr(res0));
putVSReg( XT,
binop( Iop_64HLtoV128,
#define SNAN_MASK 0x0008000000000000ULL
return
- IRExpr_Mux0X(mkexpr(frA_isSNaN),
- /* else: if frB is a SNaN */
- IRExpr_Mux0X(mkexpr(frB_isSNaN),
- /* else: if frB is a QNaN */
- IRExpr_Mux0X(mkexpr(frB_isQNaN),
- /* else: frA is a QNaN, so result = frB */
- mkexpr(frB_I64),
- /* then: result = frA */
- mkexpr(frA_I64)),
- /* then: result = frB converted to QNaN */
- binop(Iop_Or64, mkexpr(frB_I64), mkU64(SNAN_MASK))),
- /* then: result = frA converted to QNaN */
- binop(Iop_Or64, mkexpr(frA_I64), mkU64(SNAN_MASK)));
+ IRExpr_ITE(mkexpr(frA_isSNaN),
+ /* then: result = frA converted to QNaN */
+ binop(Iop_Or64, mkexpr(frA_I64), mkU64(SNAN_MASK)),
+ /* else: if frB is a SNaN */
+ IRExpr_ITE(mkexpr(frB_isSNaN),
+ /* then: result = frB converted to QNaN */
+ binop(Iop_Or64, mkexpr(frB_I64), mkU64(SNAN_MASK)),
+ /* else: if frB is a QNaN */
+ IRExpr_ITE(mkexpr(frB_isQNaN),
+ /* then: result = frA */
+ mkexpr(frA_I64),
+ /* else: frA is a QNaN, so result = frB */
+ mkexpr(frB_I64))));
}
/*
unop( Iop_ReinterpI64asF64,
mkexpr( src2 ) ) ) );
- return IRExpr_Mux0X( binop( Iop_CmpEQ32,
+ return IRExpr_ITE( binop( Iop_CmpEQ32,
mkexpr( src1cmpsrc2 ),
mkU32( isMin ? PPC_CMP_LT : PPC_CMP_GT ) ),
- /* else: use src2 */
- mkexpr( src2 ),
- /* then: use src1 */
- mkexpr( src1 ) );
+ /* then: use src1 */
+ mkexpr( src1 ),
+ /* else: use src2 */
+ mkexpr( src2 ) );
}
/*
assign(anyNaN, mkOR1(is_NaN(frA_I64), is_NaN(frB_I64)));
#define MINUS_ZERO 0x8000000000000000ULL
- return IRExpr_Mux0X( /* If both arguments are zero . . . */
- mkAND1( mkexpr( frA_isZero ), mkexpr( frB_isZero ) ),
- /* else: check if either input is a NaN*/
- IRExpr_Mux0X( mkexpr( anyNaN ),
- /* else: use "comparison helper" */
- _get_maxmin_fp_cmp( frB_I64, frA_I64, isMin ),
- /* then: use "NaN helper" */
- _get_maxmin_fp_NaN( frA_I64, frB_I64 ) ),
- /* then: if frA is -0 and isMin==True, return -0;
- * else if frA is +0 and isMin==False; return +0;
- * otherwise, simply return frB. */
- IRExpr_Mux0X( binop( Iop_CmpEQ32,
- unop( Iop_64HIto32,
- mkexpr( frA_I64 ) ),
- mkU32( isMin ? 0x80000000 : 0 ) ),
- mkexpr( frB_I64 ),
- mkU64( isMin ? MINUS_ZERO : 0ULL ) ) );
+ return IRExpr_ITE( /* If both arguments are zero . . . */
+ mkAND1( mkexpr( frA_isZero ), mkexpr( frB_isZero ) ),
+ /* then: if frA is -0 and isMin==True, return -0;
+ * else if frA is +0 and isMin==False; return +0;
+ * otherwise, simply return frB. */
+ IRExpr_ITE( binop( Iop_CmpEQ32,
+ unop( Iop_64HIto32,
+ mkexpr( frA_I64 ) ),
+ mkU32( isMin ? 0x80000000 : 0 ) ),
+ mkU64( isMin ? MINUS_ZERO : 0ULL ),
+ mkexpr( frB_I64 ) ),
+ /* else: check if either input is a NaN*/
+ IRExpr_ITE( mkexpr( anyNaN ),
+ /* then: use "NaN helper" */
+ _get_maxmin_fp_NaN( frA_I64, frB_I64 ),
+ /* else: use "comparison helper" */
+ _get_maxmin_fp_cmp( frB_I64, frA_I64, isMin ) ));
}
/*
/* frD = (fabs(frB) > 9e18) ? frB :
(sign(frB)) ? -fabs((double)intermediateResult) : (double)intermediateResult */
assign( frD,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop( Iop_CmpNE8,
unop( Iop_32to8,
binop( Iop_CmpF64,
IRExpr_Const( IRConst_F64( 9e18 ) ),
unop( Iop_AbsF64, mkexpr( frB ) ) ) ),
mkU8(0) ),
- IRExpr_Mux0X(
+ mkexpr( frB ),
+ IRExpr_ITE(
binop( Iop_CmpNE32,
binop( Iop_Shr32,
unop( Iop_64HIto32,
mkexpr( frB_I64 ) ),
mkU8( 31 ) ),
mkU32(0) ),
- binop( Iop_I64StoF64,
- mkU32( 0 ),
- mkexpr( intermediateResult ) ),
unop( Iop_NegF64,
unop( Iop_AbsF64,
binop( Iop_I64StoF64,
mkU32( 0 ),
- mkexpr( intermediateResult ) ) ) )
- ),
- mkexpr( frB )
+ mkexpr( intermediateResult ) ) ) ),
+ binop( Iop_I64StoF64,
+ mkU32( 0 ),
+ mkexpr( intermediateResult ) )
+ )
)
);
binop( Iop_And32, hi32, mkU32( 0x00080000 ) ),
mkU32( 0 ) ) ) );
- return IRExpr_Mux0X( mkexpr( is_SNAN ),
- mkexpr( frD ),
+ return IRExpr_ITE( mkexpr( is_SNAN ),
unop( Iop_ReinterpI64asF64,
binop( Iop_Xor64,
mkU64( SNAN_MASK ),
- mkexpr( frB_I64 ) ) ) );
+ mkexpr( frB_I64 ) ) ),
+ mkexpr( frD ));
}
/*
UInt bi = ifieldRegC( theInstr );
putIReg(
rT,
- IRExpr_Mux0X( binop(Iop_CmpNE32, getCRbit( bi ), mkU32(0)),
- getIReg(rB),
- rA == 0 ? (mode64 ? mkU64(0) : mkU32(0))
- : getIReg(rA) )
+ IRExpr_ITE( binop(Iop_CmpNE32, getCRbit( bi ), mkU32(0)),
+ rA == 0 ? (mode64 ? mkU64(0) : mkU32(0))
+ : getIReg(rA),
+ getIReg(rB))
);
DIP("isel r%u,r%u,r%u,crb%u\n", rT,rA,rB,bi);
goto decode_success;
{
vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
- return IRExpr_Mux0X(condition, iffalse, iftrue);
+ return IRExpr_ITE(condition, iftrue, iffalse);
}
/* Add a statement that stores DATA at ADDR. This is a big-endian machine. */
/* DEP1 contains the result, DEP2 contains the undershifted value. */
stmt( IRStmt_Put( OFFB_CC_OP,
- IRExpr_Mux0X( mkexpr(guardB),
- IRExpr_Get(OFFB_CC_OP,Ity_I32),
- mkU32(ccOp))) );
+ IRExpr_ITE( mkexpr(guardB),
+ mkU32(ccOp),
+ IRExpr_Get(OFFB_CC_OP,Ity_I32) ) ));
stmt( IRStmt_Put( OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(guardB),
- IRExpr_Get(OFFB_CC_DEP1,Ity_I32),
- widenUto32(mkexpr(res)))) );
+ IRExpr_ITE( mkexpr(guardB),
+ widenUto32(mkexpr(res)),
+ IRExpr_Get(OFFB_CC_DEP1,Ity_I32) ) ));
stmt( IRStmt_Put( OFFB_CC_DEP2,
- IRExpr_Mux0X( mkexpr(guardB),
- IRExpr_Get(OFFB_CC_DEP2,Ity_I32),
- widenUto32(mkexpr(resUS)))) );
+ IRExpr_ITE( mkexpr(guardB),
+ widenUto32(mkexpr(resUS)),
+ IRExpr_Get(OFFB_CC_DEP2,Ity_I32) ) ));
/* Set NDEP even though it isn't used. This makes redundant-PUT
elimination of previous stores to this field work better. */
stmt( IRStmt_Put( OFFB_CC_NDEP,
- IRExpr_Mux0X( mkexpr(guardB),
- IRExpr_Get(OFFB_CC_NDEP,Ity_I32),
- mkU32(0) )));
+ IRExpr_ITE( mkexpr(guardB),
+ mkU32(0),
+ IRExpr_Get(OFFB_CC_NDEP,Ity_I32) ) ));
}
/* CC_DEP1 is the rotated value. CC_NDEP is flags before. */
stmt( IRStmt_Put( OFFB_CC_OP,
- IRExpr_Mux0X( mkexpr(rot_amt32b),
- IRExpr_Get(OFFB_CC_OP,Ity_I32),
- mkU32(ccOp))) );
+ IRExpr_ITE( mkexpr(rot_amt32b),
+ mkU32(ccOp),
+ IRExpr_Get(OFFB_CC_OP,Ity_I32) ) ));
stmt( IRStmt_Put( OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(rot_amt32b),
- IRExpr_Get(OFFB_CC_DEP1,Ity_I32),
- widenUto32(mkexpr(dst1)))) );
+ IRExpr_ITE( mkexpr(rot_amt32b),
+ widenUto32(mkexpr(dst1)),
+ IRExpr_Get(OFFB_CC_DEP1,Ity_I32) ) ));
stmt( IRStmt_Put( OFFB_CC_DEP2,
- IRExpr_Mux0X( mkexpr(rot_amt32b),
- IRExpr_Get(OFFB_CC_DEP2,Ity_I32),
- mkU32(0))) );
+ IRExpr_ITE( mkexpr(rot_amt32b),
+ mkU32(0),
+ IRExpr_Get(OFFB_CC_DEP2,Ity_I32) ) ));
stmt( IRStmt_Put( OFFB_CC_NDEP,
- IRExpr_Mux0X( mkexpr(rot_amt32b),
- IRExpr_Get(OFFB_CC_NDEP,Ity_I32),
- mkexpr(oldFlags))) );
+ IRExpr_ITE( mkexpr(rot_amt32b),
+ mkexpr(oldFlags),
+ IRExpr_Get(OFFB_CC_NDEP,Ity_I32) ) ));
} /* if (isRotate) */
/* Save result, and finish up. */
// special-case around that.
IRTemp res32 = newTemp(Ity_I32);
assign(res32,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpEQ32, mkexpr(src32x), mkU32(0)),
- unop(Iop_Clz32, mkexpr(src32x)),
- mkU32(8 * sizeofIRType(ty))
+ mkU32(8 * sizeofIRType(ty)),
+ unop(Iop_Clz32, mkexpr(src32x))
));
IRTemp res = newTemp(ty);
{
put_ST_UNCHECKED(
i,
- IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
- /* 0 means empty */
- value,
- /* non-0 means full */
- mkQNaN64()
+ IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
+ /* non-0 means full */
+ mkQNaN64(),
+ /* 0 means empty */
+ value
)
);
}
static IRExpr* get_ST ( Int i )
{
return
- IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
- /* 0 means empty */
- mkQNaN64(),
- /* non-0 means full */
- get_ST_UNCHECKED(i));
+ IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
+ /* non-0 means full */
+ get_ST_UNCHECKED(i),
+ /* 0 means empty */
+ mkQNaN64());
}
r_src = (UInt)modrm - 0xC0;
DIP("fcmovb %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_x86g_calculate_condition(X86CondB),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xC8 ... 0xCF: /* FCMOVE(Z) ST(i), ST(0) */
r_src = (UInt)modrm - 0xC8;
DIP("fcmovz %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_x86g_calculate_condition(X86CondZ),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xD0 ... 0xD7: /* FCMOVBE ST(i), ST(0) */
r_src = (UInt)modrm - 0xD0;
DIP("fcmovbe %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_x86g_calculate_condition(X86CondBE),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xD8 ... 0xDF: /* FCMOVU ST(i), ST(0) */
r_src = (UInt)modrm - 0xD8;
DIP("fcmovu %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_x86g_calculate_condition(X86CondP),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xE9: /* FUCOMPP %st(0),%st(1) */
r_src = (UInt)modrm - 0xC0;
DIP("fcmovnb %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_x86g_calculate_condition(X86CondNB),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xC8 ... 0xCF: /* FCMOVNE(NZ) ST(i), ST(0) */
r_src = (UInt)modrm - 0xC8;
DIP("fcmovnz %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_x86g_calculate_condition(X86CondNZ),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xD0 ... 0xD7: /* FCMOVNBE ST(i), ST(0) */
r_src = (UInt)modrm - 0xD0;
DIP("fcmovnbe %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_x86g_calculate_condition(X86CondNBE),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xD8 ... 0xDF: /* FCMOVNU ST(i), ST(0) */
r_src = (UInt)modrm - 0xD8;
DIP("fcmovnu %%st(%d), %%st(0)\n", (Int)r_src);
put_ST_UNCHECKED(0,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mk_x86g_calculate_condition(X86CondNP),
- get_ST(0), get_ST(r_src)) );
+ get_ST(r_src), get_ST(0)) );
break;
case 0xE2:
if (shl || shr) {
assign(
g1,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
- mkU64(0),
- binop(op, mkexpr(g0), mkexpr(amt8))
+ binop(op, mkexpr(g0), mkexpr(amt8)),
+ mkU64(0)
)
);
} else
if (sar) {
assign(
g1,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
- binop(op, mkexpr(g0), mkU8(size-1)),
- binop(op, mkexpr(g0), mkexpr(amt8))
+ binop(op, mkexpr(g0), mkexpr(amt8)),
+ binop(op, mkexpr(g0), mkU8(size-1))
)
);
} else {
stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
stmt( IRStmt_Put(
OFFB_CC_DEP1,
- IRExpr_Mux0X( mkexpr(srcB),
- /* src==0 */
- mkU32(X86G_CC_MASK_Z),
- /* src!=0 */
- mkU32(0)
+ IRExpr_ITE( mkexpr(srcB),
+ /* src!=0 */
+ mkU32(0),
+ /* src==0 */
+ mkU32(X86G_CC_MASK_Z)
)
));
/* Set NDEP even though it isn't used. This makes redundant-PUT
/* The main computation, guarding against zero. */
assign( dst32,
- IRExpr_Mux0X(
+ IRExpr_ITE(
mkexpr(srcB),
- /* src == 0 -- leave dst unchanged */
- widenUto32( getIReg( sz, gregOfRM(modrm) ) ),
/* src != 0 */
fwds ? unop(Iop_Ctz32, mkexpr(src32))
: binop(Iop_Sub32,
mkU32(31),
- unop(Iop_Clz32, mkexpr(src32)))
+ unop(Iop_Clz32, mkexpr(src32))),
+ /* src == 0 -- leave dst unchanged */
+ widenUto32( getIReg( sz, gregOfRM(modrm) ) )
)
);
/* There are 3 cases to consider:
reg-reg: ignore any lock prefix, generate sequence based
- on Mux0X
+ on ITE
reg-mem, not locked: ignore any lock prefix, generate sequence
- based on Mux0X
+ based on ITE
reg-mem, locked: use IRCAS
*/
assign( acc, getIReg(size, R_EAX) );
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
assign( cond, mk_x86g_calculate_condition(X86CondZ) );
- assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
+ assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) );
+ assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
putIReg(size, R_EAX, mkexpr(acc2));
putIReg(size, eregOfRM(rm), mkexpr(dest2));
DIP("cmpxchg%c %s,%s\n", nameISize(size),
assign( acc, getIReg(size, R_EAX) );
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
assign( cond, mk_x86g_calculate_condition(X86CondZ) );
- assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
+ assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) );
+ assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
putIReg(size, R_EAX, mkexpr(acc2));
storeLE( mkexpr(addr), mkexpr(dest2) );
DIP("cmpxchg%c %s,%s\n", nameISize(size),
));
setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
assign( cond, mk_x86g_calculate_condition(X86CondZ) );
- assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) );
+ assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
putIReg(size, R_EAX, mkexpr(acc2));
DIP("cmpxchg%c %s,%s\n", nameISize(size),
nameIReg(size,gregOfRM(rm)), dis_buf);
assign( tmpd, getIReg(sz, gregOfRM(rm)) );
putIReg(sz, gregOfRM(rm),
- IRExpr_Mux0X( mk_x86g_calculate_condition(cond),
- mkexpr(tmpd),
- mkexpr(tmps) )
+ IRExpr_ITE( mk_x86g_calculate_condition(cond),
+ mkexpr(tmps),
+ mkexpr(tmpd) )
);
DIP("cmov%c%s %s,%s\n", nameISize(sz),
name_X86Condcode(cond),
assign( tmpd, getIReg(sz, gregOfRM(rm)) );
putIReg(sz, gregOfRM(rm),
- IRExpr_Mux0X( mk_x86g_calculate_condition(cond),
- mkexpr(tmpd),
- mkexpr(tmps) )
+ IRExpr_ITE( mk_x86g_calculate_condition(cond),
+ mkexpr(tmps),
+ mkexpr(tmpd) )
);
DIP("cmov%c%s %s,%s\n", nameISize(sz),
if (shl || shr) {
assign(
g1,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
- mkV128(0x0000),
- binop(op, mkexpr(g0), mkexpr(amt8))
+ binop(op, mkexpr(g0), mkexpr(amt8)),
+ mkV128(0x0000)
)
);
} else
if (sar) {
assign(
g1,
- IRExpr_Mux0X(
+ IRExpr_ITE(
binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
- binop(op, mkexpr(g0), mkU8(size-1)),
- binop(op, mkexpr(g0), mkexpr(amt8))
+ binop(op, mkexpr(g0), mkexpr(amt8)),
+ binop(op, mkexpr(g0), mkU8(size-1))
)
);
} else {
If zero, put 1 in OFFB_DFLAG, else -1 in OFFB_DFLAG. */
stmt( IRStmt_Put(
OFFB_DFLAG,
- IRExpr_Mux0X(
+ IRExpr_ITE(
unop(Iop_32to1,
binop(Iop_And32,
binop(Iop_Shr32, mkexpr(t1), mkU8(10)),
mkU32(1))),
- mkU32(1),
- mkU32(0xFFFFFFFF)))
+ mkU32(0xFFFFFFFF),
+ mkU32(1)))
);
/* Set the ID flag */
stmt( IRStmt_Put(
OFFB_IDFLAG,
- IRExpr_Mux0X(
+ IRExpr_ITE(
unop(Iop_32to1,
binop(Iop_And32,
binop(Iop_Shr32, mkexpr(t1), mkU8(21)),
mkU32(1))),
- mkU32(0),
- mkU32(1)))
+ mkU32(1),
+ mkU32(0)))
);
/* And set the AC flag. If setting it 1 to, possibly emit an
emulation warning. */
stmt( IRStmt_Put(
OFFB_ACFLAG,
- IRExpr_Mux0X(
+ IRExpr_ITE(
unop(Iop_32to1,
binop(Iop_And32,
binop(Iop_Shr32, mkexpr(t1), mkU8(18)),
mkU32(1))),
- mkU32(0),
- mkU32(1)))
+ mkU32(1),
+ mkU32(0)))
);
if (emit_AC_emwarn) {
unchanged. If the DCAS fails then we're putting into
EDX:EAX the value seen in memory. */
putIReg(4, R_EDX,
- IRExpr_Mux0X( mkexpr(success),
- mkexpr(oldHi), mkexpr(expdHi)
+ IRExpr_ITE( mkexpr(success),
+ mkexpr(expdHi), mkexpr(oldHi)
));
putIReg(4, R_EAX,
- IRExpr_Mux0X( mkexpr(success),
- mkexpr(oldLo), mkexpr(expdLo)
+ IRExpr_ITE( mkexpr(success),
+ mkexpr(expdLo), mkexpr(oldLo)
));
/* Copy the success bit into the Z flag and leave the others
}
/* --------- MULTIPLEX --------- */
- case Iex_Mux0X: { // VFD
+ case Iex_ITE: { // VFD
if ((ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
- HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
- AMD64RM* r0 = iselIntExpr_RM(env, e->Iex.Mux0X.expr0);
+ && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+ HReg r1 = iselIntExpr_R(env, e->Iex.ITE.iftrue);
+ AMD64RM* r0 = iselIntExpr_RM(env, e->Iex.ITE.iffalse);
HReg dst = newVRegI(env);
- addInstr(env, mk_iMOVsd_RR(rX,dst));
- AMD64CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, mk_iMOVsd_RR(r1,dst));
+ AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, AMD64Instr_CMov64(cc ^ 1, r0, dst));
return dst;
}
}
/* --------- MULTIPLEX --------- */
- if (e->tag == Iex_Mux0X) { // VFD
- HReg rX, r0, dst;
+ if (e->tag == Iex_ITE) { // VFD
+ HReg r1, r0, dst;
vassert(ty == Ity_F64);
- vassert(typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1);
- rX = iselDblExpr(env, e->Iex.Mux0X.exprX);
- r0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
+ vassert(typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1);
+ r1 = iselDblExpr(env, e->Iex.ITE.iftrue);
+ r0 = iselDblExpr(env, e->Iex.ITE.iffalse);
dst = newVRegV(env);
- addInstr(env, mk_vMOVsd_RR(rX,dst));
- AMD64CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, mk_vMOVsd_RR(r1,dst));
+ AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, AMD64Instr_SseCMov(cc ^ 1, r0, dst));
return dst;
}
} /* switch (e->Iex.Binop.op) */
} /* if (e->tag == Iex_Binop) */
- if (e->tag == Iex_Mux0X) { // VFD
- HReg rX = iselVecExpr(env, e->Iex.Mux0X.exprX);
- HReg r0 = iselVecExpr(env, e->Iex.Mux0X.expr0);
+ if (e->tag == Iex_ITE) { // VFD
+ HReg r1 = iselVecExpr(env, e->Iex.ITE.iftrue);
+ HReg r0 = iselVecExpr(env, e->Iex.ITE.iffalse);
HReg dst = newVRegV(env);
- addInstr(env, mk_vMOVsd_RR(rX,dst));
- HReg cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, mk_vMOVsd_RR(r1,dst));
+ HReg cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, AMD64Instr_SseCMov(cc ^ 1, r0, dst));
return dst;
}
}
/* --------- MULTIPLEX --------- */
- case Iex_Mux0X: { // VFD
- /* Mux0X(ccexpr, expr0, exprX) */
+ case Iex_ITE: { // VFD
+ /* ITE(ccexpr, iftrue, iffalse) */
if (ty == Ity_I32) {
ARMCondCode cc;
- HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
- ARMRI84* r0 = iselIntExpr_RI84(NULL, False, env, e->Iex.Mux0X.expr0);
+ HReg r1 = iselIntExpr_R(env, e->Iex.ITE.iftrue);
+ ARMRI84* r0 = iselIntExpr_RI84(NULL, False, env, e->Iex.ITE.iffalse);
HReg dst = newVRegI(env);
- addInstr(env, mk_iMOVds_RR(dst, rX));
- cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, mk_iMOVds_RR(dst, r1));
+ cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, ARMInstr_CMov(cc ^ 1, dst, r0));
return dst;
}
} /* if (e->tag == Iex_Unop) */
/* --------- MULTIPLEX --------- */
- if (e->tag == Iex_Mux0X) { // VFD
+ if (e->tag == Iex_ITE) { // VFD
IRType tyC;
- HReg rXhi, rXlo, r0hi, r0lo, dstHi, dstLo;
+ HReg r1hi, r1lo, r0hi, r0lo, dstHi, dstLo;
ARMCondCode cc;
- tyC = typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond);
+ tyC = typeOfIRExpr(env->type_env,e->Iex.ITE.cond);
vassert(tyC == Ity_I1);
- iselInt64Expr(&rXhi, &rXlo, env, e->Iex.Mux0X.exprX);
- iselInt64Expr(&r0hi, &r0lo, env, e->Iex.Mux0X.expr0);
+ iselInt64Expr(&r1hi, &r1lo, env, e->Iex.ITE.iftrue);
+ iselInt64Expr(&r0hi, &r0lo, env, e->Iex.ITE.iffalse);
dstHi = newVRegI(env);
dstLo = newVRegI(env);
- addInstr(env, mk_iMOVds_RR(dstHi, rXhi));
- addInstr(env, mk_iMOVds_RR(dstLo, rXlo));
- cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, mk_iMOVds_RR(dstHi, r1hi));
+ addInstr(env, mk_iMOVds_RR(dstLo, r1lo));
+ cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, ARMInstr_CMov(cc ^ 1, dstHi, ARMRI84_R(r0hi)));
addInstr(env, ARMInstr_CMov(cc ^ 1, dstLo, ARMRI84_R(r0lo)));
*rHi = dstHi;
}
/* --------- MULTIPLEX --------- */
- if (e->tag == Iex_Mux0X) { // VFD
+ if (e->tag == Iex_ITE) { // VFD
HReg rLo, rHi;
HReg res = newVRegD(env);
iselInt64Expr(&rHi, &rLo, env, e);
}
}
- if (e->tag == Iex_Mux0X) { // VFD
+ if (e->tag == Iex_ITE) { // VFD
ARMCondCode cc;
- HReg rX = iselNeonExpr(env, e->Iex.Mux0X.exprX);
- HReg r0 = iselNeonExpr(env, e->Iex.Mux0X.expr0);
+ HReg r1 = iselNeonExpr(env, e->Iex.ITE.iftrue);
+ HReg r0 = iselNeonExpr(env, e->Iex.ITE.iffalse);
HReg dst = newVRegV(env);
- addInstr(env, ARMInstr_NUnary(ARMneon_COPY, dst, rX, 4, True));
- cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, ARMInstr_NUnary(ARMneon_COPY, dst, r1, 4, True));
+ cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, ARMInstr_NCMovQ(cc ^ 1, dst, r0));
return dst;
}
}
}
- if (e->tag == Iex_Mux0X) { // VFD
+ if (e->tag == Iex_ITE) { // VFD
if (ty == Ity_F64
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
- HReg rX = iselDblExpr(env, e->Iex.Mux0X.exprX);
- HReg r0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
+ && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+ HReg r1 = iselDblExpr(env, e->Iex.ITE.iftrue);
+ HReg r0 = iselDblExpr(env, e->Iex.ITE.iffalse);
HReg dst = newVRegD(env);
- addInstr(env, ARMInstr_VUnaryD(ARMvfpu_COPY, dst, rX));
- ARMCondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, ARMInstr_VUnaryD(ARMvfpu_COPY, dst, r1));
+ ARMCondCode cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, ARMInstr_VCMovD(cc ^ 1, dst, r0));
return dst;
}
}
}
- if (e->tag == Iex_Mux0X) { // VFD
+ if (e->tag == Iex_ITE) { // VFD
if (ty == Ity_F32
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
+ && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
ARMCondCode cc;
- HReg rX = iselFltExpr(env, e->Iex.Mux0X.exprX);
- HReg r0 = iselFltExpr(env, e->Iex.Mux0X.expr0);
+ HReg r1 = iselFltExpr(env, e->Iex.ITE.iftrue);
+ HReg r0 = iselFltExpr(env, e->Iex.ITE.iffalse);
HReg dst = newVRegF(env);
- addInstr(env, ARMInstr_VUnaryS(ARMvfpu_COPY, dst, rX));
- cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, ARMInstr_VUnaryS(ARMvfpu_COPY, dst, r1));
+ cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, ARMInstr_VCMovS(cc ^ 1, dst, r0));
return dst;
}
}
/* --------- MULTIPLEX --------- */
- case Iex_Mux0X: {
+ case Iex_ITE: {
if ((ty == Ity_I8 || ty == Ity_I16 ||
ty == Ity_I32 || ((ty == Ity_I64))) &&
- typeOfIRExpr(env->type_env, e->Iex.Mux0X.cond) == Ity_I8) {
+ typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I8) {
/*
* r_dst = cond && rX
* cond = not(cond)
* tmp = cond && r0
* r_dst = tmp + r_dst
*/
- HReg r0 = iselWordExpr_R(env, e->Iex.Mux0X.expr0);
- HReg rX = iselWordExpr_R(env, e->Iex.Mux0X.exprX);
- HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond);
+ HReg r0 = iselWordExpr_R(env, e->Iex.ITE.iffalse);
+ HReg r1 = iselWordExpr_R(env, e->Iex.ITE.iftrue);
+ HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
HReg r_dst = newVRegI(env);
HReg r_tmp = newVRegI(env);
HReg r_tmp1 = newVRegI(env);
HReg r_cond_neg = newVRegI(env);
- addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp, r_cond, MIPSRH_Reg(rX)));
+ addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp, r_cond, MIPSRH_Reg(r1)));
addInstr(env, MIPSInstr_Alu(Malu_NOR, r_cond_neg, r_cond,
MIPSRH_Reg(r_cond)));
addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp1, r_cond_neg,
return;
}
- /* 64-bit Mux0X */
- if (e->tag == Iex_Mux0X) {
+ /* 64-bit ITE */
+ if (e->tag == Iex_ITE) {
HReg expr0Lo, expr0Hi;
- HReg exprXLo, exprXHi;
+ HReg expr1Lo, expr1Hi;
HReg tmpHi = newVRegI(env);
HReg tmpLo = newVRegI(env);
HReg tmp1Hi = newVRegI(env);
HReg tmp1Lo = newVRegI(env);
- HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond);
+ HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
HReg r_cond_neg = newVRegI(env);
HReg desLo = newVRegI(env);
HReg desHi = newVRegI(env);
- /* expr0Hi:expr0Lo = expr0 */
- /* exprXHi:exprXLo = exprX */
- iselInt64Expr(&expr0Hi, &expr0Lo, env, e->Iex.Mux0X.expr0);
- iselInt64Expr(&exprXHi, &exprXLo, env, e->Iex.Mux0X.exprX);
+ /* expr0Hi:expr0Lo = iffalse */
+ /* expr1Hi:expr1Lo = iftrue */
+ iselInt64Expr(&expr0Hi, &expr0Lo, env, e->Iex.ITE.iffalse);
+ iselInt64Expr(&expr1Hi, &expr1Lo, env, e->Iex.ITE.iftrue);
addInstr(env, MIPSInstr_Alu(Malu_AND, tmpLo, r_cond,
- MIPSRH_Reg(exprXLo)));
+ MIPSRH_Reg(expr1Lo)));
addInstr(env, MIPSInstr_Alu(Malu_AND, tmpHi, r_cond,
- MIPSRH_Reg(exprXHi)));
+ MIPSRH_Reg(expr1Hi)));
addInstr(env, MIPSInstr_Alu(Malu_NOR, r_cond_neg, r_cond,
MIPSRH_Reg(r_cond)));
addInstr(env, MIPSInstr_Alu(Malu_AND, tmp1Lo, r_cond_neg,
- MIPSRH_Reg(exprXLo)));
+ MIPSRH_Reg(expr1Lo)));
addInstr(env, MIPSInstr_Alu(Malu_AND, tmp1Hi, r_cond_neg,
- MIPSRH_Reg(exprXHi)));
+ MIPSRH_Reg(expr1Hi)));
addInstr(env, MIPSInstr_Alu(Malu_ADD, desLo, tmpLo,
MIPSRH_Reg(tmp1Lo)));
addInstr(env, MIPSInstr_Alu(Malu_ADD, desHi, tmpHi,
}
/* --------- MULTIPLEX --------- */
- if (e->tag == Iex_Mux0X) {
+ if (e->tag == Iex_ITE) {
 if (ty == Ity_F64
- && typeOfIRExpr(env->type_env, e->Iex.Mux0X.cond) == Ity_I8) {
- HReg r0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
- HReg rX = iselDblExpr(env, e->Iex.Mux0X.exprX);
- HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond);
+ && typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I8) {
+ /* r0 = iffalse arm, r1 = iftrue arm */
+ HReg r0 = iselDblExpr(env, e->Iex.ITE.iffalse);
+ HReg r1 = iselDblExpr(env, e->Iex.ITE.iftrue);
+ HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
 HReg r_cond_neg = newVRegI(env);
 HReg r_dst = newVRegD(env);
 HReg r_tmp_lo = newVRegI(env);
 HReg r_tmp1_hi = newVRegI(env);
 HReg r_r0_lo = newVRegI(env);
 HReg r_r0_hi = newVRegI(env);
- HReg r_rX_lo = newVRegI(env);
- HReg r_rX_hi = newVRegI(env);
+ HReg r_r1_lo = newVRegI(env);
+ HReg r_r1_hi = newVRegI(env);
 HReg r_dst_lo = newVRegI(env);
 HReg r_dst_hi = newVRegI(env);
 am_addr = MIPSAMode_IR(0, StackPointer(mode64));
 // store as Ity_F64
- addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, rX, am_addr));
+ addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, r1, am_addr));
 // load as 2xI32
- addInstr(env, MIPSInstr_Load(4, r_rX_lo, am_addr, mode64));
- addInstr(env, MIPSInstr_Load(4, r_rX_hi, nextMIPSAModeFloat(am_addr),
+ addInstr(env, MIPSInstr_Load(4, r_r1_lo, am_addr, mode64));
+ addInstr(env, MIPSInstr_Load(4, r_r1_hi, nextMIPSAModeFloat(am_addr),
 mode64));
 add_to_sp(env, 16); // Reset SP
+ /* The negated condition must mask the iffalse halves (r_r0_*).
+ Masking r_r1_* here would make the select yield the iftrue value
+ unconditionally: (cond & r1) + (~cond & r1) == r1. */
addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp1_lo, r_cond_neg,
- MIPSRH_Reg(r_rX_lo)));
+ MIPSRH_Reg(r_r0_lo)));
addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp1_hi, r_cond_neg,
- MIPSRH_Reg(r_rX_hi)));
+ MIPSRH_Reg(r_r0_hi)));
addInstr(env, MIPSInstr_Alu(Malu_ADD, r_dst_lo, r_tmp_lo,
 MIPSRH_Reg(r_tmp1_lo)));
}
/* --------- MULTIPLEX --------- */
- case Iex_Mux0X: { // VFD
+ case Iex_ITE: { // VFD
if ((ty == Ity_I8 || ty == Ity_I16 ||
ty == Ity_I32 || ((ty == Ity_I64) && mode64)) &&
- typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
- PPCRI* rX = iselWordExpr_RI(env, e->Iex.Mux0X.exprX);
- HReg r0 = iselWordExpr_R(env, e->Iex.Mux0X.expr0);
+ typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+ PPCRI* r1 = iselWordExpr_RI(env, e->Iex.ITE.iftrue);
+ HReg r0 = iselWordExpr_R(env, e->Iex.ITE.iffalse);
HReg r_dst = newVRegI(env);
addInstr(env, mk_iMOVds_RR(r_dst,r0));
- PPCCondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
- addInstr(env, PPCInstr_CMov(cc, r_dst, rX));
+ PPCCondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+ addInstr(env, PPCInstr_CMov(cc, r_dst, r1));
return r_dst;
}
break;
return;
}
- /* 64-bit Mux0X */
- if (e->tag == Iex_Mux0X) { // VFD
+ /* 64-bit ITE */
+ if (e->tag == Iex_ITE) { // VFD
HReg e0Lo, e0Hi, eXLo, eXHi;
- iselInt64Expr(&eXHi, &eXLo, env, e->Iex.Mux0X.exprX);
- iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.Mux0X.expr0);
+ iselInt64Expr(&eXHi, &eXLo, env, e->Iex.ITE.iftrue);
+ iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.ITE.iffalse);
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
addInstr(env, mk_iMOVds_RR(tHi,e0Hi));
addInstr(env, mk_iMOVds_RR(tLo,e0Lo));
- PPCCondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ PPCCondCode cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, PPCInstr_CMov(cc,tHi,PPCRI_Reg(eXHi)));
addInstr(env, PPCInstr_CMov(cc,tLo,PPCRI_Reg(eXLo)));
*rHi = tHi;
}
/* --------- MULTIPLEX --------- */
- if (e->tag == Iex_Mux0X) { // VFD
+ if (e->tag == Iex_ITE) { // VFD
if (ty == Ity_F64
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
- HReg frX = iselDblExpr(env, e->Iex.Mux0X.exprX);
- HReg fr0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
+ && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+ HReg fr1 = iselDblExpr(env, e->Iex.ITE.iftrue);
+ HReg fr0 = iselDblExpr(env, e->Iex.ITE.iffalse);
HReg fr_dst = newVRegF(env);
addInstr(env, PPCInstr_FpUnary( Pfp_MOV, fr_dst, fr0 ));
- PPCCondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
- addInstr(env, PPCInstr_FpCMov( cc, fr_dst, frX ));
+ PPCCondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+ addInstr(env, PPCInstr_FpCMov( cc, fr_dst, fr1 ));
return fr_dst;
}
}
}
/* --------- MULTIPLEX --------- */
- case Iex_Mux0X: {
+ case Iex_ITE: {
IRExpr *cond_expr;
- HReg dst, rX;
+ HReg dst, r1;
s390_opnd_RMI r0;
- cond_expr = expr->Iex.Mux0X.cond;
+ cond_expr = expr->Iex.ITE.cond;
vassert(typeOfIRExpr(env->type_env, cond_expr) == Ity_I1);
dst = newVRegI(env);
- r0 = s390_isel_int_expr_RMI(env, expr->Iex.Mux0X.expr0);
- rX = s390_isel_int_expr(env, expr->Iex.Mux0X.exprX);
- size = sizeofIRType(typeOfIRExpr(env->type_env, expr->Iex.Mux0X.exprX));
+ r0 = s390_isel_int_expr_RMI(env, expr->Iex.ITE.iffalse);
+ r1 = s390_isel_int_expr(env, expr->Iex.ITE.iftrue);
+ size = sizeofIRType(typeOfIRExpr(env->type_env, expr->Iex.ITE.iftrue));
s390_cc_t cc = s390_isel_cc(env, cond_expr);
- addInstr(env, s390_insn_move(size, dst, rX));
+ addInstr(env, s390_insn_move(size, dst, r1));
addInstr(env, s390_insn_cond_move(size, s390_cc_invert(cc), dst, r0));
return dst;
}
}
/* --------- MULTIPLEX --------- */
- case Iex_Mux0X: { // VFD
+ case Iex_ITE: { // VFD
if ((ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
- HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
- X86RM* r0 = iselIntExpr_RM(env, e->Iex.Mux0X.expr0);
+ && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+ HReg r1 = iselIntExpr_R(env, e->Iex.ITE.iftrue);
+ X86RM* r0 = iselIntExpr_RM(env, e->Iex.ITE.iffalse);
HReg dst = newVRegI(env);
- addInstr(env, mk_iMOVsd_RR(rX,dst));
- X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, mk_iMOVsd_RR(r1,dst));
+ X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, X86Instr_CMov32(cc ^ 1, r0, dst));
return dst;
}
return;
}
- /* 64-bit Mux0X: Mux0X(g, expr, expr) */ // VFD
- if (e->tag == Iex_Mux0X) {
- HReg e0Lo, e0Hi, eXLo, eXHi;
+ /* 64-bit ITE: ITE(g, expr, expr) */ // VFD
+ if (e->tag == Iex_ITE) {
+ HReg e0Lo, e0Hi, e1Lo, e1Hi;
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
- iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.Mux0X.expr0);
- iselInt64Expr(&eXHi, &eXLo, env, e->Iex.Mux0X.exprX);
- addInstr(env, mk_iMOVsd_RR(eXHi, tHi));
- addInstr(env, mk_iMOVsd_RR(eXLo, tLo));
- X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.ITE.iffalse);
+ iselInt64Expr(&e1Hi, &e1Lo, env, e->Iex.ITE.iftrue);
+ addInstr(env, mk_iMOVsd_RR(e1Hi, tHi));
+ addInstr(env, mk_iMOVsd_RR(e1Lo, tLo));
+ X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
/* This assumes the first cmov32 doesn't trash the condition
codes, so they are still available for the second cmov32 */
addInstr(env, X86Instr_CMov32(cc ^ 1, X86RM_Reg(e0Hi), tHi));
}
/* --------- MULTIPLEX --------- */
- if (e->tag == Iex_Mux0X) { // VFD
+ if (e->tag == Iex_ITE) { // VFD
if (ty == Ity_F64
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
- HReg rX = iselDblExpr(env, e->Iex.Mux0X.exprX);
- HReg r0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
+ && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+ HReg r1 = iselDblExpr(env, e->Iex.ITE.iftrue);
+ HReg r0 = iselDblExpr(env, e->Iex.ITE.iffalse);
HReg dst = newVRegF(env);
- addInstr(env, X86Instr_FpUnary(Xfp_MOV,rX,dst));
- X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, X86Instr_FpUnary(Xfp_MOV,r1,dst));
+ X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, X86Instr_FpCMov(cc ^ 1, r0, dst));
return dst;
}
} /* switch (e->Iex.Binop.op) */
} /* if (e->tag == Iex_Binop) */
- if (e->tag == Iex_Mux0X) { // VFD
- HReg rX = iselVecExpr(env, e->Iex.Mux0X.exprX);
- HReg r0 = iselVecExpr(env, e->Iex.Mux0X.expr0);
+ if (e->tag == Iex_ITE) { // VFD
+ HReg r1 = iselVecExpr(env, e->Iex.ITE.iftrue);
+ HReg r0 = iselVecExpr(env, e->Iex.ITE.iffalse);
HReg dst = newVRegV(env);
- addInstr(env, mk_vMOVsd_RR(rX,dst));
- X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, mk_vMOVsd_RR(r1,dst));
+ X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, X86Instr_SseCMov(cc ^ 1, r0, dst));
return dst;
}
vex_printf("):");
ppIRType(e->Iex.CCall.retty);
break;
- case Iex_Mux0X:
- vex_printf("Mux0X(");
- ppIRExpr(e->Iex.Mux0X.cond);
+ case Iex_ITE:
+ vex_printf("ITE(");
+ ppIRExpr(e->Iex.ITE.cond);
vex_printf(",");
- ppIRExpr(e->Iex.Mux0X.expr0);
+ ppIRExpr(e->Iex.ITE.iftrue);
vex_printf(",");
- ppIRExpr(e->Iex.Mux0X.exprX);
+ ppIRExpr(e->Iex.ITE.iffalse);
vex_printf(")");
break;
default:
e->Iex.CCall.args = args;
return e;
}
-IRExpr* IRExpr_Mux0X ( IRExpr* cond, IRExpr* expr0, IRExpr* exprX ) {
+IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse ) {
IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
- e->tag = Iex_Mux0X;
- e->Iex.Mux0X.cond = cond;
- e->Iex.Mux0X.expr0 = expr0;
- e->Iex.Mux0X.exprX = exprX;
+ e->tag = Iex_ITE;
+ e->Iex.ITE.cond = cond;
+ e->Iex.ITE.iftrue = iftrue;
+ e->Iex.ITE.iffalse = iffalse;
return e;
}
e->Iex.CCall.retty,
deepCopyIRExprVec(e->Iex.CCall.args));
- case Iex_Mux0X:
- return IRExpr_Mux0X(deepCopyIRExpr(e->Iex.Mux0X.cond),
- deepCopyIRExpr(e->Iex.Mux0X.expr0),
- deepCopyIRExpr(e->Iex.Mux0X.exprX));
+ case Iex_ITE:
+ return IRExpr_ITE(deepCopyIRExpr(e->Iex.ITE.cond),
+ deepCopyIRExpr(e->Iex.ITE.iftrue),
+ deepCopyIRExpr(e->Iex.ITE.iffalse));
default:
vpanic("deepCopyIRExpr");
}
return t_dst;
case Iex_CCall:
return e->Iex.CCall.retty;
- case Iex_Mux0X:
- e = e->Iex.Mux0X.expr0;
+ case Iex_ITE:
+ e = e->Iex.ITE.iffalse;
goto start;
- /* return typeOfIRExpr(tyenv, e->Iex.Mux0X.expr0); */
+ /* return typeOfIRExpr(tyenv, e->Iex.ITE.iffalse); */
case Iex_Binder:
vpanic("typeOfIRExpr: Binder is not a valid expression");
default:
if (!isIRAtom(e->Iex.CCall.args[i]))
return False;
return True;
- case Iex_Mux0X: return toBool (
- isIRAtom(e->Iex.Mux0X.cond)
- && isIRAtom(e->Iex.Mux0X.expr0)
- && isIRAtom(e->Iex.Mux0X.exprX));
+ case Iex_ITE: return toBool (
+ isIRAtom(e->Iex.ITE.cond)
+ && isIRAtom(e->Iex.ITE.iftrue)
+ && isIRAtom(e->Iex.ITE.iffalse));
default: vpanic("isFlatIRStmt(e)");
}
/*notreached*/
for (i = 0; expr->Iex.CCall.args[i]; i++)
useBeforeDef_Expr(bb,stmt,expr->Iex.CCall.args[i],def_counts);
break;
- case Iex_Mux0X:
- useBeforeDef_Expr(bb,stmt,expr->Iex.Mux0X.cond,def_counts);
- useBeforeDef_Expr(bb,stmt,expr->Iex.Mux0X.expr0,def_counts);
- useBeforeDef_Expr(bb,stmt,expr->Iex.Mux0X.exprX,def_counts);
+ case Iex_ITE:
+ useBeforeDef_Expr(bb,stmt,expr->Iex.ITE.cond,def_counts);
+ useBeforeDef_Expr(bb,stmt,expr->Iex.ITE.iftrue,def_counts);
+ useBeforeDef_Expr(bb,stmt,expr->Iex.ITE.iffalse,def_counts);
break;
default:
vpanic("useBeforeDef_Expr");
if (!saneIRConst(expr->Iex.Const.con))
sanityCheckFail(bb,stmt,"Iex.Const.con: invalid const");
break;
- case Iex_Mux0X:
- tcExpr(bb,stmt, expr->Iex.Mux0X.cond, gWordTy);
- tcExpr(bb,stmt, expr->Iex.Mux0X.expr0, gWordTy);
- tcExpr(bb,stmt, expr->Iex.Mux0X.exprX, gWordTy);
- if (typeOfIRExpr(tyenv, expr->Iex.Mux0X.cond) != Ity_I1)
- sanityCheckFail(bb,stmt,"Iex.Mux0X.cond: cond :: Ity_I1");
- if (typeOfIRExpr(tyenv, expr->Iex.Mux0X.expr0)
- != typeOfIRExpr(tyenv, expr->Iex.Mux0X.exprX))
- sanityCheckFail(bb,stmt,"Iex.Mux0X: expr0/exprX mismatch");
+ case Iex_ITE:
+ tcExpr(bb,stmt, expr->Iex.ITE.cond, gWordTy);
+ tcExpr(bb,stmt, expr->Iex.ITE.iftrue, gWordTy);
+ tcExpr(bb,stmt, expr->Iex.ITE.iffalse, gWordTy);
+ if (typeOfIRExpr(tyenv, expr->Iex.ITE.cond) != Ity_I1)
+ sanityCheckFail(bb,stmt,"Iex.ITE.cond: cond :: Ity_I1");
+ if (typeOfIRExpr(tyenv, expr->Iex.ITE.iftrue)
+ != typeOfIRExpr(tyenv, expr->Iex.ITE.iffalse))
+ sanityCheckFail(bb,stmt,"Iex.ITE: iftrue/iffalse mismatch");
break;
default:
vpanic("tcExpr");
newargs)));
return IRExpr_RdTmp(t1);
- case Iex_Mux0X:
+ case Iex_ITE:
t1 = newIRTemp(bb->tyenv, ty);
addStmtToIRSB(bb, IRStmt_WrTmp(t1,
- IRExpr_Mux0X(flatten_Expr(bb, ex->Iex.Mux0X.cond),
- flatten_Expr(bb, ex->Iex.Mux0X.expr0),
- flatten_Expr(bb, ex->Iex.Mux0X.exprX))));
+ IRExpr_ITE(flatten_Expr(bb, ex->Iex.ITE.cond),
+ flatten_Expr(bb, ex->Iex.ITE.iftrue),
+ flatten_Expr(bb, ex->Iex.ITE.iffalse))));
return IRExpr_RdTmp(t1);
case Iex_Const:
&& sameIRExprs_aux( env, tri1->arg3, tri2->arg3 ));
}
- case Iex_Mux0X:
- return toBool( sameIRExprs_aux( env, e1->Iex.Mux0X.cond,
- e2->Iex.Mux0X.cond )
- && sameIRExprs_aux( env, e1->Iex.Mux0X.expr0,
- e2->Iex.Mux0X.expr0 )
- && sameIRExprs_aux( env, e1->Iex.Mux0X.exprX,
- e2->Iex.Mux0X.exprX ));
+ case Iex_ITE:
+ return toBool( sameIRExprs_aux( env, e1->Iex.ITE.cond,
+ e2->Iex.ITE.cond )
+ && sameIRExprs_aux( env, e1->Iex.ITE.iftrue,
+ e2->Iex.ITE.iftrue )
+ && sameIRExprs_aux( env, e1->Iex.ITE.iffalse,
+ e2->Iex.ITE.iffalse ));
default:
/* Not very likely to be "same". */
}
break;
- case Iex_Mux0X:
- /* Mux0X */
+ case Iex_ITE:
+ /* ITE */
/* is the discriminant is a constant? */
- if (e->Iex.Mux0X.cond->tag == Iex_Const) {
+ if (e->Iex.ITE.cond->tag == Iex_Const) {
/* assured us by the IR type rules */
- vassert(e->Iex.Mux0X.cond->Iex.Const.con->tag == Ico_U1);
- e2 = e->Iex.Mux0X.cond->Iex.Const.con->Ico.U1
- ? e->Iex.Mux0X.exprX : e->Iex.Mux0X.expr0;
+ vassert(e->Iex.ITE.cond->Iex.Const.con->tag == Ico_U1);
+ e2 = e->Iex.ITE.cond->Iex.Const.con->Ico.U1
+ ? e->Iex.ITE.iftrue : e->Iex.ITE.iffalse;
}
else
/* are the arms identical? (pretty weedy test) */
- if (sameIRExprs(env, e->Iex.Mux0X.expr0,
- e->Iex.Mux0X.exprX)) {
- e2 = e->Iex.Mux0X.expr0;
+ if (sameIRExprs(env, e->Iex.ITE.iftrue,
+ e->Iex.ITE.iffalse)) {
+ e2 = e->Iex.ITE.iffalse;
}
break;
);
}
- case Iex_Mux0X:
- vassert(isIRAtom(ex->Iex.Mux0X.cond));
- vassert(isIRAtom(ex->Iex.Mux0X.expr0));
- vassert(isIRAtom(ex->Iex.Mux0X.exprX));
- return IRExpr_Mux0X(
- subst_Expr(env, ex->Iex.Mux0X.cond),
- subst_Expr(env, ex->Iex.Mux0X.expr0),
- subst_Expr(env, ex->Iex.Mux0X.exprX)
+ case Iex_ITE:
+ vassert(isIRAtom(ex->Iex.ITE.cond));
+ vassert(isIRAtom(ex->Iex.ITE.iftrue));
+ vassert(isIRAtom(ex->Iex.ITE.iffalse));
+ return IRExpr_ITE(
+ subst_Expr(env, ex->Iex.ITE.cond),
+ subst_Expr(env, ex->Iex.ITE.iftrue),
+ subst_Expr(env, ex->Iex.ITE.iffalse)
);
default:
case Iex_GetI:
addUses_Expr(set, e->Iex.GetI.ix);
return;
- case Iex_Mux0X:
- addUses_Expr(set, e->Iex.Mux0X.cond);
- addUses_Expr(set, e->Iex.Mux0X.expr0);
- addUses_Expr(set, e->Iex.Mux0X.exprX);
+ case Iex_ITE:
+ addUses_Expr(set, e->Iex.ITE.cond);
+ addUses_Expr(set, e->Iex.ITE.iftrue);
+ addUses_Expr(set, e->Iex.ITE.iffalse);
return;
case Iex_CCall:
for (i = 0; e->Iex.CCall.args[i]; i++)
struct {
ULong f64i;
} Cf64i;
- /* Mux0X(tmp,tmp,tmp) */
+ /* ITE(tmp,tmp,tmp) */
struct {
IRTemp co;
IRTemp e0;
IRTemp eX;
} Mttt;
- /* Mux0X(tmp,const,tmp) */
+ /* ITE(tmp,tmp,const) */
struct {
IRTemp co;
IRConst con0;
IRTemp eX;
} Mtct;
- /* Mux0X(tmp,tmp,const) */
+ /* ITE(tmp,const,tmp) */
struct {
IRTemp co;
IRTemp e0;
IRConst conX;
} Mttc;
- /* Mux0X(tmp,const,const) */
+ /* ITE(tmp,const,const) */
struct {
IRTemp co;
IRConst con0;
case Cf64i:
return IRExpr_Const(IRConst_F64i(ae->u.Cf64i.f64i));
case Mttt:
- return IRExpr_Mux0X(IRExpr_RdTmp(ae->u.Mttt.co),
- IRExpr_RdTmp(ae->u.Mttt.e0),
- IRExpr_RdTmp(ae->u.Mttt.eX));
+ return IRExpr_ITE(IRExpr_RdTmp(ae->u.Mttt.co),
+ IRExpr_RdTmp(ae->u.Mttt.eX),
+ IRExpr_RdTmp(ae->u.Mttt.e0));
case Mtct:
con0 = LibVEX_Alloc(sizeof(IRConst));
*con0 = ae->u.Mtct.con0;
- return IRExpr_Mux0X(IRExpr_RdTmp(ae->u.Mtct.co),
- IRExpr_Const(con0),
- IRExpr_RdTmp(ae->u.Mtct.eX));
+ return IRExpr_ITE(IRExpr_RdTmp(ae->u.Mtct.co),
+ IRExpr_RdTmp(ae->u.Mtct.eX),
+ IRExpr_Const(con0));
case Mttc:
conX = LibVEX_Alloc(sizeof(IRConst));
*conX = ae->u.Mttc.conX;
- return IRExpr_Mux0X(IRExpr_RdTmp(ae->u.Mttc.co),
- IRExpr_RdTmp(ae->u.Mttc.e0),
- IRExpr_Const(conX));
+ return IRExpr_ITE(IRExpr_RdTmp(ae->u.Mttc.co),
+ IRExpr_Const(conX),
+ IRExpr_RdTmp(ae->u.Mttc.e0));
+
case Mtcc:
con0 = LibVEX_Alloc(sizeof(IRConst));
conX = LibVEX_Alloc(sizeof(IRConst));
*con0 = ae->u.Mtcc.con0;
*conX = ae->u.Mtcc.conX;
- return IRExpr_Mux0X(IRExpr_RdTmp(ae->u.Mtcc.co),
- IRExpr_Const(con0),
- IRExpr_Const(conX));
+ return IRExpr_ITE(IRExpr_RdTmp(ae->u.Mtcc.co),
+ IRExpr_Const(conX),
+ IRExpr_Const(con0));
case GetIt:
return IRExpr_GetI(ae->u.GetIt.descr,
IRExpr_RdTmp(ae->u.GetIt.ix),
}
break;
- case Iex_Mux0X:
- if (e->Iex.Mux0X.cond->tag == Iex_RdTmp) {
- if (e->Iex.Mux0X.expr0->tag == Iex_RdTmp) {
- if (e->Iex.Mux0X.exprX->tag == Iex_RdTmp) {
+ case Iex_ITE:
+ if (e->Iex.ITE.cond->tag == Iex_RdTmp) {
+ if (e->Iex.ITE.iffalse->tag == Iex_RdTmp) {
+ if (e->Iex.ITE.iftrue->tag == Iex_RdTmp) {
ae = LibVEX_Alloc(sizeof(AvailExpr));
ae->tag = Mttt;
- ae->u.Mttt.co = e->Iex.Mux0X.cond->Iex.RdTmp.tmp;
- ae->u.Mttt.e0 = e->Iex.Mux0X.expr0->Iex.RdTmp.tmp;
- ae->u.Mttt.eX = e->Iex.Mux0X.exprX->Iex.RdTmp.tmp;
+ ae->u.Mttt.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
+ ae->u.Mttt.e0 = e->Iex.ITE.iffalse->Iex.RdTmp.tmp;
+ ae->u.Mttt.eX = e->Iex.ITE.iftrue->Iex.RdTmp.tmp;
return ae;
}
- if (e->Iex.Mux0X.exprX->tag == Iex_Const) {
+ if (e->Iex.ITE.iftrue->tag == Iex_Const) {
ae = LibVEX_Alloc(sizeof(AvailExpr));
ae->tag = Mttc;
- ae->u.Mttc.co = e->Iex.Mux0X.cond->Iex.RdTmp.tmp;
- ae->u.Mttc.e0 = e->Iex.Mux0X.expr0->Iex.RdTmp.tmp;
- ae->u.Mttc.conX = *(e->Iex.Mux0X.exprX->Iex.Const.con);
+ ae->u.Mttc.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
+ ae->u.Mttc.e0 = e->Iex.ITE.iffalse->Iex.RdTmp.tmp;
+ ae->u.Mttc.conX = *(e->Iex.ITE.iftrue->Iex.Const.con);
return ae;
}
- } else if (e->Iex.Mux0X.expr0->tag == Iex_Const) {
- if (e->Iex.Mux0X.exprX->tag == Iex_RdTmp) {
+ } else if (e->Iex.ITE.iffalse->tag == Iex_Const) {
+ if (e->Iex.ITE.iftrue->tag == Iex_RdTmp) {
ae = LibVEX_Alloc(sizeof(AvailExpr));
ae->tag = Mtct;
- ae->u.Mtct.co = e->Iex.Mux0X.cond->Iex.RdTmp.tmp;
- ae->u.Mtct.con0 = *(e->Iex.Mux0X.expr0->Iex.Const.con);
- ae->u.Mtct.eX = e->Iex.Mux0X.exprX->Iex.RdTmp.tmp;
+ ae->u.Mtct.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
+ ae->u.Mtct.con0 = *(e->Iex.ITE.iffalse->Iex.Const.con);
+ ae->u.Mtct.eX = e->Iex.ITE.iftrue->Iex.RdTmp.tmp;
return ae;
}
- if (e->Iex.Mux0X.exprX->tag == Iex_Const) {
+ if (e->Iex.ITE.iftrue->tag == Iex_Const) {
ae = LibVEX_Alloc(sizeof(AvailExpr));
ae->tag = Mtcc;
- ae->u.Mtcc.co = e->Iex.Mux0X.cond->Iex.RdTmp.tmp;
- ae->u.Mtcc.con0 = *(e->Iex.Mux0X.expr0->Iex.Const.con);
- ae->u.Mtcc.conX = *(e->Iex.Mux0X.exprX->Iex.Const.con);
+ ae->u.Mtcc.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
+ ae->u.Mtcc.con0 = *(e->Iex.ITE.iffalse->Iex.Const.con);
+ ae->u.Mtcc.conX = *(e->Iex.ITE.iftrue->Iex.Const.con);
return ae;
}
}
for (i = 0; e->Iex.CCall.args[i]; i++)
deltaIRExpr(e->Iex.CCall.args[i], delta);
break;
- case Iex_Mux0X:
- deltaIRExpr(e->Iex.Mux0X.cond, delta);
- deltaIRExpr(e->Iex.Mux0X.expr0, delta);
- deltaIRExpr(e->Iex.Mux0X.exprX, delta);
+ case Iex_ITE:
+ deltaIRExpr(e->Iex.ITE.cond, delta);
+ deltaIRExpr(e->Iex.ITE.iftrue, delta);
+ deltaIRExpr(e->Iex.ITE.iffalse, delta);
break;
default:
vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
for (i = 0; e->Iex.CCall.args[i]; i++)
setHints_Expr(doesLoad, doesGet, e->Iex.CCall.args[i]);
return;
- case Iex_Mux0X:
- setHints_Expr(doesLoad, doesGet, e->Iex.Mux0X.cond);
- setHints_Expr(doesLoad, doesGet, e->Iex.Mux0X.expr0);
- setHints_Expr(doesLoad, doesGet, e->Iex.Mux0X.exprX);
+ case Iex_ITE:
+ setHints_Expr(doesLoad, doesGet, e->Iex.ITE.cond);
+ setHints_Expr(doesLoad, doesGet, e->Iex.ITE.iftrue);
+ setHints_Expr(doesLoad, doesGet, e->Iex.ITE.iffalse);
return;
case Iex_Qop:
setHints_Expr(doesLoad, doesGet, e->Iex.Qop.details->arg1);
uses[e->Iex.RdTmp.tmp]++;
return;
- case Iex_Mux0X:
- aoccCount_Expr(uses, e->Iex.Mux0X.cond);
- aoccCount_Expr(uses, e->Iex.Mux0X.expr0);
- aoccCount_Expr(uses, e->Iex.Mux0X.exprX);
+ case Iex_ITE:
+ aoccCount_Expr(uses, e->Iex.ITE.cond);
+ aoccCount_Expr(uses, e->Iex.ITE.iftrue);
+ aoccCount_Expr(uses, e->Iex.ITE.iffalse);
return;
case Iex_Qop:
case Iex_RdTmp:
e2 = atbSubst_Temp(env, e->Iex.RdTmp.tmp);
return e2 ? e2 : e;
- case Iex_Mux0X:
- return IRExpr_Mux0X(
- atbSubst_Expr(env, e->Iex.Mux0X.cond),
- atbSubst_Expr(env, e->Iex.Mux0X.expr0),
- atbSubst_Expr(env, e->Iex.Mux0X.exprX)
+ case Iex_ITE:
+ return IRExpr_ITE(
+ atbSubst_Expr(env, e->Iex.ITE.cond),
+ atbSubst_Expr(env, e->Iex.ITE.iftrue),
+ atbSubst_Expr(env, e->Iex.ITE.iffalse)
);
case Iex_Qop:
return IRExpr_Qop(
Iex_Unop,
Iex_Load,
Iex_Const,
- Iex_Mux0X,
+ Iex_ITE,
Iex_CCall
}
IRExprTag;
IRExpr** args; /* Vector of argument expressions. */
} CCall;
- /* A ternary if-then-else operator. It returns expr0 if cond is
- zero, exprX otherwise. Note that it is STRICT, ie. both
- expr0 and exprX are evaluated in all cases.
+ /* A ternary if-then-else operator. It returns iftrue if cond is
+ nonzero, iffalse otherwise. Note that it is STRICT, ie. both
+ iftrue and iffalse are evaluated in all cases.
- ppIRExpr output: Mux0X(<cond>,<expr0>,<exprX>),
- eg. Mux0X(t6,t7,t8)
+ ppIRExpr output: ITE(<cond>,<iftrue>,<iffalse>),
+ eg. ITE(t6,t7,t8)
*/
struct {
IRExpr* cond; /* Condition */
- IRExpr* expr0; /* True expression */
- IRExpr* exprX; /* False expression */
- } Mux0X;
+ IRExpr* iftrue; /* True expression */
+ IRExpr* iffalse; /* False expression */
+ } ITE;
} Iex;
};
extern IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr );
extern IRExpr* IRExpr_Const ( IRConst* con );
extern IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args );
-extern IRExpr* IRExpr_Mux0X ( IRExpr* cond, IRExpr* expr0, IRExpr* exprX );
+extern IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse );
/* Deep-copy an IRExpr. */
extern IRExpr* deepCopyIRExpr ( IRExpr* );
static
-IRAtom* expr2vbits_Mux0X ( MCEnv* mce,
- IRAtom* cond, IRAtom* expr0, IRAtom* exprX )
+IRAtom* expr2vbits_ITE ( MCEnv* mce,
+ IRAtom* cond, IRAtom* iftrue, IRAtom* iffalse )
{
- IRAtom *vbitsC, *vbits0, *vbitsX;
+ IRAtom *vbitsC, *vbits0, *vbits1;
IRType ty;
- /* Given Mux0X(cond,expr0,exprX), generate
- Mux0X(cond,expr0#,exprX#) `UifU` PCast(cond#)
+ /* Given ITE(cond,iftrue,iffalse), generate
+ ITE(cond,iftrue#,iffalse#) `UifU` PCast(cond#)
That is, steer the V bits like the originals, but trash the
result if the steering value is undefined. This gives
lazy propagation. */
tl_assert(isOriginalAtom(mce, cond));
- tl_assert(isOriginalAtom(mce, expr0));
- tl_assert(isOriginalAtom(mce, exprX));
+ tl_assert(isOriginalAtom(mce, iftrue));
+ tl_assert(isOriginalAtom(mce, iffalse));
vbitsC = expr2vbits(mce, cond);
- vbits0 = expr2vbits(mce, expr0);
- vbitsX = expr2vbits(mce, exprX);
+ vbits0 = expr2vbits(mce, iffalse);
+ vbits1 = expr2vbits(mce, iftrue);
ty = typeOfIRExpr(mce->bb->tyenv, vbits0);
return
- mkUifU(mce, ty, assignNew(mce, ty, IRExpr_Mux0X(cond, vbits0, vbitsX)),
+ mkUifU(mce, ty, assignNew(mce, ty, IRExpr_ITE(cond, vbits1, vbits0)),
mkPCastTo(mce, ty, vbitsC) );
}
e->Iex.CCall.retty,
e->Iex.CCall.cee );
- case Iex_Mux0X:
- return expr2vbits_Mux0X( mce, e->Iex.Mux0X.cond, e->Iex.Mux0X.expr0,
- e->Iex.Mux0X.exprX);
+ case Iex_ITE:
+ return expr2vbits_ITE( mce, e->Iex.ITE.cond, e->Iex.ITE.iftrue,
+ e->Iex.ITE.iffalse);
default:
VG_(printf)("\n");
case Iex_Binop:
return isBogusAtom(e->Iex.Binop.arg1)
|| isBogusAtom(e->Iex.Binop.arg2);
- case Iex_Mux0X:
- return isBogusAtom(e->Iex.Mux0X.cond)
- || isBogusAtom(e->Iex.Mux0X.expr0)
- || isBogusAtom(e->Iex.Mux0X.exprX);
+ case Iex_ITE:
+ return isBogusAtom(e->Iex.ITE.cond)
+ || isBogusAtom(e->Iex.ITE.iftrue)
+ || isBogusAtom(e->Iex.ITE.iffalse);
case Iex_Load:
return isBogusAtom(e->Iex.Load.addr);
case Iex_CCall: