From: Florian Krohm
Date: Tue, 29 Jan 2013 03:56:06 +0000 (+0000)
Subject: Infrastructure cleanup part 2.
X-Git-Tag: svn/VALGRIND_3_9_0^2~126
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8a8f1bf3b42ccb136d96118a05595b88e6dc187e;p=thirdparty%2Fvalgrind.git

Infrastructure cleanup part 2.
Replace Iex_Mux0X with Iex_ITE (if-then-else) and
   IRExpr_Mux0X( cond, iffalse, iftrue )
with
   IRExpr_ITE ( cond, iftrue, iffalse );

git-svn-id: svn://svn.valgrind.org/vex/trunk@2668
---

diff --git a/VEX/priv/guest_amd64_toIR.c b/VEX/priv/guest_amd64_toIR.c
index 4546de38b8..1034971836 100644
--- a/VEX/priv/guest_amd64_toIR.c
+++ b/VEX/priv/guest_amd64_toIR.c
@@ -1825,17 +1825,17 @@ static void setFlags_DEP1_DEP2_shift ( IROp op64,
    /* DEP1 contains the result, DEP2 contains the undershifted value. */
    stmt( IRStmt_Put( OFFB_CC_OP,
-                     IRExpr_Mux0X( mkexpr(guardB),
-                                   IRExpr_Get(OFFB_CC_OP,Ity_I64),
-                                   mkU64(ccOp))) );
+                     IRExpr_ITE( mkexpr(guardB),
+                                 mkU64(ccOp),
+                                 IRExpr_Get(OFFB_CC_OP,Ity_I64) ) ));
    stmt( IRStmt_Put( OFFB_CC_DEP1,
-                     IRExpr_Mux0X( mkexpr(guardB),
-                                   IRExpr_Get(OFFB_CC_DEP1,Ity_I64),
-                                   widenUto64(mkexpr(res)))) );
+                     IRExpr_ITE( mkexpr(guardB),
+                                 widenUto64(mkexpr(res)),
+                                 IRExpr_Get(OFFB_CC_DEP1,Ity_I64) ) ));
    stmt( IRStmt_Put( OFFB_CC_DEP2,
-                     IRExpr_Mux0X( mkexpr(guardB),
-                                   IRExpr_Get(OFFB_CC_DEP2,Ity_I64),
-                                   widenUto64(mkexpr(resUS)))) );
+                     IRExpr_ITE( mkexpr(guardB),
+                                 widenUto64(mkexpr(resUS)),
+                                 IRExpr_Get(OFFB_CC_DEP2,Ity_I64) ) ));
 }
@@ -3607,21 +3607,21 @@ ULong dis_Grp2 ( VexAbiInfo* vbi,
       /* CC_DEP1 is the rotated value.  CC_NDEP is flags before. */
       stmt( IRStmt_Put( OFFB_CC_OP,
-                        IRExpr_Mux0X( mkexpr(rot_amt64b),
-                                      IRExpr_Get(OFFB_CC_OP,Ity_I64),
-                                      mkU64(ccOp))) );
+                        IRExpr_ITE( mkexpr(rot_amt64b),
+                                    mkU64(ccOp),
+                                    IRExpr_Get(OFFB_CC_OP,Ity_I64) ) ));
       stmt( IRStmt_Put( OFFB_CC_DEP1,
-                        IRExpr_Mux0X( mkexpr(rot_amt64b),
-                                      IRExpr_Get(OFFB_CC_DEP1,Ity_I64),
-                                      widenUto64(mkexpr(dst1)))) );
+                        IRExpr_ITE( mkexpr(rot_amt64b),
+                                    widenUto64(mkexpr(dst1)),
+                                    IRExpr_Get(OFFB_CC_DEP1,Ity_I64) ) ));
       stmt( IRStmt_Put( OFFB_CC_DEP2,
-                        IRExpr_Mux0X( mkexpr(rot_amt64b),
-                                      IRExpr_Get(OFFB_CC_DEP2,Ity_I64),
-                                      mkU64(0))) );
+                        IRExpr_ITE( mkexpr(rot_amt64b),
+                                    mkU64(0),
+                                    IRExpr_Get(OFFB_CC_DEP2,Ity_I64) ) ));
       stmt( IRStmt_Put( OFFB_CC_NDEP,
-                        IRExpr_Mux0X( mkexpr(rot_amt64b),
-                                      IRExpr_Get(OFFB_CC_NDEP,Ity_I64),
-                                      mkexpr(oldFlags))) );
+                        IRExpr_ITE( mkexpr(rot_amt64b),
+                                    mkexpr(oldFlags),
+                                    IRExpr_Get(OFFB_CC_NDEP,Ity_I64) ) ));
    } /* if (isRotate) */
    /* Save result, and finish up. */
@@ -4663,10 +4663,10 @@ static IRTemp gen_LZCNT ( IRType ty, IRTemp src )
    // special-case around that.
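/* A minimal illustration (not from the patch) of the operand reordering the
   commit message describes, using the IR constructors declared in VEX's
   public IR header; the helper name 'select64' is invented for the example.
   Mux0X(cond, expr0, exprX) yielded expr0 when cond was 0 and exprX
   otherwise; ITE(cond, iftrue, iffalse) is a conventional if-then-else, so
   every call site in this patch simply swaps its last two arguments. */
static IRExpr* select64 ( IRExpr* cond,      /* :: Ity_I1  */
                          IRExpr* ifTrue64,  /* :: Ity_I64 */
                          IRExpr* ifFalse64  /* :: Ity_I64 */ )
{
   /* Old spelling:  return IRExpr_Mux0X( cond, ifFalse64, ifTrue64 ); */
   return IRExpr_ITE( cond, ifTrue64, ifFalse64 );
}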
IRTemp res64 = newTemp(Ity_I64); assign(res64, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpEQ64, mkexpr(src64x), mkU64(0)), - unop(Iop_Clz64, mkexpr(src64x)), - mkU64(8 * sizeofIRType(ty)) + mkU64(8 * sizeofIRType(ty)), + unop(Iop_Clz64, mkexpr(src64x)) )); IRTemp res = newTemp(ty); @@ -4805,11 +4805,11 @@ static void put_ST ( Int i, IRExpr* value ) { put_ST_UNCHECKED( i, - IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)), - /* 0 means empty */ - value, - /* non-0 means full */ - mkQNaN64() + IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)), + /* non-0 means full */ + mkQNaN64(), + /* 0 means empty */ + value ) ); } @@ -4831,11 +4831,11 @@ static IRExpr* get_ST_UNCHECKED ( Int i ) static IRExpr* get_ST ( Int i ) { return - IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)), - /* 0 means empty */ - mkQNaN64(), - /* non-0 means full */ - get_ST_UNCHECKED(i)); + IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)), + /* non-0 means full */ + get_ST_UNCHECKED(i), + /* 0 means empty */ + mkQNaN64()); } @@ -5007,13 +5007,13 @@ static IRExpr* x87ishly_qnarrow_32_to_16 ( IRExpr* e32 ) IRTemp t32 = newTemp(Ity_I32); assign( t32, e32 ); return - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT64U, unop(Iop_32Uto64, binop(Iop_Add32, mkexpr(t32), mkU32(32768))), mkU64(65536)), - mkU16( 0x8000 ), - unop(Iop_32to16, mkexpr(t32))); + unop(Iop_32to16, mkexpr(t32)), + mkU16( 0x8000 ) ); } @@ -5748,36 +5748,36 @@ ULong dis_FPU ( /*OUT*/Bool* decode_ok, r_src = (UInt)modrm - 0xC0; DIP("fcmovb %%st(%u), %%st(0)\n", r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_amd64g_calculate_condition(AMD64CondB), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xC8 ... 0xCF: /* FCMOVE(Z) ST(i), ST(0) */ r_src = (UInt)modrm - 0xC8; DIP("fcmovz %%st(%u), %%st(0)\n", r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_amd64g_calculate_condition(AMD64CondZ), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xD0 ... 0xD7: /* FCMOVBE ST(i), ST(0) */ r_src = (UInt)modrm - 0xD0; DIP("fcmovbe %%st(%u), %%st(0)\n", r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_amd64g_calculate_condition(AMD64CondBE), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xD8 ... 0xDF: /* FCMOVU ST(i), ST(0) */ r_src = (UInt)modrm - 0xD8; DIP("fcmovu %%st(%u), %%st(0)\n", r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_amd64g_calculate_condition(AMD64CondP), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xE9: /* FUCOMPP %st(0),%st(1) */ @@ -5912,9 +5912,9 @@ ULong dis_FPU ( /*OUT*/Bool* decode_ok, r_src = (UInt)modrm - 0xC0; DIP("fcmovnb %%st(%u), %%st(0)\n", r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_amd64g_calculate_condition(AMD64CondNB), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xC8 ... 
0xCF: /* FCMOVNE(NZ) ST(i), ST(0) */ @@ -5922,10 +5922,10 @@ ULong dis_FPU ( /*OUT*/Bool* decode_ok, DIP("fcmovnz %%st(%u), %%st(0)\n", r_src); put_ST_UNCHECKED( 0, - IRExpr_Mux0X( + IRExpr_ITE( mk_amd64g_calculate_condition(AMD64CondNZ), - get_ST(0), - get_ST(r_src) + get_ST(r_src), + get_ST(0) ) ); break; @@ -5935,10 +5935,10 @@ ULong dis_FPU ( /*OUT*/Bool* decode_ok, DIP("fcmovnbe %%st(%u), %%st(0)\n", r_src); put_ST_UNCHECKED( 0, - IRExpr_Mux0X( + IRExpr_ITE( mk_amd64g_calculate_condition(AMD64CondNBE), - get_ST(0), - get_ST(r_src) + get_ST(r_src), + get_ST(0) ) ); break; @@ -5948,10 +5948,10 @@ ULong dis_FPU ( /*OUT*/Bool* decode_ok, DIP("fcmovnu %%st(%u), %%st(0)\n", r_src); put_ST_UNCHECKED( 0, - IRExpr_Mux0X( + IRExpr_ITE( mk_amd64g_calculate_condition(AMD64CondNP), - get_ST(0), - get_ST(r_src) + get_ST(r_src), + get_ST(0) ) ); break; @@ -6887,20 +6887,20 @@ static ULong dis_MMX_shiftG_byE ( VexAbiInfo* vbi, if (shl || shr) { assign( g1, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT64U,mkexpr(amt),mkU64(size)), - mkU64(0), - binop(op, mkexpr(g0), mkexpr(amt8)) + binop(op, mkexpr(g0), mkexpr(amt8)), + mkU64(0) ) ); } else if (sar) { assign( g1, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT64U,mkexpr(amt),mkU64(size)), - binop(op, mkexpr(g0), mkU8(size-1)), - binop(op, mkexpr(g0), mkexpr(amt8)) + binop(op, mkexpr(g0), mkexpr(amt8)), + binop(op, mkexpr(g0), mkU8(size-1)) ) ); } else { @@ -7374,14 +7374,14 @@ IRExpr* shiftL64_with_extras ( IRTemp base, IRTemp xtra, IRTemp amt ) else (base << amt) | (xtra >>u (64-amt)) */ return - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpNE8, mkexpr(amt), mkU8(0)), - mkexpr(base), binop(Iop_Or64, binop(Iop_Shl64, mkexpr(base), mkexpr(amt)), binop(Iop_Shr64, mkexpr(xtra), binop(Iop_Sub8, mkU8(64), mkexpr(amt))) - ) + ), + mkexpr(base) ); } @@ -7395,14 +7395,14 @@ IRExpr* shiftR64_with_extras ( IRTemp xtra, IRTemp base, IRTemp amt ) else (base >>u amt) | (xtra << (64-amt)) */ return - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpNE8, mkexpr(amt), mkU8(0)), - mkexpr(base), binop(Iop_Or64, binop(Iop_Shr64, mkexpr(base), mkexpr(amt)), binop(Iop_Shl64, mkexpr(xtra), binop(Iop_Sub8, mkU8(64), mkexpr(amt))) - ) + ), + mkexpr(base) ); } @@ -7802,11 +7802,11 @@ ULong dis_bs_E_G ( VexAbiInfo* vbi, stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) )); stmt( IRStmt_Put( OFFB_CC_DEP1, - IRExpr_Mux0X( mkexpr(srcB), - /* src==0 */ - mkU64(AMD64G_CC_MASK_Z), - /* src!=0 */ - mkU64(0) + IRExpr_ITE( mkexpr(srcB), + /* src!=0 */ + mkU64(0), + /* src==0 */ + mkU64(AMD64G_CC_MASK_Z) ) )); /* Set NDEP even though it isn't used. This makes redundant-PUT @@ -7841,15 +7841,15 @@ ULong dis_bs_E_G ( VexAbiInfo* vbi, /* The main computation, guarding against zero. */ assign( dst64, - IRExpr_Mux0X( + IRExpr_ITE( mkexpr(srcB), - /* src == 0 -- leave dst unchanged */ - widenUto64( getIRegG( sz, pfx, modrm ) ), /* src != 0 */ fwds ? 
unop(Iop_Ctz64, mkexpr(src64)) : binop(Iop_Sub64, mkU64(63), - unop(Iop_Clz64, mkexpr(src64))) + unop(Iop_Clz64, mkexpr(src64))), + /* src == 0 -- leave dst unchanged */ + widenUto64( getIRegG( sz, pfx, modrm ) ) ) ); @@ -7974,10 +7974,10 @@ ULong dis_cmpxchg_G_E ( /*OUT*/Bool* ok, /* There are 3 cases to consider: reg-reg: ignore any lock prefix, generate sequence based - on Mux0X + on ITE reg-mem, not locked: ignore any lock prefix, generate sequence - based on Mux0X + based on ITE reg-mem, locked: use IRCAS */ @@ -7990,8 +7990,8 @@ ULong dis_cmpxchg_G_E ( /*OUT*/Bool* ok, assign( acc, getIRegRAX(size) ); setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty); assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) ); - assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) ); - assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) ); + assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) ); + assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) ); putIRegRAX(size, mkexpr(acc2)); putIRegE(size, pfx, rm, mkexpr(dest2)); DIP("cmpxchg%c %s,%s\n", nameISize(size), @@ -8007,8 +8007,8 @@ ULong dis_cmpxchg_G_E ( /*OUT*/Bool* ok, assign( acc, getIRegRAX(size) ); setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty); assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) ); - assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) ); - assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) ); + assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) ); + assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) ); putIRegRAX(size, mkexpr(acc2)); storeLE( mkexpr(addr), mkexpr(dest2) ); DIP("cmpxchg%c %s,%s\n", nameISize(size), @@ -8030,7 +8030,7 @@ ULong dis_cmpxchg_G_E ( /*OUT*/Bool* ok, )); setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty); assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) ); - assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) ); + assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) ); putIRegRAX(size, mkexpr(acc2)); DIP("cmpxchg%c %s,%s\n", nameISize(size), nameIRegG(size,pfx,rm), dis_buf); @@ -8079,9 +8079,9 @@ ULong dis_cmov_E_G ( VexAbiInfo* vbi, assign( tmpd, getIRegG(sz, pfx, rm) ); putIRegG( sz, pfx, rm, - IRExpr_Mux0X( mk_amd64g_calculate_condition(cond), - mkexpr(tmpd), - mkexpr(tmps) ) + IRExpr_ITE( mk_amd64g_calculate_condition(cond), + mkexpr(tmps), + mkexpr(tmpd) ) ); DIP("cmov%s %s,%s\n", name_AMD64Condcode(cond), nameIRegE(sz,pfx,rm), @@ -8096,9 +8096,9 @@ ULong dis_cmov_E_G ( VexAbiInfo* vbi, assign( tmpd, getIRegG(sz, pfx, rm) ); putIRegG( sz, pfx, rm, - IRExpr_Mux0X( mk_amd64g_calculate_condition(cond), - mkexpr(tmpd), - mkexpr(tmps) ) + IRExpr_ITE( mk_amd64g_calculate_condition(cond), + mkexpr(tmps), + mkexpr(tmpd) ) ); DIP("cmov%s %s,%s\n", name_AMD64Condcode(cond), @@ -8822,20 +8822,20 @@ static ULong dis_SSE_shiftG_byE ( VexAbiInfo* vbi, if (shl || shr) { assign( g1, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)), - mkV128(0x0000), - binop(op, mkexpr(g0), mkexpr(amt8)) + binop(op, mkexpr(g0), mkexpr(amt8)), + mkV128(0x0000) ) ); } else if (sar) { assign( g1, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)), - binop(op, mkexpr(g0), mkU8(size-1)), - binop(op, mkexpr(g0), mkexpr(amt8)) + binop(op, mkexpr(g0), mkexpr(amt8)), + binop(op, mkexpr(g0), mkU8(size-1)) ) ); } else { @@ -16909,9 +16909,9 @@ static Long dis_PCMPISTRI_3A ( UChar modrm, UInt regNoL, UInt regNoR, IRTemp validL = 
newTemp(Ity_I32); assign(validL, binop(Iop_Sub32, - IRExpr_Mux0X(mkexpr(zmaskL_zero), - mkU32(0), - binop(Iop_Shl32, mkU32(1), ctzL)), + IRExpr_ITE(mkexpr(zmaskL_zero), + binop(Iop_Shl32, mkU32(1), ctzL), + mkU32(0)), mkU32(1))); /* And similarly for validR. */ @@ -16920,9 +16920,9 @@ static Long dis_PCMPISTRI_3A ( UChar modrm, UInt regNoL, UInt regNoR, assign(zmaskR_zero, binop(Iop_ExpCmpNE32, mkexpr(zmaskR), mkU32(0))); IRTemp validR = newTemp(Ity_I32); assign(validR, binop(Iop_Sub32, - IRExpr_Mux0X(mkexpr(zmaskR_zero), - mkU32(0), - binop(Iop_Shl32, mkU32(1), ctzR)), + IRExpr_ITE(mkexpr(zmaskR_zero), + binop(Iop_Shl32, mkU32(1), ctzR), + mkU32(0)), mkU32(1))); /* Do the actual comparison. */ @@ -16962,18 +16962,18 @@ static Long dis_PCMPISTRI_3A ( UChar modrm, UInt regNoL, UInt regNoR, /* Now for the condition codes... */ /* C == 0 iff intRes2 == 0 */ - IRExpr *c_bit = IRExpr_Mux0X( binop(Iop_ExpCmpNE32, mkexpr(intRes2), - mkU32(0)), - mkU32(0), - mkU32(1 << AMD64G_CC_SHIFT_C) ); + IRExpr *c_bit = IRExpr_ITE( binop(Iop_ExpCmpNE32, mkexpr(intRes2), + mkU32(0)), + mkU32(1 << AMD64G_CC_SHIFT_C), + mkU32(0)); /* Z == 1 iff any in argL is 0 */ - IRExpr *z_bit = IRExpr_Mux0X( mkexpr(zmaskL_zero), - mkU32(0), - mkU32(1 << AMD64G_CC_SHIFT_Z) ); + IRExpr *z_bit = IRExpr_ITE( mkexpr(zmaskL_zero), + mkU32(1 << AMD64G_CC_SHIFT_Z), + mkU32(0)); /* S == 1 iff any in argR is 0 */ - IRExpr *s_bit = IRExpr_Mux0X( mkexpr(zmaskR_zero), - mkU32(0), - mkU32(1 << AMD64G_CC_SHIFT_S) ); + IRExpr *s_bit = IRExpr_ITE( mkexpr(zmaskR_zero), + mkU32(1 << AMD64G_CC_SHIFT_S), + mkU32(0)); /* O == IntRes2[0] */ IRExpr *o_bit = binop(Iop_Shl32, binop(Iop_And32, mkexpr(intRes2), mkU32(0x01)), @@ -18912,37 +18912,37 @@ Long dis_ESC_NONE ( If zero, put 1 in OFFB_DFLAG, else -1 in OFFB_DFLAG. */ stmt( IRStmt_Put( OFFB_DFLAG, - IRExpr_Mux0X( + IRExpr_ITE( unop(Iop_64to1, binop(Iop_And64, binop(Iop_Shr64, mkexpr(t1), mkU8(10)), mkU64(1))), - mkU64(1), - mkU64(0xFFFFFFFFFFFFFFFFULL))) + mkU64(0xFFFFFFFFFFFFFFFFULL), + mkU64(1))) ); /* And set the ID flag */ stmt( IRStmt_Put( OFFB_IDFLAG, - IRExpr_Mux0X( + IRExpr_ITE( unop(Iop_64to1, binop(Iop_And64, binop(Iop_Shr64, mkexpr(t1), mkU8(21)), mkU64(1))), - mkU64(0), - mkU64(1))) + mkU64(1), + mkU64(0))) ); /* And set the AC flag too */ stmt( IRStmt_Put( OFFB_ACFLAG, - IRExpr_Mux0X( + IRExpr_ITE( unop(Iop_64to1, binop(Iop_And64, binop(Iop_Shr64, mkexpr(t1), mkU8(18)), mkU64(1))), - mkU64(0), - mkU64(1))) + mkU64(1), + mkU64(0))) ); DIP("popf%c\n", nameISize(sz)); @@ -20344,16 +20344,16 @@ Long dis_ESC_0F ( expdHi64:expdLo64, even if we're doing a cmpxchg8b. */ /* It's just _so_ much fun ... */ putIRegRDX( 8, - IRExpr_Mux0X( mkexpr(success), - sz == 4 ? unop(Iop_32Uto64, mkexpr(oldHi)) - : mkexpr(oldHi), - mkexpr(expdHi64) + IRExpr_ITE( mkexpr(success), + mkexpr(expdHi64), + sz == 4 ? unop(Iop_32Uto64, mkexpr(oldHi)) + : mkexpr(oldHi) )); putIRegRAX( 8, - IRExpr_Mux0X( mkexpr(success), - sz == 4 ? unop(Iop_32Uto64, mkexpr(oldLo)) - : mkexpr(oldLo), - mkexpr(expdLo64) + IRExpr_ITE( mkexpr(success), + mkexpr(expdLo64), + sz == 4 ? 
unop(Iop_32Uto64, mkexpr(oldLo)) + : mkexpr(oldLo) )); /* Copy the success bit into the Z flag and leave the others @@ -20838,20 +20838,20 @@ static ULong dis_AVX128_shiftV_byE ( VexAbiInfo* vbi, if (shl || shr) { assign( g1, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)), - mkV128(0x0000), - binop(op, mkexpr(g0), mkexpr(amt8)) + binop(op, mkexpr(g0), mkexpr(amt8)), + mkV128(0x0000) ) ); } else if (sar) { assign( g1, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)), - binop(op, mkexpr(g0), mkU8(size-1)), - binop(op, mkexpr(g0), mkexpr(amt8)) + binop(op, mkexpr(g0), mkexpr(amt8)), + binop(op, mkexpr(g0), mkU8(size-1)) ) ); } else { @@ -24620,13 +24620,13 @@ static IRTemp math_PERMILPD_VAR_128 ( IRTemp dataV, IRTemp ctrlV ) breakupV128to64s( dataV, &dHi, &dLo ); breakupV128to64s( ctrlV, &cHi, &cLo ); IRExpr* rHi - = IRExpr_Mux0X( unop(Iop_64to1, - binop(Iop_Shr64, mkexpr(cHi), mkU8(1))), - mkexpr(dLo), mkexpr(dHi) ); + = IRExpr_ITE( unop(Iop_64to1, + binop(Iop_Shr64, mkexpr(cHi), mkU8(1))), + mkexpr(dHi), mkexpr(dLo) ); IRExpr* rLo - = IRExpr_Mux0X( unop(Iop_64to1, - binop(Iop_Shr64, mkexpr(cLo), mkU8(1))), - mkexpr(dLo), mkexpr(dHi) ); + = IRExpr_ITE( unop(Iop_64to1, + binop(Iop_Shr64, mkexpr(cLo), mkU8(1))), + mkexpr(dHi), mkexpr(dLo) ); IRTemp res = newTemp(Ity_V128); assign(res, binop(Iop_64HLtoV128, rHi, rLo)); return res; diff --git a/VEX/priv/guest_arm_helpers.c b/VEX/priv/guest_arm_helpers.c index 5b43d7a393..6b2fd436bd 100644 --- a/VEX/priv/guest_arm_helpers.c +++ b/VEX/priv/guest_arm_helpers.c @@ -661,12 +661,12 @@ IRExpr* guest_arm_spechelper ( const HChar* function_name, --> oldC ? (argR <=u argL) : (argR oldC ? (argR <=u argL) : (argR >s 31)) */ binop(Iop_CmpEQ32, mkexpr(hi32), binop( Iop_Sar32, mkexpr(lo32), mkU8(31))), + /* then: within signed-32 range: lo half good enough */ + mkexpr(lo32), /* else: sign dep saturate: 1->0x80000000, 0->0x7FFFFFFF */ binop(Iop_Add32, mkU32(0x7FFFFFFF), - binop(Iop_Shr32, mkexpr(hi32), mkU8(31))), - /* then: within signed-32 range: lo half good enough */ - mkexpr(lo32) ); + binop(Iop_Shr32, mkexpr(hi32), mkU8(31)))); } /* Unsigned saturating narrow 64S to 32 */ @@ -819,13 +819,13 @@ static IRExpr* mkQNarrow64Uto32 ( IRExpr* t64 ) assign( hi32, unop(Iop_64HIto32, t64)); assign( lo32, unop(Iop_64to32, t64)); - return IRExpr_Mux0X( + return IRExpr_ITE( /* if (top 32 bits of t64 are 0) */ binop(Iop_CmpEQ32, mkexpr(hi32), mkU32(0)), - /* else: positive saturate -> 0xFFFFFFFF */ - mkU32(0xFFFFFFFF), /* then: within unsigned-32 range: lo half good enough */ - mkexpr(lo32) ); + mkexpr(lo32), + /* else: positive saturate -> 0xFFFFFFFF */ + mkU32(0xFFFFFFFF)); } /* Signed saturate narrow 64->32, combining to V128 */ @@ -1446,13 +1446,13 @@ static IRExpr* /* :: Ity_I32/64 */ ROTL ( IRExpr* src, binop(Iop_Shl32, src, mask), binop(Iop_Shr32, src, binop(Iop_Sub8, mkU8(32), mask))); } - /* Note: the MuxOX is not merely an optimisation; it's needed + /* Note: the ITE not merely an optimisation; it's needed because otherwise the Shr is a shift by the word size when mask denotes zero. For rotates by immediates, a lot of this junk gets folded out. 
*/ - return IRExpr_Mux0X( binop(Iop_CmpNE8, mask, mkU8(0)), - /* zero rotate */ src, - /* non-zero rotate */ rot ); + return IRExpr_ITE( binop(Iop_CmpNE8, mask, mkU8(0)), + /* non-zero rotate */ rot, + /* zero rotate */ src); } /* Standard effective address calc: (rA + rB) */ @@ -2229,13 +2229,13 @@ static void set_XER_CA_32 ( UInt op, IRExpr* res, ) ); xer_ca - = IRExpr_Mux0X( + = IRExpr_ITE( /* shift amt > 31 ? */ binop(Iop_CmpLT32U, mkU32(31), argR), - /* no -- be like srawi */ - unop(Iop_1Uto32, binop(Iop_CmpNE32, xer_ca, mkU32(0))), /* yes -- get sign bit of argL */ - binop(Iop_Shr32, argL, mkU8(31)) + binop(Iop_Shr32, argL, mkU8(31)), + /* no -- be like srawi */ + unop(Iop_1Uto32, binop(Iop_CmpNE32, xer_ca, mkU32(0))) ); break; @@ -2349,14 +2349,14 @@ static void set_XER_CA_64 ( UInt op, IRExpr* res, ) ); xer_ca - = IRExpr_Mux0X( + = IRExpr_ITE( /* shift amt > 31 ? */ binop(Iop_CmpLT64U, mkU64(31), argR), - /* no -- be like srawi */ - unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0))), /* yes -- get sign bit of argL */ - unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63))) - ); + unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63))), + /* no -- be like srawi */ + unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0))) + ); break; case /* 11 */ PPCG_FLAG_OP_SRAWI: @@ -2403,13 +2403,13 @@ static void set_XER_CA_64 ( UInt op, IRExpr* res, ) ); xer_ca - = IRExpr_Mux0X( + = IRExpr_ITE( /* shift amt > 63 ? */ binop(Iop_CmpLT64U, mkU64(63), argR), - /* no -- be like sradi */ - unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0))), /* yes -- get sign bit of argL */ - unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63))) + unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63))), + /* no -- be like sradi */ + unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0))) ); break; @@ -3855,9 +3855,9 @@ static Bool dis_int_logic ( UInt theInstr ) // Iop_Clz32 undefined for arg==0, so deal with that case: irx = binop(Iop_CmpNE32, lo32, mkU32(0)); assign(rA, mkWidenFrom32(ty, - IRExpr_Mux0X( irx, - mkU32(32), - unop(Iop_Clz32, lo32)), + IRExpr_ITE( irx, + unop(Iop_Clz32, lo32), + mkU32(32)), False)); // TODO: alternatively: assign(rA, verbose_Clz32(rS)); @@ -3962,9 +3962,9 @@ static Bool dis_int_logic ( UInt theInstr ) flag_rC ? ".":"", rA_addr, rS_addr); // Iop_Clz64 undefined for arg==0, so deal with that case: irx = binop(Iop_CmpNE64, mkexpr(rS), mkU64(0)); - assign(rA, IRExpr_Mux0X( irx, - mkU64(64), - unop(Iop_Clz64, mkexpr(rS)) )); + assign(rA, IRExpr_ITE( irx, + unop(Iop_Clz64, mkexpr(rS)), + mkU64(64) )); // TODO: alternatively: assign(rA, verbose_Clz64(rS)); break; @@ -6095,9 +6095,9 @@ static Bool dis_int_shift ( UInt theInstr ) e_tmp = binop( Iop_Sar32, mkexpr(rS_lo32), unop( Iop_32to8, - IRExpr_Mux0X( mkexpr(outofrange), - mkexpr(sh_amt), - mkU32(31)) ) ); + IRExpr_ITE( mkexpr(outofrange), + mkU32(31), + mkexpr(sh_amt)) ) ); assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */True) ); set_XER_CA( ty, PPCG_FLAG_OP_SRAW, @@ -6193,9 +6193,9 @@ static Bool dis_int_shift ( UInt theInstr ) binop( Iop_Sar64, mkexpr(rS), unop( Iop_64to8, - IRExpr_Mux0X( mkexpr(outofrange), - mkexpr(sh_amt), - mkU64(63)) )) + IRExpr_ITE( mkexpr(outofrange), + mkU64(63), + mkexpr(sh_amt)) )) ); set_XER_CA( ty, PPCG_FLAG_OP_SRAD, mkexpr(rA), mkexpr(rS), mkexpr(sh_amt), @@ -7373,10 +7373,10 @@ static Bool dis_fp_arith ( UInt theInstr ) // frD = (frA >= 0.0) ? frC : frB // = (cc_b0 == 0) ? 
frC : frB assign( frD, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpEQ32, mkexpr(cc_b0), mkU32(0)), - mkexpr(frB), - mkexpr(frC) )); + mkexpr(frC), + mkexpr(frB) )); /* One of the rare ones which don't mess with FPRF */ set_FPRF = False; @@ -8258,14 +8258,15 @@ static Bool dis_fp_round ( UInt theInstr ) /* need to preserve sign of zero */ /* frD = (fabs(frB) > 9e18) ? frB : (sign(frB)) ? -fabs((double)r_tmp64) : (double)r_tmp64 */ - assign(frD, IRExpr_Mux0X( + assign(frD, IRExpr_ITE( binop(Iop_CmpNE8, unop(Iop_32to8, binop(Iop_CmpF64, IRExpr_Const(IRConst_F64(9e18)), unop(Iop_AbsF64, mkexpr(frB)))), mkU8(0)), - IRExpr_Mux0X( + mkexpr(frB), + IRExpr_ITE( binop(Iop_CmpNE32, binop(Iop_Shr32, unop(Iop_64HIto32, @@ -8273,13 +8274,12 @@ static Bool dis_fp_round ( UInt theInstr ) mkexpr(frB))), mkU8(31)), mkU32(0)), - binop(Iop_I64StoF64, mkU32(0), mkexpr(r_tmp64) ), unop(Iop_NegF64, unop( Iop_AbsF64, binop(Iop_I64StoF64, mkU32(0), - mkexpr(r_tmp64)) )) - ), - mkexpr(frB) + mkexpr(r_tmp64)) )), + binop(Iop_I64StoF64, mkU32(0), mkexpr(r_tmp64) ) + ) )); break; @@ -11839,26 +11839,26 @@ dis_vx_conv ( UInt theInstr, UInt opc2 ) assign( res1, unop(Iop_64HIto32, mkexpr(lo64)) ); assign( res0, unop(Iop_64to32, mkexpr(lo64)) ); - b3_result = IRExpr_Mux0X(is_NaN_32(b3), - // else: result is from the Iop_QFtoI32{s|u}x4_RZ - mkexpr(res3), - // then: result is 0x{8|0}80000000 - mkU32(un_signed ? 0x00000000 : 0x80000000)); - b2_result = IRExpr_Mux0X(is_NaN_32(b2), - // else: result is from the Iop_QFtoI32{s|u}x4_RZ - mkexpr(res2), - // then: result is 0x{8|0}80000000 - mkU32(un_signed ? 0x00000000 : 0x80000000)); - b1_result = IRExpr_Mux0X(is_NaN_32(b1), - // else: result is from the Iop_QFtoI32{s|u}x4_RZ - mkexpr(res1), - // then: result is 0x{8|0}80000000 - mkU32(un_signed ? 0x00000000 : 0x80000000)); - b0_result = IRExpr_Mux0X(is_NaN_32(b0), - // else: result is from the Iop_QFtoI32{s|u}x4_RZ - mkexpr(res0), - // then: result is 0x{8|0}80000000 - mkU32(un_signed ? 0x00000000 : 0x80000000)); + b3_result = IRExpr_ITE(is_NaN_32(b3), + // then: result is 0x{8|0}80000000 + mkU32(un_signed ? 0x00000000 : 0x80000000), + // else: result is from the Iop_QFtoI32{s|u}x4_RZ + mkexpr(res3)); + b2_result = IRExpr_ITE(is_NaN_32(b2), + // then: result is 0x{8|0}80000000 + mkU32(un_signed ? 0x00000000 : 0x80000000), + // else: result is from the Iop_QFtoI32{s|u}x4_RZ + mkexpr(res2)); + b1_result = IRExpr_ITE(is_NaN_32(b1), + // then: result is 0x{8|0}80000000 + mkU32(un_signed ? 0x00000000 : 0x80000000), + // else: result is from the Iop_QFtoI32{s|u}x4_RZ + mkexpr(res1)); + b0_result = IRExpr_ITE(is_NaN_32(b0), + // then: result is 0x{8|0}80000000 + mkU32(un_signed ? 
0x00000000 : 0x80000000), + // else: result is from the Iop_QFtoI32{s|u}x4_RZ + mkexpr(res0)); putVSReg( XT, binop( Iop_64HLtoV128, @@ -12781,19 +12781,19 @@ static IRExpr * _get_maxmin_fp_NaN(IRTemp frA_I64, IRTemp frB_I64) #define SNAN_MASK 0x0008000000000000ULL return - IRExpr_Mux0X(mkexpr(frA_isSNaN), - /* else: if frB is a SNaN */ - IRExpr_Mux0X(mkexpr(frB_isSNaN), - /* else: if frB is a QNaN */ - IRExpr_Mux0X(mkexpr(frB_isQNaN), - /* else: frA is a QNaN, so result = frB */ - mkexpr(frB_I64), - /* then: result = frA */ - mkexpr(frA_I64)), - /* then: result = frB converted to QNaN */ - binop(Iop_Or64, mkexpr(frB_I64), mkU64(SNAN_MASK))), - /* then: result = frA converted to QNaN */ - binop(Iop_Or64, mkexpr(frA_I64), mkU64(SNAN_MASK))); + IRExpr_ITE(mkexpr(frA_isSNaN), + /* then: result = frA converted to QNaN */ + binop(Iop_Or64, mkexpr(frA_I64), mkU64(SNAN_MASK)), + /* else: if frB is a SNaN */ + IRExpr_ITE(mkexpr(frB_isSNaN), + /* then: result = frB converted to QNaN */ + binop(Iop_Or64, mkexpr(frB_I64), mkU64(SNAN_MASK)), + /* else: if frB is a QNaN */ + IRExpr_ITE(mkexpr(frB_isQNaN), + /* then: result = frA */ + mkexpr(frA_I64), + /* else: frA is a QNaN, so result = frB */ + mkexpr(frB_I64)))); } /* @@ -12807,13 +12807,13 @@ static IRExpr * _get_maxmin_fp_cmp(IRTemp src1, IRTemp src2, Bool isMin) unop( Iop_ReinterpI64asF64, mkexpr( src2 ) ) ) ); - return IRExpr_Mux0X( binop( Iop_CmpEQ32, + return IRExpr_ITE( binop( Iop_CmpEQ32, mkexpr( src1cmpsrc2 ), mkU32( isMin ? PPC_CMP_LT : PPC_CMP_GT ) ), - /* else: use src2 */ - mkexpr( src2 ), - /* then: use src1 */ - mkexpr( src1 ) ); + /* then: use src1 */ + mkexpr( src1 ), + /* else: use src2 */ + mkexpr( src2 ) ); } /* @@ -12840,23 +12840,23 @@ static IRExpr * get_max_min_fp(IRTemp frA_I64, IRTemp frB_I64, Bool isMin) assign(anyNaN, mkOR1(is_NaN(frA_I64), is_NaN(frB_I64))); #define MINUS_ZERO 0x8000000000000000ULL - return IRExpr_Mux0X( /* If both arguments are zero . . . */ - mkAND1( mkexpr( frA_isZero ), mkexpr( frB_isZero ) ), - /* else: check if either input is a NaN*/ - IRExpr_Mux0X( mkexpr( anyNaN ), - /* else: use "comparison helper" */ - _get_maxmin_fp_cmp( frB_I64, frA_I64, isMin ), - /* then: use "NaN helper" */ - _get_maxmin_fp_NaN( frA_I64, frB_I64 ) ), - /* then: if frA is -0 and isMin==True, return -0; - * else if frA is +0 and isMin==False; return +0; - * otherwise, simply return frB. */ - IRExpr_Mux0X( binop( Iop_CmpEQ32, - unop( Iop_64HIto32, - mkexpr( frA_I64 ) ), - mkU32( isMin ? 0x80000000 : 0 ) ), - mkexpr( frB_I64 ), - mkU64( isMin ? MINUS_ZERO : 0ULL ) ) ); + return IRExpr_ITE( /* If both arguments are zero . . . */ + mkAND1( mkexpr( frA_isZero ), mkexpr( frB_isZero ) ), + /* then: if frA is -0 and isMin==True, return -0; + * else if frA is +0 and isMin==False; return +0; + * otherwise, simply return frB. */ + IRExpr_ITE( binop( Iop_CmpEQ32, + unop( Iop_64HIto32, + mkexpr( frA_I64 ) ), + mkU32( isMin ? 0x80000000 : 0 ) ), + mkU64( isMin ? MINUS_ZERO : 0ULL ), + mkexpr( frB_I64 ) ), + /* else: check if either input is a NaN*/ + IRExpr_ITE( mkexpr( anyNaN ), + /* then: use "NaN helper" */ + _get_maxmin_fp_NaN( frA_I64, frB_I64 ), + /* else: use "comparison helper" */ + _get_maxmin_fp_cmp( frB_I64, frA_I64, isMin ) )); } /* @@ -12910,30 +12910,30 @@ static IRExpr * _do_vsx_fp_roundToInt(IRTemp frB_I64, UInt opc2, /* frD = (fabs(frB) > 9e18) ? frB : (sign(frB)) ? 
-fabs((double)intermediateResult) : (double)intermediateResult */ assign( frD, - IRExpr_Mux0X( + IRExpr_ITE( binop( Iop_CmpNE8, unop( Iop_32to8, binop( Iop_CmpF64, IRExpr_Const( IRConst_F64( 9e18 ) ), unop( Iop_AbsF64, mkexpr( frB ) ) ) ), mkU8(0) ), - IRExpr_Mux0X( + mkexpr( frB ), + IRExpr_ITE( binop( Iop_CmpNE32, binop( Iop_Shr32, unop( Iop_64HIto32, mkexpr( frB_I64 ) ), mkU8( 31 ) ), mkU32(0) ), - binop( Iop_I64StoF64, - mkU32( 0 ), - mkexpr( intermediateResult ) ), unop( Iop_NegF64, unop( Iop_AbsF64, binop( Iop_I64StoF64, mkU32( 0 ), - mkexpr( intermediateResult ) ) ) ) - ), - mkexpr( frB ) + mkexpr( intermediateResult ) ) ) ), + binop( Iop_I64StoF64, + mkU32( 0 ), + mkexpr( intermediateResult ) ) + ) ) ); @@ -12948,12 +12948,12 @@ static IRExpr * _do_vsx_fp_roundToInt(IRTemp frB_I64, UInt opc2, binop( Iop_And32, hi32, mkU32( 0x00080000 ) ), mkU32( 0 ) ) ) ); - return IRExpr_Mux0X( mkexpr( is_SNAN ), - mkexpr( frD ), + return IRExpr_ITE( mkexpr( is_SNAN ), unop( Iop_ReinterpI64asF64, binop( Iop_Xor64, mkU64( SNAN_MASK ), - mkexpr( frB_I64 ) ) ) ); + mkexpr( frB_I64 ) ) ), + mkexpr( frD )); } /* @@ -17577,10 +17577,11 @@ DisResult disInstr_PPC_WRK ( UInt bi = ifieldRegC( theInstr ); putIReg( rT, - IRExpr_Mux0X( binop(Iop_CmpNE32, getCRbit( bi ), mkU32(0)), - getIReg(rB), - rA == 0 ? (mode64 ? mkU64(0) : mkU32(0)) - : getIReg(rA) ) + IRExpr_ITE( binop(Iop_CmpNE32, getCRbit( bi ), mkU32(0)), + rA == 0 ? (mode64 ? mkU64(0) : mkU32(0)) + : getIReg(rA), + getIReg(rB)) + ); DIP("isel r%u,r%u,r%u,crb%u\n", rT,rA,rB,bi); goto decode_success; diff --git a/VEX/priv/guest_s390_toIR.c b/VEX/priv/guest_s390_toIR.c index 9f7eb17ba4..377b5b68ac 100644 --- a/VEX/priv/guest_s390_toIR.c +++ b/VEX/priv/guest_s390_toIR.c @@ -240,7 +240,7 @@ mkite(IRExpr *condition, IRExpr *iftrue, IRExpr *iffalse) { vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1); - return IRExpr_Mux0X(condition, iffalse, iftrue); + return IRExpr_ITE(condition, iftrue, iffalse); } /* Add a statement that stores DATA at ADDR. This is a big-endian machine. */ diff --git a/VEX/priv/guest_x86_toIR.c b/VEX/priv/guest_x86_toIR.c index 3d21b4947e..e9b96fcaf1 100644 --- a/VEX/priv/guest_x86_toIR.c +++ b/VEX/priv/guest_x86_toIR.c @@ -992,23 +992,23 @@ static void setFlags_DEP1_DEP2_shift ( IROp op32, /* DEP1 contains the result, DEP2 contains the undershifted value. */ stmt( IRStmt_Put( OFFB_CC_OP, - IRExpr_Mux0X( mkexpr(guardB), - IRExpr_Get(OFFB_CC_OP,Ity_I32), - mkU32(ccOp))) ); + IRExpr_ITE( mkexpr(guardB), + mkU32(ccOp), + IRExpr_Get(OFFB_CC_OP,Ity_I32) ) )); stmt( IRStmt_Put( OFFB_CC_DEP1, - IRExpr_Mux0X( mkexpr(guardB), - IRExpr_Get(OFFB_CC_DEP1,Ity_I32), - widenUto32(mkexpr(res)))) ); + IRExpr_ITE( mkexpr(guardB), + widenUto32(mkexpr(res)), + IRExpr_Get(OFFB_CC_DEP1,Ity_I32) ) )); stmt( IRStmt_Put( OFFB_CC_DEP2, - IRExpr_Mux0X( mkexpr(guardB), - IRExpr_Get(OFFB_CC_DEP2,Ity_I32), - widenUto32(mkexpr(resUS)))) ); + IRExpr_ITE( mkexpr(guardB), + widenUto32(mkexpr(resUS)), + IRExpr_Get(OFFB_CC_DEP2,Ity_I32) ) )); /* Set NDEP even though it isn't used. This makes redundant-PUT elimination of previous stores to this field work better. */ stmt( IRStmt_Put( OFFB_CC_NDEP, - IRExpr_Mux0X( mkexpr(guardB), - IRExpr_Get(OFFB_CC_NDEP,Ity_I32), - mkU32(0) ))); + IRExpr_ITE( mkexpr(guardB), + mkU32(0), + IRExpr_Get(OFFB_CC_NDEP,Ity_I32) ) )); } @@ -2581,21 +2581,21 @@ UInt dis_Grp2 ( UChar sorb, /* CC_DEP1 is the rotated value. CC_NDEP is flags before. 
*/ stmt( IRStmt_Put( OFFB_CC_OP, - IRExpr_Mux0X( mkexpr(rot_amt32b), - IRExpr_Get(OFFB_CC_OP,Ity_I32), - mkU32(ccOp))) ); + IRExpr_ITE( mkexpr(rot_amt32b), + mkU32(ccOp), + IRExpr_Get(OFFB_CC_OP,Ity_I32) ) )); stmt( IRStmt_Put( OFFB_CC_DEP1, - IRExpr_Mux0X( mkexpr(rot_amt32b), - IRExpr_Get(OFFB_CC_DEP1,Ity_I32), - widenUto32(mkexpr(dst1)))) ); + IRExpr_ITE( mkexpr(rot_amt32b), + widenUto32(mkexpr(dst1)), + IRExpr_Get(OFFB_CC_DEP1,Ity_I32) ) )); stmt( IRStmt_Put( OFFB_CC_DEP2, - IRExpr_Mux0X( mkexpr(rot_amt32b), - IRExpr_Get(OFFB_CC_DEP2,Ity_I32), - mkU32(0))) ); + IRExpr_ITE( mkexpr(rot_amt32b), + mkU32(0), + IRExpr_Get(OFFB_CC_DEP2,Ity_I32) ) )); stmt( IRStmt_Put( OFFB_CC_NDEP, - IRExpr_Mux0X( mkexpr(rot_amt32b), - IRExpr_Get(OFFB_CC_NDEP,Ity_I32), - mkexpr(oldFlags))) ); + IRExpr_ITE( mkexpr(rot_amt32b), + mkexpr(oldFlags), + IRExpr_Get(OFFB_CC_NDEP,Ity_I32) ) )); } /* if (isRotate) */ /* Save result, and finish up. */ @@ -3429,10 +3429,10 @@ static IRTemp gen_LZCNT ( IRType ty, IRTemp src ) // special-case around that. IRTemp res32 = newTemp(Ity_I32); assign(res32, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpEQ32, mkexpr(src32x), mkU32(0)), - unop(Iop_Clz32, mkexpr(src32x)), - mkU32(8 * sizeofIRType(ty)) + mkU32(8 * sizeofIRType(ty)), + unop(Iop_Clz32, mkexpr(src32x)) )); IRTemp res = newTemp(ty); @@ -3569,11 +3569,11 @@ static void put_ST ( Int i, IRExpr* value ) { put_ST_UNCHECKED( i, - IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)), - /* 0 means empty */ - value, - /* non-0 means full */ - mkQNaN64() + IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)), + /* non-0 means full */ + mkQNaN64(), + /* 0 means empty */ + value ) ); } @@ -3595,11 +3595,11 @@ static IRExpr* get_ST_UNCHECKED ( Int i ) static IRExpr* get_ST ( Int i ) { return - IRExpr_Mux0X( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)), - /* 0 means empty */ - mkQNaN64(), - /* non-0 means full */ - get_ST_UNCHECKED(i)); + IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)), + /* non-0 means full */ + get_ST_UNCHECKED(i), + /* 0 means empty */ + mkQNaN64()); } @@ -4532,36 +4532,36 @@ UInt dis_FPU ( Bool* decode_ok, UChar sorb, Int delta ) r_src = (UInt)modrm - 0xC0; DIP("fcmovb %%st(%d), %%st(0)\n", (Int)r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_x86g_calculate_condition(X86CondB), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xC8 ... 0xCF: /* FCMOVE(Z) ST(i), ST(0) */ r_src = (UInt)modrm - 0xC8; DIP("fcmovz %%st(%d), %%st(0)\n", (Int)r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_x86g_calculate_condition(X86CondZ), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xD0 ... 0xD7: /* FCMOVBE ST(i), ST(0) */ r_src = (UInt)modrm - 0xD0; DIP("fcmovbe %%st(%d), %%st(0)\n", (Int)r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_x86g_calculate_condition(X86CondBE), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xD8 ... 
0xDF: /* FCMOVU ST(i), ST(0) */ r_src = (UInt)modrm - 0xD8; DIP("fcmovu %%st(%d), %%st(0)\n", (Int)r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_x86g_calculate_condition(X86CondP), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xE9: /* FUCOMPP %st(0),%st(1) */ @@ -4693,36 +4693,36 @@ UInt dis_FPU ( Bool* decode_ok, UChar sorb, Int delta ) r_src = (UInt)modrm - 0xC0; DIP("fcmovnb %%st(%d), %%st(0)\n", (Int)r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_x86g_calculate_condition(X86CondNB), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xC8 ... 0xCF: /* FCMOVNE(NZ) ST(i), ST(0) */ r_src = (UInt)modrm - 0xC8; DIP("fcmovnz %%st(%d), %%st(0)\n", (Int)r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_x86g_calculate_condition(X86CondNZ), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xD0 ... 0xD7: /* FCMOVNBE ST(i), ST(0) */ r_src = (UInt)modrm - 0xD0; DIP("fcmovnbe %%st(%d), %%st(0)\n", (Int)r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_x86g_calculate_condition(X86CondNBE), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xD8 ... 0xDF: /* FCMOVNU ST(i), ST(0) */ r_src = (UInt)modrm - 0xD8; DIP("fcmovnu %%st(%d), %%st(0)\n", (Int)r_src); put_ST_UNCHECKED(0, - IRExpr_Mux0X( + IRExpr_ITE( mk_x86g_calculate_condition(X86CondNP), - get_ST(0), get_ST(r_src)) ); + get_ST(r_src), get_ST(0)) ); break; case 0xE2: @@ -5644,20 +5644,20 @@ static UInt dis_MMX_shiftG_byE ( UChar sorb, Int delta, if (shl || shr) { assign( g1, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)), - mkU64(0), - binop(op, mkexpr(g0), mkexpr(amt8)) + binop(op, mkexpr(g0), mkexpr(amt8)), + mkU64(0) ) ); } else if (sar) { assign( g1, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)), - binop(op, mkexpr(g0), mkU8(size-1)), - binop(op, mkexpr(g0), mkexpr(amt8)) + binop(op, mkexpr(g0), mkexpr(amt8)), + binop(op, mkexpr(g0), mkU8(size-1)) ) ); } else { @@ -6401,11 +6401,11 @@ UInt dis_bs_E_G ( UChar sorb, Int sz, Int delta, Bool fwds ) stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) )); stmt( IRStmt_Put( OFFB_CC_DEP1, - IRExpr_Mux0X( mkexpr(srcB), - /* src==0 */ - mkU32(X86G_CC_MASK_Z), - /* src!=0 */ - mkU32(0) + IRExpr_ITE( mkexpr(srcB), + /* src!=0 */ + mkU32(0), + /* src==0 */ + mkU32(X86G_CC_MASK_Z) ) )); /* Set NDEP even though it isn't used. This makes redundant-PUT @@ -6437,15 +6437,15 @@ UInt dis_bs_E_G ( UChar sorb, Int sz, Int delta, Bool fwds ) /* The main computation, guarding against zero. */ assign( dst32, - IRExpr_Mux0X( + IRExpr_ITE( mkexpr(srcB), - /* src == 0 -- leave dst unchanged */ - widenUto32( getIReg( sz, gregOfRM(modrm) ) ), /* src != 0 */ fwds ? 
unop(Iop_Ctz32, mkexpr(src32)) : binop(Iop_Sub32, mkU32(31), - unop(Iop_Clz32, mkexpr(src32))) + unop(Iop_Clz32, mkexpr(src32))), + /* src == 0 -- leave dst unchanged */ + widenUto32( getIReg( sz, gregOfRM(modrm) ) ) ) ); @@ -6553,10 +6553,10 @@ UInt dis_cmpxchg_G_E ( UChar sorb, /* There are 3 cases to consider: reg-reg: ignore any lock prefix, generate sequence based - on Mux0X + on ITE reg-mem, not locked: ignore any lock prefix, generate sequence - based on Mux0X + based on ITE reg-mem, locked: use IRCAS */ @@ -6568,8 +6568,8 @@ UInt dis_cmpxchg_G_E ( UChar sorb, assign( acc, getIReg(size, R_EAX) ); setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty); assign( cond, mk_x86g_calculate_condition(X86CondZ) ); - assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) ); - assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) ); + assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) ); + assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) ); putIReg(size, R_EAX, mkexpr(acc2)); putIReg(size, eregOfRM(rm), mkexpr(dest2)); DIP("cmpxchg%c %s,%s\n", nameISize(size), @@ -6585,8 +6585,8 @@ UInt dis_cmpxchg_G_E ( UChar sorb, assign( acc, getIReg(size, R_EAX) ); setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty); assign( cond, mk_x86g_calculate_condition(X86CondZ) ); - assign( dest2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(src)) ); - assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) ); + assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) ); + assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) ); putIReg(size, R_EAX, mkexpr(acc2)); storeLE( mkexpr(addr), mkexpr(dest2) ); DIP("cmpxchg%c %s,%s\n", nameISize(size), @@ -6608,7 +6608,7 @@ UInt dis_cmpxchg_G_E ( UChar sorb, )); setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty); assign( cond, mk_x86g_calculate_condition(X86CondZ) ); - assign( acc2, IRExpr_Mux0X(mkexpr(cond), mkexpr(dest), mkexpr(acc)) ); + assign( acc2, IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) ); putIReg(size, R_EAX, mkexpr(acc2)); DIP("cmpxchg%c %s,%s\n", nameISize(size), nameIReg(size,gregOfRM(rm)), dis_buf); @@ -6655,9 +6655,9 @@ UInt dis_cmov_E_G ( UChar sorb, assign( tmpd, getIReg(sz, gregOfRM(rm)) ); putIReg(sz, gregOfRM(rm), - IRExpr_Mux0X( mk_x86g_calculate_condition(cond), - mkexpr(tmpd), - mkexpr(tmps) ) + IRExpr_ITE( mk_x86g_calculate_condition(cond), + mkexpr(tmps), + mkexpr(tmpd) ) ); DIP("cmov%c%s %s,%s\n", nameISize(sz), name_X86Condcode(cond), @@ -6673,9 +6673,9 @@ UInt dis_cmov_E_G ( UChar sorb, assign( tmpd, getIReg(sz, gregOfRM(rm)) ); putIReg(sz, gregOfRM(rm), - IRExpr_Mux0X( mk_x86g_calculate_condition(cond), - mkexpr(tmpd), - mkexpr(tmps) ) + IRExpr_ITE( mk_x86g_calculate_condition(cond), + mkexpr(tmps), + mkexpr(tmpd) ) ); DIP("cmov%c%s %s,%s\n", nameISize(sz), @@ -7303,20 +7303,20 @@ static UInt dis_SSE_shiftG_byE ( UChar sorb, Int delta, if (shl || shr) { assign( g1, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)), - mkV128(0x0000), - binop(op, mkexpr(g0), mkexpr(amt8)) + binop(op, mkexpr(g0), mkexpr(amt8)), + mkV128(0x0000) ) ); } else if (sar) { assign( g1, - IRExpr_Mux0X( + IRExpr_ITE( binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)), - binop(op, mkexpr(g0), mkU8(size-1)), - binop(op, mkexpr(g0), mkexpr(amt8)) + binop(op, mkexpr(g0), mkexpr(amt8)), + binop(op, mkexpr(g0), mkU8(size-1)) ) ); } else { @@ -7518,38 +7518,38 @@ void set_EFLAGS_from_value ( IRTemp t1, If zero, put 1 in OFFB_DFLAG, else -1 in OFFB_DFLAG. 
*/ stmt( IRStmt_Put( OFFB_DFLAG, - IRExpr_Mux0X( + IRExpr_ITE( unop(Iop_32to1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(t1), mkU8(10)), mkU32(1))), - mkU32(1), - mkU32(0xFFFFFFFF))) + mkU32(0xFFFFFFFF), + mkU32(1))) ); /* Set the ID flag */ stmt( IRStmt_Put( OFFB_IDFLAG, - IRExpr_Mux0X( + IRExpr_ITE( unop(Iop_32to1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(t1), mkU8(21)), mkU32(1))), - mkU32(0), - mkU32(1))) + mkU32(1), + mkU32(0))) ); /* And set the AC flag. If setting it 1 to, possibly emit an emulation warning. */ stmt( IRStmt_Put( OFFB_ACFLAG, - IRExpr_Mux0X( + IRExpr_ITE( unop(Iop_32to1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(t1), mkU8(18)), mkU32(1))), - mkU32(0), - mkU32(1))) + mkU32(1), + mkU32(0))) ); if (emit_AC_emwarn) { @@ -14625,12 +14625,12 @@ DisResult disInstr_X86_WRK ( unchanged. If the DCAS fails then we're putting into EDX:EAX the value seen in memory. */ putIReg(4, R_EDX, - IRExpr_Mux0X( mkexpr(success), - mkexpr(oldHi), mkexpr(expdHi) + IRExpr_ITE( mkexpr(success), + mkexpr(expdHi), mkexpr(oldHi) )); putIReg(4, R_EAX, - IRExpr_Mux0X( mkexpr(success), - mkexpr(oldLo), mkexpr(expdLo) + IRExpr_ITE( mkexpr(success), + mkexpr(expdLo), mkexpr(oldLo) )); /* Copy the success bit into the Z flag and leave the others diff --git a/VEX/priv/host_amd64_isel.c b/VEX/priv/host_amd64_isel.c index 495269f04f..6fe511dc7a 100644 --- a/VEX/priv/host_amd64_isel.c +++ b/VEX/priv/host_amd64_isel.c @@ -1758,14 +1758,14 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) } /* --------- MULTIPLEX --------- */ - case Iex_Mux0X: { // VFD + case Iex_ITE: { // VFD if ((ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) - && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) { - HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX); - AMD64RM* r0 = iselIntExpr_RM(env, e->Iex.Mux0X.expr0); + && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) { + HReg r1 = iselIntExpr_R(env, e->Iex.ITE.iftrue); + AMD64RM* r0 = iselIntExpr_RM(env, e->Iex.ITE.iffalse); HReg dst = newVRegI(env); - addInstr(env, mk_iMOVsd_RR(rX,dst)); - AMD64CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond); + addInstr(env, mk_iMOVsd_RR(r1,dst)); + AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond); addInstr(env, AMD64Instr_CMov64(cc ^ 1, r0, dst)); return dst; } @@ -2860,15 +2860,15 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) } /* --------- MULTIPLEX --------- */ - if (e->tag == Iex_Mux0X) { // VFD - HReg rX, r0, dst; + if (e->tag == Iex_ITE) { // VFD + HReg r1, r0, dst; vassert(ty == Ity_F64); - vassert(typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1); - rX = iselDblExpr(env, e->Iex.Mux0X.exprX); - r0 = iselDblExpr(env, e->Iex.Mux0X.expr0); + vassert(typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1); + r1 = iselDblExpr(env, e->Iex.ITE.iftrue); + r0 = iselDblExpr(env, e->Iex.ITE.iffalse); dst = newVRegV(env); - addInstr(env, mk_vMOVsd_RR(rX,dst)); - AMD64CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond); + addInstr(env, mk_vMOVsd_RR(r1,dst)); + AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond); addInstr(env, AMD64Instr_SseCMov(cc ^ 1, r0, dst)); return dst; } @@ -3438,12 +3438,12 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e ) } /* switch (e->Iex.Binop.op) */ } /* if (e->tag == Iex_Binop) */ - if (e->tag == Iex_Mux0X) { // VFD - HReg rX = iselVecExpr(env, e->Iex.Mux0X.exprX); - HReg r0 = iselVecExpr(env, e->Iex.Mux0X.expr0); + if (e->tag == Iex_ITE) { // VFD + HReg r1 = iselVecExpr(env, e->Iex.ITE.iftrue); + HReg r0 = iselVecExpr(env, e->Iex.ITE.iffalse); 
HReg dst = newVRegV(env); - addInstr(env, mk_vMOVsd_RR(rX,dst)); - HReg cc = iselCondCode(env, e->Iex.Mux0X.cond); + addInstr(env, mk_vMOVsd_RR(r1,dst)); + HReg cc = iselCondCode(env, e->Iex.ITE.cond); addInstr(env, AMD64Instr_SseCMov(cc ^ 1, r0, dst)); return dst; } diff --git a/VEX/priv/host_arm_isel.c b/VEX/priv/host_arm_isel.c index fd1f223669..c3217de764 100644 --- a/VEX/priv/host_arm_isel.c +++ b/VEX/priv/host_arm_isel.c @@ -1761,15 +1761,15 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) } /* --------- MULTIPLEX --------- */ - case Iex_Mux0X: { // VFD - /* Mux0X(ccexpr, expr0, exprX) */ + case Iex_ITE: { // VFD + /* ITE(ccexpr, iftrue, iffalse) */ if (ty == Ity_I32) { ARMCondCode cc; - HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX); - ARMRI84* r0 = iselIntExpr_RI84(NULL, False, env, e->Iex.Mux0X.expr0); + HReg r1 = iselIntExpr_R(env, e->Iex.ITE.iftrue); + ARMRI84* r0 = iselIntExpr_RI84(NULL, False, env, e->Iex.ITE.iffalse); HReg dst = newVRegI(env); - addInstr(env, mk_iMOVds_RR(dst, rX)); - cc = iselCondCode(env, e->Iex.Mux0X.cond); + addInstr(env, mk_iMOVds_RR(dst, r1)); + cc = iselCondCode(env, e->Iex.ITE.cond); addInstr(env, ARMInstr_CMov(cc ^ 1, dst, r0)); return dst; } @@ -2016,19 +2016,19 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e ) } /* if (e->tag == Iex_Unop) */ /* --------- MULTIPLEX --------- */ - if (e->tag == Iex_Mux0X) { // VFD + if (e->tag == Iex_ITE) { // VFD IRType tyC; - HReg rXhi, rXlo, r0hi, r0lo, dstHi, dstLo; + HReg r1hi, r1lo, r0hi, r0lo, dstHi, dstLo; ARMCondCode cc; - tyC = typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond); + tyC = typeOfIRExpr(env->type_env,e->Iex.ITE.cond); vassert(tyC == Ity_I1); - iselInt64Expr(&rXhi, &rXlo, env, e->Iex.Mux0X.exprX); - iselInt64Expr(&r0hi, &r0lo, env, e->Iex.Mux0X.expr0); + iselInt64Expr(&r1hi, &r1lo, env, e->Iex.ITE.iftrue); + iselInt64Expr(&r0hi, &r0lo, env, e->Iex.ITE.iffalse); dstHi = newVRegI(env); dstLo = newVRegI(env); - addInstr(env, mk_iMOVds_RR(dstHi, rXhi)); - addInstr(env, mk_iMOVds_RR(dstLo, rXlo)); - cc = iselCondCode(env, e->Iex.Mux0X.cond); + addInstr(env, mk_iMOVds_RR(dstHi, r1hi)); + addInstr(env, mk_iMOVds_RR(dstLo, r1lo)); + cc = iselCondCode(env, e->Iex.ITE.cond); addInstr(env, ARMInstr_CMov(cc ^ 1, dstHi, ARMRI84_R(r0hi))); addInstr(env, ARMInstr_CMov(cc ^ 1, dstLo, ARMRI84_R(r0lo))); *rHi = dstHi; @@ -3660,7 +3660,7 @@ static HReg iselNeon64Expr_wrk ( ISelEnv* env, IRExpr* e ) } /* --------- MULTIPLEX --------- */ - if (e->tag == Iex_Mux0X) { // VFD + if (e->tag == Iex_ITE) { // VFD HReg rLo, rHi; HReg res = newVRegD(env); iselInt64Expr(&rHi, &rLo, env, e); @@ -5286,13 +5286,13 @@ static HReg iselNeonExpr_wrk ( ISelEnv* env, IRExpr* e ) } } - if (e->tag == Iex_Mux0X) { // VFD + if (e->tag == Iex_ITE) { // VFD ARMCondCode cc; - HReg rX = iselNeonExpr(env, e->Iex.Mux0X.exprX); - HReg r0 = iselNeonExpr(env, e->Iex.Mux0X.expr0); + HReg r1 = iselNeonExpr(env, e->Iex.ITE.iftrue); + HReg r0 = iselNeonExpr(env, e->Iex.ITE.iffalse); HReg dst = newVRegV(env); - addInstr(env, ARMInstr_NUnary(ARMneon_COPY, dst, rX, 4, True)); - cc = iselCondCode(env, e->Iex.Mux0X.cond); + addInstr(env, ARMInstr_NUnary(ARMneon_COPY, dst, r1, 4, True)); + cc = iselCondCode(env, e->Iex.ITE.cond); addInstr(env, ARMInstr_NCMovQ(cc ^ 1, dst, r0)); return dst; } @@ -5453,14 +5453,14 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) } } - if (e->tag == Iex_Mux0X) { // VFD + if (e->tag == Iex_ITE) { // VFD if (ty == Ity_F64 - && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == 
Ity_I1) { - HReg rX = iselDblExpr(env, e->Iex.Mux0X.exprX); - HReg r0 = iselDblExpr(env, e->Iex.Mux0X.expr0); + && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) { + HReg r1 = iselDblExpr(env, e->Iex.ITE.iftrue); + HReg r0 = iselDblExpr(env, e->Iex.ITE.iffalse); HReg dst = newVRegD(env); - addInstr(env, ARMInstr_VUnaryD(ARMvfpu_COPY, dst, rX)); - ARMCondCode cc = iselCondCode(env, e->Iex.Mux0X.cond); + addInstr(env, ARMInstr_VUnaryD(ARMvfpu_COPY, dst, r1)); + ARMCondCode cc = iselCondCode(env, e->Iex.ITE.cond); addInstr(env, ARMInstr_VCMovD(cc ^ 1, dst, r0)); return dst; } @@ -5595,15 +5595,15 @@ static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e ) } } - if (e->tag == Iex_Mux0X) { // VFD + if (e->tag == Iex_ITE) { // VFD if (ty == Ity_F32 - && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) { + && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) { ARMCondCode cc; - HReg rX = iselFltExpr(env, e->Iex.Mux0X.exprX); - HReg r0 = iselFltExpr(env, e->Iex.Mux0X.expr0); + HReg r1 = iselFltExpr(env, e->Iex.ITE.iftrue); + HReg r0 = iselFltExpr(env, e->Iex.ITE.iffalse); HReg dst = newVRegF(env); - addInstr(env, ARMInstr_VUnaryS(ARMvfpu_COPY, dst, rX)); - cc = iselCondCode(env, e->Iex.Mux0X.cond); + addInstr(env, ARMInstr_VUnaryS(ARMvfpu_COPY, dst, r1)); + cc = iselCondCode(env, e->Iex.ITE.cond); addInstr(env, ARMInstr_VCMovS(cc ^ 1, dst, r0)); return dst; } diff --git a/VEX/priv/host_mips_isel.c b/VEX/priv/host_mips_isel.c index 4bc2242fff..ec9b7246b4 100644 --- a/VEX/priv/host_mips_isel.c +++ b/VEX/priv/host_mips_isel.c @@ -1421,25 +1421,25 @@ static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e) } /* --------- MULTIPLEX --------- */ - case Iex_Mux0X: { + case Iex_ITE: { if ((ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ((ty == Ity_I64))) && - typeOfIRExpr(env->type_env, e->Iex.Mux0X.cond) == Ity_I8) { + typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I8) { /* * r_dst = cond && rX * cond = not(cond) * tmp = cond && r0 * r_dst = tmp + r_dst */ - HReg r0 = iselWordExpr_R(env, e->Iex.Mux0X.expr0); - HReg rX = iselWordExpr_R(env, e->Iex.Mux0X.exprX); - HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond); + HReg r0 = iselWordExpr_R(env, e->Iex.ITE.iffalse); + HReg r1 = iselWordExpr_R(env, e->Iex.ITE.iftrue); + HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond); HReg r_dst = newVRegI(env); HReg r_tmp = newVRegI(env); HReg r_tmp1 = newVRegI(env); HReg r_cond_neg = newVRegI(env); - addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp, r_cond, MIPSRH_Reg(rX))); + addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp, r_cond, MIPSRH_Reg(r1))); addInstr(env, MIPSInstr_Alu(Malu_NOR, r_cond_neg, r_cond, MIPSRH_Reg(r_cond))); addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp1, r_cond_neg, @@ -1952,34 +1952,34 @@ static void iselInt64Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e) return; } - /* 64-bit Mux0X */ - if (e->tag == Iex_Mux0X) { + /* 64-bit ITE */ + if (e->tag == Iex_ITE) { HReg expr0Lo, expr0Hi; - HReg exprXLo, exprXHi; + HReg expr1Lo, expr1Hi; HReg tmpHi = newVRegI(env); HReg tmpLo = newVRegI(env); HReg tmp1Hi = newVRegI(env); HReg tmp1Lo = newVRegI(env); - HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond); + HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond); HReg r_cond_neg = newVRegI(env); HReg desLo = newVRegI(env); HReg desHi = newVRegI(env); - /* expr0Hi:expr0Lo = expr0 */ - /* exprXHi:exprXLo = exprX */ - iselInt64Expr(&expr0Hi, &expr0Lo, env, e->Iex.Mux0X.expr0); - iselInt64Expr(&exprXHi, &exprXLo, env, e->Iex.Mux0X.exprX); + /* expr0Hi:expr0Lo = iffalse */ + /* 
expr1Hi:expr1Lo = iftrue */ + iselInt64Expr(&expr0Hi, &expr0Lo, env, e->Iex.ITE.iffalse); + iselInt64Expr(&expr1Hi, &expr1Lo, env, e->Iex.ITE.iftrue); addInstr(env, MIPSInstr_Alu(Malu_AND, tmpLo, r_cond, - MIPSRH_Reg(exprXLo))); + MIPSRH_Reg(expr1Lo))); addInstr(env, MIPSInstr_Alu(Malu_AND, tmpHi, r_cond, - MIPSRH_Reg(exprXHi))); + MIPSRH_Reg(expr1Hi))); addInstr(env, MIPSInstr_Alu(Malu_NOR, r_cond_neg, r_cond, MIPSRH_Reg(r_cond))); addInstr(env, MIPSInstr_Alu(Malu_AND, tmp1Lo, r_cond_neg, - MIPSRH_Reg(exprXLo))); + MIPSRH_Reg(expr1Lo))); addInstr(env, MIPSInstr_Alu(Malu_AND, tmp1Hi, r_cond_neg, - MIPSRH_Reg(exprXHi))); + MIPSRH_Reg(expr1Hi))); addInstr(env, MIPSInstr_Alu(Malu_ADD, desLo, tmpLo, MIPSRH_Reg(tmp1Lo))); addInstr(env, MIPSInstr_Alu(Malu_ADD, desHi, tmpHi, @@ -2676,12 +2676,12 @@ static HReg iselDblExpr_wrk(ISelEnv * env, IRExpr * e) } /* --------- MULTIPLEX --------- */ - if (e->tag == Iex_Mux0X) { + if (e->tag == Iex_ITE) { if (ty == Ity_F64 - && typeOfIRExpr(env->type_env, e->Iex.Mux0X.cond) == Ity_I8) { - HReg r0 = iselDblExpr(env, e->Iex.Mux0X.expr0); - HReg rX = iselDblExpr(env, e->Iex.Mux0X.exprX); - HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond); + && typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I8) { + HReg r0 = iselDblExpr(env, e->Iex.ITE.iffalse); + HReg r1 = iselDblExpr(env, e->Iex.ITE.iftrue); + HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond); HReg r_cond_neg = newVRegI(env); HReg r_dst = newVRegD(env); HReg r_tmp_lo = newVRegI(env); @@ -2690,8 +2690,8 @@ static HReg iselDblExpr_wrk(ISelEnv * env, IRExpr * e) HReg r_tmp1_hi = newVRegI(env); HReg r_r0_lo = newVRegI(env); HReg r_r0_hi = newVRegI(env); - HReg r_rX_lo = newVRegI(env); - HReg r_rX_hi = newVRegI(env); + HReg r_r1_lo = newVRegI(env); + HReg r_r1_hi = newVRegI(env); HReg r_dst_lo = newVRegI(env); HReg r_dst_hi = newVRegI(env); @@ -2720,19 +2720,19 @@ static HReg iselDblExpr_wrk(ISelEnv * env, IRExpr * e) am_addr = MIPSAMode_IR(0, StackPointer(mode64)); // store as Ity_F64 - addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, rX, am_addr)); + addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, r1, am_addr)); // load as 2xI32 - addInstr(env, MIPSInstr_Load(4, r_rX_lo, am_addr, mode64)); - addInstr(env, MIPSInstr_Load(4, r_rX_hi, nextMIPSAModeFloat(am_addr), + addInstr(env, MIPSInstr_Load(4, r_r1_lo, am_addr, mode64)); + addInstr(env, MIPSInstr_Load(4, r_r1_hi, nextMIPSAModeFloat(am_addr), mode64)); add_to_sp(env, 16); // Reset SP addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp1_lo, r_cond_neg, - MIPSRH_Reg(r_rX_lo))); + MIPSRH_Reg(r_r1_lo))); addInstr(env, MIPSInstr_Alu(Malu_AND, r_tmp1_hi, r_cond_neg, - MIPSRH_Reg(r_rX_hi))); + MIPSRH_Reg(r_r1_hi))); addInstr(env, MIPSInstr_Alu(Malu_ADD, r_dst_lo, r_tmp_lo, MIPSRH_Reg(r_tmp1_lo))); diff --git a/VEX/priv/host_ppc_isel.c b/VEX/priv/host_ppc_isel.c index 2f94d6e1ba..00a8cffb28 100644 --- a/VEX/priv/host_ppc_isel.c +++ b/VEX/priv/host_ppc_isel.c @@ -2229,16 +2229,16 @@ static HReg iselWordExpr_R_wrk ( ISelEnv* env, IRExpr* e ) } /* --------- MULTIPLEX --------- */ - case Iex_Mux0X: { // VFD + case Iex_ITE: { // VFD if ((ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ((ty == Ity_I64) && mode64)) && - typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) { - PPCRI* rX = iselWordExpr_RI(env, e->Iex.Mux0X.exprX); - HReg r0 = iselWordExpr_R(env, e->Iex.Mux0X.expr0); + typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) { + PPCRI* r1 = iselWordExpr_RI(env, e->Iex.ITE.iftrue); + HReg r0 = iselWordExpr_R(env, e->Iex.ITE.iffalse); HReg r_dst = 
newVRegI(env); addInstr(env, mk_iMOVds_RR(r_dst,r0)); - PPCCondCode cc = iselCondCode(env, e->Iex.Mux0X.cond); - addInstr(env, PPCInstr_CMov(cc, r_dst, rX)); + PPCCondCode cc = iselCondCode(env, e->Iex.ITE.cond); + addInstr(env, PPCInstr_CMov(cc, r_dst, r1)); return r_dst; } break; @@ -3002,16 +3002,16 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, return; } - /* 64-bit Mux0X */ - if (e->tag == Iex_Mux0X) { // VFD + /* 64-bit ITE */ + if (e->tag == Iex_ITE) { // VFD HReg e0Lo, e0Hi, eXLo, eXHi; - iselInt64Expr(&eXHi, &eXLo, env, e->Iex.Mux0X.exprX); - iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.Mux0X.expr0); + iselInt64Expr(&eXHi, &eXLo, env, e->Iex.ITE.iftrue); + iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.ITE.iffalse); HReg tLo = newVRegI(env); HReg tHi = newVRegI(env); addInstr(env, mk_iMOVds_RR(tHi,e0Hi)); addInstr(env, mk_iMOVds_RR(tLo,e0Lo)); - PPCCondCode cc = iselCondCode(env, e->Iex.Mux0X.cond); + PPCCondCode cc = iselCondCode(env, e->Iex.ITE.cond); addInstr(env, PPCInstr_CMov(cc,tHi,PPCRI_Reg(eXHi))); addInstr(env, PPCInstr_CMov(cc,tLo,PPCRI_Reg(eXLo))); *rHi = tHi; @@ -3957,15 +3957,15 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e ) } /* --------- MULTIPLEX --------- */ - if (e->tag == Iex_Mux0X) { // VFD + if (e->tag == Iex_ITE) { // VFD if (ty == Ity_F64 - && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) { - HReg frX = iselDblExpr(env, e->Iex.Mux0X.exprX); - HReg fr0 = iselDblExpr(env, e->Iex.Mux0X.expr0); + && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) { + HReg fr1 = iselDblExpr(env, e->Iex.ITE.iftrue); + HReg fr0 = iselDblExpr(env, e->Iex.ITE.iffalse); HReg fr_dst = newVRegF(env); addInstr(env, PPCInstr_FpUnary( Pfp_MOV, fr_dst, fr0 )); - PPCCondCode cc = iselCondCode(env, e->Iex.Mux0X.cond); - addInstr(env, PPCInstr_FpCMov( cc, fr_dst, frX )); + PPCCondCode cc = iselCondCode(env, e->Iex.ITE.cond); + addInstr(env, PPCInstr_FpCMov( cc, fr_dst, fr1 )); return fr_dst; } } diff --git a/VEX/priv/host_s390_isel.c b/VEX/priv/host_s390_isel.c index d8cbd279e7..6af9d7b2af 100644 --- a/VEX/priv/host_s390_isel.c +++ b/VEX/priv/host_s390_isel.c @@ -1740,23 +1740,23 @@ s390_isel_int_expr_wrk(ISelEnv *env, IRExpr *expr) } /* --------- MULTIPLEX --------- */ - case Iex_Mux0X: { + case Iex_ITE: { IRExpr *cond_expr; - HReg dst, rX; + HReg dst, r1; s390_opnd_RMI r0; - cond_expr = expr->Iex.Mux0X.cond; + cond_expr = expr->Iex.ITE.cond; vassert(typeOfIRExpr(env->type_env, cond_expr) == Ity_I1); dst = newVRegI(env); - r0 = s390_isel_int_expr_RMI(env, expr->Iex.Mux0X.expr0); - rX = s390_isel_int_expr(env, expr->Iex.Mux0X.exprX); - size = sizeofIRType(typeOfIRExpr(env->type_env, expr->Iex.Mux0X.exprX)); + r0 = s390_isel_int_expr_RMI(env, expr->Iex.ITE.iffalse); + r1 = s390_isel_int_expr(env, expr->Iex.ITE.iftrue); + size = sizeofIRType(typeOfIRExpr(env->type_env, expr->Iex.ITE.iftrue)); s390_cc_t cc = s390_isel_cc(env, cond_expr); - addInstr(env, s390_insn_move(size, dst, rX)); + addInstr(env, s390_insn_move(size, dst, r1)); addInstr(env, s390_insn_cond_move(size, s390_cc_invert(cc), dst, r0)); return dst; } diff --git a/VEX/priv/host_x86_isel.c b/VEX/priv/host_x86_isel.c index 692f2f911f..e59af20572 100644 --- a/VEX/priv/host_x86_isel.c +++ b/VEX/priv/host_x86_isel.c @@ -1388,14 +1388,14 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e ) } /* --------- MULTIPLEX --------- */ - case Iex_Mux0X: { // VFD + case Iex_ITE: { // VFD if ((ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) - && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) { - HReg rX 
diff --git a/VEX/priv/host_x86_isel.c b/VEX/priv/host_x86_isel.c
index 692f2f911f..e59af20572 100644
--- a/VEX/priv/host_x86_isel.c
+++ b/VEX/priv/host_x86_isel.c
@@ -1388,14 +1388,14 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
}
/* --------- MULTIPLEX --------- */
- case Iex_Mux0X: { // VFD
+ case Iex_ITE: { // VFD
if ((ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
- HReg rX = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
- X86RM* r0 = iselIntExpr_RM(env, e->Iex.Mux0X.expr0);
+ && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+ HReg r1 = iselIntExpr_R(env, e->Iex.ITE.iftrue);
+ X86RM* r0 = iselIntExpr_RM(env, e->Iex.ITE.iffalse);
HReg dst = newVRegI(env);
- addInstr(env, mk_iMOVsd_RR(rX,dst));
- X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, mk_iMOVsd_RR(r1,dst));
+ X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, X86Instr_CMov32(cc ^ 1, r0, dst));
return dst;
}
@@ -2051,16 +2051,16 @@ static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
return;
}
- /* 64-bit Mux0X: Mux0X(g, expr, expr) */ // VFD
- if (e->tag == Iex_Mux0X) {
- HReg e0Lo, e0Hi, eXLo, eXHi;
+ /* 64-bit ITE: ITE(g, expr, expr) */ // VFD
+ if (e->tag == Iex_ITE) {
+ HReg e0Lo, e0Hi, e1Lo, e1Hi;
HReg tLo = newVRegI(env);
HReg tHi = newVRegI(env);
- iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.Mux0X.expr0);
- iselInt64Expr(&eXHi, &eXLo, env, e->Iex.Mux0X.exprX);
- addInstr(env, mk_iMOVsd_RR(eXHi, tHi));
- addInstr(env, mk_iMOVsd_RR(eXLo, tLo));
- X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.ITE.iffalse);
+ iselInt64Expr(&e1Hi, &e1Lo, env, e->Iex.ITE.iftrue);
+ addInstr(env, mk_iMOVsd_RR(e1Hi, tHi));
+ addInstr(env, mk_iMOVsd_RR(e1Lo, tLo));
+ X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
/* This assumes the first cmov32 doesn't trash the condition
codes, so they are still available for the second cmov32 */
addInstr(env, X86Instr_CMov32(cc ^ 1, X86RM_Reg(e0Hi), tHi));
@@ -3095,14 +3095,14 @@ static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e )
}
/* --------- MULTIPLEX --------- */
- if (e->tag == Iex_Mux0X) { // VFD
+ if (e->tag == Iex_ITE) { // VFD
if (ty == Ity_F64
- && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I1) {
- HReg rX = iselDblExpr(env, e->Iex.Mux0X.exprX);
- HReg r0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
+ && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+ HReg r1 = iselDblExpr(env, e->Iex.ITE.iftrue);
+ HReg r0 = iselDblExpr(env, e->Iex.ITE.iffalse);
HReg dst = newVRegF(env);
- addInstr(env, X86Instr_FpUnary(Xfp_MOV,rX,dst));
- X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, X86Instr_FpUnary(Xfp_MOV,r1,dst));
+ X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, X86Instr_FpCMov(cc ^ 1, r0, dst));
return dst;
}
@@ -3672,12 +3672,12 @@ static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e )
} /* switch (e->Iex.Binop.op) */
} /* if (e->tag == Iex_Binop) */
- if (e->tag == Iex_Mux0X) { // VFD
- HReg rX = iselVecExpr(env, e->Iex.Mux0X.exprX);
- HReg r0 = iselVecExpr(env, e->Iex.Mux0X.expr0);
+ if (e->tag == Iex_ITE) { // VFD
+ HReg r1 = iselVecExpr(env, e->Iex.ITE.iftrue);
+ HReg r0 = iselVecExpr(env, e->Iex.ITE.iffalse);
HReg dst = newVRegV(env);
- addInstr(env, mk_vMOVsd_RR(rX,dst));
- X86CondCode cc = iselCondCode(env, e->Iex.Mux0X.cond);
+ addInstr(env, mk_vMOVsd_RR(r1,dst));
+ X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
addInstr(env, X86Instr_SseCMov(cc ^ 1, r0, dst));
return dst;
}
diff --git a/VEX/priv/ir_defs.c b/VEX/priv/ir_defs.c
index 859809bd36..39be267f0f 100644
--- a/VEX/priv/ir_defs.c
+++ b/VEX/priv/ir_defs.c
@@ -1132,13 +1132,13 @@ void ppIRExpr ( IRExpr* e )
vex_printf("):");
ppIRType(e->Iex.CCall.retty);
break;
- case Iex_Mux0X:
- vex_printf("Mux0X(");
- ppIRExpr(e->Iex.Mux0X.cond);
+ case Iex_ITE:
+ vex_printf("ITE(");
+ ppIRExpr(e->Iex.ITE.cond);
vex_printf(",");
- ppIRExpr(e->Iex.Mux0X.expr0);
+ ppIRExpr(e->Iex.ITE.iftrue);
vex_printf(",");
- ppIRExpr(e->Iex.Mux0X.exprX);
+ ppIRExpr(e->Iex.ITE.iffalse);
vex_printf(")");
break;
default:
@@ -1644,12 +1644,12 @@ IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args ) {
e->Iex.CCall.args = args;
return e;
}
-IRExpr* IRExpr_Mux0X ( IRExpr* cond, IRExpr* expr0, IRExpr* exprX ) {
+IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse ) {
IRExpr* e = LibVEX_Alloc(sizeof(IRExpr));
- e->tag = Iex_Mux0X;
- e->Iex.Mux0X.cond = cond;
- e->Iex.Mux0X.expr0 = expr0;
- e->Iex.Mux0X.exprX = exprX;
+ e->tag = Iex_ITE;
+ e->Iex.ITE.cond = cond;
+ e->Iex.ITE.iftrue = iftrue;
+ e->Iex.ITE.iffalse = iffalse;
return e;
}
@@ -2074,10 +2074,10 @@ IRExpr* deepCopyIRExpr ( IRExpr* e )
e->Iex.CCall.retty,
deepCopyIRExprVec(e->Iex.CCall.args));
- case Iex_Mux0X:
- return IRExpr_Mux0X(deepCopyIRExpr(e->Iex.Mux0X.cond),
- deepCopyIRExpr(e->Iex.Mux0X.expr0),
- deepCopyIRExpr(e->Iex.Mux0X.exprX));
+ case Iex_ITE:
+ return IRExpr_ITE(deepCopyIRExpr(e->Iex.ITE.cond),
+ deepCopyIRExpr(e->Iex.ITE.iftrue),
+ deepCopyIRExpr(e->Iex.ITE.iffalse));
default:
vpanic("deepCopyIRExpr");
}
@@ -3156,10 +3156,10 @@ IRType typeOfIRExpr ( IRTypeEnv* tyenv, IRExpr* e )
return t_dst;
case Iex_CCall: return e->Iex.CCall.retty;
- case Iex_Mux0X:
- e = e->Iex.Mux0X.expr0;
+ case Iex_ITE:
+ e = e->Iex.ITE.iffalse;
goto start;
- /* return typeOfIRExpr(tyenv, e->Iex.Mux0X.expr0); */
+ /* return typeOfIRExpr(tyenv, e->Iex.ITE.iffalse); */
case Iex_Binder:
vpanic("typeOfIRExpr: Binder is not a valid expression");
default:
@@ -3250,10 +3250,10 @@ Bool isFlatIRStmt ( IRStmt* st )
if (!isIRAtom(e->Iex.CCall.args[i])) return False;
return True;
- case Iex_Mux0X: return toBool (
- isIRAtom(e->Iex.Mux0X.cond)
- && isIRAtom(e->Iex.Mux0X.expr0)
- && isIRAtom(e->Iex.Mux0X.exprX));
+ case Iex_ITE: return toBool (
+ isIRAtom(e->Iex.ITE.cond)
+ && isIRAtom(e->Iex.ITE.iftrue)
+ && isIRAtom(e->Iex.ITE.iffalse));
default: vpanic("isFlatIRStmt(e)");
}
/*notreached*/
@@ -3430,10 +3430,10 @@ void useBeforeDef_Expr ( IRSB* bb, IRStmt* stmt, IRExpr* expr, Int* def_counts )
for (i = 0; expr->Iex.CCall.args[i]; i++)
useBeforeDef_Expr(bb,stmt,expr->Iex.CCall.args[i],def_counts);
break;
- case Iex_Mux0X:
- useBeforeDef_Expr(bb,stmt,expr->Iex.Mux0X.cond,def_counts);
- useBeforeDef_Expr(bb,stmt,expr->Iex.Mux0X.expr0,def_counts);
- useBeforeDef_Expr(bb,stmt,expr->Iex.Mux0X.exprX,def_counts);
+ case Iex_ITE:
+ useBeforeDef_Expr(bb,stmt,expr->Iex.ITE.cond,def_counts);
+ useBeforeDef_Expr(bb,stmt,expr->Iex.ITE.iftrue,def_counts);
+ useBeforeDef_Expr(bb,stmt,expr->Iex.ITE.iffalse,def_counts);
break;
default:
vpanic("useBeforeDef_Expr");
@@ -3705,15 +3705,15 @@ void tcExpr ( IRSB* bb, IRStmt* stmt, IRExpr* expr, IRType gWordTy )
if (!saneIRConst(expr->Iex.Const.con))
sanityCheckFail(bb,stmt,"Iex.Const.con: invalid const");
break;
- case Iex_Mux0X:
- tcExpr(bb,stmt, expr->Iex.Mux0X.cond, gWordTy);
- tcExpr(bb,stmt, expr->Iex.Mux0X.expr0, gWordTy);
- tcExpr(bb,stmt, expr->Iex.Mux0X.exprX, gWordTy);
- if (typeOfIRExpr(tyenv, expr->Iex.Mux0X.cond) != Ity_I1)
- sanityCheckFail(bb,stmt,"Iex.Mux0X.cond: cond :: Ity_I1");
- if (typeOfIRExpr(tyenv, expr->Iex.Mux0X.expr0)
- != typeOfIRExpr(tyenv, expr->Iex.Mux0X.exprX))
- sanityCheckFail(bb,stmt,"Iex.Mux0X: expr0/exprX mismatch");
+ case Iex_ITE:
+ tcExpr(bb,stmt, expr->Iex.ITE.cond, gWordTy);
+ tcExpr(bb,stmt, expr->Iex.ITE.iftrue, gWordTy);
+ tcExpr(bb,stmt, expr->Iex.ITE.iffalse, gWordTy);
+ if (typeOfIRExpr(tyenv, expr->Iex.ITE.cond) != Ity_I1)
+ sanityCheckFail(bb,stmt,"Iex.ITE.cond: cond :: Ity_I1");
+ if (typeOfIRExpr(tyenv, expr->Iex.ITE.iftrue)
+ != typeOfIRExpr(tyenv, expr->Iex.ITE.iffalse))
+ sanityCheckFail(bb,stmt,"Iex.ITE: iftrue/iffalse mismatch");
break;
default:
vpanic("tcExpr");
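The ir_defs.c changes above pin down the new operand order: the old IRExpr_Mux0X(cond, expr0, exprX) selected expr0 when cond was zero, while IRExpr_ITE(cond, iftrue, iffalse) selects iftrue when cond is nonzero, so a converted call site swaps its last two arguments. A small self-contained model of that selection rule (plain C stand-in types for illustration, not the real libvex_ir.h ones):

   #include <stdio.h>

   /* Stand-in for the new node shape; not the real libvex_ir.h types.
      eval_ite() follows the documented rule: nonzero cond selects iftrue. */
   typedef struct { int cond; long iftrue; long iffalse; } IteModel;

   static long eval_ite(IteModel e)
   {
      /* The IR operator is STRICT: both arms are already evaluated here. */
      return e.cond ? e.iftrue : e.iffalse;
   }

   int main(void)
   {
      IteModel e = { 0 /*cond*/, 7 /*iftrue*/, 42 /*iffalse*/ };
      printf("%ld\n", eval_ite(e));   /* cond == 0 selects iffalse: prints 42 */
      return 0;
   }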
diff --git a/VEX/priv/ir_opt.c b/VEX/priv/ir_opt.c
index 86fe4a8476..ded5906d07 100644
--- a/VEX/priv/ir_opt.c
+++ b/VEX/priv/ir_opt.c
@@ -362,12 +362,12 @@ static IRExpr* flatten_Expr ( IRSB* bb, IRExpr* ex )
newargs)));
return IRExpr_RdTmp(t1);
- case Iex_Mux0X:
+ case Iex_ITE:
t1 = newIRTemp(bb->tyenv, ty);
addStmtToIRSB(bb, IRStmt_WrTmp(t1,
- IRExpr_Mux0X(flatten_Expr(bb, ex->Iex.Mux0X.cond),
- flatten_Expr(bb, ex->Iex.Mux0X.expr0),
- flatten_Expr(bb, ex->Iex.Mux0X.exprX))));
+ IRExpr_ITE(flatten_Expr(bb, ex->Iex.ITE.cond),
+ flatten_Expr(bb, ex->Iex.ITE.iftrue),
+ flatten_Expr(bb, ex->Iex.ITE.iffalse))));
return IRExpr_RdTmp(t1);
case Iex_Const:
@@ -1103,13 +1103,13 @@ static Bool sameIRExprs_aux2 ( IRExpr** env, IRExpr* e1, IRExpr* e2 )
&& sameIRExprs_aux( env, tri1->arg3, tri2->arg3 ));
}
- case Iex_Mux0X:
- return toBool( sameIRExprs_aux( env, e1->Iex.Mux0X.cond,
- e2->Iex.Mux0X.cond )
- && sameIRExprs_aux( env, e1->Iex.Mux0X.expr0,
- e2->Iex.Mux0X.expr0 )
- && sameIRExprs_aux( env, e1->Iex.Mux0X.exprX,
- e2->Iex.Mux0X.exprX ));
+ case Iex_ITE:
+ return toBool( sameIRExprs_aux( env, e1->Iex.ITE.cond,
+ e2->Iex.ITE.cond )
+ && sameIRExprs_aux( env, e1->Iex.ITE.iftrue,
+ e2->Iex.ITE.iftrue )
+ && sameIRExprs_aux( env, e1->Iex.ITE.iffalse,
+ e2->Iex.ITE.iffalse ));
default:
/* Not very likely to be "same". */
@@ -2212,20 +2212,20 @@ static IRExpr* fold_Expr ( IRExpr** env, IRExpr* e )
}
break;
- case Iex_Mux0X:
- /* Mux0X */
+ case Iex_ITE:
+ /* ITE */
/* is the discriminant is a constant? */
- if (e->Iex.Mux0X.cond->tag == Iex_Const) {
+ if (e->Iex.ITE.cond->tag == Iex_Const) {
/* assured us by the IR type rules */
- vassert(e->Iex.Mux0X.cond->Iex.Const.con->tag == Ico_U1);
- e2 = e->Iex.Mux0X.cond->Iex.Const.con->Ico.U1
- ? e->Iex.Mux0X.exprX : e->Iex.Mux0X.expr0;
+ vassert(e->Iex.ITE.cond->Iex.Const.con->tag == Ico_U1);
+ e2 = e->Iex.ITE.cond->Iex.Const.con->Ico.U1
+ ? e->Iex.ITE.iftrue : e->Iex.ITE.iffalse;
}
else
/* are the arms identical? (pretty weedy test) */
- if (sameIRExprs(env, e->Iex.Mux0X.expr0,
- e->Iex.Mux0X.exprX)) {
- e2 = e->Iex.Mux0X.expr0;
+ if (sameIRExprs(env, e->Iex.ITE.iftrue,
+ e->Iex.ITE.iffalse)) {
+ e2 = e->Iex.ITE.iffalse;
}
break;
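The fold_Expr hunk above applies two simplifications to ITE: a constant condition selects one arm outright, and identical arms make the condition irrelevant. A rough sketch of the same two rules over plain integers rather than IRExpr trees (illustration only; names are invented):

   #include <stdio.h>

   /* Returns 1 and writes *folded if one of the two ITE folding rules
      applies, 0 otherwise. */
   static int fold_ite(int cond_is_const, int cond_val,
                       int iftrue, int iffalse, int *folded)
   {
      if (cond_is_const) {           /* ITE(const, t, f) -> t or f */
         *folded = cond_val ? iftrue : iffalse;
         return 1;
      }
      if (iftrue == iffalse) {       /* ITE(c, x, x) -> x          */
         *folded = iffalse;
         return 1;
      }
      return 0;                      /* nothing to fold            */
   }

   int main(void)
   {
      int r;
      if (fold_ite(1, 0, 5, 9, &r)) printf("%d\n", r);   /* prints 9 */
      if (fold_ite(0, 0, 4, 4, &r)) printf("%d\n", r);   /* prints 4 */
      return 0;
   }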
@@ -2381,14 +2381,14 @@ static IRExpr* subst_Expr ( IRExpr** env, IRExpr* ex )
);
}
- case Iex_Mux0X:
- vassert(isIRAtom(ex->Iex.Mux0X.cond));
- vassert(isIRAtom(ex->Iex.Mux0X.expr0));
- vassert(isIRAtom(ex->Iex.Mux0X.exprX));
- return IRExpr_Mux0X(
- subst_Expr(env, ex->Iex.Mux0X.cond),
- subst_Expr(env, ex->Iex.Mux0X.expr0),
- subst_Expr(env, ex->Iex.Mux0X.exprX)
+ case Iex_ITE:
+ vassert(isIRAtom(ex->Iex.ITE.cond));
+ vassert(isIRAtom(ex->Iex.ITE.iftrue));
+ vassert(isIRAtom(ex->Iex.ITE.iffalse));
+ return IRExpr_ITE(
+ subst_Expr(env, ex->Iex.ITE.cond),
+ subst_Expr(env, ex->Iex.ITE.iftrue),
+ subst_Expr(env, ex->Iex.ITE.iffalse)
);
default:
@@ -2796,10 +2796,10 @@ static void addUses_Expr ( Bool* set, IRExpr* e )
case Iex_GetI:
addUses_Expr(set, e->Iex.GetI.ix);
return;
- case Iex_Mux0X:
- addUses_Expr(set, e->Iex.Mux0X.cond);
- addUses_Expr(set, e->Iex.Mux0X.expr0);
- addUses_Expr(set, e->Iex.Mux0X.exprX);
+ case Iex_ITE:
+ addUses_Expr(set, e->Iex.ITE.cond);
+ addUses_Expr(set, e->Iex.ITE.iftrue);
+ addUses_Expr(set, e->Iex.ITE.iffalse);
return;
case Iex_CCall:
for (i = 0; e->Iex.CCall.args[i]; i++)
@@ -3287,25 +3287,25 @@ typedef
struct {
ULong f64i;
} Cf64i;
- /* Mux0X(tmp,tmp,tmp) */
+ /* ITE(tmp,tmp,tmp) */
struct {
IRTemp co;
IRTemp e0;
IRTemp eX;
} Mttt;
- /* Mux0X(tmp,const,tmp) */
+ /* ITE(tmp,tmp,const) */
struct {
IRTemp co;
IRConst con0;
IRTemp eX;
} Mtct;
- /* Mux0X(tmp,tmp,const) */
+ /* ITE(tmp,const,tmp) */
struct {
IRTemp co;
IRTemp e0;
IRConst conX;
} Mttc;
- /* Mux0X(tmp,const,const) */
+ /* ITE(tmp,const,const) */
struct {
IRTemp co;
IRConst con0;
@@ -3420,29 +3420,30 @@ static IRExpr* availExpr_to_IRExpr ( AvailExpr* ae )
case Cf64i:
return IRExpr_Const(IRConst_F64i(ae->u.Cf64i.f64i));
case Mttt:
- return IRExpr_Mux0X(IRExpr_RdTmp(ae->u.Mttt.co),
- IRExpr_RdTmp(ae->u.Mttt.e0),
- IRExpr_RdTmp(ae->u.Mttt.eX));
+ return IRExpr_ITE(IRExpr_RdTmp(ae->u.Mttt.co),
+ IRExpr_RdTmp(ae->u.Mttt.eX),
+ IRExpr_RdTmp(ae->u.Mttt.e0));
case Mtct:
con0 = LibVEX_Alloc(sizeof(IRConst));
*con0 = ae->u.Mtct.con0;
- return IRExpr_Mux0X(IRExpr_RdTmp(ae->u.Mtct.co),
- IRExpr_Const(con0),
- IRExpr_RdTmp(ae->u.Mtct.eX));
+ return IRExpr_ITE(IRExpr_RdTmp(ae->u.Mtct.co),
+ IRExpr_RdTmp(ae->u.Mtct.eX),
+ IRExpr_Const(con0));
case Mttc:
conX = LibVEX_Alloc(sizeof(IRConst));
*conX = ae->u.Mttc.conX;
- return IRExpr_Mux0X(IRExpr_RdTmp(ae->u.Mttc.co),
- IRExpr_RdTmp(ae->u.Mttc.e0),
- IRExpr_Const(conX));
+ return IRExpr_ITE(IRExpr_RdTmp(ae->u.Mttc.co),
+ IRExpr_Const(conX),
+ IRExpr_RdTmp(ae->u.Mttc.e0));
+
case Mtcc:
con0 = LibVEX_Alloc(sizeof(IRConst));
conX = LibVEX_Alloc(sizeof(IRConst));
*con0 = ae->u.Mtcc.con0;
*conX = ae->u.Mtcc.conX;
- return IRExpr_Mux0X(IRExpr_RdTmp(ae->u.Mtcc.co),
- IRExpr_Const(con0),
- IRExpr_Const(conX));
+ return IRExpr_ITE(IRExpr_RdTmp(ae->u.Mtcc.co),
+ IRExpr_Const(conX),
+ IRExpr_Const(con0));
case GetIt:
return IRExpr_GetI(ae->u.GetIt.descr,
IRExpr_RdTmp(ae->u.GetIt.ix),
@@ -3588,40 +3589,40 @@ static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e )
}
break;
- case Iex_Mux0X:
- if (e->Iex.Mux0X.cond->tag == Iex_RdTmp) {
- if (e->Iex.Mux0X.expr0->tag == Iex_RdTmp) {
- if (e->Iex.Mux0X.exprX->tag == Iex_RdTmp) {
+ case Iex_ITE:
+ if (e->Iex.ITE.cond->tag == Iex_RdTmp) {
+ if (e->Iex.ITE.iffalse->tag == Iex_RdTmp) {
+ if (e->Iex.ITE.iftrue->tag == Iex_RdTmp) {
ae = LibVEX_Alloc(sizeof(AvailExpr));
ae->tag = Mttt;
- ae->u.Mttt.co = e->Iex.Mux0X.cond->Iex.RdTmp.tmp;
- ae->u.Mttt.e0 = e->Iex.Mux0X.expr0->Iex.RdTmp.tmp;
- ae->u.Mttt.eX = e->Iex.Mux0X.exprX->Iex.RdTmp.tmp;
+ ae->u.Mttt.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
+ ae->u.Mttt.e0 = e->Iex.ITE.iffalse->Iex.RdTmp.tmp;
+ ae->u.Mttt.eX = e->Iex.ITE.iftrue->Iex.RdTmp.tmp;
return ae;
}
- if (e->Iex.Mux0X.exprX->tag == Iex_Const) {
+ if (e->Iex.ITE.iftrue->tag == Iex_Const) {
ae = LibVEX_Alloc(sizeof(AvailExpr));
ae->tag = Mttc;
- ae->u.Mttc.co = e->Iex.Mux0X.cond->Iex.RdTmp.tmp;
- ae->u.Mttc.e0 = e->Iex.Mux0X.expr0->Iex.RdTmp.tmp;
- ae->u.Mttc.conX = *(e->Iex.Mux0X.exprX->Iex.Const.con);
+ ae->u.Mttc.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
+ ae->u.Mttc.e0 = e->Iex.ITE.iffalse->Iex.RdTmp.tmp;
+ ae->u.Mttc.conX = *(e->Iex.ITE.iftrue->Iex.Const.con);
return ae;
}
- } else if (e->Iex.Mux0X.expr0->tag == Iex_Const) {
- if (e->Iex.Mux0X.exprX->tag == Iex_RdTmp) {
+ } else if (e->Iex.ITE.iffalse->tag == Iex_Const) {
+ if (e->Iex.ITE.iftrue->tag == Iex_RdTmp) {
ae = LibVEX_Alloc(sizeof(AvailExpr));
ae->tag = Mtct;
- ae->u.Mtct.co = e->Iex.Mux0X.cond->Iex.RdTmp.tmp;
- ae->u.Mtct.con0 = *(e->Iex.Mux0X.expr0->Iex.Const.con);
- ae->u.Mtct.eX = e->Iex.Mux0X.exprX->Iex.RdTmp.tmp;
+ ae->u.Mtct.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
+ ae->u.Mtct.con0 = *(e->Iex.ITE.iffalse->Iex.Const.con);
+ ae->u.Mtct.eX = e->Iex.ITE.iftrue->Iex.RdTmp.tmp;
return ae;
}
- if (e->Iex.Mux0X.exprX->tag == Iex_Const) {
+ if (e->Iex.ITE.iftrue->tag == Iex_Const) {
ae = LibVEX_Alloc(sizeof(AvailExpr));
ae->tag = Mtcc;
- ae->u.Mtcc.co = e->Iex.Mux0X.cond->Iex.RdTmp.tmp;
- ae->u.Mtcc.con0 = *(e->Iex.Mux0X.expr0->Iex.Const.con);
- ae->u.Mtcc.conX = *(e->Iex.Mux0X.exprX->Iex.Const.con);
+ ae->u.Mtcc.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
+ ae->u.Mtcc.con0 = *(e->Iex.ITE.iffalse->Iex.Const.con);
+ ae->u.Mtcc.conX = *(e->Iex.ITE.iftrue->Iex.Const.con);
return ae;
}
}
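The AvailExpr cases above bucket an ITE whose condition is a temporary by whether each value arm is a temporary or a constant (Mttt, Mtct, Mttc, Mtcc). A toy classifier showing the same mapping (the enum and function are invented for illustration and are not part of ir_opt.c):

   #include <stdio.h>

   enum Kind { TMP, CONST };

   /* Mirrors irExpr_to_AvailExpr: the condition is assumed to be a temp,
      and the bucket is chosen from the kinds of the two value arms. */
   static const char* classify_ite(enum Kind iftrue, enum Kind iffalse)
   {
      if (iffalse == TMP   && iftrue == TMP)   return "Mttt"; /* ITE(tmp,tmp,tmp)     */
      if (iffalse == TMP   && iftrue == CONST) return "Mttc"; /* ITE(tmp,const,tmp)   */
      if (iffalse == CONST && iftrue == TMP)   return "Mtct"; /* ITE(tmp,tmp,const)   */
      return "Mtcc";                                          /* ITE(tmp,const,const) */
   }

   int main(void)
   {
      printf("%s\n", classify_ite(TMP, CONST));   /* prints "Mtct" */
      return 0;
   }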
@@ -4374,10 +4375,10 @@ static void deltaIRExpr ( IRExpr* e, Int delta )
for (i = 0; e->Iex.CCall.args[i]; i++)
deltaIRExpr(e->Iex.CCall.args[i], delta);
break;
- case Iex_Mux0X:
- deltaIRExpr(e->Iex.Mux0X.cond, delta);
- deltaIRExpr(e->Iex.Mux0X.expr0, delta);
- deltaIRExpr(e->Iex.Mux0X.exprX, delta);
+ case Iex_ITE:
+ deltaIRExpr(e->Iex.ITE.cond, delta);
+ deltaIRExpr(e->Iex.ITE.iftrue, delta);
+ deltaIRExpr(e->Iex.ITE.iffalse, delta);
break;
default:
vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
@@ -4754,10 +4755,10 @@ static void setHints_Expr (Bool* doesLoad, Bool* doesGet, IRExpr* e )
for (i = 0; e->Iex.CCall.args[i]; i++)
setHints_Expr(doesLoad, doesGet, e->Iex.CCall.args[i]);
return;
- case Iex_Mux0X:
- setHints_Expr(doesLoad, doesGet, e->Iex.Mux0X.cond);
- setHints_Expr(doesLoad, doesGet, e->Iex.Mux0X.expr0);
- setHints_Expr(doesLoad, doesGet, e->Iex.Mux0X.exprX);
+ case Iex_ITE:
+ setHints_Expr(doesLoad, doesGet, e->Iex.ITE.cond);
+ setHints_Expr(doesLoad, doesGet, e->Iex.ITE.iftrue);
+ setHints_Expr(doesLoad, doesGet, e->Iex.ITE.iffalse);
return;
case Iex_Qop:
setHints_Expr(doesLoad, doesGet, e->Iex.Qop.details->arg1);
@@ -4826,10 +4827,10 @@ static void aoccCount_Expr ( UShort* uses, IRExpr* e )
uses[e->Iex.RdTmp.tmp]++;
return;
- case Iex_Mux0X:
- aoccCount_Expr(uses, e->Iex.Mux0X.cond);
- aoccCount_Expr(uses, e->Iex.Mux0X.expr0);
- aoccCount_Expr(uses, e->Iex.Mux0X.exprX);
+ case Iex_ITE:
+ aoccCount_Expr(uses, e->Iex.ITE.cond);
+ aoccCount_Expr(uses, e->Iex.ITE.iftrue);
+ aoccCount_Expr(uses, e->Iex.ITE.iffalse);
return;
case Iex_Qop:
@@ -5161,11 +5162,11 @@ static IRExpr* atbSubst_Expr ( ATmpInfo* env, IRExpr* e )
case Iex_RdTmp:
e2 = atbSubst_Temp(env, e->Iex.RdTmp.tmp);
return e2 ? e2 : e;
- case Iex_Mux0X:
- return IRExpr_Mux0X(
- atbSubst_Expr(env, e->Iex.Mux0X.cond),
- atbSubst_Expr(env, e->Iex.Mux0X.expr0),
- atbSubst_Expr(env, e->Iex.Mux0X.exprX)
+ case Iex_ITE:
+ return IRExpr_ITE(
+ atbSubst_Expr(env, e->Iex.ITE.cond),
+ atbSubst_Expr(env, e->Iex.ITE.iftrue),
+ atbSubst_Expr(env, e->Iex.ITE.iffalse)
);
case Iex_Qop:
return IRExpr_Qop(
diff --git a/VEX/pub/libvex_ir.h b/VEX/pub/libvex_ir.h
index c432b732a3..627ffd7cf2 100644
--- a/VEX/pub/libvex_ir.h
+++ b/VEX/pub/libvex_ir.h
@@ -1615,7 +1615,7 @@ typedef
Iex_Unop,
Iex_Load,
Iex_Const,
- Iex_Mux0X,
+ Iex_ITE,
Iex_CCall
}
IRExprTag;
@@ -1799,18 +1799,18 @@ struct _IRExpr {
IRExpr** args; /* Vector of argument expressions. */
} CCall;

- /* A ternary if-then-else operator. It returns expr0 if cond is
- zero, exprX otherwise. Note that it is STRICT, ie. both
- expr0 and exprX are evaluated in all cases.
+ /* A ternary if-then-else operator. It returns iftrue if cond is
+ nonzero, iffalse otherwise. Note that it is STRICT, ie. both
+ iftrue and iffalse are evaluated in all cases.

- ppIRExpr output: Mux0X(<cond>,<expr0>,<exprX>),
- eg. Mux0X(t6,t7,t8)
+ ppIRExpr output: ITE(<cond>,<iftrue>,<iffalse>),
+ eg. ITE(t6,t7,t8)
*/
struct {
IRExpr* cond; /* Condition */
- IRExpr* expr0; /* True expression */
- IRExpr* exprX; /* False expression */
- } Mux0X;
+ IRExpr* iftrue; /* True expression */
+ IRExpr* iffalse; /* False expression */
+ } ITE;
} Iex;
};
@@ -1845,7 +1845,7 @@ extern IRExpr* IRExpr_Unop ( IROp op, IRExpr* arg );
extern IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr );
extern IRExpr* IRExpr_Const ( IRConst* con );
extern IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args );
-extern IRExpr* IRExpr_Mux0X ( IRExpr* cond, IRExpr* expr0, IRExpr* exprX );
+extern IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse );
/* Deep-copy an IRExpr. */
extern IRExpr* deepCopyIRExpr ( IRExpr* );
diff --git a/VEX/test_main.c b/VEX/test_main.c
index c9e3745703..58929817f7 100644
--- a/VEX/test_main.c
+++ b/VEX/test_main.c
@@ -2110,27 +2110,27 @@ IRAtom* expr2vbits_LDle ( MCEnv* mce, IRType ty, IRAtom* addr, UInt bias )
static
-IRAtom* expr2vbits_Mux0X ( MCEnv* mce,
- IRAtom* cond, IRAtom* expr0, IRAtom* exprX )
+IRAtom* expr2vbits_ITE ( MCEnv* mce,
+ IRAtom* cond, IRAtom* iftrue, IRAtom* iffalse )
{
- IRAtom *vbitsC, *vbits0, *vbitsX;
+ IRAtom *vbitsC, *vbits0, *vbits1;
IRType ty;
- /* Given Mux0X(cond,expr0,exprX), generate
- Mux0X(cond,expr0#,exprX#) `UifU` PCast(cond#)
+ /* Given ITE(cond,iftrue,iffalse), generate
+ ITE(cond,iftrue#,iffalse#) `UifU` PCast(cond#)
That is, steer the V bits like the originals, but trash the
result if the steering value is undefined. This gives lazy
propagation.
*/
tl_assert(isOriginalAtom(mce, cond));
- tl_assert(isOriginalAtom(mce, expr0));
- tl_assert(isOriginalAtom(mce, exprX));
+ tl_assert(isOriginalAtom(mce, iftrue));
+ tl_assert(isOriginalAtom(mce, iffalse));
vbitsC = expr2vbits(mce, cond);
- vbits0 = expr2vbits(mce, expr0);
- vbitsX = expr2vbits(mce, exprX);
+ vbits0 = expr2vbits(mce, iffalse);
+ vbits1 = expr2vbits(mce, iftrue);
ty = typeOfIRExpr(mce->bb->tyenv, vbits0);
return
- mkUifU(mce, ty, assignNew(mce, ty, IRExpr_Mux0X(cond, vbits0, vbitsX)),
+ mkUifU(mce, ty, assignNew(mce, ty, IRExpr_ITE(cond, vbits1, vbits0)),
mkPCastTo(mce, ty, vbitsC) );
}
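The expr2vbits_ITE hunk above encodes the shadow-value rule: steer the arms' V bits with the original condition, then union in a pessimised copy of the condition's own V bits, so an undefined condition makes the whole result undefined. A toy numeric model of that rule, with 1-bits meaning "undefined" (not Memcheck code; the helper names are invented):

   #include <stdio.h>
   #include <stdint.h>

   /* UifU is bitwise OR of undefinedness; PCast smears "any bit
      undefined" across the whole value. */
   static uint8_t pcast8(uint8_t v)           { return v ? 0xFF : 0x00; }
   static uint8_t uifu8(uint8_t a, uint8_t b) { return a | b; }

   static uint8_t ite_vbits(int cond, uint8_t cond_vbits,
                            uint8_t vbits_iftrue, uint8_t vbits_iffalse)
   {
      uint8_t steered = cond ? vbits_iftrue : vbits_iffalse;
      return uifu8(steered, pcast8(cond_vbits));
   }

   int main(void)
   {
      /* Defined condition: result inherits the chosen arm's V bits. */
      printf("%02x\n", ite_vbits(1, 0x00, 0x0F, 0x00));   /* prints 0f */
      /* Undefined condition: the whole result is flagged undefined. */
      printf("%02x\n", ite_vbits(1, 0x01, 0x0F, 0x00));   /* prints ff */
      return 0;
   }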
@@ -2173,9 +2173,9 @@ IRExpr* expr2vbits ( MCEnv* mce, IRExpr* e )
e->Iex.CCall.retty,
e->Iex.CCall.cee );
- case Iex_Mux0X:
- return expr2vbits_Mux0X( mce, e->Iex.Mux0X.cond, e->Iex.Mux0X.expr0,
- e->Iex.Mux0X.exprX);
+ case Iex_ITE:
+ return expr2vbits_ITE( mce, e->Iex.ITE.cond, e->Iex.ITE.iftrue,
+ e->Iex.ITE.iffalse);
default:
VG_(printf)("\n");
@@ -2562,10 +2562,10 @@ static Bool checkForBogusLiterals ( /*FLAT*/ IRStmt* st )
case Iex_Binop:
return isBogusAtom(e->Iex.Binop.arg1)
|| isBogusAtom(e->Iex.Binop.arg2);
- case Iex_Mux0X:
- return isBogusAtom(e->Iex.Mux0X.cond)
- || isBogusAtom(e->Iex.Mux0X.expr0)
- || isBogusAtom(e->Iex.Mux0X.exprX);
+ case Iex_ITE:
+ return isBogusAtom(e->Iex.ITE.cond)
+ || isBogusAtom(e->Iex.ITE.iftrue)
+ || isBogusAtom(e->Iex.ITE.iffalse);
case Iex_Load:
return isBogusAtom(e->Iex.Load.addr);
case Iex_CCall:
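The remaining hunks are the usual three-way recursion over cond, iftrue and iffalse, as in checkForBogusLiterals above. A self-contained sketch of that walking pattern over a toy expression type (all types and the flagged literal are invented for illustration, not VEX or Memcheck code):

   #include <stdio.h>

   typedef enum { EX_CONST, EX_ITE } Tag;

   typedef struct Expr {
      Tag tag;
      unsigned long value;                     /* EX_CONST                 */
      struct Expr *cond, *iftrue, *iffalse;    /* EX_ITE                   */
   } Expr;

   /* Flags a particular constant, visiting all three ITE children. */
   static int is_bogus(const Expr* e)
   {
      switch (e->tag) {
         case EX_CONST:
            return e->value == 0xDEADBEEF;     /* the "bogus" literal      */
         case EX_ITE:
            return is_bogus(e->cond)
                   || is_bogus(e->iftrue)
                   || is_bogus(e->iffalse);
      }
      return 0;
   }

   int main(void)
   {
      Expr c  = { EX_CONST, 1,          0, 0, 0 };
      Expr t  = { EX_CONST, 0xDEADBEEF, 0, 0, 0 };
      Expr f  = { EX_CONST, 7,          0, 0, 0 };
      Expr it = { EX_ITE,   0,          &c, &t, &f };
      printf("%d\n", is_bogus(&it));           /* prints 1 */
      return 0;
   }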