vassert(op != Aalu_MUL);
return i;
}
-AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, AMD64RM* dst ) {
+AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, HReg dst ) {
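+   /* src == 0 means the shift amount is taken from %cl; any other value is an
+      immediate shift count (see the Ain_Sh64 cases below). */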
AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
i->tag = Ain_Sh64;
   i->Ain.Sh64.op      = op;
   i->Ain.Sh64.src     = src;
   i->Ain.Sh64.dst     = dst;
return i;
}
-AMD64Instr* AMD64Instr_Test64 ( AMD64RI* src, AMD64RM* dst ) {
- AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
- i->tag = Ain_Test64;
- i->Ain.Test64.src = src;
- i->Ain.Test64.dst = dst;
+AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst ) {
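+   /* testq: sets the flags from dst & sign-extend(imm32); dst itself is not written. */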
+ AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
+ i->tag = Ain_Test64;
+ i->Ain.Test64.imm32 = imm32;
+ i->Ain.Test64.dst = dst;
return i;
}
-AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, AMD64RM* dst ) {
+AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst ) {
AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
i->tag = Ain_Unary64;
i->Ain.Unary64.op = op;
i->Ain.Unary64.dst = dst;
return i;
}
-AMD64Instr* AMD64Instr_MulL ( Bool syned, Int sz, AMD64RM* src ) {
+AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* src ) {
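+   /* Widening 64 x 64 -> 128 multiply (mulq/imulq); the result is left in RDX:RAX. */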
AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
i->tag = Ain_MulL;
i->Ain.MulL.syned = syned;
- i->Ain.MulL.sz = sz;
i->Ain.MulL.src = src;
- vassert(sz == 2 || sz == 4 || sz == 8);
return i;
}
AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* src ) {
vex_printf("%%cl,");
else
vex_printf("$%d,", (Int)i->Ain.Sh64.src);
- ppAMD64RM(i->Ain.Sh64.dst);
+ ppHRegAMD64(i->Ain.Sh64.dst);
return;
case Ain_Test64:
- vex_printf("testq ");
- ppAMD64RI(i->Ain.Test64.src);
- vex_printf(",");
- ppAMD64RM(i->Ain.Test64.dst);
+ vex_printf("testq $%d,", (Int)i->Ain.Test64.imm32);
+ ppHRegAMD64(i->Ain.Test64.dst);
return;
case Ain_Unary64:
vex_printf("%sq ", showAMD64UnaryOp(i->Ain.Unary64.op));
- ppAMD64RM(i->Ain.Unary64.dst);
+ ppHRegAMD64(i->Ain.Unary64.dst);
return;
case Ain_MulL:
- vex_printf("%cmul%s ",
- i->Ain.MulL.syned ? 's' : 'u',
- showAMD64ScalarSz(i->Ain.MulL.sz));
+ vex_printf("%cmulq ", i->Ain.MulL.syned ? 's' : 'u');
ppAMD64RM(i->Ain.MulL.src);
return;
case Ain_Div:
addRegUsage_AMD64AMode(u, i->Ain.Alu64M.dst);
return;
case Ain_Sh64:
- addRegUsage_AMD64RM(u, i->Ain.Sh64.dst, HRmModify);
+ addHRegUse(u, HRmModify, i->Ain.Sh64.dst);
if (i->Ain.Sh64.src == 0)
addHRegUse(u, HRmRead, hregAMD64_RCX());
return;
case Ain_Test64:
- addRegUsage_AMD64RI(u, i->Ain.Test64.src);
- addRegUsage_AMD64RM(u, i->Ain.Test64.dst, HRmRead);
+ addHRegUse(u, HRmRead, i->Ain.Test64.dst);
return;
case Ain_Unary64:
- addRegUsage_AMD64RM(u, i->Ain.Unary64.dst, HRmModify);
+ addHRegUse(u, HRmModify, i->Ain.Unary64.dst);
return;
case Ain_MulL:
addRegUsage_AMD64RM(u, i->Ain.MulL.src, HRmRead);
mapRegs_AMD64AMode(m, i->Ain.Alu64M.dst);
return;
case Ain_Sh64:
- mapRegs_AMD64RM(m, i->Ain.Sh64.dst);
+ mapReg(m, &i->Ain.Sh64.dst);
return;
case Ain_Test64:
- mapRegs_AMD64RI(m, i->Ain.Test64.src);
- mapRegs_AMD64RM(m, i->Ain.Test64.dst);
+ mapReg(m, &i->Ain.Test64.dst);
return;
case Ain_Unary64:
- mapRegs_AMD64RM(m, i->Ain.Unary64.dst);
+ mapReg(m, &i->Ain.Unary64.dst);
return;
case Ain_MulL:
mapRegs_AMD64RM(m, i->Ain.MulL.src);
*dst = i->Ain.Alu64R.dst;
return True;
}
-//.. /* Moves between FP regs */
-//.. if (i->tag == Xin_FpUnary) {
-//.. if (i->Xin.FpUnary.op != Xfp_MOV)
-//.. return False;
-//.. *src = i->Xin.FpUnary.src;
-//.. *dst = i->Xin.FpUnary.dst;
-//.. return True;
-//.. }
+ /* Moves between vector regs */
if (i->tag == Ain_SseReRg) {
if (i->Ain.SseReRg.op != Asse_MOV)
return False;
switch (hregClass(rreg)) {
case HRcInt64:
return AMD64Instr_Alu64M ( Aalu_MOV, AMD64RI_Reg(rreg), am );
- //case HRcFlt64:
- // return AMD64Instr_FpLdSt ( False/*store*/, 8, rreg, am );
case HRcVec128:
return AMD64Instr_SseLdSt ( False/*store*/, 16, rreg, am );
default:
switch (hregClass(rreg)) {
case HRcInt64:
return AMD64Instr_Alu64R ( Aalu_MOV, AMD64RMI_Mem(am), rreg );
- //case HRcFlt64:
- // return AMD64Instr_FpLdSt ( True/*load*/, 8, rreg, am );
case HRcVec128:
return AMD64Instr_SseLdSt ( True/*load*/, 16, rreg, am );
default:
return toUChar(n);
}
-
-
-//.. static UInt fregNo ( HReg r )
-//.. {
-//.. UInt n;
-//.. vassert(hregClass(r) == HRcFlt64);
-//.. vassert(!hregIsVirtual(r));
-//.. n = hregNumber(r);
-//.. vassert(n <= 5);
-//.. return n;
-//.. }
-
-
/* Given an xmm (128bit V-class) register number, produce the
equivalent numbered register in 64-bit I-class. This is a bit of
fakery which facilitates using functions that work on integer
default: goto bad;
}
if (i->Ain.Sh64.src == 0) {
- *p++ = rexAMode_R(fake(0),
- i->Ain.Sh64.dst->Arm.Reg.reg);
+ *p++ = rexAMode_R(fake(0), i->Ain.Sh64.dst);
*p++ = toUChar(opc_cl);
- switch (i->Ain.Sh64.dst->tag) {
- case Arm_Reg:
- p = doAMode_R(p, fake(subopc),
- i->Ain.Sh64.dst->Arm.Reg.reg);
- goto done;
- default:
- goto bad;
- }
+ p = doAMode_R(p, fake(subopc), i->Ain.Sh64.dst);
+ goto done;
} else {
- *p++ = rexAMode_R(fake(0), i->Ain.Sh64.dst->Arm.Reg.reg);
+ *p++ = rexAMode_R(fake(0), i->Ain.Sh64.dst);
*p++ = toUChar(opc_imm);
- switch (i->Ain.Sh64.dst->tag) {
- case Arm_Reg:
- p = doAMode_R(p, fake(subopc),
- i->Ain.Sh64.dst->Arm.Reg.reg);
- *p++ = (UChar)(i->Ain.Sh64.src);
- goto done;
- default:
- goto bad;
- }
+ p = doAMode_R(p, fake(subopc), i->Ain.Sh64.dst);
+ *p++ = (UChar)(i->Ain.Sh64.src);
+ goto done;
}
break;
case Ain_Test64:
- if (i->Ain.Test64.src->tag == Ari_Imm
- && i->Ain.Test64.dst->tag == Arm_Reg) {
- /* testq sign-extend($imm32), %reg */
- *p++ = rexAMode_R(fake(0), i->Ain.Test64.dst->Arm.Reg.reg);
- *p++ = 0xF7;
- p = doAMode_R(p, fake(0), i->Ain.Test64.dst->Arm.Reg.reg);
- p = emit32(p, i->Ain.Test64.src->Ari.Imm.imm32);
- goto done;
- }
- break;
+ /* testq sign-extend($imm32), %reg */
+ *p++ = rexAMode_R(fake(0), i->Ain.Test64.dst);
+ *p++ = 0xF7;
+ p = doAMode_R(p, fake(0), i->Ain.Test64.dst);
+ p = emit32(p, i->Ain.Test64.imm32);
+ goto done;
case Ain_Unary64:
if (i->Ain.Unary64.op == Aun_NOT) {
- if (i->Ain.Unary64.dst->tag == Arm_Reg) {
- *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst->Arm.Reg.reg);
- *p++ = 0xF7;
- p = doAMode_R(p, fake(2), i->Ain.Unary64.dst->Arm.Reg.reg);
- goto done;
- } else {
- goto bad;
- }
+ *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst);
+ *p++ = 0xF7;
+ p = doAMode_R(p, fake(2), i->Ain.Unary64.dst);
+ goto done;
}
if (i->Ain.Unary64.op == Aun_NEG) {
- if (i->Ain.Unary64.dst->tag == Arm_Reg) {
- *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst->Arm.Reg.reg);
- *p++ = 0xF7;
- p = doAMode_R(p, fake(3), i->Ain.Unary64.dst->Arm.Reg.reg);
- goto done;
- } else {
- goto bad;
- }
+ *p++ = rexAMode_R(fake(0), i->Ain.Unary64.dst);
+ *p++ = 0xF7;
+ p = doAMode_R(p, fake(3), i->Ain.Unary64.dst);
+ goto done;
}
break;
case Ain_MulL:
subopc = i->Ain.MulL.syned ? 5 : 4;
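+      /* 0xF7 /5 = imulq (signed), 0xF7 /4 = mulq (unsigned) */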
- if (i->Ain.MulL.sz == 8) {
- switch (i->Ain.MulL.src->tag) {
- case Arm_Mem:
- *p++ = rexAMode_M( fake(0),
- i->Ain.MulL.src->Arm.Mem.am);
- *p++ = 0xF7;
- p = doAMode_M(p, fake(subopc),
- i->Ain.MulL.src->Arm.Mem.am);
- goto done;
- case Arm_Reg:
- *p++ = rexAMode_R(fake(0),
- i->Ain.MulL.src->Arm.Reg.reg);
- *p++ = 0xF7;
- p = doAMode_R(p, fake(subopc),
- i->Ain.MulL.src->Arm.Reg.reg);
- goto done;
- default:
- goto bad;
- }
+ switch (i->Ain.MulL.src->tag) {
+ case Arm_Mem:
+ *p++ = rexAMode_M( fake(0),
+ i->Ain.MulL.src->Arm.Mem.am);
+ *p++ = 0xF7;
+ p = doAMode_M(p, fake(subopc),
+ i->Ain.MulL.src->Arm.Mem.am);
+ goto done;
+ case Arm_Reg:
+ *p++ = rexAMode_R(fake(0),
+ i->Ain.MulL.src->Arm.Reg.reg);
+ *p++ = 0xF7;
+ p = doAMode_R(p, fake(subopc),
+ i->Ain.MulL.src->Arm.Reg.reg);
+ goto done;
+ default:
+ goto bad;
}
break;
addInstr(env, AMD64Instr_Alu64R(Aalu_MOV, AMD64RMI_Imm(3), reg));
addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
iselIntExpr_RMI(env, mode), reg));
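+      /* the SSE rounding-control field occupies MXCSR bits 14:13; the shift
+         below positions the 2-bit mode there */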
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 13, AMD64RM_Reg(reg)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 13, reg));
addInstr(env, AMD64Instr_Alu64R(
Aalu_OR, AMD64RMI_Imm(DEFAULT_MXCSR), reg));
addInstr(env, AMD64Instr_Push(AMD64RMI_Reg(reg)));
*/
addInstr(env, mk_iMOVsd_RR(rrm, rrm2));
addInstr(env, AMD64Instr_Alu64R(Aalu_AND, AMD64RMI_Imm(3), rrm2));
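+      /* the x87 rounding-control field occupies FPUCW bits 11:10 */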
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 10, AMD64RM_Reg(rrm2)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 10, rrm2));
addInstr(env, AMD64Instr_Alu64R(Aalu_OR,
AMD64RMI_Imm(DEFAULT_FPUCW), rrm2));
addInstr(env, AMD64Instr_Alu64M(Aalu_MOV,
//.. addInstr(env, X86Instr_Sh32(Xsh_SAR, 16, X86RM_Reg(dst)));
//.. break;
case Iop_Sar32:
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(dst)));
- addInstr(env, AMD64Instr_Sh64(Ash_SAR, 32, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SAR, 32, dst));
break;
default:
ppIROp(e->Iex.Binop.op);
vassert(nshift >= 0);
if (nshift > 0)
/* Can't allow nshift==0 since that means %cl */
- addInstr(env, AMD64Instr_Sh64(
- shOp,
- nshift,
- AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(shOp, nshift, dst));
} else {
/* General case; we have to force the amount into %cl. */
HReg regR = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(regR,hregAMD64_RCX()));
- addInstr(env, AMD64Instr_Sh64(shOp, 0/* %cl */, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(shOp, 0/* %cl */, dst));
}
return dst;
}
HReg left64 = iselIntExpr_R(env, e->Iex.Binop.arg1);
addInstr(env, mk_iMOVsd_RR(left64, rdx));
addInstr(env, mk_iMOVsd_RR(left64, rax));
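+      /* the 32-bit divide takes its dividend in edx:eax; the shift below
+         moves the high half of left64 into edx, eax keeps the low half */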
- addInstr(env, AMD64Instr_Sh64(Ash_SHR, 32, AMD64RM_Reg(rdx)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHR, 32, rdx));
addInstr(env, AMD64Instr_Div(syned, 4, rmRight));
addInstr(env, AMD64Instr_MovZLQ(rdx,rdx));
addInstr(env, AMD64Instr_MovZLQ(rax,rax));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(rdx)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, rdx));
addInstr(env, mk_iMOVsd_RR(rax, dst));
addInstr(env, AMD64Instr_Alu64R(Aalu_OR, AMD64RMI_Reg(rdx), dst));
return dst;
HReg lo32s = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(hi32s, hi32));
addInstr(env, mk_iMOVsd_RR(lo32s, lo32));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(hi32)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, hi32));
addInstr(env, AMD64Instr_MovZLQ(lo32,lo32));
addInstr(env, AMD64Instr_Alu64R(
Aalu_OR, AMD64RMI_Reg(lo32), hi32));
HReg lo16s = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(hi16s, hi16));
addInstr(env, mk_iMOVsd_RR(lo16s, lo16));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 16, AMD64RM_Reg(hi16)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 16, hi16));
addInstr(env, AMD64Instr_Alu64R(
Aalu_AND, AMD64RMI_Imm(0xFFFF), lo16));
addInstr(env, AMD64Instr_Alu64R(
HReg lo8s = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(hi8s, hi8));
addInstr(env, mk_iMOVsd_RR(lo8s, lo8));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 8, AMD64RM_Reg(hi8)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 8, hi8));
addInstr(env, AMD64Instr_Alu64R(
Aalu_AND, AMD64RMI_Imm(0xFF), lo8));
addInstr(env, AMD64Instr_Alu64R(
addInstr(env, mk_iMOVsd_RR(a32s, a32));
addInstr(env, mk_iMOVsd_RR(b32s, b32));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, AMD64RM_Reg(a32)));
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, AMD64RM_Reg(b32)));
- addInstr(env, AMD64Instr_Sh64(shr_op, shift, AMD64RM_Reg(a32)));
- addInstr(env, AMD64Instr_Sh64(shr_op, shift, AMD64RM_Reg(b32)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, a32));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, b32));
+ addInstr(env, AMD64Instr_Sh64(shr_op, shift, a32));
+ addInstr(env, AMD64Instr_Sh64(shr_op, shift, b32));
addInstr(env, AMD64Instr_Alu64R(Aalu_MUL, AMD64RMI_Reg(a32), b32));
return b32;
}
HReg dst = newVRegI(env);
HReg src = iselIntExpr_R(env, expr8);
addInstr(env, mk_iMOVsd_RR(src,dst) );
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 56, AMD64RM_Reg(dst)));
- addInstr(env, AMD64Instr_Sh64(Ash_SHR, 56, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 56, dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHR, 56, dst));
return dst;
}
HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
UInt amt = 32;
addInstr(env, mk_iMOVsd_RR(src,dst) );
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, AMD64RM_Reg(dst)));
- addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, dst));
return dst;
}
case Iop_128HIto64: {
|| e->Iex.Unop.op==Iop_16Sto64 );
UInt amt = srcIs16 ? 48 : 56;
addInstr(env, mk_iMOVsd_RR(src,dst) );
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, AMD64RM_Reg(dst)));
- addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, dst));
return dst;
}
case Iop_Not8:
HReg dst = newVRegI(env);
HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
addInstr(env, mk_iMOVsd_RR(src,dst) );
- addInstr(env, AMD64Instr_Unary64(Aun_NOT,AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Unary64(Aun_NOT,dst));
return dst;
}
//.. case Iop_64HIto32: {
default: vassert(0);
}
addInstr(env, mk_iMOVsd_RR(src,dst) );
- addInstr(env, AMD64Instr_Sh64(
- Ash_SHR, shift, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHR, shift, dst));
return dst;
}
case Iop_1Uto64:
HReg dst = newVRegI(env);
AMD64CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
addInstr(env, AMD64Instr_Set64(cond,dst));
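+         /* smear the 0/1 condition bit across all 64 bits of dst */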
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 63, AMD64RM_Reg(dst)));
- addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 63, dst));
+ addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, dst));
return dst;
}
case Iop_Ctz64: {
HReg dst = newVRegI(env);
HReg reg = iselIntExpr_R(env, e->Iex.Unop.arg);
addInstr(env, mk_iMOVsd_RR(reg,dst));
- addInstr(env, AMD64Instr_Unary64(Aun_NEG,AMD64RM_Reg(dst)));
+ addInstr(env, AMD64Instr_Unary64(Aun_NEG,dst));
return dst;
}
HReg dst = newVRegI(env);
addInstr(env, mk_iMOVsd_RR(rX,dst));
r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
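+      /* test the low byte of the condition; the CMov below keeps rX in dst
+         unless that byte is zero, in which case r0 is taken */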
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF), AMD64RM_Reg(r8)));
+ addInstr(env, AMD64Instr_Test64(0xFF, r8));
addInstr(env, AMD64Instr_CMov64(Acc_Z,r0,dst));
return dst;
}
/* 64to1 */
if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_64to1) {
- AMD64RM* rm = iselIntExpr_RM(env, e->Iex.Unop.arg);
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(1),rm));
+ HReg reg = iselIntExpr_R(env, e->Iex.Unop.arg);
+ addInstr(env, AMD64Instr_Test64(1,reg));
return Acc_NZ;
}
if (e->tag == Iex_Unop
&& e->Iex.Unop.op == Iop_CmpNEZ8) {
HReg r = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF),AMD64RM_Reg(r)));
+ addInstr(env, AMD64Instr_Test64(0xFF,r));
return Acc_NZ;
}
if (e->tag == Iex_Unop
&& e->Iex.Unop.op == Iop_CmpNEZ16) {
HReg r = iselIntExpr_R(env, e->Iex.Unop.arg);
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFFFF),AMD64RM_Reg(r)));
+ addInstr(env, AMD64Instr_Test64(0xFFFF,r));
return Acc_NZ;
}
HReg r = newVRegI(env);
addInstr(env, mk_iMOVsd_RR(r1,r));
addInstr(env, AMD64Instr_Alu64R(Aalu_XOR,rmi2,r));
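+      /* shifting left by 32 discards the top half of the XOR result, so Z
+         ends up set iff the low 32 bits of the operands were equal */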
- addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, AMD64RM_Reg(r)));
+ addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, r));
switch (e->Iex.Binop.op) {
case Iop_CmpEQ32: return Acc_Z;
case Iop_CmpNE32: return Acc_NZ;
AMD64RM* rmLeft = iselIntExpr_RM(env, e->Iex.Binop.arg1);
HReg rRight = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(rRight, hregAMD64_RAX()));
- addInstr(env, AMD64Instr_MulL(syned, 8, rmLeft));
+ addInstr(env, AMD64Instr_MulL(syned, rmLeft));
/* Result is now in RDX:RAX. Tell the caller. */
addInstr(env, mk_iMOVsd_RR(hregAMD64_RDX(), tHi));
addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), tLo));
r0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
dst = newVRegV(env);
addInstr(env, mk_vMOVsd_RR(rX,dst));
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF), AMD64RM_Reg(r8)));
+ addInstr(env, AMD64Instr_Test64(0xFF, r8));
addInstr(env, AMD64Instr_SseCMov(Acc_Z,r0,dst));
return dst;
}
HReg r0 = iselVecExpr(env, e->Iex.Mux0X.expr0);
HReg dst = newVRegV(env);
addInstr(env, mk_vMOVsd_RR(rX,dst));
- addInstr(env, AMD64Instr_Test64(AMD64RI_Imm(0xFF), AMD64RM_Reg(r8)));
+ addInstr(env, AMD64Instr_Test64(0xFF, r8));
addInstr(env, AMD64Instr_SseCMov(Acc_Z,r0,dst));
return dst;
}