return i;
}
-AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, AMD64RM* src, HReg dst ) {
+AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, HReg src, HReg dst ) {
AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
i->tag = Ain_CMov64;
i->Ain.CMov64.cond = cond;
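With this change the source operand is carried as a plain HReg rather than an AMD64RM* (register-or-memory), so Ain_CMov64 can only describe a register-to-register conditional move. The remaining field assignments in the constructor are unchanged and not shown; the pretty-printer, register-usage, register-mapping and emitter cases below are adjusted to match.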
case Ain_CMov64:
vex_printf("cmov%s ", showAMD64CondCode(i->Ain.CMov64.cond));
- ppAMD64RM(i->Ain.CMov64.src);
+ ppHRegAMD64(i->Ain.CMov64.src);
vex_printf(",");
ppHRegAMD64(i->Ain.CMov64.dst);
return;
addRegUsage_AMD64AMode(u, i->Ain.XAssisted.amRIP);
return;
case Ain_CMov64:
- addRegUsage_AMD64RM(u, i->Ain.CMov64.src, HRmRead);
+ addHRegUse(u, HRmRead, i->Ain.CMov64.src);
addHRegUse(u, HRmModify, i->Ain.CMov64.dst);
return;
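The source is now a plain register read. Note that dst stays HRmModify rather than HRmWrite: the move is conditional, so when the condition is false the old value of dst survives, and the register allocator must treat dst as live into the instruction.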
case Ain_CLoad:
mapRegs_AMD64AMode(m, i->Ain.XAssisted.amRIP);
return;
case Ain_CMov64:
- mapRegs_AMD64RM(m, i->Ain.CMov64.src);
+ mapReg(m, &i->Ain.CMov64.src);
mapReg(m, &i->Ain.CMov64.dst);
return;
case Ain_CLoad:
case Ain_CMov64:
vassert(i->Ain.CMov64.cond != Acc_ALWAYS);
- if (i->Ain.CMov64.src->tag == Arm_Reg) {
- *p++ = rexAMode_R(i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Reg.reg);
- *p++ = 0x0F;
- *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
- p = doAMode_R(p, i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Reg.reg);
- goto done;
- }
- if (i->Ain.CMov64.src->tag == Arm_Mem) {
- *p++ = rexAMode_M(i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Mem.am);
- *p++ = 0x0F;
- *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
- p = doAMode_M(p, i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Mem.am);
- goto done;
- }
- break;
+ *p++ = rexAMode_R(i->Ain.CMov64.dst, i->Ain.CMov64.src);
+ *p++ = 0x0F;
+ *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
+ p = doAMode_R(p, i->Ain.CMov64.dst, i->Ain.CMov64.src);
+ goto done;
case Ain_CLoad: {
vassert(i->Ain.CLoad.cond != Acc_ALWAYS);
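Encoding note for the new reg-reg case above: rexAMode_R supplies the REX prefix (with W set for the 64-bit operation), the opcode is 0F 40+cc, and doAMode_R emits the register-register ModRM byte, i.e. CMOVcc r64,r64 is REX.W 0F 40+cc /r. As an illustration, cmovnz %rdx,%rax assembles to 48 0F 45 C2. The old dispatch on Arm_Reg/Arm_Mem disappears because only the register form remains representable.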
Ain_XDirect, /* direct transfer to GA */
Ain_XIndir, /* indirect transfer to GA */
Ain_XAssisted, /* assisted transfer to GA */
- Ain_CMov64, /* conditional move */
+ Ain_CMov64, /* conditional move, 64-bit reg-reg only */
Ain_CLoad, /* cond. load to int reg, 32 bit ZX or 64 bit only */
Ain_MovxLQ, /* reg-reg move, zx-ing/sx-ing top half */
Ain_LoadEX, /* mov{s,z}{b,w,l}q from mem to reg */
be the bogus Acc_ALWAYS. */
struct {
AMD64CondCode cond;
- AMD64RM* src;
+ HReg src;
HReg dst;
} CMov64;
/* conditional load to int reg, 32 bit ZX or 64 bit only.
AMD64CondCode cond );
extern AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
AMD64CondCode cond, IRJumpKind jk );
-extern AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode, AMD64RM* src, HReg dst );
+extern AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_CLoad ( AMD64CondCode cond, UChar szB,
AMD64AMode* addr, HReg dst );
extern AMD64Instr* AMD64Instr_MovxLQ ( Bool syned, HReg src, HReg dst );
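For instruction-selection code that previously handed CMov64 a memory operand, the value now has to be brought into a register first. A minimal sketch of such a rewrite, assuming the usual isel helpers (newVRegI, addInstr, AMD64Instr_Alu64R with Aalu_MOV) and pre-existing locals am, cc and dst; this is illustrative, not a verbatim call site from the patch:

   HReg tmp = newVRegI(env);               /* fresh 64-bit virtual register */
   /* unconditional 64-bit load from the amode into tmp */
   addInstr(env, AMD64Instr_Alu64R(Aalu_MOV, AMD64RMI_Mem(am), tmp));
   /* reg-reg conditional move, using the new constructor */
   addInstr(env, AMD64Instr_CMov64(cc, tmp, dst));

The unconditional load matches what the hardware memory form of CMOVcc did anyway (the source is read regardless of the condition); a genuinely conditional load is what the new Ain_CLoad instruction is for.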
HReg src2 = iselIntExpr_R(env, e->Iex.Binop.arg2);
addInstr(env, mk_iMOVsd_RR(src1, dst));
addInstr(env, AMD64Instr_Alu32R(Aalu_CMP, AMD64RMI_Reg(src2), dst));
- addInstr(env, AMD64Instr_CMov64(Acc_B, AMD64RM_Reg(src2), dst));
+ addInstr(env, AMD64Instr_CMov64(Acc_B, src2, dst));
return dst;
}
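In this hunk (the unsigned-max case) dst starts as a copy of src1; the 32-bit CMP sets flags for dst - src2, and Acc_B (unsigned below) then overwrites dst with src2 exactly when src1 < src2, i.e. dst = (UInt)src1 < (UInt)src2 ? src2 : src1. Only the final addInstr changes: src2 is passed directly instead of being wrapped in AMD64RM_Reg.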
if ((ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
&& typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
HReg r1 = iselIntExpr_R(env, e->Iex.ITE.iftrue);
- AMD64RM* r0 = iselIntExpr_RM(env, e->Iex.ITE.iffalse);
+ HReg r0 = iselIntExpr_R(env, e->Iex.ITE.iffalse);
HReg dst = newVRegI(env);
addInstr(env, mk_iMOVsd_RR(r1,dst));
AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
default: goto unhandled_cas;
}
addInstr(env, AMD64Instr_ACAS(am, sz));
- addInstr(env, AMD64Instr_CMov64(
- Acc_NZ, AMD64RM_Reg(hregAMD64_RAX()), rOld));
+ addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOld));
return;
} else {
/* double CAS */
addInstr(env, mk_iMOVsd_RR(rDataHi, hregAMD64_RCX()));
addInstr(env, mk_iMOVsd_RR(rDataLo, hregAMD64_RBX()));
addInstr(env, AMD64Instr_DACAS(am, sz));
- addInstr(env,
- AMD64Instr_CMov64(
- Acc_NZ, AMD64RM_Reg(hregAMD64_RDX()), rOldHi));
- addInstr(env,
- AMD64Instr_CMov64(
- Acc_NZ, AMD64RM_Reg(hregAMD64_RAX()), rOldLo));
+ addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RDX(), rOldHi));
+ addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOldLo));
return;
}
unhandled_cas:
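In both CAS shapes the pattern is the same: lock cmpxchg sets ZF on success and, on failure, leaves the value actually found in memory in %rax (or %rdx:%rax for the double-width case). The Acc_NZ conditional moves therefore copy the observed value into rOld / rOldHi / rOldLo only when the compare failed; on success those registers already hold the expected value (set up earlier in this function), which is by definition what memory contained. The only change here is that the sources are passed as bare HRegs rather than AMD64RM_Reg-wrapped ones.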