From: Julian Seward
Date: Tue, 27 Jan 2015 23:35:58 +0000 (+0000)
Subject: Change AMD64Instr_CMov64 so that the source can only be a register
X-Git-Tag: svn/VALGRIND_3_11_0^2~106
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=e7fb75b92d48f84a3c0f4a629a69c9f270dbb535;p=thirdparty%2Fvalgrind.git

Change AMD64Instr_CMov64 so that the source can only be a register
instead of register-or-memory (an AMD64RM).  This avoids duplicating
conditional load functionality introduced in r3075 via AMD64Instr_CLoad
and in practice has no effect on the quality of the generated code.

git-svn-id: svn://svn.valgrind.org/vex/trunk@3076
---

diff --git a/VEX/priv/host_amd64_defs.c b/VEX/priv/host_amd64_defs.c
index 13925fab3f..fdbf05e455 100644
--- a/VEX/priv/host_amd64_defs.c
+++ b/VEX/priv/host_amd64_defs.c
@@ -736,7 +736,7 @@ AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
    return i;
 }
 
-AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, AMD64RM* src, HReg dst ) {
+AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, HReg src, HReg dst ) {
    AMD64Instr* i      = LibVEX_Alloc(sizeof(AMD64Instr));
    i->tag             = Ain_CMov64;
    i->Ain.CMov64.cond = cond;
@@ -1128,7 +1128,7 @@ void ppAMD64Instr ( const AMD64Instr* i, Bool mode64 )
       case Ain_CMov64:
          vex_printf("cmov%s ", showAMD64CondCode(i->Ain.CMov64.cond));
-         ppAMD64RM(i->Ain.CMov64.src);
+         ppHRegAMD64(i->Ain.CMov64.src);
          vex_printf(",");
          ppHRegAMD64(i->Ain.CMov64.dst);
          return;
@@ -1481,7 +1481,7 @@ void getRegUsage_AMD64Instr ( HRegUsage* u, const AMD64Instr* i, Bool mode64 )
          addRegUsage_AMD64AMode(u, i->Ain.XAssisted.amRIP);
          return;
       case Ain_CMov64:
-         addRegUsage_AMD64RM(u, i->Ain.CMov64.src, HRmRead);
+         addHRegUse(u, HRmRead, i->Ain.CMov64.src);
          addHRegUse(u, HRmModify, i->Ain.CMov64.dst);
          return;
       case Ain_CLoad:
@@ -1717,7 +1717,7 @@ void mapRegs_AMD64Instr ( HRegRemap* m, AMD64Instr* i, Bool mode64 )
          mapRegs_AMD64AMode(m, i->Ain.XAssisted.amRIP);
          return;
       case Ain_CMov64:
-         mapRegs_AMD64RM(m, i->Ain.CMov64.src);
+         mapReg(m, &i->Ain.CMov64.src);
          mapReg(m, &i->Ain.CMov64.dst);
          return;
       case Ain_CLoad:
@@ -3000,21 +3000,11 @@ Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
 
    case Ain_CMov64:
       vassert(i->Ain.CMov64.cond != Acc_ALWAYS);
-      if (i->Ain.CMov64.src->tag == Arm_Reg) {
-         *p++ = rexAMode_R(i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Reg.reg);
-         *p++ = 0x0F;
-         *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
-         p = doAMode_R(p, i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Reg.reg);
-         goto done;
-      }
-      if (i->Ain.CMov64.src->tag == Arm_Mem) {
-         *p++ = rexAMode_M(i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Mem.am);
-         *p++ = 0x0F;
-         *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
-         p = doAMode_M(p, i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Mem.am);
-         goto done;
-      }
-      break;
+      *p++ = rexAMode_R(i->Ain.CMov64.dst, i->Ain.CMov64.src);
+      *p++ = 0x0F;
+      *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
+      p = doAMode_R(p, i->Ain.CMov64.dst, i->Ain.CMov64.src);
+      goto done;
 
    case Ain_CLoad: {
       vassert(i->Ain.CLoad.cond != Acc_ALWAYS);
diff --git a/VEX/priv/host_amd64_defs.h b/VEX/priv/host_amd64_defs.h
index 02c89e2434..6ebe9b628e 100644
--- a/VEX/priv/host_amd64_defs.h
+++ b/VEX/priv/host_amd64_defs.h
@@ -367,7 +367,7 @@ typedef
       Ain_XDirect,     /* direct transfer to GA */
       Ain_XIndir,      /* indirect transfer to GA */
       Ain_XAssisted,   /* assisted transfer to GA */
-      Ain_CMov64,      /* conditional move */
+      Ain_CMov64,      /* conditional move, 64-bit reg-reg only */
       Ain_CLoad,       /* cond. load to int reg, 32 bit ZX or 64 bit only */
       Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
       Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
@@ -503,7 +503,7 @@ typedef
          be the bogus Acc_ALWAYS. */
       struct {
          AMD64CondCode cond;
-         AMD64RM*      src;
+         HReg          src;
          HReg          dst;
       } CMov64;
       /* conditional load to int reg, 32 bit ZX or 64 bit only.
@@ -718,7 +718,7 @@ extern AMD64Instr* AMD64Instr_XIndir ( HReg dstGA, AMD64AMode* amRIP,
                                        AMD64CondCode cond );
 extern AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, IRJumpKind jk );
-extern AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode, AMD64RM* src, HReg dst );
+extern AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode, HReg src, HReg dst );
 extern AMD64Instr* AMD64Instr_CLoad ( AMD64CondCode cond, UChar szB,
                                       AMD64AMode* addr, HReg dst );
 extern AMD64Instr* AMD64Instr_MovxLQ ( Bool syned, HReg src, HReg dst );
diff --git a/VEX/priv/host_amd64_isel.c b/VEX/priv/host_amd64_isel.c
index 792629455e..999ce955dd 100644
--- a/VEX/priv/host_amd64_isel.c
+++ b/VEX/priv/host_amd64_isel.c
@@ -1242,7 +1242,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
          HReg src2 = iselIntExpr_R(env, e->Iex.Binop.arg2);
          addInstr(env, mk_iMOVsd_RR(src1, dst));
          addInstr(env, AMD64Instr_Alu32R(Aalu_CMP, AMD64RMI_Reg(src2), dst));
-         addInstr(env, AMD64Instr_CMov64(Acc_B, AMD64RM_Reg(src2), dst));
+         addInstr(env, AMD64Instr_CMov64(Acc_B, src2, dst));
          return dst;
       }
@@ -1862,7 +1862,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
       if ((ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
           && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
          HReg     r1  = iselIntExpr_R(env, e->Iex.ITE.iftrue);
-         AMD64RM* r0  = iselIntExpr_RM(env, e->Iex.ITE.iffalse);
+         HReg     r0  = iselIntExpr_R(env, e->Iex.ITE.iffalse);
          HReg dst = newVRegI(env);
          addInstr(env, mk_iMOVsd_RR(r1,dst));
          AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
@@ -4650,8 +4650,7 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt )
             default: goto unhandled_cas;
          }
          addInstr(env, AMD64Instr_ACAS(am, sz));
-         addInstr(env, AMD64Instr_CMov64(
-                          Acc_NZ, AMD64RM_Reg(hregAMD64_RAX()), rOld));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOld));
          return;
       } else {
          /* double CAS */
@@ -4689,12 +4688,8 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt )
          addInstr(env, mk_iMOVsd_RR(rDataHi, hregAMD64_RCX()));
          addInstr(env, mk_iMOVsd_RR(rDataLo, hregAMD64_RBX()));
          addInstr(env, AMD64Instr_DACAS(am, sz));
-         addInstr(env,
-                  AMD64Instr_CMov64(
-                     Acc_NZ, AMD64RM_Reg(hregAMD64_RDX()), rOldHi));
-         addInstr(env,
-                  AMD64Instr_CMov64(
-                     Acc_NZ, AMD64RM_Reg(hregAMD64_RAX()), rOldLo));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RDX(), rOldHi));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOldLo));
          return;
       }
      unhandled_cas:
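
For readers skimming the patch, the net effect at a call site is just the
constructor's second argument.  A minimal before/after sketch, drawn from the
isel hunks above (env, src2 and dst stand for whatever the surrounding
instruction-selection code provides):

   /* Before: the source was an AMD64RM*, wrapping either a register
      or a memory operand. */
   addInstr(env, AMD64Instr_CMov64(Acc_B, AMD64RM_Reg(src2), dst));

   /* After: the source is a plain HReg.  Conditional loads from memory
      are expressed separately, via AMD64Instr_CLoad (r3075). */
   addInstr(env, AMD64Instr_CMov64(Acc_B, src2, dst));

In both cases the emitted instruction is a 64-bit cmovcc; dropping the
Arm_Mem arm leaves only the reg-reg encoding (0x0F, then 0x40 plus the 4-bit
condition code, then a ModRM byte), as in the emit_AMD64Instr hunk.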