Change AMD64Instr_CMov64 so that the source can only be a register
author    Julian Seward <jseward@acm.org>
          Tue, 27 Jan 2015 23:35:58 +0000 (23:35 +0000)
committer Julian Seward <jseward@acm.org>
          Tue, 27 Jan 2015 23:35:58 +0000 (23:35 +0000)
Change AMD64Instr_CMov64 so that the source can only be a register
instead of register-or-memory (an AMD64RM).  This avoids duplicating
the conditional-load functionality introduced in r3075 via
AMD64Instr_CLoad, and in practice has no effect on the quality of the
generated code.

git-svn-id: svn://svn.valgrind.org/vex/trunk@3076
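
To make the division of labour concrete: a conditional move with a memory
source is now expressed with AMD64Instr_CLoad, while CMov64 keeps only the
register-register form. A minimal sketch, assuming a hypothetical
AMD64AMode* am for the memory operand and hypothetical vregs src and dst:

    /* Before r3076: a conditional load could be folded into CMov64. */
    addInstr(env, AMD64Instr_CMov64(Acc_NZ, AMD64RM_Mem(am), dst));

    /* After r3076: the same operation uses the dedicated conditional
       load from r3075 (szB 8 = 64-bit load; 4 = 32-bit zero-extending
       load, per the comment in host_amd64_defs.h) ... */
    addInstr(env, AMD64Instr_CLoad(Acc_NZ, 8, am, dst));

    /* ... and CMov64 takes a bare HReg source. */
    addInstr(env, AMD64Instr_CMov64(Acc_NZ, src, dst));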

VEX/priv/host_amd64_defs.c
VEX/priv/host_amd64_defs.h
VEX/priv/host_amd64_isel.c

VEX/priv/host_amd64_defs.c
index 13925fab3f78bd4e89c495ca01b5dc09e79d8914..fdbf05e455c95262ec449ac986ddf7147092d24a 100644
@@ -736,7 +736,7 @@ AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
    return i;
 }
 
-AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, AMD64RM* src, HReg dst ) {
+AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, HReg src, HReg dst ) {
    AMD64Instr* i      = LibVEX_Alloc(sizeof(AMD64Instr));
    i->tag             = Ain_CMov64;
    i->Ain.CMov64.cond = cond;
@@ -1128,7 +1128,7 @@ void ppAMD64Instr ( const AMD64Instr* i, Bool mode64 )
 
       case Ain_CMov64:
          vex_printf("cmov%s ", showAMD64CondCode(i->Ain.CMov64.cond));
-         ppAMD64RM(i->Ain.CMov64.src);
+         ppHRegAMD64(i->Ain.CMov64.src);
          vex_printf(",");
          ppHRegAMD64(i->Ain.CMov64.dst);
          return;
@@ -1481,7 +1481,7 @@ void getRegUsage_AMD64Instr ( HRegUsage* u, const AMD64Instr* i, Bool mode64 )
          addRegUsage_AMD64AMode(u, i->Ain.XAssisted.amRIP);
          return;
       case Ain_CMov64:
-         addRegUsage_AMD64RM(u, i->Ain.CMov64.src, HRmRead);
+         addHRegUse(u, HRmRead,   i->Ain.CMov64.src);
          addHRegUse(u, HRmModify, i->Ain.CMov64.dst);
          return;
       case Ain_CLoad:
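
Worth noting in the hunk above: the destination is registered as HRmModify
rather than HRmWrite, because a cmov whose condition is false leaves dst
untouched, so the register allocator must treat dst as live into the
instruction as well as out of it. The source, now a plain register, needs
only a single read entry:

    /* a cmov is read-modify on dst: if cond is false, dst's old value
       survives, so dst must be live-in as well as live-out */
    addHRegUse(u, HRmRead,   i->Ain.CMov64.src);
    addHRegUse(u, HRmModify, i->Ain.CMov64.dst);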
@@ -1717,7 +1717,7 @@ void mapRegs_AMD64Instr ( HRegRemap* m, AMD64Instr* i, Bool mode64 )
          mapRegs_AMD64AMode(m, i->Ain.XAssisted.amRIP);
          return;
       case Ain_CMov64:
-         mapRegs_AMD64RM(m, i->Ain.CMov64.src);
+         mapReg(m, &i->Ain.CMov64.src);
          mapReg(m, &i->Ain.CMov64.dst);
          return;
       case Ain_CLoad:
@@ -3000,21 +3000,11 @@ Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
 
    case Ain_CMov64:
       vassert(i->Ain.CMov64.cond != Acc_ALWAYS);
-      if (i->Ain.CMov64.src->tag == Arm_Reg) {
-         *p++ = rexAMode_R(i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Reg.reg);
-         *p++ = 0x0F;
-         *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
-         p = doAMode_R(p, i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Reg.reg);
-         goto done;
-      }
-      if (i->Ain.CMov64.src->tag == Arm_Mem) {
-         *p++ = rexAMode_M(i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Mem.am);
-         *p++ = 0x0F;
-         *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
-         p = doAMode_M(p, i->Ain.CMov64.dst, i->Ain.CMov64.src->Arm.Mem.am);
-         goto done;
-      }
-      break;
+      *p++ = rexAMode_R(i->Ain.CMov64.dst, i->Ain.CMov64.src);
+      *p++ = 0x0F;
+      *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
+      p = doAMode_R(p, i->Ain.CMov64.dst, i->Ain.CMov64.src);
+      goto done;
 
    case Ain_CLoad: {
       vassert(i->Ain.CLoad.cond != Acc_ALWAYS);
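
The emitter collapse above is possible because the reg-reg form has a single
encoding path: a REX.W prefix, then 0F 40+cc, then a mod=11 ModRM byte with
dst in the reg field and src in r/m (which is what rexAMode_R/doAMode_R
produce). A worked byte sequence, as a standalone illustration rather than
VEX code:

    /* illustrative only: encode "cmovb %rsi,%rdi" (dst=rdi, src=rsi) */
    unsigned char p[4];
    p[0] = 0x48;                 /* REX.W: 64-bit operands, no ext bits */
    p[1] = 0x0F;
    p[2] = 0x40 + 2;             /* 0x42: cc 2 = B (below) */
    p[3] = 0xC0 | (7 << 3) | 6;  /* 0xFE: mod=11, reg=rdi(7), rm=rsi(6) */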
VEX/priv/host_amd64_defs.h
index 02c89e2434590acc6026d182aa2a7077dd35e2c4..6ebe9b628e909ce0e05e7ade4d255a5f0b3d7fb5 100644
@@ -367,7 +367,7 @@ typedef
       Ain_XDirect,     /* direct transfer to GA */
       Ain_XIndir,      /* indirect transfer to GA */
       Ain_XAssisted,   /* assisted transfer to GA */
-      Ain_CMov64,      /* conditional move */
+      Ain_CMov64,      /* conditional move, 64-bit reg-reg only */
       Ain_CLoad,       /* cond. load to int reg, 32 bit ZX or 64 bit only */
       Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
       Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
@@ -503,7 +503,7 @@ typedef
             be the bogus Acc_ALWAYS. */
          struct {
             AMD64CondCode cond;
-            AMD64RM*      src;
+            HReg          src;
             HReg          dst;
          } CMov64;
          /* conditional load to int reg, 32 bit ZX or 64 bit only.
@@ -718,7 +718,7 @@ extern AMD64Instr* AMD64Instr_XIndir     ( HReg dstGA, AMD64AMode* amRIP,
                                            AMD64CondCode cond );
 extern AMD64Instr* AMD64Instr_XAssisted  ( HReg dstGA, AMD64AMode* amRIP,
                                            AMD64CondCode cond, IRJumpKind jk );
-extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, AMD64RM* src, HReg dst );
+extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, HReg src, HReg dst );
 extern AMD64Instr* AMD64Instr_CLoad      ( AMD64CondCode cond, UChar szB,
                                            AMD64AMode* addr, HReg dst );
 extern AMD64Instr* AMD64Instr_MovxLQ     ( Bool syned, HReg src, HReg dst );
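
The call-site change the new prototype implies is mechanical; a minimal
before/after sketch with hypothetical vregs src and dst:

    /* old signature: source wrapped in an AMD64RM */
    addInstr(env, AMD64Instr_CMov64(Acc_NZ, AMD64RM_Reg(src), dst));

    /* new signature: source is a bare HReg */
    addInstr(env, AMD64Instr_CMov64(Acc_NZ, src, dst));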
VEX/priv/host_amd64_isel.c
index 792629455e424585d030bbddced378c898b2cc96..999ce955dd5209b75d64a2195ca27b515e3e8651 100644
@@ -1242,7 +1242,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
          HReg src2 = iselIntExpr_R(env, e->Iex.Binop.arg2);
          addInstr(env, mk_iMOVsd_RR(src1, dst));
          addInstr(env, AMD64Instr_Alu32R(Aalu_CMP, AMD64RMI_Reg(src2), dst));
-         addInstr(env, AMD64Instr_CMov64(Acc_B, AMD64RM_Reg(src2), dst));
+         addInstr(env, AMD64Instr_CMov64(Acc_B, src2, dst));
          return dst;
       }
 
@@ -1862,7 +1862,7 @@ static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
       if ((ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
           && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
          HReg     r1  = iselIntExpr_R(env, e->Iex.ITE.iftrue);
-         AMD64RM* r0  = iselIntExpr_RM(env, e->Iex.ITE.iffalse);
+         HReg     r0  = iselIntExpr_R(env, e->Iex.ITE.iffalse);
          HReg     dst = newVRegI(env);
          addInstr(env, mk_iMOVsd_RR(r1,dst));
          AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
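
The hunk above ends just before the conditional move is emitted; for
context, the complete ITE lowering now reads roughly as follows. This is a
sketch: the final line assumes the usual x86 trick that flipping the low
bit of a condition code negates it, so the iffalse value is moved in only
when the condition is false (the actual cc handling is in surrounding code
not shown in this hunk):

    HReg     r1  = iselIntExpr_R(env, e->Iex.ITE.iftrue);
    HReg     r0  = iselIntExpr_R(env, e->Iex.ITE.iffalse);
    HReg     dst = newVRegI(env);
    addInstr(env, mk_iMOVsd_RR(r1, dst));               /* dst = iftrue */
    AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
    addInstr(env, AMD64Instr_CMov64(cc ^ 1, r0, dst));  /* if !cond: dst = iffalse */
    return dst;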
@@ -4650,8 +4650,7 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt )
             default: goto unhandled_cas;
          }
          addInstr(env, AMD64Instr_ACAS(am, sz));
-         addInstr(env, AMD64Instr_CMov64(
-                          Acc_NZ, AMD64RM_Reg(hregAMD64_RAX()), rOld));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOld));
          return;
       } else {
          /* double CAS */
@@ -4689,12 +4688,8 @@ static void iselStmt ( ISelEnv* env, IRStmt* stmt )
          addInstr(env, mk_iMOVsd_RR(rDataHi, hregAMD64_RCX()));
          addInstr(env, mk_iMOVsd_RR(rDataLo, hregAMD64_RBX()));
          addInstr(env, AMD64Instr_DACAS(am, sz));
-         addInstr(env,
-                  AMD64Instr_CMov64(
-                     Acc_NZ, AMD64RM_Reg(hregAMD64_RDX()), rOldHi));
-         addInstr(env,
-                  AMD64Instr_CMov64(
-                     Acc_NZ, AMD64RM_Reg(hregAMD64_RAX()), rOldLo));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RDX(), rOldHi));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOldLo));
          return;
       }
       unhandled_cas:
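
Finally, a note on the CAS hunks: lock cmpxchg sets ZF on success and, on
failure, leaves the value actually observed in memory in RAX (RDX:RAX for
the double-width form). Assuming the surrounding code, not shown here,
preloads rOld with the expected value, the Acc_NZ conditional move patches
in the observed value only on the failure path:

    addInstr(env, AMD64Instr_ACAS(am, sz));
    /* ZF set   -> CAS succeeded; rOld already holds the expected value,
                   which is exactly what memory contained.
       ZF clear -> CAS failed; the observed value is in RAX and must
                   become the IR-visible "old" result. */
    addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOld));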