Performance improvements for flag handling.
author Julian Seward <jseward@acm.org>
Mon, 26 Dec 2005 19:33:55 +0000 (19:33 +0000)
committer Julian Seward <jseward@acm.org>
Mon, 26 Dec 2005 19:33:55 +0000 (19:33 +0000)
git-svn-id: svn://svn.valgrind.org/vex/trunk@1513

VEX/priv/guest-amd64/ghelpers.c
VEX/priv/host-amd64/isel.c
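
In outline: guest_amd64_spechelper gains rules that fold the common sub/cmp-then-Jcc flag-thunk patterns (AMD64G_CC_OP_SUBQ and AMD64G_CC_OP_SUBL followed by CondZ, CondNZ, CondNB or CondBE) directly into single IR comparisons instead of leaving calls to the generic flag-evaluation helper, and iselCondCode_wrk in the AMD64 instruction selector is taught to lower the newly produced Iop_CmpLE64U. As a rough illustration only (not part of the patch, and the compiler output is an assumption), guest source like the following typically becomes a cmpq/setbe or cmpq/jbe pair, i.e. a SUBQ flags thunk consumed under AMD64CondBE, which the new rule turns into a single Iop_CmpLE64U:

   /* illustrative guest code, assumed to compile to cmpq + setbe/jbe */
   int below_or_equal ( unsigned long a, unsigned long b )
   {
      return a <= b;   /* unsigned <=  ->  SUBQ thunk, then CondBE */
   }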

diff --git a/VEX/priv/guest-amd64/ghelpers.c b/VEX/priv/guest-amd64/ghelpers.c
index 7514a5dcc1de18f7a924bf79ad1e481c2317bcda..70d0ff505fa0aca6c60af4e0c43b5ff4f9e0b288 100644
--- a/VEX/priv/guest-amd64/ghelpers.c
+++ b/VEX/priv/guest-amd64/ghelpers.c
@@ -899,6 +899,18 @@ IRExpr* guest_amd64_spechelper ( HChar* function_name,
 
       /*---------------- SUBQ ----------------*/
 
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondZ)) {
+         /* long long sub/cmp, then Z --> test dst==src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ64,cc_dep1,cc_dep2));
+      }
+
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNZ)) {
+         /* long long sub/cmp, then NZ --> test dst!=src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE64,cc_dep1,cc_dep2));
+      }
+
       if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondL)) {
          /* long long sub/cmp, then L (signed less than) 
             --> test dst <s src */
@@ -913,14 +925,37 @@ IRExpr* guest_amd64_spechelper ( HChar* function_name,
                      binop(Iop_CmpLT64U, cc_dep1, cc_dep2));
       }
 
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNB)) {
+         /* long long sub/cmp, then NB (unsigned greater than or equal)
+            --> test src <=u dst */
+         /* Note, args are opposite way round from the usual */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64U, cc_dep2, cc_dep1));
+      }
+
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondBE)) {
+         /* long long sub/cmp, then BE (unsigned less than or equal)
+            --> test dst <=u src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64U, cc_dep1, cc_dep2));
+      }
+
       /*---------------- SUBL ----------------*/
 
       if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondZ)) {
          /* long sub/cmp, then Z --> test dst==src */
          return unop(Iop_1Uto64,
-                     binop(Iop_CmpEQ32, 
-                           unop(Iop_64to32,cc_dep1), 
-                           unop(Iop_64to32,cc_dep2)));
+                     binop(Iop_CmpEQ64, 
+                           binop(Iop_Shl64,cc_dep1,mkU8(32)),
+                           binop(Iop_Shl64,cc_dep2,mkU8(32))));
+      }
+
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondNZ)) {
+         /* long sub/cmp, then NZ --> test dst!=src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE64, 
+                           binop(Iop_Shl64,cc_dep1,mkU8(32)),
+                           binop(Iop_Shl64,cc_dep2,mkU8(32))));
       }
 
 //..       if (isU32(cc_op, AMD64G_CC_OP_SUBL) && isU32(cond, X86CondNZ)) {
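
The SUBL rules just above compare the operands shifted left by 32 bits (Iop_Shl64 by mkU8(32)) instead of truncating them with Iop_64to32: only the low 32 bits of cc_dep1/cc_dep2 matter for a 32-bit sub/cmp, and the shifted form keeps the test in the 64-bit comparison shapes handled by the cmp-based path in iselCondCode_wrk (the unsigned-<= case is what the isel.c hunk below enables). A standalone check of the Z/NZ equivalence, illustrative only and not part of the patch:

   #include <assert.h>
   #include <stdint.h>

   /* old specialisation: compare the truncated low halves */
   static int eq_via_trunc ( uint64_t a, uint64_t b )
   {
      return (uint32_t)a == (uint32_t)b;     /* Iop_CmpEQ32 on Iop_64to32 */
   }

   /* new specialisation: compare the operands shifted left by 32 */
   static int eq_via_shift ( uint64_t a, uint64_t b )
   {
      return (a << 32) == (b << 32);         /* Iop_CmpEQ64 on Iop_Shl64(.,32) */
   }

   int main ( void )
   {
      uint64_t t[] = { 0, 1, 0xFFFFFFFFULL, 0x100000000ULL,
                       0x1FFFFFFFFULL, 0xDEADBEEFCAFEBABEULL };
      for (unsigned i = 0; i < sizeof t / sizeof t[0]; i++)
         for (unsigned j = 0; j < sizeof t / sizeof t[0]; j++)
            assert( eq_via_trunc(t[i], t[j]) == eq_via_shift(t[i], t[j]) );
      return 0;
   }
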
@@ -936,7 +971,6 @@ IRExpr* guest_amd64_spechelper ( HChar* function_name,
                      binop(Iop_CmpLT64S, 
                            binop(Iop_Shl64,cc_dep1,mkU8(32)),
                            binop(Iop_Shl64,cc_dep2,mkU8(32))));
-
       }
 
       if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondLE)) {
@@ -949,14 +983,15 @@ IRExpr* guest_amd64_spechelper ( HChar* function_name,
 
       }
 
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondBE)) {
+         /* long sub/cmp, then BE (unsigned less than or equal)
+            --> test dst <=u src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64U, 
+                           binop(Iop_Shl64,cc_dep1,mkU8(32)),
+                           binop(Iop_Shl64,cc_dep2,mkU8(32))));
+      }
 
-//..       if (isU32(cc_op, AMD64G_CC_OP_SUBL) && isU32(cond, X86CondBE)) {
-//..          /* long sub/cmp, then BE (unsigned less than or equal)
-//..             --> test dst <=u src */
-//..          return unop(Iop_1Uto32,
-//..                      binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
-//..       }
-//.. 
 //..       if (isU32(cc_op, AMD64G_CC_OP_SUBL) && isU32(cond, X86CondB)) {
 //..          /* long sub/cmp, then B (unsigned less than)
 //..             --> test dst <u src */
@@ -1005,7 +1040,7 @@ IRExpr* guest_amd64_spechelper ( HChar* function_name,
 
 //..       if (isU32(cc_op, AMD64G_CC_OP_SUBB) && isU32(cond, X86CondNBE)) {
 //..          /* long sub/cmp, then NBE (unsigned greater than)
-//..             --> test src <=u dst */
+//..             --> test src <u dst */
 //..          /* Note, args are opposite way round from the usual */
 //..          return unop(Iop_1Uto32,
 //..                      binop(Iop_CmpLT32U, 
diff --git a/VEX/priv/host-amd64/isel.c b/VEX/priv/host-amd64/isel.c
index 71e9b34ceb0d8cfeac42a026e8dc6ef2abf04020..63ea2c1bfdd7f26abaafc5c7b9e23a26ccc2ccf5 100644
--- a/VEX/priv/host-amd64/isel.c
+++ b/VEX/priv/host-amd64/isel.c
@@ -2115,7 +2115,7 @@ static AMD64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
            || e->Iex.Binop.op == Iop_CmpLT64S
            || e->Iex.Binop.op == Iop_CmpLT64U
            || e->Iex.Binop.op == Iop_CmpLE64S
-           //|| e->Iex.Binop.op == Iop_CmpLE64U
+           || e->Iex.Binop.op == Iop_CmpLE64U
           )) {
       HReg      r1   = iselIntExpr_R(env, e->Iex.Binop.arg1);
       AMD64RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
@@ -2126,7 +2126,7 @@ static AMD64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
         case Iop_CmpLT64S: return Acc_L;
         case Iop_CmpLT64U: return Acc_B;
         case Iop_CmpLE64S: return Acc_LE;
-          //case Iop_CmpLE64U: return Acc_BE;
+         case Iop_CmpLE64U: return Acc_BE;
          default: vpanic("iselCondCode(amd64): CmpXX64");
       }
    }
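
Enabling Iop_CmpLE64U here is the back-end half of the change: the cmp-based path in iselCondCode_wrk now accepts an unsigned below-or-equal comparison and reads its result through Acc_BE (the AMD64 "below or equal" condition, CF or ZF), so the new CmpLE64U-producing specialisations in ghelpers.c have a direct lowering rather than falling outside this pattern.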