Memcheck on amd64: fix false positive associated with spec cases {Z,NZ} after {LOGICB,LOGICW}
author    Julian Seward <jseward@acm.org>
          Sun, 30 Sep 2018 07:29:43 +0000 (09:29 +0200)
committer Julian Seward <jseward@acm.org>
          Sun, 30 Sep 2018 07:29:43 +0000 (09:29 +0200)
For the spec cases {Z,NZ} after {LOGICB,LOGICW}, which are simply comparisons
of the result against zero, use Cmp{EQ,NE}32 rather than their 64-bit
counterparts.  This is because Memcheck on amd64 instruments the 32-bit
versions exactly at the default --expensive-definedness-checks=auto setting.
The alternative would have been to make Memcheck do exact instrumentation of
the 64-bit versions as well, but that would burden all other 64-bit eq/ne
comparisons with the same cost to no benefit.  So this is the cheaper
solution.
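
The class of false positive this removes is easiest to see with a partially
defined byte.  The following is a minimal C sketch of the affected pattern
when run under Memcheck; it is an illustration only, not a test case from
this commit, and whether a compiler actually lowers the comparison to a
byte-sized LOGICB/SUBB-plus-CondZ sequence depends on its code generation:

    /* Minimal sketch, illustrative only (not from the commit). */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        unsigned char *p = malloc(1);   /* *p starts out undefined */
        if (p == NULL) return 1;
        *p |= 0x01;  /* bit 0 is now a defined 1; bits 1..7 stay undefined */

        /* A byte-sized test of *p against zero followed by a Z-conditional
           branch has a knowable outcome: the defined 1 bit differs from
           zero, so the comparison is definitely false.  Exact definedness
           instrumentation of the narrowed 32-bit compare proves that; the
           cheap instrumentation of the old 64-bit compare instead reported
           "Conditional jump or move depends on uninitialised value(s)". */
        if (*p == 0)
            printf("zero\n");
        else
            printf("nonzero\n");

        free(p);
        return 0;
    }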

VEX/priv/guest_amd64_helpers.c

index a53419aaad8a81c393d8f0b7042181e5779318ca..a2b0789bb435e8260bb1d71f061c196a7e0800a2 100644
@@ -1626,32 +1626,42 @@ IRExpr* guest_amd64_spechelper ( const HChar* function_name,
 
       if (isU64(cc_op, AMD64G_CC_OP_LOGICW) && isU64(cond, AMD64CondZ)) {
          /* word and/or/xor, then Z --> test dst==0 */
+         // Use CmpEQ32 rather than CmpEQ64 here, so that Memcheck instruments
+         // it exactly at EdcAUTO.
          return unop(Iop_1Uto64,
-                     binop(Iop_CmpEQ64,
-                           binop(Iop_And64, cc_dep1, mkU64(0xFFFF)),
-                           mkU64(0)));
+                     binop(Iop_CmpEQ32,
+                           unop(Iop_16Uto32, unop(Iop_64to16, cc_dep1)),
+                           mkU32(0)));
       }
       if (isU64(cc_op, AMD64G_CC_OP_LOGICW) && isU64(cond, AMD64CondNZ)) {
          /* word and/or/xor, then NZ --> test dst!=0 */
+         // Use CmpNE32 rather than CmpNE64 here, so that Memcheck instruments
+         // it exactly at EdcAUTO.
          return unop(Iop_1Uto64,
-                     binop(Iop_CmpNE64,
-                           binop(Iop_And64, cc_dep1, mkU64(0xFFFF)),
-                           mkU64(0)));
+                     binop(Iop_CmpNE32,
+                           unop(Iop_16Uto32, unop(Iop_64to16, cc_dep1)),
+                           mkU32(0)));
       }
 
       /*---------------- LOGICB ----------------*/
 
       if (isU64(cc_op, AMD64G_CC_OP_LOGICB) && isU64(cond, AMD64CondZ)) {
          /* byte and/or/xor, then Z --> test dst==0 */
+         // Use CmpEQ32 rather than CmpEQ64 here, so that Memcheck instruments
+         // it exactly at EdcAUTO.
          return unop(Iop_1Uto64,
-                     binop(Iop_CmpEQ64, binop(Iop_And64,cc_dep1,mkU64(255)), 
-                                        mkU64(0)));
+                     binop(Iop_CmpEQ32,
+                           unop(Iop_8Uto32, unop(Iop_64to8, cc_dep1)),
+                           mkU32(0)));
       }
       if (isU64(cc_op, AMD64G_CC_OP_LOGICB) && isU64(cond, AMD64CondNZ)) {
          /* byte and/or/xor, then NZ --> test dst!=0 */
+         // Use CmpNE32 rather than CmpNE64 here, so that Memcheck instruments
+         // it exactly at EdcAUTO.
          return unop(Iop_1Uto64,
-                     binop(Iop_CmpNE64, binop(Iop_And64,cc_dep1,mkU64(255)), 
-                                        mkU64(0)));
+                     binop(Iop_CmpNE32,
+                           unop(Iop_8Uto32, unop(Iop_64to8, cc_dep1)),
+                           mkU32(0)));
       }
 
       if (isU64(cc_op, AMD64G_CC_OP_LOGICB) && isU64(cond, AMD64CondS)) {