From: Julian Seward
Date: Thu, 25 May 2006 18:48:12 +0000 (+0000)
Subject: Specialisation rule which reduces memcheck false error rate for
X-Git-Tag: svn/VALGRIND_3_2_3^2~52
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=31d7ca2e220ae59b75c766a6ce347dd2d1157c22;p=thirdparty%2Fvalgrind.git

Specialisation rule which reduces memcheck false error rate for
KDE on SuSE 10.1 (amd64).

git-svn-id: svn://svn.valgrind.org/vex/trunk@1626
---

diff --git a/VEX/priv/guest-amd64/ghelpers.c b/VEX/priv/guest-amd64/ghelpers.c
index 24c96ff227..47c9ea4286 100644
--- a/VEX/priv/guest-amd64/ghelpers.c
+++ b/VEX/priv/guest-amd64/ghelpers.c
@@ -1010,7 +1010,7 @@ IRExpr* guest_amd64_spechelper ( HChar* function_name,
       }
 
       if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondLE)) {
-         /* 16-bit sub/cmp, then LE (signed less than or equal)
+         /* word sub/cmp, then LE (signed less than or equal)
             --> test dst <=s src */
          return unop(Iop_1Uto64,
                      binop(Iop_CmpLE64S,
@@ -1029,6 +1029,21 @@ IRExpr* guest_amd64_spechelper ( HChar* function_name,
                            unop(Iop_64to8,cc_dep2)));
       }
 
+      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondS)
+                                          && isU64(cc_dep2, 0)) {
+         /* byte sub/cmp of zero, then S --> test (dst-0 <s 0)
+                                         --> test dst <s 0
+                                         --> (ULong)dst[7]
+            This is yet another scheme by which gcc figures out if the
+            top bit of a byte is 1 or 0.  See also LOGICB/CondS below. */
+         /* Note: isU64(cc_dep2, 0) is correct, even though this is
+            for an 8-bit comparison, since the args to the helper
+            function are always U64s. */
+         return binop(Iop_And64,
+                      binop(Iop_Shr64,cc_dep1,mkU8(7)),
+                      mkU64(1));
+      }
+
       //      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondNZ)) {
       //         /* byte sub/cmp, then NZ --> test dst!=src */
       //         return unop(Iop_32Uto64,