From: Julian Seward Date: Sun, 13 Oct 2002 01:18:49 +0000 (+0000) Subject: merge revs: X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=77800209d67a603f880a2d8fac2f2171a035bb36;p=thirdparty%2Fvalgrind.git merge revs: memcheck/mc_include.h,1.5,1.6 memcheck/mc_main.c,1.14,1.15 memcheck/mc_translate.c,1.9,1.10 memcheck/docs manual.html,1.37,1.38 which are: Incorporate horrible hack to workaround problem of emitting bogus uninit-value errors on code with inlined strlen() et al from gcc-3.1 and above. git-svn-id: svn://svn.valgrind.org/valgrind/branches/VALGRIND_1_0_BRANCH@1214 --- diff --git a/docs/manual.html b/docs/manual.html index bb95561ca4..36c74c7b8c 100644 --- a/docs/manual.html +++ b/docs/manual.html @@ -592,6 +592,20 @@ follows: from becoming a huge performance overhead in programs with many errors.

+

  • --avoid-strlen-errors=yes [default]
    + --avoid-strlen-errors=no

    When enabled, valgrind + inspects each basic block it instruments for some tell-tale + literals (0xFEFEFEFF, 0x80808080, 0x00008080) which suggest + that this block is part of an inlined strlen() function. In + many cases such functions cause spurious uninitialised-value + errors to be reported -- their code is too clever for the + instrumentation scheme. This horrible hack works around the + problem, at the expense of hiding any genuine + uninitialised-value errors which might appear in such blocks. + It is enabled by default because it is needed to get sensible + behaviour on code compiled by gcc-3.1 and above. +


  • +

  • --cachesim=no [default]
    --cachesim=yes

    When enabled, turns off memory checking, and turns on cache profiling. Cache profiling is diff --git a/valgrind.in b/valgrind.in index 27c2c5eb53..b0082fb32e 100755 --- a/valgrind.in +++ b/valgrind.in @@ -74,6 +74,8 @@ do --D1=*,*,*) vgopts="$vgopts $arg"; shift;; --L2=*,*,*) vgopts="$vgopts $arg"; shift;; --weird-hacks=*) vgopts="$vgopts $arg"; shift;; + --avoid-strlen-errors=no) vgopts="$vgopts $arg"; shift;; + --avoid-strlen-errors=yes) vgopts="$vgopts $arg"; shift;; # options for debugging Valgrind --sanity-level=*) vgopts="$vgopts $arg"; shift;; --single-step=yes) vgopts="$vgopts $arg"; shift;; @@ -138,6 +140,7 @@ if [ $# = 0 ] || [ z"$dousage" = z1 ]; then echo " --check-addrVs=no|yes experimental lighterweight checking? [yes]" echo " yes == Valgrind's original behaviour" echo " --cachesim=no|yes do cache profiling? [no]" + echo " --avoid-strlen-errors=no|yes suppress errs from inlined strlen? [yes]\n" echo " --I1=,, set I1 cache manually" echo " --D1=,, set D1 cache manually" echo " --L2=,, set L2 cache manually" diff --git a/vg_include.h b/vg_include.h index cf7d37a673..e8ad84a185 100644 --- a/vg_include.h +++ b/vg_include.h @@ -270,6 +270,9 @@ extern Bool VG_(clo_optimise); extern Bool VG_(clo_instrument); /* DEBUG: clean up instrumented code? default: YES */ extern Bool VG_(clo_cleanup); +/* When instrumenting, omit some checks if tell-tale literals for + inlined strlen() are visible in the basic block. default: YES */ +extern Bool VG_(clo_avoid_strlen_errors); /* Cache simulation instrumentation? default: NO */ extern Bool VG_(clo_cachesim); /* I1 cache configuration. 
default: undefined */ diff --git a/vg_main.c b/vg_main.c index cfe618185f..66a857e5c8 100644 --- a/vg_main.c +++ b/vg_main.c @@ -438,6 +438,7 @@ Bool VG_(clo_single_step); Bool VG_(clo_optimise); Bool VG_(clo_instrument); Bool VG_(clo_cleanup); +Bool VG_(clo_avoid_strlen_errors); Bool VG_(clo_cachesim); cache_t VG_(clo_I1_cache); cache_t VG_(clo_D1_cache); @@ -577,6 +578,7 @@ static void process_cmd_line_options ( void ) VG_(clo_D1_cache) = UNDEFINED_CACHE; VG_(clo_L2_cache) = UNDEFINED_CACHE; VG_(clo_cleanup) = True; + VG_(clo_avoid_strlen_errors) = True; VG_(clo_smc_check) = VG_CLO_SMC_NONE; /* note UNUSED ! */ VG_(clo_trace_syscalls) = False; VG_(clo_trace_signals) = False; @@ -849,6 +851,11 @@ static void process_cmd_line_options ( void ) else if (STREQ(argv[i], "--cleanup=no")) VG_(clo_cleanup) = False; + else if (STREQ(argv[i], "--avoid-strlen-errors=yes")) + VG_(clo_avoid_strlen_errors) = True; + else if (STREQ(argv[i], "--avoid-strlen-errors=no")) + VG_(clo_avoid_strlen_errors) = False; + else if (STREQ(argv[i], "--cachesim=yes")) VG_(clo_cachesim) = True; else if (STREQ(argv[i], "--cachesim=no")) diff --git a/vg_translate.c b/vg_translate.c index 8c798763b8..7964f44293 100644 --- a/vg_translate.c +++ b/vg_translate.c @@ -2107,9 +2107,43 @@ static UCodeBlock* vg_instrument ( UCodeBlock* cb_in ) Int i, j; UInstr* u_in; Int qs, qd, qt, qtt; + Bool bogusLiterals; cb = VG_(allocCodeBlock)(); cb->nextTemp = cb_in->nextTemp; + /* Scan the block to look for bogus literals. These are magic + numbers which particularly appear in hand-optimised / inlined + implementations of strlen() et al which cause so much trouble + (spurious reports of uninit-var uses). Purpose of this horrible + hack is to disable some checks any such literals are present in + this basic block. 
*/ + bogusLiterals = False; + + if (VG_(clo_avoid_strlen_errors)) { + for (i = 0; i < cb_in->used; i++) { + u_in = &cb_in->instrs[i]; + switch (u_in->opcode) { + case ADD: case SUB: case MOV: + if (u_in->size == 4 && u_in->tag1 == Literal) + goto literal; + break; + case LEA1: + vg_assert(u_in->size == 4); + goto literal; + default: + break; + } + continue; + literal: + if (u_in->lit32 == 0xFEFEFEFF || + u_in->lit32 == 0x80808080 || + u_in->lit32 == 0x00008080) { + bogusLiterals = True; + break; + } + } + } + for (i = 0; i < cb_in->used; i++) { qs = qd = qt = qtt = INVALID_TEMPREG; u_in = &cb_in->instrs[i]; @@ -2585,7 +2619,12 @@ static UCodeBlock* vg_instrument ( UCodeBlock* cb_in ) if (u_in->cond != CondAlways) { vg_assert(u_in->flags_r != FlagsEmpty); qt = create_GETVF(cb, 0); - uInstr1(cb, TESTV, 0, TempReg, qt); + if (/* HACK */ bogusLiterals) { + if (0) + VG_(printf)("ignore TESTV due to bogus literal\n"); + } else { + uInstr1(cb, TESTV, 0, TempReg, qt); + } /* qt should never be referred to again. Nevertheless ... */ uInstr1(cb, SETV, 0, TempReg, qt);