From: Nicholas Nethercote
Date: Sun, 15 Mar 2009 23:25:38 +0000 (+0000)
Subject: Fix all the non-VEX problems identified with the Clang Static Analyzer.
X-Git-Tag: svn/VALGRIND_3_5_0~851
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=b15e3d9a45d230d80413428c4249fe58a47ca0e9;p=thirdparty%2Fvalgrind.git

Fix all the non-VEX problems identified with the Clang Static Analyzer.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@9416
---

diff --git a/cachegrind/cg_main.c b/cachegrind/cg_main.c
index cfaf53aef1..6d7ce8767d 100644
--- a/cachegrind/cg_main.c
+++ b/cachegrind/cg_main.c
@@ -1341,7 +1341,6 @@ static void fprint_CC_table_and_calc_totals(void)
          VG_(sprintf)(buf, "fn=%s\n", currFn);
          VG_(write)(fd, (void*)buf, VG_(strlen)(buf));
          distinct_fns++;
-         just_hit_a_new_file = False;
       }

       // Print the LineCC
@@ -1454,8 +1453,7 @@ static void cg_fini(Int exitcode)
    BranchCC B_total;
    ULong L2_total_m, L2_total_mr, L2_total_mw,
          L2_total, L2_total_r, L2_total_w;
-   Int l1, l2, l3, l4;
-   Int p;
+   Int l1, l2, l3;

    /* Running with both cache and branch simulation disabled is not
       allowed (checked during command line option processing). */
@@ -1466,12 +1464,13 @@
    if (VG_(clo_verbosity) == 0)
       return;

+   #define MAX(a, b) ((a) >= (b) ? (a) : (b))
+
    /* I cache results.  Use the I_refs value to determine the first column
    * width. */
    l1 = ULong_width(Ir_total.a);
-   l2 = ULong_width(Dr_total.a);
-   l3 = ULong_width(Dw_total.a);
-   l4 = ULong_width(Bc_total.b + Bi_total.b);
+   l2 = ULong_width(MAX(Dr_total.a, Bc_total.b));
+   l3 = ULong_width(MAX(Dw_total.a, Bi_total.b));

    /* Make format string, getting width right for numbers */
    VG_(sprintf)(fmt, "%%s %%,%dllu", l1);
@@ -1485,8 +1484,6 @@
    VG_UMSG(fmt, "I1 misses: ", Ir_total.m1);
    VG_UMSG(fmt, "L2i misses: ", Ir_total.m2);

-   p = 100;
-
    if (0 == Ir_total.a) Ir_total.a = 1;
    VG_(percentify)(Ir_total.m1, Ir_total.a, 2, l1+1, buf1);
    VG_UMSG("I1 miss rate: %s", buf1);
@@ -1511,8 +1508,6 @@
    VG_UMSG(fmt, "L2d misses: ",
                 D_total.m2, Dr_total.m2, Dw_total.m2);

-   p = 10;
-
    if (0 == D_total.a) D_total.a = 1;
    if (0 == Dr_total.a) Dr_total.a = 1;
    if (0 == Dw_total.a) Dw_total.a = 1;
diff --git a/callgrind/bbcc.c b/callgrind/bbcc.c
index b6e7f1dce5..c1eaab380c 100644
--- a/callgrind/bbcc.c
+++ b/callgrind/bbcc.c
@@ -480,7 +480,6 @@ static void handleUnderflow(BB* bb)
   /* RET at top of call stack */
   BBCC* source_bbcc;
   BB* source_bb;
-  jCC* jcc;
   Bool seen_before;
   fn_node* caller;
   int fn_number, *pactive;
@@ -533,7 +532,6 @@ static void handleUnderflow(BB* bb)
                                      (Addr)-1, False);
   call_entry_up =
     &(CLG_(current_call_stack).entry[CLG_(current_call_stack).sp -1]);
-  jcc = call_entry_up->jcc;
   /* assume this call is lasting since last dump or
    * for a signal handler since it's call */
   if (CLG_(current_state).sig == 0)
diff --git a/callgrind/dump.c b/callgrind/dump.c
index 78dfdfd2f7..17414cae18 100644
--- a/callgrind/dump.c
+++ b/callgrind/dump.c
@@ -185,19 +185,19 @@ static void my_fwrite(Int fd, Char* buf, Int len)
 static void print_obj(Char* buf, obj_node* obj)
 {
-    int n;
+    //int n;

     if (CLG_(clo).compress_strings) {
        CLG_ASSERT(obj_dumped != 0);
        if (obj_dumped[obj->number])
-           n = VG_(sprintf)(buf, "(%d)\n", obj->number);
+           /*n =*/ VG_(sprintf)(buf, "(%d)\n", obj->number);
        else {
-           n = VG_(sprintf)(buf, "(%d) %s\n",
+           /*n =*/ VG_(sprintf)(buf, "(%d) %s\n",
                             obj->number, obj->name);
        }
     }
     else
-       n = VG_(sprintf)(buf, "%s\n", obj->name);
+       /*n =*/ VG_(sprintf)(buf, "%s\n", obj->name);

 #if 0
     /* add mapping parameters the first time a object is dumped
@@ -1296,7 +1296,7 @@ static int new_dumpfile(Char buf[BUF_LEN], int tid, Char* trigger)
        i += VG_(sprintf)(filename+i, ".%d", out_counter);

     if (CLG_(clo).separate_threads)
-       i += VG_(sprintf)(filename+i, "-%02d", tid);
+       VG_(sprintf)(filename+i, "-%02d", tid);

     res = VG_(open)(filename, VKI_O_WRONLY|VKI_O_TRUNC, 0);
 }
diff --git a/coregrind/m_aspacemgr/aspacemgr-common.c b/coregrind/m_aspacemgr/aspacemgr-common.c
index f884138e05..ca4dc76033 100644
--- a/coregrind/m_aspacemgr/aspacemgr-common.c
+++ b/coregrind/m_aspacemgr/aspacemgr-common.c
@@ -50,6 +50,7 @@
 // Simple assert and assert-like fns, which avoid dependence on
 // m_libcassert, and hence on the entire debug-info reader swamp

+__attribute__ ((noreturn))
 void ML_(am_exit)( Int status )
 {
 #  if defined(VGO_linux)
diff --git a/coregrind/m_aspacemgr/aspacemgr-linux.c b/coregrind/m_aspacemgr/aspacemgr-linux.c
index 311ea4c89b..615d623bd6 100644
--- a/coregrind/m_aspacemgr/aspacemgr-linux.c
+++ b/coregrind/m_aspacemgr/aspacemgr-linux.c
@@ -912,9 +912,10 @@ static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
       have a sloppyXcheck mode which we enable on x86 - in this mode we
       allow the kernel to report execute permission when we weren't
       expecting it but not vice versa. */
-   sloppyXcheck = False;
 #  if defined(VGA_x86)
    sloppyXcheck = True;
+#  else
+   sloppyXcheck = False;
 #  endif

    /* NSegments iLo .. iHi inclusive should agree with the presented
diff --git a/coregrind/m_aspacemgr/priv_aspacemgr.h b/coregrind/m_aspacemgr/priv_aspacemgr.h
index 8774bc3036..3ce2229696 100644
--- a/coregrind/m_aspacemgr/priv_aspacemgr.h
+++ b/coregrind/m_aspacemgr/priv_aspacemgr.h
@@ -63,10 +63,12 @@
    This is important since most of the system itself depends on
    aspacem, so we have to do this to avoid a circular dependency. */

+__attribute__ ((noreturn))
 extern void ML_(am_exit) ( Int status );
 extern void ML_(am_barf) ( HChar* what );
 extern void ML_(am_barf_toolow) ( HChar* what );

+__attribute__ ((noreturn))
 extern void ML_(am_assert_fail) ( const HChar* expr,
                                   const Char* file,
                                   Int line,
diff --git a/coregrind/m_coredump/coredump-elf.c b/coregrind/m_coredump/coredump-elf.c
index ca4428ad13..f9b036a155 100644
--- a/coregrind/m_coredump/coredump-elf.c
+++ b/coregrind/m_coredump/coredump-elf.c
@@ -432,12 +432,10 @@ void make_elf_coredump(ThreadId tid, const vki_siginfo_t *si, UInt max_size)
          continue;

       if (phdrs[idx].p_filesz > 0) {
-         Int ret;
-
         vg_assert(VG_(lseek)(core_fd, phdrs[idx].p_offset, VKI_SEEK_SET)
                   == phdrs[idx].p_offset);
         vg_assert(seg->end - seg->start >= phdrs[idx].p_filesz);
-         ret = VG_(write)(core_fd, (void *)seg->start, phdrs[idx].p_filesz);
+         (void)VG_(write)(core_fd, (void *)seg->start, phdrs[idx].p_filesz);
       }
       idx++;
    }
diff --git a/coregrind/m_debuginfo/readdwarf.c b/coregrind/m_debuginfo/readdwarf.c
index 556599fadb..2edaf80944 100644
--- a/coregrind/m_debuginfo/readdwarf.c
+++ b/coregrind/m_debuginfo/readdwarf.c
@@ -883,7 +883,7 @@ void read_dwarf2_lineblock ( struct _DebugInfo* di,
             break;

          case DW_LNS_set_isa:
-            adv = read_leb128 (data, & bytes_read, 0);
+            /*adv =*/ read_leb128 (data, & bytes_read, 0);
             data += bytes_read;
             if (di->ddump_line)
                VG_(printf)(" DWARF2-line: set_isa\n");
@@ -2915,7 +2915,7 @@ static Int run_CF_instruction ( /*MOD*/UnwindContext* ctx,
       case DW_CFA_GNU_args_size:
          /* No idea what is supposed to happen.  gdb-6.3 simply
            ignores these. */
-         off = read_leb128( &instr[i], &nleb, 0 );
+         /*off = */ read_leb128( &instr[i], &nleb, 0 );
          i += nleb;
          if (di->ddump_frames)
             VG_(printf)(" rci:DW_CFA_GNU_args_size (ignored)\n");
diff --git a/coregrind/m_debuginfo/readdwarf3.c b/coregrind/m_debuginfo/readdwarf3.c
index 80dbd6ec9b..d8eed6a19b 100644
--- a/coregrind/m_debuginfo/readdwarf3.c
+++ b/coregrind/m_debuginfo/readdwarf3.c
@@ -344,7 +344,6 @@ static UWord get_UWord ( Cursor* c ) {
    vg_assert(0);
 }

-
 /* Read a DWARF3 'Initial Length' field */
 static ULong get_Initial_Length ( /*OUT*/Bool* is64,
                                   Cursor* c,
@@ -898,13 +897,13 @@ void set_abbv_Cursor ( /*OUT*/Cursor* c, Bool td3,
    /* Now iterate though the table until we find the requested
       entry. */
    while (True) {
-      ULong atag;
-      UInt  has_children;
+      //ULong atag;
+      //UInt  has_children;
       acode = get_ULEB128( c );
       if (acode == 0) break; /* end of the table */
       if (acode == abbv_code) break; /* found it */
-      atag         = get_ULEB128( c );
-      has_children = get_UChar( c );
+      /*atag         = */ get_ULEB128( c );
+      /*has_children = */ get_UChar( c );
       //TRACE_D3(" %llu %s [%s]\n",
       //         acode, pp_DW_TAG(atag), pp_DW_children(has_children));
       while (True) {
@@ -1303,13 +1302,7 @@ void read_filename_table( /*MOD*/D3VarParser* parser,
    Bool   is_dw64;
    Cursor c;
    Word   i;
-   ULong  unit_length;
    UShort version;
-   ULong  header_length;
-   UChar  minimum_instruction_length;
-   UChar  default_is_stmt;
-   Char   line_base;
-   UChar  line_range;
    UChar  opcode_base;
    UChar* str;
@@ -1322,18 +1315,18 @@ void read_filename_table( /*MOD*/D3VarParser* parser,
               cc->debug_line_sz, debug_line_offset, cc->barf,
               "Overrun whilst reading .debug_line section(1)" );
-   unit_length
-      = get_Initial_Length( &is_dw64, &c,
+   /* unit_length = */
+   get_Initial_Length( &is_dw64, &c,
           "read_filename_table: invalid initial-length field" );
    version = get_UShort( &c );
    if (version != 2)
      cc->barf("read_filename_table: Only DWARF version 2 line info "
               "is currently supported.");
-   header_length = (ULong)get_Dwarfish_UWord( &c, is_dw64 );
-   minimum_instruction_length = get_UChar( &c );
-   default_is_stmt = get_UChar( &c );
-   line_base = (Char)get_UChar( &c );
-   line_range = get_UChar( &c );
+   /*header_length = (ULong)*/ get_Dwarfish_UWord( &c, is_dw64 );
+   /*minimum_instruction_length = */ get_UChar( &c );
+   /*default_is_stmt = */ get_UChar( &c );
+   /*line_base = (Char)*/ get_UChar( &c );
+   /*line_range = */ get_UChar( &c );
    opcode_base = get_UChar( &c );
    /* skip over "standard_opcode_lengths" */
    for (i = 1; i < (Word)opcode_base; i++)
@@ -1584,7 +1577,6 @@ static void parse_var_DIE (
       GExpr* gexpr = NULL;
       Int n_attrs = 0;
       UWord abs_ori = (UWord)D3_INVALID_CUOFF;
-      Bool declaration = False;
       Int lineNo = 0;
       UChar* fileName = NULL;
       while (True) {
@@ -1614,7 +1606,7 @@ static void parse_var_DIE (
             abs_ori = (UWord)cts;
          }
          if (attr == DW_AT_declaration && ctsSzB > 0 && cts > 0) {
-            declaration = True;
+            /*declaration = True;*/
          }
          if (attr == DW_AT_decl_line && ctsSzB > 0) {
            lineNo = (Int)cts;
         }
@@ -2433,7 +2425,6 @@ static void parse_type_DIE ( /*MOD*/XArray* /* of TyEnt */ tyents,
       Bool have_count = False;
       Long lower = 0;
       Long upper = 0;
-      Long count = 0;

       switch (parser->language) {
          case 'C': have_lower = True; lower = 0; break;
@@ -2461,7 +2452,7 @@ static void parse_type_DIE ( /*MOD*/XArray* /* of TyEnt */ tyents,
            have_upper = True;
         }
         if (attr == DW_AT_count && ctsSzB > 0) {
-           count = cts;
+           /*count = (Long)cts;*/
            have_count = True;
         }
      }
diff --git a/coregrind/m_debuginfo/readelf.c b/coregrind/m_debuginfo/readelf.c
index 29d28b116b..e8d3ba1531 100644
--- a/coregrind/m_debuginfo/readelf.c
+++ b/coregrind/m_debuginfo/readelf.c
@@ -219,7 +219,10 @@ Bool get_elf_symbol_info (
         Bool* is_text_out /* is this a text symbol? */
      )
 {
-   Bool plausible, is_in_opd;
+   Bool plausible;
+#  if defined(VGP_ppc64_linux)
+   Bool is_in_opd;
+#  endif
    Bool in_text, in_data, in_sdata, in_rodata, in_bss, in_sbss;

    /* Set defaults */
@@ -365,7 +368,9 @@ Bool get_elf_symbol_info (
       See thread starting at
       http://gcc.gnu.org/ml/gcc-patches/2004-08/msg00557.html
    */
+#  if defined(VGP_ppc64_linux)
    is_in_opd = False;
+#  endif

    if (di->opd_present
        && di->opd_size > 0
@@ -644,7 +649,7 @@ void read_elf_symtab__ppc64_linux(
    Addr       sym_svma, sym_avma_really;
    Char      *sym_name, *sym_name_really;
    Int        sym_size;
-   Addr       sym_tocptr, old_tocptr;
+   Addr       sym_tocptr;
    Bool       from_opd, modify_size, modify_tocptr, is_text;
    DiSym      risym;
    ElfXX_Sym *sym;
@@ -699,7 +704,6 @@ void read_elf_symtab__ppc64_linux(
          modify_size = False;
          modify_tocptr = False;
          old_size = 0;
-         old_tocptr = 0;

          if (prev->from_opd && !from_opd
              && (prev->size == 24 || prev->size == 16)
@@ -720,7 +724,6 @@ void read_elf_symtab__ppc64_linux(
                shouldn't currently have an known TOC ptr. */
             vg_assert(prev->tocptr == 0);
             modify_tocptr = True;
-            old_tocptr = prev->tocptr;
             prev->tocptr = sym_tocptr;
          }
          else {
diff --git a/coregrind/m_debuginfo/storage.c b/coregrind/m_debuginfo/storage.c
index 1e29aea579..6a427b6ad2 100644
--- a/coregrind/m_debuginfo/storage.c
+++ b/coregrind/m_debuginfo/storage.c
@@ -951,7 +951,7 @@ static void canonicaliseVarInfo ( struct _DebugInfo* di )
       /* All the rest of this is for the local-scope case. */
       /* iterate over all entries in 'scope' */
       nInThisScope = 0;
-      range = rangep = NULL;
+      rangep = NULL;
       VG_(OSetGen_ResetIter)(scope);
       while (True) {
          range = VG_(OSetGen_Next)(scope);
@@ -1035,7 +1035,6 @@ static Int compare_DiSym ( void* va, void* vb )
 static DiSym* prefersym ( struct _DebugInfo* di, DiSym* a, DiSym* b )
 {
    Word cmp;
-   Word lena, lenb;   /* full length */
    Word vlena, vlenb; /* length without version */
    const UChar *vpa, *vpb;

@@ -1044,8 +1043,8 @@ static DiSym* prefersym ( struct _DebugInfo* di, DiSym* a, DiSym* b )

    vg_assert(a->addr == b->addr);
-   vlena = lena = VG_(strlen)(a->name);
-   vlenb = lenb = VG_(strlen)(b->name);
+   vlena = VG_(strlen)(a->name);
+   vlenb = VG_(strlen)(b->name);
    vpa = VG_(strchr)(a->name, '@');
    vpb = VG_(strchr)(b->name, '@');
diff --git a/coregrind/m_demangle/cplus-dem.c b/coregrind/m_demangle/cplus-dem.c
index e89db663da..6eac15e1b2 100644
--- a/coregrind/m_demangle/cplus-dem.c
+++ b/coregrind/m_demangle/cplus-dem.c
@@ -1295,7 +1295,7 @@ demangle_signature (struct work_stuff *work,
          break;

        case 'K':
-         oldmangled = *mangled;
+         //oldmangled = *mangled;
          success = demangle_qualified (work, mangled, declp, 1, 0);
          if (AUTO_DEMANGLING || GNU_DEMANGLING)
            {
diff --git a/coregrind/m_errormgr.c b/coregrind/m_errormgr.c
index ec45800ee4..ec4862a32e 100644
--- a/coregrind/m_errormgr.c
+++ b/coregrind/m_errormgr.c
@@ -956,7 +956,6 @@ static void load_one_suppressions_file ( Char* filename )
    Char* err_str = NULL;
    SuppLoc tmp_callers[VG_MAX_SUPP_CALLERS];

-   fd = -1;
    sres = VG_(open)( filename, VKI_O_RDONLY, 0 );
    if (sres.isError) {
       if (VG_(clo_xml))
diff --git a/coregrind/m_initimg/initimg-linux.c b/coregrind/m_initimg/initimg-linux.c
index d4f6251383..e80dc22024 100644
--- a/coregrind/m_initimg/initimg-linux.c
+++ b/coregrind/m_initimg/initimg-linux.c
@@ -179,6 +179,10 @@ static void load_client ( /*OUT*/ExeInfo* info,

    VG_(memset)(info, 0, sizeof(*info));

    ret = VG_(do_exec)(exe_name, info);
+   if (ret < 0) {
+      VG_(printf)("valgrind: could not execute '%s'\n", exe_name);
+      VG_(exit)(1);
+   }

    // The client was successfully loaded! Continue.

@@ -240,6 +244,8 @@ static HChar** setup_client_env ( HChar** origenv, const HChar* toolname)
    Int preload_string_len = preload_core_path_len + preload_tool_path_len;
    HChar* preload_string = VG_(malloc)("initimg-linux.sce.1",
                                        preload_string_len);
+   vg_assert(origenv);
+   vg_assert(toolname);
    vg_assert(preload_string);

    /* Determine if there's a vgpreload__.so file, and setup
diff --git a/coregrind/m_mallocfree.c b/coregrind/m_mallocfree.c
index 3fe3e41103..cf40058181 100644
--- a/coregrind/m_mallocfree.c
+++ b/coregrind/m_mallocfree.c
@@ -602,6 +602,7 @@ void ensure_mm_init ( ArenaId aid )
 /*--- Superblock management ---*/
 /*------------------------------------------------------------*/

+__attribute__((noreturn))
 void VG_(out_of_memory_NORETURN) ( HChar* who, SizeT szB )
 {
    static Bool alreadyCrashing = False;
diff --git a/coregrind/m_replacemalloc/vg_replace_malloc.c b/coregrind/m_replacemalloc/vg_replace_malloc.c
index 62c0340971..3de33f91ae 100644
--- a/coregrind/m_replacemalloc/vg_replace_malloc.c
+++ b/coregrind/m_replacemalloc/vg_replace_malloc.c
@@ -66,6 +66,7 @@
    executable too.
 */

+__attribute__ ((__noreturn__))
 extern void _exit(int);

 /* Apparently it is necessary to make ourselves free of any dependency
diff --git a/exp-ptrcheck/h_main.c b/exp-ptrcheck/h_main.c
index 403388f7e6..f0343c1ff4 100644
--- a/exp-ptrcheck/h_main.c
+++ b/exp-ptrcheck/h_main.c
@@ -1163,7 +1163,6 @@ static void pre_mem_access2 ( CorePart part, ThreadId tid, Char* str,
                               Addr s/*tart*/, Addr e/*nd*/ )
 {
    Seg *seglo, *seghi;
-   Bool s_in_seglo, s_in_seghi, e_in_seglo, e_in_seghi;

    // Don't check code being translated -- very slow, and not much point
    if (Vg_CoreTranslate == part) return;
@@ -1183,22 +1182,7 @@ static void pre_mem_access2 ( CorePart part, ThreadId tid, Char* str,
    tl_assert( BOTTOM != seglo && NONPTR != seglo );
    tl_assert( BOTTOM != seghi && NONPTR != seghi );

-   /* so seglo and seghi are either UNKNOWN or P(..) */
-   s_in_seglo
-      = is_known_segment(seglo) && seglo->addr <= s && s < seglo->addr + seglo->szB;
-   s_in_seghi
-      = is_known_segment(seghi) && seghi->addr <= s && s < seghi->addr + seghi->szB;
-   e_in_seglo
-      = is_known_segment(seglo) && seglo->addr <= e && e < seglo->addr + seglo->szB;
-   e_in_seghi
-      = is_known_segment(seghi) && seghi->addr <= e && e < seghi->addr + seghi->szB;
-
-   /* record an error if start and end are in different, but known
-      segments */
+   /* record an error if start and end are in different, but known segments */
    if (is_known_segment(seglo) && is_known_segment(seghi)
        && seglo != seghi) {
       h_record_sysparam_error(tid, part, str, s, e, seglo, seghi);
diff --git a/helgrind/hg_main.c b/helgrind/hg_main.c
index 44cd6ea765..d63d73b63b 100644
--- a/helgrind/hg_main.c
+++ b/helgrind/hg_main.c
@@ -411,10 +411,8 @@ static void pp_admin_threads ( Int d )

 static void pp_map_threads ( Int d )
 {
-   Int i, n;
-   n = 0;
+   Int i, n = 0;
    space(d); VG_(printf)("map_threads ");
-   n = 0;
    for (i = 0; i < VG_N_THREADS; i++) {
       if (map_threads[i] != NULL) n++;
    }
diff --git a/helgrind/libhb_core.c b/helgrind/libhb_core.c
index cda2c28eaf..572b26b070 100644
--- a/helgrind/libhb_core.c
+++ b/helgrind/libhb_core.c
@@ -1724,7 +1724,6 @@ VTS* VTS__join ( VTS* a, VTS* b )
    ULong tyma, tymb, tymMax;
    Thr*  thr;
    VTS*  res;
-   ScalarTS *tmpa, *tmpb;

    tl_assert(a && a->ts);
    tl_assert(b && b->ts);
@@ -1742,43 +1741,38 @@ VTS* VTS__join ( VTS* a, VTS* b )
         scalar timestamps, taking into account implicit zeroes. */
      tl_assert(ia >= 0 && ia <= useda);
      tl_assert(ib >= 0 && ib <= usedb);
-      tmpa = tmpb = NULL;

-      if (ia == useda && ib == usedb) {
+      if (ia == useda && ib == usedb) {
         /* both empty - done */
         break;
-      }
-      else
-      if (ia == useda && ib != usedb) {
+
+      } else if (ia == useda && ib != usedb) {
         /* a empty, use up b */
-         tmpb = VG_(indexXA)( b->ts, ib );
+         ScalarTS* tmpb = VG_(indexXA)( b->ts, ib );
         thr  = tmpb->thr;
         tyma = 0;
         tymb = tmpb->tym;
         ib++;
-      }
-      else
-      if (ia != useda && ib == usedb) {
+
+      } else if (ia != useda && ib == usedb) {
         /* b empty, use up a */
-         tmpa = VG_(indexXA)( a->ts, ia );
+         ScalarTS* tmpa = VG_(indexXA)( a->ts, ia );
         thr  = tmpa->thr;
         tyma = tmpa->tym;
         tymb = 0;
         ia++;
-      }
-      else {
+
+      } else {
         /* both not empty; extract lowest-Thr*'d triple */
-         tmpa = VG_(indexXA)( a->ts, ia );
-         tmpb = VG_(indexXA)( b->ts, ib );
+         ScalarTS* tmpa = VG_(indexXA)( a->ts, ia );
+         ScalarTS* tmpb = VG_(indexXA)( b->ts, ib );
         if (tmpa->thr < tmpb->thr) {
            /* a has the lowest unconsidered Thr* */
            thr  = tmpa->thr;
            tyma = tmpa->tym;
            tymb = 0;
            ia++;
-         }
-         else
-         if (tmpa->thr > tmpb->thr) {
+         } else if (tmpa->thr > tmpb->thr) {
            /* b has the lowest unconsidered Thr* */
            thr  = tmpb->thr;
            tyma = 0;
@@ -1819,8 +1813,6 @@ POrd VTS__cmp ( VTS* a, VTS* b )
 {
    Word  ia, ib, useda, usedb;
    ULong tyma, tymb;
-   Thr*  thr;
-   ScalarTS *tmpa, *tmpb;
    Bool  all_leq = True;
    Bool  all_geq = True;
@@ -1834,43 +1826,36 @@ POrd VTS__cmp ( VTS* a, VTS* b )

    while (1) {

-      /* This logic is to enumerate triples (thr, tyma, tymb) drawn
-         from a and b in order, where thr is the next Thr*
-         occurring in either a or b, and tyma/b are the relevant
+      /* This logic is to enumerate doubles (tyma, tymb) drawn
+         from a and b in order, and tyma/b are the relevant
         scalar timestamps, taking into account implicit zeroes. */
      tl_assert(ia >= 0 && ia <= useda);
      tl_assert(ib >= 0 && ib <= usedb);
-      tmpa = tmpb = NULL;

-      if (ia == useda && ib == usedb) {
+      if (ia == useda && ib == usedb) {
         /* both empty - done */
         break;
-      }
-      else
-      if (ia == useda && ib != usedb) {
+
+      } else if (ia == useda && ib != usedb) {
         /* a empty, use up b */
-         tmpb = VG_(indexXA)( b->ts, ib );
-         thr  = tmpb->thr;
+         ScalarTS* tmpb = VG_(indexXA)( b->ts, ib );
         tyma = 0;
         tymb = tmpb->tym;
         ib++;
-      }
-      else
-      if (ia != useda && ib == usedb) {
+
+      } else if (ia != useda && ib == usedb) {
         /* b empty, use up a */
-         tmpa = VG_(indexXA)( a->ts, ia );
-         thr  = tmpa->thr;
+         ScalarTS* tmpa = VG_(indexXA)( a->ts, ia );
         tyma = tmpa->tym;
         tymb = 0;
         ia++;
-      }
-      else {
+
+      } else {
         /* both not empty; extract lowest-Thr*'d triple */
-         tmpa = VG_(indexXA)( a->ts, ia );
-         tmpb = VG_(indexXA)( b->ts, ib );
+         ScalarTS* tmpa = VG_(indexXA)( a->ts, ia );
+         ScalarTS* tmpb = VG_(indexXA)( b->ts, ib );
         if (tmpa->thr < tmpb->thr) {
            /* a has the lowest unconsidered Thr* */
-            thr  = tmpa->thr;
            tyma = tmpa->tym;
            tymb = 0;
            ia++;
@@ -1878,14 +1863,12 @@ POrd VTS__cmp ( VTS* a, VTS* b )
         else
         if (tmpa->thr > tmpb->thr) {
            /* b has the lowest unconsidered Thr* */
-            thr  = tmpb->thr;
            tyma = 0;
            tymb = tmpb->tym;
            ib++;
         } else {
            /* they both next mention the same Thr* */
            tl_assert(tmpa->thr == tmpb->thr);
-            thr  = tmpa->thr; /* == tmpb->thr */
            tyma = tmpa->tym;
            tymb = tmpb->tym;
            ia++;
@@ -1893,7 +1876,7 @@ POrd VTS__cmp ( VTS* a, VTS* b )
      }
   }

-   /* having laboriously determined (thr, tyma, tymb), do something
+   /* having laboriously determined (tyma, tymb), do something
      useful with it. */
   if (tyma < tymb)
      all_geq = False;
@@ -4002,7 +3985,8 @@ void zsm_apply32___msm_write ( Thr* thr, Addr a ) {

 void zsm_apply64___msm_read ( Thr* thr, Addr a ) {
    CacheLine* cl;
-   UWord cloff, tno, toff;
+   UWord cloff, tno;
+   //UWord toff;
    SVal svOld, svNew;
    UShort descr;
    stats__cline_read64s++;
@@ -4010,7 +3994,7 @@ void zsm_apply64___msm_read ( Thr* thr, Addr a ) {
    cl = get_cacheline(a);
    cloff = get_cacheline_offset(a);
    tno = get_treeno(a);
-   toff = get_tree_offset(a); /* == 0, unused */
+   //toff = get_tree_offset(a); /* == 0, unused */
    descr = cl->descrs[tno];
    if (UNLIKELY( !(descr & TREE_DESCR_64) )) {
       goto slowcase;
@@ -4028,7 +4012,8 @@ void zsm_apply64___msm_read ( Thr* thr, Addr a ) {

 void zsm_apply64___msm_write ( Thr* thr, Addr a ) {
    CacheLine* cl;
-   UWord cloff, tno, toff;
+   UWord cloff, tno;
+   //UWord toff;
    SVal svOld, svNew;
    UShort descr;
    stats__cline_read64s++;
@@ -4036,7 +4021,7 @@ void zsm_apply64___msm_write ( Thr* thr, Addr a ) {
    cl = get_cacheline(a);
    cloff = get_cacheline_offset(a);
    tno = get_treeno(a);
-   toff = get_tree_offset(a); /* == 0, unused */
+   //toff = get_tree_offset(a); /* == 0, unused */
    descr = cl->descrs[tno];
    if (UNLIKELY( !(descr & TREE_DESCR_64) )) {
       goto slowcase;
@@ -4162,13 +4147,14 @@ void zsm_write32 ( Addr a, SVal svNew ) {

 static void zsm_write64 ( Addr a, SVal svNew ) {
    CacheLine* cl;
-   UWord cloff, tno, toff;
+   UWord cloff, tno;
+   //UWord toff;
    stats__cline_set64s++;
    if (UNLIKELY(!aligned64(a))) goto slowcase;
    cl = get_cacheline(a);
    cloff = get_cacheline_offset(a);
    tno = get_treeno(a);
-   toff = get_tree_offset(a); /* == 0 */
+   //toff = get_tree_offset(a); /* == 0, unused */
    cl->descrs[tno] = TREE_DESCR_64;
    tl_assert(svNew != SVal_INVALID);
    cl->svals[cloff + 0] = svNew;
@@ -4286,7 +4272,7 @@ void zsm_apply_range___msm_read ( Thr* thr,

    if (len >= 1) {
       zsm_apply8___msm_read( thr, a );
-      a += 1;
+      //a += 1;
       len -= 1;
    }
    tl_assert(len == 0);
@@ -4365,7 +4351,7 @@ void zsm_apply_range___msm_write ( Thr* thr,
    if (len >= 1) {
       zsm_apply8___msm_write( thr, a );
-      a += 1;
+      //a += 1;
       len -= 1;
    }
    tl_assert(len == 0);
@@ -4475,7 +4461,7 @@ void zsm_set_range_SMALL ( Addr a, SizeT len, SVal svNew )

    if (len >= 1) {
       zsm_write8( a, svNew );
-      a += 1;
+      //a += 1;
       len -= 1;
    }
    tl_assert(len == 0);
diff --git a/include/pub_tool_mallocfree.h b/include/pub_tool_mallocfree.h
index 9074d177fe..efab2c2318 100644
--- a/include/pub_tool_mallocfree.h
+++ b/include/pub_tool_mallocfree.h
@@ -47,6 +47,7 @@ extern SizeT VG_(malloc_usable_size)( void* p );
 // TODO: move somewhere else

 // Call here to bomb the system when out of memory (mmap anon fails)
+__attribute__((noreturn))
 extern void VG_(out_of_memory_NORETURN) ( HChar* who, SizeT szB );

 #endif // __PUB_TOOL_MALLOCFREE_H
diff --git a/massif/ms_main.c b/massif/ms_main.c
index 4c680f2eb3..ee8590857a 100644
--- a/massif/ms_main.c
+++ b/massif/ms_main.c
@@ -1903,8 +1903,6 @@ static void pp_snapshot_SXPt(Int fd, SXPt* sxpt, Int depth, Char* depth_str,
                              SizeT snapshot_heap_szB, SizeT snapshot_total_szB)
 {
    Int   i, j, n_insig_children_sxpts;
-   Char* perc;
-   SXPt* pred  = NULL;
    SXPt* child = NULL;

    // Used for printing function names. Is made static to keep it out
@@ -1932,7 +1930,6 @@ static void pp_snapshot_SXPt(Int fd, SXPt* sxpt, Int depth, Char* depth_str,
          // We need the -1 to get the line number right, But I'm not sure why.
          ip_desc = VG_(describe_IP)(sxpt->Sig.ip-1, ip_desc, BUF_LEN);
       }
-      perc = make_perc(sxpt->szB, snapshot_total_szB);

       // Do the non-ip_desc part first...
       FP("%sn%d: %lu ", depth_str, sxpt->Sig.n_children, sxpt->szB);
@@ -1989,7 +1986,6 @@ static void pp_snapshot_SXPt(Int fd, SXPt* sxpt, Int depth, Char* depth_str,
       // Print the SXPt's children. They should already be in sorted order.
       n_insig_children_sxpts = 0;
       for (i = 0; i < sxpt->Sig.n_children; i++) {
-         pred  = child;
          child = sxpt->Sig.children[i];

          if (InsigSXPt == child->tag)
@@ -2012,7 +2008,6 @@ static void pp_snapshot_SXPt(Int fd, SXPt* sxpt, Int depth, Char* depth_str,
     case InsigSXPt: {
       Char* s = ( 1 == sxpt->Insig.n_xpts ? "," : "s, all" );
-      perc = make_perc(sxpt->szB, snapshot_total_szB);
       FP("%sn0: %lu in %d place%s below massif's threshold (%s)\n",
          depth_str, sxpt->szB, sxpt->Insig.n_xpts, s,
          make_perc((ULong)clo_threshold, 100));
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index 785c899261..a6e04eb80e 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -1158,7 +1158,7 @@ ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
       least. */
    ULong vbits64 = V_BITS64_UNDEFINED;
    SizeT szB = nBits / 8;
-   SSizeT i = szB-1;    // Must be signed
+   SSizeT i;            // Must be signed.
    SizeT n_addrs_bad = 0;
    Addr ai;
    Bool partial_load_exemption_applies;
@@ -1493,6 +1493,7 @@ static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,

   part2:
    // 64KB-aligned, 64KB steps.
    // Nb: we can reach here with lenB < SM_SIZE
+   tl_assert(0 == lenA);
    while (True) {
       if (lenB < SM_SIZE) break;
       tl_assert(is_start_of_sm(a));
@@ -5441,7 +5442,7 @@ static void ocache_sarp_Set_Origins ( Addr a, UWord len, UInt otag ) {
    }
    if (len >= 1) {
       MC_(helperc_b_store1)( a, otag );
-      a++;
+      //a++;
       len--;
    }
    tl_assert(len == 0);
@@ -5473,7 +5474,7 @@ static void ocache_sarp_Clear_Origins ( Addr a, UWord len ) {
    }
    if (len >= 1) {
       MC_(helperc_b_store1)( a, 0 );
-      a++;
+      //a++;
       len--;
    }
    tl_assert(len == 0);
diff --git a/memcheck/mc_translate.c b/memcheck/mc_translate.c
index b95caf9709..bc8d698ada 100644
--- a/memcheck/mc_translate.c
+++ b/memcheck/mc_translate.c
@@ -1006,7 +1006,8 @@ static void complainIfUndefined ( MCEnv* mce, IRAtom* atom )
            nargs = 0;
         }
         break;
-      default:
+      case 2:
+      case 16:
         if (origin) {
            fn = &MC_(helperc_value_checkN_fail_w_o);
            nm = "MC_(helperc_value_checkN_fail_w_o)";
@@ -1019,6 +1020,8 @@ static void complainIfUndefined ( MCEnv* mce, IRAtom* atom )
            nargs = 1;
         }
         break;
+      default:
+         VG_(tool_panic)("unexpected szB");
    }

    tl_assert(fn);
@@ -2921,10 +2924,6 @@ void do_shadow_Store ( MCEnv* mce,
 {
    IROp     mkAdd;
    IRType   ty, tyAddr;
-   IRDirty  *di, *diLo64, *diHi64;
-   IRAtom   *addrAct, *addrLo64, *addrHi64;
-   IRAtom   *vdataLo64, *vdataHi64;
-   IRAtom   *eBias, *eBiasLo64, *eBiasHi64;
    void*    helper = NULL;
    Char*    hname = NULL;
    IRConst* c;
@@ -2934,11 +2933,6 @@ void do_shadow_Store ( MCEnv* mce,
    tl_assert( tyAddr == Ity_I32 || tyAddr == Ity_I64 );
    tl_assert( end == Iend_LE || end == Iend_BE );

-   di = diLo64 = diHi64 = NULL;
-   eBias = eBiasLo64 = eBiasHi64 = NULL;
-   addrAct = addrLo64 = addrHi64 = NULL;
-   vdataLo64 = vdataHi64 = NULL;
-
    if (data) {
       tl_assert(!vdata);
       tl_assert(isOriginalAtom(mce, data));
@@ -3016,7 +3010,12 @@ void do_shadow_Store ( MCEnv* mce,
       /* See comment in next clause re 64-bit regparms */
       /* also, need to be careful about endianness */

-      Int     offLo64, offHi64;
+      Int     offLo64, offHi64;
+      IRDirty *diLo64, *diHi64;
+      IRAtom  *addrLo64, *addrHi64;
+      IRAtom  *vdataLo64, *vdataHi64;
+      IRAtom  *eBiasLo64, *eBiasHi64;
+
       if (end == Iend_LE) {
          offLo64 = 0;
          offHi64 = 8;
@@ -3048,12 +3047,15 @@ void do_shadow_Store ( MCEnv* mce,

    } else {

+      IRDirty *di;
+      IRAtom  *addrAct;
+
       /* 8/16/32/64-bit cases */
       /* Generate the actual address into addrAct. */
       if (bias == 0) {
          addrAct = addr;
       } else {
-         eBias = tyAddr==Ity_I32 ? mkU32(bias) : mkU64(bias);
+         IRAtom* eBias = tyAddr==Ity_I32 ? mkU32(bias) : mkU64(bias);
          addrAct = assignNew('V', mce, tyAddr, binop(mkAdd, addr, eBias));
       }
@@ -3099,9 +3101,9 @@ static IRType szToITy ( Int n )
 static
 void do_shadow_Dirty ( MCEnv* mce, IRDirty* d )
 {
-   Int       i, n, offset, toDo, gSz, gOff;
+   Int       i, n, toDo, gSz, gOff;
    IRAtom    *src, *here, *curr;
-   IRType    tyAddr, tySrc, tyDst;
+   IRType    tySrc, tyDst;
    IRTemp    dst;
    IREndness end;
@@ -3168,13 +3170,13 @@ void do_shadow_Dirty ( MCEnv* mce, IRDirty* d )
    /* Inputs: memory.  First set up some info needed regardless of
       whether we're doing reads or writes. */

-   tyAddr = Ity_INVALID;
    if (d->mFx != Ifx_None) {
       /* Because we may do multiple shadow loads/stores from the same
          base address, it's best to do a single test of its
          definedness right now.  Post-instrumentation optimisation
          should remove all but this test. */
+      IRType tyAddr;
       tl_assert(d->mAddr);
       complainIfUndefined(mce, d->mAddr);
@@ -3185,7 +3187,6 @@ void do_shadow_Dirty ( MCEnv* mce, IRDirty* d )

    /* Deal with memory inputs (reads or modifies) */
    if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
-      offset = 0;
       toDo = d->mSize;
       /* chew off 32-bit chunks.
         We don't care about the endianness
         since it's all going to be condensed down to a single bit,
@@ -3255,7 +3256,6 @@ void do_shadow_Dirty ( MCEnv* mce, IRDirty* d )
    /* Outputs: memory that we write or modify.  Same comments about
       endianness as above apply. */
    if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
-      offset = 0;
       toDo = d->mSize;
       /* chew off 32-bit chunks */
       while (toDo >= 4) {
@@ -4081,19 +4081,9 @@ static IRAtom* schemeE ( MCEnv* mce, IRExpr* e )
 static void do_origins_Dirty ( MCEnv* mce, IRDirty* d )
 {
    // This is a hacked version of do_shadow_Dirty
-   Int       i, n, offset, toDo, gSz, gOff;
+   Int       i, n, toDo, gSz, gOff;
    IRAtom    *here, *curr;
    IRTemp    dst;
-   IREndness end;
-
-   /* What's the native endianness?  We need to know this. */
-#  if defined(VG_BIGENDIAN)
-   end = Iend_BE;
-#  elif defined(VG_LITTLEENDIAN)
-   end = Iend_LE;
-#  else
-#    error "Unknown endianness"
-#  endif

    /* First check the guard. */
    curr = schemeE( mce, d->guard );
@@ -4164,7 +4154,6 @@ static void do_origins_Dirty ( MCEnv* mce, IRDirty* d )

    /* Deal with memory inputs (reads or modifies) */
    if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
-      offset = 0;
       toDo = d->mSize;
       /* chew off 32-bit chunks.  We don't care about the endianness
          since it's all going to be condensed down to a single bit,
@@ -4228,7 +4217,6 @@ static void do_origins_Dirty ( MCEnv* mce, IRDirty* d )
    /* Outputs: memory that we write or modify.  Same comments about
       endianness as above apply. */
    if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
-      offset = 0;
       toDo = d->mSize;
       /* chew off 32-bit chunks */
       while (toDo >= 4) {
@@ -4242,7 +4230,6 @@ static void do_origins_Dirty ( MCEnv* mce, IRDirty* d )
       }
       tl_assert(toDo == 0); /* also need to handle 1-byte excess */
    }
-
 }

 static void schemeS ( MCEnv* mce, IRStmt* st )
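
Editor's note (not part of the patch): the changes above fall into two recurring patterns that the Clang Static Analyzer reports -- deleting stores to variables whose values are never read afterwards, and marking functions that never return with __attribute__((noreturn)) so the analyzer does not follow impossible paths after a call to them. The standalone C sketch below illustrates both patterns; the function names (fatal, parse_flags) are invented for the example and do not appear in Valgrind.

/* Illustrative only -- not Valgrind code. */
#include <stdio.h>
#include <stdlib.h>

/* Pattern (b): declaring this noreturn tells the compiler and the
   analyzer that control never comes back, so callers are not flagged
   for "missing return" or uses of uninitialised values on the path
   after the call. */
__attribute__((noreturn))
static void fatal(const char* msg)
{
   fprintf(stderr, "fatal: %s\n", msg);
   exit(1);
}

static int parse_flags(const char* s)
{
   /* Pattern (a): a dead store such as
        int n = 0;      // value never read before being overwritten
      is reported by the analyzer; the fix is simply to drop the unused
      assignment (or, as the patch does when a call's side effect must
      be kept, comment out only the assignment part). */
   if (s == NULL)
      fatal("no flag string");   /* analyzer knows execution stops here */
   return s[0] == '-' ? 1 : 0;
}

int main(void)
{
   printf("%d\n", parse_flags("-v"));
   return 0;
}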