From: Philippe Waroquiers
Date: Sun, 8 Jan 2023 10:50:07 +0000 (+0100)
Subject: Avoid doing mempool specific leak search activities if there are no mempools
X-Git-Tag: VALGRIND_3_21_0~236
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3a046ec1c19d01eec8b22b0338cd236ccf64a4d5;p=thirdparty%2Fvalgrind.git

Avoid doing mempool specific leak search activities if there are no mempools

For most memcheck users, no mempools are used, but the leak search logic
was doing the mempool-specific handling unconditionally, leading to useless
work such as re-sorting an already sorted array and copying an array
without modifying it.

This gives a small improvement in the memcheck performance regression tests:

perl perf/vg_perf --tools=memcheck --vg=. --vg=../trunk_untouched perf
-- Running tests in perf ----------------------------------------------
-- bigcode1 --
bigcode1          .              :0.08s  me: 3.0s (38.1x, -----)
bigcode1          trunk_untouched:0.08s  me: 3.1s (38.6x, -1.3%)
-- bigcode2 --
bigcode2          .              :0.07s  me: 7.4s (105.9x, -----)
bigcode2          trunk_untouched:0.07s  me: 7.5s (107.4x, -1.5%)
-- bz2 --
bz2               .              :0.40s  me: 5.2s (12.9x, -----)
bz2               trunk_untouched:0.40s  me: 5.4s (13.6x, -5.0%)
-- fbench --
fbench            .              :0.15s  me: 2.8s (18.8x, -----)
fbench            trunk_untouched:0.15s  me: 2.9s (19.0x, -1.1%)
-- ffbench --
ffbench           .              :0.16s  me: 2.7s (16.8x, -----)
ffbench           trunk_untouched:0.16s  me: 2.7s (17.1x, -1.9%)
-- heap --
heap              .              :0.06s  me: 4.0s (66.5x, -----)
heap              trunk_untouched:0.06s  me: 4.1s (68.7x, -3.3%)
-- heap_pdb4 --
heap_pdb4         .              :0.07s  me: 6.2s (89.1x, -----)
heap_pdb4         trunk_untouched:0.07s  me: 6.6s (94.9x, -6.4%)
-- many-loss-records --
many-loss-records .              :0.01s  me: 1.2s (122.0x, -----)
many-loss-records trunk_untouched:0.01s  me: 1.2s (125.0x, -2.5%)
-- many-xpts --
many-xpts         .              :0.03s  me: 1.2s (41.7x, -----)
many-xpts         trunk_untouched:0.03s  me: 1.3s (43.7x, -4.8%)
-- memrw --
memrw             .              :0.06s  me: 1.2s (19.8x, -----)
memrw             trunk_untouched:0.06s  me: 1.2s (20.2x, -1.7%)
-- sarp --
sarp              .              :0.02s  me: 1.8s (91.5x, -----)
sarp              trunk_untouched:0.02s  me: 2.1s (103.5x,-13.1%)
-- tinycc --
tinycc            .              :0.11s  me: 7.1s (64.4x, -----)
tinycc            trunk_untouched:0.11s  me: 7.1s (64.3x, 0.1%)
-- Finished tests in perf ----------------------------------------------
== 12 programs, 24 timings =================
---

diff --git a/memcheck/mc_leakcheck.c b/memcheck/mc_leakcheck.c
index b2a133f3fe..b06e77f605 100644
--- a/memcheck/mc_leakcheck.c
+++ b/memcheck/mc_leakcheck.c
@@ -332,15 +332,10 @@ Int find_chunk_for ( Addr ptr,
 
 
 static MC_Chunk**
-find_active_chunks(Int* pn_chunks)
+get_sorted_array_of_active_chunks(Int* pn_chunks)
 {
-   // Our goal is to construct a set of chunks that includes every
-   // mempool chunk, and every malloc region that *doesn't* contain a
-   // mempool chunk.
-   MC_Mempool *mp;
-   MC_Chunk **mallocs, **chunks, *mc;
-   UInt n_mallocs, n_chunks, m, s;
-   Bool *malloc_chunk_holds_a_pool_chunk;
+   UInt n_mallocs;
+   MC_Chunk **mallocs;
 
    // First we collect all the malloc chunks into an array and sort it.
    // We do this because we want to query the chunks by interior
@@ -353,73 +348,98 @@ find_active_chunks(Int* pn_chunks)
    }
    VG_(ssort)(mallocs, n_mallocs, sizeof(VgHashNode*), compare_MC_Chunks);
 
-   // Then we build an array containing a Bool for each malloc chunk,
-   // indicating whether it contains any mempools.
-   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
-                                                  n_mallocs, sizeof(Bool) );
-   n_chunks = n_mallocs;
+   // If there are no mempools (for most users, this is the case),
+   // n_mallocs and mallocs is the final result
+   // otherwise we need to do special handling for mempools.
+
+   if (VG_(HT_count_nodes)(MC_(mempool_list)) > 0) {
+      // Our goal is to construct a set of chunks that includes every
+      // mempool chunk, and every malloc region that *doesn't* contain a
+      // mempool chunk.
+      MC_Mempool *mp;
+      MC_Chunk **chunks, *mc;
+      UInt n_chunks, m, s;
+      Bool *malloc_chunk_holds_a_pool_chunk;
+
+      // We build an array containing a Bool for each malloc chunk,
+      // indicating whether it contains any mempools.
+      malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
+                                                     n_mallocs, sizeof(Bool) );
+      n_chunks = n_mallocs;
+
+      // Then we loop over the mempool tables. For each chunk in each
+      // pool, we set the entry in the Bool array corresponding to the
+      // malloc chunk containing the mempool chunk.
+      VG_(HT_ResetIter)(MC_(mempool_list));
+      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
+         VG_(HT_ResetIter)(mp->chunks);
+         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
 
-   // Then we loop over the mempool tables. For each chunk in each
-   // pool, we set the entry in the Bool array corresponding to the
-   // malloc chunk containing the mempool chunk.
-   VG_(HT_ResetIter)(MC_(mempool_list));
-   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
-      VG_(HT_ResetIter)(mp->chunks);
-      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
-
-         // We'll need to record this chunk.
-         n_chunks++;
-
-         // Possibly invalidate the malloc holding the beginning of this chunk.
-         m = find_chunk_for(mc->data, mallocs, n_mallocs);
-         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
-            tl_assert(n_chunks > 0);
-            n_chunks--;
-            malloc_chunk_holds_a_pool_chunk[m] = True;
-         }
+            // We'll need to record this chunk.
+            n_chunks++;
 
-         // Possibly invalidate the malloc holding the end of this chunk.
-         if (mc->szB > 1) {
-            m = find_chunk_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
+            // Possibly invalidate the malloc holding the beginning of this chunk.
+            m = find_chunk_for(mc->data, mallocs, n_mallocs);
             if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
                tl_assert(n_chunks > 0);
                n_chunks--;
               malloc_chunk_holds_a_pool_chunk[m] = True;
             }
+
+            // Possibly invalidate the malloc holding the end of this chunk.
+            if (mc->szB > 1) {
+               m = find_chunk_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
+               if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
+                  tl_assert(n_chunks > 0);
+                  n_chunks--;
+                  malloc_chunk_holds_a_pool_chunk[m] = True;
+               }
+            }
          }
      }
-   }
-   tl_assert(n_chunks > 0);
+      tl_assert(n_chunks > 0);
 
-   // Create final chunk array.
-   chunks = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (n_chunks));
-   s = 0;
+      // Create final chunk array.
+      chunks = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (n_chunks));
+      s = 0;
 
-   // Copy the mempool chunks and the non-marked malloc chunks into a
-   // combined array of chunks.
-   VG_(HT_ResetIter)(MC_(mempool_list));
-   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
-      VG_(HT_ResetIter)(mp->chunks);
-      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
-         tl_assert(s < n_chunks);
-         chunks[s++] = mc;
+      // Copy the mempool chunks and the non-marked malloc chunks into a
+      // combined array of chunks.
+      VG_(HT_ResetIter)(MC_(mempool_list));
+      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
+         VG_(HT_ResetIter)(mp->chunks);
+         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
+            tl_assert(s < n_chunks);
+            chunks[s++] = mc;
+         }
       }
-   }
-   for (m = 0; m < n_mallocs; ++m) {
-      if (!malloc_chunk_holds_a_pool_chunk[m]) {
-         tl_assert(s < n_chunks);
-         chunks[s++] = mallocs[m];
+      for (m = 0; m < n_mallocs; ++m) {
+         if (!malloc_chunk_holds_a_pool_chunk[m]) {
+            tl_assert(s < n_chunks);
+            chunks[s++] = mallocs[m];
+         }
       }
-   }
-   tl_assert(s == n_chunks);
+      tl_assert(s == n_chunks);
 
-   // Free temporaries.
-   VG_(free)(mallocs);
-   VG_(free)(malloc_chunk_holds_a_pool_chunk);
+      // Free temporaries.
+      VG_(free)(mallocs);
+      VG_(free)(malloc_chunk_holds_a_pool_chunk);
 
-   *pn_chunks = n_chunks;
+      *pn_chunks = n_chunks;
+
+      // Sort the array so blocks are in ascending order in memory.
+      VG_(ssort)(chunks, n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);
+
+      // Sanity check -- make sure they're in order.
+      for (int i = 0; i < n_chunks-1; i++) {
+         tl_assert( chunks[i]->data <= chunks[i+1]->data);
+      }
 
-   return chunks;
+      return chunks;
+   } else {
+      *pn_chunks = n_mallocs;
+      return mallocs;
+   }
 }
 
 /*------------------------------------------------------------*/
@@ -2020,7 +2040,7 @@ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
       VG_(free)(lc_chunks);
       lc_chunks = NULL;
    }
-   lc_chunks = find_active_chunks(&lc_n_chunks);
+   lc_chunks = get_sorted_array_of_active_chunks(&lc_n_chunks);
    lc_chunks_n_frees_marker = MC_(get_cmalloc_n_frees)();
    if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
@@ -2040,14 +2060,6 @@ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
       return;
    }
 
-   // Sort the array so blocks are in ascending order in memory.
-   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);
-
-   // Sanity check -- make sure they're in order.
-   for (i = 0; i < lc_n_chunks-1; i++) {
-      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
-   }
-
    // Sanity check -- make sure they don't overlap. One exception is that
    // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
    // This is for bug 100628. If this occurs, we ignore the malloc() block
@@ -2262,7 +2274,7 @@ void MC_(who_points_at) ( Addr address, SizeT szB)
       VG_(umsg) ("Searching for pointers pointing in %lu bytes from %#lx\n",
                  szB, address);
 
-   chunks = find_active_chunks(&n_chunks);
+   chunks = get_sorted_array_of_active_chunks(&n_chunks);
 
    // Scan memory root-set, searching for ptr pointing in address[szB]
    scan_memory_root_set(address, szB);
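
The change above boils down to a common fast-path pattern: when the optional
source of extra items (here, the mempools) is empty, hand back the already
sorted base array untouched instead of copying and re-sorting it. The
standalone sketch below shows that shape in plain C with hypothetical names
(get_sorted_items, base, extra); it is not Valgrind code and only mirrors the
structure of get_sorted_array_of_active_chunks.

/* Minimal sketch of the fast-path pattern (hypothetical names, not Valgrind code). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_int(const void *a, const void *b)
{
   int x = *(const int *)a, y = *(const int *)b;
   return (x > y) - (x < y);
}

/* 'base' must already be sorted (the caller sorted it once).
 * Returns either 'base' itself (fast path: no allocation, no second sort)
 * or a freshly malloc'd merged array that the caller must free. */
static int *get_sorted_items(int *base, size_t n_base,
                             const int *extra, size_t n_extra,
                             size_t *pn_out)
{
   if (n_extra == 0) {              /* common case: nothing to merge */
      *pn_out = n_base;
      return base;                  /* already sorted: no copy, no qsort */
   }

   int *merged = malloc((n_base + n_extra) * sizeof *merged);
   if (!merged) { perror("malloc"); exit(EXIT_FAILURE); }
   memcpy(merged, base, n_base * sizeof *merged);
   memcpy(merged + n_base, extra, n_extra * sizeof *merged);
   qsort(merged, n_base + n_extra, sizeof *merged, cmp_int);
   *pn_out = n_base + n_extra;
   return merged;
}

int main(void)
{
   int base[]  = { 1, 4, 9, 16 };   /* pre-sorted, like 'mallocs' after VG_(ssort) */
   int extra[] = { 3, 2 };          /* plays the role of the mempool chunks */
   size_t n;

   /* Fast path: no extra items, the original array comes back unchanged. */
   int *r1 = get_sorted_items(base, 4, NULL, 0, &n);
   printf("fast path: %zu items, same array: %s\n", n, r1 == base ? "yes" : "no");

   /* Slow path: merge and sort; caller owns the new array. */
   int *r2 = get_sorted_items(base, 4, extra, 2, &n);
   for (size_t i = 0; i < n; i++) printf("%d ", r2[i]);
   printf("\n");
   free(r2);
   return 0;
}

In the real function both branches return a heap-allocated array (mallocs is
itself the array collected from the malloc chunk table), so the caller frees
whichever pointer it receives; the sketch keeps the ownership asymmetry only
to highlight that the fast path allocates nothing.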