}
/* Puts a new, heap allocated VgHashNode, into the VgHashTable. Prepends
- the node to the appropriate chain. */
+ the node to the appropriate chain. No duplicate key detection is done. */
void VG_(HT_add_node) ( VgHashTable table, void* vnode )
{
VgHashNode* node = (VgHashNode*)vnode;
/* Count the number of nodes in a table. */
extern Int VG_(HT_count_nodes) ( VgHashTable table );
-/* Add a node to the table. */
+/* Add a node to the table. Duplicate keys are permitted. */
extern void VG_(HT_add_node) ( VgHashTable t, void* node );
-/* Looks up a VgHashNode in the table. Returns NULL if not found. */
+/* Looks up a VgHashNode in the table. Returns NULL if not found. If entries
+ * with duplicate keys are present, the most recently-added of the dups will
+ * be returned, but it's probably better to avoid dups altogether. */
extern void* VG_(HT_lookup) ( VgHashTable table, UWord key );
/* Removes a VgHashNode from the table. Returns NULL if not found. */
stack traces relating to the heap block will contain entries for both
my_alloc() and internal_alloc(), which is probably not what you want.
+ For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+ custom blocks from within a heap block, B, that has been allocated with
+ malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+ -- the custom blocks will take precedence.
+
VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
Memcheck, it does two things:
Note: there is currently no VALGRIND_REALLOCLIKE_BLOCK client request; it
has to be emulated with MALLOCLIKE/FREELIKE and memory copying.
- WARNING: if your allocator uses malloc() or 'new' to allocate
- superblocks, rather than mmap() or brk(), this will not work properly --
- you'll likely get assertion failures during leak detection. This is
- because Valgrind doesn't like seeing overlapping heap blocks. Sorry.
-
Ignored if addr == 0.
*/
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
MC_Chunk* MC_(get_freed_list_head)( void );
-/* For tracking malloc'd blocks */
+/* For tracking malloc'd blocks. Nb: it's quite important that it's a
+ VgHashTable, because VgHashTable allows duplicate keys without complaint.
+ This can occur if a user marks a malloc() block as also a custom block with
+ MALLOCLIKE_BLOCK. */
extern VgHashTable MC_(malloc_list);
/* For tracking memory pools. */
void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
{
- Int i;
+ Int i, j;
tl_assert(mode != LC_Off);
tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
}
- // Sanity check -- make sure they don't overlap. But do allow exact
- // duplicates. If this assertion fails, it may mean that the application
+ // Sanity check -- make sure they don't overlap. The one exception is that
+ // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
+ // This is for bug 100628. If this occurs, we ignore the malloc() block
+ // for leak-checking purposes. This is a hack and probably should be done
+ // better, but at least it's consistent with mempools (which are treated
+ // like this in find_active_chunks). Mempools have a separate VgHashTable
+ // for mempool chunks, but if custom-allocated blocks are put in a separate
+ // table from normal heap blocks it makes free-mismatch checking more
+ // difficult.
+ //
+ // If this check fails, it probably means that the application
// has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
- // requests, specifically, has made overlapping requests (which are
- // nonsensical). Another way to screw up is to use
- // VALGRIND_MALLOCLIKE_BLOCK for stack locations; again nonsensical.
+ // requests, eg. has made overlapping requests (which are
+ // nonsensical), or used VALGRIND_MALLOCLIKE_BLOCK for stack locations;
+ // again nonsensical.
+ //
for (i = 0; i < lc_n_chunks-1; i++) {
MC_Chunk* ch1 = lc_chunks[i];
MC_Chunk* ch2 = lc_chunks[i+1];
- Bool nonsense_overlap = ! (
- // Normal case - no overlap.
- (ch1->data + ch1->szB <= ch2->data) ||
- // Degenerate case: exact duplicates.
- (ch1->data == ch2->data && ch1->szB == ch2->szB)
- );
- if (nonsense_overlap) {
- VG_(umsg)("Block [0x%lx, 0x%lx) overlaps with block [0x%lx, 0x%lx)\n",
- ch1->data, (ch1->data + ch1->szB),
- ch2->data, (ch2->data + ch2->szB));
+
+ Addr start1 = ch1->data;
+ Addr start2 = ch2->data;
+ Addr end1 = ch1->data + ch1->szB - 1;
+ Addr end2 = ch2->data + ch2->szB - 1;
+ Bool isCustom1 = ch1->allockind == MC_AllocCustom;
+ Bool isCustom2 = ch2->allockind == MC_AllocCustom;
+
+ if (end1 < start2) {
+ // Normal case - no overlap.
+
+ // We used to allow exact duplicates, I'm not sure why. --njn
+ //} else if (start1 == start2 && end1 == end2) {
+ // Degenerate case: exact duplicates.
+
+ } else if (start1 >= start2 && end1 <= end2 && isCustom1 && !isCustom2) {
+ // Block i is MALLOCLIKE and entirely within block i+1.
+ // Remove block i+1.
+ for (j = i+1; j < lc_n_chunks-1; j++) {
+ lc_chunks[j] = lc_chunks[j+1];
+ }
+ lc_n_chunks--;
+
+ } else if (start2 >= start1 && end2 <= end1 && isCustom2 && !isCustom1) {
+ // Block i+1 is MALLOCLIKE and entirely within block i.
+ // Remove block i.
+ for (j = i; j < lc_n_chunks-1; j++) {
+ lc_chunks[j] = lc_chunks[j+1];
+ }
+ lc_n_chunks--;
+
+ } else {
+ VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx\n",
+ start1, end1, start2, end2);
+ VG_(umsg)("This is usually caused by using VALGRIND_MALLOCLIKE_BLOCK\n");
+ VG_(umsg)("in an inappropriate way.\n");
+ tl_assert (0);
}
- tl_assert (!nonsense_overlap);
}
// Initialise lc_extras.
clientperm.stderr.exp \
clientperm.stdout.exp clientperm.vgtest \
custom_alloc.stderr.exp custom_alloc.vgtest \
+ custom-overlap.stderr.exp custom-overlap.vgtest \
deep_templates.vgtest \
deep_templates.stdout.exp deep_templates.stderr.exp \
describe-block.stderr.exp describe-block.vgtest \
calloc-overflow \
clientperm \
custom_alloc \
+ custom-overlap \
deep_templates \
describe-block \
doublefree error_counts errs1 exitprog execve execve2 erringfds \
--- /dev/null
+// Test for bug 100628: need to allow custom MALLOCLIKE blocks to overlap
+// with normal malloc() blocks in leak-checking -- if it happens, we ignore
+// the malloc() block during the leak check.
+
+#include <stdlib.h>
+#include "valgrind.h"
+
+int main(void)
+{
+   char* x;
+
+   // For this one, the first custom block overlaps exactly with the start of
+   // the malloc block.
+   x = malloc(1000);
+   VALGRIND_MALLOCLIKE_BLOCK(x, /*szB*/ 16, /*rzB*/0, /*isZeroed*/0);
+   VALGRIND_MALLOCLIKE_BLOCK(x+100, /*szB*/ 32, /*rzB*/0, /*isZeroed*/0);
+   VALGRIND_MALLOCLIKE_BLOCK(x+200, /*szB*/ 64, /*rzB*/0, /*isZeroed*/0);
+   VALGRIND_MALLOCLIKE_BLOCK(x+300, /*szB*/128, /*rzB*/0, /*isZeroed*/0);
+
+   // For this one, the first custom block does not overlap exactly with the
+   // start of the malloc block.
+   x = malloc(1000);
+   VALGRIND_MALLOCLIKE_BLOCK(x+100, /*szB*/ 32, /*rzB*/0, /*isZeroed*/0);
+   VALGRIND_MALLOCLIKE_BLOCK(x+200, /*szB*/ 64, /*rzB*/0, /*isZeroed*/0);
+   VALGRIND_MALLOCLIKE_BLOCK(x+300, /*szB*/128, /*rzB*/0, /*isZeroed*/0);
+
+   // Nb: nothing is freed on purpose.  With the fix for bug 100628 the
+   // enclosing malloc() blocks are ignored during leak-checking, so only
+   // the custom MALLOCLIKE blocks should show up in the leak summary.
+   return 0;
+}
--- /dev/null
+prog: custom-overlap
+vgopts: --leak-check=summary -q