}
-// The ThreadId doesn't matter, it's not used.
-SizeT VG_(arena_payload_szB) ( ThreadId tid, ArenaId aid, void* ptr )
+SizeT VG_(arena_malloc_usable_size) ( ArenaId aid, void* ptr )
{
Arena* a = arenaId_to_ArenaP(aid);
Block* b = get_payload_block(a, ptr);
// Useful for querying user blocks.
SizeT VG_(malloc_usable_size) ( void* p )
{
- return VG_(arena_payload_szB)(VG_INVALID_THREADID, VG_AR_CLIENT, p);
+ return VG_(arena_malloc_usable_size)(VG_AR_CLIENT, p);
}
if (NULL == p) \
return 0; \
\
- pszB = (SizeT)VALGRIND_NON_SIMD_CALL2( info.arena_payload_szB, \
- VG_AR_CLIENT, p ); \
+ pszB = (SizeT)VALGRIND_NON_SIMD_CALL1( info.tl_malloc_usable_size, p ); \
MALLOC_TRACE(" = %llu", (ULong)pszB ); \
\
return pszB; \
info->tl_free = VG_(tdict).tool_free;
info->tl___builtin_delete = VG_(tdict).tool___builtin_delete;
info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete;
- info->arena_payload_szB = VG_(arena_payload_szB);
+ info->tl_malloc_usable_size = VG_(tdict).tool_malloc_usable_size;
info->mallinfo = VG_(mallinfo);
info->clo_trace_malloc = VG_(clo_trace_malloc);
void (*__builtin_delete) ( ThreadId, void* ),
void (*__builtin_vec_delete) ( ThreadId, void* ),
void* (*realloc) ( ThreadId, void*, SizeT ),
+ SizeT (*malloc_usable_size) ( ThreadId, void* ),
SizeT client_malloc_redzone_szB
)
{
VG_(tdict).tool___builtin_delete = __builtin_delete;
VG_(tdict).tool___builtin_vec_delete = __builtin_vec_delete;
VG_(tdict).tool_realloc = realloc;
+ VG_(tdict).tool_malloc_usable_size = malloc_usable_size;
VG_(tdict).tool_client_redzone_szB = client_malloc_redzone_szB;
}
-// Nb: The ThreadId doesn't matter, it's not used.
-extern SizeT VG_(arena_payload_szB) ( ThreadId tid, ArenaId aid, void* payload );
+extern SizeT VG_(arena_malloc_usable_size) ( ArenaId aid, void* payload );
+
extern void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi );
extern void VG_(sanity_check_malloc_all) ( void );
void (*tl___builtin_delete) (ThreadId tid, void* p);
void (*tl___builtin_vec_delete)(ThreadId tid, void* p);
void* (*tl_realloc) (ThreadId tid, void* p, SizeT size);
- SizeT (*arena_payload_szB) (ThreadId tid, ArenaId aid, void* payload);
+ SizeT (*tl_malloc_usable_size) (ThreadId tid, void* payload);
void (*mallinfo) (ThreadId tid, struct vg_mallinfo* mi);
Bool clo_trace_malloc;
};
void (*tool___builtin_delete) (ThreadId, void*);
void (*tool___builtin_vec_delete)(ThreadId, void*);
void* (*tool_realloc) (ThreadId, void*, SizeT);
+ SizeT (*tool_malloc_usable_size) (ThreadId, void*);
SizeT tool_client_redzone_szB;
// VG_(needs).final_IR_tidy_pass
DRD_(handle_free)(tid, (Addr)p);
}
+static SizeT DRD_(malloc_usable_size) ( ThreadId tid, void* p )
+{
+ DRD_Chunk *mc = VG_(HT_lookup)( DRD_(s_malloc_list), (UWord)p );
+
+ // There may be slop, but pretend there isn't because only the asked-for
+ // area will have been shadowed properly.
+ return ( mc ? mc->size : 0 );
+}
+
void DRD_(register_malloc_wrappers)(const StartUsingMem start_callback,
const StopUsingMem stop_callback)
{
DRD_(__builtin_delete),
DRD_(__builtin_vec_delete),
DRD_(realloc),
+ DRD_(malloc_usable_size),
0);
}
}
}
+SizeT h_replace_malloc_usable_size ( ThreadId tid, void* p )
+{
+ Seg* seg = find_Seg_by_addr( (Addr)p );
+
+ // There may be slop, but pretend there isn't because only the asked-for
+ // area will have been shadowed properly.
+ return ( seg ? seg->szB : 0 );
+}
+
/*------------------------------------------------------------*/
/*--- Memory events ---*/
void h_replace___builtin_delete ( ThreadId tid, void* p );
void h_replace___builtin_vec_delete ( ThreadId tid, void* p );
void* h_replace_realloc ( ThreadId tid, void* p_old, SizeT new_size );
+SizeT h_replace_malloc_usable_size ( ThreadId tid, void* p );
void h_new_mem_startup( Addr a, SizeT len,
Bool rr, Bool ww, Bool xx, ULong di_handle );
h_replace___builtin_delete,
h_replace___builtin_vec_delete,
h_replace_realloc,
+ h_replace_malloc_usable_size,
0 /* no need for client heap redzones */ );
VG_(needs_var_info) ();
}
}
+static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
+{
+ MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
+
+ // There may be slop, but pretend there isn't because only the asked-for
+ // area will have been shadowed properly.
+ return ( md ? md->szB : 0 );
+}
+
/*--------------------------------------------------------------*/
/*--- Instrumentation ---*/
hg_cli____builtin_delete,
hg_cli____builtin_vec_delete,
hg_cli__realloc,
+ hg_cli_malloc_usable_size,
HG_CLI__MALLOC_REDZONE_SZB );
/* 21 Dec 08: disabled this; it mostly causes H to start more
void (*p__builtin_delete) ( ThreadId tid, void* p ),
void (*p__builtin_vec_delete) ( ThreadId tid, void* p ),
void* (*prealloc) ( ThreadId tid, void* p, SizeT new_size ),
+ SizeT (*pmalloc_usable_size) ( ThreadId tid, void* p ),
SizeT client_malloc_redzone_szB
);
return renew_block(tid, p_old, new_szB);
}
+static SizeT ms_malloc_usable_size ( ThreadId tid, void* p )
+{
+ HP_Chunk* hc = VG_(HT_lookup)( malloc_list, (UWord)p );
+
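+ // Massif tracks the slop for each block, so report the requested size
+ // plus the slop.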
+ return ( hc ? hc->req_szB + hc->slop_szB : 0 );
+}
+
//------------------------------------------------------------//
//--- Stacks ---//
ms___builtin_delete,
ms___builtin_vec_delete,
ms_realloc,
+ ms_malloc_usable_size,
0 );
// HP_Chunks
ignoring.post.exp ignoring.stderr.exp ignoring.vgtest \
long-names.post.exp long-names.stderr.exp long-names.vgtest \
long-time.post.exp long-time.stderr.exp long-time.vgtest \
+ malloc_usable.stderr.exp malloc_usable.vgtest \
new-cpp.post.exp new-cpp.stderr.exp new-cpp.vgtest \
no-stack-no-heap.post.exp no-stack-no-heap.stderr.exp no-stack-no-heap.vgtest \
null.post.exp null.stderr.exp null.vgtest \
insig \
long-names \
long-time \
+ malloc_usable \
new-cpp \
null \
one \
--- /dev/null
+#include <assert.h>
+#include <malloc.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+int main(void)
+{
+ // Because our allocations are in multiples of 8 or 16, 99 will round up
+ // to 104 or 112.
+ int* x = malloc(99);
+
+ // XXX: would be better to have a HAVE_MALLOC_USABLE_SIZE variable here
+# if !defined(_AIX)
+ assert(104 == malloc_usable_size(x) ||
+ 112 == malloc_usable_size(x));
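+ // Valgrind's replacement returns 0 for NULL and for pointers it doesn't
+ // know about.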
+ assert( 0 == malloc_usable_size(NULL));
+ assert( 0 == malloc_usable_size((void*)0xdeadbeef));
+# endif
+
+ return 0;
+}
--- /dev/null
+prog: malloc_usable
+vgopts: -q
void MC_(__builtin_delete) ( ThreadId tid, void* p );
void MC_(__builtin_vec_delete) ( ThreadId tid, void* p );
void* MC_(realloc) ( ThreadId tid, void* p, SizeT new_size );
+SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p );
/*------------------------------------------------------------*/
MC_(__builtin_delete),
MC_(__builtin_vec_delete),
MC_(realloc),
+ MC_(malloc_usable_size),
MC_MALLOC_REDZONE_SZB );
VG_(needs_xml_output) ();
return p_new;
}
+SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
+{
+ MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
+
+ // There may be slop, but pretend there isn't because only the asked-for
+ // area will be marked as addressable.
+ return ( mc ? mc->szB : 0 );
+}
+
/* Memory pool stuff. */
void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
int main(void)
{
- // Since our allocations are in multiples of 8, 99 will round up to 104.
+ // Because Memcheck marks any slop as inaccessible, it doesn't round up
+ // sizes for malloc_usable_size().
int* x = malloc(99);
+
+ // DDD: would be better to have a HAVE_MALLOC_USABLE_SIZE variable here
# if !defined(_AIX)
- assert(104 == malloc_usable_size(x));
+ assert(99 == malloc_usable_size(x));
+ assert( 0 == malloc_usable_size(NULL));
+ assert( 0 == malloc_usable_size((void*)0xdeadbeef));
# endif
+
return 0;
}