From: Florian Krohm Date: Thu, 11 Sep 2014 20:15:23 +0000 (+0000) Subject: Rename VG_(malloc_usable_size) to VG_(cli_malloc_usable_size) X-Git-Tag: svn/VALGRIND_3_11_0~1014 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=4754dd70f26a52e719b2a2e384d1a7b329715fb7;p=thirdparty%2Fvalgrind.git Rename VG_(malloc_usable_size) to VG_(cli_malloc_usable_size) because it operates on the CLIENT arena. Given that VG_(malloc) operates on the CORE arena, it was unexpected for VG_(malloc_usable_size) to use a different arena. Move function definition to the proper place (next to VG_(cli_malloc)) and fix call sites. git-svn-id: svn://svn.valgrind.org/valgrind/trunk@14516 --- diff --git a/coregrind/m_mallocfree.c b/coregrind/m_mallocfree.c index 043069b36d..93b7a2a4a4 100644 --- a/coregrind/m_mallocfree.c +++ b/coregrind/m_mallocfree.c @@ -2599,12 +2599,6 @@ HChar* VG_(strdup) ( const HChar* cc, const HChar* s ) return VG_(arena_strdup) ( VG_AR_CORE, cc, s ); } -// Useful for querying user blocks. -SizeT VG_(malloc_usable_size) ( void* p ) -{ - return VG_(arena_malloc_usable_size)(VG_AR_CLIENT, p); -} - void* VG_(perm_malloc) ( SizeT size, Int align ) { return VG_(arena_perm_malloc) ( VG_AR_CORE, size, align ); diff --git a/coregrind/m_replacemalloc/replacemalloc_core.c b/coregrind/m_replacemalloc/replacemalloc_core.c index aa9b46a366..9bf40dbe4f 100644 --- a/coregrind/m_replacemalloc/replacemalloc_core.c +++ b/coregrind/m_replacemalloc/replacemalloc_core.c @@ -101,6 +101,12 @@ void VG_(cli_free) ( void* p ) VG_(arena_free) ( VG_AR_CLIENT, p ); } +// Useful for querying user blocks. 
+SizeT VG_(cli_malloc_usable_size) ( void* p ) +{ + return VG_(arena_malloc_usable_size)(VG_AR_CLIENT, p); +} + Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size, SizeT rz_szB ) { return ( start - rz_szB <= a && a < start + size + rz_szB ); diff --git a/exp-dhat/dh_main.c b/exp-dhat/dh_main.c index 4142a90522..e18b395f48 100644 --- a/exp-dhat/dh_main.c +++ b/exp-dhat/dh_main.c @@ -466,7 +466,7 @@ void* new_block ( ThreadId tid, void* p, SizeT req_szB, SizeT req_alignB, return NULL; } if (is_zeroed) VG_(memset)(p, 0, req_szB); - actual_szB = VG_(malloc_usable_size)(p); + actual_szB = VG_(cli_malloc_usable_size)(p); tl_assert(actual_szB >= req_szB); /* slop_szB = actual_szB - req_szB; */ } else { diff --git a/include/pub_tool_mallocfree.h b/include/pub_tool_mallocfree.h index 7ddb32e72e..0524162c7c 100644 --- a/include/pub_tool_mallocfree.h +++ b/include/pub_tool_mallocfree.h @@ -45,10 +45,6 @@ extern void* VG_(calloc) ( const HChar* cc, SizeT n, SizeT bytes_per_ele extern void* VG_(realloc) ( const HChar* cc, void* p, SizeT size ); extern HChar* VG_(strdup) ( const HChar* cc, const HChar* s ); -// Returns the usable size of a heap-block. It's the asked-for size plus -// possibly some more due to rounding up. -extern SizeT VG_(malloc_usable_size)( void* p ); - // TODO: move somewhere else // Call here to bomb the system when out of memory (mmap anon fails) __attribute__((noreturn)) @@ -60,8 +56,7 @@ extern void VG_(out_of_memory_NORETURN) ( const HChar* who, SizeT szB ); // on a multiple of align. // Use the macro vg_alignof (type) to get a safe alignment for a type. // No other function can be used on these permanently allocated blocks. -// In particular, do *not* call VG_(free) or VG_(malloc_usable_size) -// or VG_(realloc). +// In particular, do *not* call VG_(free) or VG_(realloc). // Technically, these blocks will be returned from big superblocks // only containing such permanently allocated blocks. 
// Note that there is no cc cost centre : all such blocks will be diff --git a/include/pub_tool_replacemalloc.h b/include/pub_tool_replacemalloc.h index bab3b4fb83..bc019f5eab 100644 --- a/include/pub_tool_replacemalloc.h +++ b/include/pub_tool_replacemalloc.h @@ -42,6 +42,10 @@ * alloc/freeing. */ extern void* VG_(cli_malloc) ( SizeT align, SizeT nbytes ); extern void VG_(cli_free) ( void* p ); +// Returns the usable size of a heap-block. It's the asked-for size plus +// possibly some more due to rounding up. +extern SizeT VG_(cli_malloc_usable_size)( void* p ); + /* If a tool uses deferred freeing (e.g. memcheck to catch accesses to freed memory) it can maintain number and total size of queued blocks diff --git a/massif/ms_main.c b/massif/ms_main.c index 0770f3dd46..9f33546c4e 100644 --- a/massif/ms_main.c +++ b/massif/ms_main.c @@ -1567,7 +1567,7 @@ void* alloc_and_record_block ( ThreadId tid, SizeT req_szB, SizeT req_alignB, return NULL; } if (is_zeroed) VG_(memset)(p, 0, req_szB); - actual_szB = VG_(malloc_usable_size)(p); + actual_szB = VG_(cli_malloc_usable_size)(p); tl_assert(actual_szB >= req_szB); slop_szB = actual_szB - req_szB; @@ -1682,7 +1682,7 @@ void* realloc_block ( ThreadId tid, void* p_old, SizeT new_req_szB ) } VG_(memcpy)(p_new, p_old, old_req_szB + old_slop_szB); VG_(cli_free)(p_old); - new_actual_szB = VG_(malloc_usable_size)(p_new); + new_actual_szB = VG_(cli_malloc_usable_size)(p_new); tl_assert(new_actual_szB >= new_req_szB); new_slop_szB = new_actual_szB - new_req_szB; }