From: Philippe Waroquiers Date: Tue, 31 Jul 2012 22:17:28 +0000 (+0000) Subject: Implement --redzone-size and --core-redzone-size X-Git-Tag: svn/VALGRIND_3_8_0~61 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d045b4236ad804c0bbeddfc4f7fc2a3ac27b1265;p=thirdparty%2Fvalgrind.git Implement --redzone-size and --core-redzone-size * For tools replacing the malloc library (e.g. Memcheck, Helgrind, ...), the option --redzone-size= allows you to control the padding blocks (redzones) added before and after each client allocated block. Smaller redzones decrease the memory needed by Valgrind. Bigger redzones increase the chance of detecting block overruns or underruns. git-svn-id: svn://svn.valgrind.org/valgrind/trunk@12807 --- diff --git a/NEWS b/NEWS index a98096bda2..ad79d6e66b 100644 --- a/NEWS +++ b/NEWS @@ -61,6 +61,12 @@ xxx Don't forget to update VALGRIND_MAJOR/MINOR before release * ==================== OTHER CHANGES ==================== +* For tools replacing the malloc library (e.g. Memcheck, Helgrind, ...), + the option --redzone-size= allows you to control the padding + blocks (redzones) added before and after each client allocated block. + Smaller redzones decrease the memory needed by Valgrind. Bigger + redzones increase the chance of detecting block overruns or underruns. + * The C++ demangler has been updated so as to work well with C++ compiled by up to at least g++ 4.6. diff --git a/coregrind/m_main.c b/coregrind/m_main.c index 0c4819fa36..7229bf2903 100644 --- a/coregrind/m_main.c +++ b/coregrind/m_main.c @@ -161,7 +161,9 @@ static void usage_NORETURN ( Bool debug_help ) " [use current 'ulimit' value]\n" "\n" " user options for Valgrind tools that replace malloc:\n" -" --alignment= set minimum alignment of heap allocations [%ld]\n" +" --alignment= set minimum alignment of heap allocations [%s]\n" +" --redzone-size= set minimum size of redzones added before/after\n" +" heap blocks (in bytes). 
[%s]\n" "\n" " uncommon user options for all Valgrind tools:\n" " --fullpath-after= (with nothing after the '=')\n" @@ -217,6 +219,8 @@ static void usage_NORETURN ( Bool debug_help ) " --trace-redir=no|yes show redirection details? [no]\n" " --trace-sched=no|yes show thread scheduler details? [no]\n" " --profile-heap=no|yes profile Valgrind's own space use\n" +" --core-redzone= set minimum size of redzones added before/after\n" +" heap blocks allocated for Valgrind internal use (in bytes) [4]\n" " --wait-for-gdb=yes|no pause on startup to wait for gdb attach\n" " --sym-offsets=yes|no show syms in form 'name+offset' ? [no]\n" " --command-line-only=no|yes only use command line options [no]\n" @@ -260,15 +264,29 @@ static void usage_NORETURN ( Bool debug_help ) "\n"; Char* gdb_path = GDB_PATH; + Char default_alignment[30]; + Char default_redzone_size[30]; // Ensure the message goes to stdout VG_(log_output_sink).fd = 1; VG_(log_output_sink).is_socket = False; - /* 'usage1' expects two int, two char* argument, and one SizeT argument. */ + if (VG_(needs).malloc_replacement) { + VG_(sprintf)(default_alignment, "%d", VG_MIN_MALLOC_SZB); + VG_(sprintf)(default_redzone_size, "%lu", VG_(tdict).tool_client_redzone_szB); + } else { + VG_(strcpy)(default_alignment, "not used by this tool"); + VG_(strcpy)(default_redzone_size, "not used by this tool"); + } + /* 'usage1' a type as described after each arg. 
*/ VG_(printf)(usage1, - VG_(clo_vgdb_error), gdb_path, VG_MIN_MALLOC_SZB, - VG_(clo_vgdb_poll), VG_(vgdb_prefix_default)()); + VG_(clo_vgdb_error) /* int */, + gdb_path /* char* */, + default_alignment /* char* */, + default_redzone_size /* char* */, + VG_(clo_vgdb_poll) /* int */, + VG_(vgdb_prefix_default)() /* char* */ + ); if (VG_(details).name) { VG_(printf)(" user options for %s:\n", VG_(details).name); if (VG_(needs).command_line_options) @@ -467,6 +485,8 @@ void main_process_cmd_line_options ( /*OUT*/Bool* logging_to_fd, else if VG_STREQN(16, arg, "--main-stacksize") {} else if VG_STREQN(11, arg, "--sim-hints") {} else if VG_STREQN(14, arg, "--profile-heap") {} + else if VG_STREQN(14, arg, "--core-redzone-size") {} + else if VG_STREQN(14, arg, "--redzone-size") {} // These options are new. else if (VG_STREQ(arg, "-v") || @@ -1523,14 +1543,18 @@ Int valgrind_main ( Int argc, HChar **argv, HChar **envp ) //-------------------------------------------------------------- /* Start the debugging-log system ASAP. First find out how many "-d"s were specified. This is a pre-scan of the command line. Also - get --profile-heap=yes which is needed by the time we start up dynamic - memory management. */ + get --profile-heap=yes, --core-redzone-size, --redzone-size which are + needed by the time we start up dynamic memory management. */ loglevel = 0; for (i = 1; i < argc; i++) { if (argv[i][0] != '-') break; if VG_STREQ(argv[i], "--") break; if VG_STREQ(argv[i], "-d") loglevel++; if VG_BOOL_CLO(argv[i], "--profile-heap", VG_(clo_profile_heap)) {} + if VG_BINT_CLO(argv[i], "--core-redzone-size", VG_(clo_core_redzone_size), + 0, MAX_CLO_REDZONE_SZB) {} + if VG_BINT_CLO(argv[i], "--redzone-size", VG_(clo_redzone_size), + 0, MAX_CLO_REDZONE_SZB) {} } /* ... and start the debug logger. 
Now we can safely emit logging @@ -1590,7 +1614,7 @@ Int valgrind_main ( Int argc, HChar **argv, HChar **envp ) //-------------------------------------------------------------- // Start up the dynamic memory manager // p: address space management - // p: getting --profile-heap + // p: getting --profile-heap,--core-redzone-size,--redzone-size // In fact m_mallocfree is self-initialising, so there's no // initialisation call to do. Instead, try a simple malloc/ // free pair right now to check that nothing is broken. diff --git a/coregrind/m_mallocfree.c b/coregrind/m_mallocfree.c index 2eb7e9c18f..be6031d9f6 100644 --- a/coregrind/m_mallocfree.c +++ b/coregrind/m_mallocfree.c @@ -269,6 +269,10 @@ SizeT mk_plain_bszB ( SizeT bszB ) return bszB & (~SIZE_T_0x1); } +// Forward definition. +static +void ensure_mm_init ( ArenaId aid ); + // return either 0 or sizeof(ULong) depending on whether or not // heap profiling is engaged #define hp_overhead_szB() set_at_init_hp_overhead_szB @@ -491,24 +495,44 @@ static Arena* arenaId_to_ArenaP ( ArenaId arena ) return & vg_arena[arena]; } -// Initialise an arena. rz_szB is the minimum redzone size; it might be -// made bigger to ensure that VG_MIN_MALLOC_SZB is observed. +SizeT VG_(malloc_effective_client_redzone_size)(void) +{ + vg_assert(VG_(needs).malloc_replacement); + ensure_mm_init (VG_AR_CLIENT); + /* ensure_mm_init will call arena_init if not yet done. + This then ensures that the arena redzone size is properly + initialised. */ + return arenaId_to_ArenaP(VG_AR_CLIENT)->rz_szB; +} + +// Initialise an arena. rz_szB is the (default) minimum redzone size; +// It might be overridden by VG_(clo_redzone_size) or VG_(clo_core_redzone_size). +// It might be made bigger to ensure that VG_MIN_MALLOC_SZB is observed. static void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB, SizeT min_unsplittable_sblock_szB ) { SizeT i; Arena* a = arenaId_to_ArenaP(aid); + + // Ensure default redzones are a reasonable size. 
+ vg_assert(rz_szB <= MAX_REDZONE_SZB); - // Ensure redzones are a reasonable size. They must always be at least - // the size of a pointer, for holding the prev/next pointer (see the layout - // details at the top of this file). - vg_assert(rz_szB < 128); + /* Override the default redzone size if a clo value was given. + Note that the clo value can be significantly bigger than MAX_REDZONE_SZB + to allow the user to chase horrible bugs using up to 1 page + of protection. */ + if (VG_AR_CLIENT == aid) { + if (VG_(clo_redzone_size) != -1) + rz_szB = VG_(clo_redzone_size); + } else { + if (VG_(clo_core_redzone_size) != rz_szB) + rz_szB = VG_(clo_core_redzone_size); + } + + // Redzones must always be at least the size of a pointer, for holding the + // prev/next pointer (see the layout details at the top of this file). if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*); - - vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0); - a->name = name; - a->clientmem = ( VG_AR_CLIENT == aid ? True : False ); // The size of the low and high admin sections in a block must be a // multiple of VG_MIN_MALLOC_SZB. So we round up the asked-for @@ -517,6 +541,13 @@ void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++; vg_assert(overhead_szB_lo(a) - hp_overhead_szB() == overhead_szB_hi(a)); + // Here we have established the effective redzone size. + + + vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0); + a->name = name; + a->clientmem = ( VG_AR_CLIENT == aid ? 
True : False ); + a->min_sblock_szB = min_sblock_szB; a->min_unsplittable_sblock_szB = min_unsplittable_sblock_szB; for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL; @@ -549,14 +580,15 @@ void VG_(print_all_arena_stats) ( void ) "%llu/%llu unsplit/split sb unmmap'd, " "%8ld/%8ld max/curr, " "%10llu/%10llu totalloc-blocks/bytes," - " %10llu searches\n", + " %10llu searches %lu rzB\n", a->name, a->stats__bytes_mmaped_max, a->stats__bytes_mmaped, a->stats__nreclaim_unsplit, a->stats__nreclaim_split, a->stats__bytes_on_loan_max, a->stats__bytes_on_loan, a->stats__tot_blocks, a->stats__tot_bytes, - a->stats__nsearches + a->stats__nsearches, + a->rz_szB ); } } @@ -615,8 +647,7 @@ void ensure_mm_init ( ArenaId aid ) // Check and set the client arena redzone size if (VG_(needs).malloc_replacement) { client_rz_szB = VG_(tdict).tool_client_redzone_szB; - // 128 is no special figure, just something not too big - if (client_rz_szB > 128) { + if (client_rz_szB > MAX_REDZONE_SZB) { VG_(printf)( "\nTool error:\n" " specified redzone size is too big (%llu)\n", (ULong)client_rz_szB); @@ -641,13 +672,20 @@ void ensure_mm_init ( ArenaId aid ) VG_(clo_profile_heap) ? VG_MIN_MALLOC_SZB : 0; // Initialise the non-client arenas // Similarly to client arena, big allocations will be unsplittable. 
- arena_init ( VG_AR_CORE, "core", 4, 1048576, 1048576+1 ); - arena_init ( VG_AR_TOOL, "tool", 4, 4194304, 4194304+1 ); - arena_init ( VG_AR_DINFO, "dinfo", 4, 1048576, 1048576+1 ); - arena_init ( VG_AR_DEMANGLE, "demangle", 4, 65536, 65536+1 ); - arena_init ( VG_AR_EXECTXT, "exectxt", 4, 1048576, 1048576+1 ); - arena_init ( VG_AR_ERRORS, "errors", 4, 65536, 65536+1 ); - arena_init ( VG_AR_TTAUX, "ttaux", 4, 65536, 65536+1 ); + arena_init ( VG_AR_CORE, "core", CORE_REDZONE_DEFAULT_SZB, + 1048576, 1048576+1 ); + arena_init ( VG_AR_TOOL, "tool", CORE_REDZONE_DEFAULT_SZB, + 4194304, 4194304+1 ); + arena_init ( VG_AR_DINFO, "dinfo", CORE_REDZONE_DEFAULT_SZB, + 1048576, 1048576+1 ); + arena_init ( VG_AR_DEMANGLE, "demangle", CORE_REDZONE_DEFAULT_SZB, + 65536, 65536+1 ); + arena_init ( VG_AR_EXECTXT, "exectxt", CORE_REDZONE_DEFAULT_SZB, + 1048576, 1048576+1 ); + arena_init ( VG_AR_ERRORS, "errors", CORE_REDZONE_DEFAULT_SZB, + 65536, 65536+1 ); + arena_init ( VG_AR_TTAUX, "ttaux", CORE_REDZONE_DEFAULT_SZB, + 65536, 65536+1 ); nonclient_inited = True; } @@ -1257,10 +1295,11 @@ static void cc_analyse_alloc_arena ( ArenaId aid ) VG_(printf)( "-------- Arena \"%s\": %lu/%lu max/curr mmap'd, " "%llu/%llu unsplit/split sb unmmap'd, " - "%lu/%lu max/curr on_loan --------\n", + "%lu/%lu max/curr on_loan %lu rzB --------\n", a->name, a->stats__bytes_mmaped_max, a->stats__bytes_mmaped, a->stats__nreclaim_unsplit, a->stats__nreclaim_split, - a->stats__bytes_on_loan_max, a->stats__bytes_on_loan + a->stats__bytes_on_loan_max, a->stats__bytes_on_loan, + a->rz_szB ); for (j = 0; j < a->sblocks_used; ++j) { diff --git a/coregrind/m_options.c b/coregrind/m_options.c index ab38d080e8..4c3282fc98 100644 --- a/coregrind/m_options.c +++ b/coregrind/m_options.c @@ -96,6 +96,10 @@ enum FairSchedType VG_(clo_fair_sched) = disable_fair_sched; Bool VG_(clo_trace_sched) = False; Bool VG_(clo_profile_heap) = False; +Int VG_(clo_core_redzone_size) = CORE_REDZONE_DEFAULT_SZB; +// A value != -1 
overrides the tool-specific value +// VG_(needs_malloc_replacement).tool_client_redzone_szB +Int VG_(clo_redzone_size) = -1; Int VG_(clo_dump_error) = 0; Int VG_(clo_backtrace_size) = 12; Char* VG_(clo_sim_hints) = NULL; diff --git a/coregrind/pub_core_options.h b/coregrind/pub_core_options.h index 60978e1241..c24c6ec5cd 100644 --- a/coregrind/pub_core_options.h +++ b/coregrind/pub_core_options.h @@ -159,6 +159,18 @@ extern enum FairSchedType VG_(clo_fair_sched); extern Bool VG_(clo_trace_sched); /* DEBUG: do heap profiling? default: NO */ extern Bool VG_(clo_profile_heap); +#define MAX_REDZONE_SZB 128 +// Maximum for the default values for core arenas and for client +// arena given by the tool. +// 128 is no special figure, just something not too big +#define MAX_CLO_REDZONE_SZB 4096 +// We allow the user to increase the redzone size to 4Kb : +// This allows "off by one" in an array of pages to be detected. +#define CORE_REDZONE_DEFAULT_SZB 4 +extern Int VG_(clo_core_redzone_size); +// VG_(clo_redzone_size) has default value -1, indicating to keep +// the tool provided value. +extern Int VG_(clo_redzone_size); /* DEBUG: display gory details for the k'th most popular error. default: Infinity. */ extern Int VG_(clo_dump_error); diff --git a/docs/xml/manual-core.xml b/docs/xml/manual-core.xml index 8169ea415e..f9da8438a4 100644 --- a/docs/xml/manual-core.xml +++ b/docs/xml/manual-core.xml @@ -1411,8 +1411,8 @@ that can report errors, e.g. Memcheck, but not Cachegrind. For tools that use their own version of -malloc (e.g. Memcheck and -Massif), the following options apply. +malloc (e.g. Memcheck, +Massif, Helgrind, DRD), the following options apply. @@ -1431,6 +1431,26 @@ Massif), the following options apply. + + + + + + Valgrind's malloc, realloc, etc, add padding + blocks before and after each block allocated for the client. Such padding + blocks are called redzones. + The default value for the redzone size depends on the tool. 
For example, Memcheck adds and protects a minimum of 16 bytes before and + after each block allocated by the client to detect block overrun or + underrun. + + Increasing the redzone size makes it possible to detect more cases + of block overrun or underrun. Decreasing the redzone size will + reduce the memory needed by Valgrind but also reduces the chance of + detecting block overrun/underrun. + + + @@ -2156,7 +2176,7 @@ plans to disable them. If one of them breaks, please mail us! If you get an assertion failure in m_mallocfree.c, this may have happened because your program wrote off the end of a heap block, or before its -beginning, thus corrupting head metadata. Valgrind hopefully will have +beginning, thus corrupting heap metadata. Valgrind hopefully will have emitted a message to that effect before dying in this way. Read the for more advice about common problems, diff --git a/helgrind/hg_main.c b/helgrind/hg_main.c index b46fc0e492..81fbea1a4b 100644 --- a/helgrind/hg_main.c +++ b/helgrind/hg_main.c @@ -102,7 +102,7 @@ static void all__sanity_check ( Char* who ); /* fwds */ -#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */ +#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */ // 0 for none, 1 for dump at end of run #define SHOW_DATA_STRUCTURES 0 @@ -5159,7 +5159,7 @@ static void hg_pre_clo_init ( void ) hg_cli____builtin_vec_delete, hg_cli__realloc, hg_cli_malloc_usable_size, - HG_CLI__MALLOC_REDZONE_SZB ); + HG_CLI__DEFAULT_MALLOC_REDZONE_SZB ); /* 21 Dec 08: disabled this; it mostly causes H to start more slowly and use significantly more memory, without very often diff --git a/include/pub_tool_mallocfree.h b/include/pub_tool_mallocfree.h index a71a1ffebc..6885562e18 100644 --- a/include/pub_tool_mallocfree.h +++ b/include/pub_tool_mallocfree.h @@ -47,6 +47,14 @@ extern Char* VG_(strdup) ( HChar* cc, const Char* s ); // possibly some more due to rounding up. 
extern SizeT VG_(malloc_usable_size)( void* p ); +// If tool is replacing malloc for the client, the below returns +// the effective client redzone as derived from the default +// provided by the tool, VG_(clo_redzone_size) and the minimum +// redzone required by m_mallocfree.c. +// It is an error to call this before VG_(needs_malloc_replacement) has +// been called. +extern SizeT VG_(malloc_effective_client_redzone_size)(void); + // TODO: move somewhere else // Call here to bomb the system when out of memory (mmap anon fails) __attribute__((noreturn)) diff --git a/memcheck/mc_errors.c b/memcheck/mc_errors.c index 5084f2853d..84d9045efc 100644 --- a/memcheck/mc_errors.c +++ b/memcheck/mc_errors.c @@ -1082,7 +1082,7 @@ static Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a) { return VG_(addr_is_in_block)( a, mc->data, mc->szB, - MC_MALLOC_REDZONE_SZB ); + MC_(Malloc_Redzone_SzB) ); } static Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB) diff --git a/memcheck/mc_include.h b/memcheck/mc_include.h index 017868e5ef..d035e5e938 100644 --- a/memcheck/mc_include.h +++ b/memcheck/mc_include.h @@ -42,8 +42,12 @@ /*--- Tracking the heap ---*/ /*------------------------------------------------------------*/ -/* We want at least a 16B redzone on client heap blocks for Memcheck */ -#define MC_MALLOC_REDZONE_SZB 16 +/* By default, we want at least a 16B redzone on client heap blocks + for Memcheck. + The default can be modified by --redzone-size. */ +#define MC_MALLOC_DEFAULT_REDZONE_SZB 16 +// effective redzone, as (possibly) modified by --redzone-size: +extern SizeT MC_(Malloc_Redzone_SzB); /* For malloc()/new/new[] vs. free()/delete/delete[] mismatch checking. 
*/ typedef diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c index 114afcd6b1..faa39113fe 100644 --- a/memcheck/mc_main.c +++ b/memcheck/mc_main.c @@ -6335,7 +6335,8 @@ static void mc_pre_clo_init(void) MC_(__builtin_vec_delete), MC_(realloc), MC_(malloc_usable_size), - MC_MALLOC_REDZONE_SZB ); + MC_MALLOC_DEFAULT_REDZONE_SZB ); + MC_(Malloc_Redzone_SzB) = VG_(malloc_effective_client_redzone_size)(); VG_(needs_xml_output) (); diff --git a/memcheck/mc_malloc_wrappers.c b/memcheck/mc_malloc_wrappers.c index 7399c86b43..3c2086ddb0 100644 --- a/memcheck/mc_malloc_wrappers.c +++ b/memcheck/mc_malloc_wrappers.c @@ -63,6 +63,8 @@ static ULong cmalloc_bs_mallocd = 0; /*--- Tracking malloc'd and free'd blocks ---*/ /*------------------------------------------------------------*/ +SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB + /* Record malloc'd blocks. */ VgHashTable MC_(malloc_list) = NULL; @@ -174,7 +176,7 @@ MC_Chunk* MC_(get_freed_block_bracketting) (Addr a) mc = freed_list_start[i]; while (mc) { if (VG_(addr_is_in_block)( a, mc->data, mc->szB, - MC_MALLOC_REDZONE_SZB )) + MC_(Malloc_Redzone_SzB) )) return mc; mc = mc->next; } @@ -387,19 +389,19 @@ void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind ) void MC_(free) ( ThreadId tid, void* p ) { MC_(handle_free)( - tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc ); + tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc ); } void MC_(__builtin_delete) ( ThreadId tid, void* p ) { MC_(handle_free)( - tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew); + tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew); } void MC_(__builtin_vec_delete) ( ThreadId tid, void* p ) { MC_(handle_free)( - tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec); + tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec); } void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB ) @@ -454,10 +456,10 @@ void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB ) tl_assert(ec); /* 
Retained part is copied, red zones set as normal */ - MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB, - MC_MALLOC_REDZONE_SZB ); + MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB), + MC_(Malloc_Redzone_SzB) ); MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB ); - MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB ); + MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB)); /* Copy from old to new */ VG_(memcpy)((void*)a_new, p_old, new_szB); @@ -472,7 +474,7 @@ void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB ) /* Nb: we have to allocate a new MC_Chunk for the new memory rather than recycling the old one, so that any erroneous accesses to the old memory are reported. */ - die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB ); + die_and_free_mem ( tid, mc, MC_(Malloc_Redzone_SzB) ); // Allocate a new chunk. mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc ); @@ -497,12 +499,12 @@ void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB ) tl_assert(VG_(is_plausible_ECU)(ecu)); /* First half kept and copied, second half new, red zones as normal */ - MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB, - MC_MALLOC_REDZONE_SZB ); + MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB), + MC_(Malloc_Redzone_SzB) ); MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB ); MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB, ecu | MC_OKIND_HEAP ); - MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB ); + MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB) ); /* Possibly fill new area with specified junk */ if (MC_(clo_malloc_fill) != -1) { @@ -525,7 +527,7 @@ void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB ) /* Nb: we have to allocate a new MC_Chunk for the new memory rather than recycling the old one, so that any erroneous accesses to the old memory are reported. 
*/ - die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB ); + die_and_free_mem ( tid, mc, MC_(Malloc_Redzone_SzB) ); // Allocate a new chunk. mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc ); diff --git a/memcheck/tests/Makefile.am b/memcheck/tests/Makefile.am index 96b31676e7..d0470aa82e 100644 --- a/memcheck/tests/Makefile.am +++ b/memcheck/tests/Makefile.am @@ -74,6 +74,8 @@ EXTRA_DIST = \ clientperm.stdout.exp clientperm.vgtest \ clireq_nofill.stderr.exp \ clireq_nofill.stdout.exp clireq_nofill.vgtest \ + clo_redzone_default.vgtest clo_redzone_128.vgtest \ + clo_redzone_default.stderr.exp clo_redzone_128.stderr.exp \ custom_alloc.stderr.exp custom_alloc.vgtest custom_alloc.stderr.exp-s390x-mvc \ custom-overlap.stderr.exp custom-overlap.vgtest \ deep-backtrace.vgtest deep-backtrace.stderr.exp \ @@ -240,6 +242,7 @@ check_PROGRAMS = \ calloc-overflow \ clientperm \ clireq_nofill \ + clo_redzone \ custom_alloc \ custom-overlap \ deep-backtrace \ diff --git a/memcheck/tests/clo_redzone.c b/memcheck/tests/clo_redzone.c new file mode 100644 index 0000000000..5733ffae12 --- /dev/null +++ b/memcheck/tests/clo_redzone.c @@ -0,0 +1,17 @@ +#include +#include +int main() +{ + __attribute__((unused)) char *p = malloc (1); + char *b1 = malloc (128); + char *b2 = malloc (128); + fprintf (stderr, "b1 %p b2 %p\n", b1, b2); + + // Try to land in b2 from b1, causing no error + // with the default redzone-size, but having + // an error with a bigger redzone-size. + // We need to choose a value which lands in b2 + // on 32 bits and 64 bits. + b1[127 + 70] = 'a'; + return 0; +} diff --git a/memcheck/tests/clo_redzone_128.stderr.exp b/memcheck/tests/clo_redzone_128.stderr.exp new file mode 100644 index 0000000000..08b360dae3 --- /dev/null +++ b/memcheck/tests/clo_redzone_128.stderr.exp @@ -0,0 +1,7 @@ +b1 0x........ b2 0x........ +Invalid write of size 1 + ... + Address 0x........ is 69 bytes after a block of size 128 alloc'd + at 0x........: malloc (vg_replace_malloc.c:...) 
+ ... + diff --git a/memcheck/tests/clo_redzone_128.vgtest b/memcheck/tests/clo_redzone_128.vgtest new file mode 100644 index 0000000000..6b7b2a6b4c --- /dev/null +++ b/memcheck/tests/clo_redzone_128.vgtest @@ -0,0 +1,2 @@ +vgopts: --leak-check=no -q --redzone-size=128 +prog: clo_redzone diff --git a/memcheck/tests/clo_redzone_default.stderr.exp b/memcheck/tests/clo_redzone_default.stderr.exp new file mode 100644 index 0000000000..f86f233f31 --- /dev/null +++ b/memcheck/tests/clo_redzone_default.stderr.exp @@ -0,0 +1 @@ +b1 0x........ b2 0x........ diff --git a/memcheck/tests/clo_redzone_default.vgtest b/memcheck/tests/clo_redzone_default.vgtest new file mode 100644 index 0000000000..fc63752ac2 --- /dev/null +++ b/memcheck/tests/clo_redzone_default.vgtest @@ -0,0 +1,2 @@ +vgopts: --leak-check=no -q +prog: clo_redzone diff --git a/memcheck/tests/x86-linux/scalar.stderr.exp b/memcheck/tests/x86-linux/scalar.stderr.exp index 364e6e7f60..54616dbaf0 100644 --- a/memcheck/tests/x86-linux/scalar.stderr.exp +++ b/memcheck/tests/x86-linux/scalar.stderr.exp @@ -2116,7 +2116,9 @@ Syscall param rt_sigaction(act->sa_handler) points to unaddressable byte(s) Syscall param rt_sigaction(act->sa_mask) points to unaddressable byte(s) ... by 0x........: main (scalar.c:776) - Address 0x........ is not stack'd, malloc'd or (recently) free'd + Address 0x........ is 16 bytes after a block of size 4 alloc'd + at 0x........: malloc (vg_replace_malloc.c:...) + by 0x........: main (scalar.c:30) Syscall param rt_sigaction(act->sa_flags) points to unaddressable byte(s) ... diff --git a/none/tests/cmdline1.stdout.exp b/none/tests/cmdline1.stdout.exp index 12dafde30a..33e8ea34e3 100644 --- a/none/tests/cmdline1.stdout.exp +++ b/none/tests/cmdline1.stdout.exp @@ -50,7 +50,9 @@ usage: valgrind [options] prog-and-args [use current 'ulimit' value] user options for Valgrind tools that replace malloc: - --alignment= set minimum alignment of heap allocations [...] 
+ --alignment= set minimum alignment of heap allocations [not used by this tool] + --redzone-size= set minimum size of redzones added before/after + heap blocks (in bytes). [not used by this tool] uncommon user options for all Valgrind tools: --fullpath-after= (with nothing after the '=') diff --git a/none/tests/cmdline2.stdout.exp b/none/tests/cmdline2.stdout.exp index 793c352956..72184a8721 100644 --- a/none/tests/cmdline2.stdout.exp +++ b/none/tests/cmdline2.stdout.exp @@ -50,7 +50,9 @@ usage: valgrind [options] prog-and-args [use current 'ulimit' value] user options for Valgrind tools that replace malloc: - --alignment= set minimum alignment of heap allocations [...] + --alignment= set minimum alignment of heap allocations [not used by this tool] + --redzone-size= set minimum size of redzones added before/after + heap blocks (in bytes). [not used by this tool] uncommon user options for all Valgrind tools: --fullpath-after= (with nothing after the '=') @@ -106,6 +108,8 @@ usage: valgrind [options] prog-and-args --trace-redir=no|yes show redirection details? [no] --trace-sched=no|yes show thread scheduler details? [no] --profile-heap=no|yes profile Valgrind's own space use + --core-redzone= set minimum size of redzones added before/after + heap blocks allocated for Valgrind internal use (in bytes) [4] --wait-for-gdb=yes|no pause on startup to wait for gdb attach --sym-offsets=yes|no show syms in form 'name+offset' ? [no] --command-line-only=no|yes only use command line options [no]