* ==================== OTHER CHANGES ====================
+* For tools replacing the malloc library (e.g. Memcheck, Helgrind, ...),
+  the option --redzone-size=<number> controls the size of the padding
+  blocks (redzones) added before and after each client-allocated block.
+  Smaller redzones decrease the memory needed by Valgrind; bigger
+  redzones increase the chance of detecting block overruns or underruns.
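+  As an illustration (the value and program name are arbitrary):
+      valgrind --redzone-size=128 ./myprog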
+
* The C++ demangler has been updated so as to work well with C++
compiled by up to at least g++ 4.6.
" [use current 'ulimit' value]\n"
"\n"
" user options for Valgrind tools that replace malloc:\n"
-" --alignment=<number> set minimum alignment of heap allocations [%ld]\n"
+" --alignment=<number> set minimum alignment of heap allocations [%s]\n"
+" --redzone-size=<number> set minimum size of redzones added before/after\n"
+" heap blocks (in bytes). [%s]\n"
"\n"
" uncommon user options for all Valgrind tools:\n"
" --fullpath-after= (with nothing after the '=')\n"
" --trace-redir=no|yes show redirection details? [no]\n"
" --trace-sched=no|yes show thread scheduler details? [no]\n"
" --profile-heap=no|yes profile Valgrind's own space use\n"
+" --core-redzone=<number> set minimum size of redzones added before/after\n"
+" heap blocks allocated for Valgrind internal use (in bytes) [4]\n"
" --wait-for-gdb=yes|no pause on startup to wait for gdb attach\n"
" --sym-offsets=yes|no show syms in form 'name+offset' ? [no]\n"
" --command-line-only=no|yes only use command line options [no]\n"
"\n";
Char* gdb_path = GDB_PATH;
+ Char default_alignment[30];
+ Char default_redzone_size[30];
// Ensure the message goes to stdout
VG_(log_output_sink).fd = 1;
VG_(log_output_sink).is_socket = False;
- /* 'usage1' expects two int, two char* argument, and one SizeT argument. */
+ if (VG_(needs).malloc_replacement) {
+ VG_(sprintf)(default_alignment, "%d", VG_MIN_MALLOC_SZB);
+ VG_(sprintf)(default_redzone_size, "%lu", VG_(tdict).tool_client_redzone_szB);
+ } else {
+ VG_(strcpy)(default_alignment, "not used by this tool");
+ VG_(strcpy)(default_redzone_size, "not used by this tool");
+ }
+   /* 'usage1' expects the arguments whose types are noted in the comments below. */
VG_(printf)(usage1,
- VG_(clo_vgdb_error), gdb_path, VG_MIN_MALLOC_SZB,
- VG_(clo_vgdb_poll), VG_(vgdb_prefix_default)());
+ VG_(clo_vgdb_error) /* int */,
+ gdb_path /* char* */,
+ default_alignment /* char* */,
+ default_redzone_size /* char* */,
+ VG_(clo_vgdb_poll) /* int */,
+ VG_(vgdb_prefix_default)() /* char* */
+ );
if (VG_(details).name) {
VG_(printf)(" user options for %s:\n", VG_(details).name);
if (VG_(needs).command_line_options)
else if VG_STREQN(16, arg, "--main-stacksize") {}
else if VG_STREQN(11, arg, "--sim-hints") {}
else if VG_STREQN(14, arg, "--profile-heap") {}
+ else if VG_STREQN(14, arg, "--core-redzone-size") {}
+ else if VG_STREQN(14, arg, "--redzone-size") {}
// These options are new.
else if (VG_STREQ(arg, "-v") ||
//--------------------------------------------------------------
/* Start the debugging-log system ASAP. First find out how many
"-d"s were specified. This is a pre-scan of the command line. Also
- get --profile-heap=yes which is needed by the time we start up dynamic
- memory management. */
+   get --profile-heap=yes, --core-redzone-size and --redzone-size, which
+   are needed by the time we start up dynamic memory management. */
loglevel = 0;
for (i = 1; i < argc; i++) {
if (argv[i][0] != '-') break;
if VG_STREQ(argv[i], "--") break;
if VG_STREQ(argv[i], "-d") loglevel++;
if VG_BOOL_CLO(argv[i], "--profile-heap", VG_(clo_profile_heap)) {}
+ if VG_BINT_CLO(argv[i], "--core-redzone-size", VG_(clo_core_redzone_size),
+ 0, MAX_CLO_REDZONE_SZB) {}
+ if VG_BINT_CLO(argv[i], "--redzone-size", VG_(clo_redzone_size),
+ 0, MAX_CLO_REDZONE_SZB) {}
}
/* ... and start the debug logger. Now we can safely emit logging
//--------------------------------------------------------------
// Start up the dynamic memory manager
// p: address space management
- // p: getting --profile-heap
+  // p: getting --profile-heap, --core-redzone-size, --redzone-size
// In fact m_mallocfree is self-initialising, so there's no
// initialisation call to do. Instead, try a simple malloc/
// free pair right now to check that nothing is broken.
return bszB & (~SIZE_T_0x1);
}
+// Forward declaration.
+static
+void ensure_mm_init ( ArenaId aid );
+
// return either 0 or sizeof(ULong) depending on whether or not
// heap profiling is engaged
#define hp_overhead_szB() set_at_init_hp_overhead_szB
return & vg_arena[arena];
}
-// Initialise an arena. rz_szB is the minimum redzone size; it might be
-// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
+SizeT VG_(malloc_effective_client_redzone_size)(void)
+{
+ vg_assert(VG_(needs).malloc_replacement);
+ ensure_mm_init (VG_AR_CLIENT);
+ /* ensure_mm_init will call arena_init if not yet done.
+ This then ensures that the arena redzone size is properly
+ initialised. */
+ return arenaId_to_ArenaP(VG_AR_CLIENT)->rz_szB;
+}
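
As a standalone illustration of how the effective client redzone is derived (a minimal sketch, not Valgrind code; the later rounding against VG_MIN_MALLOC_SZB performed in arena_init is omitted):

    #include <stdio.h>

    /* Sketch only: clo_redzone == -1 means "keep the tool default". */
    static size_t effective_client_rz ( long clo_redzone, size_t tool_default )
    {
       size_t rz = (clo_redzone != -1) ? (size_t)clo_redzone : tool_default;
       if (rz < sizeof(void*))    /* must be able to hold a prev/next pointer */
          rz = sizeof(void*);
       return rz;                 /* arena_init may still round this up */
    }

    int main ( void )
    {
       printf("%zu\n", effective_client_rz(-1, 16)); /* Memcheck default: 16 */
       printf("%zu\n", effective_client_rz(2, 16));  /* floored to sizeof(void*) */
       return 0;
    }
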
+
+// Initialise an arena. rz_szB is the (default) minimum redzone size;
+// it might be overridden by VG_(clo_redzone_size) or
+// VG_(clo_core_redzone_size), and it might be made bigger to ensure
+// that VG_MIN_MALLOC_SZB is observed.
static
void arena_init ( ArenaId aid, Char* name, SizeT rz_szB,
SizeT min_sblock_szB, SizeT min_unsplittable_sblock_szB )
{
SizeT i;
Arena* a = arenaId_to_ArenaP(aid);
+
+ // Ensure default redzones are a reasonable size.
+ vg_assert(rz_szB <= MAX_REDZONE_SZB);
- // Ensure redzones are a reasonable size. They must always be at least
- // the size of a pointer, for holding the prev/next pointer (see the layout
- // details at the top of this file).
- vg_assert(rz_szB < 128);
+ /* Override the default redzone size if a clo value was given.
+ Note that the clo value can be significantly bigger than MAX_REDZONE_SZB
+ to allow the user to chase horrible bugs using up to 1 page
+ of protection. */
+ if (VG_AR_CLIENT == aid) {
+ if (VG_(clo_redzone_size) != -1)
+ rz_szB = VG_(clo_redzone_size);
+ } else {
+      rz_szB = VG_(clo_core_redzone_size);
+ }
+
+ // Redzones must always be at least the size of a pointer, for holding the
+ // prev/next pointer (see the layout details at the top of this file).
if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);
-
- vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
- a->name = name;
- a->clientmem = ( VG_AR_CLIENT == aid ? True : False );
// The size of the low and high admin sections in a block must be a
// multiple of VG_MIN_MALLOC_SZB. So we round up the asked-for
while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
vg_assert(overhead_szB_lo(a) - hp_overhead_szB() == overhead_szB_hi(a));
+   // Here we have established the effective redzone size.
+
+ vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
+ a->name = name;
+ a->clientmem = ( VG_AR_CLIENT == aid ? True : False );
+
a->min_sblock_szB = min_sblock_szB;
a->min_unsplittable_sblock_szB = min_unsplittable_sblock_szB;
for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
"%llu/%llu unsplit/split sb unmmap'd, "
"%8ld/%8ld max/curr, "
"%10llu/%10llu totalloc-blocks/bytes,"
- " %10llu searches\n",
+ " %10llu searches %lu rzB\n",
a->name,
a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
a->stats__bytes_on_loan_max,
a->stats__bytes_on_loan,
a->stats__tot_blocks, a->stats__tot_bytes,
- a->stats__nsearches
+ a->stats__nsearches,
+ a->rz_szB
);
}
}
// Check and set the client arena redzone size
if (VG_(needs).malloc_replacement) {
client_rz_szB = VG_(tdict).tool_client_redzone_szB;
- // 128 is no special figure, just something not too big
- if (client_rz_szB > 128) {
+ if (client_rz_szB > MAX_REDZONE_SZB) {
VG_(printf)( "\nTool error:\n"
" specified redzone size is too big (%llu)\n",
(ULong)client_rz_szB);
VG_(clo_profile_heap) ? VG_MIN_MALLOC_SZB : 0;
// Initialise the non-client arenas
// Similarly to client arena, big allocations will be unsplittable.
- arena_init ( VG_AR_CORE, "core", 4, 1048576, 1048576+1 );
- arena_init ( VG_AR_TOOL, "tool", 4, 4194304, 4194304+1 );
- arena_init ( VG_AR_DINFO, "dinfo", 4, 1048576, 1048576+1 );
- arena_init ( VG_AR_DEMANGLE, "demangle", 4, 65536, 65536+1 );
- arena_init ( VG_AR_EXECTXT, "exectxt", 4, 1048576, 1048576+1 );
- arena_init ( VG_AR_ERRORS, "errors", 4, 65536, 65536+1 );
- arena_init ( VG_AR_TTAUX, "ttaux", 4, 65536, 65536+1 );
+ arena_init ( VG_AR_CORE, "core", CORE_REDZONE_DEFAULT_SZB,
+ 1048576, 1048576+1 );
+ arena_init ( VG_AR_TOOL, "tool", CORE_REDZONE_DEFAULT_SZB,
+ 4194304, 4194304+1 );
+ arena_init ( VG_AR_DINFO, "dinfo", CORE_REDZONE_DEFAULT_SZB,
+ 1048576, 1048576+1 );
+ arena_init ( VG_AR_DEMANGLE, "demangle", CORE_REDZONE_DEFAULT_SZB,
+ 65536, 65536+1 );
+ arena_init ( VG_AR_EXECTXT, "exectxt", CORE_REDZONE_DEFAULT_SZB,
+ 1048576, 1048576+1 );
+ arena_init ( VG_AR_ERRORS, "errors", CORE_REDZONE_DEFAULT_SZB,
+ 65536, 65536+1 );
+ arena_init ( VG_AR_TTAUX, "ttaux", CORE_REDZONE_DEFAULT_SZB,
+ 65536, 65536+1 );
nonclient_inited = True;
}
VG_(printf)(
"-------- Arena \"%s\": %lu/%lu max/curr mmap'd, "
"%llu/%llu unsplit/split sb unmmap'd, "
- "%lu/%lu max/curr on_loan --------\n",
+ "%lu/%lu max/curr on_loan %lu rzB --------\n",
a->name, a->stats__bytes_mmaped_max, a->stats__bytes_mmaped,
a->stats__nreclaim_unsplit, a->stats__nreclaim_split,
- a->stats__bytes_on_loan_max, a->stats__bytes_on_loan
+ a->stats__bytes_on_loan_max, a->stats__bytes_on_loan,
+ a->rz_szB
);
for (j = 0; j < a->sblocks_used; ++j) {
VG_(clo_fair_sched) = disable_fair_sched;
Bool VG_(clo_trace_sched) = False;
Bool VG_(clo_profile_heap) = False;
+Int VG_(clo_core_redzone_size) = CORE_REDZONE_DEFAULT_SZB;
+// A value != -1 overrides the tool-provided default
+// VG_(tdict).tool_client_redzone_szB.
+Int VG_(clo_redzone_size) = -1;
Int VG_(clo_dump_error) = 0;
Int VG_(clo_backtrace_size) = 12;
Char* VG_(clo_sim_hints) = NULL;
extern Bool VG_(clo_trace_sched);
/* DEBUG: do heap profiling? default: NO */
extern Bool VG_(clo_profile_heap);
+#define MAX_REDZONE_SZB 128
+// Maximum for the default redzone size of the core arenas and of the
+// client arena as given by the tool.
+// 128 is no special figure, just something not too big.
+#define MAX_CLO_REDZONE_SZB 4096
+// We allow the user to increase the redzone size up to 4KB: this
+// allows an "off by one" in an array of pages to be detected.
+#define CORE_REDZONE_DEFAULT_SZB 4
+extern Int VG_(clo_core_redzone_size);
+// VG_(clo_redzone_size) has default value -1, indicating to keep
+// the tool-provided value.
+extern Int VG_(clo_redzone_size);
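
To make the 4KB rationale concrete, here is an illustrative client program (a sketch, not part of this patch; heap layout depends on the allocator, so the "silent" case is typical rather than guaranteed):

    #include <stdlib.h>

    int main ( void )
    {
       /* Two separately allocated page-sized "rows". */
       char *row0 = malloc(4096);
       char *row1 = malloc(4096);
       /* Buggy indexing that jumps a whole page past row0. With small
          redzones this write may silently land inside another heap
          block; with --redzone-size=4096 it falls within row0's redzone
          and Memcheck reports an invalid write. */
       row0[4096 + 10] = 'x';
       free(row1);
       free(row0);
       return 0;
    }
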
/* DEBUG: display gory details for the k'th most popular error.
default: Infinity. */
extern Int VG_(clo_dump_error);
<!-- start of xi:include in the manpage -->
<para id="malloc-related.opts.para">For tools that use their own version of
-<computeroutput>malloc</computeroutput> (e.g. Memcheck and
-Massif), the following options apply.</para>
+<computeroutput>malloc</computeroutput> (e.g. Memcheck,
+Massif, Helgrind, DRD), the following options apply.</para>
<variablelist id="malloc-related.opts.list">
</listitem>
</varlistentry>
+ <varlistentry id="opt.redzone-size" xreflabel="--redzone-size">
+ <term>
+ <option><![CDATA[--redzone-size=<number> [default: depends on the tool] ]]></option>
+ </term>
+ <listitem>
+    <para>Valgrind's <function>malloc</function>,
+    <function>realloc</function>, etc, add padding blocks before and
+    after each block allocated for the client. Such padding blocks are
+    called redzones. The default value for the redzone size depends on
+    the tool. For example, Memcheck adds and protects a minimum of 16
+    bytes before and after each block allocated by the client, so as to
+    detect block overruns or underruns.
+    </para>
+    <para>Increasing the redzone size makes it possible to detect more
+    cases of block overrun or underrun. Decreasing the redzone size
+    reduces the memory needed by Valgrind but also reduces the chance
+    of detecting block overruns/underruns.</para>
+ </listitem>
+ </varlistentry>
+
</variablelist>
<!-- end of xi:include in the manpage -->
<para>If you get an assertion failure
in <filename>m_mallocfree.c</filename>, this may have happened because
your program wrote off the end of a heap block, or before its
-beginning, thus corrupting head metadata. Valgrind hopefully will have
+beginning, thus corrupting heap metadata. Valgrind hopefully will have
emitted a message to that effect before dying in this way.</para>
<para>Read the <xref linkend="FAQ"/> for more advice about common problems,
static void all__sanity_check ( Char* who ); /* fwds */
-#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */
+#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0
hg_cli____builtin_vec_delete,
hg_cli__realloc,
hg_cli_malloc_usable_size,
- HG_CLI__MALLOC_REDZONE_SZB );
+ HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );
/* 21 Dec 08: disabled this; it mostly causes H to start more
slowly and use significantly more memory, without very often
// possibly some more due to rounding up.
extern SizeT VG_(malloc_usable_size)( void* p );
+// If the tool is replacing malloc for the client, this returns
+// the effective client redzone, as derived from the default
+// provided by the tool, from VG_(clo_redzone_size), and from the
+// minimum redzone required by m_mallocfree.c.
+// It is an error to call this before VG_(needs_malloc_replacement) has
+// been called.
+extern SizeT VG_(malloc_effective_client_redzone_size)(void);
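
A sketch of the intended call ordering (hypothetical tool code; Memcheck's actual use appears later in this patch): the malloc replacement must be registered before the query, e.g.:

    /* Hypothetical, for illustration only. */
    static SizeT my_tool_rzB;

    static void my_post_clo_init ( void )
    {
       /* VG_(needs_malloc_replacement)(...) was already called from the
          tool's pre_clo_init, so the call below is now legal. */
       my_tool_rzB = VG_(malloc_effective_client_redzone_size)();
    }
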
+
// TODO: move somewhere else
// Call here to bomb the system when out of memory (mmap anon fails)
__attribute__((noreturn))
Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
{
return VG_(addr_is_in_block)( a, mc->data, mc->szB,
- MC_MALLOC_REDZONE_SZB );
+ MC_(Malloc_Redzone_SzB) );
}
static
Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
/*--- Tracking the heap ---*/
/*------------------------------------------------------------*/
-/* We want at least a 16B redzone on client heap blocks for Memcheck */
-#define MC_MALLOC_REDZONE_SZB 16
+/* By default, we want at least a 16B redzone on client heap blocks
+ for Memcheck.
+ The default can be modified by --redzone-size. */
+#define MC_MALLOC_DEFAULT_REDZONE_SZB 16
+// effective redzone, as (possibly) modified by --redzone-size:
+extern SizeT MC_(Malloc_Redzone_SzB);
/* For malloc()/new/new[] vs. free()/delete/delete[] mismatch checking. */
typedef
MC_(__builtin_vec_delete),
MC_(realloc),
MC_(malloc_usable_size),
- MC_MALLOC_REDZONE_SZB );
+ MC_MALLOC_DEFAULT_REDZONE_SZB );
+ MC_(Malloc_Redzone_SzB) = VG_(malloc_effective_client_redzone_size)();
VG_(needs_xml_output) ();
/*--- Tracking malloc'd and free'd blocks ---*/
/*------------------------------------------------------------*/
+SizeT MC_(Malloc_Redzone_SzB) = -10000000; // Wraps to a huge value; if used before set, should BOMB
+
/* Record malloc'd blocks. */
VgHashTable MC_(malloc_list) = NULL;
mc = freed_list_start[i];
while (mc) {
if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
- MC_MALLOC_REDZONE_SZB ))
+ MC_(Malloc_Redzone_SzB) ))
return mc;
mc = mc->next;
}
void MC_(free) ( ThreadId tid, void* p )
{
MC_(handle_free)(
- tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc );
+ tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}
void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
MC_(handle_free)(
- tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew);
+ tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
}
void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
MC_(handle_free)(
- tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec);
+ tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
}
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
tl_assert(ec);
/* Retained part is copied, red zones set as normal */
- MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
- MC_MALLOC_REDZONE_SZB );
+ MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
+ MC_(Malloc_Redzone_SzB) );
MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
- MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );
+   MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB) );
/* Copy from old to new */
VG_(memcpy)((void*)a_new, p_old, new_szB);
/* Nb: we have to allocate a new MC_Chunk for the new memory rather
than recycling the old one, so that any erroneous accesses to the
old memory are reported. */
- die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );
+ die_and_free_mem ( tid, mc, MC_(Malloc_Redzone_SzB) );
// Allocate a new chunk.
mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
tl_assert(VG_(is_plausible_ECU)(ecu));
/* First half kept and copied, second half new, red zones as normal */
- MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
- MC_MALLOC_REDZONE_SZB );
+ MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
+ MC_(Malloc_Redzone_SzB) );
MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
ecu | MC_OKIND_HEAP );
- MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );
+ MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB) );
/* Possibly fill new area with specified junk */
if (MC_(clo_malloc_fill) != -1) {
/* Nb: we have to allocate a new MC_Chunk for the new memory rather
than recycling the old one, so that any erroneous accesses to the
old memory are reported. */
- die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );
+ die_and_free_mem ( tid, mc, MC_(Malloc_Redzone_SzB) );
// Allocate a new chunk.
mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
clientperm.stdout.exp clientperm.vgtest \
clireq_nofill.stderr.exp \
clireq_nofill.stdout.exp clireq_nofill.vgtest \
+ clo_redzone_default.vgtest clo_redzone_128.vgtest \
+ clo_redzone_default.stderr.exp clo_redzone_128.stderr.exp \
custom_alloc.stderr.exp custom_alloc.vgtest custom_alloc.stderr.exp-s390x-mvc \
custom-overlap.stderr.exp custom-overlap.vgtest \
deep-backtrace.vgtest deep-backtrace.stderr.exp \
calloc-overflow \
clientperm \
clireq_nofill \
+ clo_redzone \
custom_alloc \
custom-overlap \
deep-backtrace \
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+int main()
+{
+ __attribute__((unused)) char *p = malloc (1);
+ char *b1 = malloc (128);
+ char *b2 = malloc (128);
+ fprintf (stderr, "b1 %p b2 %p\n", b1, b2);
+
+  // Try to land in b2 from b1: no error with the default
+  // redzone size, but an error with a bigger redzone size.
+  // We need an offset which lands in b2 both on 32 bits and
+  // on 64 bits. With Memcheck's default 16-byte redzones, b2
+  // typically starts 160 bytes after b1, so b1[197] falls
+  // inside b2 (no error); with --redzone-size=128, b2 starts
+  // 384 bytes after b1, so b1[197] is 69 bytes past the end
+  // of b1 and is reported.
+  b1[127 + 70] = 'a';
+ return 0;
+}
--- /dev/null
+b1 0x........ b2 0x........
+Invalid write of size 1
+ ...
+ Address 0x........ is 69 bytes after a block of size 128 alloc'd
+ at 0x........: malloc (vg_replace_malloc.c:...)
+ ...
+
--- /dev/null
+vgopts: --leak-check=no -q --redzone-size=128
+prog: clo_redzone
--- /dev/null
+b1 0x........ b2 0x........
--- /dev/null
+vgopts: --leak-check=no -q
+prog: clo_redzone
Syscall param rt_sigaction(act->sa_mask) points to unaddressable byte(s)
...
by 0x........: main (scalar.c:776)
- Address 0x........ is not stack'd, malloc'd or (recently) free'd
+ Address 0x........ is 16 bytes after a block of size 4 alloc'd
+ at 0x........: malloc (vg_replace_malloc.c:...)
+ by 0x........: main (scalar.c:30)
Syscall param rt_sigaction(act->sa_flags) points to unaddressable byte(s)
...
[use current 'ulimit' value]
user options for Valgrind tools that replace malloc:
- --alignment=<number> set minimum alignment of heap allocations [...]
+ --alignment=<number> set minimum alignment of heap allocations [not used by this tool]
+ --redzone-size=<number> set minimum size of redzones added before/after
+ heap blocks (in bytes). [not used by this tool]
uncommon user options for all Valgrind tools:
--fullpath-after= (with nothing after the '=')
[use current 'ulimit' value]
user options for Valgrind tools that replace malloc:
- --alignment=<number> set minimum alignment of heap allocations [...]
+ --alignment=<number> set minimum alignment of heap allocations [not used by this tool]
+ --redzone-size=<number> set minimum size of redzones added before/after
+ heap blocks (in bytes). [not used by this tool]
uncommon user options for all Valgrind tools:
--fullpath-after= (with nothing after the '=')
--trace-redir=no|yes show redirection details? [no]
--trace-sched=no|yes show thread scheduler details? [no]
--profile-heap=no|yes profile Valgrind's own space use
+    --core-redzone-size=<number> set minimum size of redzones added before/after
+ heap blocks allocated for Valgrind internal use (in bytes) [4]
--wait-for-gdb=yes|no pause on startup to wait for gdb attach
--sym-offsets=yes|no show syms in form 'name+offset' ? [no]
--command-line-only=no|yes only use command line options [no]