This file is part of MemCheck, a heavyweight Valgrind tool for
detecting memory errors.
- Copyright (C) 2000-2017 Julian Seward
+ Copyright (C) 2000-2017 Julian Seward
jseward@acm.org
This program is free software; you can redistribute it and/or
/*------------------------------------------------------------*/
/*--- Fast-case knobs ---*/
/*------------------------------------------------------------*/
-
+
// Comment these out to disable the fast cases (don't just set them to zero).
/* PERF_FAST_LOADV is in mc_include.h */
/*------------------------------------------------------------*/
/* All reads and writes are checked against a memory map (a.k.a. shadow
- memory), which records the state of all memory in the process.
-
+ memory), which records the state of all memory in the process.
+
On 32-bit machines the memory map is organised as follows.
The top 16 bits of an address are used to index into a top-level
map table, containing 65536 entries. Each entry is a pointer to a
// Ie. instead of particular value bits being held in certain addresses, in
// this case certain addresses are represented by particular value bits.
// See insert_vabits2_into_vabits8() for an example.
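// For instance (an illustrative sketch only -- the real helper writes
// through a pointer): inserting the 2-bit field vabits2 for address a
// into the byte vabits8 that covers a..a+3 amounts to
//
//    UInt shift = (a & 3) << 1;       // field sits at bit 0, 2, 4 or 6
//    vabits8 &= ~(0x3 << shift);      // clear the old 2-bit field
//    vabits8 |= (vabits2 << shift);   // drop in the new value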
-//
+//
// But note that we don't compress the V bits stored in registers; they
// need to be explicit to make the shadow operations possible. Therefore
// when moving values between registers and memory we need to convert
STATIC_ASSERT(SM_CHUNKS % 2 == 0);
-typedef
+typedef
union {
UChar vabits8[SM_CHUNKS];
UShort vabits16[SM_CHUNKS/2];
new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
if (new_sm == NULL)
- VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
+ VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
sizeof(SecMap) );
VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
update_SM_counts(dist_sm, new_sm);
if (n_noaccess_SMs > max_noaccess_SMs ) max_noaccess_SMs = n_noaccess_SMs;
if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
if (n_defined_SMs > max_defined_SMs ) max_defined_SMs = n_defined_SMs;
- if (n_non_DSM_SMs > max_non_DSM_SMs ) max_non_DSM_SMs = n_non_DSM_SMs;
+ if (n_non_DSM_SMs > max_non_DSM_SMs ) max_non_DSM_SMs = n_non_DSM_SMs;
}
/* --------------- Primary maps --------------- */
/* The main primary map. This covers some initial part of the address
space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
- handled using the auxiliary primary map.
+ handled using the auxiliary primary map.
*/
#if ENABLE_ASSEMBLY_HELPERS && defined(PERF_FAST_LOADV) \
&& (defined(VGP_arm_linux) \
LAYOUT: the first word has to be the key for OSet fast lookups.
*/
typedef
- struct {
+ struct {
Addr base;
SecMap* sm;
}
static struct {
Addr base;
AuxMapEnt* ent; // pointer to the matching auxmap_L2 node
- }
+ }
auxmap_L1[N_AUXMAP_L1];
static OSet* auxmap_L2 = NULL;
/* 32-bit platform */
if (VG_(OSetGen_Size)(auxmap_L2) != 0)
return "32-bit: auxmap_L2 is non-empty";
- for (i = 0; i < N_AUXMAP_L1; i++)
+ for (i = 0; i < N_AUXMAP_L1; i++)
if (auxmap_L1[i].base != 0 || auxmap_L1[i].ent != NULL)
return "32-bit: auxmap_L1 is non-empty";
} else {
static INLINE SecMap** get_secmap_ptr ( Addr a )
{
- return ( a <= MAX_PRIMARY_ADDRESS
- ? get_secmap_low_ptr(a)
+ return ( a <= MAX_PRIMARY_ADDRESS
+ ? get_secmap_low_ptr(a)
: get_secmap_high_ptr(a));
}
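/* Illustrative sketch of the 32-bit address split described earlier
   (hypothetical helper names): the top 16 bits select a primary-map
   entry, the low 16 bits locate the byte within that SecMap. */
static INLINE UWord ex_pm_index ( Addr a ) { return a >> 16; }
static INLINE UWord ex_sm_byte  ( Addr a ) { return a & 0xFFFF; }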
/* Produce the secmap for 'a', either from the primary map or by
ensuring there is an entry for it in the aux primary map. The
secmap may be a distinguished one as the caller will only want to
- be able to read it.
+ be able to read it.
*/
static INLINE SecMap* get_secmap_for_reading ( Addr a )
{
// *** WARNING! ***
// Any time this function is called, if it is possible that any of the
-// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
+// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
// corresponding entry(s) in the sec-V-bits table must also be set!
static INLINE
UChar get_vabits8_for_aligned_word32 ( Addr a )
// Note: the nodes in this table can become stale. Eg. if you write a PDB,
// then overwrite the same address with a fully defined byte, the sec-V-bit
// node will not necessarily be removed. This is because checking for
-// whether removal is necessary would slow down the fast paths.
+// whether removal is necessary would slow down the fast paths.
//
// To avoid the stale nodes building up too much, we periodically (once the
// table reaches a certain size) garbage collect (GC) the table by
// traversing it and evicting any nodes not having PDB.
// If more than a certain proportion of nodes survived, we increase the
-// table size so that GCs occur less often.
+// table size so that GCs occur less often.
//
// This policy is designed to avoid bad table bloat in the worst case where
// a program creates huge numbers of stale PDBs -- we would get this bloat
// will be deleted and re-added less frequently.
//
// The previous scaling up mechanism (now called STEPUP) is retained:
-// if residency exceeds 50%, the table is scaled up, although by a
+// if residency exceeds 50%, the table is scaled up, although by a
// factor sqrt(2) rather than 2 as before. This effectively doubles the
// frequency of GCs when there are many PDBs and reduces the tendency of
// stale PDBs to reside for long periods in the table.
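//
// Worked example of the STEPUP policy above: with secVBitLimit at 1024
// nodes, a GC leaving more than 512 survivors (50%) scales the limit
// by sqrt(2) to roughly 1448; two consecutive STEPUPs double it, so
// the old factor-of-2 growth is reached in two half-sized steps.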
// come out anyway.
static UInt GCs_done = 0;
-typedef
+typedef
struct {
Addr a;
UChar vbits8[BYTES_PER_SEC_VBIT_NODE];
- }
+ }
SecVBitNode;
static OSet* createSecVBitTable(void)
{
OSet* newSecVBitTable;
newSecVBitTable = VG_(OSetGen_Create_With_Pool)
- ( offsetof(SecVBitNode, a),
+ ( offsetof(SecVBitNode, a),
NULL, // use fast comparisons
- VG_(malloc), "mc.cSVT.1 (sec VBit table)",
+ VG_(malloc), "mc.cSVT.1 (sec VBit table)",
VG_(free),
1000,
sizeof(SecVBitNode));
if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
// Found a non-stale byte, so keep =>
// Insert a copy of the node into the new table.
- SecVBitNode* n2 =
+ SecVBitNode* n2 =
VG_(OSetGen_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
*n2 = *n;
VG_(OSetGen_Insert)(secVBitTable2, n2);
}
// Increase table size if necessary.
- if ((Double)n_survivors
+ if ((Double)n_survivors
> ((Double)secVBitLimit * STEPUP_SURVIVOR_PROPORTION)) {
secVBitLimit = (Int)((Double)secVBitLimit * (Double)STEPUP_GROWTH_FACTOR);
if (VG_(clo_verbosity) > 1)
}
else
if (secVBitLimit < DRIFTUP_MAX_SIZE
- && (Double)n_survivors
+ && (Double)n_survivors
> ((Double)secVBitLimit * DRIFTUP_SURVIVOR_PROPORTION)) {
secVBitLimit = (Int)((Double)secVBitLimit * (Double)DRIFTUP_GROWTH_FACTOR);
if (VG_(clo_verbosity) > 1)
/* Returns the offset in memory of the byteno-th least significant byte
in a wordszB-sized word, given the specified endianness. */
-static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
+static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
UWord byteno ) {
return bigendian ? (wordszB-1-byteno) : byteno;
}
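/* Worked example: in a 4-byte word, byteno 0 denotes the least
   significant byte. It lives at offset 0 on a little-endian host
   (byte_offset_w(4, False, 0) == 0) and at offset 3 on a big-endian
   host (byte_offset_w(4, True, 0) == 3). */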
PROF_EVENT(MCPE_LOADVN_SLOW_LOOP);
ai = a + byte_offset_w(szB, bigendian, i);
ok = get_vbits8(ai, &vbits8);
- vbits64 <<= 8;
+ vbits64 <<= 8;
vbits64 |= vbits8;
if (!ok) n_addrs_bad++;
pessim64 <<= 8;
SecMap* sm = get_secmap_for_reading(a);
UWord sm_off16 = SM_OFF_16(a);
UWord vabits16 = sm->vabits16[sm_off16];
- if (LIKELY( !is_distinguished_sm(sm) &&
+ if (LIKELY( !is_distinguished_sm(sm) &&
(VA_BITS16_DEFINED == vabits16 ||
VA_BITS16_UNDEFINED == vabits16) )) {
/* Handle common case quickly: a is suitably aligned, */
SecMap* sm = get_secmap_for_reading(a);
UWord sm_off = SM_OFF(a);
UWord vabits8 = sm->vabits8[sm_off];
- if (LIKELY( !is_distinguished_sm(sm) &&
+ if (LIKELY( !is_distinguished_sm(sm) &&
(VA_BITS8_DEFINED == vabits8 ||
VA_BITS8_UNDEFINED == vabits8) )) {
/* Handle common case quickly: a is suitably aligned, */
// * one partial sec-map (p) 1
// - one whole sec-map (P) 2
//
- // * two partial sec-maps (pp) 1,3
+ // * two partial sec-maps (pp) 1,3
// - one partial, one whole sec-map (pP) 1,2
// - one whole, one partial sec-map (Pp) 2,3
// - two whole sec-maps (PP) 2,2
set_vabits2(a+i, VA_BITS2_DEFINED);
if (UNLIKELY(MC_(clo_mc_level) >= 3)) {
MC_(helperc_b_store1)( a+i, 0 ); /* clear the origin tag */
- }
+ }
}
}
}
set_vabits2(a+i, VA_BITS2_DEFINED);
if (UNLIKELY(MC_(clo_mc_level) >= 3)) {
MC_(helperc_b_store1)( a+i, 0 ); /* clear the origin tag */
- }
+ }
}
}
}
while (len >= 4) {
vabits8 = get_vabits8_for_aligned_word32( src+i );
set_vabits8_for_aligned_word32( dst+i, vabits8 );
- if (LIKELY(VA_BITS8_DEFINED == vabits8
- || VA_BITS8_UNDEFINED == vabits8
+ if (LIKELY(VA_BITS8_DEFINED == vabits8
+ || VA_BITS8_UNDEFINED == vabits8
|| VA_BITS8_NOACCESS == vabits8)) {
/* do nothing */
} else {
tl_assert(ocacheL1 == NULL);
ocacheL1 = VG_(am_shadow_alloc)(sizeof(OCache));
if (ocacheL1 == NULL) {
- VG_(out_of_memory_NORETURN)( "memcheck:allocating ocacheL1",
+ VG_(out_of_memory_NORETURN)( "memcheck:allocating ocacheL1",
sizeof(OCache) );
}
tl_assert(ocacheL1 != NULL);
} else {
stats_ocacheL1_found_at_N++;
}
- if (UNLIKELY(0 == (ocacheL1_event_ctr++
+ if (UNLIKELY(0 == (ocacheL1_event_ctr++
& ((1<<OC_MOVE_FORWARDS_EVERY_BITS)-1)))) {
moveLineForwards( &ocacheL1->set[setno], line );
line--;
{ OCacheLine* line;
UWord lineoff = oc_line_offset(a);
if (OC_ENABLE_ASSERTIONS) {
- tl_assert(lineoff >= 0
+ tl_assert(lineoff >= 0
&& lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
}
line = find_OCacheLine( a );
//// Set the origins for a+0 .. a+7
{ OCacheLine* line;
UWord lineoff = oc_line_offset(a);
- tl_assert(lineoff >= 0
+ tl_assert(lineoff >= 0
&& lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
line = find_OCacheLine( a );
line->u.main.descr[lineoff+0] = 0xF;
if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
OCacheLine* line;
UWord lineoff = oc_line_offset(a);
- tl_assert(lineoff >= 0
+ tl_assert(lineoff >= 0
&& lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
line = find_OCacheLine( a );
line->u.main.descr[lineoff+0] = 0;
} else {
MC_(make_mem_undefined_w_otag)(base, len, otag);
}
-# endif
+# endif
/* Idea is: go fast when
* 8-aligned and length is 128
} else {
make_mem_undefined(base, len);
}
-# endif
+# endif
/* Idea is: go fast when
* 8-aligned and length is 128
} else {
make_mem_undefined(base, 128);
}
-# endif
+# endif
/* Idea is: go fast when
* 16-aligned and length is 128
}
}
}
-
+
/* The same, but for when base is 8 % 16, which is the situation
with RSP for amd64-ELF immediately after call instructions.
*/
/*--- Checking memory ---*/
/*------------------------------------------------------------*/
-typedef
+typedef
enum {
- MC_Ok = 5,
- MC_AddrErr = 6,
+ MC_Ok = 5,
+ MC_AddrErr = 6,
MC_ValueErr = 7
- }
+ }
MC_ReadResult;
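/* Usage sketch, as in check_mem_is_defined() below: the three results
   let callers distinguish unaddressable memory from memory that is
   addressable but holds undefined bits.

      MC_ReadResult res = is_mem_defined(base, size, &bad_addr, &otag);
      if (MC_AddrErr  == res) ... bad_addr is not addressable
      if (MC_ValueErr == res) ... undefined value at bad_addr
*/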
return True;
}
-static Bool is_mem_addressable ( Addr a, SizeT len,
+static Bool is_mem_addressable ( Addr a, SizeT len,
/*OUT*/Addr* bad_addr )
{
SizeT i;
PROF_EVENT(MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP);
vabits2 = get_vabits2(a);
switch (vabits2) {
- case VA_BITS2_DEFINED:
- a++;
+ case VA_BITS2_DEFINED:
+ a++;
break;
case VA_BITS2_UNDEFINED:
case VA_BITS2_PARTDEFINED:
if (!ok) {
switch (part) {
case Vg_CoreSysCall:
- MC_(record_memparam_error) ( tid, bad_addr,
+ MC_(record_memparam_error) ( tid, bad_addr,
/*isAddrErr*/True, s, 0/*otag*/ );
break;
static
void check_mem_is_defined ( CorePart part, ThreadId tid, const HChar* s,
Addr base, SizeT size )
-{
+{
UInt otag = 0;
Addr bad_addr;
MC_ReadResult res = is_mem_defined ( base, size, &bad_addr, &otag );
MC_(record_memparam_error) ( tid, bad_addr, isAddrErr, s,
isAddrErr ? 0 : otag );
break;
-
+
case Vg_CoreSysCallArgInMem:
MC_(record_regparam_error) ( tid, s, otag );
break;
- /* If we're being asked to jump to a silly address, record an error
+ /* If we're being asked to jump to a silly address, record an error
message before potentially crashing the entire system. */
case Vg_CoreTranslate:
MC_(record_jump_error)( tid, bad_addr );
{
// Because code is defined, initialised variables get put in the data
// segment and are defined, and uninitialised variables get put in the
- // bss segment and are auto-zeroed (and so defined).
+ // bss segment and are auto-zeroed (and so defined).
//
// It's possible that there will be padding between global variables.
// This will also be auto-zeroed, and marked as defined by Memcheck. If
chunks of guest state, hence the _SIZE value, which has to be as
big as the biggest guest state.
*/
-static void mc_post_reg_write ( CorePart part, ThreadId tid,
+static void mc_post_reg_write ( CorePart part, ThreadId tid,
PtrdiffT offset, SizeT size)
{
# define MAX_REG_WRITE_SIZE 2264
# undef MAX_REG_WRITE_SIZE
}
-static
-void mc_post_reg_write_clientcall ( ThreadId tid,
+static
+void mc_post_reg_write_clientcall ( ThreadId tid,
PtrdiffT offset, SizeT size, Addr f)
{
mc_post_reg_write(/*dummy*/0, tid, offset, size);
}
-/* Look at the definedness of the guest's shadow state for
- [offset, offset+len). If any part of that is undefined, record
+/* Look at the definedness of the guest's shadow state for
+ [offset, offset+len). If any part of that is undefined, record
a parameter error.
*/
-static void mc_pre_reg_read ( CorePart part, ThreadId tid, const HChar* s,
+static void mc_pre_reg_read ( CorePart part, ThreadId tid, const HChar* s,
PtrdiffT offset, SizeT size)
{
Int i;
if (!is_distinguished_sm(sm) && VA_BITS16_DEFINED == vabits16) {
sm->vabits16[sm_off16] = VA_BITS16_UNDEFINED;
return;
- }
+ }
PROF_EVENT(MCPE_STOREV64_SLOW3);
mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
return;
".global vgMemCheck_helperc_LOADV16le \n"
".type vgMemCheck_helperc_LOADV16le, %function \n"
"vgMemCheck_helperc_LOADV16le: \n" //
-" tst r0, #1 \n" //
+" tst r0, #1 \n" //
" bne .LLV16LEc12 \n" // if misaligned
" lsr r2, r0, #16 \n" // r2 = pri-map-ix
" movw r3, #:lower16:primary_map \n" //
if (LIKELY(vabits8 == VA_BITS8_DEFINED)) {
return;
}
- if (!is_distinguished_sm(sm)
+ if (!is_distinguished_sm(sm)
&& accessible_vabits4_in_vabits8(a, vabits8)) {
insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED,
&(sm->vabits8[sm_off]) );
if (vabits8 == VA_BITS8_UNDEFINED) {
return;
}
- if (!is_distinguished_sm(sm)
+ if (!is_distinguished_sm(sm)
&& accessible_vabits4_in_vabits8(a, vabits8)) {
insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
&(sm->vabits8[sm_off]) );
if (LIKELY(vabits8 == VA_BITS8_DEFINED)) {
return; // defined on defined
}
- if (!is_distinguished_sm(sm)
+ if (!is_distinguished_sm(sm)
&& VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8)) {
// direct mod
insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
if (vabits8 == VA_BITS8_UNDEFINED) {
return; // undefined on undefined
}
- if (!is_distinguished_sm(sm)
- && (VA_BITS2_NOACCESS
+ if (!is_distinguished_sm(sm)
+ && (VA_BITS2_NOACCESS
!= extract_vabits2_from_vabits8(a, vabits8))) {
// direct mod
insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
MC_(record_value_error) ( VG_(get_running_tid)(), 8, (UInt)origin );
}
-VG_REGPARM(2)
+VG_REGPARM(2)
void MC_(helperc_value_checkN_fail_w_o) ( HWord sz, UWord origin ) {
MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz, (UInt)origin );
}
MC_(record_value_error) ( VG_(get_running_tid)(), 8, 0/*origin*/ );
}
-VG_REGPARM(1)
+VG_REGPARM(1)
void MC_(helperc_value_checkN_fail_no_o) ( HWord sz ) {
MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz, 0/*origin*/ );
}
/* Nb: We used to issue various definedness/addressability errors from here,
but we took them out because they ranged from not-very-helpful to
downright annoying, and they complicated the error data structures. */
-static Int mc_get_or_set_vbits_for_client (
- Addr a,
- Addr vbits,
- SizeT szB,
- Bool setting, /* True <=> set vbits, False <=> get vbits */
- Bool is_client_request /* True <=> real user request
- False <=> internal call from gdbserver */
+static Int mc_get_or_set_vbits_for_client (
+ Addr a,
+ Addr vbits,
+ SizeT szB,
+ Bool setting, /* True <=> set vbits, False <=> get vbits */
+ Bool is_client_request /* True <=> real user request
+ False <=> internal call from gdbserver */
)
{
SizeT i;
/* Auxiliary primary maps */
init_auxmap_L1_L2();
- /* auxmap_size = auxmap_used = 0;
+ /* auxmap_size = auxmap_used = 0;
no ... these are statically initialised */
/* Secondary V bit table */
}
static void mc_print_debug_usage(void)
-{
+{
VG_(printf)(
" (none)\n"
);
/*------------------------------------------------------------*/
/* Client block management:
-
+
This is managed as an expanding array of client block descriptors.
Indices of live descriptors are issued to the client, so it can ask
to free them later. Therefore we cannot slide live entries down
over dead ones. Instead we must use free/inuse flags and scan for
an empty slot at allocation time. This in turn means allocation is
- relatively expensive, so we hope this does not happen too often.
+ relatively expensive, so we hope this does not happen too often.
An unused block has start == size == 0
*/
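/* Illustrative sketch (hypothetical name) of the scan-then-grow
   allocation just described: reuse a dead descriptor if one exists,
   otherwise the caller must grow cgbs[], as in the code below. */
static Int ex_find_free_cgb_slot ( void )
{
   Int i;
   for (i = 0; i < cgb_used; i++)
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         return i;   /* reuse a dead descriptor */
   return -1;        /* none free: grow the array */
}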
sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
cgbs_new = VG_(malloc)( "mc.acb.1", sz_new * sizeof(CGenBlock) );
- for (i = 0; i < cgb_used; i++)
+ for (i = 0; i < cgb_used; i++)
cgbs_new[i] = cgbs[i];
if (cgbs != NULL)
static void show_client_block_stats ( void )
{
- VG_(message)(Vg_DebugMsg,
+ VG_(message)(Vg_DebugMsg,
"general CBs: %llu allocs, %llu discards, %llu maxinuse, %llu search\n",
- cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
+ cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
);
}
static void print_monitor_help ( void )
{
- VG_(gdb_printf)
+ VG_(gdb_printf)
(
"\n"
"memcheck monitor commands:\n"
" leak_check [full*|summary|xtleak]\n"
" [kinds kind1,kind2,...|reachable|possibleleak*|definiteleak]\n"
" [heuristics heur1,heur2,...]\n"
-" [increased*|changed|any]\n"
+" [new|increased*|changed|any]\n"
" [unlimited*|limited <max_loss_records_output>]\n"
" * = defaults\n"
" xtleak produces an xtree full leak result in xtleak.kcg.%%p.%%n\n"
/* NB: if possible, avoid introducing a new command below which
starts with the same first letter(s) as an already existing
command. This ensures a shorter abbreviation for the user. */
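/* Example session (via vgdb or gdb's "monitor" command), using the
   'new' deltamode added below -- assumed semantics: report only loss
   records that appeared since the previous leak search:
      (gdb) monitor leak_check full new
*/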
- switch (VG_(keyword_id)
+ switch (VG_(keyword_id)
("help get_vbits leak_check make_memory check_memory "
- "block_list who_points_at xb xtmemory",
+ "block_list who_points_at xb xtmemory",
wcmd, kwd_report_duplicated_matches)) {
case -2: /* multiple matches */
return True;
Int i;
Int unaddressable = 0;
for (i = 0; i < szB; i++) {
- Int res = mc_get_or_set_vbits_for_client
- (address+i, (Addr) &vbits, 1,
+ Int res = mc_get_or_set_vbits_for_client
+ (address+i, (Addr) &vbits, 1,
False, /* get them */
- False /* is client request */ );
+ False /* is client request */ );
/* we are before the first character on the next line, so print a \n. */
if ((i % 32) == 0 && i != 0)
VG_(printf) ("\n");
LeakCheckParams lcp;
HChar* xt_filename = NULL;
HChar* kw;
-
+
lcp.mode = LC_Full;
lcp.show_leak_kinds = R2S(Possible) | R2S(Unreached);
lcp.errors_for_leak_kinds = 0; // no errors for interactive leak search.
lcp.max_loss_records_output = 999999999;
lcp.requested_by_monitor_command = True;
lcp.xt_filename = NULL;
-
- for (kw = VG_(strtok_r) (NULL, " ", &ssaveptr);
- kw != NULL;
+
+ for (kw = VG_(strtok_r) (NULL, " ", &ssaveptr);
+ kw != NULL;
kw = VG_(strtok_r) (NULL, " ", &ssaveptr)) {
- switch (VG_(keyword_id)
+ switch (VG_(keyword_id)
("full summary xtleak "
"kinds reachable possibleleak definiteleak "
"heuristics "
- "increased changed any "
+ "new increased changed any "
"unlimited limited ",
kw, kwd_report_all)) {
case -2: err++; break;
lcp.mode = LC_Summary; break;
case 2: /* xtleak */
lcp.mode = LC_Full;
- xt_filename
+ xt_filename
= VG_(expand_file_name)("--xtleak-mc_main.c",
"xtleak.kcg.%p.%n");
lcp.xt_filename = xt_filename;
break;
case 3: { /* kinds */
wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
- if (wcmd == NULL
+ if (wcmd == NULL
|| !VG_(parse_enum_set)(MC_(parse_leak_kinds_tokens),
True/*allow_all*/,
wcmd,
lcp.show_leak_kinds = MC_(all_Reachedness)();
break;
case 5: /* possibleleak */
- lcp.show_leak_kinds
+ lcp.show_leak_kinds
= R2S(Possible) | R2S(IndirectLeak) | R2S(Unreached);
break;
case 6: /* definiteleak */
break;
case 7: { /* heuristics */
wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
- if (wcmd == NULL
+ if (wcmd == NULL
|| !VG_(parse_enum_set)(MC_(parse_leak_heuristics_tokens),
True,/*allow_all*/
wcmd,
}
break;
}
- case 8: /* increased */
+ case 8: /* new */
+ lcp.deltamode = LCD_New; break;
+ case 9: /* increased */
lcp.deltamode = LCD_Increased; break;
- case 9: /* changed */
+ case 10: /* changed */
lcp.deltamode = LCD_Changed; break;
- case 10: /* any */
+ case 11: /* any */
lcp.deltamode = LCD_Any; break;
- case 11: /* unlimited */
+ case 12: /* unlimited */
lcp.max_loss_records_output = 999999999; break;
- case 12: { /* limited */
+ case 13: { /* limited */
Int int_value;
const HChar* endptr;
VG_(free)(xt_filename);
return True;
}
-
+
case 3: { /* make_memory */
Addr address;
SizeT szB = 1;
- Int kwdid = VG_(keyword_id)
+ Int kwdid = VG_(keyword_id)
("noaccess undefined defined Definedifaddressable",
VG_(strtok_r) (NULL, " ", &ssaveptr), kwd_report_all);
if (!VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr))
case -2: break;
case -1: break;
case 0: MC_(make_mem_noaccess) (address, szB); break;
- case 1: make_mem_undefined_w_tid_and_okind ( address, szB, tid,
+ case 1: make_mem_undefined_w_tid_and_okind ( address, szB, tid,
MC_OKIND_USER ); break;
case 2: MC_(make_mem_defined) ( address, szB ); break;
case 3: make_mem_defined_if_addressable ( address, szB ); break;
ExeContext* origin_ec;
MC_ReadResult res;
- Int kwdid = VG_(keyword_id)
+ Int kwdid = VG_(keyword_id)
("addressable defined",
VG_(strtok_r) (NULL, " ", &ssaveptr), kwd_report_all);
if (!VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr))
case -1: break;
case 0: /* addressable */
if (is_mem_addressable ( address, szB, &bad_addr ))
- VG_(printf) ("Address %p len %lu addressable\n",
+ VG_(printf) ("Address %p len %lu addressable\n",
(void *)address, szB);
else
VG_(printf)
else if (MC_ValueErr == res) {
okind = otag & 3;
switch (okind) {
- case MC_OKIND_STACK:
+ case MC_OKIND_STACK:
src = " was created by a stack allocation"; break;
- case MC_OKIND_HEAP:
+ case MC_OKIND_HEAP:
src = " was created by a heap allocation"; break;
- case MC_OKIND_USER:
+ case MC_OKIND_USER:
src = " was created by a client request"; break;
- case MC_OKIND_UNKNOWN:
+ case MC_OKIND_UNKNOWN:
src = ""; break;
default: tl_assert(0);
}
- VG_(printf)
+ VG_(printf)
("Address %p len %lu not defined:\n"
"Uninitialised value at %p%s\n",
(void *)address, szB, (void *) bad_addr, src);
UInt limit_blocks = 999999999;
Int int_value;
UInt heuristics = 0;
-
+
for (wl = VG_(strtok_r) (NULL, " ", &ssaveptr);
wl != NULL;
wl = VG_(strtok_r) (NULL, " ", &ssaveptr)) {
- switch (VG_(keyword_id) ("unlimited limited heuristics ",
+ switch (VG_(keyword_id) ("unlimited limited heuristics ",
wl, kwd_report_all)) {
case -2: return True;
case -1: return True;
break;
case 2: /* heuristics */
wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
- if (wcmd == NULL
+ if (wcmd == NULL
|| !VG_(parse_enum_set)(MC_(parse_leak_heuristics_tokens),
True,/*allow_all*/
wcmd,
return True;
}
- default:
+ default:
tl_assert(0);
return False;
}
UInt otagV = 0;
Bool errorA = False;
Addr bad_addrA = 0;
- is_mem_defined_comprehensive(
+ is_mem_defined_comprehensive(
arg[1], arg[2],
&errorV, &bad_addrV, &otagV, &errorA, &bad_addrA
);
case VG_USERREQ__DO_LEAK_CHECK: {
LeakCheckParams lcp;
-
+
if (arg[1] == 0)
lcp.mode = LC_Full;
else if (arg[1] == 1)
lcp.mode = LC_Summary;
else {
- VG_(message)(Vg_UserMsg,
+ VG_(message)(Vg_UserMsg,
"Warning: unknown memcheck leak search mode\n");
lcp.mode = LC_Full;
}
-
+
lcp.show_leak_kinds = MC_(clo_show_leak_kinds);
lcp.errors_for_leak_kinds = MC_(clo_error_for_leak_kinds);
lcp.heuristics = MC_(clo_leak_check_heuristics);
lcp.deltamode = LCD_Increased;
else if (arg[2] == 2)
lcp.deltamode = LCD_Changed;
+ else if (arg[2] == 3)
+ lcp.deltamode = LCD_New;
else {
VG_(message)
- (Vg_UserMsg,
+ (Vg_UserMsg,
"Warning: unknown memcheck leak search deltamode\n");
lcp.deltamode = LCD_Any;
}
lcp.max_loss_records_output = 999999999;
lcp.requested_by_monitor_command = False;
lcp.xt_filename = NULL;
-
+
MC_(detect_memory_leaks)(tid, &lcp);
*ret = 0; /* return value is meaningless */
break;
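// Client-side sketch of the arg[2] == 3 case handled above, using the
// generic request macro from valgrind.h (any convenience wrapper in
// memcheck.h is not shown in this file):
//
//    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,
//                                    0 /*full*/, 3 /*LCD_New*/,
//                                    0, 0, 0);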
break;
case VG_USERREQ__MAKE_MEM_UNDEFINED:
- make_mem_undefined_w_tid_and_okind ( arg[1], arg[2], tid,
+ make_mem_undefined_w_tid_and_okind ( arg[1], arg[2], tid,
MC_OKIND_USER );
*ret = -1;
break;
break;
case VG_USERREQ__DISCARD: /* discard */
- if (cgbs == NULL
+ if (cgbs == NULL
|| arg[2] >= cgb_used ||
(cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
*ret = 1;
case VG_USERREQ__GET_VBITS:
*ret = mc_get_or_set_vbits_for_client
( arg[1], arg[2], arg[3],
- False /* get them */,
+ False /* get them */,
True /* is client request */ );
break;
UInt rzB = arg[3];
Bool is_zeroed = (Bool)arg[4];
- MC_(new_block) ( tid, p, sizeB, /*ignored*/0, is_zeroed,
+ MC_(new_block) ( tid, p, sizeB, /*ignored*/0, is_zeroed,
MC_AllocCustom, MC_(malloc_list) );
if (rzB > 0) {
MC_(make_mem_noaccess) ( p - rzB, rzB);
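// Client-side sketch of the request handled above (macro from
// valgrind.h): a custom allocator registering a block with redzones.
//
//    p = my_alloc(rz + sz + rz) + rz;   // hypothetical allocator
//    VALGRIND_MALLOCLIKE_BLOCK(p, sz, rz, /*is_zeroed*/0);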
// The create_mempool function does not know these mempool flags,
// pass as booleans.
- MC_(create_mempool) ( pool, rzB, is_zeroed,
+ MC_(create_mempool) ( pool, rzB, is_zeroed,
(flags & VALGRIND_MEMPOOL_AUTO_FREE),
(flags & VALGRIND_MEMPOOL_METAPOOL) );
return True;
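// Client-side sketch for the flags path above. The flag bits come
// from valgrind.h; the _EXT macro variant carrying flags is assumed:
//
//    VALGRIND_CREATE_MEMPOOL_EXT(pool, rz, /*is_zeroed*/0,
//                                VALGRIND_MEMPOOL_METAPOOL);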
default:
VG_(message)(
- Vg_UserMsg,
+ Vg_UserMsg,
"Warning: unknown memcheck client request code %llx\n",
(ULong)arg[0]
);
[MCPE_STOREVN_SLOW] = "STOREVn_slow",
[MCPE_STOREVN_SLOW_LOOP] = "STOREVn_slow(loop)",
[MCPE_MAKE_ALIGNED_WORD32_UNDEFINED] = "make_aligned_word32_undefined",
- [MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW] =
+ [MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW] =
"make_aligned_word32_undefined_slow",
[MCPE_MAKE_ALIGNED_WORD64_UNDEFINED] = "make_aligned_word64_undefined",
- [MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW] =
+ [MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW] =
"make_aligned_word64_undefined_slow",
[MCPE_MAKE_ALIGNED_WORD32_NOACCESS] = "make_aligned_word32_noaccess",
[MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW] =
if (MC_(event_ctr)[i] > 0) {
spaced = False;
++n;
- VG_(printf)( "prof mem event %3d: %11llu %s\n",
+ VG_(printf)( "prof mem event %3d: %11llu %s\n",
i, MC_(event_ctr)[i],
MC_(event_ctr_name)[i]);
}
// uninitialized, but in practice will retain previous contents [zero in
// this case.]"
//
- // In short:
+ // In short:
//
// A key property of sbrk/brk is that new whole pages that are supplied
// by the operating system *do* get initialized to zero.
// Activate full xtree memory profiling.
VG_(XTMemory_Full_init)(VG_(XT_filter_1top_and_maybe_below_main));
}
-
+
}
static void print_SM_info(const HChar* type, Int n_SMs)
n_sanity_cheap, n_sanity_expensive );
VG_(message)(Vg_DebugMsg,
" memcheck: auxmaps: %llu auxmap entries (%lluk, %lluM) in use\n",
- n_auxmap_L2_nodes,
- n_auxmap_L2_nodes * 64,
+ n_auxmap_L2_nodes,
+ n_auxmap_L2_nodes * 64,
n_auxmap_L2_nodes / 16 );
VG_(message)(Vg_DebugMsg,
" memcheck: auxmaps_L1: %llu searches, %llu cmps, ratio %llu:10\n",
n_auxmap_L1_searches, n_auxmap_L1_cmps,
- (10ULL * n_auxmap_L1_cmps)
- / (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
- );
+ (10ULL * n_auxmap_L1_cmps)
+ / (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
+ );
VG_(message)(Vg_DebugMsg,
" memcheck: auxmaps_L2: %llu searches, %llu nodes\n",
n_auxmap_L2_searches, n_auxmap_L2_nodes
- );
+ );
print_SM_info("n_issued ", n_issued_SMs);
print_SM_info("n_deissued ", n_deissued_SMs);
// Note that the pool allocator has some additional small overhead
// which is not counted in the below.
// Hardwiring this logic sucks, but I don't see how else to do it.
- max_secVBit_szB = max_secVBit_nodes *
+ max_secVBit_szB = max_secVBit_nodes *
(3*sizeof(Word) + VG_ROUNDUP(sizeof(SecVBitNode), sizeof(void*)));
max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
if (MC_(clo_mc_level) >= 3) {
VG_(message)(Vg_DebugMsg,
" ocacheL1: %'14lu refs %'14lu misses (%'lu lossage)\n",
- stats_ocacheL1_find,
+ stats_ocacheL1_find,
stats_ocacheL1_misses,
stats_ocacheL1_lossage );
VG_(message)(Vg_DebugMsg,
" ocacheL1: %'14lu at 0 %'14lu at 1\n",
- stats_ocacheL1_find - stats_ocacheL1_misses
- - stats_ocacheL1_found_at_1
+ stats_ocacheL1_find - stats_ocacheL1_misses
+ - stats_ocacheL1_found_at_1
- stats_ocacheL1_found_at_N,
stats_ocacheL1_found_at_1 );
VG_(message)(Vg_DebugMsg,
mc_print_stats();
if (0) {
- VG_(message)(Vg_DebugMsg,
+ VG_(message)(Vg_DebugMsg,
"------ Valgrind's client block stats follow ---------------\n" );
show_client_block_stats();
}
MC_(__builtin_vec_delete),
MC_(__builtin_vec_delete_aligned),
MC_(realloc),
- MC_(malloc_usable_size),
+ MC_(malloc_usable_size),
MC_MALLOC_DEFAULT_REDZONE_SZB );
MC_(Malloc_Redzone_SzB) = VG_(malloc_effective_client_redzone_size)();
// mc_new_mem_mmap.
VG_(track_new_mem_mmap) ( mc_new_mem_mmap );
VG_(track_change_mem_mprotect) ( mc_new_mem_mprotect );
-
+
VG_(track_copy_mem_remap) ( MC_(copy_address_range_state) );
- VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
+ VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
VG_(track_die_mem_brk) ( MC_(make_mem_noaccess) );
- VG_(track_die_mem_munmap) ( MC_(make_mem_noaccess) );
+ VG_(track_die_mem_munmap) ( MC_(make_mem_noaccess) );
/* Defer the specification of the new_mem_stack functions to the
post_clo_init function, since we need to first parse the command
VG_(track_die_mem_stack_160) ( mc_die_mem_stack_160 );
# endif
VG_(track_die_mem_stack) ( mc_die_mem_stack );
-
+
VG_(track_ban_mem_stack) ( MC_(make_mem_noaccess) );
VG_(track_pre_mem_read) ( check_mem_is_defined );