}
-/* Compare two ExeContexts, comparing all callers. */
+/* Compare two ExeContexts. Number of callers considered depends on res. */
Bool VG_(eq_ExeContext) ( VgRes res, ExeContext* e1, ExeContext* e2 )
{
Int i;
struct {
UInt n_this_record;
UInt n_total_records;
- LossRecord* lossRecord;
+ LossRecord* lr;
} Leak;
// A memory pool error.
HChar* xpost = VG_(clo_xml) ? "</what>" : "";
UInt n_this_record = extra->Err.Leak.n_this_record;
UInt n_total_records = extra->Err.Leak.n_total_records;
- LossRecord* l = extra->Err.Leak.lossRecord;
+ LossRecord* lr = extra->Err.Leak.lr;
if (VG_(clo_xml)) {
VG_(message_no_f_c)(Vg_UserMsg, " <kind>%t</kind>",
- xml_leak_kind(l->loss_mode));
+ xml_leak_kind(lr->state));
} else {
VG_(message)(Vg_UserMsg, "");
}
- if (l->indirect_szB) {
+ if (lr->indirect_szB > 0) {
VG_(message)(Vg_UserMsg,
"%s%'lu (%'lu direct, %'lu indirect) bytes in %'u blocks"
" are %s in loss record %'u of %'u%s",
xpre,
- l->total_bytes + l->indirect_szB,
- l->total_bytes, l->indirect_szB, l->num_blocks,
- str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
+ lr->szB + lr->indirect_szB, lr->szB, lr->indirect_szB,
+ lr->num_blocks,
+ str_leak_lossmode(lr->state), n_this_record, n_total_records,
xpost
);
if (VG_(clo_xml)) {
// Nb: don't put commas in these XML numbers
VG_(message)(Vg_UserMsg, " <leakedbytes>%lu</leakedbytes>",
- l->total_bytes + l->indirect_szB);
+ lr->szB + lr->indirect_szB);
VG_(message)(Vg_UserMsg, " <leakedblocks>%u</leakedblocks>",
- l->num_blocks);
+ lr->num_blocks);
}
} else {
VG_(message)(
Vg_UserMsg,
"%s%'lu bytes in %'u blocks are %s in loss record %'u of %'u%s",
xpre,
- l->total_bytes, l->num_blocks,
- str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
+ lr->szB, lr->num_blocks,
+ str_leak_lossmode(lr->state), n_this_record, n_total_records,
xpost
);
if (VG_(clo_xml)) {
VG_(message)(Vg_UserMsg, " <leakedbytes>%ld</leakedbytes>",
- l->total_bytes);
+ lr->szB);
VG_(message)(Vg_UserMsg, " <leakedblocks>%d</leakedblocks>",
- l->num_blocks);
+ lr->num_blocks);
}
}
- VG_(pp_ExeContext)(l->allocated_at);
+ VG_(pp_ExeContext)(lr->allocated_at);
break;
}
}
Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
- UInt n_total_records, LossRecord* lossRecord,
+ UInt n_total_records, LossRecord* lr,
Bool print_record )
{
MC_Error extra;
extra.Err.Leak.n_this_record = n_this_record;
extra.Err.Leak.n_total_records = n_total_records;
- extra.Err.Leak.lossRecord = lossRecord;
+ extra.Err.Leak.lr = lr;
return
VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
- lossRecord->allocated_at, print_record,
+ lr->allocated_at, print_record,
/*allow_GDB_attach*/False, /*count_error*/False );
}
}
LeakCheckMode;
-/* A block record, used for generating err msgs. */
+/* A loss record, used for generating err msgs. Multiple leaked blocks can be
+ * merged into a single loss record if they have the same state and similar
+ * enough allocation points (controlled by --leak-resolution). */
typedef
struct _LossRecord {
struct _LossRecord* next;
- /* Where these lost blocks were allocated. */
- ExeContext* allocated_at;
- /* Their reachability. */
- Reachedness loss_mode;
- /* Number of blocks and total # bytes involved. */
- SizeT total_bytes;
- SizeT indirect_szB;
- UInt num_blocks;
+ ExeContext* allocated_at; // Where they were allocated.
+ Reachedness state; // LC_Extra.state value shared by all blocks.
+ SizeT szB; // Sum of all MC_Chunk.szB values.
+ SizeT indirect_szB; // Sum of all LC_Extra.indirect_szB values.
+ UInt num_blocks; // Number of blocks represented by the record.
}
LossRecord;
static void print_results(ThreadId tid, Bool is_full_check)
{
Int i, n_lossrecords;
- LossRecord* errlist;
- LossRecord* p;
+ LossRecord* lr_list;
+ LossRecord* lr;
Bool is_suppressed;
// Common up the lost blocks so we can print sensible error messages.
n_lossrecords = 0;
- errlist = NULL;
+ lr_list = NULL;
for (i = 0; i < lc_n_chunks; i++) {
MC_Chunk* ch = lc_chunks[i];
LC_Extra* ex = &(lc_extras)[i];
- for (p = errlist; p != NULL; p = p->next) {
- if (p->loss_mode == ex->state
+ // Determine if this chunk is sufficiently similar to any of our
+ // previously-created loss records to merge.
+ // XXX: This is quadratic! (see bug #191182)
+ for (lr = lr_list; lr != NULL; lr = lr->next) {
+ if (lr->state == ex->state
&& VG_(eq_ExeContext) ( MC_(clo_leak_resolution),
- p->allocated_at,
+ lr->allocated_at,
ch->where) ) {
break;
}
}
- if (p != NULL) {
- p->num_blocks++;
- p->total_bytes += ch->szB;
- p->indirect_szB += ex->indirect_szB;
+ if (lr != NULL) {
+ // Similar to a previous loss record; merge.
+ lr->num_blocks++;
+ lr->szB += ch->szB;
+ lr->indirect_szB += ex->indirect_szB;
} else {
+ // Create a new loss record.
n_lossrecords++;
- p = VG_(malloc)( "mc.fr.1", sizeof(LossRecord));
- p->loss_mode = ex->state;
- p->allocated_at = ch->where;
- p->total_bytes = ch->szB;
- p->indirect_szB = ex->indirect_szB;
- p->num_blocks = 1;
- p->next = errlist;
- errlist = p;
+ lr = VG_(malloc)( "mc.fr.1", sizeof(LossRecord));
+ lr->state = ex->state;
+ lr->allocated_at = ch->where;
+ lr->szB = ch->szB;
+ lr->indirect_szB = ex->indirect_szB;
+ lr->num_blocks = 1;
+ lr->next = lr_list;
+ lr_list = lr;
}
}
MC_(blocks_reachable) = MC_(bytes_reachable) = 0;
MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;
- // Print out the commoned-up blocks and collect summary stats.
+ // Print out the loss records and collect summary stats.
for (i = 0; i < n_lossrecords; i++) {
Bool print_record;
- LossRecord* p_min = NULL;
- SizeT n_min = ~(0x0L);
- for (p = errlist; p != NULL; p = p->next) {
- if (p->num_blocks > 0 && p->total_bytes < n_min) {
- n_min = p->total_bytes + p->indirect_szB;
- p_min = p;
+ LossRecord* lr_min = NULL;
+ SizeT total_szB_min = ~(0x0L);
+ // Find the loss record covering the smallest total number of bytes
+ // (direct plus indirect). Note that we set lr_min->num_blocks to zero
+ // after printing it; that combined with the "lr->num_blocks > 0" test
+ // ensures that each loss record is only printed once.
+ // XXX: This is quadratic! (see bug #191182)
+ // XXX: why do we show the smallest loss records first -- biggest first
+ // would make more sense?
+ for (lr = lr_list; lr != NULL; lr = lr->next) {
+ SizeT total_szB = lr->szB + lr->indirect_szB;
+ if (lr->num_blocks > 0 && total_szB < total_szB_min) {
+ total_szB_min = total_szB;
+ lr_min = lr;
}
}
- tl_assert(p_min != NULL);
+ tl_assert(lr_min != NULL);
// Rules for printing:
// - We don't show suppressed loss records ever (and that's controlled
//
print_record = is_full_check &&
( MC_(clo_show_reachable) ||
- Unreached == p_min->loss_mode ||
- Possible == p_min->loss_mode );
+ Unreached == lr_min->state ||
+ Possible == lr_min->state );
is_suppressed =
- MC_(record_leak_error) ( tid, i+1, n_lossrecords, p_min,
+ MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr_min,
print_record );
if (is_suppressed) {
- MC_(blocks_suppressed) += p_min->num_blocks;
- MC_(bytes_suppressed) += p_min->total_bytes;
+ MC_(blocks_suppressed) += lr_min->num_blocks;
+ MC_(bytes_suppressed) += lr_min->szB;
- } else if (Unreached == p_min->loss_mode) {
- MC_(blocks_leaked) += p_min->num_blocks;
- MC_(bytes_leaked) += p_min->total_bytes;
+ } else if (Unreached == lr_min->state) {
+ MC_(blocks_leaked) += lr_min->num_blocks;
+ MC_(bytes_leaked) += lr_min->szB;
- } else if (IndirectLeak == p_min->loss_mode) {
- MC_(blocks_indirect) += p_min->num_blocks;
- MC_(bytes_indirect) += p_min->total_bytes;
+ } else if (IndirectLeak == lr_min->state) {
+ MC_(blocks_indirect) += lr_min->num_blocks;
+ MC_(bytes_indirect) += lr_min->szB;
- } else if (Possible == p_min->loss_mode) {
- MC_(blocks_dubious) += p_min->num_blocks;
- MC_(bytes_dubious) += p_min->total_bytes;
+ } else if (Possible == lr_min->state) {
+ MC_(blocks_dubious) += lr_min->num_blocks;
+ MC_(bytes_dubious) += lr_min->szB;
- } else if (Reachable == p_min->loss_mode) {
- MC_(blocks_reachable) += p_min->num_blocks;
- MC_(bytes_reachable) += p_min->total_bytes;
+ } else if (Reachable == lr_min->state) {
+ MC_(blocks_reachable) += lr_min->num_blocks;
+ MC_(bytes_reachable) += lr_min->szB;
} else {
VG_(tool_panic)("unknown loss mode");
}
- p_min->num_blocks = 0;
+ lr_min->num_blocks = 0;
}
if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
32 (16 direct, 16 indirect) bytes in 1 blocks are definitely lost in loss record ... of ...
at 0x........: malloc (vg_replace_malloc.c:...)
by 0x........: mk (leak-cases.c:52)
- by 0x........: f (leak-cases.c:76)
+ by 0x........: f (leak-cases.c:91)
by 0x........: main (leak-cases.c:107)
32 (16 direct, 16 indirect) bytes in 1 blocks are definitely lost in loss record ... of ...
at 0x........: malloc (vg_replace_malloc.c:...)
by 0x........: mk (leak-cases.c:52)
- by 0x........: f (leak-cases.c:91)
+ by 0x........: f (leak-cases.c:76)
by 0x........: main (leak-cases.c:107)
at 0x........: malloc (vg_replace_malloc.c:...)
by 0x........: mk (leak-cycle.c:15)
by 0x........: mkcycle (leak-cycle.c:26)
- by 0x........: main (leak-cycle.c:44)
+ by 0x........: main (leak-cycle.c:45)
48 (16 direct, 32 indirect) bytes in 1 blocks are definitely lost in loss record ... of ...
at 0x........: malloc (vg_replace_malloc.c:...)
by 0x........: mk (leak-cycle.c:15)
by 0x........: mkcycle (leak-cycle.c:26)
- by 0x........: main (leak-cycle.c:45)
+ by 0x........: main (leak-cycle.c:44)
96 (16 direct, 80 indirect) bytes in 1 blocks are definitely lost in loss record ... of ...
at 0x........: malloc (vg_replace_malloc.c:...)
by 0x........: mk (leak-cycle.c:15)
by 0x........: mkcycle (leak-cycle.c:26)
- by 0x........: main (leak-cycle.c:51)
+ by 0x........: main (leak-cycle.c:63)
96 (16 direct, 80 indirect) bytes in 1 blocks are definitely lost in loss record ... of ...
at 0x........: malloc (vg_replace_malloc.c:...)
by 0x........: mk (leak-cycle.c:15)
by 0x........: mkcycle (leak-cycle.c:26)
- by 0x........: main (leak-cycle.c:63)
+ by 0x........: main (leak-cycle.c:51)