lr->num_blocks, d_num_blocks,
str_leak_lossmode(lr->key.state),
n_this_record, n_total_records );
- emit( " <leakedbytes>%ld</leakedbytes>\n", lr->szB);
- emit( " <leakedblocks>%d</leakedblocks>\n", lr->num_blocks);
+ emit( " <leakedbytes>%lu</leakedbytes>\n", lr->szB);
+ emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks);
emit( " </xwhat>\n" );
}
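
The hunk above is the pattern repeated throughout this patch: `lr->szB` is a SizeT and `lr->num_blocks` a UInt, both unsigned, so the signed `%ld`/`%d` conversions are replaced by `%lu`/`%u`. A minimal standalone sketch of the corrected pairing, assuming Valgrind's LP64 typedefs (SizeT as unsigned long, UInt as unsigned int):

    #include <stdio.h>

    typedef unsigned long SizeT;   /* assumption: Valgrind's SizeT on LP64 */
    typedef unsigned int  UInt;    /* assumption: Valgrind's UInt */

    int main(void)
    {
        SizeT leaked_bytes  = 1024;
        UInt  leaked_blocks = 3;

        /* Wrong: %ld expects long and %d expects int, so both mismatch
           the unsigned arguments and draw -Wformat warnings:
           printf("  <leakedbytes>%ld</leakedbytes>\n", leaked_bytes);   */

        /* Right: unsigned specifiers match the unsigned types. */
        printf("  <leakedbytes>%lu</leakedbytes>\n", leaked_bytes);
        printf("  <leakedblocks>%u</leakedblocks>\n", leaked_blocks);
        return 0;
    }
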
VG_(pp_ExeContext)(lr->key.allocated_at);
MC_(any_value_errors) = True;
if (xml) {
emit( " <kind>UninitValue</kind>\n" );
- emit( " <what>Use of uninitialised value of size %ld</what>\n",
+ emit( " <what>Use of uninitialised value of size %lu</what>\n",
extra->Err.Value.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.Value.origin_ec)
} else {
/* Could also show extra->Err.Cond.otag if debugging origin
tracking */
- emit( "Use of uninitialised value of size %ld\n",
+ emit( "Use of uninitialised value of size %lu\n",
extra->Err.Value.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (extra->Err.Value.origin_ec)
if (xml) {
emit( " <kind>Invalid%s</kind>\n",
extra->Err.Addr.isWrite ? "Write" : "Read" );
- emit( " <what>Invalid %s of size %ld</what>\n",
+ emit( " <what>Invalid %s of size %lu</what>\n",
extra->Err.Addr.isWrite ? "write" : "read",
extra->Err.Addr.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
&extra->Err.Addr.ai,
extra->Err.Addr.maybe_gcc );
} else {
- emit( "Invalid %s of size %ld\n",
+ emit( "Invalid %s of size %lu\n",
extra->Err.Addr.isWrite ? "write" : "read",
extra->Err.Addr.szB );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
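
The Invalid read/write hunks apply the same `%ld` to `%lu` change. The mismatch is not only a -Wformat warning: once the top bit of a size is set, `%ld` renders it as a negative number. A standalone illustration with plain printf:

    #include <stdio.h>

    int main(void)
    {
        unsigned long szB = (unsigned long)-16;  /* top bit set: a huge size */

        /* -16 on typical two's-complement targets: misleading */
        printf("with %%ld: %ld\n", (long)szB);
        /* the real value (18446744073709551600 on LP64) */
        printf("with %%lu: %lu\n", szB);
        return 0;
    }
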
&& addr_in_reg < searched_wpa + searched_szB) {
if (addr_in_reg == searched_wpa)
VG_(umsg)
- ("tid %d register %s pointing at %#lx\n",
+ ("tid %u register %s pointing at %#lx\n",
tid, regname, searched_wpa);
else
VG_(umsg)
- ("tid %d register %s interior pointing %lu bytes inside %#lx\n",
+ ("tid %u register %s interior pointing %lu bytes inside %#lx\n",
tid, regname, (long unsigned) addr_in_reg - searched_wpa,
searched_wpa);
}
VG_(printf) ("\n");
if (unaddressable) {
VG_(printf)
- ("Address %p len %ld has %d bytes unaddressable\n",
+ ("Address %p len %lu has %d bytes unaddressable\n",
(void *)address, szB, unaddressable);
}
}
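
ThreadId is a UInt in Valgrind, hence `tid %d` becoming `tid %u`; the interior-pointer message already matches `%lu` because the difference of two word-sized addresses is unsigned. A standalone sketch of the corrected call shape, with hypothetical register values:

    #include <stdio.h>

    typedef unsigned int ThreadId;  /* assumption: mirrors Valgrind's UInt ThreadId */

    int main(void)
    {
        ThreadId      tid          = 1;
        unsigned long addr_in_reg  = 0x5000;
        unsigned long searched_wpa = 0x4ff0;

        /* %u matches the unsigned tid; the difference of two unsigned
           longs already matches %lu, so no cast is needed here. */
        printf("tid %u register %s interior pointing %lu bytes inside %#lx\n",
               tid, "rdi", addr_in_reg - searched_wpa, searched_wpa);
        return 0;
    }
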
case -1: break;
case 0: /* addressable */
if (is_mem_addressable ( address, szB, &bad_addr ))
- VG_(printf) ("Address %p len %ld addressable\n",
+ VG_(printf) ("Address %p len %lu addressable\n",
(void *)address, szB);
else
VG_(printf)
- ("Address %p len %ld not addressable:\nbad address %p\n",
+ ("Address %p len %lu not addressable:\nbad address %p\n",
(void *)address, szB, (void *) bad_addr);
MC_(pp_describe_addr) (address);
break;
res = is_mem_defined ( address, szB, &bad_addr, &otag );
if (MC_AddrErr == res)
VG_(printf)
- ("Address %p len %ld not addressable:\nbad address %p\n",
+ ("Address %p len %lu not addressable:\nbad address %p\n",
(void *)address, szB, (void *) bad_addr);
else if (MC_ValueErr == res) {
okind = otag & 3;
default: tl_assert(0);
}
VG_(printf)
- ("Address %p len %ld not defined:\n"
+ ("Address %p len %lu not defined:\n"
"Uninitialised value at %p%s\n",
(void *)address, szB, (void *) bad_addr, src);
ecu = otag & ~3;
}
}
else
- VG_(printf) ("Address %p len %ld defined\n",
+ VG_(printf) ("Address %p len %lu defined\n",
(void *)address, szB);
MC_(pp_describe_addr) (address);
break;
gdb_xb (address + szB - szB % 8, szB % 8, res);
if (unaddressable) {
VG_(printf)
- ("Address %p len %ld has %d bytes unaddressable\n",
+ ("Address %p len %lu has %d bytes unaddressable\n",
(void *)address, szB, unaddressable);
}
}
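
The gdb_xb call above packs some arithmetic worth spelling out: it dumps the `szB % 8` tail bytes that remain after the whole 8-byte words, starting at `address + szB - szB % 8`. A standalone sketch of that split (gdb_xb itself is internal to Valgrind's gdbserver; dump is a hypothetical stand-in):

    #include <stdio.h>

    static void dump(const unsigned char *address, unsigned long szB)
    {
        unsigned long full = szB - szB % 8;  /* bytes covered by whole words */

        (void)address;  /* contents omitted; this sketch only shows the split */
        for (unsigned long i = 0; i < full; i += 8)
            printf("8-byte word at offset %lu\n", i);
        if (szB % 8)
            printf("tail of %lu bytes at offset %lu\n", szB % 8, full);
    }

    int main(void)
    {
        unsigned char buf[21] = {0};
        dump(buf, sizeof buf);  /* two full words, then a 5-byte tail at offset 16 */
        return 0;
    }
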
static void print_SM_info(const HChar* type, Int n_SMs)
{
VG_(message)(Vg_DebugMsg,
- " memcheck: SMs: %s = %d (%ldk, %ldM)\n",
+ " memcheck: SMs: %s = %d (%luk, %luM)\n",
type,
n_SMs,
n_SMs * sizeof(SecMap) / 1024UL,
" memcheck: sanity checks: %d cheap, %d expensive\n",
n_sanity_cheap, n_sanity_expensive );
VG_(message)(Vg_DebugMsg,
- " memcheck: auxmaps: %lld auxmap entries (%lldk, %lldM) in use\n",
+ " memcheck: auxmaps: %llu auxmap entries (%lluk, %lluM) in use\n",
n_auxmap_L2_nodes,
n_auxmap_L2_nodes * 64,
n_auxmap_L2_nodes / 16 );
VG_(message)(Vg_DebugMsg,
- " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10\n",
+ " memcheck: auxmaps_L1: %llu searches, %llu cmps, ratio %llu:10\n",
n_auxmap_L1_searches, n_auxmap_L1_cmps,
(10ULL * n_auxmap_L1_cmps)
/ (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
);
VG_(message)(Vg_DebugMsg,
- " memcheck: auxmaps_L2: %lld searches, %lld nodes\n",
+ " memcheck: auxmaps_L2: %llu searches, %llu nodes\n",
n_auxmap_L2_searches, n_auxmap_L2_nodes
);
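
The auxmap counters are ULongs, so their specifiers move from `%lld` to `%llu`. Note also the ratio line: it reports tenths by multiplying by 10 before dividing, and guards the division so zero searches cannot fault. A standalone sketch, assuming ULong is unsigned long long:

    #include <stdio.h>

    typedef unsigned long long ULong;  /* assumption: Valgrind's ULong */

    int main(void)
    {
        ULong searches = 12345, cmps = 20000;

        /* Fixed-point ratio in tenths, as in the auxmaps_L1 line:
           scale by 10 first, and substitute 1 for an idle (zero)
           search counter to avoid dividing by zero. */
        ULong ratio_x10 = (10ULL * cmps) / (searches ? searches : 1);

        printf(" auxmaps_L1: %llu searches, %llu cmps, ratio %llu:10\n",
               searches, cmps, ratio_x10);
        return 0;
    }
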
max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
VG_(message)(Vg_DebugMsg,
- " memcheck: max sec V bit nodes: %d (%ldk, %ldM)\n",
+ " memcheck: max sec V bit nodes: %d (%luk, %luM)\n",
max_secVBit_nodes, max_secVBit_szB / 1024,
max_secVBit_szB / (1024 * 1024));
VG_(message)(Vg_DebugMsg,
sec_vbits_new_nodes + sec_vbits_updates,
sec_vbits_new_nodes, sec_vbits_updates );
VG_(message)(Vg_DebugMsg,
- " memcheck: max shadow mem size: %ldk, %ldM\n",
+ " memcheck: max shadow mem size: %luk, %luM\n",
max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
if (MC_(clo_mc_level) >= 3) {
stats_ocacheL1_found_at_N,
stats_ocacheL1_movefwds );
VG_(message)(Vg_DebugMsg,
- " ocacheL1: %'12lu sizeB %'12u useful\n",
- (UWord)sizeof(OCache),
+ " ocacheL1: %'12lu sizeB %'12d useful\n",
+ (SizeT)sizeof(OCache),
4 * OC_W32S_PER_LINE * OC_LINES_PER_SET * OC_N_SETS );
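
The ocacheL1 hunk shows why the casts matter: sizeof yields a size_t, and variadic calls perform no conversion toward the specifier, so the code casts to SizeT to match `%'12lu`, while the plain int product correctly takes `%'12d`. A standalone sketch (the `'` grouping flag is dropped here since it is a glibc/POSIX extension; OCacheLike is a hypothetical stand-in):

    #include <stdio.h>

    typedef unsigned long SizeT;  /* assumption: Valgrind's SizeT on LP64 */

    struct OCacheLike { unsigned w32s[64]; };  /* hypothetical stand-in */

    int main(void)
    {
        /* sizeof has type size_t; cast explicitly so the argument's
           type is exactly what %lu expects. The second argument is an
           int expression, so %d is the correct pairing. */
        printf("ocacheL1: %12lu sizeB %12d useful\n",
               (SizeT)sizeof(struct OCacheLike),
               4 * 16 * 2 * 128);
        return 0;
    }
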
VG_(message)(Vg_DebugMsg,
" ocacheL2: %'12lu refs %'12lu misses\n",
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
+ VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %u, %d)\n",
pool, rzB, is_zeroed);
VG_(get_and_pp_StackTrace)
(VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
VG_(message)(Vg_UserMsg,
- "Total mempools active: %d pools, %d chunks\n",
+ "Total mempools active: %u pools, %u chunks\n",
total_pools, total_chunks);
tick = 0;
}
for (i = 0; i < n_chunks-1; i++) {
if (chunks[i]->data > chunks[i+1]->data) {
VG_(message)(Vg_UserMsg,
- "Mempool chunk %d / %d is out of order "
+ "Mempool chunk %u / %u is out of order "
"wrt. its successor\n",
i+1, n_chunks);
bad = 1;
for (i = 0; i < n_chunks-1; i++) {
if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
VG_(message)(Vg_UserMsg,
- "Mempool chunk %d / %d overlaps with its successor\n",
+ "Mempool chunk %u / %u overlaps with its successor\n",
i+1, n_chunks);
bad = 1;
}
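
Both loops enforce invariants over the sorted chunk array: addresses must ascend, and each chunk must end at or before its successor begins. A minimal standalone sketch of the same pair of checks, with Chunk and check as hypothetical stand-ins for the MC_Chunk machinery:

    #include <stdio.h>

    struct Chunk { unsigned long data; unsigned long szB; };

    static int check(const struct Chunk *c, unsigned n_chunks)
    {
        int bad = 0;
        /* i + 1 < n_chunks keeps the unsigned bound safe for n_chunks == 0 */
        for (unsigned i = 0; i + 1 < n_chunks; i++) {
            if (c[i].data > c[i+1].data) {
                printf("Mempool chunk %u / %u is out of order "
                       "wrt. its successor\n", i + 1, n_chunks);
                bad = 1;
            }
            if (c[i].data + c[i].szB > c[i+1].data) {
                printf("Mempool chunk %u / %u overlaps with its successor\n",
                       i + 1, n_chunks);
                bad = 1;
            }
        }
        return bad;
    }

    int main(void)
    {
        struct Chunk c[] = { {0x1000, 0x20}, {0x1010, 0x10} };  /* overlapping */
        check(c, 2);
        return 0;
    }
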
if (bad) {
VG_(message)(Vg_UserMsg,
- "Bad mempool (%d chunks), dumping chunks for inspection:\n",
+ "Bad mempool (%u chunks), dumping chunks for inspection:\n",
n_chunks);
for (i = 0; i < n_chunks; ++i) {
VG_(message)(Vg_UserMsg,
- "Mempool chunk %d / %d: %ld bytes "
+ "Mempool chunk %u / %u: %lu bytes "
"[%lx,%lx), allocated:\n",
i+1,
n_chunks,
MC_Mempool* mp;
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
+ VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %lu)\n",
pool, addr, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_UserMsg,
- "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
+ "mempool_free(0x%lx, 0x%lx) freed chunk of %lu bytes\n",
pool, addr, mc->szB + 0UL);
}
VgHashNode** chunks;
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
+ VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %lu)\n",
pool, addr, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
ThreadId tid = VG_(get_running_tid)();
if (VG_(clo_verbosity) > 2) {
- VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
+ VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %lu)\n",
pool, addrA, addrB, szB);
VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
}
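
Finally, mismatches like the ones fixed here are exactly what the compiler's format checking catches, provided the printf-style function is annotated; Valgrind marks VG_(message) and friends this way via its PRINTF_CHECK macro. A minimal sketch of the mechanism with a hypothetical wrapper:

    #include <stdarg.h>
    #include <stdio.h>

    /* The GCC/Clang format attribute tells -Wformat to type-check the
       variadic arguments against the format string, flagging e.g. %ld
       fed an unsigned long. */
    __attribute__((format(printf, 1, 2)))
    static void my_message(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        unsigned long szB = 16;
        /* my_message("len %ld\n", szB);   <- would warn: %ld vs unsigned long */
        my_message("len %lu\n", szB);      /* clean */
        return 0;
    }
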