# ifdef free
# undef free
# endif
+
+#if 0
+# define malloc(s) unbound_stat_malloc_log(s, __FILE__, __LINE__, __func__)
+# define calloc(n,s) unbound_stat_calloc_log(n, s, __FILE__, __LINE__, __func__)
+# define free(p) unbound_stat_free_log(p, __FILE__, __LINE__, __func__)
+# define realloc(p,s) unbound_stat_realloc_log(p, s, __FILE__, __LINE__, __func__)
+#else
# define malloc unbound_stat_malloc
# define calloc unbound_stat_calloc
# define free unbound_stat_free
# define realloc unbound_stat_realloc
+#endif
void *unbound_stat_malloc(size_t size);
void *unbound_stat_calloc(size_t nmemb, size_t size);
void unbound_stat_free(void *ptr);
void *unbound_stat_realloc(void *ptr, size_t size);
+void *unbound_stat_malloc_log(size_t size, const char* file, int line,
+ const char* func);
+void *unbound_stat_calloc_log(size_t nmemb, size_t size, const char* file,
+ int line, const char* func);
+void unbound_stat_free_log(void *ptr, const char* file, int line,
+ const char* func);
+void *unbound_stat_realloc_log(void *ptr, size_t size, const char* file,
+ int line, const char* func);
#endif /* UNBOUND_ALLOC_STATS */
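With the logging variant enabled (the "#if 0" above changed to "#if 1"), every allocation call in code that includes this header expands to the matching _log wrapper. A minimal sketch of the expansion; the file name and line number are illustrative only:

	/* a call site such as: */
	buf = malloc(len);
	/* is preprocessed into: */
	buf = unbound_stat_malloc_log(len, "somefile.c", 123, __func__);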
/** default port for DNS traffic. */
/** Size of an UDP datagram */
#define NORMAL_UDP_SIZE 512 /* bytes */
+/** measure memory leakage */
+static void
+debug_memleak(size_t accounted, size_t heap,
+ size_t total_alloc, size_t total_free)
+{
+ static int init = 0;
+ static size_t base_heap, base_accounted, base_alloc, base_free;
+ size_t base_af, cur_af, grow_af, grow_acc;
+ if(!init) {
+ init = 1;
+ base_heap = heap;
+ base_accounted = accounted;
+ base_alloc = total_alloc;
+ base_free = total_free;
+ }
+ base_af = base_alloc - base_free;
+ cur_af = total_alloc - total_free;
+ grow_af = cur_af - base_af;
+ grow_acc = accounted - base_accounted;
+ log_info("Leakage: %u leaked. growth: %u use, %u acc, %u heap",
+ (unsigned)(grow_af - grow_acc), (unsigned)grow_af,
+ (unsigned)grow_acc, (unsigned)(heap - base_heap));
+}
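A worked illustration of the report (all numbers made up): if, since the baseline snapshot, total_alloc - total_free grew by 10000 bytes, the accounted structure sizes grew by 9000 bytes and the heap grew by 12288 bytes, the logged line reads:

	Leakage: 1000 leaked. growth: 10000 use, 9000 acc, 12288 heap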
+
/** give debug heap size indication */
static void
-debug_total_mem()
+debug_total_mem(size_t calctotal)
{
extern void* unbound_start_brk;
extern size_t unbound_mem_alloc, unbound_mem_freed;
log_info("Total heap memory estimate: %u total-alloc: %u "
"total-free: %u", (unsigned)total,
(unsigned)unbound_mem_alloc, (unsigned)unbound_mem_freed);
+ debug_memleak(calctotal, (size_t)total,
+ unbound_mem_alloc, unbound_mem_freed);
}
/** Report on memory usage by this thread and global */
-static void
-worker_mem_report(struct worker* worker)
+void
+worker_mem_report(struct worker* worker, struct serviced_query* cur_serv)
{
size_t total, front, back, mesh, msg, rrset, infra, ac, superac;
size_t me;
back = outnet_get_mem(worker->back);
msg = slabhash_get_mem(worker->env.msg_cache);
rrset = slabhash_get_mem(&worker->env.rrset_cache->table);
- infra = slabhash_get_mem(worker->env.infra_cache->hosts);
+ infra = infra_get_mem(worker->env.infra_cache);
mesh = mesh_get_mem(worker->env.mesh);
ac = alloc_get_mem(&worker->alloc);
superac = alloc_get_mem(&worker->daemon->superalloc);
me = sizeof(*worker) + sizeof(*worker->base) + sizeof(*worker->comsig)
+ comm_point_get_mem(worker->cmd_com) +
- sizeof(worker->rndstate) + region_get_mem(worker->scratchpad);
+ sizeof(worker->rndstate) + region_get_mem(worker->scratchpad)+
+ sizeof(*worker->env.scratch_buffer) +
+ ldns_buffer_capacity(worker->env.scratch_buffer);
+ if(cur_serv)
+ me += serviced_get_mem(cur_serv);
total = front+back+mesh+msg+rrset+infra+ac+superac+me;
log_info("Memory conditions: %u front=%u back=%u mesh=%u msg=%u "
"rrset=%u infra=%u alloccache=%u globalalloccache=%u me=%u",
(unsigned)mesh, (unsigned)msg, (unsigned)rrset,
(unsigned)infra, (unsigned)ac, (unsigned)superac,
(unsigned)me);
- debug_total_mem();
+ debug_total_mem(total);
}
void
if(error != 0) {
mesh_report_reply(worker->env.mesh, &e, 0, reply_info);
- worker_mem_report(worker);
+ worker_mem_report(worker, NULL);
return 0;
}
/* sanity check. */
/* error becomes timeout for the module as if this reply
* never arrived. */
mesh_report_reply(worker->env.mesh, &e, 0, reply_info);
- worker_mem_report(worker);
+ worker_mem_report(worker, NULL);
return 0;
}
mesh_report_reply(worker->env.mesh, &e, 1, reply_info);
- worker_mem_report(worker);
+ worker_mem_report(worker, NULL);
return 0;
}
{
struct outbound_entry* e = (struct outbound_entry*)arg;
struct worker* worker = e->qstate->env->worker;
+ struct serviced_query *sq = e->qsent;
verbose(VERB_ALGO, "worker svcd callback for qstate %p", e->qstate);
if(error != 0) {
mesh_report_reply(worker->env.mesh, e, 0, reply_info);
- worker_mem_report(worker);
+ worker_mem_report(worker, sq);
return 0;
}
/* sanity check. */
* never arrived. */
verbose(VERB_ALGO, "worker: bad reply handled as timeout");
mesh_report_reply(worker->env.mesh, e, 0, reply_info);
- worker_mem_report(worker);
+ worker_mem_report(worker, sq);
return 0;
}
mesh_report_reply(worker->env.mesh, e, 1, reply_info);
- worker_mem_report(worker);
+ worker_mem_report(worker, sq);
return 0;
}
/* the max request number has been reached, stop accepting */
listen_pushback(worker->front);
}
- worker_mem_report(worker);
+ worker_mem_report(worker, NULL);
return 0;
}
worker_delete(worker);
return 0;
}
- worker_mem_report(worker);
+ worker_mem_report(worker, NULL);
return 1;
}
return;
mesh_stats(worker->env.mesh, "mesh has");
server_stats_log(&worker->stats, worker->thread_num);
- worker_mem_report(worker);
+ worker_mem_report(worker, NULL);
mesh_delete(worker->env.mesh);
ldns_buffer_free(worker->env.scratch_buffer);
listen_delete(worker->front);
struct module_qstate* q)
{
struct worker* worker = q->env->worker;
- struct outbound_entry* e = (struct outbound_entry*)malloc(sizeof(*e));
+ struct outbound_entry* e = (struct outbound_entry*)region_alloc(
+ q->region, sizeof(*e));
if(!e)
return NULL;
e->qstate = q;
+14 August 2007: Wouter
+	- default outgoing ports changed to avoid port 2049, which is
+	  widely blocked by firewalls.
+	- count infra lameness cache in memory size.
+	- memory accounting improved.
+ - outbound entries are allocated in the query region they are for.
+ - extensive debugging for memory allocations.
+
13 August 2007: Wouter
- fixup makefile, if lexer is missing give nice error and do not
mess up the dependencies.
/** calculate size for the hashtable, does not count size of lameness,
* so the hashtable is a fixed number of items */
static size_t
-infra_host_sizefunc(void* ATTR_UNUSED(k), void* ATTR_UNUSED(d))
+infra_host_sizefunc(void* k, void* ATTR_UNUSED(d))
{
- return sizeof(struct infra_host_key) + sizeof(struct infra_host_data);
+ struct infra_host_key* key = (struct infra_host_key*)k;
+ return sizeof(struct infra_host_key) + sizeof(struct infra_host_data)
+ + lock_get_mem(&key->entry.lock);
}
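In builds without USE_THREAD_DEBUG the lock_get_mem() call added here expands to (0) (see the nop macro later in this patch), so the per-item size remains the fixed constant the comment above requires. A sketch of the effective non-debug expansion:

	/* lock_get_mem(&key->entry.lock) becomes (0) without USE_THREAD_DEBUG */
	return sizeof(struct infra_host_key) + sizeof(struct infra_host_data) + (0);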
/** compare two addresses, returns -1, 0, or +1 */
/** calculate size, which is fixed, zonename does not count so that
* a fixed number of items is stored */
static size_t
-infra_lame_sizefunc(void* ATTR_UNUSED(k), void* ATTR_UNUSED(d))
+infra_lame_sizefunc(void* k, void* ATTR_UNUSED(d))
{
- return sizeof(struct infra_lame_key)+sizeof(struct infra_lame_data);
+ struct infra_lame_key* key = (struct infra_lame_key*)k;
+ return sizeof(struct infra_lame_key)+sizeof(struct infra_lame_data)
+ + lock_get_mem(&key->entry.lock);
}
/** compare zone names, returns -1, 0, +1 */
lock_rw_unlock(&e->lock);
return 1;
}
+
+/** helper: count memory used by the lameness cache of one host entry */
+static size_t
+count_host_lame(struct lruhash_entry* e)
+{
+ struct infra_host_data* host_data = (struct infra_host_data*)e->data;
+ if(!host_data->lameness)
+ return 0;
+ return lruhash_get_mem(host_data->lameness);
+}
+
+size_t
+infra_get_mem(struct infra_cache* infra)
+{
+ size_t i, bin;
+ size_t s = sizeof(*infra) +
+ slabhash_get_mem(infra->hosts);
+ struct lruhash_entry* e;
+ for(i=0; i<infra->hosts->size; i++) {
+ lock_quick_lock(&infra->hosts->array[i]->lock);
+ for(bin=0; bin<infra->hosts->array[i]->size; bin++) {
+ lock_quick_lock(&infra->hosts->array[i]->
+ array[bin].lock);
+ /* count data size in bin items. */
+ for(e = infra->hosts->array[i]->array[bin].
+ overflow_list; e; e = e->overflow_next) {
+ lock_rw_rdlock(&e->lock);
+ s += count_host_lame(e);
+ lock_rw_unlock(&e->lock);
+ }
+ lock_quick_unlock(&infra->hosts->array[i]->
+ array[bin].lock);
+ }
+ lock_quick_unlock(&infra->hosts->array[i]->lock);
+ }
+ return s;
+}
struct sockaddr_storage* addr, socklen_t addrlen,
uint8_t* name, size_t namelen, int* lame, int* rtt, time_t timenow);
+/**
+ * Get memory used by the infra cache.
+ * @param infra: infrastructure cache.
+ * @return memory in use in bytes.
+ */
+size_t infra_get_mem(struct infra_cache* infra);
+
#endif /* SERVICES_CACHE_INFRA_H */
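A minimal usage sketch for the new accessor (the log label is an assumption; the actual caller introduced by this patch is worker_mem_report):

	/* assumes an initialized struct infra_cache* infra */
	log_info("infra cache memory: %u bytes", (unsigned)infra_get_mem(infra));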
while(p) {
np = p->next;
outnet_serviced_query_stop(p->qsent, p);
- free(p);
+ /* in region, no free needed */
p = np;
}
outbound_list_init(list);
if(e->prev)
e->prev->next = e->next;
else list->first = e->next;
- free(e);
+ /* in region, no free needed */
}
(struct pending_tcp*)p->next_waiting);
} else {
waiting_list_remove(sq->outnet, p);
+ waiting_tcp_delete(p);
}
}
}
for(w=outnet->tcp_wait_first; w; w = w->next_waiting)
s += waiting_tcp_get_mem(w);
s += sizeof(*outnet->pending);
- s += sizeof(struct pending) * outnet->pending->count;
+ s += (sizeof(struct pending) + comm_timer_get_mem(NULL)) *
+ outnet->pending->count;
s += sizeof(*outnet->serviced);
RBTREE_FOR(sq, struct serviced_query*, outnet->serviced) {
s += sizeof(*sq) + sq->qbuflen;
}
return s;
}
+
+size_t
+serviced_get_mem(struct serviced_query* sq)
+{
+ struct service_callback* sb;
+ size_t s;
+ s = sizeof(*sq) + sq->qbuflen;
+ for(sb = sq->cblist; sb; sb = sb->next)
+ s += sizeof(*sb);
+	/* the sq->pending structure always existed, but the pointer is NULL
+	 * now so it is deleted after the callbacks; count it from the status */
+ if(sq->status == serviced_query_UDP_EDNS ||
+ sq->status == serviced_query_UDP) {
+ s += sizeof(struct pending);
+ s += comm_timer_get_mem(NULL);
+ } else {
+		/* does not include the size of the packet that pkt points to */
+ s += sizeof(struct waiting_tcp);
+		/* always has a timer except on malloc failures */
+ s += comm_timer_get_mem(NULL);
+ }
+ return s;
+}
*/
size_t outnet_get_mem(struct outside_network* outnet);
+/**
+ * Get the memory in use by a serviced query while it is servicing callbacks.
+ * This takes its pre-deleted status into account; the query is deleted when
+ * the callbacks are done.
+ * @param sq: serviced query.
+ * @return size in bytes.
+ */
+size_t serviced_get_mem(struct serviced_query* sq);
+
#endif /* OUTSIDE_NETWORK_H */
}
}
+size_t lock_get_mem(void* pp)
+{
+ size_t s;
+ struct checked_lock* lock = *(struct checked_lock**)pp;
+ struct protected_area* p;
+ s = sizeof(struct checked_lock);
+ acquire_locklock(lock, __func__, __FILE__, __LINE__);
+ for(p = lock->prot; p; p = p->next) {
+ s += sizeof(struct protected_area);
+ s += p->size;
+ }
+ LOCKRET(pthread_mutex_unlock(&lock->lock));
+ return s;
+}
+
/** write lock trace info to file, while you hold those locks */
static void
ordercheck_locklock(struct thr_check* thr, struct checked_lock* lock)
*/
void lock_unprotect(void* lock, void* area);
+/**
+ * Get the memory associated with a checked lock.
+ * @param lock: the checked lock, any type (struct checked_lock**).
+ * @return: size in bytes, including the protected areas.
+ */
+size_t lock_get_mem(void* lock);
+
/**
* Initialise checklock. Sets up internal debug structures.
*/
return 0;
}
+size_t serviced_get_mem(struct serviced_query* ATTR_UNUSED(c))
+{
+ return 0;
+}
+
/*********** End of Dummy routines ***********/
size_t alloc_get_mem(struct alloc_cache* alloc)
{
+ alloc_special_t* p;
size_t s = sizeof(*alloc);
if(!alloc->super) {
lock_quick_lock(&alloc->lock); /* superalloc needs locking */
}
s += sizeof(alloc_special_t) * alloc->num_quar;
+ for(p = alloc->quar; p; p = alloc_special_next(p)) {
+ s += lock_get_mem(&p->entry.lock);
+ }
if(!alloc->super) {
lock_quick_unlock(&alloc->lock);
}
res = malloc(size+16);
if(!res) return NULL;
unbound_mem_alloc += size;
+ log_info("stat %p=malloc(%u)", res+16, (unsigned)size);
memcpy(res, &size, sizeof(size));
memcpy(res+8, &mem_special, sizeof(mem_special));
return res+16;
size_t s = (nmemb*size==0)?(size_t)1:nmemb*size;
void* res = calloc(1, s+16);
if(!res) return NULL;
+ log_info("stat %p=calloc(%u, %u)", res+16, (unsigned)nmemb, (unsigned)size);
unbound_mem_alloc += s;
memcpy(res, &s, sizeof(s));
memcpy(res+8, &mem_special, sizeof(mem_special));
}
ptr-=16;
memcpy(&s, ptr, sizeof(s));
+ log_info("stat free(%p) size %u", ptr+16, (unsigned)s);
memset(ptr+8, 0, 8);
unbound_mem_freed += s;
free(ptr);
if(!res) return NULL;
unbound_mem_alloc += size;
unbound_mem_freed += cursz;
+ log_info("stat realloc(%p, %u) from %u", ptr+16, (unsigned)size, (unsigned)cursz);
if(cursz > size) {
memcpy(res+16, ptr+16, size);
} else if(size > cursz) {
memcpy(res+8, &mem_special, sizeof(mem_special));
return res+16;
}
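For reference, the 16-byte prefix these routines put in front of every allocation, reconstructed from the memcpy offsets above (a sketch, not part of the patch):

	/* bytes 0..7  : size_t holding the requested size
	 * bytes 8..15 : mem_special marker, checked via ptr-8 by free/realloc
	 * bytes 16..  : the pointer handed back to the caller (res+16) */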
+
+void *unbound_stat_malloc_log(size_t size, const char* file, int line,
+ const char* func)
+{
+ log_info("%s:%d %s malloc(%u)", file, line, func, (unsigned)size);
+ return unbound_stat_malloc(size);
+}
+
+void *unbound_stat_calloc_log(size_t nmemb, size_t size, const char* file,
+ int line, const char* func)
+{
+ log_info("%s:%d %s calloc(%u, %u)", file, line, func,
+ (unsigned) nmemb, (unsigned)size);
+ return unbound_stat_calloc(nmemb, size);
+}
+
+void unbound_stat_free_log(void *ptr, const char* file, int line,
+ const char* func)
+{
+ if(ptr && memcmp(ptr-8, &mem_special, sizeof(mem_special)) == 0) {
+ size_t s;
+ memcpy(&s, ptr-16, sizeof(s));
+ log_info("%s:%d %s free(%p) size %u",
+ file, line, func, ptr, (unsigned)s);
+ } else
+ log_info("%s:%d %s unmatched free(%p)", file, line, func, ptr);
+ unbound_stat_free(ptr);
+}
+
+void *unbound_stat_realloc_log(void *ptr, size_t size, const char* file,
+ int line, const char* func)
+{
+ log_info("%s:%d %s realloc(%p, %u)", file, line, func,
+ ptr, (unsigned)size);
+ return unbound_stat_realloc(ptr, size);
+}
+
+void *unbound_stat_malloc_region(size_t size)
+{
+ log_info("region malloc(%u)", (unsigned)size);
+ return unbound_stat_malloc(size);
+}
+
+void unbound_stat_free_region(void *ptr)
+{
+ if(ptr && memcmp(ptr-8, &mem_special, sizeof(mem_special)) == 0) {
+ size_t s;
+ memcpy(&s, ptr-16, sizeof(s));
+ log_info("region free(%p) size %u", ptr, (unsigned)s);
+ } else
+ log_info("region unmatched free(%p)", ptr);
+ unbound_stat_free(ptr);
+}
+
#endif /* UNBOUND_ALLOC_STATS */
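With the logging macros from the header enabled, each allocation then produces a caller-context line from the _log wrapper followed by the raw "stat" line from the underlying routine, roughly as below (file, line, function and address are hypothetical):

	daemon/worker.c:123 worker_handle_request malloc(88)
	stat 0x7f3a2c001010=malloc(88)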
cfg->do_ip6 = 1;
cfg->do_udp = 1;
cfg->do_tcp = 1;
- cfg->outgoing_base_port = cfg->port + 1000;
+ cfg->outgoing_base_port = cfg->port + 2000;
cfg->outgoing_num_ports = 16;
cfg->outgoing_num_tcp = 10;
cfg->msg_cache_size = 4 * 1024 * 1024;
size_t
msgreply_sizefunc(void* k, void* d)
{
- struct query_info* q = (struct query_info*)k;
+ struct msgreply_entry* q = (struct msgreply_entry*)k;
struct reply_info* r = (struct reply_info*)d;
size_t s = sizeof(struct msgreply_entry) + sizeof(struct reply_info)
- + q->qname_len;
+ + q->key.qname_len + lock_get_mem(&q->entry.lock);
s += (r->rrset_count-1) * sizeof(struct rrset_ref);
s += r->rrset_count * sizeof(struct ub_packed_rrset_key*);
return s;
struct ub_packed_rrset_key* k = (struct ub_packed_rrset_key*)key;
struct packed_rrset_data* d = (struct packed_rrset_data*)data;
size_t s = sizeof(struct ub_packed_rrset_key) + k->rk.dname_len;
- s += packed_rrset_sizeof(d);
+ s += packed_rrset_sizeof(d) + lock_get_mem(&k->entry.lock);
return s;
}
#else /* USE_THREAD_DEBUG */
#define lock_protect(lock, area, size) /* nop */
#define lock_unprotect(lock, area) /* nop */
+#define lock_get_mem(lock) (0) /* nothing */
#define checklock_start() /* nop */
#define checklock_stop() /* nop */
region_type *
region_create(void *(*allocator)(size_t), void (*deallocator)(void *))
{
- region_type* result = alloc_region_base(allocator, deallocator,
+ region_type* result;
+#ifdef UNBOUND_ALLOC_STATS
+ void *unbound_stat_malloc_region(size_t size);
+ void unbound_stat_free_region(void *ptr);
+ allocator = &unbound_stat_malloc_region;
+ deallocator = &unbound_stat_free_region;
+#endif
+ result = alloc_region_base(allocator, deallocator,
DEFAULT_INITIAL_CLEANUP_SIZE);
if(!result)
return NULL;
size_t initial_cleanup_size,
int recycle)
{
- region_type* result = alloc_region_base(allocator, deallocator,
+ region_type* result;
+#ifdef UNBOUND_ALLOC_STATS
+ void *unbound_stat_malloc_region(size_t size);
+ void unbound_stat_free_region(void *ptr);
+ allocator = &unbound_stat_malloc_region;
+ deallocator = &unbound_stat_free_region;
+#endif
+ result = alloc_region_base(allocator, deallocator,
initial_cleanup_size);
if(!result)
return NULL;
s += region->maximum_cleanup_count * sizeof(cleanup_type);
if(region->recycle_bin)
s += sizeof(struct recycle_elem*)*region->large_object_size;
+ log_assert(s >= region->chunk_size * region->chunk_count);
return s;
}
}
lock_quick_unlock(&table->lock);
}
+
+size_t
+lruhash_get_mem(struct lruhash* table)
+{
+ size_t s;
+ size_t i;
+ lock_quick_lock(&table->lock);
+ s = sizeof(struct lruhash) + table->space_used;
+ for(i=0; i<table->size; i++) {
+ s += sizeof(struct lruhash_bin) +
+ lock_get_mem(&table->array[i].lock);
+ }
+ lock_quick_unlock(&table->lock);
+ s += lock_get_mem(&table->lock);
+ return s;
+}
*/
void lruhash_status(struct lruhash* table, const char* id, int extended);
+/**
+ * Get memory in use now by the lruhash table.
+ * Get the memory in use now by the lruhash table.
+ * @param table: hash table. It is locked during the count and unlocked after.
+ */
+size_t lruhash_get_mem(struct lruhash* table);
+
#endif /* UTIL_STORAGE_LRUHASH_H */
size_t slabhash_get_mem(struct slabhash* sl)
{
size_t i, total = sizeof(*sl);
+ total += sizeof(struct lruhash*)*sl->size;
for(i=0; i<sl->size; i++) {
- lock_quick_lock(&sl->array[i]->lock);
- total += sizeof(struct lruhash) + sl->array[i]->space_used +
- sizeof(struct lruhash_bin)*sl->array[i]->size;
- lock_quick_unlock(&sl->array[i]->lock);
+ total += lruhash_get_mem(sl->array[i]);
}
return total;
}
struct key_entry_key* kk = (struct key_entry_key*)key;
struct key_entry_data* kd = (struct key_entry_data*)data;
size_t s = sizeof(*kk) + kk->namelen;
- s += sizeof(*kd);
+ s += sizeof(*kd) + lock_get_mem(&kk->entry.lock);
if(kd->rrset_data)
s += packed_rrset_sizeof(kd->rrset_data);
return s;