}
run_daemon(cfgfile, cmdline_verbose, debug_mode);
+ log_init(NULL); /* close logfile */
return 0;
}
void
worker_mem_report(struct worker* worker, struct serviced_query* cur_serv)
{
size_t total, front, back, mesh, msg, rrset, infra, ac, superac;
- size_t me;
+ size_t me, iter, val;
+ int i;
if(verbosity < VERB_ALGO)
return;
front = listen_get_mem(worker->front);
mesh = mesh_get_mem(worker->env.mesh);
ac = alloc_get_mem(&worker->alloc);
superac = alloc_get_mem(&worker->daemon->superalloc);
+ iter = 0;
+ val = 0;
+ for(i=0; i<worker->env.mesh->num_modules; i++) {
+ if(strcmp(worker->env.mesh->modfunc[i]->name, "validator")==0)
+ val += (*worker->env.mesh->modfunc[i]->get_mem)
+ (&worker->env, i);
+ else iter += (*worker->env.mesh->modfunc[i]->get_mem)
+ (&worker->env, i);
+ }
me = sizeof(*worker) + sizeof(*worker->base) + sizeof(*worker->comsig)
+ comm_point_get_mem(worker->cmd_com) +
sizeof(worker->rndstate) + region_get_mem(worker->scratchpad)+
sizeof(*worker->env.scratch_buffer) +
ldns_buffer_capacity(worker->env.scratch_buffer);
- if(cur_serv)
+ if(cur_serv) {
+ log_info("cur_serv = %d", (int)serviced_get_mem(cur_serv));
me += serviced_get_mem(cur_serv);
- total = front+back+mesh+msg+rrset+infra+ac+superac+me;
+ }
+ total = front+back+mesh+msg+rrset+infra+iter+val+ac+superac+me;
log_info("Memory conditions: %u front=%u back=%u mesh=%u msg=%u "
- "rrset=%u infra=%u alloccache=%u globalalloccache=%u me=%u",
+ "rrset=%u infra=%u iter=%u val=%u "
+ "alloccache=%u globalalloccache=%u me=%u",
(unsigned)total, (unsigned)front, (unsigned)back,
(unsigned)mesh, (unsigned)msg, (unsigned)rrset,
- (unsigned)infra, (unsigned)ac, (unsigned)superac,
- (unsigned)me);
+ (unsigned)infra, (unsigned)iter, (unsigned)val, (unsigned)ac,
+ (unsigned)superac, (unsigned)me);
debug_total_mem(total);
}
worker_handle_service_reply, e, worker->back->udp_buff,
&outbound_entry_compare);
if(!e->qsent) {
- free(e);
return NULL;
}
return e;
30 August 2007: Wouter
- fixup override date config option.
- config options to control memory usage.
+ - caught bad free of un-alloced data in worker_send error case.
+ - memory accounting for key cache (trust anchors and temporary cache).
+ - memory accounting fixup for outside network tcp pending waits.
+ - memory accounting fixup for outside network tcp callbacks.
+ - memory accounting for iterator fixed storage.
+ - key cache size and slabs config options.
29 August 2007: Wouter
- test tool to sign rrsets for testing validator with.
# replies if the message is found secure. The default is off.
# val-permissive-mode: no
+ # the amount of memory to use for the key cache.
+ # in bytes. default is 4 megabytes.
+ # key-cache-size: 4194304
+
+ # the number of slabs to use for the key cache.
+ # the number of slabs must be a power of 2.
+ # more slabs reduce lock contention, but fragment memory usage.
+ # key-cache-slabs: 4
+
+
# Stub zones.
# Create entries like below, to make all queries for 'example.com' and
# 'example.org' go to the given list of nameservers. list zero or more
receives the bogus data. For messages that are found to be secure the AD bit
is set in replies. Also logging is performed as for full validation.
The default value is "no".
+.It \fBkey-cache-size:\fR <number>
+Size of the key cache in bytes. Default is 4 megabytes.
+.It \fBkey-cache-slabs:\fR <number>
+Number of slabs in the key cache. Slabs reduce lock contention by threads.
+Must be set to a power of 2. Setting (close) to the number of cpus is a
+reasonable guess.
.El
.Ss Stub Zone Options
rrset-cache-slabs: 1
infra-cache-numhosts: 200
infra-cache-numlame: 10
+ key-cache-size: 102400 # 100 Kb.
+ key-cache-slabs: 1
num-queries-per-thread: 30
target-fetch-policy: "2 1 0 0 0 0"
harden-large-queries: "yes"
return 1;
return 0;
}
+
+size_t
+donotq_get_mem(struct iter_donotq* donotq)
+{
+ if(!donotq) return 0;
+ return sizeof(*donotq) + region_get_mem(donotq->region);
+}
int donotq_lookup(struct iter_donotq* donotq, struct sockaddr_storage* addr,
socklen_t addrlen);
+/**
+ * Get memory used by donotqueryaddresses structure.
+ * @param donotq: structure for address storage.
+ * @return bytes in use.
+ */
+size_t donotq_get_mem(struct iter_donotq* donotq);
+
+
#endif /* ITERATOR_ITER_DONOTQ_H */
return result->dp;
return NULL;
}
+
+size_t
+forwards_get_mem(struct iter_forwards* fwd)
+{
+ if(!fwd)
+ return 0;
+ return sizeof(*fwd) + region_get_mem(fwd->region);
+}
struct delegpt* forwards_lookup(struct iter_forwards* fwd,
uint8_t* qname, uint16_t qclass);
+/**
+ * Get memory in use by forward storage.
+ * @param fwd: forward storage.
+ * @return bytes in use.
+ */
+size_t forwards_get_mem(struct iter_forwards* fwd);
+
#endif /* ITERATOR_ITER_FWD_H */
return result->dp; /* need to prime this stub */
return NULL;
}
+
+size_t
+hints_get_mem(struct iter_hints* hints)
+{
+ if(!hints) return 0;
+ return sizeof(*hints) + region_get_mem(hints->region);
+}
struct delegpt* hints_lookup_stub(struct iter_hints* hints,
uint8_t* qname, uint16_t qclass, struct delegpt* dp);
+/**
+ * Get memory in use by hints.
+ * @param hints: hint storage.
+ * @return bytes in use.
+ */
+size_t hints_get_mem(struct iter_hints* hints);
+
#endif /* ITERATOR_ITER_HINTS_H */
qstate->minfo[id] = NULL;
}
+/** iterator alloc size routine */
+static size_t iter_get_mem(struct module_env* env, int id)
+{
+ struct iter_env* ie = (struct iter_env*)env->modinfo[id];
+ if(!ie)
+ return 0;
+ return sizeof(*ie) + sizeof(int)*((size_t)ie->max_dependency_depth+1)
+ + hints_get_mem(ie->hints) + forwards_get_mem(ie->fwds)
+ + donotq_get_mem(ie->donotq);
+}
+
/**
* The iterator function block
*/
static struct module_func_block iter_block = {
"iterator",
&iter_init, &iter_deinit, &iter_operate, &iter_inform_super,
- &iter_clear
+ &iter_clear, &iter_get_mem
};
struct module_func_block*
/** use next free buffer to service a tcp query */
static int
-outnet_tcp_take_into_use(struct waiting_tcp* w, uint8_t* pkt)
+outnet_tcp_take_into_use(struct waiting_tcp* w, uint8_t* pkt, size_t pkt_len)
{
struct pending_tcp* pend = w->outnet->tcp_free;
int s;
pend->next_free = NULL;
pend->query = w;
ldns_buffer_clear(pend->c->buffer);
- ldns_buffer_write(pend->c->buffer, pkt, w->pkt_len);
+ ldns_buffer_write(pend->c->buffer, pkt, pkt_len);
ldns_buffer_flip(pend->c->buffer);
pend->c->tcp_is_reading = 0;
pend->c->tcp_byte_count = 0;
outnet->tcp_wait_first = w->next_waiting;
if(outnet->tcp_wait_last == w)
outnet->tcp_wait_last = NULL;
- if(!outnet_tcp_take_into_use(w, w->pkt)) {
+ if(!outnet_tcp_take_into_use(w, w->pkt, w->pkt_len)) {
(void)(*w->cb)(NULL, w->cb_arg, NETEVENT_CLOSED, NULL);
waiting_tcp_delete(w);
}
return NULL;
}
w->pkt = NULL;
- w->pkt_len = ldns_buffer_limit(packet);
+ w->pkt_len = 0;
/* id uses lousy random() TODO use better and entropy */
id = ((unsigned)ub_random(rnd)>>8) & 0xffff;
LDNS_ID_SET(ldns_buffer_begin(packet), id);
comm_timer_set(w->timer, &tv);
if(pend) {
/* we have a buffer available right now */
- if(!outnet_tcp_take_into_use(w, ldns_buffer_begin(packet))) {
+ if(!outnet_tcp_take_into_use(w, ldns_buffer_begin(packet),
+ ldns_buffer_limit(packet))) {
waiting_tcp_delete(w);
return NULL;
}
} else {
/* queue up */
w->pkt = (uint8_t*)w + sizeof(struct waiting_tcp);
+ w->pkt_len = ldns_buffer_limit(packet);
memmove(w->pkt, ldns_buffer_begin(packet), w->pkt_len);
w->next_waiting = NULL;
if(outnet->tcp_wait_last)
s = sizeof(*sq) + sq->qbuflen;
for(sb = sq->cblist; sb; sb = sb->next)
s += sizeof(*sb);
- /* always sq->pending existed, but is null to delete after callback */
if(sq->status == serviced_query_UDP_EDNS ||
sq->status == serviced_query_UDP) {
s += sizeof(struct pending);
s += comm_timer_get_mem(NULL);
} else {
/* does not have size of the pkt pointer */
+ /* always has a timer except on malloc failures */
+
+ /* these sizes are part of the main outside network mem */
+ /*
s += sizeof(struct waiting_tcp);
- /* always has a timer expect on malloc failures */
s += comm_timer_get_mem(NULL);
+ */
}
return s;
}
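The commented-out lines above move the tcp wait-list bookkeeping out of serviced_get_mem; below is a minimal sketch of how the outside network's own report could count that list instead, assuming outnet is a struct outside_network* as in the code above. The function name outnet_tcp_wait_get_mem is hypothetical; the fields tcp_wait_first, next_waiting, pkt_len and timer are the ones used earlier in this patch.

/* hypothetical sketch: account for queued tcp queries in the outside
 * network's own memory report, now that serviced_get_mem skips them */
static size_t
outnet_tcp_wait_get_mem(struct outside_network* outnet)
{
	size_t s = 0;
	struct waiting_tcp* w;
	for(w = outnet->tcp_wait_first; w; w = w->next_waiting) {
		/* the waiting_tcp struct is allocated together with the
		 * queued packet, see the memmove into w->pkt above */
		s += sizeof(*w) + w->pkt_len;
		/* every queued entry carries a timeout timer */
		s += comm_timer_get_mem(w->timer);
	}
	return s;
}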
cfg->val_date_override = 0;
cfg->val_clean_additional = 1;
cfg->val_permissive_mode = 0;
+ cfg->key_cache_size = 4 * 1024 * 1024;
+ cfg->key_cache_slabs = 4;
if(!(cfg->module_conf = strdup("validator iterator"))) goto error_exit;
return cfg;
error_exit:
/** should validator allow bogus messages to go through */
int val_permissive_mode;
+ /** size of the key cache */
+ size_t key_cache_size;
+ /** slabs in the key cache. */
+ size_t key_cache_slabs;
+
/** daemonize, i.e. fork into the background. */
int do_daemonize;
};
val-bogus-ttl{COLON} { YDOUT; return VAR_BOGUS_TTL;}
val-clean-additional{COLON} { YDOUT; return VAR_VAL_CLEAN_ADDITIONAL;}
val-permissive-mode{COLON} { YDOUT; return VAR_VAL_PERMISSIVE_MODE;}
+key-cache-size{COLON} { YDOUT; return VAR_KEY_CACHE_SIZE;}
+key-cache-slabs{COLON} { YDOUT; return VAR_KEY_CACHE_SLABS;}
{NEWLINE} { LEXOUT(("NL\n")); cfg_parser->line++;}
/* Quoted strings. Strip leading and ending quotes */
%token VAR_IDENTITY VAR_VERSION VAR_HARDEN_GLUE VAR_MODULE_CONF
%token VAR_TRUST_ANCHOR_FILE VAR_TRUST_ANCHOR VAR_VAL_OVERRIDE_DATE
%token VAR_BOGUS_TTL VAR_VAL_CLEAN_ADDITIONAL VAR_VAL_PERMISSIVE_MODE
-%token VAR_INCOMING_NUM_TCP VAR_MSG_BUFFER_SIZE
+%token VAR_INCOMING_NUM_TCP VAR_MSG_BUFFER_SIZE VAR_KEY_CACHE_SIZE
+%token VAR_KEY_CACHE_SLABS
%%
toplevelvars: /* empty */ | toplevelvars toplevelvar ;
server_harden_glue | server_module_conf | server_trust_anchor_file |
server_trust_anchor | server_val_override_date | server_bogus_ttl |
server_val_clean_additional | server_val_permissive_mode |
- server_incoming_num_tcp | server_msg_buffer_size
+ server_incoming_num_tcp | server_msg_buffer_size |
+ server_key_cache_size | server_key_cache_slabs
;
stubstart: VAR_STUB_ZONE
{
free($2);
}
;
+server_key_cache_size: VAR_KEY_CACHE_SIZE STRING
+ {
+ OUTYY(("P(server_key_cache_size:%s)\n", $2));
+ if(atoi($2) == 0)
+ yyerror("number expected");
+ else cfg_parser->cfg->key_cache_size = atoi($2);
+ free($2);
+ }
+ ;
+server_key_cache_slabs: VAR_KEY_CACHE_SLABS STRING
+ {
+ OUTYY(("P(server_key_cache_slabs:%s)\n", $2));
+ if(atoi($2) == 0)
+ yyerror("number expected");
+ else {
+ cfg_parser->cfg->key_cache_slabs = atoi($2);
+ if(!is_pow2(cfg_parser->cfg->key_cache_slabs))
+ yyerror("must be a power of 2");
+ }
+ free($2);
+ }
+ ;
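The slab count above is validated with is_pow2; below is a minimal sketch of such a power-of-two test, for illustration only (the in-tree helper may differ, for instance in how it treats 0, which the rule above already rejects via the atoi check).

/* illustrative power-of-two test as used to validate slab counts */
static int
example_is_pow2(size_t num)
{
	if(num == 0)
		return 0;	/* zero is not a power of two */
	return (num & (num-1)) == 0;
}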
stub_name: VAR_NAME STRING
{
OUTYY(("P(name:%s)\n", $2));
* return: 0 on error
*/
int (*init)(struct module_env* env, int id);
+
/**
* de-init, delete, the module. Called once for the global state.
* @param env: module environment.
* clear module specific data
*/
void (*clear)(struct module_qstate* qstate, int id);
+
+ /**
+ * How much memory is the module specific data using.
+ * @param env: module environment.
+ * @param id: the module id.
+ * @return the number of bytes that are alloced.
+ */
+ size_t (*get_mem)(struct module_env* env, int id);
};
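For reference, a minimal sketch of how a module could satisfy the new get_mem callback above, assuming the surrounding unbound headers; the example_state struct and example_get_mem function are hypothetical, while the real implementations are iter_get_mem in the iterator and val_get_mem in the validator elsewhere in this patch.

/* hypothetical module state, only to illustrate the get_mem contract */
struct example_state {
	uint8_t* buf;	/* an allocation owned by the module */
	size_t buflen;	/* its size in bytes */
};

/* report all memory reachable from env->modinfo[id] for this module */
static size_t
example_get_mem(struct module_env* env, int id)
{
	struct example_state* es = (struct example_state*)env->modinfo[id];
	if(!es)
		return 0;	/* not initialised, nothing allocated */
	return sizeof(*es) + es->buflen;
}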
/**
}
return result;
}
+
+size_t
+anchors_get_mem(struct val_anchors* anchors)
+{
+ return sizeof(*anchors) + region_get_mem(anchors->region);
+}
int anchor_store_str(struct val_anchors* anchors, ldns_buffer* buffer,
const char* str);
+/**
+ * Get memory in use by the trust anchor storage.
+ * @param anchors: anchor storage.
+ * @return memory in use in bytes.
+ */
+size_t anchors_get_mem(struct val_anchors* anchors);
+
#endif /* VALIDATOR_VAL_ANCHOR_H */
log_err("malloc failure");
return NULL;
}
- (void)cfg; /* TODO use config for keycache params */
- numtables = HASH_DEFAULT_SLABS;
+ numtables = cfg->key_cache_slabs;
start_size = HASH_DEFAULT_STARTARRAY;
- maxmem = HASH_DEFAULT_MAXMEM;
+ maxmem = cfg->key_cache_size;
kcache->slab = slabhash_create(numtables, start_size, maxmem,
&key_entry_sizefunc, &key_entry_compfunc,
&key_entry_delkeyfunc, &key_entry_deldatafunc, NULL);
}
return NULL;
}
+
+size_t
+key_cache_get_mem(struct key_cache* kcache)
+{
+ return sizeof(*kcache) + slabhash_get_mem(kcache->slab);
+}
+
uint8_t* name, size_t namelen, uint16_t key_class,
struct region* region);
+/**
+ * Get memory in use by the key cache.
+ * @param kcache: the key cache.
+ * @return memory in use in bytes.
+ */
+size_t key_cache_get_mem(struct key_cache* kcache);
+
#endif /* VALIDATOR_VAL_KCACHE_H */
qstate->minfo[id] = NULL;
}
+/**
+ * Debug helper routine that assists worker in determining memory in
+ * use.
+ * @param env: module environment.
+ * @param id: module id.
+ * @return memory in use in bytes.
+ */
+static size_t
+val_get_mem(struct module_env* env, int id)
+{
+ struct val_env* ve = (struct val_env*)env->modinfo[id];
+ if(!ve)
+ return 0;
+ return sizeof(*ve) + key_cache_get_mem(ve->kcache) +
+ anchors_get_mem(ve->anchors);
+}
+
/**
* The validator function block
*/
static struct module_func_block val_block = {
"validator",
- &val_init, &val_deinit, &val_operate, &val_inform_super, &val_clear
+ &val_init, &val_deinit, &val_operate, &val_inform_super, &val_clear,
+ &val_get_mem
};
struct module_func_block*
}
return "UNKNOWN VALIDATOR STATE";
}
+