free(daemon);
return NULL;
}
+ daemon->rrset_cache = slabhash_create(HASH_DEFAULT_SLABS,
+ HASH_DEFAULT_STARTARRAY, HASH_DEFAULT_MAXMEM,
+ ub_rrset_sizefunc, ub_rrset_compare,
+ ub_rrset_key_delete, rrset_data_delete, &daemon->superalloc);
+ if(!daemon->rrset_cache) {
+ slabhash_delete(daemon->msg_cache);
+ free(daemon);
+ return NULL;
+ }
alloc_init(&daemon->superalloc, NULL, 0);
return daemon;
}
return;
listening_ports_free(daemon->ports);
slabhash_delete(daemon->msg_cache);
+ slabhash_delete(daemon->rrset_cache);
alloc_clear(&daemon->superalloc);
free(daemon->cwd);
free(daemon->pidfile);
struct alloc_cache superalloc;
/** the message cache, content is struct msgreply_entry* */
struct slabhash* msg_cache;
+ /** the rrset cache, content is struct ub_packed_rrset_key* */
+ struct slabhash* rrset_cache;
};
/**
w->worker->free_queries = w;
}
-/** reply to query with given error code */
-static void
-replyerror(int r, struct work_query* w)
+/** create error and fill into buffer */
+static void
+replyerror_fillbuf(int r, struct comm_reply* repinfo, uint16_t id,
+ uint16_t qflags, struct query_info* qinfo)
{
- ldns_buffer* buf = w->query_reply.c->buffer;
+ ldns_buffer* buf = repinfo->c->buffer;
uint16_t flags;
verbose(VERB_DETAIL, "reply with error");
ldns_buffer_clear(buf);
- ldns_buffer_write(buf, &w->query_id, sizeof(uint16_t));
+ ldns_buffer_write(buf, &id, sizeof(uint16_t));
flags = (uint16_t)(BIT_QR | r); /* QR and retcode*/
- flags |= (w->query_flags & (BIT_RD|BIT_CD)); /* copy RD and CD bit */
+ flags |= (qflags & (BIT_RD|BIT_CD)); /* copy RD and CD bit */
ldns_buffer_write_u16(buf, flags);
flags = 1;
ldns_buffer_write_u16(buf, flags);
ldns_buffer_write(buf, &flags, sizeof(uint16_t));
ldns_buffer_write(buf, &flags, sizeof(uint16_t));
ldns_buffer_write(buf, &flags, sizeof(uint16_t));
- ldns_buffer_write(buf, w->qinfo.qname, w->qinfo.qnamesize);
- ldns_buffer_write_u16(buf, w->qinfo.qtype);
- ldns_buffer_write_u16(buf, w->qinfo.qclass);
+ ldns_buffer_write(buf, qinfo->qname, qinfo->qnamesize);
+ ldns_buffer_write_u16(buf, qinfo->qtype);
+ ldns_buffer_write_u16(buf, qinfo->qclass);
ldns_buffer_flip(buf);
+}
+
+/** reply to query with given error code */
+static void
+replyerror(int r, struct work_query* w)
+{
+ replyerror_fillbuf(r, &w->query_reply, w->query_id, w->query_flags,
+ &w->qinfo);
comm_point_send_reply(&w->query_reply);
req_release(w);
query_info_clear(&w->qinfo);
}
+/** store rrsets in the rrset cache.
+ * Each rrset of the reply is inserted into the daemon's shared rrset
+ * slabhash under its precomputed hash (entry.hash is set during parse).
+ * NOTE(review): presumably the cache takes over the inserted entry --
+ * confirm slabhash_insert ownership semantics. */
+static void
+worker_store_rrsets(struct worker* worker, struct reply_info* rep)
+{
+ size_t i;
+ for(i=0; i<rep->rrset_count; i++) {
+ /* TODO: check if update really needed */
+ slabhash_insert(worker->daemon->rrset_cache,
+ rep->rrsets[i]->entry.hash, &rep->rrsets[i]->entry,
+ rep->rrsets[i]->entry.data);
+ }
+}
+
/** process incoming replies from the network */
static int
worker_handle_reply(struct comm_point* c, void* arg, int error,
struct comm_reply* ATTR_UNUSED(reply_info))
{
struct work_query* w = (struct work_query*)arg;
+ struct query_info qinf;
struct reply_info* rep;
struct msgreply_entry* e;
+ int r;
+
verbose(VERB_DETAIL, "reply to query with stored ID %d",
ntohs(w->query_id)); /* byteswapped so same as dig prints */
if(error != 0) {
if(LDNS_QDCOUNT(ldns_buffer_begin(c->buffer)) > 1)
return 0; /* too much in the query section */
/* woohoo a reply! */
- rep = (struct reply_info*)malloc(sizeof(struct reply_info));
- if(!rep) {
- log_err("out of memory");
+ if((r=reply_info_parse(c->buffer, &w->worker->alloc, &qinf, &rep))!=0) {
+ if(r == LDNS_RCODE_SERVFAIL)
+ log_err("reply_info_parse: out of memory");
+ /* formerr on my parse gives servfail to my client */
replyerror(LDNS_RCODE_SERVFAIL, w);
return 0;
}
- rep->flags = ldns_buffer_read_u16_at(c->buffer, 2);
- rep->replysize = ldns_buffer_limit(c->buffer) - DNS_ID_AND_FLAGS;
- log_info("got reply of size %u", (unsigned)rep->replysize);
- rep->reply = (uint8_t*)malloc(rep->replysize);
- if(!rep->reply) {
- free(rep);
- log_err("out of memory");
+ if(!reply_info_answer_encode(&qinf, rep, w->query_id, w->query_flags,
+ w->query_reply.c->buffer, 0, 0)) {
replyerror(LDNS_RCODE_SERVFAIL, w);
+ query_info_clear(&qinf);
+ reply_info_parsedelete(rep, &w->worker->alloc);
return 0;
}
- memcpy(rep->reply, ldns_buffer_at(c->buffer, DNS_ID_AND_FLAGS),
- rep->replysize);
- reply_info_answer_iov(rep, w->query_id, w->query_flags,
- &w->query_reply, 0);
+ comm_point_send_reply(&w->query_reply);
req_release(w);
- /* store or update reply in the cache */
- if(!(e = query_info_entrysetup(&w->qinfo, rep, w->query_hash))) {
- free(rep->reply);
- free(rep);
- log_err("out of memory");
+ query_info_clear(&w->qinfo);
+ if(rep->ttl == 0) {
+ log_info("TTL 0: dropped");
+ query_info_clear(&qinf);
+ reply_info_parsedelete(rep, &w->worker->alloc);
+ return 0;
+ }
+ reply_info_set_ttls(rep, time(0));
+ worker_store_rrsets(w->worker, rep);
+ reply_info_fillref(rep);
+ /* store msg in the cache */
+ if(!(e = query_info_entrysetup(&qinf, rep, w->query_hash))) {
+ query_info_clear(&qinf);
+ reply_info_parsedelete(rep, &w->worker->alloc);
return 0;
}
slabhash_insert(w->worker->daemon->msg_cache, w->query_hash,
return 0;
}
+/** answer query from the cache.
+ * Caller already holds a read lock on the msg cache entry e.
+ * @param e: msg cache entry; e->key is struct msgreply_entry*,
+ *	e->data is struct reply_info*.
+ * @param id: query ID as read from the wire (byte order untouched).
+ * @param flags: flags word from the query.
+ * @param repinfo: reply state; the answer (or a SERVFAIL message if
+ *	encoding fails) is built into repinfo->c->buffer.
+ * @return 1 if the buffer is filled and can be sent to the client,
+ *	0 if the cached data is unusable (message TTL expired, or a
+ *	referenced rrset was replaced or expired in the meantime).
+ */
+static int
+answer_from_cache(struct lruhash_entry* e, uint16_t id,
+ uint16_t flags, struct comm_reply* repinfo)
+{
+ struct msgreply_entry* mrentry = (struct msgreply_entry*)e->key;
+ struct reply_info* rep = (struct reply_info*)e->data;
+ uint32_t timenow = time(0);
+ size_t i;
+ /* see if it is possible */
+ if(rep->ttl <= timenow) {
+ /* the rrsets may have been updated in the meantime */
+ /* but this ignores it */
+ return 0;
+ }
+ /* check rrsets: take read locks (refs were sorted into lock order
+  * by reply_info_fillref, so acquisition order is fixed) and verify
+  * each referenced rrset is still the one we cached (same id). */
+ for(i=0; i<rep->rrset_count; i++) {
+ lock_rw_rdlock(&rep->ref[i].key->entry.lock);
+ if(rep->ref[i].id != rep->ref[i].key->id ||
+ rep->ttl <= timenow) {
+ /* failure! rollback our readlocks */
+ /* j<=i: the lock at index i was acquired just above */
+ size_t j;
+ for(j=0; j<=i; j++)
+ lock_rw_unlock(&rep->ref[j].key->entry.lock);
+ return 0;
+ }
+ }
+ /* locked and ids and ttls are OK. */
+ if(!reply_info_answer_encode(&mrentry->key, rep, id, flags,
+ repinfo->c->buffer, timenow, 1)) {
+ /* encode failed: put a SERVFAIL in the buffer instead; we
+  * still return 1 so the caller sends it to the client. */
+ replyerror_fillbuf(LDNS_RCODE_SERVFAIL, repinfo, id,
+ flags, &mrentry->key);
+ }
+ /* unlock */
+ for(i=0; i<rep->rrset_count; i++)
+ lock_rw_unlock(&rep->ref[i].key->entry.lock);
+ /* go and return this buffer to the client */
+ return 1;
+}
+
/** handles callbacks from listening event interface */
static int
worker_handle_request(struct comm_point* c, void* arg, int error,
h = query_info_hash(&qinfo);
if((e=slabhash_lookup(worker->daemon->msg_cache, h, &qinfo, 0))) {
/* answer from cache - we have acquired a readlock on it */
- uint16_t id;
log_info("answer from the cache");
- memcpy(&id, ldns_buffer_begin(c->buffer), sizeof(uint16_t));
- reply_info_answer_iov((struct reply_info*)e->data, id,
- ldns_buffer_read_u16_at(c->buffer, 2), repinfo, 1);
+ if(answer_from_cache(e,
+ *(uint16_t*)ldns_buffer_begin(c->buffer),
+ ldns_buffer_read_u16_at(c->buffer, 2), repinfo)) {
+ lock_rw_unlock(&e->lock);
+ return 1;
+ }
+ log_info("answer from the cache -- data has timed out");
lock_rw_unlock(&e->lock);
- return 0;
}
ldns_buffer_rewind(c->buffer);
server_stats_querymiss(&worker->stats, worker);
+3 May 2007: Wouter
+ - fill refs. Use new parse and encode to answer queries.
+ - stores rrsets in cache.
+ - uses new msgreply format in cache.
+
2 May 2007: Wouter
- dname unit tests in own file and spread out neatly in functions.
- more dname unit tests.
#define PARSE_TABLE_SIZE 1024
/** Maximum TTL that is allowed. */
#define MAX_TTL 3600*24*365*10 /* ten years */
+/** Negative cache time (for entries without any RRs). */
+#define NORR_TTL 5 /* seconds */
/**
* Data stored in scratch pad memory during parsing.
qinf->qnamesize = msg->qname_len;
qinf->qtype = msg->qtype;
qinf->qclass = msg->qclass;
+ qinf->has_cd = 0;
+ if(msg->flags & BIT_CD)
+ qinf->has_cd = 1;
return 1;
}
sizeof(struct rrset_ref) * (msg->rrset_count-1) +
sizeof(struct ub_packed_rrset_key*) * msg->rrset_count);
if(!*rep) return 0;
- (*rep)->reply = 0; /* unused */
- (*rep)->replysize = 0; /* unused */
(*rep)->flags = msg->flags;
(*rep)->qdcount = msg->qdcount;
(*rep)->ttl = 0;
struct rrset_parse *pset = msg->rrset_first;
struct packed_rrset_data* data;
log_assert(rep);
+ rep->ttl = MAX_TTL;
+ if(rep->rrset_count == 0)
+ rep->ttl = NORR_TTL;
for(i=0; i<rep->rrset_count; i++) {
rep->rrsets[i]->rk.flags = pset->flags;
if((ret=parse_create_rrset(pkt, pset, &data)) != 0)
return ret;
rep->rrsets[i]->entry.data = (void*)data;
+ rep->rrsets[i]->entry.key = (void*)rep->rrsets[i];
rep->rrsets[i]->entry.hash = pset->hash;
+ if(data->ttl < rep->ttl)
+ rep->ttl = data->ttl;
pset = pset->rrset_all_next;
}
return 0;
}
+/** helper compare function to sort refs in lock (key pointer) order.
+ * qsort passes pointers to the struct rrset_ref array elements, so we
+ * must compare the rrset key addresses stored inside the elements.
+ * (Comparing the element pointers a and b directly would sort by the
+ * elements' own addresses, which are already ascending -- a no-op that
+ * leaves the refs unsorted and breaks the fixed lock order relied on
+ * by answer_from_cache.) */
+static int
+reply_info_fillref_cmp(const void* a, const void* b)
+{
+ struct rrset_ref* x = (struct rrset_ref*)a;
+ struct rrset_ref* y = (struct rrset_ref*)b;
+ if(x->key < y->key) return -1;
+ if(x->key > y->key) return 1;
+ return 0;
+}
+
+/* Fill rep->ref[]: snapshot each rrset's key pointer and its current
+ * id (so later cache hits can detect that an rrset was replaced),
+ * then sort the refs into lock order (see reply_info_fillref_cmp) so
+ * that acquiring all the rrset locks uses a fixed order. */
+void
+reply_info_fillref(struct reply_info* rep)
+{
+ size_t i;
+ for(i=0; i<rep->rrset_count; i++) {
+ rep->ref[i].key = rep->rrsets[i];
+ rep->ref[i].id = rep->rrsets[i]->id;
+ }
+ qsort(&rep->ref[0], rep->rrset_count, sizeof(struct rrset_ref),
+ reply_info_fillref_cmp);
+}
+
+/* Convert the stored relative TTLs into absolute expiry times by
+ * adding timenow: the overall message TTL, each rrset's data TTL and
+ * every individual RR TTL (RRSIG TTLs included). */
+void
+reply_info_set_ttls(struct reply_info* rep, uint32_t timenow)
+{
+ size_t i, j;
+ rep->ttl += timenow;
+ for(i=0; i<rep->rrset_count; i++) {
+ struct packed_rrset_data* data = (struct packed_rrset_data*)
+ rep->rrsets[i]->entry.data;
+ data->ttl += timenow;
+ /* rr_ttl[] holds the normal RRs first, then the rrsigs */
+ for(j=0; j<data->count + data->rrsig_count; j++)
+ data->rr_ttl[j] += timenow;
+ }
+}
+
void
reply_info_parsedelete(struct reply_info* rep, struct alloc_cache* alloc)
{
log_assert(LDNS_OPCODE_WIRE(q) == LDNS_PACKET_QUERY);
log_assert(LDNS_QDCOUNT(q) == 1);
log_assert(ldns_buffer_position(query) == 0);
- m->has_cd = (int)LDNS_CD_WIRE(q);
+ m->has_cd = LDNS_CD_WIRE(q)?1:0;
ldns_buffer_skip(query, LDNS_HEADER_SIZE);
m->qname = ldns_buffer_current(query);
if((m->qnamesize = query_dname_len(query)) == 0)
m->qname = NULL;
}
-void
-reply_info_clear(struct reply_info* m)
-{
- free(m->reply);
- m->reply = NULL;
-}
-
size_t
msgreply_sizefunc(void* k, void* d)
{
struct query_info* q = (struct query_info*)k;
struct reply_info* r = (struct reply_info*)d;
- return sizeof(struct msgreply_entry) + sizeof(struct reply_info)
- + r->replysize + q->qnamesize;
+ size_t s = sizeof(struct msgreply_entry) + sizeof(struct reply_info)
+ + q->qnamesize;
+ if(r->rrset_count > 0)
+ s += r->rrset_count * (sizeof(struct ub_packed_rrset_key*) +
+ sizeof(struct rrset_ref));
+ return s;
}
void
reply_info_delete(void* d, void* ATTR_UNUSED(arg))
{
struct reply_info* r = (struct reply_info*)d;
- reply_info_clear(r);
free(r);
}
return h;
}
-void
-reply_info_answer(struct reply_info* rep, uint16_t qflags,
- ldns_buffer* buffer)
-{
- uint16_t flags;
- ldns_buffer_clear(buffer);
- ldns_buffer_skip(buffer, 2); /* ID */
- flags = rep->flags | (qflags & BIT_RD); /* copy RD bit */
- log_assert(flags & BIT_QR); /* QR bit must be on in our replies */
- ldns_buffer_write_u16(buffer, flags);
- ldns_buffer_write(buffer, rep->reply, rep->replysize);
- ldns_buffer_flip(buffer);
-}
-
/**
* Data structure to help domain name compression in outgoing messages.
* A tree of dnames and their offsets in the packet is kept.
if(do_data) {
const ldns_rr_descriptor* c = type_rdata_compressable(key);
for(i=0; i<data->count; i++) {
- if(1) { /* compression */
- if((r=compress_owner(key, pkt, region, tree,
- owner_pos, &owner_ptr, owner_labs))
- != RETVAL_OK)
- return r;
- } else {
- /* no compression */
- if(ldns_buffer_remaining(pkt) <
- key->rk.dname_len + 4+4+2)
- return RETVAL_TRUNC;
- ldns_buffer_write(pkt, key->rk.dname,
- key->rk.dname_len + 4);
- }
+ if((r=compress_owner(key, pkt, region, tree,
+ owner_pos, &owner_ptr, owner_labs))
+ != RETVAL_OK)
+ return r;
ldns_buffer_write_u32(pkt, data->rr_ttl[i]-timenow);
if(c) {
if((r=compress_rdata(pkt, data->rr_data[i],
if(do_sig) {
size_t total = data->count+data->rrsig_count;
for(i=data->count; i<total; i++) {
- if(1) { /* compression */
- if(owner_ptr) {
- if(ldns_buffer_remaining(pkt) <
- 2+4+4+data->rr_len[i])
- return RETVAL_TRUNC;
- ldns_buffer_write(pkt, &owner_ptr, 2);
- } else {
- if((r=compress_any_dname(key->rk.dname,
- pkt, owner_labs, region, tree))
- != RETVAL_OK)
- return r;
- if(ldns_buffer_remaining(pkt) <
- 4+4+data->rr_len[i])
- return RETVAL_TRUNC;
- }
+ if(owner_ptr) {
+ if(ldns_buffer_remaining(pkt) <
+ 2+4+4+data->rr_len[i])
+ return RETVAL_TRUNC;
+ ldns_buffer_write(pkt, &owner_ptr, 2);
} else {
- /* no compression */
+ if((r=compress_any_dname(key->rk.dname,
+ pkt, owner_labs, region, tree))
+ != RETVAL_OK)
+ return r;
if(ldns_buffer_remaining(pkt) <
- key->rk.dname_len+4+4+data->rr_len[i])
+ 4+4+data->rr_len[i])
return RETVAL_TRUNC;
- ldns_buffer_write(pkt, key->rk.dname,
- key->rk.dname_len);
}
ldns_buffer_write_u16(pkt, LDNS_RR_TYPE_RRSIG);
ldns_buffer_write(pkt, &(key->rk.dname[
return 1;
}
-void
-reply_info_answer_iov(struct reply_info* rep, uint16_t qid,
- uint16_t qflags, struct comm_reply* comrep, int cached)
+int
+reply_info_answer_encode(struct query_info* qinf, struct reply_info* rep,
+ uint16_t id, uint16_t qflags, ldns_buffer* pkt, uint32_t timenow,
+ int cached)
{
- /* [0]=reserved for tcplen, [1]=id, [2]=flags, [3]=message */
- struct iovec iov[4];
+ uint16_t flags;
+ region_type* region = region_create(malloc, free);
- iov[1].iov_base = (void*)&qid;
- iov[1].iov_len = sizeof(uint16_t);
if(!cached) {
/* original flags, copy RD bit from query. */
- qflags = rep->flags | (qflags & BIT_RD);
+ flags = rep->flags | (qflags & BIT_RD);
} else {
/* remove AA bit, copy RD and CD bits from query. */
- qflags = (rep->flags & ~BIT_AA) | (qflags & (BIT_RD|BIT_CD));
+ flags = (rep->flags & ~BIT_AA) | (qflags & (BIT_RD|BIT_CD));
}
- log_assert(qflags & BIT_QR); /* QR bit must be on in our replies */
- qflags = htons(qflags);
- iov[2].iov_base = (void*)&qflags;
- iov[2].iov_len = sizeof(uint16_t);
- iov[3].iov_base = (void*)rep->reply;
- iov[3].iov_len = rep->replysize;
- comm_point_send_reply_iov(comrep, iov, 4);
+ log_assert(flags & BIT_QR); /* QR bit must be on in our replies */
+
+ if(!reply_info_encode(qinf, rep, id, flags, pkt, timenow, region)) {
+ log_err("reply encode: out of memory");
+ return 0;
+ }
+ region_destroy(region);
+ return 1;
}
struct msgreply_entry*
* o packed_rrset_key* array.
*/
struct reply_info {
- /** the reply packet, skips ID and flags,
- * starts with opcode/rcode word */
- uint8_t* reply;
- /** the reply size */
- size_t replysize;
-
/** the flags for the answer, host byte order. */
uint16_t flags;
int reply_info_parse(ldns_buffer* pkt, struct alloc_cache* alloc,
struct query_info* qinf, struct reply_info** rep);
+/**
+ * Fills in the ref array based on the rest of the structure, the rrsets.
+ * @param rep: reply info. rrsets must be filled in.
+ */
+void reply_info_fillref(struct reply_info* rep);
+
+/**
+ * Set TTLs inside the replyinfo to absolute values.
+ * @param rep: reply info. rrsets must be filled in.
+ * @param timenow: the current time.
+ */
+void reply_info_set_ttls(struct reply_info* rep, uint32_t timenow);
+
/**
* Delete reply_info and packed_rrsets (while they are not yet added to the
* hashtables.). Returns rrsets to the alloc cache.
/** clear out query info structure. */
void query_info_clear(struct query_info* m);
-/** clear out reply info structure */
-void reply_info_clear(struct reply_info* m);
-
/** calculate size of struct query_info + reply_info */
size_t msgreply_sizefunc(void* k, void* d);
/**
* Generate answer from reply_info.
+ * @param qinf: query information that provides query section in packet.
* @param rep: reply to fill in.
+ * @param id: id word from the query.
* @param qflags: flags word from the query.
- * @param buf: buffer to put reply into. Note that the query ID must
- * be put in the buffer by caller.
- * The buffer must be large enough.
+ * @param dest: buffer to put message into; will truncate if it does not fit.
+ * @param timenow: time to subtract.
+ * @param cached: set true if a cached reply (so no AA bit).
+ * set false for the first reply.
+ * @return: 0 on error (server failure).
*/
-void reply_info_answer(struct reply_info* rep, uint16_t qflags,
- ldns_buffer* buf);
+int reply_info_answer_encode(struct query_info* qinf, struct reply_info* rep,
+ uint16_t id, uint16_t qflags, ldns_buffer* dest, uint32_t timenow,
+ int cached);
/**
* Regenerate the wireformat from the stored msg reply.
uint16_t id, uint16_t flags, ldns_buffer* buffer, uint32_t timenow,
struct region* region);
-/**
- * Generate and send out answer from reply_info.
- * @param rep: reply to fill in.
- * @param qid: query id, in network byte order.
- * @param qflags: flags word from the query.
- * @param comrep: communication reply point.
- * @param cached: set true if a cached reply (so no AA bit).
- * set false for the first reply.
- */
-void reply_info_answer_iov(struct reply_info* rep, uint16_t qid,
- uint16_t qflags, struct comm_reply* comrep, int cached);
-
/**
* Setup query info entry
* @param q: query info to copy. Emptied as if clear is called.
alloc_special_release(alloc, pkey);
}
+/** calculate memory size of an rrset cache entry (key + data).
+ * Counts the key struct, the owner dname, and the packed data blob.
+ * NOTE(review): assumes packed_rrset_data is one contiguous
+ * allocation with the rdata of the last RR (or last RRSIG, if any)
+ * at its end, so (last rr_data - start) + last rr_len gives the blob
+ * size -- confirm against the rrset parse/allocation code. */
+size_t
+ub_rrset_sizefunc(void* key, void* data)
+{
+ struct ub_packed_rrset_key* k = (struct ub_packed_rrset_key*)key;
+ struct packed_rrset_data* d = (struct packed_rrset_data*)data;
+ size_t s = sizeof(struct ub_packed_rrset_key) + k->rk.dname_len;
+ if(d->rrsig_count > 0) {
+ /* rrsigs follow the RRs; the last rrsig ends the blob */
+ s += ((uint8_t*)d->rr_data[d->count+d->rrsig_count-1] -
+ (uint8_t*)d) + d->rr_len[d->count+d->rrsig_count-1];
+ } else {
+ log_assert(d->count > 0);
+ s += ((uint8_t*)d->rr_data[d->count-1] - (uint8_t*)d) +
+ d->rr_len[d->count-1];
+ }
+ return s;
+}
+
+/** compare two rrset cache keys (hash table compare function).
+ * Ordering: dname length first, then dname bytes, then rk.flags.
+ * @return 0 if equal, negative/positive to order unequal keys. */
+int
+ub_rrset_compare(void* k1, void* k2)
+{
+ struct ub_packed_rrset_key* key1 = (struct ub_packed_rrset_key*)k1;
+ struct ub_packed_rrset_key* key2 = (struct ub_packed_rrset_key*)k2;
+ int c;
+ /* same object, or same id, means the same rrset.
+  * NOTE(review): assumes ids uniquely identify live rrsets
+  * (ub_rrset_key_delete zeroes the id) -- confirm id assignment. */
+ if(key1 == key2 || key1->id == key2->id)
+ return 0;
+ if(key1->rk.dname_len != key2->rk.dname_len) {
+ if(key1->rk.dname_len < key2->rk.dname_len)
+ return -1;
+ return 1;
+ }
+ if((c=memcmp(key1->rk.dname, key2->rk.dname, key1->rk.dname_len)) != 0)
+ return c;
+ if(key1->rk.flags != key2->rk.flags) {
+ if(key1->rk.flags < key2->rk.flags)
+ return -1;
+ return 1;
+ }
+ return 0;
+}
+
+/** recycle an rrset key evicted from the cache.
+ * Sets id to 0 to mark the key invalid, so stale rrset_refs (which
+ * compare ref->id against key->id, see answer_from_cache) no longer
+ * match; frees the owner dname; then returns the key structure to
+ * the alloc special cache under the alloc lock.
+ * @param key: struct ub_packed_rrset_key* being deleted.
+ * @param userdata: struct alloc_cache* to recycle the key into. */
+void
+ub_rrset_key_delete(void* key, void* userdata)
+{
+ struct ub_packed_rrset_key* k = (struct ub_packed_rrset_key*)key;
+ struct alloc_cache* a = (struct alloc_cache*)userdata;
+ k->id = 0;
+ free(k->rk.dname);
+ k->rk.dname = NULL;
+ lock_quick_lock(&a->lock);
+ alloc_special_release(a, k);
+ lock_quick_unlock(&a->lock);
+}
+
+/** free rrset data evicted from the cache (hash data delete func).
+ * A single free suffices; the packed data is presumably one malloc
+ * block holding the arrays and rdata (the pointer arithmetic in
+ * ub_rrset_sizefunc relies on the same layout) -- confirm. */
+void
+rrset_data_delete(void* data, void* ATTR_UNUSED(userdata))
+{
+ struct packed_rrset_data* d = (struct packed_rrset_data*)data;
+ free(d);
+}
void ub_packed_rrset_parsedelete(struct ub_packed_rrset_key* pkey,
struct alloc_cache* alloc);
+/**
+ * Calculate memory size of rrset entry. For hash table usage.
+ * @param key: struct ub_packed_rrset_key*.
+ * @param data: struct packed_rrset_data*.
+ * @return size in bytes.
+ */
+size_t ub_rrset_sizefunc(void* key, void* data);
+
+/**
+ * compares two rrset keys.
+ * @param k1: struct ub_packed_rrset_key*.
+ * @param k2: struct ub_packed_rrset_key*.
+ * @return 0 if equal.
+ */
+int ub_rrset_compare(void* k1, void* k2);
+
+/**
+ * Old key to be deleted. RRset keys are recycled via alloc.
+ * @param key: struct ub_packed_rrset_key*.
+ * @param userdata: alloc structure to use for recycling.
+ */
+void ub_rrset_key_delete(void* key, void* userdata);
+
+/**
+ * Old data to be deleted.
+ * @param data: what to delete.
+ * @param userdata: user data ptr.
+ */
+void rrset_data_delete(void* data, void* userdata);
+
#endif /* UTIL_DATA_PACKED_RRSET_H */