/* Look up the delegation in the cache. If null, then the cache needs
* to be primed for the qclass. */
iq->dp = dns_cache_find_delegation(qstate->env, delname, delnamelen,
- qstate->qinfo.qclass, qstate->region);
+ qstate->qinfo.qtype, qstate->qinfo.qclass, qstate->region,
+ &iq->deleg_msg);
/* If the cache has returned nothing, then we have a root priming
* situation. */
return next_state(qstate, iq, INIT_REQUEST_3_STATE);
}
-#if 0
-/** TODO */
+/**
+ * Process the third part of the initial request handling. This state exists
+ * as a separate state so that queries that generate stub priming events
+ * will get the tail end of the init process but not repeat the stub priming
+ * check.
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @return true if the event needs more request processing immediately,
+ * false if not.
+ */
static int
-processInitRequest3(struct module_qstate* qstate, struct iter_qstate* iq,
- struct iter_env* ie, int id)
+processInitRequest3(struct module_qstate* qstate, struct iter_qstate* iq)
{
- return 0;
+ log_nametypeclass("resolving (init part 3): ", qstate->qinfo.qname,
+ qstate->qinfo.qtype, qstate->qinfo.qclass);
+ /* If the RD flag wasn't set, then we just finish with the
+ * cached referral as the response. */
+ if(!(qstate->query_flags & BIT_RD)) {
+ iq->response = iq->deleg_msg;
+ return final_state(qstate, iq);
+ }
+
+	/* From here on, unset the RD flag -- this query is going to
+	 * be sent to an authoritative server. */
+ qstate->query_flags &= ~BIT_RD;
+
+ /* Jump to the next state. */
+ return next_state(qstate, iq, QUERYTARGETS_STATE);
}
+#if 0
/** TODO */
static int
processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
case INIT_REQUEST_2_STATE:
cont = processInitRequest2(qstate, iq, ie, id);
break;
-#if 0
case INIT_REQUEST_3_STATE:
- cont = processInitRequest3(qstate, iq, ie, id);
+ cont = processInitRequest3(qstate, iq);
break;
+#if 0
case QUERYTARGETS_STATE:
cont = processQueryTargets(qstate, iq, ie, id);
break;
slabhash_insert(env->msg_cache, hash, &e->entry, rep, env->alloc);
}
+/** allocate rrset in region - no more locks needed */
+static struct ub_packed_rrset_key*
+copy_rrset(struct ub_packed_rrset_key* key, struct region* region,
+ uint32_t now)
+{
+ struct ub_packed_rrset_key* ck = region_alloc(region,
+ sizeof(struct ub_packed_rrset_key));
+ struct packed_rrset_data* d;
+ struct packed_rrset_data* data = (struct packed_rrset_data*)
+ key->entry.data;
+ size_t dsize, i;
+ if(!ck)
+ return NULL;
+ ck->id = key->id;
+ memset(&ck->entry, 0, sizeof(ck->entry));
+ ck->entry.hash = key->entry.hash;
+ ck->entry.key = ck;
+ ck->rk = key->rk;
+ ck->rk.dname = region_alloc_init(region, key->rk.dname,
+ key->rk.dname_len);
+ if(!ck->rk.dname)
+ return NULL;
+ dsize = packed_rrset_sizeof(data);
+ d = (struct packed_rrset_data*)region_alloc_init(region, data, dsize);
+ if(!d)
+ return NULL;
+ ck->entry.data = d;
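+	/* fix up the internal rdata pointers so they point into the copy */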
+ packed_rrset_ptr_fixup(d);
+ /* make TTLs relative - once per rrset */
+ for(i=0; i<d->count + d->rrsig_count; i++)
+ d->rr_ttl[i] -= now;
+ d->ttl -= now;
+ return ck;
+}
+
/** find the closest NS and return the rrset (locked) */
static struct ub_packed_rrset_key*
find_deleg_ns(struct module_env* env, uint8_t* qname, size_t qnamelen,
return NULL;
}
+/** add addr to additional section */
+static void
+addr_to_additional(struct ub_packed_rrset_key* rrset, struct region* region,
+ struct dns_msg* msg, uint32_t now)
+{
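+	/* best effort: if the copy fails, the address rrset is simply
+	 * left out of the additional section */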
+ if((msg->rep->rrsets[msg->rep->rrset_count] =
+ copy_rrset(rrset, region, now))) {
+ msg->rep->ar_numrrsets++;
+ msg->rep->rrset_count++;
+ }
+}
+
/** add A records to delegation */
static int
add_a(struct ub_packed_rrset_key* ak, struct delegpt* dp,
- struct region* region)
+ struct region* region, struct dns_msg** msg, uint32_t now)
{
struct packed_rrset_data* d=(struct packed_rrset_data*)ak->entry.data;
size_t i;
len))
return 0;
}
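+	/* also place the address rrset in the referral message, if any */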
+ if(msg)
+ addr_to_additional(ak, region, *msg, now);
return 1;
}
/** add AAAA records to delegation */
static int
add_aaaa(struct ub_packed_rrset_key* ak, struct delegpt* dp,
- struct region* region)
+ struct region* region, struct dns_msg** msg, uint32_t now)
{
struct packed_rrset_data* d=(struct packed_rrset_data*)ak->entry.data;
size_t i;
len))
return 0;
}
+ if(msg)
+ addr_to_additional(ak, region, *msg, now);
return 1;
}
/** find and add A and AAAA records for nameservers in delegpt */
static int
find_add_addrs(struct module_env* env, uint16_t qclass, struct region* region,
- struct delegpt* dp, uint32_t now)
+ struct delegpt* dp, uint32_t now, struct dns_msg** msg)
{
struct delegpt_ns* ns;
struct ub_packed_rrset_key* akey;
akey = rrset_cache_lookup(env->rrset_cache, ns->name,
ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
if(akey) {
- if(!add_a(akey, dp, region)) {
+ if(!add_a(akey, dp, region, msg, now)) {
lock_rw_unlock(&akey->entry.lock);
return 0;
}
akey = rrset_cache_lookup(env->rrset_cache, ns->name,
ns->namelen, LDNS_RR_TYPE_AAAA, qclass, 0, now, 0);
if(akey) {
- if(!add_aaaa(akey, dp, region)) {
+ if(!add_aaaa(akey, dp, region, msg, now)) {
lock_rw_unlock(&akey->entry.lock);
return 0;
}
return 1;
}
+/** add NS records to delegation */
+static void
+add_ns(struct packed_rrset_data* nsdata, struct delegpt* dp,
+ struct region* region)
+{
+ size_t i;
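+	/* each rr_data element starts with a 2-octet rdata length,
+	 * followed by the NS target dname in wireformat */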
+ for(i=0; i<nsdata->count; i++) {
+ if(nsdata->rr_len[i] < 2+1) continue; /* len + root label */
+ if(dname_valid(nsdata->rr_data[i]+2, nsdata->rr_len[i]-2) !=
+ (size_t)ldns_read_uint16(nsdata->rr_data[i])-2)
+ continue; /* bad format */
+ /* add rdata of NS (= wirefmt dname), skip rdatalen bytes */
+ if(!delegpt_add_ns(dp, region, nsdata->rr_data[i]+2))
+ log_err("find_delegation: addns out of memory");
+ }
+}
+
+/** find and add DS or NSEC to delegation msg */
+static void
+find_add_ds(struct module_env* env, struct region* region,
+ struct dns_msg* msg, struct delegpt* dp, uint32_t now)
+{
+ /* Lookup the DS or NSEC at the delegation point. */
+ struct ub_packed_rrset_key* rrset = rrset_cache_lookup(
+ env->rrset_cache, dp->name, dp->namelen, LDNS_RR_TYPE_DS,
+ msg->qinfo.qclass, 0, now, 0);
+ if(!rrset) {
+ /* NOTE: this won't work for alternate NSEC schemes
+ * (opt-in, NSEC3) */
+ rrset = rrset_cache_lookup(env->rrset_cache, dp->name,
+ dp->namelen, LDNS_RR_TYPE_NSEC, msg->qinfo.qclass,
+ 0, now, 0);
+		/* Note: the PACKED_RRSET_NSEC_AT_APEX flag is not used;
+		 * since this is a referral, we need the NSEC at the parent
+		 * side of the zone cut, not the NSEC at the apex side. */
+ }
+ if(rrset) {
+		/* add it to the authority section; this is the second rrset
+		 * (the first is the NS rrset). */
+ if((msg->rep->rrsets[msg->rep->rrset_count] =
+ copy_rrset(rrset, region, now))) {
+ msg->rep->ns_numrrsets++;
+ msg->rep->rrset_count++;
+ }
+ lock_rw_unlock(&rrset->entry.lock);
+ }
+}
+
+/** create referral message with NS and query */
+static struct dns_msg*
+create_msg(uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
+ struct region* region, struct ub_packed_rrset_key* nskey,
+ struct packed_rrset_data* nsdata, uint32_t now)
+{
+ struct dns_msg* msg = (struct dns_msg*)region_alloc(region,
+ sizeof(struct dns_msg));
+ if(!msg)
+ return NULL;
+ msg->qinfo.qname = region_alloc_init(region, qname, qnamelen);
+ if(!msg->qinfo.qname)
+ return NULL;
+ msg->qinfo.qname_len = qnamelen;
+ msg->qinfo.qtype = qtype;
+ msg->qinfo.qclass = qclass;
+	/* a non-packed reply_info, because the rrsets array still needs to
+	 * grow; the embedded rrset_ref array at the end is not needed */
+ msg->rep = (struct reply_info*)region_alloc(region,
+ sizeof(struct reply_info)-sizeof(struct rrset_ref));
+ if(!msg->rep)
+ return NULL;
+ memset(msg->rep, 0,
+ sizeof(struct reply_info)-sizeof(struct rrset_ref));
+ msg->rep->flags = BIT_QR; /* with QR, no AA */
+ msg->rep->qdcount = 1;
+	/* allocate the rrsets array as large as we could possibly need:
+ * NS rrset + DS/NSEC rrset +
+ * A rrset for every NS RR
+ * AAAA rrset for every NS RR
+ */
+ msg->rep->rrsets = (struct ub_packed_rrset_key**)region_alloc(region,
+ (2 + nsdata->count*2)*sizeof(struct ub_packed_rrset_key*));
+ if(!msg->rep->rrsets)
+ return NULL;
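+	/* the NS rrset is the first rrset; it goes in the authority section */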
+ msg->rep->rrsets[0] = copy_rrset(nskey, region, now);
+ if(!msg->rep->rrsets[0])
+ return NULL;
+ msg->rep->ns_numrrsets++;
+ msg->rep->rrset_count++;
+ return msg;
+}
+
struct delegpt*
dns_cache_find_delegation(struct module_env* env, uint8_t* qname,
- size_t qnamelen, uint16_t qclass, struct region* region)
+ size_t qnamelen, uint16_t qtype, uint16_t qclass,
+ struct region* region, struct dns_msg** msg)
{
/* try to find closest NS rrset */
struct ub_packed_rrset_key* nskey;
struct packed_rrset_data* nsdata;
struct delegpt* dp;
- size_t i;
uint32_t now = (uint32_t)time(NULL);
nskey = find_deleg_ns(env, qname, qnamelen, qclass, now);
log_err("find_delegation: out of memory");
return NULL;
}
- /* add NS entries */
- for(i=0; i<nsdata->count; i++) {
- if(nsdata->rr_len[i] < 2+1) continue; /* len + root label */
- if(dname_valid(nsdata->rr_data[i]+2, nsdata->rr_len[i]-2) !=
- (size_t)ldns_read_uint16(nsdata->rr_data[i])-2)
- continue; /* bad format */
- /* add rdata of NS (= wirefmt dname), skip rdatalen bytes */
- if(!delegpt_add_ns(dp, region, nsdata->rr_data[i]+2))
- log_err("find_delegation: addns out of memory");
+ /* create referral message */
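+	/* (this lets the iterator answer queries without RD directly
+	 * from the cached referral) */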
+ if(msg) {
+ *msg = create_msg(qname, qnamelen, qtype, qclass, region,
+ nskey, nsdata, now);
+ if(!*msg) {
+ lock_rw_unlock(&nskey->entry.lock);
+ log_err("find_delegation: out of memory");
+ return NULL;
+ }
}
- /* find and add A entries */
+ add_ns(nsdata, dp, region);
lock_rw_unlock(&nskey->entry.lock); /* first unlock before next lookup*/
- if(!find_add_addrs(env, qclass, region, dp, now))
+ /* find and add DS/NSEC (if any) */
+ if(msg)
+ find_add_ds(env, region, *msg, dp, now);
+ /* find and add A entries */
+ if(!find_add_addrs(env, qclass, region, dp, now, msg))
log_err("find_delegation: addrs out of memory");
log_info("dns_cache_find_delegation returns delegpt");
delegpt_log(dp);
return dp;
}
-/** allocate rrset in region - no more locks needed */
-static struct ub_packed_rrset_key*
-copy_rrset(struct ub_packed_rrset_key* key, struct region* region,
- uint32_t now)
-{
- struct ub_packed_rrset_key* ck = region_alloc(region,
- sizeof(struct ub_packed_rrset_key));
- struct packed_rrset_data* d;
- struct packed_rrset_data* data = (struct packed_rrset_data*)
- key->entry.data;
- size_t dsize, i;
- if(!ck)
- return NULL;
- ck->id = key->id;
- memset(&ck->entry, 0, sizeof(ck->entry));
- ck->entry.hash = key->entry.hash;
- ck->entry.key = ck;
- ck->rk = key->rk;
- ck->rk.dname = region_alloc_init(region, key->rk.dname,
- key->rk.dname_len);
- if(!ck->rk.dname)
- return NULL;
- dsize = packed_rrset_sizeof(data);
- d = (struct packed_rrset_data*)region_alloc_init(region, data, dsize);
- if(!d)
- return NULL;
- ck->entry.data = d;
- packed_rrset_ptr_fixup(d);
- /* make TTLs relative - once per rrset */
- for(i=0; i<d->count + d->rrsig_count; i++)
- d->rr_ttl[i] -= now;
- d->ttl -= now;
- return ck;
-}
-
/** allocate dns_msg from query_info and reply_info */
static struct dns_msg*
tomsg(struct module_env* env, struct msgreply_entry* e, struct reply_info* r,