if(!qstate->return_msg || !qstate->return_msg->rep)
return 0;
+ /* We don't store the reply if its TTL is 0 unless serve-expired is
+ * enabled. Such a reply won't be reusable and would simply be a waste
+ * for the backend. This also matches the default behavior of
+ * dns_cache_store_msg(). */
+ if(qstate->return_msg->rep->ttl == 0 &&
+ !qstate->env->cfg->serve_expired)
+ return 0;
if(verbosity >= VERB_ALGO)
log_dns_msg("cachedb encoding", &qstate->return_msg->qinfo,
qstate->return_msg->rep);
&expiry, sizeof(expiry));
expiry = be64toh(expiry);
- if((time_t)expiry < *qstate->env->now)
+ if((time_t)expiry < *qstate->env->now &&
+ !qstate->env->cfg->serve_expired)
return 0;
return 1;
}
+/* Adjust the TTL of the given RRset by 'subtract'. If 'subtract' is
+ * negative, set all of its TTLs to 0. */
static void
packed_rrset_ttl_subtract(struct packed_rrset_data* data, time_t subtract)
{
size_t i;
size_t total = data->count + data->rrsig_count;
- if(data->ttl > subtract)
+ if(subtract >= 0 && data->ttl > subtract)
data->ttl -= subtract;
else data->ttl = 0;
for(i=0; i<total; i++) {
- if(data->rr_ttl[i] > subtract)
+ if(subtract >= 0 && data->rr_ttl[i] > subtract)
data->rr_ttl[i] -= subtract;
else data->rr_ttl[i] = 0;
}
}
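As a minimal standalone sketch of the subtract-or-clamp rule above (illustrative only, not part of the patch; the helper name clamp_ttl is made up): a negative 'subtract', which the cachedb code below uses as its serve-expired marker, zeroes every TTL because the 'subtract >= 0' guard fails.

#include <stdio.h>
#include <time.h>

/* Same rule as packed_rrset_ttl_subtract(), applied to a single TTL. */
static time_t clamp_ttl(time_t ttl, time_t subtract)
{
	if(subtract >= 0 && ttl > subtract)
		return ttl - subtract;
	return 0;
}

int main(void)
{
	printf("%ld\n", (long)clamp_ttl(300, 60)); /* 240: normal aging */
	printf("%ld\n", (long)clamp_ttl(30, 60));  /* 0: already older than its TTL */
	printf("%ld\n", (long)clamp_ttl(300, -1)); /* 0: serve-expired marker */
	return 0;
}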
+/* Adjust the TTL of a DNS message and its RRs by 'adjust'. If 'adjust' is
+ * negative, set the TTLs to 0. */
static void
adjust_msg_ttl(struct dns_msg* msg, time_t adjust)
{
size_t i;
- if(msg->rep->ttl > adjust)
+ if(adjust >= 0 && msg->rep->ttl > adjust)
msg->rep->ttl -= adjust;
else msg->rep->ttl = 0;
msg->rep->prefetch_ttl = PREFETCH_TTL_CALC(msg->rep->ttl);
adjust = *qstate->env->now - (time_t)timestamp;
if(qstate->return_msg->rep->ttl < adjust) {
verbose(VERB_ALGO, "cachedb msg expired");
- return 0; /* message expired */
+ /* If serve-expired is enabled, we still use an expired message,
+ * setting its TTL to 0. */
+ if(qstate->env->cfg->serve_expired)
+ adjust = -1;
+ else
+ return 0; /* message expired */
}
verbose(VERB_ALGO, "cachedb msg adjusted down by %d", (int)adjust);
adjust_msg_ttl(qstate->return_msg, adjust);
+
+ /* As in the unbound worker, if serve-expired is enabled and the
+ * message is considered expired, mark the state so a refetch will
+ * be scheduled. The comparison between 'expiry' and 'now' should be
+ * redundant given how these values were calculated, but we check it
+ * just in case, as good_expiry_and_qinfo() does. */
+ if(qstate->env->cfg->serve_expired &&
+ (adjust == -1 || (time_t)expiry < *qstate->env->now)) {
+ qstate->need_refetch = 1;
+ }
+
return 1;
}
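To make the TTL arithmetic above concrete (numbers are illustrative): suppose a message was stored with rep->ttl = 300 at timestamp T and is read back at now = T + 400. Then adjust = 400, which exceeds the message TTL, so without serve-expired the lookup fails and is treated as a cache miss. With serve-expired enabled, adjust is forced to -1, adjust_msg_ttl() clamps the message and RR TTLs to 0, need_refetch is set, and the expired answer is still used while a fresh fetch is scheduled once this state finishes.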
static void
cachedb_intcache_store(struct module_qstate* qstate)
{
+ uint32_t store_flags = qstate->query_flags;
+
+ if(qstate->env->cfg->serve_expired)
+ store_flags |= DNSCACHE_STORE_ZEROTTL;
if(!qstate->return_msg)
return;
(void)dns_cache_store(qstate->env, &qstate->qinfo,
qstate->return_msg->rep, 0, qstate->prefetch_leeway, 0,
- qstate->region, qstate->query_flags);
+ qstate->region, store_flags);
}
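For context, the behavior added in this file only takes effect when both cachedb and serve-expired are enabled. A rough unbound.conf sketch (assuming a build with --enable-cachedb; the backend shown is the built-in test backend and is only a placeholder):

server:
	serve-expired: yes
	module-config: "validator cachedb iterator"
cachedb:
	backend: "testframe"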
/**
+19 September 2017: Wouter
+ - use a cachedb answer even if it's "expired" when serve-expired is yes
+ (patch from Jinmei Tatuya).
+ - trigger refetching of the answer in that case (this will bypass
+ cachedb lookup)
+ - allow storing a 0-TTL answer from cachedb in the in-memory message
+ cache when serve-expired is yes
+
18 September 2017: Ralph
- Fix #1400: allowing use of global cache on ECS-forwarding unless
always-forward.
void
dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
hashvalue_type hash, struct reply_info* rep, time_t leeway, int pside,
- struct reply_info* qrep, struct regional* region)
+ struct reply_info* qrep, uint32_t flags, struct regional* region)
{
struct msgreply_entry* e;
time_t ttl = rep->ttl;
* unnecessary, because the cache gets locked per rrset. */
reply_info_set_ttls(rep, *env->now);
store_rrsets(env, rep, *env->now, leeway, pside, qrep, region);
- if(ttl == 0) {
+ if(ttl == 0 && !(flags & DNSCACHE_STORE_ZEROTTL)) {
/* we do not store the message, but we did store the RRs,
* which could be useful for delegation information */
verbose(VERB_ALGO, "TTL 0: dropped msg from cache");
int
dns_cache_store(struct module_env* env, struct query_info* msgqinf,
struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
- struct regional* region, uint16_t flags)
+ struct regional* region, uint32_t flags)
{
struct reply_info* rep = NULL;
/* alloc, malloc properly (not in region, like msg is) */
* Not AA from cache. Not CD in cache (depends on client bit). */
rep->flags |= (BIT_RA | BIT_QR);
rep->flags &= ~(BIT_AA | BIT_CD);
- h = query_info_hash(&qinf, flags);
+ h = query_info_hash(&qinf, (uint16_t)flags);
dns_cache_store_msg(env, &qinf, h, rep, leeway, pside, msgrep,
- region);
+ flags, region);
/* qname is used inside query_info_entrysetup, and set to
* NULL. If it has not been used, free it. free(0) is safe. */
free(qinf.qname);
struct regional;
struct delegpt;
+/** Flags to control behavior of dns_cache_store() and dns_cache_store_msg().
+ * Each flag must be an unsigned 32-bit value larger than 0xffff, so that it
+ * cannot collide with the 16-bit query flags carried in the lower half of
+ * the flags word. */
+
+/** Allow caching a DNS message with a zero TTL. */
+#define DNSCACHE_STORE_ZEROTTL 0x100000
+
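The callers illustrate how the 32-bit flags word is split: the low 16 bits carry the DNS query flags that feed query_info_hash() (hence the cast back to uint16_t in dns_cache_store() above), while store-policy bits such as DNSCACHE_STORE_ZEROTTL sit above them. A condensed sketch of how cachedb_intcache_store() in this patch combines them (variable names illustrative):

	uint32_t store_flags = qstate->query_flags;  /* low 16 bits: BIT_RD|BIT_CD... */
	store_flags |= DNSCACHE_STORE_ZEROTTL;       /* policy bit, above 0xffff */
	/* dns_cache_store() hashes only (uint16_t)store_flags, so policy bits
	 * never influence the cache key. */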
/**
* Region allocated message reply
*/
* @param region: region to allocate better entries from cache into.
* (used when is_referral is false).
* @param flags: flags with BIT_CD for AAAA queries in dns64 translation.
+ * The higher 16 bits are used internally to customize the cache policy.
+ * (See DNSCACHE_STORE_xxx flags).
* @return 0 on alloc error (out of memory).
*/
int dns_cache_store(struct module_env* env, struct query_info* qinf,
struct reply_info* rep, int is_referral, time_t leeway, int pside,
- struct regional* region, uint16_t flags);
+ struct regional* region, uint32_t flags);
/**
* Store message in the cache. Stores in message cache and rrset cache.
* from the parentside of the zonecut. This means that the type NS
* can be updated to full TTL even in prefetch situations.
* @param qrep: message that can be altered with better rrs from cache.
+ * @param flags: customization flags for the cache policy.
* @param region: to allocate into for qmsg.
*/
void dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
hashvalue_type hash, struct reply_info* rep, time_t leeway, int pside,
- struct reply_info* qrep, struct regional* region);
+ struct reply_info* qrep, uint32_t flags, struct regional* region);
/**
* Find a delegation from the cache.
return 1;
}
+static void mesh_schedule_prefetch(struct mesh_area* mesh,
+ struct query_info* qinfo, uint16_t qflags, time_t leeway, int run);
+
void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo,
uint16_t qflags, time_t leeway)
+{
+ mesh_schedule_prefetch(mesh, qinfo, qflags, leeway, 1);
+}
+
+/* Internal backend routine of mesh_new_prefetch(). It takes one additional
+ * parameter, 'run', which controls whether to run the prefetch state
+ * immediately. When this function is called internally, 'run' can be
+ * 0 (false), in which case the new state is only made runnable, so it
+ * is not run recursively on top of the current state. */
+static void mesh_schedule_prefetch(struct mesh_area* mesh,
+ struct query_info* qinfo, uint16_t qflags, time_t leeway, int run)
{
struct mesh_state* s = mesh_area_find(mesh, NULL, qinfo,
qflags&(BIT_RD|BIT_CD), 0, 0);
s->list_select = mesh_jostle_list;
}
}
+
+ if(!run) {
+ rbtree_insert(&mesh->run, &s->run_node);
+ return;
+ }
+
mesh_run(mesh, s, module_event_new, NULL);
}
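Both call sites added in this patch pass run=1: the public mesh_new_prefetch() entry point keeps its old behavior, and the serve-expired refetch in mesh_continue() further below can also run the new state immediately because the finished state has already been deleted at that point. Passing 0 merely queues the new state on the mesh run tree, for callers that are themselves executing inside mesh_run().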
mstate->s.prefetch_leeway = 0;
mstate->s.no_cache_lookup = 0;
mstate->s.no_cache_store = 0;
+ mstate->s.need_refetch = 0;
+
/* init modules */
for(i=0; i<env->mesh->mods.num; i++) {
mstate->s.minfo[i] = NULL;
return 1;
}
+/* Extract the query info and flags from 'mstate' into '*qinfop' and '*qflags'.
+ * Since this is only used for the internal refetch of an otherwise-expired
+ * answer, we simply ignore the rare case where memory allocation fails. */
+static void
+mesh_copy_qinfo(struct mesh_state* mstate, struct query_info** qinfop,
+ uint16_t* qflags)
+{
+ struct regional* region = mstate->s.env->scratch;
+ struct query_info* qinfo;
+
+ qinfo = regional_alloc_init(region, &mstate->s.qinfo, sizeof(*qinfo));
+ if(!qinfo)
+ return;
+ qinfo->qname = regional_alloc_init(region, qinfo->qname,
+ qinfo->qname_len);
+ if(!qinfo->qname)
+ return;
+ *qinfop = qinfo;
+ *qflags = mstate->s.query_flags;
+}
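The copy is taken from the scratch region because the mesh state that owns the original qinfo is deleted before the prefetch is scheduled (see the module_finished handling in mesh_continue() below); it only needs to remain valid for the duration of the mesh_schedule_prefetch() call, which creates its own mesh state from it.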
+
/**
* Continue processing the mesh state at another module.
* Handles module to modules transfer of control.
}
if(s == module_finished) {
if(mstate->s.curmod == 0) {
+ struct query_info* qinfo = NULL;
+ uint16_t qflags;
+
mesh_query_done(mstate);
mesh_walk_supers(mesh, mstate);
+
+ /* If the answer to the query needs to be refetched
+ * from an external DNS server, a prefetch is scheduled
+ * after the current state has been removed, so make a
+ * copy of the query info here while it is still valid. */
+ if(mstate->s.need_refetch)
+ mesh_copy_qinfo(mstate, &qinfo, &qflags);
+
mesh_state_delete(&mstate->s);
+ if(qinfo) {
+ mesh_schedule_prefetch(mesh, qinfo, qflags,
+ 0, 1);
+ }
return 0;
}
/* pass along the locus of control */
int no_cache_lookup;
/** whether modules should store answer in the cache */
int no_cache_store;
+ /** whether to refetch a fresh answer on finishing this state */
+ int need_refetch;
/**
* Attributes of clients that share the qstate that may affect IP-based