-- Insert an A record for the test NS name directly into the resolver cache.
local c = kres.context().cache
ns_name = todname('ns.example.com')
local ns_addr = '\1\2\3\4'
-- NOTE(review): TTL lowered from 3600999999 to 2147483647 (2^31-1), presumably
-- because the old value does not fit a signed 32-bit TTL — confirm against
-- kres.rrset()'s TTL handling.
-local rr = kres.rrset(ns_name, kres.type.A, kres.class.IN, 3600999999)
+local rr = kres.rrset(ns_name, kres.type.A, kres.class.IN, 2147483647)
assert(rr:add_rdata(ns_addr, #ns_addr))
assert(c:insert(rr, nil, ffi.C.KR_RANK_SECURE))
if (kr_fails_assert(val_new_entry.data))
return kr_error(EFAULT);
- const uint32_t ttl = rr->ttl;
- /* FIXME: consider TTLs and expirations of RRSIGs as well, just in case. */
-
/* Write the entry itself. */
struct entry_h *eh = val_new_entry.data;
memset(eh, 0, offsetof(struct entry_h, data));
eh->time = timestamp;
- eh->ttl = MAX(MIN(ttl, cache->ttl_max), cache->ttl_min);
+ eh->ttl = rr->ttl;
eh->rank = rank;
rdataset_dematerialize(&rr->rrs, eh->data);
rdataset_dematerialize(rds_sigs, eh->data + rr_ssize);
kr_cdb_pt db; /**< Storage instance */
const struct kr_cdb_api *api; /**< Storage engine */
struct kr_cdb_stats stats;
- uint32_t ttl_min, ttl_max; /**< TTL limits */
+	uint32_t ttl_min, ttl_max; /**< TTL limits; in practice they are enforced primarily in the iterator. */
/* A pair of stamps for detection of real-time shifts during runtime. */
struct timeval checkpoint_walltime; /**< Wall time on the last check-point. */
/** Assuming `rrs` was validated with `sig`, trim its TTL in case it's over-extended. */
static bool trim_ttl(knot_rrset_t *rrs, const knot_rdata_t *sig,
- uint32_t timestamp, const struct kr_query *log_qry)
+ const kr_rrset_validation_ctx_t *vctx)
{
- const uint32_t ttl_max = MIN(knot_rrsig_original_ttl(sig),
- knot_rrsig_sig_expiration(sig) - timestamp);
+ /* The trimming logic is a bit complicated.
+ *
+ * We respect configured ttl_min over the (signed) original TTL,
+ * but we very much want to avoid TTLs over signature expiration,
+ * as that could cause serious issues with downstream validators.
+ */
+ const uint32_t ttl_max = MIN(
+ MAX(knot_rrsig_original_ttl(sig), vctx->ttl_min),
+ knot_rrsig_sig_expiration(sig) - vctx->timestamp
+ );
if (likely(rrs->ttl <= ttl_max))
return false;
- if (kr_log_is_debug_qry(VALIDATOR, log_qry)) {
+ if (kr_log_is_debug_qry(VALIDATOR, vctx->log_qry)) {
auto_free char *name_str = kr_dname_text(rrs->owner),
*type_str = kr_rrtype_text(rrs->type);
- kr_log_q(log_qry, VALIDATOR, "trimming TTL of %s %s: %d -> %d\n",
+ kr_log_q(vctx->log_qry, VALIDATOR, "trimming TTL of %s %s: %d -> %d\n",
name_str, type_str, (int)rrs->ttl, (int)ttl_max);
}
rrs->ttl = ttl_max;
struct kr_svldr_ctx *ctx = calloc(1, sizeof(*ctx));
if (unlikely(!ctx))
return NULL;
- ctx->vctx.timestamp = timestamp;
+ ctx->vctx.timestamp = timestamp; // .ttl_min is implicitly zero
ctx->vctx.zone_name = knot_dname_copy(ds->owner, NULL);
if (unlikely(!ctx->vctx.zone_name))
goto fail;
// that also means we don't need to perform non-existence proofs.
const int trim_labels = (val_flgs & FLG_WILDCARD_EXPANSION) ? 1 : 0;
if (kr_check_signature(rdata_j, key->key, rrs, trim_labels) == 0) {
- trim_ttl(rrs, rdata_j, vctx->timestamp, vctx->log_qry);
+ trim_ttl(rrs, rdata_j, vctx);
vctx->result = kr_ok();
return vctx->result;
} else {
vctx->flags |= KR_DNSSEC_VFLG_WEXPAND;
}
- trim_ttl(covered, rdata_j, vctx->timestamp, vctx->log_qry);
+ trim_ttl(covered, rdata_j, vctx);
kr_dnssec_key_free(&created_key);
vctx->result = kr_ok();
knot_rrset_t *keys; /*!< DNSKEY RRSet; TTLs may get lowered when validating this set. */
const knot_dname_t *zone_name; /*!< Name of the zone containing the RRSIG RRSet. */
uint32_t timestamp; /*!< Validation time. */
+ uint32_t ttl_min; /*!< See trim_ttl() for details. */
bool has_nsec3; /*!< Whether to use NSEC3 validation. */
uint32_t qry_uid; /*!< Current query uid. */
uint32_t flags; /*!< Output - Flags. */
return false;
}
+/** Restrict all RRset TTLs to the specified bounds (if matching qry_uid).
+ *
+ * Entries whose qry_uid differs are left untouched, so only the RRsets
+ * produced by the current (sub)query get clamped.  Each TTL is raised to
+ * ttl_min or lowered to ttl_max as needed.
+ */
+static void bound_ttls(ranked_rr_array_t *array, uint32_t qry_uid,
+		uint32_t ttl_min, uint32_t ttl_max)
+{
+	/* size_t: array->len is a non-negative count; a ssize_t counter would
+	 * trigger signed/unsigned comparison and ssize_t is POSIX-only. */
+	for (size_t i = 0; i < array->len; ++i) {
+		if (array->at[i]->qry_uid != qry_uid)
+			continue;
+		uint32_t *ttl = &array->at[i]->rr->ttl;
+		if (*ttl < ttl_min) {
+			*ttl = ttl_min;
+		} else if (*ttl > ttl_max) {
+			*ttl = ttl_max;
+		}
+	}
+}
+
/** Resolve input query or continue resolution with followups.
*
* This roughly corresponds to RFC1034, 5.3.3 4a-d.
/* Finish construction of libknot-format RRsets.
* We do this even if dropping the answer, though it's probably useless. */
(void)0;
+ const struct kr_cache *cache = &req->ctx->cache;
ranked_rr_array_t *selected[] = kr_request_selected(req);
for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
ret = kr_ranked_rrarray_finalize(selected[i], query->uid, &req->pool);
- if (unlikely(ret)) {
+ if (unlikely(ret))
return KR_STATE_FAIL;
- }
+ if (!query->flags.CACHED)
+ bound_ttls(selected[i], query->uid, cache->ttl_min, cache->ttl_max);
}
return state;
.keys = qry->zone_cut.key,
.zone_name = qry->zone_cut.name,
.timestamp = qry->timestamp.tv_sec,
+ .ttl_min = req->ctx->cache.ttl_min,
.qry_uid = qry->uid,
.has_nsec3 = has_nsec3,
.flags = 0,
.keys = qry->zone_cut.key,
.zone_name = qry->zone_cut.name,
.timestamp = qry->timestamp.tv_sec,
+ .ttl_min = req->ctx->cache.ttl_min,
.qry_uid = qry->uid,
.has_nsec3 = has_nsec3,
.flags = 0,
-- Insert an RPZ passthru A record directly into the resolver cache.
local c = kres.context().cache
local passthru_addr = '\127\0\0\9'
-- NOTE(review): TTL lowered from 3600999999 to 2147483647 (2^31-1), presumably
-- because the old value does not fit a signed 32-bit TTL — confirm against
-- kres.rrset()'s TTL handling.
-rr_passthru = kres.rrset(todname('rpzpassthru.'), kres.type.A, kres.class.IN, 3600999999)
+rr_passthru = kres.rrset(todname('rpzpassthru.'), kres.type.A, kres.class.IN, 2147483647)
assert(rr_passthru:add_rdata(passthru_addr, #passthru_addr))
assert(c:insert(rr_passthru, nil, ffi.C.KR_RANK_SECURE + ffi.C.KR_RANK_AUTH))