struct kr_query *qry;
struct kr_cache *cache;
unsigned timestamp;
- uint32_t min_ttl;
};
static int commit_rr(const char *key, void *val, void *data)
{
knot_rrset_t *rr = val;
struct rrcache_baton *baton = data;
- /* Ensure minimum TTL */
- knot_rdata_t *rd = rr->rrs.data;
- for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
- if (knot_rdata_ttl(rd) < baton->min_ttl) {
- knot_rdata_set_ttl(rd, baton->min_ttl);
- }
- rd = kr_rdataset_next(rd);
- }
/* Save RRSIG in a special cache. */
uint8_t rank = KEY_FLAG_RANK(key);
@@ ... @@
.qry = qry,
.cache = cache,
.timestamp = qry->timestamp.tv_sec,
- .min_ttl = MAX(DEFAULT_MINTTL, cache->ttl_min),
};
return map_walk(stash, &commit_rr, &baton);
}
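The clamping loop deleted here is re-added verbatim in the stash path below, so the TTL floor is applied exactly once, right before uncached entries are written out. If a second call site ever returns, the loop could be factored into a small helper. A minimal sketch, reusing only calls that already appear in this patch (knot_rdata_ttl, knot_rdata_set_ttl, kr_rdataset_next); the helper name is hypothetical:

/* Hypothetical helper, not part of this patch: raise every rdata TTL
 * in rr to at least min_ttl. */
static void rrset_clamp_ttl(knot_rrset_t *rr, uint32_t min_ttl)
{
	knot_rdata_t *rd = rr->rrs.data;
	for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
		if (knot_rdata_ttl(rd) < min_ttl) {
			knot_rdata_set_ttl(rd, min_ttl);
		}
		rd = kr_rdataset_next(rd);
	}
}

The stash loop below would then shrink to a single rrset_clamp_ttl(rr, min_ttl) call.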
@@ ... @@
if (!arr->len) {
return kr_ok();
}
+
+ uint32_t min_ttl = MAX(DEFAULT_MINTTL, req->ctx->cache.ttl_min);
/* uncached entries are located at the end */
for (ssize_t i = arr->len - 1; i >= 0; --i) {
ranked_rr_array_entry_t *entry = arr->at[i];
if (entry->cached) {
continue;
}
- const knot_rrset_t *rr = entry->rr;
+ knot_rrset_t *rr = entry->rr;
+
+ /* Ensure minimum TTL */
+ knot_rdata_t *rd = rr->rrs.data;
+ for (uint16_t j = 0; j < rr->rrs.rr_count; ++j) {
+ if (knot_rdata_ttl(rd) < min_ttl) {
+ knot_rdata_set_ttl(rd, min_ttl);
+ }
+ rd = kr_rdataset_next(rd);
+ }
+
/* Skip NSEC3 RRs and their signatures. We don't use them this way.
* They would be stored under the hashed name, etc. */
if (kr_rrset_type_maysig(rr) == KNOT_RRTYPE_NSEC3) {
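For intuition about the floor itself: min_ttl above is MAX(DEFAULT_MINTTL, ttl_min), i.e. the larger of a compile-time default and the operator-configured minimum, and every rdata TTL below it is raised to it. A standalone sketch of that arithmetic; the value 5 for DEFAULT_MINTTL and the example numbers are assumptions for illustration, not taken from this patch:

#include <stdio.h>
#include <stdint.h>

#define DEFAULT_MINTTL 5                   /* assumed compile-time floor */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* TTL a record effectively keeps after stashing: never below the floor. */
static uint32_t clamp_ttl(uint32_t ttl, uint32_t ttl_min)
{
	uint32_t min_ttl = MAX(DEFAULT_MINTTL, ttl_min); /* same expression as the patch */
	return ttl < min_ttl ? min_ttl : ttl;
}

int main(void)
{
	printf("%u\n", (unsigned)clamp_ttl(2, 60));   /* raised to the configured floor: 60 */
	printf("%u\n", (unsigned)clamp_ttl(300, 60)); /* already above the floor: 300 */
	printf("%u\n", (unsigned)clamp_ttl(2, 0));    /* falls back to DEFAULT_MINTTL: 5 */
	return 0;
}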