When a whole packet is cached (instead of individual RRs),
let's simplify the way the packet's TTL gets computed.
The previous mechanism came from commit
5b383a2bb7,
probably stemming from a misunderstanding of:
https://datatracker.ietf.org/doc/html/rfc2308#section-5
Anyway, I see no motivation for it, and this way we get rid of some
weird cases where the TTL of some records might effectively get extended
(unless it was below the cache.min_ttl() setting, 5s by default).
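
To make the intended behaviour concrete, here is a minimal standalone sketch; it is not resolver code, and clamp_ttl as well as all the numbers are made up for illustration. It shows the simplified per-packet TTL being limited by the cache.min_ttl()/cache.max_ttl() bounds at store time, the same shape as the eh->ttl assignment further below:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Hypothetical helper: clamp the packet-wide minimum TTL to [ttl_min, ttl_max]. */
    static uint32_t clamp_ttl(uint32_t pkt_ttl, uint32_t ttl_min, uint32_t ttl_max)
    {
        return MAX(MIN(pkt_ttl, ttl_max), ttl_min);
    }

    int main(void)
    {
        /* e.g. ttl_min = 5 s, ttl_max = 6 days (518400 s) */
        printf("%u\n", (unsigned)clamp_ttl(60, 5, 518400)); /* 60: kept as computed */
        printf("%u\n", (unsigned)clamp_ttl(2, 5, 518400));  /* 5: only sub-min_ttl entries still get extended */
        return 0;
    }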
{
struct http_data *data;
nghttp2_data_provider prov;
- const bool is_negative = kr_response_classify(pkt) & (PKT_NODATA|PKT_NXDOMAIN);
data = malloc(sizeof(struct http_data));
if (!data)
return kr_error(ENOMEM); /* out of memory; don't dereference NULL below */
data->pos = 0;
data->on_write = on_write;
data->req = req;
- data->ttl = packet_ttl(pkt, is_negative);
+ data->ttl = packet_ttl(pkt);
prov.source.ptr = data;
prov.read_callback = read_callback;
int kr_cache_remove(struct kr_cache *, const knot_dname_t *, uint16_t);
int kr_cache_remove_subtree(struct kr_cache *, const knot_dname_t *, _Bool, int);
int kr_cache_commit(struct kr_cache *);
-uint32_t packet_ttl(const knot_pkt_t *, _Bool);
+uint32_t packet_ttl(const knot_pkt_t *);
typedef struct {
int sock_type;
_Bool tls;
#include "lib/cache/impl.h"
-/** Compute TTL for a packet. Generally it's minimum TTL, with extra conditions. */
+/** Compute TTL for a packet: the minimum RR TTL, or zero if none. (Callers may apply limits.) */
KR_EXPORT
-uint32_t packet_ttl(const knot_pkt_t *pkt, bool is_negative)
+uint32_t packet_ttl(const knot_pkt_t *pkt)
{
bool has_ttl = false;
uint32_t ttl = TTL_MAX_MAX;
- /* Find minimum entry TTL in the packet or SOA minimum TTL. */
for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
for (unsigned k = 0; k < sec->count; ++k) {
const knot_rrset_t *rr = knot_pkt_rr(sec, k);
- if (is_negative) {
- /* Use SOA minimum TTL for negative answers. */
- if (rr->type == KNOT_RRTYPE_SOA) {
- return MIN(rr->ttl, knot_soa_minimum(rr->rrs.rdata));
- } else {
- continue; /* Use SOA only for negative answers. */
- }
- }
if (knot_rrtype_is_metatype(rr->type)) {
continue; /* Skip metatypes (e.g. OPT): their TTL field isn't a real TTL. */
}
ttl = MIN(ttl, rr->ttl);
has_ttl = true;
}
}
- /* If no valid TTL present, go with zero (will get clamped to minimum). */
return has_ttl ? ttl : 0;
}
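
As a concrete before/after comparison for a negative answer, here is a tiny self-contained example; the record set and TTL values are invented for illustration (a 30 s CNAME in the answer, a SOA with TTL 3600 and minimum field 900 in the authority section):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        /* Hypothetical NXDOMAIN reply (illustrative values only). */
        unsigned cname_ttl = 30;    /* answer section */
        unsigned soa_ttl = 3600;    /* authority section */
        unsigned soa_minimum = 900; /* SOA minimum field */

        /* Old behaviour: only the SOA was considered for negative answers,
         * so the cached packet outlived the 30 s CNAME. */
        unsigned old_ttl = MIN(soa_ttl, soa_minimum);
        /* New behaviour: plain minimum over all RR TTLs in the packet. */
        unsigned new_ttl = MIN(cname_ttl, soa_ttl);

        printf("old=%u new=%u\n", old_ttl, new_ttl); /* old=900 new=30 */
        return 0;
    }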
struct entry_h *eh = val_new_entry.data;
memset(eh, 0, offsetof(struct entry_h, data));
eh->time = qry->timestamp.tv_sec;
- eh->ttl = MAX(MIN(packet_ttl(pkt, is_negative), cache->ttl_max), cache->ttl_min);
+ eh->ttl = MAX(MIN(packet_ttl(pkt), cache->ttl_max), cache->ttl_min);
eh->rank = rank;
eh->is_packet = true;
eh->has_optout = qf->DNSSEC_OPTOUT;
/* SPDX-License-Identifier: GPL-3.0-or-later */
#include <libknot/packet/pkt.h>
-uint32_t packet_ttl(const knot_pkt_t *pkt, bool is_negative);
+uint32_t packet_ttl(const knot_pkt_t *pkt);
local ffi = require('ffi')
local condition = require('cqueues.condition')
-local function get_http_ttl(pkt)
- local an_records = pkt:section(kres.section.ANSWER)
- local is_negative = #an_records <= 0
- return ffi.C.packet_ttl(pkt, is_negative)
-end
-
-- Trace execution of DNS queries
local function serve_doh(h, stream)
local input
local cond = condition.new()
local waiting, done = false, false
local finish_cb = function (answer, _)
- output_ttl = get_http_ttl(answer)
+ output_ttl = ffi.C.packet_ttl(answer)
-- binary output
output = ffi.string(answer.wire, answer.size)
if waiting then