Working with RRSIG TTLs requires libknot >= 2.7.1: that release moved the TTL from each knot_rdata_t onto the owning knot_rrset_t.
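Concretely, knot_rdata_init() lost its TTL argument, knot_rrset_new()/knot_rrset_init() gained one, and knot_rrset_ttl() gave way to reading rr->ttl directly, as the hunks below show. A minimal sketch of building an A record against the new API (the helper name and variables are illustrative, not part of this patch):

	#include <libknot/rrset.h>

	/* owner: a dname such as "\3com\0"; addr: 4 bytes of an IPv4 address */
	static knot_rrset_t *make_a_rrset(const knot_dname_t *owner,
	                                  const uint8_t addr[4], uint32_t ttl)
	{
		/* The TTL now belongs to the whole RR set... */
		knot_rrset_t *rr = knot_rrset_new(owner, KNOT_RRTYPE_A,
		                                  KNOT_CLASS_IN, ttl, NULL);
		if (!rr)
			return NULL;
		/* ...so knot_rrset_add_rdata() no longer takes one. */
		if (knot_rrset_add_rdata(rr, addr, 4, NULL) != KNOT_EOK) {
			knot_rrset_free(&rr, NULL);
			return NULL;
		}
		return rr;
	}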
}
if (zs->r_type == KNOT_RRTYPE_A || zs->r_type == KNOT_RRTYPE_AAAA) {
knot_rdata_t rdata[RDATA_ARR_MAX];
- knot_rdata_init(rdata, zs->r_data_length, zs->r_data, zs->r_ttl);
+ knot_rdata_init(rdata, zs->r_data_length, zs->r_data);
kr_zonecut_add(hints, zs->r_owner, rdata);
}
}
knot_rdataset_merge
knot_rrset_add_rdata
knot_rrset_init_empty
- knot_rrset_ttl
knot_rrset_txt_dump
knot_rrset_txt_dump_data
knot_rrset_size
end,
ttl = function(rr)
assert(ffi.istype(knot_rrset_t, rr))
- return tonumber(knot.knot_rrset_ttl(rr))
+ return tonumber(rr.ttl)
end,
class = function(rr, val)
assert(ffi.istype(knot_rrset_t, rr))
if val then rr.rclass = val end
return tonumber(rr.rclass)
end,
-- Add binary RDATA to the RR set
- add_rdata = function (rr, rdata, rdlen, ttl)
+ add_rdata = function (rr, rdata, rdlen, no_ttl)
assert(ffi.istype(knot_rrset_t, rr))
- local ret = knot.knot_rrset_add_rdata(rr, rdata, tonumber(rdlen), tonumber(ttl or 0), nil)
+ assert(no_ttl == nil, 'add_rdata() no longer accepts a TTL')
+ local ret = knot.knot_rrset_add_rdata(rr, rdata, tonumber(rdlen), nil)
if ret ~= 0 then return nil, knot_error_t(ret) end
return true
end,
-- Construct a single-RR temporary set while minimizing copying.
local ret
do
- local rrs = knot_rrset_t(rr.owner, rr.type, kres.class.IN)
- rrs:add_rdata(rr.rdata, #rr.rdata, rr.ttl)
+ local rrs = knot_rrset_t(rr.owner, rr.type, kres.class.IN, rr.ttl)
+ rrs:add_rdata(rr.rdata, #rr.rdata)
ret = rrs:txt_dump(style)
end
zone_import_ctx_t *z_import = (zone_import_ctx_t *)s->process.data;
knot_rrset_t *new_rr = knot_rrset_new(s->r_owner, s->r_type, s->r_class,
- &z_import->pool);
+ s->r_ttl, &z_import->pool);
if (!new_rr) {
kr_log_error("[zscanner] line %"PRIu64": error creating rrset\n",
s->line_counter);
return -1;
}
int res = knot_rrset_add_rdata(new_rr, s->r_data, s->r_data_length,
- s->r_ttl, &z_import->pool);
+ &z_import->pool);
if (res != KNOT_EOK) {
kr_log_error("[zscanner] line %"PRIu64": error adding rdata to rrset\n",
s->line_counter);
if (ret) return kr_ok(); /* some aren't really errors */
assert(val_new_entry.data);
- /* Compute TTL, just in case they weren't equal. */
- uint32_t ttl = -1;
- const knot_rdataset_t *rdatasets[] = { &rr->rrs, rds_sigs, NULL };
- for (int j = 0; rdatasets[j]; ++j) {
- knot_rdata_t *rd = rdatasets[j]->data;
- assert(rdatasets[j]->rr_count);
- for (uint16_t l = 0; l < rdatasets[j]->rr_count; ++l) {
- ttl = MIN(ttl, knot_rdata_ttl(rd));
- rd = kr_rdataset_next(rd);
- }
- } /* TODO: consider expirations of RRSIGs as well, just in case. */
+ const uint32_t ttl = rr->ttl;
+ /* FIXME: consider TTLs and expirations of RRSIGs as well, just in case. */
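One possible shape for that FIXME, now that TTLs live on RR sets: if the caller could pass the RRSIG RR set itself (rather than just its rdataset rds_sigs, which no longer carries a TTL), the minimum becomes a single comparison. A sketch only; sig_rr is a hypothetical parameter:

	/* Sketch: per-rrset TTLs make the old per-rdata scan unnecessary;
	 * RRSIG signature expirations would still need separate handling. */
	uint32_t ttl = rr->ttl;
	if (sig_rr != NULL)
		ttl = MIN(ttl, sig_rr->ttl);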
/* Write the entry itself. */
struct entry_h *eh = val_new_entry.data;
/*TODO: reorder*/
KR_EXPORT
int kr_cache_materialize(knot_rdataset_t *dst, const struct kr_cache_p *ref,
- uint32_t new_ttl, knot_mm_t *pool);
+ knot_mm_t *pool);
if (is_negative) {
/* Use SOA minimum TTL for negative answers. */
if (rr->type == KNOT_RRTYPE_SOA) {
- return MIN(knot_rrset_ttl(rr),
- knot_soa_minimum(&rr->rrs));
+ return MIN(rr->ttl, knot_soa_minimum(rr->rrs.rdata));
} else {
continue; /* Use SOA only for negative answers. */
}
if (knot_rrtype_is_metatype(rr->type)) {
continue; /* Skip metatypes. */
}
- /* Find minimum TTL in the record set */
- knot_rdata_t *rd = rr->rrs.data;
- for (uint16_t j = 0; j < rr->rrs.rr_count; ++j) {
- has_ttl = true;
- ttl = MIN(ttl, knot_rdata_ttl(rd));
- rd = kr_rdataset_next(rd);
- }
+ ttl = MIN(ttl, rr->ttl);
}
}
/* If no valid TTL present, go with zero (will get clamped to minimum). */
for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
for (unsigned k = 0; k < sec->count; ++k) {
- const knot_rrset_t *rr = knot_pkt_rr(sec, k);
- knot_rdata_t *rd = rr->rrs.data;
- for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
- /* We need to be careful:
- * due to enforcing minimum TTL on packet,
- * some records may be below that value.
- * We keep those records at TTL 0. */
- uint32_t ttl = knot_rdata_ttl(rd);
- if (drift <= ttl) {
- ttl -= drift;
- } else {
- ttl = 0;
- }
- knot_rdata_set_ttl(rd, ttl);
- rd = kr_rdataset_next(rd);
+ /* FIXME: we cast away constness here; knot_pkt_rr() returns a const RR set */
+ knot_rrset_t *rrs = (knot_rrset_t *)knot_pkt_rr(sec, k);
+ /* We need to be careful: due to enforcing minimum TTL
+ * on packet, some records may be below that value.
+ * We keep those records at TTL 0. */
+ if (rrs->ttl >= drift) {
+ rrs->ttl -= drift;
+ } else {
+ rrs->ttl = 0;
}
}
}
* Return the number of bytes consumed or an error code.
*/
static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t * const data,
- const uint8_t *data_bound, uint32_t ttl, knot_mm_t *pool)
+ const uint8_t *data_bound, knot_mm_t *pool)
{
+ /* FIXME: knot_rdataset_t and the cache's serialized rdataset now share the same binary format, so this copying could likely be avoided */
assert(rds && data && data_bound && data_bound > data && !rds->data);
assert(pool); /* not required, but that's our current usage; guard leaks */
const uint8_t *d = data; /* iterates over the cache data */
d += sizeof(len) + len;
rdata_len_sum += len;
}
- /* Each item in knot_rdataset_t needs TTL (4B) + rdlength (2B) + rdata */
- rds->data = mm_alloc(pool, rdata_len_sum + ((size_t)rds->rr_count) * (4 + 2));
+ /* Each item in knot_rdataset_t needs rdlength (2B) + rdata */
+ rds->data = mm_alloc(pool, rdata_len_sum + (size_t)rds->rr_count * 2);
if (!rds->data) {
return kr_error(ENOMEM);
}
uint16_t len;
memcpy(&len, d, sizeof(len));
d += sizeof(len);
- knot_rdata_init(d_out, len, d, ttl);
+ knot_rdata_init(d_out, len, d);
d += len;
//d_out = kr_rdataset_next(d_out);
- d_out += 4 + 2 + len; /* TTL + rdlen + rdata */
+ d_out += 2 + len; /* rdlen + rdata */
}
//VERBOSE_MSG(NULL, "materialized from %d B\n", (int)(d - data));
return d - data;
}
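For orientation: the cache blob that rdataset_materialize() walks is just rds->rr_count items stored back to back, each a 2-byte length followed by that many rdata bytes. A standalone sketch of the first-pass size computation (the helper name is made up for illustration):

	#include <stdint.h>
	#include <string.h>

	/* Sum the bytes occupied by `count` serialized rdata items, each
	 * stored as a uint16_t length followed by the rdata itself;
	 * mirrors the first pass of rdataset_materialize() above. */
	static size_t serialized_rdataset_size(const uint8_t *d, uint16_t count)
	{
		size_t total = 0;
		for (uint16_t i = 0; i < count; ++i) {
			uint16_t len;
			memcpy(&len, d + total, sizeof(len));
			total += sizeof(len) + len;
		}
		return total;
	}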
int kr_cache_materialize(knot_rdataset_t *dst, const struct kr_cache_p *ref,
- uint32_t new_ttl, knot_mm_t *pool)
+ knot_mm_t *pool)
{
struct entry_h *eh = ref->raw_data;
- return rdataset_materialize(dst, eh->data, ref->raw_bound, new_ttl, pool);
+ return rdataset_materialize(dst, eh->data, ref->raw_bound, pool);
}
}
/* Materialize the base RRset. */
knot_rrset_t *rr = ans->rrsets[id].set.rr
- = knot_rrset_new(owner, type, KNOT_CLASS_IN, ans->mm);
+ = knot_rrset_new(owner, type, KNOT_CLASS_IN, new_ttl, ans->mm);
if (!rr) {
assert(!ENOMEM);
return kr_error(ENOMEM);
}
- int ret = rdataset_materialize(&rr->rrs, eh->data, eh_bound, new_ttl, ans->mm);
+ int ret = rdataset_materialize(&rr->rrs, eh->data, eh_bound, ans->mm);
if (ret < 0) goto fail;
size_t data_off = ret;
ans->rrsets[id].set.rank = eh->rank;
bool want_rrsigs = true; /* LATER(optim.): might be omitted in some cases. */
if (want_rrsigs) {
ret = rdataset_materialize(&ans->rrsets[id].sig_rds, eh->data + data_off,
- eh_bound, new_ttl, ans->mm);
+ eh_bound, ans->mm);
if (ret < 0) goto fail;
/* Sanity check: we consumed exactly all data. */
int unused_bytes = eh_bound - (uint8_t *)eh->data - data_off - ret;
pkt->rr[pkt->rrset_count] = (knot_rrset_t){
.owner = knot_dname_copy(rrset->set.rr->owner, &pkt->mm),
/* ^^ well, another copy isn't really needed */
+ .ttl = rrset->set.rr->ttl,
.type = KNOT_RRTYPE_RRSIG,
.rclass = KNOT_CLASS_IN,
.rrs = *rdss[i],
assert(nsec_rr);
const uint32_t new_ttl_log =
- kr_verbose_status ? knot_rrset_ttl(nsec_rr) : -1;
+ kr_verbose_status ? nsec_rr->ttl : -1;
uint8_t *bm = NULL;
uint16_t bm_size;
knot_nsec_bitmap(&nsec_rr->rrs, &bm, &bm_size);
bool is_new_key = false;
knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, name);
if (!ta_rr) {
- ta_rr = knot_rrset_new(name, KNOT_RRTYPE_DS, KNOT_CLASS_IN, NULL);
+ ta_rr = knot_rrset_new(name, KNOT_RRTYPE_DS, KNOT_CLASS_IN, ttl, NULL);
is_new_key = true;
}
/* Merge-in new key data */
- if (!ta_rr || (rdlen > 0 && knot_rrset_add_rdata(ta_rr, rdata, rdlen, ttl, NULL) != 0)) {
+ if (!ta_rr || (rdlen > 0 && knot_rrset_add_rdata(ta_rr, rdata, rdlen, NULL) != 0)) {
knot_rrset_free(&ta_rr, NULL);
return kr_error(ENOMEM);
}
size_t addr_len = kr_inaddr_len(&qry->ns.addr[0].ip);
/* @warning _NOT_ thread-safe */
static knot_rdata_t rdata_arr[RDATA_ARR_MAX];
- knot_rdata_init(rdata_arr, addr_len, (const uint8_t *)addr, 0);
+ knot_rdata_init(rdata_arr, addr_len, (const uint8_t *)addr);
return kr_zonecut_del(&qry->zone_cut, qry->ns.name, rdata_arr);
} else {
return kr_zonecut_del_all(&qry->zone_cut, qry->ns.name);
}
/* Create empty RR */
knot_rrset_t rr;
- knot_rrset_init(&rr, knot_dname_copy(name, &pkt->mm), rtype, rclass);
+ knot_rrset_init(&rr, knot_dname_copy(name, &pkt->mm), rtype, rclass, ttl);
/* Create RDATA
* @warning _NOT_ thread safe.
*/
static knot_rdata_t rdata_arr[RDATA_ARR_MAX];
- knot_rdata_init(rdata_arr, rdlen, rdata, ttl);
+ knot_rdata_init(rdata_arr, rdlen, rdata);
knot_rdataset_add(&rr.rrs, rdata_arr, &pkt->mm);
/* Append RR */
return knot_pkt_put(pkt, 0, &rr, KNOT_PF_FREE);
}
knot_rrset_t cached_rr;
- knot_rrset_init(&cached_rr, /*const-cast*/(knot_dname_t *)ns, rrtype, KNOT_CLASS_IN);
- if (kr_cache_materialize(&cached_rr.rrs, &peek, new_ttl, cut->pool) < 0) {
+ knot_rrset_init(&cached_rr, /*const-cast*/(knot_dname_t *)ns, rrtype,
+ KNOT_CLASS_IN, new_ttl);
+ if (kr_cache_materialize(&cached_rr.rrs, &peek, cut->pool) < 0) {
return;
}
knot_rdata_t *rd = cached_rr.rrs.data;
}
/* Materialize the rdataset temporarily, for simplicity. */
knot_rdataset_t ns_rds = { 0, NULL };
- ret = kr_cache_materialize(&ns_rds, &peek, new_ttl, cut->pool);
+ ret = kr_cache_materialize(&ns_rds, &peek, cut->pool);
if (ret < 0) {
return ret;
}
*rr = NULL;
return kr_error(ENOMEM);
}
- knot_rrset_init(*rr, /*const-cast*/(knot_dname_t *)owner, type, KNOT_CLASS_IN);
- ret = kr_cache_materialize(&(*rr)->rrs, &peek, new_ttl, pool);
+ knot_rrset_init(*rr, /*const-cast*/(knot_dname_t *)owner, type,
+ KNOT_CLASS_IN, new_ttl);
+ ret = kr_cache_materialize(&(*rr)->rrs, &peek, pool);
if (ret < 0) {
knot_rrset_free(rr, pool);
return ret;
}
knot_dname_t *qname = knot_dname_copy(qry->sname, &pkt->mm);
knot_rrset_t rr;
- knot_rrset_init(&rr, qname, KNOT_RRTYPE_PTR, KNOT_CLASS_IN);
+ knot_rrset_init(&rr, qname, KNOT_RRTYPE_PTR, KNOT_CLASS_IN, 0);
/* Append address records from hints */
uint8_t *addr = pack_last(*addr_set);
if (addr != NULL) {
size_t len = pack_obj_len(addr);
void *addr_val = pack_obj_val(addr);
- knot_rrset_add_rdata(&rr, addr_val, len, 0, &pkt->mm);
+ knot_rrset_add_rdata(&rr, addr_val, len, &pkt->mm);
}
return put_answer(pkt, qry, &rr, use_nodata);
}
knot_dname_t *qname = knot_dname_copy(qry->sname, &pkt->mm);
knot_rrset_t rr;
- knot_rrset_init(&rr, qname, qry->stype, qry->sclass);
+ knot_rrset_init(&rr, qname, qry->stype, qry->sclass, 0);
size_t family_len = sizeof(struct in_addr);
if (rr.type == KNOT_RRTYPE_AAAA) {
family_len = sizeof(struct in6_addr);
size_t len = pack_obj_len(addr);
void *addr_val = pack_obj_val(addr);
if (len == family_len) {
- knot_rrset_add_rdata(&rr, addr_val, len, 0, &pkt->mm);
+ knot_rrset_add_rdata(&rr, addr_val, len, &pkt->mm);
}
addr = pack_obj_next(addr);
}
static knot_rdata_t rdata_arr[RDATA_ARR_MAX];
size_t addr_len = kr_inaddr_len((struct sockaddr *)&ss);
const uint8_t *raw_addr = (const uint8_t *)kr_inaddr((struct sockaddr *)&ss);
- knot_rdata_init(rdata_arr, addr_len, raw_addr, 0);
+ knot_rdata_init(rdata_arr, addr_len, raw_addr);
return rdata_arr;
}
/* Build RDATA */
knot_rdata_t rdata[RDATA_ARR_MAX];
- knot_rdata_init(rdata, knot_dname_size(ptr_name), ptr_name, 0);
+ knot_rdata_init(rdata, knot_dname_size(ptr_name), ptr_name);
return kr_zonecut_add(hints, key, rdata);
}
return kr_error(EINVAL);
}
knot_rdata_t ptr_rdata[RDATA_ARR_MAX];
- knot_rdata_init(ptr_rdata, knot_dname_size(key), key, 0);
+ knot_rdata_init(ptr_rdata, knot_dname_size(key), key);
if (addr) {
/* Remove the pair. */
same(rr_text:gsub('%s+', ' '), 'com. 1 TXT "hello"', 'rrset to text works')
same(kres.dname2str(todname('com.')), 'com.', 'domain name conversion works')
-- test creating rrset
- rr = kres.rrset(todname('com.'), kres.type.A, kres.class.IN)
+ rr = kres.rrset(todname('com.'), kres.type.A, kres.class.IN, 66)
ok(ffi.istype(kres.rrset, rr), 'created an empty RR')
same(rr:owner(), '\3com\0', 'created RR has correct owner')
same(rr:class(), kres.class.IN, 'created RR has correct class')
same(rr.type, kres.type.A, 'created RR has correct type')
-- test adding rdata
same(rr:wire_size(), 0, 'empty RR wire size is zero')
- ok(rr:add_rdata('\1\2\3\4', 4, 66), 'adding RDATA works')
+ ok(rr:add_rdata('\1\2\3\4', 4), 'adding RDATA works')
same(rr:wire_size(), 5 + 4 + 4 + 2 + 4, 'RR wire size works after adding RDATA')
-- test conversion to text
local expect = 'com. 66 A 1.2.3.4\n'
same(rr:txt_dump(), expect, 'RR to text works')
-- create a dummy rrsig
- local rrsig = kres.rrset(todname('com.'), kres.type.RRSIG, kres.class.IN)
- rrsig:add_rdata('\0\1', 2, 0)
+ local rrsig = kres.rrset(todname('com.'), kres.type.RRSIG, kres.class.IN, 0)
+ rrsig:add_rdata('\0\1', 2)
same(rr:rdcount(), 1, 'add_rdata really added RDATA')
-- check rrsig matching
same(rr.type, rrsig:type_covered(), 'rrsig type covered matches covered RR type')
ok(rr:is_covered_by(rrsig), 'rrsig is covering a record')
-- test rrset merging
- local copy = kres.rrset(rr:owner(), rr.type)
- ok(copy:add_rdata('\4\3\2\1', 4, 66), 'adding second RDATA works')
+ local copy = kres.rrset(rr:owner(), rr.type, kres.class.IN, 66)
+ ok(copy:add_rdata('\4\3\2\1', 4), 'adding second RDATA works')
ok(rr:merge_rdata(copy), 'merge_rdata works')
same(rr:rdcount(), 2, 'RDATA count is correct after merge_rdata')
expect = 'com. 66 A 1.2.3.4\n' ..
local copy = kres.packet(512)
copy:question(todname('hello'), kres.class.IN, kres.type.A)
copy:begin(kres.section.ANSWER)
- local rr = kres.rrset(pkt:qname(), kres.type.A)
- rr:add_rdata('\4\3\2\1', 4, 66)
+ local rr = kres.rrset(pkt:qname(), kres.type.A, kres.class.IN, 66)
+ rr:add_rdata('\4\3\2\1', 4)
ok(copy:put_rr(rr), 'adding RR sets directly works')
ok(copy:recycle())
same({s.hit, s.miss, s.insert, s.delete}, {0, 0, 0, 0}, 'context cache stats works')
-- insert a record into cache
local rdata = '\1\2\3\4'
- local rr = kres.rrset('\3com\0', kres.type.A, kres.class.IN)
- rr:add_rdata(rdata, #rdata, 66)
+ local rr = kres.rrset('\3com\0', kres.type.A, kres.class.IN, 66)
+ rr:add_rdata(rdata, #rdata)
ok(c:insert(rr, nil, 0, 0), 'cache insertion works')
ok(c:sync(), 'cache sync works')
same(s.insert, 1, 'cache insertion increments counters')
test_stats,
test_resize,
test_context_cache,
-}
\ No newline at end of file
+}
/* Create payload */
tmp_buf[0] = num;
test_randstr((char *)(tmp_buf + 1), tmp_buf[0] + 1);
- knot_rdata_init(rdata_buf, num + 1, tmp_buf, ttl);
+ knot_rdata_init(rdata_buf, num + 1, tmp_buf);
/* Assign static buffers. */
- knot_rrset_init(rr, owner_buf, KNOT_RRTYPE_TXT, KNOT_CLASS_IN);
+ knot_rrset_init(rr, owner_buf, KNOT_RRTYPE_TXT, KNOT_CLASS_IN, ttl);
rr->rrs.rr_count = 1;
rr->rrs.data = rdata_buf;
}
global_rr.owner = NULL;
- knot_rrset_init(&output_rr, NULL, 0, 0);
+ knot_rrset_init(&output_rr, NULL, 0, 0, 0);
- kr_cache_materialize(&output_rr, &global_rr, 0, 0, &global_mm);
+ kr_cache_materialize(&output_rr, &global_rr, 0, &global_mm);
res_cmp_ok_empty = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_HEADER);
res_cmp_fail_empty = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_WHOLE);
knot_rrset_clear(&output_rr, &global_mm);
- knot_rrset_init(&output_rr, NULL, 0, 0);
+ knot_rrset_init(&output_rr, NULL, 0, 0, 0);
will_return (knot_rdataset_gather, 0);
- kr_cache_materialize(&output_rr, &global_rr, 0, 0, &global_mm);
+ kr_cache_materialize(&output_rr, &global_rr, 0, &global_mm);
res_cmp_ok = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_WHOLE);
knot_rrset_clear(&output_rr, &global_mm);
assert_true(res_cmp_ok);
- knot_rrset_init(&output_rr, NULL, 0, 0);
+ knot_rrset_init(&output_rr, NULL, 0, 0, 0);
will_return (knot_rdataset_gather, KNOT_ENOMEM);
- kr_cache_materialize(&output_rr, &global_rr, 0, 0, &global_mm);
+ kr_cache_materialize(&output_rr, &global_rr, 0, &global_mm);
res_cmp_fail = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_WHOLE);
knot_rrset_clear(&output_rr, &global_mm);
assert_false(res_cmp_fail);