Fixes https://gitlab.labs.nic.cz/knot/resolver/issues/93.
API of a KR_EXPORT function is changed, so ABIVER is bumped.
+Knot Resolver 1.x.y (2016-xx-yy)
+================================
+
+- It now requires Knot DNS >= 2.3.1 to link successfully.
+
Knot Resolver 1.1.1 (2016-08-24)
================================
MAJOR := 1
MINOR := 1
PATCH := 1
-ABIVER := 1
+ABIVER := 2
BUILDMODE := dynamic
HARDENING := yes
"Use in-bailiwick glue", "normal, permissive"
"Use any glue records", "permissive"
+.. function:: reorder_RR([true | false])
+
+ :param boolean value: New value for the option *(optional)*
+ :return: The (new) value of the option
+
+   If set, the resolver will vary the order of resource records within RR-sets
+   each time the answer is served from cache. It is disabled by default.
+
.. function:: user(name, [group])
:param string name: user name
"verbose(true|false)\n toggle verbose mode\n"
"option(opt[, new_val])\n get/set server option\n"
"mode(strict|normal|permissive)\n set resolver strictness level\n"
+ "reorder_RR([true|false])\n set/get reordering of RRs within RRsets\n"
"resolve(name, type[, class, flags, callback])\n resolve query, callback when it's finished\n"
"todname(name)\n convert name to wire format\n"
"tojson(val)\n convert value to JSON\n"
return true
end
+-- Trivial option alias
+function reorder_RR(val)
+ return option('REORDER_RR', val)
+end
+
-- Function aliases
-- `env.VAR returns os.getenv(VAR)`
env = {}
return found->rank;
}
-int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t drift, knot_mm_t *mm)
+int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t drift,
+ uint reorder, knot_mm_t *mm)
{
if (!dst || !src || dst == src) {
return kr_error(EINVAL);
return kr_error(ENOMEM);
}
- /* Copy valid records */
+ /* Find valid records */
+ knot_rdata_t **valid = malloc(sizeof(knot_rdata_t *) * src->rrs.rr_count);
+ uint16_t valid_count = 0;
knot_rdata_t *rd = src->rrs.data;
for (uint16_t i = 0; i < src->rrs.rr_count; ++i) {
if (knot_rdata_ttl(rd) >= drift) {
- if (knot_rdataset_add(&dst->rrs, rd, mm) != 0) {
- knot_rrset_clear(dst, mm);
- return kr_error(ENOMEM);
- }
+ valid[valid_count++] = rd;
}
rd = kr_rdataset_next(rd);
}
+
+ if (reorder && valid_count > 1) {
+ /* Reorder the valid part; it's a reversed rotation,
+ * done by two array reversals. */
+ uint16_t shift = reorder % valid_count;
+ for (uint16_t i = 0; i < shift / 2; ++i) {
+ SWAP(valid[i], valid[shift - 1 - i]);
+ }
+ for (uint16_t i = 0; i < (valid_count - shift) / 2; ++i) {
+ SWAP(valid[shift + i], valid[valid_count - 1 - i]);
+ }
+ }
+
+ int err = knot_rdataset_gather(&dst->rrs, valid, valid_count, mm);
+ free(valid);
+ if (err) {
+ knot_rrset_clear(dst, mm);
+ return kr_error(err);
+ }
+
/* Fixup TTL by time passed */
rd = dst->rrs.data;
for (uint16_t i = 0; i < dst->rrs.rr_count; ++i) {
* @param dst destination for materialized RRSet
* @param src read-only RRSet (its rdataset may be changed depending on the result)
* @param drift time passed between cache time and now
+ * @param reorder (pseudo)-random seed used to reorder the data, or zero to keep the cached order
* @param mm memory context
* @return 0 or an errcode
*/
KR_EXPORT
-int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t drift, knot_mm_t *mm);
+int kr_cache_materialize(knot_rrset_t *dst, const knot_rrset_t *src, uint32_t drift,
+ uint reorder, knot_mm_t *mm);
/**
* Insert RRSet into cache, replacing any existing data.
/* Update packet answer */
knot_rrset_t rr_copy;
- ret = kr_cache_materialize(&rr_copy, &cache_rr, drift, &pkt->mm);
+ ret = kr_cache_materialize(&rr_copy, &cache_rr, drift, qry->reorder, &pkt->mm);
if (ret == 0) {
ret = knot_pkt_put(pkt, KNOT_COMPR_HINT_QNAME, &rr_copy, KNOT_PF_FREE);
if (ret != 0) {
qry->ns.addr[0].ip.sa_family = AF_UNSPEC;
gettimeofday(&qry->timestamp, NULL);
kr_zonecut_init(&qry->zone_cut, (const uint8_t *)"", rplan->pool);
+ qry->reorder = qry->flags & QUERY_REORDER_RR
+ ? knot_wire_get_id(rplan->request->answer->wire)
+ : 0;
array_push(rplan->pending, qry);
return qry;
X(PERMISSIVE, 1 << 20) /**< Permissive resolver mode. */ \
X(STRICT, 1 << 21) /**< Strict resolver mode. */ \
X(BADCOOKIE_AGAIN, 1 << 22) /**< Query again because bad cookie returned. */ \
- X(CNAME, 1 << 23) /**< Query response contains CNAME in answer section. */
+ X(CNAME, 1 << 23) /**< Query response contains CNAME in answer section. */ \
+ X(REORDER_RR, 1 << 24) /**< Reorder cached RRs. */
/** Query flags */
enum kr_query_flag {
uint32_t flags;
uint32_t secret;
uint16_t fails;
+ uint16_t reorder; /**< Seed to reorder (cached) RRs in answer or zero. */
struct timeval timestamp;
struct kr_zonecut zone_cut;
struct kr_nsrep ns;
*/
KR_EXPORT
char *kr_module_call(struct kr_context *ctx, const char *module, const char *prop, const char *input);
+
+/** Swap two places. Note: the parameters need to be without side effects. */
+#define SWAP(x, y) do { /* http://stackoverflow.com/a/3982430/587396 */ \
+ unsigned char swap_temp[sizeof(x) == sizeof(y) ? (ssize_t)sizeof(x) : -1]; \
+ memcpy(swap_temp, &y, sizeof(x)); \
+ memcpy(&y, &x, sizeof(x)); \
+ memcpy(&x, swap_temp, sizeof(x)); \
+ } while(0)
+
/* Materialize as we'll going to do more cache lookups. */
knot_rrset_t rr_copy;
- ret = kr_cache_materialize(&rr_copy, &cached_rr, drift, cut->pool);
+ ret = kr_cache_materialize(&rr_copy, &cached_rr, drift, 0, cut->pool);
if (ret != 0) {
return ret;
}
return kr_error(ENOMEM);
}
- ret = kr_cache_materialize(*rr, &cached_rr, drift, pool);
+ ret = kr_cache_materialize(*rr, &cached_rr, drift, 0, pool);
if (ret != 0) {
knot_rrset_free(rr, pool);
return ret;
#define CACHE_TTL 10
#define CACHE_TIME 0
-int (*original_knot_rdataset_add)(knot_rdataset_t *rrs, const knot_rdata_t *rr, knot_mm_t *mm) = NULL;
+int (*original_knot_rdataset_gather)(knot_rdataset_t *dst, knot_rdata_t **src,
+ uint16_t count, knot_mm_t *mm) = NULL;
-int knot_rdataset_add(knot_rdataset_t *rrs, const knot_rdata_t *rr, knot_mm_t *mm)
+int knot_rdataset_gather(knot_rdataset_t *dst, knot_rdata_t **src, uint16_t count,
+ knot_mm_t *mm)
{
int err, err_mock;
err_mock = (int)mock();
- if (original_knot_rdataset_add == NULL) {
- original_knot_rdataset_add = dlsym(RTLD_NEXT,"knot_rdataset_add");
- assert_non_null (original_knot_rdataset_add);
+ if (original_knot_rdataset_gather == NULL) {
+ original_knot_rdataset_gather = dlsym(RTLD_NEXT,"knot_rdataset_gather");
+ assert_non_null (original_knot_rdataset_gather);
}
- err = original_knot_rdataset_add(rrs, rr, mm);
+ err = original_knot_rdataset_gather(dst, src, count, mm);
if (err_mock != 0)
err = err_mock;
return err;
global_rr.owner = NULL;
knot_rrset_init(&output_rr, NULL, 0, 0);
- kr_cache_materialize(&output_rr, &global_rr, 0, &global_mm);
+ kr_cache_materialize(&output_rr, &global_rr, 0, 0, &global_mm);
res_cmp_ok_empty = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_HEADER);
res_cmp_fail_empty = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_WHOLE);
knot_rrset_clear(&output_rr, &global_mm);
assert_false(res_cmp_fail_empty);
knot_rrset_init(&output_rr, NULL, 0, 0);
- will_return (knot_rdataset_add, 0);
- kr_cache_materialize(&output_rr, &global_rr, 0, &global_mm);
+ will_return (knot_rdataset_gather, 0);
+ kr_cache_materialize(&output_rr, &global_rr, 0, 0, &global_mm);
res_cmp_ok = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_WHOLE);
knot_rrset_clear(&output_rr, &global_mm);
assert_true(res_cmp_ok);
knot_rrset_init(&output_rr, NULL, 0, 0);
- will_return (knot_rdataset_add, KNOT_EINVAL);
- kr_cache_materialize(&output_rr, &global_rr, 0, &global_mm);
+ will_return (knot_rdataset_gather, KNOT_ENOMEM);
+ kr_cache_materialize(&output_rr, &global_rr, 0, 0, &global_mm);
res_cmp_fail = knot_rrset_equal(&global_rr, &output_rr, KNOT_RRSET_COMPARE_WHOLE);
knot_rrset_clear(&output_rr, &global_mm);
assert_false(res_cmp_fail);