/** @internal Unregister layer callback reference from registry. */
#define LAYER_UNREGISTER(L, api, name) do { \
- int *cb_slot = (int *)((char *)api + sizeof(knot_layer_api_t)); \
+ int *cb_slot = (int *)((char *)api + sizeof(kr_layer_api_t)); \
if (cb_slot[SLOT_ ## name] > 0) \
luaL_unref(L, LUA_REGISTRYINDEX, cb_slot[SLOT_ ## name]); \
} while(0)
ret = l_ffi_call(L, 1);
}
/* Free the layer API wrapper (unconst it) */
- knot_layer_api_t* api = module->data;
+ kr_layer_api_t* api = module->data;
if (api) {
LAYER_UNREGISTER(L, api, begin);
LAYER_UNREGISTER(L, api, finish);
/** @internal Helper for retrieving layer Lua function by name. */
#define LAYER_FFI_CALL(ctx, slot) \
- int *cb_slot = (int *)((char *)(ctx)->api + sizeof(knot_layer_api_t)); \
+ int *cb_slot = (int *)((char *)(ctx)->api + sizeof(kr_layer_api_t)); \
if (cb_slot[SLOT_ ## slot] <= 0) { \
return ctx->state; \
} \
lua_rawgeti(L, LUA_REGISTRYINDEX, cb_slot[SLOT_ ## slot]); \
lua_pushnumber(L, ctx->state)
-static int l_ffi_layer_begin(knot_layer_t *ctx, void *module_param)
+static int l_ffi_layer_begin(kr_layer_t *ctx, void *module_param)
{
LAYER_FFI_CALL(ctx, begin);
lua_pushlightuserdata(L, ctx->data);
return l_ffi_call(L, 2);
}
-static int l_ffi_layer_reset(knot_layer_t *ctx)
+static int l_ffi_layer_reset(kr_layer_t *ctx)
{
LAYER_FFI_CALL(ctx, reset);
lua_pushlightuserdata(L, ctx->data);
return l_ffi_call(L, 2);
}
-static int l_ffi_layer_finish(knot_layer_t *ctx)
+static int l_ffi_layer_finish(kr_layer_t *ctx)
{
struct kr_request *req = ctx->data;
LAYER_FFI_CALL(ctx, finish);
return l_ffi_call(L, 3);
}
-static int l_ffi_layer_consume(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int l_ffi_layer_consume(kr_layer_t *ctx, knot_pkt_t *pkt)
{
- if (ctx->state & KNOT_STATE_FAIL) {
+ if (ctx->state & KR_STATE_FAIL) {
return ctx->state; /* Already failed, skip */
}
LAYER_FFI_CALL(ctx, consume);
return l_ffi_call(L, 3);
}
-static int l_ffi_layer_produce(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int l_ffi_layer_produce(kr_layer_t *ctx, knot_pkt_t *pkt)
{
- if (ctx->state & (KNOT_STATE_FAIL)) {
+ if (ctx->state & (KR_STATE_FAIL)) {
return ctx->state; /* Already failed or done, skip */
}
LAYER_FFI_CALL(ctx, produce);
/** @internal Conditionally register layer trampoline
* @warning Expects 'module.layer' to be on top of Lua stack. */
#define LAYER_REGISTER(L, api, name) do { \
- int *cb_slot = (int *)((char *)api + sizeof(knot_layer_api_t)); \
+ int *cb_slot = (int *)((char *)api + sizeof(kr_layer_api_t)); \
lua_getfield((L), -1, #name); \
if (!lua_isnil((L), -1)) { \
(api)->name = l_ffi_layer_ ## name; \
} while(0)
/** @internal Create C layer api wrapper. */
-static knot_layer_api_t *l_ffi_layer_create(lua_State *L, struct kr_module *module)
+static kr_layer_api_t *l_ffi_layer_create(lua_State *L, struct kr_module *module)
{
/* Fabricate layer API wrapping the Lua functions
* reserve slots after it for references to Lua callbacks. */
- const size_t api_length = sizeof(knot_layer_api_t) + (SLOT_count * SLOT_size);
- knot_layer_api_t *api = malloc(api_length);
+ const size_t api_length = sizeof(kr_layer_api_t) + (SLOT_count * SLOT_size);
+ kr_layer_api_t *api = malloc(api_length);
if (api) {
memset(api, 0, api_length);
LAYER_REGISTER(L, api, begin);
}
/** @internal Retrieve C layer api wrapper. */
-static const knot_layer_api_t *l_ffi_layer(struct kr_module *module)
+static const kr_layer_api_t *l_ffi_layer(struct kr_module *module)
{
if (module) {
- return (const knot_layer_api_t *)module->data;
+ return (const kr_layer_api_t *)module->data;
}
return NULL;
}
task->finished = true;
/* Send back answer */
(void) qr_task_send(task, task->source.handle, (struct sockaddr *)&task->source.addr, task->req.answer);
- return state == KNOT_STATE_DONE ? 0 : kr_error(EIO);
+ return state == KR_STATE_DONE ? 0 : kr_error(EIO);
}
static int qr_task_step(struct qr_task *task, const struct sockaddr *packet_source, knot_pkt_t *packet)
task->addrlist_count = 0;
task->addrlist_turn = 0;
int state = kr_resolve_consume(&task->req, packet_source, packet);
- while (state == KNOT_STATE_PRODUCE) {
+ while (state == KR_STATE_PRODUCE) {
state = kr_resolve_produce(&task->req, &task->addrlist, &sock_type, task->pktbuf);
if (unlikely(++task->iter_count > KR_ITER_LIMIT || task->timeouts >= KR_TIMEOUT_LIMIT)) {
- return qr_task_finalize(task, KNOT_STATE_FAIL);
+ return qr_task_finalize(task, KR_STATE_FAIL);
}
}
/* We're done, no more iterations needed */
- if (state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
+ if (state & (KR_STATE_DONE|KR_STATE_FAIL)) {
return qr_task_finalize(task, state);
} else if (!task->addrlist || sock_type < 0) {
return qr_task_step(task, NULL, NULL);
/* Start next step with timeout, fatal if can't start a timer. */
if (ret != 0) {
subreq_finalize(task, packet_source, packet);
- return qr_task_finalize(task, KNOT_STATE_FAIL);
+ return qr_task_finalize(task, KR_STATE_FAIL);
}
return 0;
}
.. code-block:: c
- int consume(knot_layer_t *ctx, knot_pkt_t *pkt)
+ int consume(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *request = ctx->data;
struct kr_query *query = request->current_query;
.. code-block:: c
- int produce(knot_layer_t *ctx, knot_pkt_t *pkt)
+ int produce(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *request = ctx->data;
struct kr_query *cur = request->current_query;
/* This flag makes the resolver move the query
* to the "resolved" list. */
cur->flags |= QUERY_RESOLVED;
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
/* Pass-through. */
.. code-block:: c
- int finish(knot_layer_t *ctx)
+ int finish(kr_layer_t *ctx)
{
struct kr_request *request = ctx->data;
struct kr_rplan *rplan = request->rplan;
* Each state represents the state machine transition,
* and determines readiness for the next action.
*/
-enum knot_layer_state {
- KNOT_STATE_NOOP = 0, /*!< N/A */
- KNOT_STATE_CONSUME = 1 << 0, /*!< Consume data. */
- KNOT_STATE_PRODUCE = 1 << 1, /*!< Produce data. */
- KNOT_STATE_DONE = 1 << 2, /*!< Finished. */
- KNOT_STATE_FAIL = 1 << 3 /*!< Error. */
+enum kr_layer_state {
+ KR_STATE_NOOP = 0, /*!< N/A */
+ KR_STATE_CONSUME = 1 << 0, /*!< Consume data. */
+ KR_STATE_PRODUCE = 1 << 1, /*!< Produce data. */
+ KR_STATE_DONE = 1 << 2, /*!< Finished. */
+ KR_STATE_FAIL = 1 << 3 /*!< Error. */
};
/* Forward declarations. */
-struct knot_layer_api;
+struct kr_layer_api;
/*! \brief Packet processing context. */
-typedef struct knot_layer {
+typedef struct kr_layer {
knot_mm_t *mm; /* Processing memory context. */
- uint16_t state; /* Bitmap of enum knot_layer_state. */
+ uint16_t state; /* Bitmap of enum kr_layer_state. */
void *data; /* Module specific. */
- const struct knot_layer_api *api;
-} knot_layer_t;
+ const struct kr_layer_api *api;
+} kr_layer_t;
/*! \brief Packet processing module API. */
-struct knot_layer_api {
- int (*begin)(knot_layer_t *ctx, void *module_param);
- int (*reset)(knot_layer_t *ctx);
- int (*finish)(knot_layer_t *ctx);
- int (*consume)(knot_layer_t *ctx, knot_pkt_t *pkt);
- int (*produce)(knot_layer_t *ctx, knot_pkt_t *pkt);
- int (*fail)(knot_layer_t *ctx, knot_pkt_t *pkt);
+struct kr_layer_api {
+ int (*begin)(kr_layer_t *ctx, void *module_param);
+ int (*reset)(kr_layer_t *ctx);
+ int (*finish)(kr_layer_t *ctx);
+ int (*consume)(kr_layer_t *ctx, knot_pkt_t *pkt);
+ int (*produce)(kr_layer_t *ctx, knot_pkt_t *pkt);
+ int (*fail)(kr_layer_t *ctx, knot_pkt_t *pkt);
void *data;
};
-typedef struct knot_layer_api knot_layer_api_t;
+typedef struct kr_layer_api kr_layer_api_t;
/** Pickled layer state (api, input, state). */
struct kr_layer_pickle {
struct kr_layer_pickle *next;
- const struct knot_layer_api *api;
+ const struct kr_layer_api *api;
knot_pkt_t *pkt;
unsigned state;
};
/* Repurpose layer states. */
-#define KNOT_STATE_YIELD KNOT_STATE_NOOP
+#define KR_STATE_YIELD KR_STATE_NOOP
const knot_rdata_t *rdata = rr->rrs.data;
if (!(query->flags & QUERY_ALLOW_LOCAL) &&
!is_valid_addr(knot_rdata_data(rdata), knot_rdata_rdlen(rdata))) {
- return KNOT_STATE_CONSUME; /* Ignore invalid addresses */
+ return KR_STATE_CONSUME; /* Ignore invalid addresses */
}
int ret = kr_zonecut_add(&query->zone_cut, rr->owner, rdata);
if (ret != 0) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
- return KNOT_STATE_CONSUME;
+ return KR_STATE_CONSUME;
}
static int update_parent(const knot_rrset_t *rr, struct kr_query *qry)
/* Scrub DNSSEC records when not requested. */
if (!knot_pkt_has_dnssec(answer)) {
if (rr->type != knot_pkt_qtype(answer) && knot_rrtype_is_dnssec(rr->type)) {
- return KNOT_STATE_DONE; /* Scrub */
+ return KR_STATE_DONE; /* Scrub */
}
}
/* Copy record, as it may be accessed after packet processing. */
int ret = knot_pkt_put(answer, hint, copy, KNOT_PF_FREE);
if (ret != KNOT_EOK) {
knot_wire_set_tc(answer->wire);
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_request *req)
{
struct kr_query *qry = req->current_query;
struct kr_zonecut *cut = &qry->zone_cut;
- int state = KNOT_STATE_CONSUME;
+ int state = KR_STATE_CONSUME;
/* Authority MUST be at/below the authority of the nameserver, otherwise
* possible cache injection attempt. */
if (!knot_dname_in(cut->name, rr->owner)) {
DEBUG_MSG("<= authority: ns outside bailiwick\n");
#ifdef STRICT_MODE
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
#else
/* Workaround: ignore out-of-bailiwick NSs for authoritative answers,
* but fail for referrals. This is important to detect lame answers. */
if (knot_pkt_section(pkt, KNOT_ANSWER)->count == 0) {
- state = KNOT_STATE_FAIL;
+ state = KR_STATE_FAIL;
}
return state;
#endif
} else {
kr_zonecut_set(cut, rr->owner);
}
- state = KNOT_STATE_DONE;
+ state = KR_STATE_DONE;
}
/* Fetch glue for each NS */
static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
- int result = KNOT_STATE_CONSUME;
+ int result = KR_STATE_CONSUME;
struct kr_query *qry = req->current_query;
const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);
/* Stub resolution doesn't process authority */
if (qry->flags & QUERY_STUB) {
- return KNOT_STATE_CONSUME;
+ return KR_STATE_CONSUME;
}
#ifdef STRICT_MODE
/* AA, terminate resolution chain. */
if (knot_wire_get_aa(pkt->wire)) {
- return KNOT_STATE_CONSUME;
+ return KR_STATE_CONSUME;
}
#else
/* Work around servers sending back CNAME with different delegation and no AA. */
if (an->count > 0 && ns->count > 0) {
const knot_rrset_t *rr = knot_pkt_rr(an, 0);
if (rr->type == KNOT_RRTYPE_CNAME) {
- return KNOT_STATE_CONSUME;
+ return KR_STATE_CONSUME;
}
}
#endif
if (rr->type == KNOT_RRTYPE_NS) {
int state = update_cut(pkt, rr, req);
switch(state) {
- case KNOT_STATE_DONE: result = state; break;
- case KNOT_STATE_FAIL: return state; break;
+ case KR_STATE_DONE: result = state; break;
+ case KR_STATE_FAIL: return state; break;
default: /* continue */ break;
}
} else if (rr->type == KNOT_RRTYPE_SOA && knot_dname_is_sub(rr->owner, qry->zone_cut.name)) {
(pkt_class & (PKT_NOERROR|PKT_NXDOMAIN|PKT_REFUSED|PKT_NODATA))) {
DEBUG_MSG("<= found cut, retrying with non-minimized name\n");
query->flags |= QUERY_NO_MINIMIZE;
- return KNOT_STATE_CONSUME;
+ return KR_STATE_CONSUME;
}
/* This answer didn't improve resolution chain, therefore must be authoritative (relaxed to negative). */
if (!(query->flags & QUERY_STUB) && !is_authoritative(pkt, query)) {
if (pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) {
DEBUG_MSG("<= lame response: non-auth sent negative response\n");
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
hint = KNOT_COMPR_HINT_QNAME;
}
int state = is_final ? update_answer(rr, hint, req->answer) : update_parent(rr, query);
- if (state == KNOT_STATE_FAIL) {
+ if (state == KR_STATE_FAIL) {
return state;
}
/* can_follow is false, therefore QUERY_DNSSEC_WANT flag is set.
}
if (cname_chain_len > an->count || cname_chain_len > KR_CNAME_CHAIN_LIMIT) {
DEBUG_MSG("<= too long cname chain\n");
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
/* Don't use pending_cname immediately.
 * There can be records for the "old" cname. */
continue;
}
finalize_answer(pkt, query, req);
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
}
DEBUG_MSG("<= cname chain, following\n");
q->stype == query->stype &&
knot_dname_is_equal(q->sname, cname)) {
DEBUG_MSG("<= cname chain loop\n");
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
struct kr_query *next = kr_rplan_push(&req->rplan, query->parent, cname, query->sclass, query->stype);
if (!next) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
next->flags |= QUERY_AWAIT_CUT;
/* Want DNSSEC if it's possible to secure this name (e.g. is covered by any TA) */
} else if (!query->parent) {
finalize_answer(pkt, query, req);
}
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
/** Error handling, RFC1034 5.3.3, 4d. */
static int resolve_error(knot_pkt_t *pkt, struct kr_request *req)
{
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
/* State-less single resolution iteration step, not needed. */
-static int reset(knot_layer_t *ctx) { return KNOT_STATE_PRODUCE; }
+static int reset(kr_layer_t *ctx) { return KR_STATE_PRODUCE; }
/* Set resolution context and parameters. */
-static int begin(knot_layer_t *ctx, void *module_param)
+static int begin(kr_layer_t *ctx, void *module_param)
{
- if (ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
+ if (ctx->state & (KR_STATE_DONE|KR_STATE_FAIL)) {
return ctx->state;
}
/*
const struct kr_request *req = ctx->data;
const knot_pkt_t *pkt = req->qsource.packet;
if (!pkt || knot_wire_get_qdcount(pkt->wire) == 0) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
return reset(ctx);
}
return kr_ok();
}
-static int prepare_query(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int prepare_query(kr_layer_t *ctx, knot_pkt_t *pkt)
{
assert(pkt && ctx);
struct kr_request *req = ctx->data;
struct kr_query *query = req->current_query;
- if (!query || ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
+ if (!query || ctx->state & (KR_STATE_DONE|KR_STATE_FAIL)) {
return ctx->state;
}
/* Make query */
int ret = kr_make_query(query, pkt);
if (ret != 0) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
- return KNOT_STATE_CONSUME;
+ return KR_STATE_CONSUME;
}
static int resolve_badmsg(knot_pkt_t *pkt, struct kr_request *req, struct kr_query *query)
return resolve_error(pkt, req);
} else {
query->flags |= QUERY_SAFEMODE;
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
#else
return resolve_error(pkt, req);
*
* This roughly corresponds to RFC1034, 5.3.3 4a-d.
*/
-static int resolve(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
{
assert(pkt && ctx);
struct kr_request *req = ctx->data;
}
query->flags |= QUERY_TCP;
}
- return KNOT_STATE_CONSUME;
+ return KR_STATE_CONSUME;
}
#ifndef NDEBUG
return resolve_error(pkt, req);
} else {
query->flags |= QUERY_NO_MINIMIZE; /* Drop minimisation as a safe-guard. */
- return KNOT_STATE_CONSUME;
+ return KR_STATE_CONSUME;
}
}
case KNOT_RCODE_FORMERR:
/* Resolve authority to see if it's referral or authoritative. */
int state = process_authority(pkt, req);
switch(state) {
- case KNOT_STATE_CONSUME: /* Not referral, process answer. */
+ case KR_STATE_CONSUME: /* Not referral, process answer. */
DEBUG_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
state = process_answer(pkt, req);
break;
- case KNOT_STATE_DONE: /* Referral */
+ case KR_STATE_DONE: /* Referral */
DEBUG_MSG("<= referral response, follow\n");
break;
default:
}
/** Module implementation. */
-const knot_layer_api_t *iterate_layer(struct kr_module *module)
+const kr_layer_api_t *iterate_layer(struct kr_module *module)
{
- static const knot_layer_api_t _layer = {
+ static const kr_layer_api_t _layer = {
.begin = &begin,
.reset = &reset,
.consume = &resolve,
return loot_cache_pkt(cache, pkt, qname, rrtype, want_secure, timestamp, flags);
}
-static int pktcache_peek(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int pktcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_query *qry = req->current_query;
- if (ctx->state & (KNOT_STATE_FAIL|KNOT_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
+ if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
return ctx->state; /* Already resolved/failed */
}
if (qry->ns.addr[0].ip.sa_family != AF_UNSPEC) {
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
knot_wire_set_aa(pkt->wire);
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
return ctx->state;
}
return limit_ttl(ttl);
}
-static int pktcache_stash(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int pktcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_query *qry = req->current_query;
/* Cache only answers that make query resolved (i.e. authoritative)
* that didn't fail during processing and are negative. */
- if (qry->flags & QUERY_CACHED || ctx->state & KNOT_STATE_FAIL) {
+ if (qry->flags & QUERY_CACHED || ctx->state & KR_STATE_FAIL) {
return ctx->state; /* Don't cache anything if failed. */
}
/* Cache only authoritative answers from IN class. */
}
/** Module implementation. */
-const knot_layer_api_t *pktcache_layer(struct kr_module *module)
+const kr_layer_api_t *pktcache_layer(struct kr_module *module)
{
- static const knot_layer_api_t _layer = {
+ static const kr_layer_api_t _layer = {
.produce = &pktcache_peek,
.consume = &pktcache_stash
};
return ret;
}
-static int rrcache_peek(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int rrcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_query *qry = req->current_query;
- if (ctx->state & (KNOT_STATE_FAIL|KNOT_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
+ if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
return ctx->state; /* Already resolved/failed */
}
if (qry->ns.addr[0].ip.sa_family != AF_UNSPEC) {
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
knot_wire_set_aa(pkt->wire);
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
return ctx->state;
}
return kr_ok();
}
-static int rrcache_stash(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int rrcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_query *qry = req->current_query;
- if (!qry || ctx->state & KNOT_STATE_FAIL) {
+ if (!qry || ctx->state & KR_STATE_FAIL) {
return ctx->state;
}
/* Do not cache truncated answers. */
}
/** Module implementation. */
-const knot_layer_api_t *rrcache_layer(struct kr_module *module)
+const kr_layer_api_t *rrcache_layer(struct kr_module *module)
{
- static const knot_layer_api_t _layer = {
+ static const kr_layer_api_t _layer = {
.produce = &rrcache_peek,
.consume = &rrcache_stash
};
DEBUG_MSG(qry, "<= parent: updating DNSKEY\n");
parent->zone_cut.key = knot_rrset_copy(qry->zone_cut.key, parent->zone_cut.pool);
if (!parent->zone_cut.key) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
break;
case KNOT_RRTYPE_DS:
} else { /* DS existence proven. */
parent->zone_cut.trust_anchor = knot_rrset_copy(qry->zone_cut.trust_anchor, parent->zone_cut.pool);
if (!parent->zone_cut.trust_anchor) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
break;
return NULL;
}
-static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
{
int ret = 0;
struct kr_request *req = ctx->data;
struct kr_query *qry = req->current_query;
/* Ignore faulty or unprocessed responses. */
- if (ctx->state & (KNOT_STATE_FAIL|KNOT_STATE_CONSUME)) {
+ if (ctx->state & (KR_STATE_FAIL|KR_STATE_CONSUME)) {
return ctx->state;
}
if (!(qry->flags & QUERY_CACHED) && !knot_pkt_has_dnssec(pkt) && !use_signatures) {
DEBUG_MSG(qry, "<= got insecure response\n");
qry->flags |= QUERY_DNSSEC_BOGUS;
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
/* Track difference between current TA and signer name.
const knot_dname_t *ta_name = qry->zone_cut.trust_anchor ? qry->zone_cut.trust_anchor->owner : NULL;
const knot_dname_t *signer = signature_authority(pkt);
if (track_pc_change && ta_name && (!signer || !knot_dname_is_equal(ta_name, signer))) {
- if (ctx->state == KNOT_STATE_YIELD) { /* Already yielded for revalidation. */
- return KNOT_STATE_FAIL;
+ if (ctx->state == KR_STATE_YIELD) { /* Already yielded for revalidation. */
+ return KR_STATE_FAIL;
}
DEBUG_MSG(qry, ">< cut changed, needs revalidation\n");
if (!signer) {
}
qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
} /* else zone cut matches, but DS/DNSKEY doesn't => refetch. */
- return KNOT_STATE_YIELD;
+ return KR_STATE_YIELD;
}
/* Check if this is a DNSKEY answer, check trust chain and store. */
if (ret != 0) {
DEBUG_MSG(qry, "<= bad keys, broken trust chain\n");
qry->flags |= QUERY_DNSSEC_BOGUS;
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
if (ret != 0) {
DEBUG_MSG(qry, "<= bad NXDOMAIN proof\n");
qry->flags |= QUERY_DNSSEC_BOGUS;
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
} else {
DEBUG_MSG(qry, "<= bad NODATA proof\n");
qry->flags |= QUERY_DNSSEC_BOGUS;
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
}
if (ret != 0) {
DEBUG_MSG(qry, "<= couldn't validate RRSIGs\n");
qry->flags |= QUERY_DNSSEC_BOGUS;
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
/* Check and update current delegation point security status. */
ret = update_delegation(req, qry, pkt, has_nsec3);
if (ret != 0) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
/* Update parent query zone cut */
if (qry->parent) {
if (update_parent_keys(qry, qtype) != 0) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
DEBUG_MSG(qry, "<= answer valid, OK\n");
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
/** Module implementation. */
-const knot_layer_api_t *validate_layer(struct kr_module *module)
+const kr_layer_api_t *validate_layer(struct kr_module *module)
{
- static const knot_layer_api_t _layer = {
+ static const kr_layer_api_t _layer = {
.consume = &validate,
};
/* Store module reference */
#include "lib/module.h"
/* List of embedded modules */
-const knot_layer_api_t *iterate_layer(struct kr_module *module);
-const knot_layer_api_t *validate_layer(struct kr_module *module);
-const knot_layer_api_t *rrcache_layer(struct kr_module *module);
-const knot_layer_api_t *pktcache_layer(struct kr_module *module);
+const kr_layer_api_t *iterate_layer(struct kr_module *module);
+const kr_layer_api_t *validate_layer(struct kr_module *module);
+const kr_layer_api_t *rrcache_layer(struct kr_module *module);
+const kr_layer_api_t *pktcache_layer(struct kr_module *module);
static const struct kr_module embedded_modules[] = {
{ "iterate", NULL, NULL, NULL, iterate_layer, NULL, NULL, NULL },
{ "validate", NULL, NULL, NULL, validate_layer, NULL, NULL, NULL },
typedef int (module_init_cb)(struct kr_module *);
typedef int (module_deinit_cb)(struct kr_module *);
typedef int (module_config_cb)(struct kr_module *, const char *);
-typedef const knot_layer_api_t* (module_layer_cb)(struct kr_module *);
+typedef const kr_layer_api_t* (module_layer_cb)(struct kr_module *);
typedef struct kr_prop *(module_prop_cb)(void);
typedef char *(kr_prop_cb)(void *, struct kr_module *, const char *);
#define KR_MODULE_API ((uint32_t) 0x20150402)
* @internal Defer execution of current query.
* The current layer state and input will be pushed to a stack and resumed on next iteration.
*/
-static int consume_yield(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
knot_pkt_t *pkt_copy = knot_pkt_new(NULL, pkt->size, &req->pool);
}
return kr_error(ENOMEM);
}
-static int begin_yield(knot_layer_t *ctx, void *module) { return kr_ok(); }
-static int reset_yield(knot_layer_t *ctx) { return kr_ok(); }
-static int finish_yield(knot_layer_t *ctx) { return kr_ok(); }
-static int produce_yield(knot_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
+static int begin_yield(kr_layer_t *ctx, void *module) { return kr_ok(); }
+static int reset_yield(kr_layer_t *ctx) { return kr_ok(); }
+static int finish_yield(kr_layer_t *ctx) { return kr_ok(); }
+static int produce_yield(kr_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
/** @internal Macro for iterating module layers. */
#define RESUME_LAYERS(from, req, qry, func, ...) \
for (size_t i = (from); i < (req)->ctx->modules->len; ++i) { \
struct kr_module *mod = (req)->ctx->modules->at[i]; \
if (mod->layer) { \
- struct knot_layer layer = {.state = (req)->state, .api = mod->layer(mod), .data = (req)}; \
+ struct kr_layer layer = {.state = (req)->state, .api = mod->layer(mod), .data = (req)}; \
if (layer.api && layer.api->func) { \
(req)->state = layer.api->func(&layer, ##__VA_ARGS__); \
- if ((req)->state == KNOT_STATE_YIELD) { \
+ if ((req)->state == KR_STATE_YIELD) { \
func ## _yield(&layer, ##__VA_ARGS__); \
break; \
} \
#define ITERATE_LAYERS(req, qry, func, ...) RESUME_LAYERS(0, req, qry, func, ##__VA_ARGS__)
/** @internal Find layer id matching API. */
-static inline size_t layer_id(struct kr_request *req, const struct knot_layer_api *api) {
+static inline size_t layer_id(struct kr_request *req, const struct kr_layer_api *api) {
module_array_t *modules = req->ctx->modules;
for (size_t i = 0; i < modules->len; ++i) {
struct kr_module *mod = modules->at[i];
knot_pkt_t *answer = request->answer;
/* Always set SERVFAIL for bogus answers. */
- if (state == KNOT_STATE_FAIL && rplan->pending.len > 0) {
+ if (state == KR_STATE_FAIL && rplan->pending.len > 0) {
struct kr_query *last = array_tail(rplan->pending);
if ((last->flags & QUERY_DNSSEC_WANT) && (last->flags & QUERY_DNSSEC_BOGUS)) {
return answer_fail(answer);
/* Set AD=1 if succeeded and requested secured answer. */
const bool has_ad = knot_wire_get_ad(answer->wire);
knot_wire_clear_ad(answer->wire);
- if (state == KNOT_STATE_DONE && rplan->resolved.len > 0) {
+ if (state == KR_STATE_DONE && rplan->resolved.len > 0) {
struct kr_query *last = array_tail(rplan->resolved);
/* Do not set AD for RRSIG query, as we can't validate it. */
const bool secure = (last->flags & QUERY_DNSSEC_WANT) &&
request->ctx = ctx;
request->answer = answer;
request->options = ctx->options;
- request->state = KNOT_STATE_CONSUME;
+ request->state = KR_STATE_CONSUME;
request->current_query = NULL;
array_init(request->authority);
array_init(request->additional);
/* Expect first query */
kr_rplan_init(&request->rplan, request, &request->pool);
- return KNOT_STATE_CONSUME;
+ return KR_STATE_CONSUME;
}
static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
qry = kr_rplan_push_empty(rplan, NULL);
}
if (!qry) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
/* Deferred zone cut lookup for this query. */
request->qsource.packet = packet;
ITERATE_LAYERS(request, qry, begin, request);
request->qsource.packet = NULL;
- if (request->state == KNOT_STATE_DONE) {
+ if (request->state == KR_STATE_DONE) {
kr_rplan_pop(rplan, qry);
}
return request->state;
{
struct kr_context *ctx = request->ctx;
/* On successful answer, update preference list RTT and penalise timer */
- if (request->state != KNOT_STATE_FAIL) {
+ if (request->state != KR_STATE_FAIL) {
/* Update RTT information for preference list */
update_nslist_rtt(ctx, qry, src);
/* Do not complete NS address resolution on soft-fail. */
/* Empty resolution plan, push packet as the new query */
if (packet && kr_rplan_empty(rplan)) {
if (answer_prepare(request->answer, packet, request) != 0) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
return resolve_query(request, packet);
}
bool tried_tcp = (qry->flags & QUERY_TCP);
if (!packet || packet->size == 0) {
if (tried_tcp) {
- request->state = KNOT_STATE_FAIL;
+ request->state = KR_STATE_FAIL;
} else {
qry->flags |= QUERY_TCP;
}
if (qname_raw && qry->secret != 0) {
randomized_qname_case(qname_raw, qry->secret);
}
- request->state = KNOT_STATE_CONSUME;
+ request->state = KR_STATE_CONSUME;
if (qry->flags & QUERY_CACHED) {
ITERATE_LAYERS(request, qry, consume, packet);
} else {
update_nslist_score(request, qry, src, packet);
}
/* Resolution failed, invalidate current NS. */
- if (request->state == KNOT_STATE_FAIL) {
+ if (request->state == KR_STATE_FAIL) {
invalidate_ns(rplan, qry);
qry->flags &= ~QUERY_RESOLVED;
}
/* Pop query if resolved. */
- if (request->state == KNOT_STATE_YIELD) {
- return KNOT_STATE_PRODUCE; /* Requery */
+ if (request->state == KR_STATE_YIELD) {
+ return KR_STATE_PRODUCE; /* Requery */
} else if (qry->flags & QUERY_RESOLVED) {
kr_rplan_pop(rplan, qry);
} else if (!tried_tcp && (qry->flags & QUERY_TCP)) {
- return KNOT_STATE_PRODUCE; /* Requery over TCP */
+ return KR_STATE_PRODUCE; /* Requery over TCP */
} else { /* Clear query flags for next attempt */
qry->flags &= ~(QUERY_CACHED|QUERY_TCP);
}
/* Do not finish with bogus answer. */
if (qry->flags & QUERY_DNSSEC_BOGUS) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
- return kr_rplan_empty(&request->rplan) ? KNOT_STATE_DONE : KNOT_STATE_PRODUCE;
+ return kr_rplan_empty(&request->rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
}
/** @internal Spawn subrequest in current zone cut (no minimization or lookup). */
/* @todo we could fetch the information from the parent cut, but we don't remember that now */
struct kr_query *next = kr_rplan_push(rplan, qry, qry->zone_cut.name, qry->sclass, KNOT_RRTYPE_DS);
if (!next) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
next->flags |= QUERY_AWAIT_CUT|QUERY_DNSSEC_WANT;
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
/* Try to fetch missing DNSKEY (either missing or above current cut).
* Do not fetch if this is a DNSKEY subrequest to avoid circular dependency. */
if (want_secured && refetch_key && !is_dnskey_subreq) {
struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
if (!next) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
- return KNOT_STATE_PRODUCE;
+ return KR_STATE_PRODUCE;
}
/** @internal Check current zone cut status and credibility, spawn subrequests if needed. */
/* Stub mode, just forward and do not solve cut. */
if (qry->flags & QUERY_STUB) {
- return KNOT_STATE_PRODUCE;
+ return KR_STATE_PRODUCE;
}
/* The query wasn't resolved from cache,
if (ret == kr_error(ENOENT)) {
ret = kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
if (ret != 0) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
DEBUG_MSG(qry, "=> using root hints\n");
qry->flags &= ~QUERY_AWAIT_CUT;
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
} else {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
/* Update minimized QNAME if zone cut changed */
if (qry->zone_cut.name[0] != '\0' && !(qry->flags & QUERY_NO_MINIMIZE)) {
if (kr_make_query(qry, packet) != 0) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
}
qry->flags &= ~QUERY_AWAIT_CUT;
/* No query left for resolution */
if (kr_rplan_empty(rplan)) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
/* If we have deferred answers, resume them. */
struct kr_query *qry = array_tail(rplan->pending);
if (qry->deferred != NULL) {
/* @todo: Refactoring validator, check trust chain before resuming. */
switch(trust_chain_check(request, qry)) {
- case KNOT_STATE_FAIL: return KNOT_STATE_FAIL;
- case KNOT_STATE_DONE: return KNOT_STATE_PRODUCE;
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
default: break;
}
DEBUG_MSG(qry, "=> resuming yielded answer\n");
struct kr_layer_pickle *pickle = qry->deferred;
- request->state = KNOT_STATE_YIELD;
+ request->state = KR_STATE_YIELD;
RESUME_LAYERS(layer_id(request, pickle->api), request, qry, consume, pickle->pkt);
qry->deferred = pickle->next;
} else {
* this is normally not required, and incurrs another cache lookups for cached answer. */
if (qry->flags & QUERY_ALWAYS_CUT) {
switch(zone_cut_check(request, qry, packet)) {
- case KNOT_STATE_FAIL: return KNOT_STATE_FAIL;
- case KNOT_STATE_DONE: return KNOT_STATE_PRODUCE;
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
default: break;
}
}
/* Resolve current query and produce dependent or finish */
- request->state = KNOT_STATE_PRODUCE;
+ request->state = KR_STATE_PRODUCE;
ITERATE_LAYERS(request, qry, produce, packet);
- if (request->state != KNOT_STATE_FAIL && knot_wire_get_qr(packet->wire)) {
+ if (request->state != KR_STATE_FAIL && knot_wire_get_qr(packet->wire)) {
/* Produced an answer, consume it. */
qry->secret = 0;
- request->state = KNOT_STATE_CONSUME;
+ request->state = KR_STATE_CONSUME;
ITERATE_LAYERS(request, qry, consume, packet);
}
}
switch(request->state) {
- case KNOT_STATE_FAIL: return request->state;
- case KNOT_STATE_CONSUME: break;
- case KNOT_STATE_DONE:
+ case KR_STATE_FAIL: return request->state;
+ case KR_STATE_CONSUME: break;
+ case KR_STATE_DONE:
default: /* Current query is done */
- if (qry->flags & QUERY_RESOLVED && request->state != KNOT_STATE_YIELD) {
+ if (qry->flags & QUERY_RESOLVED && request->state != KR_STATE_YIELD) {
kr_rplan_pop(rplan, qry);
}
ITERATE_LAYERS(request, qry, reset);
- return kr_rplan_empty(rplan) ? KNOT_STATE_DONE : KNOT_STATE_PRODUCE;
+ return kr_rplan_empty(rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
}
/* This query has RD=0 or is ANY, stop here. */
if (qry->stype == KNOT_RRTYPE_ANY || !knot_wire_get_rd(request->answer->wire)) {
DEBUG_MSG(qry, "=> qtype is ANY or RD=0, bail out\n");
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
/* Update zone cut, spawn new subrequests. */
if (!(qry->flags & QUERY_STUB)) {
int state = zone_cut_check(request, qry, packet);
switch(state) {
- case KNOT_STATE_FAIL: return KNOT_STATE_FAIL;
- case KNOT_STATE_DONE: return KNOT_STATE_PRODUCE;
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
default: break;
}
}
*/
if(++ns_election_iter >= KR_ITER_LIMIT) {
DEBUG_MSG(qry, "=> couldn't converge NS selection, bail out\n");
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
const bool retry = (qry->flags & (QUERY_TCP|QUERY_STUB|QUERY_BADCOOKIE_AGAIN));
DEBUG_MSG(qry, "=> no valid NS left\n");
ITERATE_LAYERS(request, qry, reset);
kr_rplan_pop(rplan, qry);
- return KNOT_STATE_PRODUCE;
+ return KR_STATE_PRODUCE;
}
}
goto ns_election; /* Must try different NS */
}
ITERATE_LAYERS(request, qry, reset);
- return KNOT_STATE_PRODUCE;
+ return KR_STATE_PRODUCE;
}
/* Randomize query case (if not in safemode) */
#endif
/* Finalize answer */
if (answer_finalize(request, state) != 0) {
- state = KNOT_STATE_FAIL;
+ state = KR_STATE_FAIL;
}
/* Error during procesing, internal failure */
- if (state != KNOT_STATE_DONE) {
+ if (state != KR_STATE_DONE) {
knot_pkt_t *answer = request->answer;
if (knot_wire_get_rcode(answer->wire) == KNOT_RCODE_NOERROR) {
knot_wire_set_rcode(answer->wire, KNOT_RCODE_SERVFAIL);
ITERATE_LAYERS(request, NULL, finish);
DEBUG_MSG(NULL, "finished: %d, queries: %zu, mempool: %zu B\n",
request->state, rplan->resolved.len, (size_t) mp_total_size(request->pool.ctx));
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
struct kr_rplan *kr_resolve_plan(struct kr_request *request)
* state = kr_resolve_consume(&req, query);
*
* // Generate answer
- * while (state == KNOT_STATE_PRODUCE) {
+ * while (state == KR_STATE_PRODUCE) {
*
* // Additional query generate, do the I/O and pass back answer
* state = kr_resolve_produce(&req, &addr, &type, query);
- * while (state == KNOT_STATE_CONSUME) {
+ * while (state == KR_STATE_CONSUME) {
* int ret = sendrecv(addr, proto, query, resp);
*
* // If I/O fails, make "resp" empty
#include "lib/layer.h"
#include "lib/module.h"
// Need a forward declaration of the function signature
- int finish(knot_layer_t *);
+ int finish(kr_layer_t *);
// Workaround for layers composition
- static inline const knot_layer_api_t *_layer(void)
+ static inline const kr_layer_api_t *_layer(void)
{
- static const knot_layer_api_t api = {
+ static const kr_layer_api_t api = {
.finish = &finish
};
return &api;
.. code-block:: go
//export finish
- func finish(ctx *C.knot_layer_t) C.int {
+ func finish(ctx *C.kr_layer_t) C.int {
// Since the context is unsafe.Pointer, we need to cast it
var param *C.struct_kr_request = (*C.struct_kr_request)(ctx.data)
// Now we can use the C API as well
}
//export mymodule_layer
- func mymodule_layer(module *C.struct_kr_module) *C.knot_layer_api_t {
+ func mymodule_layer(module *C.struct_kr_module) *C.kr_layer_api_t {
// Wrapping the inline trampoline function
return C._layer()
}
}
/** Process incoming response. */
-int check_response(knot_layer_t *ctx, knot_pkt_t *pkt)
+int check_response(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_query *qry = req->current_query;
struct kr_cookie_ctx *cookie_ctx = &req->ctx->cookie_ctx;
- if (ctx->state & (KNOT_STATE_DONE | KNOT_STATE_FAIL)) {
+ if (ctx->state & (KR_STATE_DONE | KR_STATE_FAIL)) {
return ctx->state;
}
/* We haven't received any cookies although we should. */
DEBUG_MSG(NULL, "%s\n",
"expected to receive a cookie but none received");
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
if (!pkt_cookie_opt) {
if (!check_cookie_content_and_cache(&cookie_ctx->clnt, req,
pkt_cookie_opt, cookie_cache)) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
uint16_t rcode = knot_pkt_get_ext_rcode(pkt);
* we always expect that the server doesn't support TCP.
*/
qry->flags &= ~QUERY_BADCOOKIE_AGAIN;
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
- return KNOT_STATE_CONSUME;
+ return KR_STATE_CONSUME;
}
return ctx->state;
* @brief Returns resolver state and sets answer RCODE on missing or invalid
* server cookie.
*
- * @note Caller should exit when only KNOT_STATE_FAIL is returned.
+ * @note Caller should exit when only KR_STATE_FAIL is returned.
*
* @param state original resolver state
* @param sc_present true if server cookie is present
const knot_pkt_t *pkt = req->qsource.packet;
if (!pkt) {
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
if (knot_wire_get_qdcount(pkt->wire) == 0) {
/* RFC7873 5.4 */
- state = KNOT_STATE_DONE;
+ state = KR_STATE_DONE;
if (sc_present) {
kr_pkt_set_ext_rcode(answer, KNOT_RCODE_BADCOOKIE);
- state |= KNOT_STATE_FAIL;
+ state |= KR_STATE_FAIL;
}
} else if (!ignore_badcookie) {
/* Generate BADCOOKIE response. */
DEBUG_MSG(NULL, "%s\n",
"missing EDNS section in prepared answer");
/* Caller should exit on this (and only this) state. */
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
kr_pkt_set_ext_rcode(answer, KNOT_RCODE_BADCOOKIE);
- state = KNOT_STATE_FAIL | KNOT_STATE_DONE;
+ state = KR_STATE_FAIL | KR_STATE_DONE;
}
return state;
}
-int check_request(knot_layer_t *ctx, void *module_param)
+int check_request(kr_layer_t *ctx, void *module_param)
{
struct kr_request *req = ctx->data;
struct kr_cookie_settings *srvr_sett = &req->ctx->cookie_ctx.srvr;
knot_pkt_t *answer = req->answer;
- if (ctx->state & (KNOT_STATE_DONE | KNOT_STATE_FAIL)) {
+ if (ctx->state & (KR_STATE_DONE | KR_STATE_FAIL)) {
return ctx->state;
}
/* FORMERR -- malformed cookies. */
DEBUG_MSG(NULL, "%s\n", "request with malformed cookie");
knot_wire_set_rcode(answer->wire, KNOT_RCODE_FORMERR);
- return KNOT_STATE_FAIL | KNOT_STATE_DONE;
+ return KR_STATE_FAIL | KR_STATE_DONE;
}
/*
if (!req->qsource.addr || !srvr_sett->current.secr || !current_sc_alg) {
DEBUG_MSG(NULL, "%s\n", "missing valid server cookie context");
- return KNOT_STATE_FAIL;
+ return KR_STATE_FAIL;
}
int return_state = ctx->state;
/* Request has no server cookie. */
return_state = invalid_sc_status(return_state, false,
ignore_badcookie, req, answer);
- if (return_state == KNOT_STATE_FAIL) {
+ if (return_state == KR_STATE_FAIL) {
return return_state;
}
goto answer_add_cookies;
/* Invalid server cookie. */
return_state = invalid_sc_status(return_state, true,
ignore_badcookie, req, answer);
- if (return_state == KNOT_STATE_FAIL) {
+ if (return_state == KR_STATE_FAIL) {
return return_state;
}
goto answer_add_cookies;
/* Add server cookie into response. */
ret = kr_answer_write_cookie(&sc_input, &nonce, current_sc_alg, answer);
if (ret != kr_ok()) {
- return_state = KNOT_STATE_FAIL;
+ return_state = KR_STATE_FAIL;
}
return return_state;
}
* @param module_param module parameters
* @return layer state
*/
-int check_request(knot_layer_t *ctx, void *module_param);
+int check_request(kr_layer_t *ctx, void *module_param);
/**
* @brief Checks cookies of received responses.
* @param pkt response packet
* @return layer state
*/
-int check_response(knot_layer_t *ctx, knot_pkt_t *pkt);
+int check_response(kr_layer_t *ctx, knot_pkt_t *pkt);
}
KR_EXPORT
-const knot_layer_api_t *cookies_layer(struct kr_module *module)
+const kr_layer_api_t *cookies_layer(struct kr_module *module)
{
/* The function answer_finalize() in resolver is called before any
* .finish callback. Therefore this layer does not use it. */
- static knot_layer_api_t _layer = {
+ static kr_layer_api_t _layer = {
.begin = &check_request,
.consume = &check_response
};
size_t addr_len;
};
-static int begin(knot_layer_t *ctx, void *module_param)
+static int begin(kr_layer_t *ctx, void *module_param)
{
ctx->data = module_param;
return ctx->state;
return put_answer(pkt, &rr);
}
-static int query(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int query(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_query *qry = req->current_query;
- if (!qry || ctx->state & (KNOT_STATE_FAIL)) {
+ if (!qry || ctx->state & (KR_STATE_FAIL)) {
return ctx->state;
}
qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
pkt->parsed = pkt->size;
knot_wire_set_qr(pkt->wire);
- return KNOT_STATE_DONE;
+ return KR_STATE_DONE;
}
static int parse_addr_str(struct sockaddr_storage *sa, const char *addr)
*/
KR_EXPORT
-const knot_layer_api_t *hints_layer(struct kr_module *module)
+const kr_layer_api_t *hints_layer(struct kr_module *module)
{
- static knot_layer_api_t _layer = {
+ static kr_layer_api_t _layer = {
.begin = &begin,
.produce = &query,
};
}
}
-static int collect_rtt(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int collect_rtt(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->data;
struct kr_query *qry = req->current_query;
return ctx->state;
}
-static int collect(knot_layer_t *ctx)
+static int collect(kr_layer_t *ctx)
{
struct kr_request *param = ctx->data;
struct kr_module *module = ctx->api->data;
*/
KR_EXPORT
-const knot_layer_api_t *stats_layer(struct kr_module *module)
+const kr_layer_api_t *stats_layer(struct kr_module *module)
{
- static knot_layer_api_t _layer = {
+ static kr_layer_api_t _layer = {
.consume = &collect_rtt,
.finish = &collect,
};