#include "lib/module.h"
#include "lib/layer.h"
-/** @internal Slots for layer callbacks.
- * Each slot ID corresponds to Lua reference in module API. */
-enum {
- SLOT_begin = 0,
- SLOT_reset,
- SLOT_finish,
- SLOT_consume,
- SLOT_produce,
- SLOT_checkout,
- SLOT_answer_finalize,
- SLOT_count /* dummy, must be the last */
-};
-
/** Lua registry indices for functions that wrap layer callbacks (shared by all lua modules). */
static int l_ffi_wrap_slots[SLOT_count] = { 0 };
+/** @internal Mapping from slot # to callback name in kr_layer_api.
+ * Each slot ID corresponds to the name of a Lua function. */
+static const char *slot_name[] = {
+ [SLOT_begin] = "begin",
+ [SLOT_reset] = "reset",
+ [SLOT_finish] = "finish",
+ [SLOT_consume] = "consume",
+ [SLOT_produce] = "produce",
+ [SLOT_checkout] = "checkout",
+ [SLOT_answer_finalize] = "answer_finalize"
+};
+
/** @internal Continue with coroutine. */
static void l_ffi_resume_cb(uv_idle_t *check)
{
}
/** @internal Helper for calling a layer Lua function by e.g. SLOT_begin. */
-static int l_ffi_call_layer(kr_layer_t *ctx, int slot_ix)
+static int l_ffi_call_layer(kr_layer_t *ctx, enum slot_idx slot_idx)
{
- const int wrap_slot = l_ffi_wrap_slots[slot_ix];
- const int cb_slot = ctx->api->cb_slots[slot_ix];
+ const int wrap_slot = l_ffi_wrap_slots[slot_idx];
+ const int cb_slot = ctx->api->cb_slots[slot_idx];
assert(wrap_slot > 0 && cb_slot > 0);
lua_State *L = the_worker->engine->L;
lua_rawgeti(L, LUA_REGISTRYINDEX, wrap_slot);
return ret < 0 ? KR_STATE_FAIL : ret;
}
-static int l_ffi_layer_begin(kr_layer_t *ctx)
+static int l_ffi_layer_begin(kr_layer_t *ctx, va_list ap /* none */)
{
return l_ffi_call_layer(ctx, SLOT_begin);
}
-static int l_ffi_layer_reset(kr_layer_t *ctx)
+static int l_ffi_layer_reset(kr_layer_t *ctx, va_list ap /* none */)
{
return l_ffi_call_layer(ctx, SLOT_reset);
}
-static int l_ffi_layer_finish(kr_layer_t *ctx)
+static int l_ffi_layer_finish(kr_layer_t *ctx, va_list ap /* none */)
{
ctx->pkt = ctx->req->answer;
return l_ffi_call_layer(ctx, SLOT_finish);
}
-static int l_ffi_layer_consume(kr_layer_t *ctx, knot_pkt_t *pkt)
+static int l_ffi_layer_consume(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
if (ctx->state & KR_STATE_FAIL) {
return ctx->state; /* Already failed, skip */
}
- ctx->pkt = pkt;
+ ctx->pkt = va_arg(ap, knot_pkt_t *);
+
return l_ffi_call_layer(ctx, SLOT_consume);
}
-static int l_ffi_layer_produce(kr_layer_t *ctx, knot_pkt_t *pkt)
+static int l_ffi_layer_produce(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
if (ctx->state & KR_STATE_FAIL) {
return ctx->state; /* Already failed, skip */
}
- ctx->pkt = pkt;
+ ctx->pkt = va_arg(ap, knot_pkt_t *);
return l_ffi_call_layer(ctx, SLOT_produce);
}
-static int l_ffi_layer_checkout(kr_layer_t *ctx, knot_pkt_t *pkt,
- struct sockaddr *dst, int type)
+static int l_ffi_layer_checkout(kr_layer_t *ctx, va_list ap
+ /* knot_pkt_t *pkt, struct sockaddr *dst, int type */)
{
if (ctx->state & KR_STATE_FAIL) {
return ctx->state; /* Already failed, skip */
}
- ctx->pkt = pkt;
- ctx->dst = dst;
- ctx->is_stream = (type == SOCK_STREAM);
+ ctx->pkt = va_arg(ap, knot_pkt_t *);
+ ctx->dst = va_arg(ap, struct sockaddr *);
+ ctx->is_stream = (va_arg(ap, int) == SOCK_STREAM);
return l_ffi_call_layer(ctx, SLOT_checkout);
}
-static int l_ffi_layer_answer_finalize(kr_layer_t *ctx)
+static int l_ffi_layer_answer_finalize(kr_layer_t *ctx, va_list ap /* none */)
{
return l_ffi_call_layer(ctx, SLOT_answer_finalize);
}
+/** @internal Mapping from enum slot_idx to the corresponding Lua wrapper. */
+static int (*const l_wrap_funcs[SLOT_count])(kr_layer_t *ctx, va_list ap) = {
+ [SLOT_begin] = l_ffi_layer_begin,
+ [SLOT_reset] = l_ffi_layer_reset,
+ [SLOT_finish] = l_ffi_layer_finish,
+ [SLOT_consume] = l_ffi_layer_consume,
+ [SLOT_produce] = l_ffi_layer_produce,
+ [SLOT_checkout] = l_ffi_layer_checkout,
+ [SLOT_answer_finalize] = l_ffi_layer_answer_finalize
+};
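+/* All entries above are non-NULL; whether a slot is actually active for a
+ * given Lua module is decided in LAYER_REGISTER(), which copies an entry
+ * into api->funcs[] only when the module defines the matching Lua function. */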
+
int ffimodule_init(lua_State *L)
{
/* Wrappers defined in ./lua/sandbox.lua */
/** @internal Conditionally register layer trampoline
* @warning Expects 'module.layer' to be on top of Lua stack. */
-#define LAYER_REGISTER(L, api, name) do { \
- int *cb_slot = (api)->cb_slots + SLOT_ ## name; \
- lua_getfield((L), -1, #name); \
- if (!lua_isnil((L), -1)) { \
- (api)->name = l_ffi_layer_ ## name; \
- *cb_slot = luaL_ref((L), LUA_REGISTRYINDEX); \
- } else { \
- lua_pop((L), 1); \
- } \
-} while(0)
+static void LAYER_REGISTER(lua_State *L, kr_layer_api_t *capi, enum slot_idx fidx)
+{
+ int *cb_slot = capi->cb_slots + fidx;
+ lua_getfield(L, -1, slot_name[fidx]);
+ if (!lua_isnil(L, -1)) {
+ capi->funcs[fidx] = l_wrap_funcs[fidx];
+ *cb_slot = luaL_ref(L, LUA_REGISTRYINDEX);
+ } else {
+ lua_pop(L, 1);
+ }
+}
/** @internal Create C layer api wrapper. */
static kr_layer_api_t *l_ffi_layer_create(lua_State *L, struct kr_module *module)
kr_layer_api_t *api = malloc(api_length);
if (api) {
memset(api, 0, api_length);
- LAYER_REGISTER(L, api, begin);
- LAYER_REGISTER(L, api, finish);
- LAYER_REGISTER(L, api, consume);
- LAYER_REGISTER(L, api, produce);
- LAYER_REGISTER(L, api, checkout);
- LAYER_REGISTER(L, api, answer_finalize);
- LAYER_REGISTER(L, api, reset);
+ LAYER_REGISTER(L, api, SLOT_begin);
+ LAYER_REGISTER(L, api, SLOT_finish);
+ LAYER_REGISTER(L, api, SLOT_consume);
+ LAYER_REGISTER(L, api, SLOT_produce);
+ LAYER_REGISTER(L, api, SLOT_checkout);
+ LAYER_REGISTER(L, api, SLOT_answer_finalize);
+ LAYER_REGISTER(L, api, SLOT_reset);
}
return api;
}
-#undef LAYER_REGISTER
-
int ffimodule_register_lua(struct engine *engine, struct kr_module *module, const char *name)
{
/* Register module in Lua */
/** The inside for cache_peek(); implementation separated to ./peek.c */
int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt);
/** function for .produce phase */
-int cache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
+int cache_peek(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
+ knot_pkt_t *pkt = va_arg(ap, knot_pkt_t *);
+
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
/* We first check various exit-conditions and then call the _real function. */
struct kr_request *req);
/** The whole .consume phase for the cache module. */
-int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
+int cache_stash(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
+ knot_pkt_t *pkt = va_arg(ap, knot_pkt_t *);
+
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
struct kr_cache *cache = &req->ctx->cache;
#include "lib/module.h"
/* Prototypes for the 'cache' module implementation. */
-int cache_peek(kr_layer_t *ctx, knot_pkt_t *pkt);
-int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt);
+int cache_peek(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */);
+int cache_stash(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */);
/**
bool is_stream; /*!< In glue for checkout layer it's used to pass the parameter. */
} kr_layer_t;
+/** @internal Slot indices for the layer callbacks in kr_layer_api.
+ * Each slot ID corresponds to a Lua reference in the module API;
+ * tables indexed by this enum must provide SLOT_count entries. */
+enum slot_idx {
+ SLOT_begin = 0,
+ SLOT_reset,
+ SLOT_finish,
+ SLOT_consume,
+ SLOT_produce,
+ SLOT_checkout,
+ SLOT_answer_finalize,
+ SLOT_count /* dummy, must be the last */
+};
+
/** Packet processing module API. All functions return the new kr_layer_state. */
struct kr_layer_api {
- /** Start of processing the DNS request. */
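+	/** Layer callbacks indexed by enum slot_idx.  A NULL entry means the
+	 * layer does not implement that callback; the slot-specific arguments
+	 * are passed through the va_list (see the commented prototypes below). */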
+ int (*funcs[SLOT_count])(kr_layer_t *ctx, va_list ap);
+	/* Content of the funcs array, one entry per slot:
+	** Start of processing the DNS request.
int (*begin)(kr_layer_t *ctx);
int (*reset)(kr_layer_t *ctx);
- /** Paired to begin, called both on successes and failures. */
+ ** Paired to begin, called both on successes and failures.
int (*finish)(kr_layer_t *ctx);
- /** Processing an answer from upstream or the answer to the request.
- * Lua API: call is omitted iff (state & KR_STATE_FAIL). */
+ ** Processing an answer from upstream or the answer to the request.
+ * Lua API: call is omitted iff (state & KR_STATE_FAIL).
int (*consume)(kr_layer_t *ctx, knot_pkt_t *pkt);
- /** Produce either an answer to the request or a query for upstream (or fail).
- * Lua API: call is omitted iff (state & KR_STATE_FAIL). */
+ ** Produce either an answer to the request or a query for upstream (or fail).
+ * Lua API: call is omitted iff (state & KR_STATE_FAIL).
int (*produce)(kr_layer_t *ctx, knot_pkt_t *pkt);
- /** Finalises the outbound query packet with the knowledge of the IP addresses.
+ ** Finalises the outbound query packet with the knowledge of the IP addresses.
* The checkout layer doesn't persist the state, so canceled subrequests
* don't affect the resolution or rest of the processing.
- * Lua API: call is omitted iff (state & KR_STATE_FAIL). */
+ * Lua API: call is omitted iff (state & KR_STATE_FAIL).
int (*checkout)(kr_layer_t *ctx, knot_pkt_t *packet, struct sockaddr *dst, int type);
- /** Finalises the answer.
- * Last chance to affect what will get into the answer, including EDNS.*/
+ ** Finalises the answer.
+ * Last chance to affect what will get into the answer, including EDNS.
int (*answer_finalize)(kr_layer_t *ctx);
+ */
/** The C module can store anything in here. */
void *data;
int cache_init(struct kr_module *self)
{
static const kr_layer_api_t layer = {
- .produce = &cache_peek,
- .consume = &cache_stash,
+ .funcs = {
+ [SLOT_produce] = &cache_peek,
+ [SLOT_consume] = &cache_stash,
+ }
};
self->layer = &layer;
return kr_ok();
#include <sys/time.h>
#include <assert.h>
#include <arpa/inet.h>
+#include <stdarg.h>
#include <contrib/cleanup.h>
#include <libknot/descriptor.h>
}
/* State-less single resolution iteration step, not needed. */
-static int reset(kr_layer_t *ctx) { return KR_STATE_PRODUCE; }
+static int reset(kr_layer_t *ctx, va_list ap /* none */) { return KR_STATE_PRODUCE; }
/* Set resolution context and parameters. */
-static int begin(kr_layer_t *ctx)
+static int begin(kr_layer_t *ctx, va_list ap /* none */)
{
if (ctx->state & (KR_STATE_DONE|KR_STATE_FAIL)) {
return ctx->state;
return KR_STATE_FAIL;
}
- return reset(ctx);
+	/* If arguments are ever added, this will need va_copy() or a wrapper. */
+ return reset(ctx, ap);
}
int kr_make_query(struct kr_query *query, knot_pkt_t *pkt)
return kr_ok();
}
-static int prepare_query(kr_layer_t *ctx, knot_pkt_t *pkt)
+static int prepare_query(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
+ knot_pkt_t *pkt = va_arg(ap, knot_pkt_t *);
+
assert(pkt && ctx);
struct kr_request *req = ctx->req;
struct kr_query *query = req->current_query;
*
* This roughly corresponds to RFC1034, 5.3.3 4a-d.
*/
-static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
+static int resolve(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
+ knot_pkt_t *pkt = va_arg(ap, knot_pkt_t *);
+
assert(pkt && ctx);
struct kr_request *req = ctx->req;
struct kr_query *query = req->current_query;
int iterate_init(struct kr_module *self)
{
static const kr_layer_api_t layer = {
- .begin = &begin,
- .reset = &reset,
- .consume = &resolve,
- .produce = &prepare_query
+ .funcs = {
+ [SLOT_begin] = &begin,
+ [SLOT_reset] = &reset,
+ [SLOT_consume] = &resolve,
+ [SLOT_produce] = &prepare_query
+ }
};
self->layer = &layer;
return kr_ok();
#include <assert.h>
#include <errno.h>
#include <sys/time.h>
+#include <stdarg.h>
#include <stdio.h>
#include <string.h>
return kr_ok();
}
-static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
+static int validate(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
+ knot_pkt_t *pkt = va_arg(ap, knot_pkt_t *);
+
int ret = 0;
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
int validate_init(struct kr_module *self)
{
static const kr_layer_api_t layer = {
- .consume = &validate,
+ .funcs = {
+ [SLOT_consume] = &validate,
+ }
};
self->layer = &layer;
return kr_ok();
#include <ctype.h>
#include <inttypes.h>
+#include <stdarg.h>
#include <stdio.h>
#include <fcntl.h>
#include <assert.h>
}
}
+
/** @internal Set @a yielded to all RRs with matching @a qry_uid. */
static void set_yield(ranked_rr_array_t *array, const uint32_t qry_uid, const bool yielded)
{
* @internal Defer execution of current query.
* The current layer state and input will be pushed to a stack and resumed on next iteration.
*/
-static int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt)
+static int consume_yield(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
+ knot_pkt_t *pkt = va_arg(ap, knot_pkt_t *);
+
struct kr_request *req = ctx->req;
size_t pkt_size = pkt->size;
if (knot_pkt_has_tsig(pkt)) {
}
return kr_error(ENOMEM);
}
-static int begin_yield(kr_layer_t *ctx) { return kr_ok(); }
-static int reset_yield(kr_layer_t *ctx) { return kr_ok(); }
-static int finish_yield(kr_layer_t *ctx) { return kr_ok(); }
-static int produce_yield(kr_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
-static int checkout_yield(kr_layer_t *ctx, knot_pkt_t *packet, struct sockaddr *dst, int type) { return kr_ok(); }
-static int answer_finalize_yield(kr_layer_t *ctx) { return kr_ok(); }
+static int begin_yield(kr_layer_t *ctx, va_list ap /* none */) { return kr_ok(); }
+static int reset_yield(kr_layer_t *ctx, va_list ap /* none */) { return kr_ok(); }
+static int finish_yield(kr_layer_t *ctx, va_list ap /* none */) { return kr_ok(); }
+static int produce_yield(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */) { return kr_ok(); }
+static int checkout_yield(kr_layer_t *ctx, va_list ap /* knot_pkt_t *packet, struct sockaddr *dst, int type */) { return kr_ok(); }
+static int answer_finalize_yield(kr_layer_t *ctx, va_list ap /* none */) { return kr_ok(); }
+
+/** Mapping from enum slot_idx to the corresponding yield function. */
+static int (*const yield_funcs[SLOT_count])(kr_layer_t *ctx, va_list ap) = {
+ [SLOT_begin] = begin_yield,
+ [SLOT_reset] = reset_yield,
+ [SLOT_finish] = finish_yield,
+ [SLOT_consume] = consume_yield,
+ [SLOT_produce] = produce_yield,
+ [SLOT_checkout] = checkout_yield,
+ [SLOT_answer_finalize] = answer_finalize_yield
+};
-/** @internal Macro for iterating module layers. */
+/** @internal Iterate module layers, calling callback slot @a function on each. */
-#define RESUME_LAYERS(from, r, qry, func, ...) \
- (r)->current_query = (qry); \
- for (size_t i = (from); i < (r)->ctx->modules->len; ++i) { \
- struct kr_module *mod = (r)->ctx->modules->at[i]; \
- if (mod->layer) { \
- struct kr_layer layer = {.state = (r)->state, .api = mod->layer, .req = (r)}; \
- if (layer.api && layer.api->func) { \
- (r)->state = layer.api->func(&layer, ##__VA_ARGS__); \
- if ((r)->state == KR_STATE_YIELD) { \
- func ## _yield(&layer, ##__VA_ARGS__); \
- break; \
- } \
- } \
- } \
- } /* Invalidate current query. */ \
- (r)->current_query = NULL
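+/* The varargs passed to RESUME_LAYERS()/ITERATE_LAYERS() must match the slot:
+ *   SLOT_consume, SLOT_produce:  (knot_pkt_t *pkt)
+ *   SLOT_checkout:               (knot_pkt_t *pkt, struct sockaddr *dst, int type)
+ *   all other slots:             no extra arguments
+ */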
+static void RESUME_LAYERS(size_t from, struct kr_request *r, struct kr_query *qry, enum slot_idx function, ...)
+{
+	va_list ap;
+	r->current_query = qry;
+	for (size_t i = from; i < r->ctx->modules->len; ++i) {
+		struct kr_module *mod = r->ctx->modules->at[i];
+		if (mod->layer) {
+			struct kr_layer layer = {.state = r->state, .api = mod->layer, .req = r};
+			if (layer.api && layer.api->funcs[function]) {
+				va_start(ap, function);
+				r->state = layer.api->funcs[function](&layer, ap);
+				va_end(ap);
+				if (r->state == KR_STATE_YIELD) {
+					va_start(ap, function);
+					yield_funcs[function](&layer, ap);
+					va_end(ap);
+					break;
+				}
+			}
+		}
+	}
+	/* Invalidate current query. */
+	r->current_query = NULL;
+}
/** @internal Macro for starting module iteration. */
#define ITERATE_LAYERS(req, qry, func, ...) RESUME_LAYERS(0, req, qry, func, ##__VA_ARGS__)
}
/* Expect answer, pop if satisfied immediately */
- ITERATE_LAYERS(request, qry, begin);
+ ITERATE_LAYERS(request, qry, SLOT_begin);
if ((request->state & KR_STATE_DONE) != 0) {
kr_rplan_pop(rplan, qry);
} else if (qname == NULL) {
}
request->state = KR_STATE_CONSUME;
if (qry->flags.CACHED) {
- ITERATE_LAYERS(request, qry, consume, packet);
+ ITERATE_LAYERS(request, qry, SLOT_consume, packet);
} else {
/* Fill in source and latency information. */
request->upstream.rtt = kr_now() - qry->timestamp_mono;
request->upstream.addr = src;
- ITERATE_LAYERS(request, qry, consume, packet);
+ ITERATE_LAYERS(request, qry, SLOT_consume, packet);
/* Clear temporary information */
request->upstream.addr = NULL;
request->upstream.rtt = 0;
}
}
- ITERATE_LAYERS(request, qry, reset);
+ ITERATE_LAYERS(request, qry, SLOT_reset);
/* Do not finish with bogus answer. */
if (qry->flags.DNSSEC_BOGUS) {
request->state = KR_STATE_YIELD;
set_yield(&request->answ_selected, qry->uid, false);
set_yield(&request->auth_selected, qry->uid, false);
- RESUME_LAYERS(layer_id(request, pickle->api), request, qry, consume, pickle->pkt);
+ RESUME_LAYERS(layer_id(request, pickle->api), request, qry, SLOT_consume, pickle->pkt);
if (request->state != KR_STATE_YIELD) {
/* No new deferred answers, take the next */
qry->deferred = pickle->next;
}
/* Resolve current query and produce dependent or finish */
request->state = KR_STATE_PRODUCE;
- ITERATE_LAYERS(request, qry, produce, packet);
+ ITERATE_LAYERS(request, qry, SLOT_produce, packet);
if (!(request->state & KR_STATE_FAIL) && knot_wire_get_qr(packet->wire)) {
/* Produced an answer from cache, consume it. */
qry->secret = 0;
request->state = KR_STATE_CONSUME;
- ITERATE_LAYERS(request, qry, consume, packet);
+ ITERATE_LAYERS(request, qry, SLOT_consume, packet);
}
}
switch(request->state) {
if (qry->flags.RESOLVED && request->state != KR_STATE_YIELD) {
kr_rplan_pop(rplan, qry);
}
- ITERATE_LAYERS(request, qry, reset);
+ ITERATE_LAYERS(request, qry, SLOT_reset);
return kr_rplan_empty(rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
}
if (!qry->flags.NO_NS_FOUND) {
qry->flags.NO_NS_FOUND = true;
} else {
- ITERATE_LAYERS(request, qry, reset);
+ ITERATE_LAYERS(request, qry, SLOT_reset);
kr_rplan_pop(rplan, qry);
}
return KR_STATE_PRODUCE;
qry->ns.name = NULL;
goto ns_election; /* Must try different NS */
}
- ITERATE_LAYERS(request, qry, reset);
+ ITERATE_LAYERS(request, qry, SLOT_reset);
return KR_STATE_PRODUCE;
}
* The checkout layer doesn't persist the state, so canceled subrequests
* don't affect the resolution or rest of the processing. */
int state = request->state;
- ITERATE_LAYERS(request, qry, checkout, packet, dst, type);
+ ITERATE_LAYERS(request, qry, SLOT_checkout, packet, dst, type);
if (request->state & KR_STATE_FAIL) {
request->state = state; /* Restore */
return kr_error(ECANCELED);
{
request->state = state;
/* Finalize answer and construct wire-buffer. */
- ITERATE_LAYERS(request, NULL, answer_finalize);
+ ITERATE_LAYERS(request, NULL, SLOT_answer_finalize);
answer_finalize(request);
/* Defensive style, in case someone has forgotten.
}
}
- ITERATE_LAYERS(request, NULL, finish);
+ ITERATE_LAYERS(request, NULL, SLOT_finish);
#ifndef NOVERBOSELOG
struct kr_rplan *rplan = &request->rplan;
*
* This module logs (query name, type) pairs which failed DNSSEC validation. */
+#include <stdarg.h>
+
#include <libknot/packet/pkt.h>
#include <libknot/dname.h>
#include <ccan/json/json.h>
namehash_t *frequent;
};
-static int consume(kr_layer_t *ctx, knot_pkt_t *pkt)
+static int consume(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
+ knot_pkt_t *pkt = va_arg(ap, knot_pkt_t *);
+
if (!(ctx->state & KR_STATE_FAIL)
|| !ctx->req
|| !ctx->req->current_query
int bogus_log_init(struct kr_module *module)
{
static kr_layer_api_t layer = {
- .consume = &consume,
+ .funcs = {
+ [SLOT_consume] = &consume,
+ }
};
layer.data = module;
module->layer = &layer;
}
/* dnstap_log prepares dnstap message and sent it to fstrm */
-static int dnstap_log(kr_layer_t *ctx) {
+static int dnstap_log(kr_layer_t *ctx, va_list ap /* none */) {
const struct kr_request *req = ctx->req;
const struct kr_module *module = ctx->api->data;
const struct kr_rplan *rplan = &req->rplan;
KR_EXPORT
int dnstap_init(struct kr_module *module) {
static kr_layer_api_t layer = {
- .finish = &dnstap_log,
+ .funcs = {
+ [SLOT_finish] = &dnstap_log,
+ }
};
/* Store module reference */
layer.data = module;
#include "lib/module.h"
#include "lib/layer.h"
-static int edns_keepalive_finalize(kr_layer_t *ctx)
+static int edns_keepalive_finalize(kr_layer_t *ctx, va_list ap /* none */)
{
struct kr_request *req = ctx->req;
knot_pkt_t *answer = req->answer;
KR_EXPORT int edns_keeapalive_init(struct kr_module *self)
{
static const kr_layer_api_t layer = {
- .answer_finalize = &edns_keepalive_finalize,
+ .funcs = {
+ [SLOT_answer_finalize] = &edns_keepalive_finalize
+ }
};
self->layer = &layer;
return kr_ok();
return put_answer(pkt, qry, &rr, data->use_nodata);
}
-static int query(kr_layer_t *ctx, knot_pkt_t *pkt)
+static int query(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
+ knot_pkt_t *pkt = va_arg(ap, knot_pkt_t *);
+
struct kr_query *qry = ctx->req->current_query;
if (!qry || (ctx->state & KR_STATE_FAIL)) {
return ctx->state;
int hints_init(struct kr_module *module)
{
static kr_layer_api_t layer = {
- .produce = &query,
+ .funcs = {
+ [SLOT_produce] = &query,
+ }
};
/* Store module reference */
layer.data = module;
size_t local_nsid_len;
};
-static int nsid_finalize(kr_layer_t *ctx) {
+static int nsid_finalize(kr_layer_t *ctx, va_list ap /* none */) {
const struct kr_module *module = ctx->api->data;
const struct nsid_config *config = module->data;
struct kr_request *req = ctx->req;
KR_EXPORT
int nsid_init(struct kr_module *module) {
static kr_layer_api_t layer = {
- .answer_finalize = &nsid_finalize,
+ .funcs = {
+ [SLOT_answer_finalize] = &nsid_finalize,
+ }
};
layer.data = module;
module->layer = &layer;
#include "lib/module.h"
#include "lib/layer.h"
-static int refuse_nord_query(kr_layer_t *ctx)
+static int refuse_nord_query(kr_layer_t *ctx, va_list ap /* none */)
{
struct kr_request *req = ctx->req;
uint8_t rd = knot_wire_get_rd(req->qsource.packet->wire);
KR_EXPORT int refuse_nord_init(struct kr_module *module)
{
static const kr_layer_api_t layer = {
- .begin = &refuse_nord_query,
+ .funcs = {
+ [SLOT_begin] = &refuse_nord_query,
+ }
};
module->layer = &layer;
return kr_ok();
}
}
-static int collect_rtt(kr_layer_t *ctx, knot_pkt_t *pkt)
+static int collect_rtt(kr_layer_t *ctx, va_list ap /* knot_pkt_t *pkt */)
{
+ /* pkt is unused in this function
+ knot_pkt_t *pkt = va_arg(ap, knot_pkt_t *);
+ */
+
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
if (qry->flags.CACHED || !req->upstream.addr) {
return ctx->state;
}
-static int collect_transport(kr_layer_t *ctx)
+static int collect_transport(kr_layer_t *ctx, va_list ap /* none */)
{
struct kr_request *req = ctx->req;
struct kr_module *module = ctx->api->data;
return ctx->state;
}
-static int collect(kr_layer_t *ctx)
+static int collect(kr_layer_t *ctx, va_list ap /* none */)
{
struct kr_request *param = ctx->req;
struct kr_module *module = ctx->api->data;
int stats_init(struct kr_module *module)
{
static kr_layer_api_t layer = {
- .consume = &collect_rtt,
- .finish = &collect,
- .begin = &collect_transport,
+ .funcs = {
+ [SLOT_consume] = &collect_rtt,
+ [SLOT_finish] = &collect,
+ [SLOT_begin] = &collect_transport,
+ }
};
/* Store module reference */
layer.data = module;