- :ref:`Nameservers <lib_api_nameservers>` - Reputation database of nameservers; it serves as an aid for nameserver choice.
A processing layer is called by the query resolution driver for each query,
-so you're going to work with :ref:`struct kr_layer_param <lib_api_rplan>` as your per-query context. This structure contains pointers to
+so you're going to work with :ref:`struct kr_request <lib_api_rplan>` as your per-query context. This structure contains pointers to
the resolution context, the resolution plan and also the final answer. You will typically retrieve the currently solved query from the query plan:
.. code-block:: c
int consume(knot_layer_t *ctx, knot_pkt_t *pkt)
{
- struct kr_layer_param *param = ctx->data;
- struct kr_query *query = kr_rplan_current(param->rplan);
+ struct kr_request *request = ctx->data;
+ struct kr_query *query = kr_rplan_current(request->rplan);
}
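
The layer's callbacks are handed to the driver through the ``knot_layer_api_t`` table returned by the layer constructor (compare ``iterate_layer()`` further below). A minimal sketch, assuming a hypothetical ``my_layer()`` constructor and only the callbacks shown in this section:

.. code-block:: c

   const knot_layer_api_t *my_layer(void)
   {
   	/* Static table of the implemented callbacks;
   	 * members left unset are simply skipped by the driver. */
   	static const knot_layer_api_t _layer = {
   		.consume = &consume,
   		.produce = &produce,
   		.finish  = &finish,
   	};
   	return &_layer;
   }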
The ``consume`` callback above performs only passive processing of the incoming answer. If you want to change the course of resolution, for example satisfy a query from a local cache before the library issues a query to the nameserver, you can use states (see the :ref:`Static hints <mod-hints>` module for an example).
int produce(knot_layer_t *ctx, knot_pkt_t *pkt)
{
- struct kr_layer_param *param = ctx->data;
- struct kr_query *cur = kr_rplan_current(param->rplan);
+ struct kr_request *request = ctx->data;
+ struct kr_query *cur = kr_rplan_current(request->rplan);
/* Query can be satisfied locally. */
if (can_satisfy(cur)) {
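		/* Illustrative completion of this truncated excerpt: flag the query
		 * as resolved so the planner moves it off the plan, and report DONE. */
		cur->flags |= QUERY_RESOLVED;
		return KNOT_STATE_DONE;
	}
	/* Otherwise do nothing and let the next layer process the query. */
	return ctx->state;
}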
int finish(knot_layer_t *ctx)
{
- struct kr_layer_param *param = ctx->data;
- struct kr_rplan *rplan = param->rplan;
+ struct kr_request *request = ctx->data;
+ struct kr_rplan *rplan = request->rplan;
/* Print the query sequence with start time. */
char qname_str[KNOT_DNAME_MAXLEN];
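	/* Illustrative completion of this truncated excerpt: assuming the list of
	 * resolved queries is a knot list_t (as the Go bindings below suggest),
	 * walk it and print each query name with its start time. */
	struct kr_query *qry = NULL;
	WALK_LIST(qry, rplan->resolved) {
		knot_dname_to_str(qname_str, qry->sname, sizeof(qname_str));
		printf("%s at %ld\n", qname_str, (long)qry->timestamp.tv_sec);
	}
	return ctx->state;
}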
* Connection limits.
* @cond internal
*/
-#define KR_CONN_RTT_MAX 5000
+#define KR_CONN_RTT_MAX 5000 /* Timeout for network activity */
+#define ITER_LIMIT 50 /* Built-in iterator limit */
/*
* Timers.
#include "lib/nsrep.h"
#include "lib/module.h"
-#define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(param->rplan), "iter", fmt)
+#define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(&req->rplan), "iter", fmt)
/* Packet classification. */
enum {
};
/* Iterator often walks through packet section, this is an abstraction. */
-typedef int (*rr_callback_t)(const knot_rrset_t *, unsigned, struct kr_layer_param *);
+typedef int (*rr_callback_t)(const knot_rrset_t *, unsigned, struct kr_request *);
/** Return minimized QNAME/QTYPE for current zone cut. */
static const knot_dname_t *minimized_qname(struct kr_query *query, uint16_t *qtype)
return KNOT_STATE_CONSUME;
}
-static int update_glue(const knot_rrset_t *rr, unsigned hint, struct kr_layer_param *param)
+static int update_glue(const knot_rrset_t *rr, unsigned hint, struct kr_request *req)
{
- return update_nsaddr(rr, kr_rplan_current(param->rplan), hint);
+ return update_nsaddr(rr, kr_rplan_current(&req->rplan), hint);
}
-int rr_update_parent(const knot_rrset_t *rr, unsigned hint, struct kr_layer_param *param)
+int rr_update_parent(const knot_rrset_t *rr, unsigned hint, struct kr_request *req)
{
- struct kr_query *query = kr_rplan_current(param->rplan);
- return update_nsaddr(rr, query->parent, hint);
+ struct kr_query *qry = kr_rplan_current(&req->rplan);
+ return update_nsaddr(rr, qry->parent, hint);
}
-int rr_update_answer(const knot_rrset_t *rr, unsigned hint, struct kr_layer_param *param)
+int rr_update_answer(const knot_rrset_t *rr, unsigned hint, struct kr_request *req)
{
- knot_pkt_t *answer = param->answer;
+ knot_pkt_t *answer = req->answer;
/* Write copied RR to the result packet. */
int ret = knot_pkt_put(answer, KNOT_COMPR_HINT_NONE, rr, hint);
}
/** Attempt to find glue for given nameserver name (best effort). */
-static int fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_layer_param *param)
+static int fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_request *req)
{
int result = 0;
const knot_pktsection_t *ar = knot_pkt_section(pkt, KNOT_ADDITIONAL);
for (unsigned i = 0; i < ar->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(ar, i);
if (knot_dname_is_equal(ns, rr->owner)) {
- (void) update_glue(rr, 0, param);
+ (void) update_glue(rr, 0, req);
result += 1;
}
}
return result;
}
-static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_layer_param *param)
+static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_request *req)
{
- struct kr_query *query = kr_rplan_current(param->rplan);
+ struct kr_query *query = kr_rplan_current(&req->rplan);
struct kr_zonecut *cut = &query->zone_cut;
int state = KNOT_STATE_CONSUME;
kr_zonecut_add(cut, knot_ns_name(&rr->rrs, 0), NULL);
for (unsigned i = 0; i < rr->rrs.rr_count; ++i) {
const knot_dname_t *ns_name = knot_ns_name(&rr->rrs, i);
- int glue_records = fetch_glue(pkt, ns_name, param);
+ int glue_records = fetch_glue(pkt, ns_name, req);
/* Glue is mandatory for NS below zone */
if (knot_dname_in(ns_name, rr->owner) ) {
if (glue_records == 0) {
return state;
}
-static int process_authority(knot_pkt_t *pkt, struct kr_layer_param *param)
+static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
int result = KNOT_STATE_CONSUME;
const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);
for (unsigned i = 0; i < ns->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(ns, i);
if (rr->type == KNOT_RRTYPE_NS) {
- int state = update_cut(pkt, rr, param);
+ int state = update_cut(pkt, rr, req);
switch(state) {
case KNOT_STATE_DONE: result = state; break;
case KNOT_STATE_FAIL: return state; break;
return result;
}
-static void finalize_answer(knot_pkt_t *pkt, struct kr_layer_param *param)
+static void finalize_answer(knot_pkt_t *pkt, struct kr_request *req)
{
/* Finalize header */
- knot_pkt_t *answer = param->answer;
+ knot_pkt_t *answer = req->answer;
knot_wire_set_rcode(answer->wire, knot_wire_get_rcode(pkt->wire));
/* Fill in SOA if negative response */
for (unsigned i = 0; i < ns->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(ns, i);
if (rr->type == KNOT_RRTYPE_SOA) {
- rr_update_answer(rr, 0, param);
+ rr_update_answer(rr, 0, req);
break;
}
}
}
}
-static int process_answer(knot_pkt_t *pkt, struct kr_layer_param *param)
+static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
{
- struct kr_query *query = kr_rplan_current(param->rplan);
+ struct kr_query *query = kr_rplan_current(&req->rplan);
/* Response for minimized QNAME.
* NODATA => may be empty non-terminal, retry (found zone cut)
const knot_dname_t *cname = query->sname;
for (unsigned i = 0; i < an->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(an, i);
- int state = is_final ? rr_update_answer(rr, 0, param) : rr_update_parent(rr, 0, param);
+ int state = is_final ? rr_update_answer(rr, 0, req) : rr_update_parent(rr, 0, req);
if (state == KNOT_STATE_FAIL) {
return state;
}
/* Follow canonical name as next SNAME. */
if (cname != query->sname) {
- (void) kr_rplan_push(param->rplan, query->parent, cname, query->sclass, query->stype);
+ (void) kr_rplan_push(&req->rplan, query->parent, cname, query->sclass, query->stype);
} else {
if (query->parent == NULL) {
- finalize_answer(pkt, param);
+ finalize_answer(pkt, req);
}
}
}
/** Error handling, RFC1034 5.3.3, 4d. */
-static int resolve_error(knot_pkt_t *pkt, struct kr_layer_param *param)
+static int resolve_error(knot_pkt_t *pkt, struct kr_request *req)
{
return KNOT_STATE_FAIL;
}
static int prepare_query(knot_layer_t *ctx, knot_pkt_t *pkt)
{
assert(pkt && ctx);
- struct kr_layer_param *param = ctx->data;
- struct kr_query *query = kr_rplan_current(param->rplan);
+ struct kr_request *req = ctx->data;
+ struct kr_query *query = kr_rplan_current(&req->rplan);
if (query == NULL || ctx->state == KNOT_STATE_DONE) {
+ assert(0);
return ctx->state;
}
return KNOT_STATE_FAIL;
}
-#ifndef NDEBUG
- char qname_str[KNOT_DNAME_MAXLEN], zonecut_str[KNOT_DNAME_MAXLEN], ns_str[SOCKADDR_STRLEN];
- knot_dname_to_str(qname_str, qname, sizeof(qname_str));
- struct sockaddr *addr = &query->ns.addr.ip;
- inet_ntop(addr->sa_family, kr_nsrep_inaddr(query->ns.addr), ns_str, sizeof(ns_str));
- knot_dname_to_str(zonecut_str, query->zone_cut.name, sizeof(zonecut_str));
- DEBUG_MSG("=> querying: '%s' zone cut: '%s' m12n: '%s'\n", ns_str, zonecut_str, qname_str);
-#endif
-
/* Query built, expect answer. */
return KNOT_STATE_CONSUME;
}
static int resolve(knot_layer_t *ctx, knot_pkt_t *pkt)
{
assert(pkt && ctx);
- struct kr_layer_param *param = ctx->data;
- struct kr_query *query = kr_rplan_current(param->rplan);
+ struct kr_request *req = ctx->data;
+ struct kr_query *query = kr_rplan_current(&req->rplan);
if (query == NULL || (query->flags & QUERY_RESOLVED)) {
return ctx->state;
}
/* Check for packet processing errors first. */
if (pkt->parsed < pkt->size) {
DEBUG_MSG("<= malformed response\n");
- return resolve_error(pkt, param);
+ return resolve_error(pkt, req);
} else if (!is_paired_to_query(pkt, query)) {
DEBUG_MSG("<= ignoring mismatching response\n");
return KNOT_STATE_CONSUME;
} else if (knot_wire_get_tc(pkt->wire)) {
DEBUG_MSG("<= truncated response, failover to TCP\n");
- struct kr_query *cur = kr_rplan_current(param->rplan);
- if (cur) {
+ if (query) {
/* Fail if already on TCP. */
- if (cur->flags & QUERY_TCP) {
+ if (query->flags & QUERY_TCP) {
DEBUG_MSG("<= TC=1 with TCP, bailing out\n");
- return resolve_error(pkt, param);
+ return resolve_error(pkt, req);
}
- cur->flags |= QUERY_TCP;
+ query->flags |= QUERY_TCP;
}
return KNOT_STATE_DONE;
}
break; /* OK */
default:
DEBUG_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
- return resolve_error(pkt, param);
+ return resolve_error(pkt, req);
}
/* Resolve authority to see if it's referral or authoritative. */
int state = KNOT_STATE_CONSUME;
- state = process_authority(pkt, param);
+ state = process_authority(pkt, req);
switch(state) {
case KNOT_STATE_CONSUME: /* Not referral, process answer. */
DEBUG_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
- state = process_answer(pkt, param);
+ state = process_answer(pkt, req);
break;
case KNOT_STATE_DONE: /* Referral */
DEBUG_MSG("<= referral response, follow\n");
* Result updates the query parent.
* @note Hint is an index of chosen RR in the set.
*/
-int rr_update_parent(const knot_rrset_t *rr, unsigned hint, struct kr_layer_param *param);
+int rr_update_parent(const knot_rrset_t *rr, unsigned hint, struct kr_request *param);
/**
* Result updates the original query response.
* @note When \a hint is KNOT_PF_FREE, RR is treated as a copy and answer takes its ownership.
*/
-int rr_update_answer(const knot_rrset_t *rr, unsigned hint, struct kr_layer_param *param);
+int rr_update_answer(const knot_rrset_t *rr, unsigned hint, struct kr_request *param);
/* Processing module implementation. */
const knot_layer_api_t *iterate_layer(void);
\ No newline at end of file
#include "lib/cache.h"
#include "lib/module.h"
-#define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(param->rplan), " cc ", fmt)
+#define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(rplan), " cc ", fmt)
-typedef int (*rr_callback_t)(const knot_rrset_t *, unsigned, struct kr_layer_param *);
+typedef int (*rr_callback_t)(const knot_rrset_t *, unsigned, struct kr_request *);
-static int update_parent(const knot_rrset_t *rr, unsigned drift, struct kr_layer_param *param)
+static int update_parent(const knot_rrset_t *rr, unsigned drift, struct kr_request *req)
{
/* Find a first non-expired record. */
uint16_t i = 0;
}
}
- return rr_update_parent(rr, i, param);
+ return rr_update_parent(rr, i, req);
}
-static int update_answer(const knot_rrset_t *rr, unsigned drift, struct kr_layer_param *param)
+static int update_answer(const knot_rrset_t *rr, unsigned drift, struct kr_request *req)
{
- knot_pkt_t *answer = param->answer;
+ knot_pkt_t *answer = req->answer;
/* Materialize RR set */
knot_rrset_t rr_copy = kr_cache_materialize(rr, drift, &answer->mm);
return KNOT_STATE_FAIL;
}
- return rr_update_answer(&rr_copy, 0, param);
+ return rr_update_answer(&rr_copy, 0, req);
}
static int read_cache_rr(namedb_txn_t *txn, knot_rrset_t *cache_rr, uint32_t timestamp,
- rr_callback_t cb, struct kr_layer_param *param)
+ rr_callback_t cb, struct kr_request *req)
{
/* Query cache for requested record */
if (kr_cache_peek(txn, cache_rr, &timestamp) != KNOT_EOK) {
return KNOT_STATE_NOOP;
}
- return cb(cache_rr, timestamp, param);
+ return cb(cache_rr, timestamp, req);
}
static int begin(knot_layer_t *ctx, void *module_param)
static int read_cache(knot_layer_t *ctx, knot_pkt_t *pkt)
{
assert(pkt && ctx);
- struct kr_layer_param *param = ctx->data;
- struct kr_query *cur = kr_rplan_current(param->rplan);
+ struct kr_request *req = ctx->data;
+ struct kr_rplan *rplan = &req->rplan;
+ struct kr_query *cur = kr_rplan_current(rplan);
if (cur == NULL) {
return ctx->state;
}
- namedb_txn_t *txn = kr_rplan_txn_acquire(param->rplan, NAMEDB_RDONLY);
+ namedb_txn_t *txn = kr_rplan_txn_acquire(rplan, NAMEDB_RDONLY);
uint32_t timestamp = cur->timestamp.tv_sec;
knot_rrset_t cache_rr;
knot_rrset_init(&cache_rr, cur->sname, cur->stype, cur->sclass);
}
/* Try to find expected record first. */
- int state = read_cache_rr(txn, &cache_rr, timestamp, callback, param);
+ int state = read_cache_rr(txn, &cache_rr, timestamp, callback, req);
if (state == KNOT_STATE_DONE) {
DEBUG_MSG("=> satisfied from cache\n");
cur->flags |= QUERY_RESOLVED;
/* Check if CNAME chain exists. */
cache_rr.type = KNOT_RRTYPE_CNAME;
- state = read_cache_rr(txn, &cache_rr, timestamp, callback, param);
+ state = read_cache_rr(txn, &cache_rr, timestamp, callback, req);
if (state != KNOT_STATE_NOOP) {
if (cur->stype != KNOT_RRTYPE_CNAME) {
const knot_dname_t *cname = knot_cname_name(&cache_rr.rrs);
- if (kr_rplan_push(param->rplan, cur->parent, cname, cur->sclass, cur->stype) == NULL) {
+ if (kr_rplan_push(rplan, cur->parent, cname, cur->sclass, cur->stype) == NULL) {
return KNOT_STATE_FAIL;
}
}
static int write_cache(knot_layer_t *ctx, knot_pkt_t *pkt)
{
- struct kr_layer_param *param = ctx->data;
- struct kr_query *query = kr_rplan_current(param->rplan);
+ struct kr_request *req = ctx->data;
+ struct kr_rplan *rplan = &req->rplan;
+ struct kr_query *query = kr_rplan_current(rplan);
/* Don't cache anything if failed. */
if (query == NULL || ctx->state == KNOT_STATE_FAIL) {
}
/* Open write transaction */
- mm_ctx_t *pool = param->rplan->pool;
+ mm_ctx_t *pool = rplan->pool;
uint32_t timestamp = query->timestamp.tv_sec;
- namedb_txn_t *txn = kr_rplan_txn_acquire(param->rplan, 0);
+ namedb_txn_t *txn = kr_rplan_txn_acquire(rplan, 0);
if (txn == NULL) {
return ctx->state; /* Couldn't acquire cache, ignore. */
}
#define DEBUG_MSG(fmt...) QRDEBUG(kr_rplan_current(rplan), "resl", fmt)
-/* Defines */
-#define ITER_LIMIT 50
-
/** Invalidate current NS/addr pair. */
static int invalidate_ns(struct kr_rplan *rplan, struct kr_query *qry)
{
/* Different processing for network error */
int state = KNOT_STATE_FAIL;
- if (packet->size == 0) {
+ if (!packet || packet->size == 0) {
/* Network error, retry over TCP. */
if (!(qry->flags & QUERY_TCP)) {
/** @todo This should just penalize UDP and elect next best. */
return kr_rplan_empty(&request->rplan) ? KNOT_STATE_DONE : KNOT_STATE_PRODUCE;
}
-int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *proto, knot_pkt_t *packet)
+int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *type, knot_pkt_t *packet)
{
struct kr_rplan *rplan = &request->rplan;
struct kr_query *qry = kr_rplan_current(rplan);
/* Issue dependent query to this address */
*dst = &qry->ns.addr.ip;
- *proto = (qry->flags & QUERY_TCP) ? SOCK_STREAM : SOCK_DGRAM;
+ *type = (qry->flags & QUERY_TCP) ? SOCK_STREAM : SOCK_DGRAM;
return state;
}
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/** \addtogroup resolution
- * @{
- */
-
#pragma once
+#include <netinet/in.h>
+#include <libknot/processing/overlay.h>
#include <libknot/packet/pkt.h>
#include "lib/generic/array.h"
+#include "lib/rplan.h"
#include "lib/module.h"
-/** Array of modules. */
+/**
+ * @file resolve.h
+ * @brief This header provides a high-level API for simple name resolution,
+ * and a lower-level "consumer-producer" API that lets you write custom I/O
+ * or a special iterative resolution driver.
+ *
+ * # Example usage of the high-level API:
+ *
+ * @code{.c}
+ *
+ * struct kr_context ctx = {
+ * .pool = NULL, // for persistent data
+ * .cache = ..., // open cache instance (or NULL)
+ * .layers = {} // loaded layers
+ * };
+ *
+ * // Push basic layers
+ * array_push(ctx.layers, iterate_layer);
+ * array_push(ctx.layers, itercache_layer);
+ *
+ * // Resolve "IN A cz."
+ * knot_pkt_t *answer = knot_pkt_new(NULL, 65535, ctx.pool);
+ * int ret = kr_resolve(&ctx, answer, (uint8_t*)"\x02cz", 1, 1);
+ * printf("rcode: %d, ancount: %u\n",
+ * knot_wire_get_rcode(answer->wire),
+ * knot_wire_get_ancount(answer->wire));
+ * @endcode
+ *
+ * # Example usage of the iterative API:
+ *
+ * @code{.c}
+ *
+ * // Create request and its memory pool
+ * struct kr_request req;
+ * mm_ctx_mempool(&req.pool, 4096);
+ * kr_resolve_begin(&req, ctx, answer);
+ * int state = kr_resolve_query(&req, qname, qclass, qtype);
+ *
+ * // Generate answer
+ * while (state == KNOT_STATE_PRODUCE) {
+ *
+ * // An additional query was generated; do the I/O and pass back the answer
+ * state = kr_resolve_produce(&req, &addr, &type, query);
+ * while (state == KNOT_STATE_CONSUME) {
+ * int ret = sendrecv(addr, type, query, resp);
+ *
+ * // If I/O fails, make "resp" empty
+ * state = kr_resolve_consume(&req, resp);
+ * knot_pkt_clear(resp);
+ * }
+ * knot_pkt_clear(query);
+ * }
+ *
+ * // "state" is either DONE or FAIL
+ * kr_resolve_finish(&req, state);
+ *
+ * @endcode
+ */
+
+/* @cond internal Array of modules. */
typedef array_t(struct kr_module) module_array_t;
+/* @endcond */
/**
* Name resolution context.
uint32_t options;
};
+/**
+ * Name resolution request.
+ *
+ * Keeps information about the current query processing between calls to
+ * the processing APIs, i.e. the currently resolved query, the resolution plan, ...
+ * Use this instead of the simple interface if you want to implement
+ * multiplexing or custom I/O.
+ *
+ * @note All data for this request must be allocated from the given pool.
+ */
+struct kr_request {
+ struct kr_context *ctx;
+ struct kr_rplan rplan;
+ struct knot_overlay overlay;
+ knot_pkt_t *answer;
+ mm_ctx_t pool;
+};
+
/**
* Resolve an input query and produce a packet with an answer.
+ *
* @note The function doesn't change the packet question or message ID.
- * @param ctx resolution context
+ *
+ * @param ctx resolution context
* @param answer answer packet to be written
- * @param qname resolved query name
+ * @param qname resolved query name
* @param qclass resolved query class
- * @param qtype resolved query type
- * @return KNOT_E*
+ * @param qtype resolved query type
+ * @return 0 or an error code
*/
int kr_resolve(struct kr_context* ctx, knot_pkt_t *answer,
const knot_dname_t *qname, uint16_t qclass, uint16_t qtype);
-/** @} */
+/**
+ * Begin name resolution.
+ *
+ * @note Expects the request to have an initialized mempool; the "answer" packet will
+ * be kept during the resolution and will contain the final answer at the end.
+ *
+ * @param request request state with initialized mempool
+ * @param ctx resolution context
+ * @param answer allocated packet for final answer
+ * @return CONSUME (expecting query)
+ */
+int kr_resolve_begin(struct kr_request *request, struct kr_context *ctx, knot_pkt_t *answer);
+
+/**
+ * Push a new query to be resolved.
+ * @param request request state (if it already has a question, that one will be resolved first)
+ * @param qname
+ * @param qclass
+ * @param qtype
+ * @return PRODUCE|FAIL
+ */
+int kr_resolve_query(struct kr_request *request, const knot_dname_t *qname, uint16_t qclass, uint16_t qtype);
+
+/**
+ * Consume an input packet (either the first query or an answer to a query originated by kr_resolve_produce()).
+ *
+ * @note If the I/O fails, provide an empty or NULL packet; this makes the iterator recognize a nameserver failure.
+ *
+ * @param request request state (awaiting input)
+ * @param packet [in] input packet
+ * @return any state
+ */
+int kr_resolve_consume(struct kr_request *request, knot_pkt_t *packet);
+
+/**
+ * Produce either the next additional query or finish.
+ *
+ * If CONSUME is returned, then dst, type and packet are filled with
+ * appropriate values and the caller is responsible for sending the query and
+ * receiving the answer. For any other state, the content of these variables is undefined.
+ *
+ * @param request request state (in PRODUCE state)
+ * @param dst [out] possible address of the next nameserver
+ * @param type [out] possible used socket type (SOCK_STREAM, SOCK_DGRAM)
+ * @param packet [out] packet to be filled with additional query
+ * @return any state
+ */
+int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *type, knot_pkt_t *packet);
+
+/**
+ * Finish resolution and commit results if the state is DONE.
+ *
+ * @note The structures will be deinitialized, but the assigned memory pool will not
+ * be destroyed, as it is owned by the caller.
+ *
+ * @param request request state
+ * @param state either DONE or FAIL state
+ * @return DONE
+ */
+int kr_resolve_finish(struct kr_request *request, int state);
func Finish(ctx *C.knot_layer_t) C.int {
// Since the context is unsafe.Pointer, we need to cast it
- var param *C.struct_kr_layer_param = (*C.struct_kr_layer_param)(ctx.data)
+ var param *C.struct_kr_request = (*C.struct_kr_request)(ctx.data)
// Now we can use the C API as well
fmt.Printf("[go] resolved %d queries", C.list_size(¶m.rplan.resolved))
return 0
}
func Finish(ctx *C.knot_layer_t) C.int {
- var param *C.struct_kr_layer_param = (*C.struct_kr_layer_param)(ctx.data)
+ var param *C.struct_kr_request = (*C.struct_kr_request)(ctx.data)
fmt.Printf("[gostats] resolved %d queries", C.list_size(¶m.rplan.resolved))
return 0
}
/* Defaults */
#define DEFAULT_FILE "/etc/hosts"
-#define DEBUG_MSG(fmt...) QRDEBUG(NULL, "hint", fmt)
-typedef int (*rr_callback_t)(const knot_rrset_t *, unsigned, struct kr_layer_param *);
+#define DEBUG_MSG(qry, fmt...) QRDEBUG(qry, "hint", fmt)
+typedef int (*rr_callback_t)(const knot_rrset_t *, unsigned, struct kr_request *);
/** @todo Hack until layers can store userdata. */
static struct kr_zonecut *g_map = NULL;
return ctx->state;
}
-static int answer_query(pack_t *addr_set, struct kr_layer_param *param)
+static int answer_query(pack_t *addr_set, struct kr_request *param)
{
- struct kr_query *qry = kr_rplan_current(param->rplan);
+ struct kr_query *qry = kr_rplan_current(&param->rplan);
+ assert(qry);
+
knot_rrset_t rr;
knot_rrset_init(&rr, qry->sname, qry->stype, KNOT_CLASS_IN);
int family_len = sizeof(struct in_addr);
callback(&rr, 0, param);
/* Finalize */
- DEBUG_MSG("<= answered from hints\n");
+ DEBUG_MSG(qry, "<= answered from hints\n");
knot_rdataset_clear(&rr.rrs, NULL);
qry->flags |= QUERY_RESOLVED;
return KNOT_STATE_DONE;
static int query(knot_layer_t *ctx, knot_pkt_t *pkt)
{
assert(pkt && ctx);
- struct kr_layer_param *param = ctx->data;
- struct kr_query *qry = kr_rplan_current(param->rplan);
+ struct kr_request *param = ctx->data;
+ struct kr_query *qry = kr_rplan_current(&param->rplan);
if (qry->stype != KNOT_RRTYPE_A && qry->stype != KNOT_RRTYPE_AAAA) {
return ctx->state;
}
}
}
- DEBUG_MSG("loaded %zu hints\n", count);
+ DEBUG_MSG(NULL, "loaded %zu hints\n", count);
return kr_ok();
}
{
auto_fclose FILE *fp = fopen(path, "r");
if (fp == NULL) {
- DEBUG_MSG("reading '%s' failed: %s\n", path, strerror(errno));
+ DEBUG_MSG(NULL, "reading '%s' failed: %s\n", path, strerror(errno));
return kr_error(errno);
} else {
- DEBUG_MSG("reading '%s'\n", path);
+ DEBUG_MSG(NULL, "reading '%s'\n", path);
}
/* Create pool and copy itself */