git.ipfire.org Git - thirdparty/knot-resolver.git/commitdiff
layer: rename knot_ identifiers that are private now
author Vladimír Čunát <vladimir.cunat@nic.cz>
Tue, 8 Nov 2016 16:58:30 +0000 (17:58 +0100)
committer Vladimír Čunát <vladimir.cunat@nic.cz>
Tue, 8 Nov 2016 16:58:30 +0000 (17:58 +0100)
It causes lots of line changes, but it would be confusing to keep the
current state over the long term.
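
In short: knot_layer_t becomes kr_layer_t, knot_layer_api_t becomes kr_layer_api_t, and the
KNOT_STATE_* constants (including KNOT_STATE_YIELD) become KR_STATE_*. For orientation only,
here is a minimal sketch of a module layer written against the renamed API, following the
pattern of the built-in layers in the diff below; it is not part of this commit, the
mylayer_* names are hypothetical, and it assumes the headers pull in knot_pkt_t and
struct kr_request as in this tree.

        #include "lib/layer.h"
        #include "lib/module.h"

        /* Hypothetical consume callback: skip work once the request has
         * already failed, otherwise pass the current state through. */
        static int mylayer_consume(kr_layer_t *ctx, knot_pkt_t *pkt)
        {
                struct kr_request *req = ctx->data;
                if (ctx->state & KR_STATE_FAIL) {
                        return ctx->state; /* Already failed, skip */
                }
                /* Inspect req/pkt here; this sketch only passes through. */
                (void)req; (void)pkt;
                return ctx->state;
        }

        /* Hypothetical module entry point returning the renamed API struct. */
        const kr_layer_api_t *mylayer_layer(struct kr_module *module)
        {
                static const kr_layer_api_t _layer = {
                        .consume = &mylayer_consume,
                };
                (void)module; /* unused in this sketch */
                return &_layer;
        }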

18 files changed:
daemon/ffimodule.c
daemon/worker.c
lib/README.rst
lib/layer.h
lib/layer/iterate.c
lib/layer/pktcache.c
lib/layer/rrcache.c
lib/layer/validate.c
lib/module.c
lib/module.h
lib/resolve.c
lib/resolve.h
modules/README.rst
modules/cookies/cookiemonster.c
modules/cookies/cookiemonster.h
modules/cookies/cookies.c
modules/hints/hints.c
modules/stats/stats.c

daemon/ffimodule.c
index 3b6c059070a7dcec6c89bcc1ed8de126a4f9d231..65410a367f66c824225867eab3e25467d7218929 100644 (file)
@@ -107,7 +107,7 @@ static int l_ffi_init(struct kr_module *module)
 
 /** @internal Unregister layer callback reference from registry. */
 #define LAYER_UNREGISTER(L, api, name) do { \
-       int *cb_slot = (int *)((char *)api + sizeof(knot_layer_api_t)); \
+       int *cb_slot = (int *)((char *)api + sizeof(kr_layer_api_t)); \
        if (cb_slot[SLOT_ ## name] > 0) \
                luaL_unref(L, LUA_REGISTRYINDEX, cb_slot[SLOT_ ## name]); \
 } while(0)
@@ -121,7 +121,7 @@ static int l_ffi_deinit(struct kr_module *module)
                ret = l_ffi_call(L, 1);
        }
        /* Free the layer API wrapper (unconst it) */
-       knot_layer_api_t* api = module->data;
+       kr_layer_api_t* api = module->data;
        if (api) {
                LAYER_UNREGISTER(L, api, begin);
                LAYER_UNREGISTER(L, api, finish);
@@ -137,7 +137,7 @@ static int l_ffi_deinit(struct kr_module *module)
 
 /** @internal Helper for retrieving layer Lua function by name. */
 #define LAYER_FFI_CALL(ctx, slot) \
-       int *cb_slot = (int *)((char *)(ctx)->api + sizeof(knot_layer_api_t)); \
+       int *cb_slot = (int *)((char *)(ctx)->api + sizeof(kr_layer_api_t)); \
        if (cb_slot[SLOT_ ## slot] <= 0) { \
                return ctx->state; \
        } \
@@ -146,21 +146,21 @@ static int l_ffi_deinit(struct kr_module *module)
        lua_rawgeti(L, LUA_REGISTRYINDEX, cb_slot[SLOT_ ## slot]); \
        lua_pushnumber(L, ctx->state)
 
-static int l_ffi_layer_begin(knot_layer_t *ctx, void *module_param)
+static int l_ffi_layer_begin(kr_layer_t *ctx, void *module_param)
 {
        LAYER_FFI_CALL(ctx, begin);
        lua_pushlightuserdata(L, ctx->data);
        return l_ffi_call(L, 2);
 }
 
-static int l_ffi_layer_reset(knot_layer_t *ctx)
+static int l_ffi_layer_reset(kr_layer_t *ctx)
 {
        LAYER_FFI_CALL(ctx, reset);
        lua_pushlightuserdata(L, ctx->data);
        return l_ffi_call(L, 2);
 }
 
-static int l_ffi_layer_finish(knot_layer_t *ctx)
+static int l_ffi_layer_finish(kr_layer_t *ctx)
 {
        struct kr_request *req = ctx->data;
        LAYER_FFI_CALL(ctx, finish);
@@ -169,9 +169,9 @@ static int l_ffi_layer_finish(knot_layer_t *ctx)
        return l_ffi_call(L, 3);
 }
 
-static int l_ffi_layer_consume(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int l_ffi_layer_consume(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
-       if (ctx->state & KNOT_STATE_FAIL) {
+       if (ctx->state & KR_STATE_FAIL) {
                return ctx->state; /* Already failed, skip */
        }
        LAYER_FFI_CALL(ctx, consume);
@@ -180,9 +180,9 @@ static int l_ffi_layer_consume(knot_layer_t *ctx, knot_pkt_t *pkt)
        return l_ffi_call(L, 3);
 }
 
-static int l_ffi_layer_produce(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int l_ffi_layer_produce(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
-       if (ctx->state & (KNOT_STATE_FAIL)) {
+       if (ctx->state & (KR_STATE_FAIL)) {
                return ctx->state; /* Already failed or done, skip */
        }
        LAYER_FFI_CALL(ctx, produce);
@@ -195,7 +195,7 @@ static int l_ffi_layer_produce(knot_layer_t *ctx, knot_pkt_t *pkt)
 /** @internal Conditionally register layer trampoline
   * @warning Expects 'module.layer' to be on top of Lua stack. */
 #define LAYER_REGISTER(L, api, name) do { \
-       int *cb_slot = (int *)((char *)api + sizeof(knot_layer_api_t)); \
+       int *cb_slot = (int *)((char *)api + sizeof(kr_layer_api_t)); \
        lua_getfield((L), -1, #name); \
        if (!lua_isnil((L), -1)) { \
                (api)->name = l_ffi_layer_ ## name; \
@@ -206,12 +206,12 @@ static int l_ffi_layer_produce(knot_layer_t *ctx, knot_pkt_t *pkt)
 } while(0)
 
 /** @internal Create C layer api wrapper. */
-static knot_layer_api_t *l_ffi_layer_create(lua_State *L, struct kr_module *module)
+static kr_layer_api_t *l_ffi_layer_create(lua_State *L, struct kr_module *module)
 {
        /* Fabricate layer API wrapping the Lua functions
         * reserve slots after it for references to Lua callbacks. */
-       const size_t api_length = sizeof(knot_layer_api_t) + (SLOT_count * SLOT_size);
-       knot_layer_api_t *api = malloc(api_length);
+       const size_t api_length = sizeof(kr_layer_api_t) + (SLOT_count * SLOT_size);
+       kr_layer_api_t *api = malloc(api_length);
        if (api) {
                memset(api, 0, api_length);
                LAYER_REGISTER(L, api, begin);
@@ -227,10 +227,10 @@ static knot_layer_api_t *l_ffi_layer_create(lua_State *L, struct kr_module *modu
 }
 
 /** @internal Retrieve C layer api wrapper. */
-static const knot_layer_api_t *l_ffi_layer(struct kr_module *module)
+static const kr_layer_api_t *l_ffi_layer(struct kr_module *module)
 {
        if (module) {
-               return (const knot_layer_api_t *)module->data;
+               return (const kr_layer_api_t *)module->data;
        }
        return NULL;
 }
daemon/worker.c
index b6f1afb6843971ea16122f57089b1b22acb77e50..a4f21158534be47d7430c00ce987e0ee2359303f 100644 (file)
@@ -705,7 +705,7 @@ static int qr_task_finalize(struct qr_task *task, int state)
        task->finished = true;
        /* Send back answer */
        (void) qr_task_send(task, task->source.handle, (struct sockaddr *)&task->source.addr, task->req.answer);
-       return state == KNOT_STATE_DONE ? 0 : kr_error(EIO);
+       return state == KR_STATE_DONE ? 0 : kr_error(EIO);
 }
 
 static int qr_task_step(struct qr_task *task, const struct sockaddr *packet_source, knot_pkt_t *packet)
@@ -722,15 +722,15 @@ static int qr_task_step(struct qr_task *task, const struct sockaddr *packet_sour
        task->addrlist_count = 0;
        task->addrlist_turn = 0;
        int state = kr_resolve_consume(&task->req, packet_source, packet);
-       while (state == KNOT_STATE_PRODUCE) {
+       while (state == KR_STATE_PRODUCE) {
                state = kr_resolve_produce(&task->req, &task->addrlist, &sock_type, task->pktbuf);
                if (unlikely(++task->iter_count > KR_ITER_LIMIT || task->timeouts >= KR_TIMEOUT_LIMIT)) {
-                       return qr_task_finalize(task, KNOT_STATE_FAIL);
+                       return qr_task_finalize(task, KR_STATE_FAIL);
                }
        }
 
        /* We're done, no more iterations needed */
-       if (state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
+       if (state & (KR_STATE_DONE|KR_STATE_FAIL)) {
                return qr_task_finalize(task, state);
        } else if (!task->addrlist || sock_type < 0) {
                return qr_task_step(task, NULL, NULL);
@@ -794,7 +794,7 @@ static int qr_task_step(struct qr_task *task, const struct sockaddr *packet_sour
        /* Start next step with timeout, fatal if can't start a timer. */
        if (ret != 0) {
                subreq_finalize(task, packet_source, packet);
-               return qr_task_finalize(task, KNOT_STATE_FAIL);
+               return qr_task_finalize(task, KR_STATE_FAIL);
        }
        return 0;
 }
lib/README.rst
index 0019bf72342209f12954464a6934431d53f22fa6..d82b30667823b4ff3d2302e3ae240332faa4f285 100644 (file)
@@ -92,7 +92,7 @@ This structure contains pointers to resolution context, resolution plan and also
 
 .. code-block:: c
 
-       int consume(knot_layer_t *ctx, knot_pkt_t *pkt)
+       int consume(kr_layer_t *ctx, knot_pkt_t *pkt)
        {
                struct kr_request *request = ctx->data;
                struct kr_query *query = request->current_query;
@@ -102,7 +102,7 @@ This is only passive processing of the incoming answer. If you want to change th
 
 .. code-block:: c
 
-       int produce(knot_layer_t *ctx, knot_pkt_t *pkt)
+       int produce(kr_layer_t *ctx, knot_pkt_t *pkt)
        {
                struct kr_request *request = ctx->data;
                struct kr_query *cur = request->current_query;
@@ -112,7 +112,7 @@ This is only passive processing of the incoming answer. If you want to change th
                        /* This flag makes the resolver move the query
                         * to the "resolved" list. */
                        cur->flags |= QUERY_RESOLVED;
-                       return KNOT_STATE_DONE;
+                       return KR_STATE_DONE;
                }
 
                /* Pass-through. */
@@ -123,7 +123,7 @@ It is possible to not only act during the query resolution, but also to view the
 
 .. code-block:: c
 
-       int finish(knot_layer_t *ctx)
+       int finish(kr_layer_t *ctx)
        {
                struct kr_request *request = ctx->data;
                struct kr_rplan *rplan = request->rplan;
lib/layer.h
index 6276f6db099c353a9071720577468a5ff50c698e..95892bd5ccd7588e36df4b75a3af18aba670c146 100644 (file)
  *  Each state represents the state machine transition,
  *  and determines readiness for the next action.
  */
-enum knot_layer_state {
-       KNOT_STATE_NOOP    = 0,      /*!< N/A */
-       KNOT_STATE_CONSUME = 1 << 0, /*!< Consume data. */
-       KNOT_STATE_PRODUCE = 1 << 1, /*!< Produce data. */
-       KNOT_STATE_DONE    = 1 << 2, /*!< Finished. */
-       KNOT_STATE_FAIL    = 1 << 3  /*!< Error. */
+enum kr_layer_state {
+       KR_STATE_NOOP    = 0,      /*!< N/A */
+       KR_STATE_CONSUME = 1 << 0, /*!< Consume data. */
+       KR_STATE_PRODUCE = 1 << 1, /*!< Produce data. */
+       KR_STATE_DONE    = 1 << 2, /*!< Finished. */
+       KR_STATE_FAIL    = 1 << 3  /*!< Error. */
 };
 
 /* Forward declarations. */
-struct knot_layer_api;
+struct kr_layer_api;
 
 /*! \brief Packet processing context. */
-typedef struct knot_layer {
+typedef struct kr_layer {
        knot_mm_t *mm;   /* Processing memory context. */
-       uint16_t state;  /* Bitmap of enum knot_layer_state. */
+       uint16_t state;  /* Bitmap of enum kr_layer_state. */
        void *data;      /* Module specific. */
-       const struct knot_layer_api *api;
-} knot_layer_t;
+       const struct kr_layer_api *api;
+} kr_layer_t;
 
 /*! \brief Packet processing module API. */
-struct knot_layer_api {
-       int (*begin)(knot_layer_t *ctx, void *module_param);
-       int (*reset)(knot_layer_t *ctx);
-       int (*finish)(knot_layer_t *ctx);
-       int (*consume)(knot_layer_t *ctx, knot_pkt_t *pkt);
-       int (*produce)(knot_layer_t *ctx, knot_pkt_t *pkt);
-       int (*fail)(knot_layer_t *ctx, knot_pkt_t *pkt);
+struct kr_layer_api {
+       int (*begin)(kr_layer_t *ctx, void *module_param);
+       int (*reset)(kr_layer_t *ctx);
+       int (*finish)(kr_layer_t *ctx);
+       int (*consume)(kr_layer_t *ctx, knot_pkt_t *pkt);
+       int (*produce)(kr_layer_t *ctx, knot_pkt_t *pkt);
+       int (*fail)(kr_layer_t *ctx, knot_pkt_t *pkt);
        void *data;
 };
 
-typedef struct knot_layer_api knot_layer_api_t;
+typedef struct kr_layer_api kr_layer_api_t;
 
 /** Pickled layer state (api, input, state). */
 struct kr_layer_pickle {
     struct kr_layer_pickle *next;
-    const struct knot_layer_api *api;
+    const struct kr_layer_api *api;
     knot_pkt_t *pkt;
     unsigned state;
 };
 
 /* Repurpose layer states. */
-#define KNOT_STATE_YIELD KNOT_STATE_NOOP
+#define KR_STATE_YIELD KR_STATE_NOOP
lib/layer/iterate.c
index 8c2bb8e984b088353afb71478dc32bbd34732d77..37759041bea38720a1bd9ce90ef91161c24593e1 100644 (file)
@@ -137,15 +137,15 @@ static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
                const knot_rdata_t *rdata = rr->rrs.data;
                if (!(query->flags & QUERY_ALLOW_LOCAL) &&
                        !is_valid_addr(knot_rdata_data(rdata), knot_rdata_rdlen(rdata))) {
-                       return KNOT_STATE_CONSUME; /* Ignore invalid addresses */
+                       return KR_STATE_CONSUME; /* Ignore invalid addresses */
                }
                int ret = kr_zonecut_add(&query->zone_cut, rr->owner, rdata);
                if (ret != 0) {
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
        }
 
-       return KNOT_STATE_CONSUME;
+       return KR_STATE_CONSUME;
 }
 
 static int update_parent(const knot_rrset_t *rr, struct kr_query *qry)
@@ -158,7 +158,7 @@ static int update_answer(const knot_rrset_t *rr, unsigned hint, knot_pkt_t *answ
        /* Scrub DNSSEC records when not requested. */
        if (!knot_pkt_has_dnssec(answer)) {
                if (rr->type != knot_pkt_qtype(answer) && knot_rrtype_is_dnssec(rr->type)) {
-                       return KNOT_STATE_DONE; /* Scrub */
+                       return KR_STATE_DONE; /* Scrub */
                }
        }
        /* Copy record, as it may be accessed after packet processing. */
@@ -167,10 +167,10 @@ static int update_answer(const knot_rrset_t *rr, unsigned hint, knot_pkt_t *answ
        int ret = knot_pkt_put(answer, hint, copy, KNOT_PF_FREE);
        if (ret != KNOT_EOK) {
                knot_wire_set_tc(answer->wire);
-               return KNOT_STATE_DONE;
+               return KR_STATE_DONE;
        }
 
-       return KNOT_STATE_DONE;
+       return KR_STATE_DONE;
 }
 
 static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_request *req)
@@ -215,19 +215,19 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_request
 {
        struct kr_query *qry = req->current_query;
        struct kr_zonecut *cut = &qry->zone_cut;
-       int state = KNOT_STATE_CONSUME;
+       int state = KR_STATE_CONSUME;
 
        /* Authority MUST be at/below the authority of the nameserver, otherwise
         * possible cache injection attempt. */
        if (!knot_dname_in(cut->name, rr->owner)) {
                DEBUG_MSG("<= authority: ns outside bailiwick\n");
 #ifdef STRICT_MODE
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
 #else
                /* Workaround: ignore out-of-bailiwick NSs for authoritative answers,
                 * but fail for referrals. This is important to detect lame answers. */
                if (knot_pkt_section(pkt, KNOT_ANSWER)->count == 0) {
-                       state = KNOT_STATE_FAIL;
+                       state = KR_STATE_FAIL;
                }
                return state;
 #endif
@@ -248,7 +248,7 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_request
                } else {
                        kr_zonecut_set(cut, rr->owner);
                }
-               state = KNOT_STATE_DONE;
+               state = KR_STATE_DONE;
        }
 
        /* Fetch glue for each NS */
@@ -280,19 +280,19 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr, struct kr_request
 
 static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
 {
-       int result = KNOT_STATE_CONSUME;
+       int result = KR_STATE_CONSUME;
        struct kr_query *qry = req->current_query;
        const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);
 
        /* Stub resolution doesn't process authority */
        if (qry->flags & QUERY_STUB) {
-               return KNOT_STATE_CONSUME;
+               return KR_STATE_CONSUME;
        }
 
 #ifdef STRICT_MODE
        /* AA, terminate resolution chain. */
        if (knot_wire_get_aa(pkt->wire)) {
-               return KNOT_STATE_CONSUME;
+               return KR_STATE_CONSUME;
        }
 #else
        /* Work around servers sending back CNAME with different delegation and no AA. */
@@ -300,7 +300,7 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
        if (an->count > 0 && ns->count > 0) {
                const knot_rrset_t *rr = knot_pkt_rr(an, 0);
                if (rr->type == KNOT_RRTYPE_CNAME) {
-                       return KNOT_STATE_CONSUME;
+                       return KR_STATE_CONSUME;
                }
        }
 #endif
@@ -311,8 +311,8 @@ static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
                if (rr->type == KNOT_RRTYPE_NS) {
                        int state = update_cut(pkt, rr, req);
                        switch(state) {
-                       case KNOT_STATE_DONE: result = state; break;
-                       case KNOT_STATE_FAIL: return state; break;
+                       case KR_STATE_DONE: result = state; break;
+                       case KR_STATE_FAIL: return state; break;
                        default:              /* continue */ break;
                        }
                } else if (rr->type == KNOT_RRTYPE_SOA && knot_dname_is_sub(rr->owner, qry->zone_cut.name)) {
@@ -379,14 +379,14 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
            (pkt_class & (PKT_NOERROR|PKT_NXDOMAIN|PKT_REFUSED|PKT_NODATA))) {
                DEBUG_MSG("<= found cut, retrying with non-minimized name\n");
                query->flags |= QUERY_NO_MINIMIZE;
-               return KNOT_STATE_CONSUME;
+               return KR_STATE_CONSUME;
        }
 
        /* This answer didn't improve resolution chain, therefore must be authoritative (relaxed to negative). */
        if (!(query->flags & QUERY_STUB) && !is_authoritative(pkt, query)) {
                if (pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) {
                        DEBUG_MSG("<= lame response: non-auth sent negative response\n");
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
        }
 
@@ -415,7 +415,7 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
                                hint = KNOT_COMPR_HINT_QNAME;
                        }
                        int state = is_final ? update_answer(rr, hint, req->answer) : update_parent(rr, query);
-                       if (state == KNOT_STATE_FAIL) {
+                       if (state == KR_STATE_FAIL) {
                                return state;
                        }
                        /* can_follow is false, therefore QUERY_DNSSEC_WANT flag is set.
@@ -435,7 +435,7 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
                        }
                        if (cname_chain_len > an->count || cname_chain_len > KR_CNAME_CHAIN_LIMIT) {
                                DEBUG_MSG("<= too long cname chain\n");
-                               return KNOT_STATE_FAIL;
+                               return KR_STATE_FAIL;
                        }
                        /* Don't use pending_cname immediately.
                         * There are can be records for "old" cname. */
@@ -467,7 +467,7 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
                                        continue;
                                }
                                finalize_answer(pkt, query, req);
-                               return KNOT_STATE_DONE;
+                               return KR_STATE_DONE;
                        }
                }
                DEBUG_MSG("<= cname chain, following\n");
@@ -478,12 +478,12 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
                            q->stype == query->stype   &&
                            knot_dname_is_equal(q->sname, cname)) {
                                DEBUG_MSG("<= cname chain loop\n");
-                               return KNOT_STATE_FAIL;
+                               return KR_STATE_FAIL;
                        }
                }
                struct kr_query *next = kr_rplan_push(&req->rplan, query->parent, cname, query->sclass, query->stype);
                if (!next) {
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
                next->flags |= QUERY_AWAIT_CUT;
                /* Want DNSSEC if it's posible to secure this name (e.g. is covered by any TA) */
@@ -494,22 +494,22 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
        } else if (!query->parent) {
                finalize_answer(pkt, query, req);
        }
-       return KNOT_STATE_DONE;
+       return KR_STATE_DONE;
 }
 
 /** Error handling, RFC1034 5.3.3, 4d. */
 static int resolve_error(knot_pkt_t *pkt, struct kr_request *req)
 {
-       return KNOT_STATE_FAIL;
+       return KR_STATE_FAIL;
 }
 
 /* State-less single resolution iteration step, not needed. */
-static int reset(knot_layer_t *ctx)  { return KNOT_STATE_PRODUCE; }
+static int reset(kr_layer_t *ctx)  { return KR_STATE_PRODUCE; }
 
 /* Set resolution context and parameters. */
-static int begin(knot_layer_t *ctx, void *module_param)
+static int begin(kr_layer_t *ctx, void *module_param)
 {
-       if (ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
+       if (ctx->state & (KR_STATE_DONE|KR_STATE_FAIL)) {
                return ctx->state;
        }
        /*
@@ -522,7 +522,7 @@ static int begin(knot_layer_t *ctx, void *module_param)
        const struct kr_request *req = ctx->data;
        const knot_pkt_t *pkt = req->qsource.packet;
        if (!pkt || knot_wire_get_qdcount(pkt->wire) == 0) {
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
        return reset(ctx);
 }
@@ -547,22 +547,22 @@ int kr_make_query(struct kr_query *query, knot_pkt_t *pkt)
        return kr_ok();
 }
 
-static int prepare_query(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int prepare_query(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        assert(pkt && ctx);
        struct kr_request *req = ctx->data;
        struct kr_query *query = req->current_query;
-       if (!query || ctx->state & (KNOT_STATE_DONE|KNOT_STATE_FAIL)) {
+       if (!query || ctx->state & (KR_STATE_DONE|KR_STATE_FAIL)) {
                return ctx->state;
        }
 
        /* Make query */
        int ret = kr_make_query(query, pkt);
        if (ret != 0) {
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
 
-       return KNOT_STATE_CONSUME;
+       return KR_STATE_CONSUME;
 }
 
 static int resolve_badmsg(knot_pkt_t *pkt, struct kr_request *req, struct kr_query *query)
@@ -573,7 +573,7 @@ static int resolve_badmsg(knot_pkt_t *pkt, struct kr_request *req, struct kr_que
                return resolve_error(pkt, req);
        } else {
                query->flags |= QUERY_SAFEMODE;
-               return KNOT_STATE_DONE;
+               return KR_STATE_DONE;
        }
 #else
                return resolve_error(pkt, req);
@@ -584,7 +584,7 @@ static int resolve_badmsg(knot_pkt_t *pkt, struct kr_request *req, struct kr_que
  *
  *  This roughly corresponds to RFC1034, 5.3.3 4a-d.
  */
-static int resolve(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        assert(pkt && ctx);
        struct kr_request *req = ctx->data;
@@ -615,7 +615,7 @@ static int resolve(knot_layer_t *ctx, knot_pkt_t *pkt)
                        }
                        query->flags |= QUERY_TCP;
                }
-               return KNOT_STATE_CONSUME;
+               return KR_STATE_CONSUME;
        }
 
 #ifndef NDEBUG
@@ -637,7 +637,7 @@ static int resolve(knot_layer_t *ctx, knot_pkt_t *pkt)
                        return resolve_error(pkt, req);
                } else {
                        query->flags |= QUERY_NO_MINIMIZE; /* Drop minimisation as a safe-guard. */
-                       return KNOT_STATE_CONSUME;
+                       return KR_STATE_CONSUME;
                }
        }
        case KNOT_RCODE_FORMERR:
@@ -652,11 +652,11 @@ static int resolve(knot_layer_t *ctx, knot_pkt_t *pkt)
        /* Resolve authority to see if it's referral or authoritative. */
        int state = process_authority(pkt, req);
        switch(state) {
-       case KNOT_STATE_CONSUME: /* Not referral, process answer. */
+       case KR_STATE_CONSUME: /* Not referral, process answer. */
                DEBUG_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
                state = process_answer(pkt, req);
                break;
-       case KNOT_STATE_DONE: /* Referral */
+       case KR_STATE_DONE: /* Referral */
                DEBUG_MSG("<= referral response, follow\n");
                break;
        default:
@@ -667,9 +667,9 @@ static int resolve(knot_layer_t *ctx, knot_pkt_t *pkt)
 }
 
 /** Module implementation. */
-const knot_layer_api_t *iterate_layer(struct kr_module *module)
+const kr_layer_api_t *iterate_layer(struct kr_module *module)
 {
-       static const knot_layer_api_t _layer = {
+       static const kr_layer_api_t _layer = {
                .begin = &begin,
                .reset = &reset,
                .consume = &resolve,
lib/layer/pktcache.c
index f754e44af48864fe65f060a1b359d7032cee4e33..0d0087ec908176b92a45a5119b4102e53b0a165d 100644 (file)
@@ -100,11 +100,11 @@ static int loot_pktcache(struct kr_cache *cache, knot_pkt_t *pkt, struct kr_quer
        return loot_cache_pkt(cache, pkt, qname, rrtype, want_secure, timestamp, flags);
 }
 
-static int pktcache_peek(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int pktcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        struct kr_request *req = ctx->data;
        struct kr_query *qry = req->current_query;
-       if (ctx->state & (KNOT_STATE_FAIL|KNOT_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
+       if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
                return ctx->state; /* Already resolved/failed */
        }
        if (qry->ns.addr[0].ip.sa_family != AF_UNSPEC) {
@@ -127,7 +127,7 @@ static int pktcache_peek(knot_layer_t *ctx, knot_pkt_t *pkt)
                pkt->parsed = pkt->size;
                knot_wire_set_qr(pkt->wire);
                knot_wire_set_aa(pkt->wire);
-               return KNOT_STATE_DONE;
+               return KR_STATE_DONE;
        }
        return ctx->state;
 }
@@ -170,13 +170,13 @@ static uint32_t packet_ttl(knot_pkt_t *pkt, bool is_negative)
        return limit_ttl(ttl);
 }
 
-static int pktcache_stash(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int pktcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        struct kr_request *req = ctx->data;
        struct kr_query *qry = req->current_query;
        /* Cache only answers that make query resolved (i.e. authoritative)
         * that didn't fail during processing and are negative. */
-       if (qry->flags & QUERY_CACHED || ctx->state & KNOT_STATE_FAIL) {
+       if (qry->flags & QUERY_CACHED || ctx->state & KR_STATE_FAIL) {
                return ctx->state; /* Don't cache anything if failed. */
        }
        /* Cache only authoritative answers from IN class. */
@@ -238,9 +238,9 @@ static int pktcache_stash(knot_layer_t *ctx, knot_pkt_t *pkt)
 }
 
 /** Module implementation. */
-const knot_layer_api_t *pktcache_layer(struct kr_module *module)
+const kr_layer_api_t *pktcache_layer(struct kr_module *module)
 {
-       static const knot_layer_api_t _layer = {
+       static const kr_layer_api_t _layer = {
                .produce = &pktcache_peek,
                .consume = &pktcache_stash
        };
lib/layer/rrcache.c
index 1cfcf6536dd5e624613969b79935b22df42945eb..315337b3f1aab355b2ff34db29cf4c3cb2634d1d 100644 (file)
@@ -110,11 +110,11 @@ static int loot_rrcache(struct kr_cache *cache, knot_pkt_t *pkt, struct kr_query
        return ret;
 }
 
-static int rrcache_peek(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int rrcache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        struct kr_request *req = ctx->data;
        struct kr_query *qry = req->current_query;
-       if (ctx->state & (KNOT_STATE_FAIL|KNOT_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
+       if (ctx->state & (KR_STATE_FAIL|KR_STATE_DONE) || (qry->flags & QUERY_NO_CACHE)) {
                return ctx->state; /* Already resolved/failed */
        }
        if (qry->ns.addr[0].ip.sa_family != AF_UNSPEC) {
@@ -145,7 +145,7 @@ static int rrcache_peek(knot_layer_t *ctx, knot_pkt_t *pkt)
                pkt->parsed = pkt->size;
                knot_wire_set_qr(pkt->wire);
                knot_wire_set_aa(pkt->wire);
-               return KNOT_STATE_DONE;
+               return KR_STATE_DONE;
        }
        return ctx->state;
 }
@@ -320,11 +320,11 @@ static int stash_answer(struct kr_query *qry, knot_pkt_t *pkt, map_t *stash, kno
        return kr_ok();
 }
 
-static int rrcache_stash(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int rrcache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        struct kr_request *req = ctx->data;
        struct kr_query *qry = req->current_query;
-       if (!qry || ctx->state & KNOT_STATE_FAIL) {
+       if (!qry || ctx->state & KR_STATE_FAIL) {
                return ctx->state;
        }
        /* Do not cache truncated answers. */
@@ -377,9 +377,9 @@ static int rrcache_stash(knot_layer_t *ctx, knot_pkt_t *pkt)
 }
 
 /** Module implementation. */
-const knot_layer_api_t *rrcache_layer(struct kr_module *module)
+const kr_layer_api_t *rrcache_layer(struct kr_module *module)
 {
-       static const knot_layer_api_t _layer = {
+       static const kr_layer_api_t _layer = {
                .produce = &rrcache_peek,
                .consume = &rrcache_stash
        };
lib/layer/validate.c
index f7d07999b46d5427308d05bd04368696123cc402..4174f49e6594f4ea9b3bbd0a9b80b6885cb9287a 100644 (file)
@@ -274,7 +274,7 @@ static int update_parent_keys(struct kr_query *qry, uint16_t answer_type)
                DEBUG_MSG(qry, "<= parent: updating DNSKEY\n");
                parent->zone_cut.key = knot_rrset_copy(qry->zone_cut.key, parent->zone_cut.pool);
                if (!parent->zone_cut.key) {
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
                break;
        case KNOT_RRTYPE_DS:
@@ -285,7 +285,7 @@ static int update_parent_keys(struct kr_query *qry, uint16_t answer_type)
                } else { /* DS existence proven. */
                        parent->zone_cut.trust_anchor = knot_rrset_copy(qry->zone_cut.trust_anchor, parent->zone_cut.pool);
                        if (!parent->zone_cut.trust_anchor) {
-                               return KNOT_STATE_FAIL;
+                               return KR_STATE_FAIL;
                        }
                }
                break;
@@ -369,13 +369,13 @@ static const knot_dname_t *signature_authority(knot_pkt_t *pkt)
        return NULL;
 }
 
-static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        int ret = 0;
        struct kr_request *req = ctx->data;
        struct kr_query *qry = req->current_query;
        /* Ignore faulty or unprocessed responses. */
-       if (ctx->state & (KNOT_STATE_FAIL|KNOT_STATE_CONSUME)) {
+       if (ctx->state & (KR_STATE_FAIL|KR_STATE_CONSUME)) {
                return ctx->state;
        }
 
@@ -389,7 +389,7 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
        if (!(qry->flags & QUERY_CACHED) && !knot_pkt_has_dnssec(pkt) && !use_signatures) {
                DEBUG_MSG(qry, "<= got insecure response\n");
                qry->flags |= QUERY_DNSSEC_BOGUS;
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
 
        /* Track difference between current TA and signer name.
@@ -399,8 +399,8 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
        const knot_dname_t *ta_name = qry->zone_cut.trust_anchor ? qry->zone_cut.trust_anchor->owner : NULL;
        const knot_dname_t *signer = signature_authority(pkt);
        if (track_pc_change && ta_name && (!signer || !knot_dname_is_equal(ta_name, signer))) {
-               if (ctx->state == KNOT_STATE_YIELD) { /* Already yielded for revalidation. */
-                       return KNOT_STATE_FAIL;
+               if (ctx->state == KR_STATE_YIELD) { /* Already yielded for revalidation. */
+                       return KR_STATE_FAIL;
                }
                DEBUG_MSG(qry, ">< cut changed, needs revalidation\n");
                if (!signer) {
@@ -419,7 +419,7 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
                        }
                        qry->zone_cut.name = knot_dname_copy(signer, &req->pool);
                } /* else zone cut matches, but DS/DNSKEY doesn't => refetch. */
-               return KNOT_STATE_YIELD;
+               return KR_STATE_YIELD;
        }
        
        /* Check if this is a DNSKEY answer, check trust chain and store. */
@@ -431,7 +431,7 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
                if (ret != 0) {
                        DEBUG_MSG(qry, "<= bad keys, broken trust chain\n");
                        qry->flags |= QUERY_DNSSEC_BOGUS;
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
        }
 
@@ -446,7 +446,7 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
                if (ret != 0) {
                        DEBUG_MSG(qry, "<= bad NXDOMAIN proof\n");
                        qry->flags |= QUERY_DNSSEC_BOGUS;
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
        }
 
@@ -473,7 +473,7 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
                                } else {
                                        DEBUG_MSG(qry, "<= bad NODATA proof\n");
                                        qry->flags |= QUERY_DNSSEC_BOGUS;
-                                       return KNOT_STATE_FAIL;
+                                       return KR_STATE_FAIL;
                                }
                        }
                }
@@ -486,7 +486,7 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
                if (ret != 0) {
                        DEBUG_MSG(qry, "<= couldn't validate RRSIGs\n");
                        qry->flags |= QUERY_DNSSEC_BOGUS;
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
        }
 
@@ -503,21 +503,21 @@ static int validate(knot_layer_t *ctx, knot_pkt_t *pkt)
        /* Check and update current delegation point security status. */
        ret = update_delegation(req, qry, pkt, has_nsec3);
        if (ret != 0) {
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
        /* Update parent query zone cut */
        if (qry->parent) {
                if (update_parent_keys(qry, qtype) != 0) {
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
        }
        DEBUG_MSG(qry, "<= answer valid, OK\n");
-       return KNOT_STATE_DONE;
+       return KR_STATE_DONE;
 }
 /** Module implementation. */
-const knot_layer_api_t *validate_layer(struct kr_module *module)
+const kr_layer_api_t *validate_layer(struct kr_module *module)
 {
-       static const knot_layer_api_t _layer = {
+       static const kr_layer_api_t _layer = {
                .consume = &validate,
        };
        /* Store module reference */
lib/module.c
index 7c7a1398df7cd14bceac82ba8499a1ae1d1ae6b8..6df580e81431ebf7d7df617d6f6016d73ed9836e 100644 (file)
 #include "lib/module.h"
 
 /* List of embedded modules */
-const knot_layer_api_t *iterate_layer(struct kr_module *module);
-const knot_layer_api_t *validate_layer(struct kr_module *module);
-const knot_layer_api_t *rrcache_layer(struct kr_module *module);
-const knot_layer_api_t *pktcache_layer(struct kr_module *module);
+const kr_layer_api_t *iterate_layer(struct kr_module *module);
+const kr_layer_api_t *validate_layer(struct kr_module *module);
+const kr_layer_api_t *rrcache_layer(struct kr_module *module);
+const kr_layer_api_t *pktcache_layer(struct kr_module *module);
 static const struct kr_module embedded_modules[] = {
        { "iterate",  NULL, NULL, NULL, iterate_layer, NULL, NULL, NULL },
        { "validate", NULL, NULL, NULL, validate_layer, NULL, NULL, NULL },
lib/module.h
index 8dad1d20ec1cc114f18fab387527a957b7cac32f..70279e24f5678f2887ae9712a5539fa399adf423 100644 (file)
@@ -34,7 +34,7 @@ typedef uint32_t (module_api_cb)(void);
 typedef int (module_init_cb)(struct kr_module *);
 typedef int (module_deinit_cb)(struct kr_module *);
 typedef int (module_config_cb)(struct kr_module *, const char *);
-typedef const knot_layer_api_t* (module_layer_cb)(struct kr_module *);
+typedef const kr_layer_api_t* (module_layer_cb)(struct kr_module *);
 typedef struct kr_prop *(module_prop_cb)(void);
 typedef char *(kr_prop_cb)(void *, struct kr_module *, const char *);
 #define KR_MODULE_API ((uint32_t) 0x20150402)
lib/resolve.c
index 7f8c7f896427bbda078a2d5d2ccc3bb846c5b440..369236d93a16d18539f30d5ec9a44b87c7c70b32 100644 (file)
@@ -41,7 +41,7 @@
  * @internal Defer execution of current query.
  * The current layer state and input will be pushed to a stack and resumed on next iteration.
  */
-static int consume_yield(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        struct kr_request *req = ctx->data;
        knot_pkt_t *pkt_copy = knot_pkt_new(NULL, pkt->size, &req->pool);
@@ -57,10 +57,10 @@ static int consume_yield(knot_layer_t *ctx, knot_pkt_t *pkt)
        }
        return kr_error(ENOMEM);
 }
-static int begin_yield(knot_layer_t *ctx, void *module) { return kr_ok(); }
-static int reset_yield(knot_layer_t *ctx) { return kr_ok(); }
-static int finish_yield(knot_layer_t *ctx) { return kr_ok(); }
-static int produce_yield(knot_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
+static int begin_yield(kr_layer_t *ctx, void *module) { return kr_ok(); }
+static int reset_yield(kr_layer_t *ctx) { return kr_ok(); }
+static int finish_yield(kr_layer_t *ctx) { return kr_ok(); }
+static int produce_yield(kr_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
 
 /** @internal Macro for iterating module layers. */
 #define RESUME_LAYERS(from, req, qry, func, ...) \
@@ -68,10 +68,10 @@ static int produce_yield(knot_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
        for (size_t i = (from); i < (req)->ctx->modules->len; ++i) { \
                struct kr_module *mod = (req)->ctx->modules->at[i]; \
                if (mod->layer) { \
-                       struct knot_layer layer = {.state = (req)->state, .api = mod->layer(mod), .data = (req)}; \
+                       struct kr_layer layer = {.state = (req)->state, .api = mod->layer(mod), .data = (req)}; \
                        if (layer.api && layer.api->func) { \
                                (req)->state = layer.api->func(&layer, ##__VA_ARGS__); \
-                               if ((req)->state == KNOT_STATE_YIELD) { \
+                               if ((req)->state == KR_STATE_YIELD) { \
                                        func ## _yield(&layer, ##__VA_ARGS__); \
                                        break; \
                                } \
@@ -84,7 +84,7 @@ static int produce_yield(knot_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
 #define ITERATE_LAYERS(req, qry, func, ...) RESUME_LAYERS(0, req, qry, func, ##__VA_ARGS__)
 
 /** @internal Find layer id matching API. */
-static inline size_t layer_id(struct kr_request *req, const struct knot_layer_api *api) {
+static inline size_t layer_id(struct kr_request *req, const struct kr_layer_api *api) {
        module_array_t *modules = req->ctx->modules;
        for (size_t i = 0; i < modules->len; ++i) {
                struct kr_module *mod = modules->at[i];
@@ -357,7 +357,7 @@ static int answer_finalize(struct kr_request *request, int state)
        knot_pkt_t *answer = request->answer;
 
        /* Always set SERVFAIL for bogus answers. */
-       if (state == KNOT_STATE_FAIL && rplan->pending.len > 0) {
+       if (state == KR_STATE_FAIL && rplan->pending.len > 0) {
                struct kr_query *last = array_tail(rplan->pending);
                if ((last->flags & QUERY_DNSSEC_WANT) && (last->flags & QUERY_DNSSEC_BOGUS)) {
                        return answer_fail(answer);
@@ -382,7 +382,7 @@ static int answer_finalize(struct kr_request *request, int state)
        /* Set AD=1 if succeeded and requested secured answer. */
        const bool has_ad = knot_wire_get_ad(answer->wire);
        knot_wire_clear_ad(answer->wire);
-       if (state == KNOT_STATE_DONE && rplan->resolved.len > 0) {
+       if (state == KR_STATE_DONE && rplan->resolved.len > 0) {
                struct kr_query *last = array_tail(rplan->resolved);
                /* Do not set AD for RRSIG query, as we can't validate it. */
                const bool secure = (last->flags & QUERY_DNSSEC_WANT) &&
@@ -428,14 +428,14 @@ int kr_resolve_begin(struct kr_request *request, struct kr_context *ctx, knot_pk
        request->ctx = ctx;
        request->answer = answer;
        request->options = ctx->options;
-       request->state = KNOT_STATE_CONSUME;
+       request->state = KR_STATE_CONSUME;
        request->current_query = NULL;
        array_init(request->authority);
        array_init(request->additional);
 
        /* Expect first query */
        kr_rplan_init(&request->rplan, request, &request->pool);
-       return KNOT_STATE_CONSUME;
+       return KR_STATE_CONSUME;
 }
 
 static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
@@ -455,7 +455,7 @@ static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
                qry = kr_rplan_push_empty(rplan, NULL);
        }
        if (!qry) {
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
 
        /* Deferred zone cut lookup for this query. */
@@ -482,7 +482,7 @@ static int resolve_query(struct kr_request *request, const knot_pkt_t *packet)
        request->qsource.packet = packet;
        ITERATE_LAYERS(request, qry, begin, request);
        request->qsource.packet = NULL;
-       if (request->state == KNOT_STATE_DONE) {
+       if (request->state == KR_STATE_DONE) {
                kr_rplan_pop(rplan, qry);
        }
        return request->state;
@@ -551,7 +551,7 @@ static void update_nslist_score(struct kr_request *request, struct kr_query *qry
 {
        struct kr_context *ctx = request->ctx;
        /* On sucessful answer, update preference list RTT and penalise timer  */
-       if (request->state != KNOT_STATE_FAIL) {
+       if (request->state != KR_STATE_FAIL) {
                /* Update RTT information for preference list */
                update_nslist_rtt(ctx, qry, src);
                /* Do not complete NS address resolution on soft-fail. */
@@ -579,7 +579,7 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
        /* Empty resolution plan, push packet as the new query */
        if (packet && kr_rplan_empty(rplan)) {
                if (answer_prepare(request->answer, packet, request) != 0) {
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
                return resolve_query(request, packet);
        }
@@ -589,7 +589,7 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
        bool tried_tcp = (qry->flags & QUERY_TCP);
        if (!packet || packet->size == 0) {
                if (tried_tcp) {
-                       request->state = KNOT_STATE_FAIL;
+                       request->state = KR_STATE_FAIL;
                } else {
                        qry->flags |= QUERY_TCP;
                }
@@ -599,7 +599,7 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
                if (qname_raw && qry->secret != 0) {
                        randomized_qname_case(qname_raw, qry->secret);
                }
-               request->state = KNOT_STATE_CONSUME;
+               request->state = KR_STATE_CONSUME;
                if (qry->flags & QUERY_CACHED) {
                        ITERATE_LAYERS(request, qry, consume, packet);
                } else {
@@ -620,18 +620,18 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
                update_nslist_score(request, qry, src, packet);
        }
        /* Resolution failed, invalidate current NS. */
-       if (request->state == KNOT_STATE_FAIL) {
+       if (request->state == KR_STATE_FAIL) {
                invalidate_ns(rplan, qry);
                qry->flags &= ~QUERY_RESOLVED;
        }
 
        /* Pop query if resolved. */
-       if (request->state == KNOT_STATE_YIELD) {
-               return KNOT_STATE_PRODUCE; /* Requery */
+       if (request->state == KR_STATE_YIELD) {
+               return KR_STATE_PRODUCE; /* Requery */
        } else if (qry->flags & QUERY_RESOLVED) {
                kr_rplan_pop(rplan, qry);
        } else if (!tried_tcp && (qry->flags & QUERY_TCP)) {
-               return KNOT_STATE_PRODUCE; /* Requery over TCP */
+               return KR_STATE_PRODUCE; /* Requery over TCP */
        } else { /* Clear query flags for next attempt */
                qry->flags &= ~(QUERY_CACHED|QUERY_TCP);
        }
@@ -640,10 +640,10 @@ int kr_resolve_consume(struct kr_request *request, const struct sockaddr *src, k
 
        /* Do not finish with bogus answer. */
        if (qry->flags & QUERY_DNSSEC_BOGUS)  {
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
 
-       return kr_rplan_empty(&request->rplan) ? KNOT_STATE_DONE : KNOT_STATE_PRODUCE;
+       return kr_rplan_empty(&request->rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
 }
 
 /** @internal Spawn subrequest in current zone cut (no minimization or lookup). */
@@ -701,10 +701,10 @@ static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
                /* @todo we could fetch the information from the parent cut, but we don't remember that now */
                struct kr_query *next = kr_rplan_push(rplan, qry, qry->zone_cut.name, qry->sclass, KNOT_RRTYPE_DS);
                if (!next) {
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
                next->flags |= QUERY_AWAIT_CUT|QUERY_DNSSEC_WANT;
-               return KNOT_STATE_DONE;
+               return KR_STATE_DONE;
        }
        /* Try to fetch missing DNSKEY (either missing or above current cut).
         * Do not fetch if this is a DNSKEY subrequest to avoid circular dependency. */
@@ -713,12 +713,12 @@ static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
        if (want_secured && refetch_key && !is_dnskey_subreq) {
                struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
                if (!next) {
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
-               return KNOT_STATE_DONE;
+               return KR_STATE_DONE;
        }
 
-       return KNOT_STATE_PRODUCE;
+       return KR_STATE_PRODUCE;
 }
 
 /** @internal Check current zone cut status and credibility, spawn subrequests if needed. */
@@ -729,7 +729,7 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
 
        /* Stub mode, just forward and do not solve cut. */
        if (qry->flags & QUERY_STUB) {
-               return KNOT_STATE_PRODUCE;
+               return KR_STATE_PRODUCE;
        }
 
        /* The query wasn't resolved from cache,
@@ -748,19 +748,19 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
                        if (ret == kr_error(ENOENT)) {
                                ret = kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
                                if (ret != 0) {
-                                       return KNOT_STATE_FAIL;
+                                       return KR_STATE_FAIL;
                                }
                                DEBUG_MSG(qry, "=> using root hints\n");
                                qry->flags &= ~QUERY_AWAIT_CUT;
-                               return KNOT_STATE_DONE;
+                               return KR_STATE_DONE;
                        } else {
-                               return KNOT_STATE_FAIL;
+                               return KR_STATE_FAIL;
                        }
                }
                /* Update minimized QNAME if zone cut changed */
                if (qry->zone_cut.name[0] != '\0' && !(qry->flags & QUERY_NO_MINIMIZE)) {
                        if (kr_make_query(qry, packet) != 0) {
-                               return KNOT_STATE_FAIL;
+                               return KR_STATE_FAIL;
                        }
                }
                qry->flags &= ~QUERY_AWAIT_CUT;
@@ -777,20 +777,20 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
 
        /* No query left for resolution */
        if (kr_rplan_empty(rplan)) {
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
        /* If we have deferred answers, resume them. */
        struct kr_query *qry = array_tail(rplan->pending);
        if (qry->deferred != NULL) {
                /* @todo: Refactoring validator, check trust chain before resuming. */
                switch(trust_chain_check(request, qry)) {
-               case KNOT_STATE_FAIL: return KNOT_STATE_FAIL;
-               case KNOT_STATE_DONE: return KNOT_STATE_PRODUCE;
+               case KR_STATE_FAIL: return KR_STATE_FAIL;
+               case KR_STATE_DONE: return KR_STATE_PRODUCE;
                default: break;
                }
                DEBUG_MSG(qry, "=> resuming yielded answer\n");
                struct kr_layer_pickle *pickle = qry->deferred;
-               request->state = KNOT_STATE_YIELD;
+               request->state = KR_STATE_YIELD;
                RESUME_LAYERS(layer_id(request, pickle->api), request, qry, consume, pickle->pkt);
                qry->deferred = pickle->next;
        } else {
@@ -798,46 +798,46 @@ int kr_resolve_produce(struct kr_request *request, struct sockaddr **dst, int *t
                 * this is normally not required, and incurrs another cache lookups for cached answer. */
                if (qry->flags & QUERY_ALWAYS_CUT) {
                        switch(zone_cut_check(request, qry, packet)) {
-                       case KNOT_STATE_FAIL: return KNOT_STATE_FAIL;
-                       case KNOT_STATE_DONE: return KNOT_STATE_PRODUCE;
+                       case KR_STATE_FAIL: return KR_STATE_FAIL;
+                       case KR_STATE_DONE: return KR_STATE_PRODUCE;
                        default: break;
                        }
                }
                /* Resolve current query and produce dependent or finish */
-               request->state = KNOT_STATE_PRODUCE;
+               request->state = KR_STATE_PRODUCE;
                ITERATE_LAYERS(request, qry, produce, packet);
-               if (request->state != KNOT_STATE_FAIL && knot_wire_get_qr(packet->wire)) {
+               if (request->state != KR_STATE_FAIL && knot_wire_get_qr(packet->wire)) {
                        /* Produced an answer, consume it. */
                        qry->secret = 0;
-                       request->state = KNOT_STATE_CONSUME;
+                       request->state = KR_STATE_CONSUME;
                        ITERATE_LAYERS(request, qry, consume, packet);
                }
        }
        switch(request->state) {
-       case KNOT_STATE_FAIL: return request->state;
-       case KNOT_STATE_CONSUME: break;
-       case KNOT_STATE_DONE:
+       case KR_STATE_FAIL: return request->state;
+       case KR_STATE_CONSUME: break;
+       case KR_STATE_DONE:
        default: /* Current query is done */
-               if (qry->flags & QUERY_RESOLVED && request->state != KNOT_STATE_YIELD) {
+               if (qry->flags & QUERY_RESOLVED && request->state != KR_STATE_YIELD) {
                        kr_rplan_pop(rplan, qry);
                }
                ITERATE_LAYERS(request, qry, reset);
-               return kr_rplan_empty(rplan) ? KNOT_STATE_DONE : KNOT_STATE_PRODUCE;
+               return kr_rplan_empty(rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
        }
        
 
        /* This query has RD=0 or is ANY, stop here. */
        if (qry->stype == KNOT_RRTYPE_ANY || !knot_wire_get_rd(request->answer->wire)) {
                DEBUG_MSG(qry, "=> qtype is ANY or RD=0, bail out\n");
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
 
        /* Update zone cut, spawn new subrequests. */
        if (!(qry->flags & QUERY_STUB)) {
                int state = zone_cut_check(request, qry, packet);
                switch(state) {
-               case KNOT_STATE_FAIL: return KNOT_STATE_FAIL;
-               case KNOT_STATE_DONE: return KNOT_STATE_PRODUCE;
+               case KR_STATE_FAIL: return KR_STATE_FAIL;
+               case KR_STATE_DONE: return KR_STATE_PRODUCE;
                default: break;
                }
        }
@@ -850,7 +850,7 @@ ns_election:
         */
        if(++ns_election_iter >= KR_ITER_LIMIT) {
                DEBUG_MSG(qry, "=> couldn't converge NS selection, bail out\n");
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
 
        const bool retry = (qry->flags & (QUERY_TCP|QUERY_STUB|QUERY_BADCOOKIE_AGAIN));
@@ -867,7 +867,7 @@ ns_election:
                        DEBUG_MSG(qry, "=> no valid NS left\n");
                        ITERATE_LAYERS(request, qry, reset);
                        kr_rplan_pop(rplan, qry);
-                       return KNOT_STATE_PRODUCE;
+                       return KR_STATE_PRODUCE;
                }
        }
 
@@ -879,7 +879,7 @@ ns_election:
                        goto ns_election; /* Must try different NS */
                }
                ITERATE_LAYERS(request, qry, reset);
-               return KNOT_STATE_PRODUCE;
+               return KR_STATE_PRODUCE;
        }
 
        /* Randomize query case (if not in safemode) */
@@ -995,10 +995,10 @@ int kr_resolve_finish(struct kr_request *request, int state)
 #endif
        /* Finalize answer */
        if (answer_finalize(request, state) != 0) {
-               state = KNOT_STATE_FAIL;
+               state = KR_STATE_FAIL;
        }
        /* Error during procesing, internal failure */
-       if (state != KNOT_STATE_DONE) {
+       if (state != KR_STATE_DONE) {
                knot_pkt_t *answer = request->answer;
                if (knot_wire_get_rcode(answer->wire) == KNOT_RCODE_NOERROR) {
                        knot_wire_set_rcode(answer->wire, KNOT_RCODE_SERVFAIL);
@@ -1009,7 +1009,7 @@ int kr_resolve_finish(struct kr_request *request, int state)
        ITERATE_LAYERS(request, NULL, finish);
        DEBUG_MSG(NULL, "finished: %d, queries: %zu, mempool: %zu B\n",
                  request->state, rplan->resolved.len, (size_t) mp_total_size(request->pool.ctx));
-       return KNOT_STATE_DONE;
+       return KR_STATE_DONE;
 }
 
 struct kr_rplan *kr_resolve_plan(struct kr_request *request)
lib/resolve.h
index d8434bb70ac12af113d832f7120ffb2f31763d4a..4187f62c8e355b8569bc78fecfbac031e32b16b1 100644 (file)
  * state = kr_resolve_consume(&req, query);
  *
  * // Generate answer
- * while (state == KNOT_STATE_PRODUCE) {
+ * while (state == KR_STATE_PRODUCE) {
  *
  *     // Additional query generate, do the I/O and pass back answer
  *     state = kr_resolve_produce(&req, &addr, &type, query);
- *     while (state == KNOT_STATE_CONSUME) {
+ *     while (state == KR_STATE_CONSUME) {
  *         int ret = sendrecv(addr, proto, query, resp);
  *
  *         // If I/O fails, make "resp" empty
modules/README.rst
index 26a6923297a008f37d8dc3fb2cee67d8edda28eb..6d988bd0baa9d5387fb8549d41ffcdad11977f94 100644 (file)
@@ -210,11 +210,11 @@ to avoid multiple declarations. Here's how the preface looks like:
        #include "lib/layer.h"
        #include "lib/module.h"
        // Need a forward declaration of the function signature
-       int finish(knot_layer_t *);
+       int finish(kr_layer_t *);
        // Workaround for layers composition
-       static inline const knot_layer_api_t *_layer(void)
+       static inline const kr_layer_api_t *_layer(void)
        {
-               static const knot_layer_api_t api = {
+               static const kr_layer_api_t api = {
                        .finish = &finish
                };
                return &api;
@@ -228,7 +228,7 @@ Now we can add the implementations for the ``finish`` layer and finalize the mod
 .. code-block:: go
 
        //export finish
-       func finish(ctx *C.knot_layer_t) C.int {
+       func finish(ctx *C.kr_layer_t) C.int {
                // Since the context is unsafe.Pointer, we need to cast it
                var param *C.struct_kr_request = (*C.struct_kr_request)(ctx.data)
                // Now we can use the C API as well
@@ -237,7 +237,7 @@ Now we can add the implementations for the ``finish`` layer and finalize the mod
        }
 
        //export mymodule_layer
-       func mymodule_layer(module *C.struct_kr_module) *C.knot_layer_api_t {
+       func mymodule_layer(module *C.struct_kr_module) *C.kr_layer_api_t {
                // Wrapping the inline trampoline function
                return C._layer()
        }
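
For comparison with the Go wrapper above, a module written directly in C returns the renamed kr_layer_api_t itself, as the cookies, hints and stats modules further down in this commit do. A minimal sketch under the renamed types; the module name mymodule and the pass-through consume callback are placeholders:

	#include "lib/layer.h"
	#include "lib/module.h"

	/* Placeholder consume callback: bail out once the request is already
	 * resolved or failed, otherwise pass the state through unchanged. */
	static int consume(kr_layer_t *ctx, knot_pkt_t *pkt)
	{
		if (ctx->state & (KR_STATE_DONE | KR_STATE_FAIL)) {
			return ctx->state;
		}
		return ctx->state;
	}

	KR_EXPORT
	const kr_layer_api_t *mymodule_layer(struct kr_module *module)
	{
		static kr_layer_api_t _layer = {
			.consume = &consume,
		};
		/* A module that needs its own state inside callbacks can store
		 * the module pointer here and read it back via ctx->api->data,
		 * as the stats collect() callback below does. */
		_layer.data = module;
		return &_layer;
	}
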
index cfaf189f81da7486984e16d14af35351d03dc109..414e1b7be854251f6512b7287024992993e1c808 100644 (file)
@@ -216,13 +216,13 @@ static bool check_cookie_content_and_cache(const struct kr_cookie_settings *clnt
 }
 
 /** Process incoming response. */
-int check_response(knot_layer_t *ctx, knot_pkt_t *pkt)
+int check_response(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        struct kr_request *req = ctx->data;
        struct kr_query *qry = req->current_query;
        struct kr_cookie_ctx *cookie_ctx = &req->ctx->cookie_ctx;
 
-       if (ctx->state & (KNOT_STATE_DONE | KNOT_STATE_FAIL)) {
+       if (ctx->state & (KR_STATE_DONE | KR_STATE_FAIL)) {
                return ctx->state;
        }
 
@@ -246,7 +246,7 @@ int check_response(knot_layer_t *ctx, knot_pkt_t *pkt)
                /* We haven't received any cookies although we should. */
                DEBUG_MSG(NULL, "%s\n",
                          "expected to receive a cookie but none received");
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
 
        if (!pkt_cookie_opt) {
@@ -256,7 +256,7 @@ int check_response(knot_layer_t *ctx, knot_pkt_t *pkt)
 
        if (!check_cookie_content_and_cache(&cookie_ctx->clnt, req,
                                            pkt_cookie_opt, cookie_cache)) {
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
 
        uint16_t rcode = knot_pkt_get_ext_rcode(pkt);
@@ -281,10 +281,10 @@ int check_response(knot_layer_t *ctx, knot_pkt_t *pkt)
                         * we always expect that the server doesn't support TCP.
                         */
                        qry->flags &= ~QUERY_BADCOOKIE_AGAIN;
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
 
-               return KNOT_STATE_CONSUME;
+               return KR_STATE_CONSUME;
        }
 
        return ctx->state;
@@ -303,7 +303,7 @@ static inline uint8_t *req_cookie_option(struct kr_request *req)
  * @brief Returns resolver state and sets answer RCODE on missing or invalid
  *        server cookie.
  *
- * @note Caller should exit when only KNOT_STATE_FAIL is returned.
+ * @note Caller should exit when only KR_STATE_FAIL is returned.
  *
  * @param state            original resolver state
  * @param sc_present       true if server cookie is present
@@ -319,15 +319,15 @@ static int invalid_sc_status(int state, bool sc_present, bool ignore_badcookie,
        const knot_pkt_t *pkt = req->qsource.packet;
 
        if (!pkt) {
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
 
        if (knot_wire_get_qdcount(pkt->wire) == 0) {
                /* RFC7873 5.4 */
-               state = KNOT_STATE_DONE;
+               state = KR_STATE_DONE;
                if (sc_present) {
                        kr_pkt_set_ext_rcode(answer, KNOT_RCODE_BADCOOKIE);
-                       state |= KNOT_STATE_FAIL;
+                       state |= KR_STATE_FAIL;
                }
        } else if (!ignore_badcookie) {
                /* Generate BADCOOKIE response. */
@@ -338,23 +338,23 @@ static int invalid_sc_status(int state, bool sc_present, bool ignore_badcookie,
                        DEBUG_MSG(NULL, "%s\n",
                                  "missing EDNS section in prepared answer");
                        /* Caller should exit on this (and only this) state. */
-                       return KNOT_STATE_FAIL;
+                       return KR_STATE_FAIL;
                }
                kr_pkt_set_ext_rcode(answer, KNOT_RCODE_BADCOOKIE);
-               state = KNOT_STATE_FAIL | KNOT_STATE_DONE;
+               state = KR_STATE_FAIL | KR_STATE_DONE;
        }
 
        return state;
 }
 
-int check_request(knot_layer_t *ctx, void *module_param)
+int check_request(kr_layer_t *ctx, void *module_param)
 {
        struct kr_request *req = ctx->data;
        struct kr_cookie_settings *srvr_sett = &req->ctx->cookie_ctx.srvr;
 
        knot_pkt_t *answer = req->answer;
 
-       if (ctx->state & (KNOT_STATE_DONE | KNOT_STATE_FAIL)) {
+       if (ctx->state & (KR_STATE_DONE | KR_STATE_FAIL)) {
                return ctx->state;
        }
 
@@ -378,7 +378,7 @@ int check_request(knot_layer_t *ctx, void *module_param)
                /* FORMERR -- malformed cookies. */
                DEBUG_MSG(NULL, "%s\n", "request with malformed cookie");
                knot_wire_set_rcode(answer->wire, KNOT_RCODE_FORMERR);
-               return KNOT_STATE_FAIL | KNOT_STATE_DONE;
+               return KR_STATE_FAIL | KR_STATE_DONE;
        }
 
        /*
@@ -392,7 +392,7 @@ int check_request(knot_layer_t *ctx, void *module_param)
 
        if (!req->qsource.addr || !srvr_sett->current.secr || !current_sc_alg) {
                DEBUG_MSG(NULL, "%s\n", "missing valid server cookie context");
-               return KNOT_STATE_FAIL;
+               return KR_STATE_FAIL;
        }
 
        int return_state = ctx->state;
@@ -419,7 +419,7 @@ int check_request(knot_layer_t *ctx, void *module_param)
                /* Request has no server cookie. */
                return_state = invalid_sc_status(return_state, false,
                                                 ignore_badcookie, req, answer);
-               if (return_state == KNOT_STATE_FAIL) {
+               if (return_state == KR_STATE_FAIL) {
                        return return_state;
                }
                goto answer_add_cookies;
@@ -445,7 +445,7 @@ int check_request(knot_layer_t *ctx, void *module_param)
                /* Invalid server cookie. */
                return_state = invalid_sc_status(return_state, true,
                                                 ignore_badcookie, req, answer);
-               if (return_state == KNOT_STATE_FAIL) {
+               if (return_state == KR_STATE_FAIL) {
                        return return_state;
                }
                goto answer_add_cookies;
@@ -457,7 +457,7 @@ answer_add_cookies:
        /* Add server cookie into response. */
        ret = kr_answer_write_cookie(&sc_input, &nonce, current_sc_alg, answer);
        if (ret != kr_ok()) {
-               return_state = KNOT_STATE_FAIL;
+               return_state = KR_STATE_FAIL;
        }
        return return_state;
 }
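
The check_request() hunks above rely on the renamed states being bit flags: a bare KR_STATE_FAIL means the caller should give up, while the combined KR_STATE_FAIL | KR_STATE_DONE from invalid_sc_status() means a FORMERR or BADCOOKIE answer has already been prepared. A minimal, hypothetical sketch of a caller making that distinction (include paths are assumed from this commit's file list):

	#include "lib/layer.h"                      /* kr_layer_t, KR_STATE_* */
	#include "modules/cookies/cookiemonster.h"  /* check_request() */

	/* Hypothetical caller of the begin callback above. */
	static int run_cookie_begin(kr_layer_t *ctx, void *module_param)
	{
		int state = check_request(ctx, module_param);
		if (state == KR_STATE_FAIL) {
			/* Hard failure: no usable answer was prepared. */
			return state;
		}
		if ((state & KR_STATE_FAIL) && (state & KR_STATE_DONE)) {
			/* The prepared answer already carries FORMERR or
			 * BADCOOKIE and can be sent back as-is. */
		}
		return state;
	}
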
index bf88964fd4f2f7a3297ed63a344c190c1b533835..480de903fc9097568bcd05fb5556ddff570a68c0 100644 (file)
@@ -26,7 +26,7 @@
  * @param module_param module parameters
  * @return layer state
  */
-int check_request(knot_layer_t *ctx, void *module_param);
+int check_request(kr_layer_t *ctx, void *module_param);
 
 /**
  * @brief Checks cookies of received responses.
@@ -34,4 +34,4 @@ int check_request(knot_layer_t *ctx, void *module_param);
  * @param pkt response packet
  * @return layer state
  */
-int check_response(knot_layer_t *ctx, knot_pkt_t *pkt);
+int check_response(kr_layer_t *ctx, knot_pkt_t *pkt);
index f2695f5fdd414af582dbf9a91a6bbd157553fbd1..a6429ef20db4847f0aa67f3218d192900fea00d6 100644 (file)
@@ -73,12 +73,12 @@ int cookies_deinit(struct kr_module *module)
 }
 
 KR_EXPORT
-const knot_layer_api_t *cookies_layer(struct kr_module *module)
+const kr_layer_api_t *cookies_layer(struct kr_module *module)
 {
        /* The function answer_finalize() in resolver is called before any
         * .finish callback. Therefore this layer does not use it. */
 
-       static knot_layer_api_t _layer = {
+       static kr_layer_api_t _layer = {
                .begin = &check_request,
                .consume = &check_response
        };
index 8eb81a8d9a0a76504df4ebd9c6d764cb20990ebc..0cb936cd05fa3f8222d7dfdda0b910141da5c9a6 100644 (file)
@@ -48,7 +48,7 @@ struct rev_search_baton {
        size_t addr_len;
 };
 
-static int begin(knot_layer_t *ctx, void *module_param)
+static int begin(kr_layer_t *ctx, void *module_param)
 {
        ctx->data = module_param;
        return ctx->state;
@@ -179,11 +179,11 @@ static int satisfy_forward(struct kr_zonecut *hints, knot_pkt_t *pkt, struct kr_
        return put_answer(pkt, &rr);
 }
 
-static int query(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int query(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        struct kr_request *req = ctx->data;
        struct kr_query *qry = req->current_query;
-       if (!qry || ctx->state & (KNOT_STATE_FAIL)) {
+       if (!qry || ctx->state & (KR_STATE_FAIL)) {
                return ctx->state;
        }
 
@@ -208,7 +208,7 @@ static int query(knot_layer_t *ctx, knot_pkt_t *pkt)
        qry->flags |= QUERY_CACHED|QUERY_NO_MINIMIZE;
        pkt->parsed = pkt->size;
        knot_wire_set_qr(pkt->wire);
-       return KNOT_STATE_DONE;
+       return KR_STATE_DONE;
 }
 
 static int parse_addr_str(struct sockaddr_storage *sa, const char *addr)
@@ -439,9 +439,9 @@ static char* hint_root(void *env, struct kr_module *module, const char *args)
  */
 
 KR_EXPORT
-const knot_layer_api_t *hints_layer(struct kr_module *module)
+const kr_layer_api_t *hints_layer(struct kr_module *module)
 {
-       static knot_layer_api_t _layer = {
+       static kr_layer_api_t _layer = {
                .begin = &begin,
                .produce = &query,
        };
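
The hints query() hunks above show the general shape of a produce-time layer that answers locally: skip already-failed states, fill the packet, mark the query as cached and unminimized, flip the QR bit, and return KR_STATE_DONE so iteration stops. A minimal sketch of that shape with the renamed types; answer_locally() stands in for a module's own lookup and is purely hypothetical, and the includes are assumed to match those of modules/hints/hints.c:

	#include "lib/layer.h"   /* kr_layer_t, KR_STATE_*; other types as in hints.c */

	/* Hypothetical local lookup; returns 0 when it has filled "pkt". */
	static int answer_locally(struct kr_query *qry, knot_pkt_t *pkt);

	static int produce_local(kr_layer_t *ctx, knot_pkt_t *pkt)
	{
		struct kr_request *req = ctx->data;
		struct kr_query *qry = req->current_query;
		if (!qry || (ctx->state & KR_STATE_FAIL)) {
			return ctx->state;
		}
		if (answer_locally(qry, pkt) != 0) {
			return ctx->state;   /* nothing local, keep resolving */
		}
		/* Same post-processing as the hints query() callback above. */
		qry->flags |= QUERY_CACHED | QUERY_NO_MINIMIZE;
		pkt->parsed = pkt->size;
		knot_wire_set_qr(pkt->wire);
		return KR_STATE_DONE;
	}
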
index 97a38290d45c4b2309f2feee31fc36bf8996e46f..55036e068b8b393147c4df437197a0dba89b9af3 100644 (file)
@@ -154,7 +154,7 @@ static void collect_sample(struct stat_data *data, struct kr_rplan *rplan, knot_
        }
 }
 
-static int collect_rtt(knot_layer_t *ctx, knot_pkt_t *pkt)
+static int collect_rtt(kr_layer_t *ctx, knot_pkt_t *pkt)
 {
        struct kr_request *req = ctx->data;
        struct kr_query *qry = req->current_query;
@@ -183,7 +183,7 @@ static int collect_rtt(knot_layer_t *ctx, knot_pkt_t *pkt)
        return ctx->state;
 }
 
-static int collect(knot_layer_t *ctx)
+static int collect(kr_layer_t *ctx)
 {
        struct kr_request *param = ctx->data;
        struct kr_module *module = ctx->api->data;
@@ -427,9 +427,9 @@ static char* dump_upstreams(void *env, struct kr_module *module, const char *arg
  */
 
 KR_EXPORT
-const knot_layer_api_t *stats_layer(struct kr_module *module)
+const kr_layer_api_t *stats_layer(struct kr_module *module)
 {
-       static knot_layer_api_t _layer = {
+       static kr_layer_api_t _layer = {
                .consume = &collect_rtt,
                .finish = &collect,
        };