-/** It's simply inside of cycle taken out to decrease indentation. \return error code. */
+/** The loop body of cache_stash(), extracted to reduce indentation.
+ * \param unauth_cnt the number of stashed non-authoritative RRs (to be incremented)
+ * \return error code. */
static int stash_rrset(const ranked_rr_array_t *arr, int arr_i,
- const struct kr_query *qry, struct kr_cache *cache);
+ const struct kr_query *qry, struct kr_cache *cache,
+ int *unauth_cnt);
int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
{
/* Stash individual records. */
ranked_rr_array_t *selected[] = kr_request_selected(req);
int ret = 0;
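+ /* Non-authoritative RRs are stashed without per-record log lines; count them here and report one total at 'finally'. */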
+ int unauth_cnt = 0;
for (int psec = KNOT_ANSWER; psec <= KNOT_ADDITIONAL; ++psec) {
const ranked_rr_array_t *arr = selected[psec];
/* uncached entries are located at the end */
continue;
/* TODO: probably safe to break but maybe not worth it */
}
- ret = stash_rrset(arr, i, qry, cache);
+ ret = stash_rrset(arr, i, qry, cache, &unauth_cnt);
if (ret) {
VERBOSE_MSG(qry, "=> stashing RRs errored out\n");
goto finally;
stash_pkt(pkt, qry, req);
finally:
+ if (unauth_cnt) {
+ VERBOSE_MSG(qry, "=> stashed also %d nonauth RRs\n", unauth_cnt);
+ }
kr_cache_sync(cache);
return ctx->state; /* we ignore cache-stashing errors */
}
static int stash_rrset(const ranked_rr_array_t *arr, int arr_i,
- const struct kr_query *qry, struct kr_cache *cache)
+ const struct kr_query *qry, struct kr_cache *cache,
+ int *unauth_cnt)
{
const ranked_rr_array_entry_t *entry = arr->at[arr_i];
if (entry->cached) {
assert(entry_h_consistent(val_new_entry, rr->type));
WITH_VERBOSE(qry) {
+ /* Reduce verbosity: don't log non-authoritative RRs one by one, just count them. */
+ if (!kr_rank_test(entry->rank, KR_RANK_AUTH)) {
+ ++*unauth_cnt;
+ return kr_ok();
+ }
auto_free char *type_str = kr_rrtype_text(rr->type),
*encl_str = kr_dname_text(encloser);
- VERBOSE_MSG(qry, "=> stashed rank: 0%0.2o, %s %s%s"
+ VERBOSE_MSG(qry, "=> stashed rank: 0%.2o, %s %s%s "
"(%d B total, incl. %d RRSIGs)\n",
entry->rank, type_str, (wild_labels ? "*." : ""), encl_str,
(int)val_new_entry.len, (rr_sigs ? rr_sigs->rrs.rr_count : 0)
struct kr_cache_p *peek)
{ /* Just wrap with extra verbose logging. */
const int ret = peek_exact_real(cache, name, type, peek);
- if (VERBOSE_STATUS) {
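+ /* The lookup-logging block stays compiled but disabled; flip the 'false' to re-enable it when debugging cache peeks. */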
+ if (false && VERBOSE_STATUS) { /* too noisy for usual --verbose */
auto_free char *type_str = kr_rrtype_text(type),
*name_str = kr_dname_text(name);
const char *result_str = (ret == kr_ok() ? "hit" :
}
/** @internal Update NS address from record \a rr. Return _FAIL on error. */
-static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
+static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query, int *glue_cnt)
{
if (rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA) {
const knot_rdata_t *rdata = rr->rrs.data;
if (ret != 0) {
return KR_STATE_FAIL;
}
- QVERBOSE_MSG(query, "<= using glue for "
+
+ ++*glue_cnt; /* reduced verbosity: glue is only counted here and logged in aggregate later */
+ /* QVERBOSE_MSG(query, "<= using glue for "
"'%s': '%s'\n", name_str, addr_str);
+ */
}
return KR_STATE_CONSUME;
}
-/** @internal From \a pkt, fetch glue records for name \a ns, and update the cut etc. */
+/** @internal From \a pkt, fetch glue records for name \a ns, and update the cut etc.
+ *
+ * \param glue_cnt the number of accepted addresses (to be incremented)
+ */
static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, bool in_bailiwick,
- struct kr_request *req, const struct kr_query *qry)
+ struct kr_request *req, const struct kr_query *qry, int *glue_cnt)
{
ranked_rr_array_t *selected[] = kr_request_selected(req);
for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
(req->ctx->options.NO_IPV6)) {
continue;
}
- (void) update_nsaddr(rr, req->current_query);
+ (void) update_nsaddr(rr, req->current_query, glue_cnt);
}
}
}
* @param current_cut is cut name before this packet.
* @return _DONE if cut->name changes, _FAIL on error, and _CONSUME otherwise. */
static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr,
- struct kr_request *req, const knot_dname_t *current_cut)
+ struct kr_request *req, const knot_dname_t *current_cut,
+ int *glue_cnt)
{
struct kr_query *qry = req->current_query;
struct kr_zonecut *cut = &qry->zone_cut;
do_fetch = in_bailiwick;
}
if (do_fetch) {
- fetch_glue(pkt, ns_name, in_bailiwick, req, qry);
+ fetch_glue(pkt, ns_name, in_bailiwick, req, qry, glue_cnt);
}
}
/* Remember current bailiwick for NS processing. */
const knot_dname_t *current_zone_cut = qry->zone_cut.name;
bool ns_record_exists = false;
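+ /* Glue addresses accepted below are only counted; a single summary line is logged after the loop. */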
+ int glue_cnt = 0;
/* Update zone cut information. */
for (unsigned i = 0; i < ns->count; ++i) {
const knot_rrset_t *rr = knot_pkt_rr(ns, i);
if (rr->type == KNOT_RRTYPE_NS) {
ns_record_exists = true;
- int state = update_cut(pkt, rr, req, current_zone_cut);
+ int state = update_cut(pkt, rr, req, current_zone_cut, &glue_cnt);
switch(state) {
case KR_STATE_DONE: result = state; break;
case KR_STATE_FAIL: return state; break;
}
}
+ if (glue_cnt) {
+ VERBOSE_MSG("<= loaded %d glue addresses\n", glue_cnt);
+ }
+
if ((qry->flags.DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
if (knot_wire_get_aa(pkt->wire) == 0 &&
/* if not referral, mark record to be written to final answer */
to_wire = !referral;
} else {
- state = update_nsaddr(rr, query->parent);
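+ /* The accepted-address count isn't reported on this path, so a throwaway counter is passed. */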
+ int cnt_ = 0;
+ state = update_nsaddr(rr, query->parent, &cnt_);
if (state == KR_STATE_FAIL) {
return state;
}