git.ipfire.org Git - thirdparty/unbound.git/commitdiff
- Patch for CVE-2022-3204 Non-Responsive Delegation Attack. release-1.16.3
author: W.C.A. Wijngaards <wouter@nlnetlabs.nl>
Wed, 21 Sep 2022 09:10:38 +0000 (11:10 +0200)
committer: W.C.A. Wijngaards <wouter@nlnetlabs.nl>
Wed, 21 Sep 2022 09:10:38 +0000 (11:10 +0200)
doc/Changelog
iterator/iter_delegpt.c
iterator/iter_delegpt.h
iterator/iter_utils.c
iterator/iter_utils.h
iterator/iterator.c
services/cache/dns.c
services/mesh.c
services/mesh.h

index 13f0f11749e05d615904f1a52fd4eb67a3aa044e..78f6c7afcb2752cfdb0d90fdc488f9caa060fac5 100644 (file)
@@ -1,3 +1,6 @@
+21 September 2022: Wouter
+       - Patch for CVE-2022-3204 Non-Responsive Delegation Attack.
+
 1 August 2022: Wouter
        - Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
        - Tests for ghost domain fixes.
index 4bffa1b3a7d54ad5c9142417914857d399374627..fd07aaa1335522388ce0980e017220cf96151eeb 100644 (file)
@@ -78,6 +78,7 @@ struct delegpt* delegpt_copy(struct delegpt* dp, struct regional* region)
                if(!delegpt_add_ns(copy, region, ns->name, ns->lame,
                        ns->tls_auth_name, ns->port))
                        return NULL;
+               copy->nslist->cache_lookup_count = ns->cache_lookup_count;
                copy->nslist->resolved = ns->resolved;
                copy->nslist->got4 = ns->got4;
                copy->nslist->got6 = ns->got6;
@@ -121,6 +122,7 @@ delegpt_add_ns(struct delegpt* dp, struct regional* region, uint8_t* name,
        ns->namelen = len;
        dp->nslist = ns;
        ns->name = regional_alloc_init(region, name, ns->namelen);
+       ns->cache_lookup_count = 0;
        ns->resolved = 0;
        ns->got4 = 0;
        ns->got6 = 0;
@@ -620,6 +622,7 @@ int delegpt_add_ns_mlc(struct delegpt* dp, uint8_t* name, uint8_t lame,
        }
        ns->next = dp->nslist;
        dp->nslist = ns;
+       ns->cache_lookup_count = 0;
        ns->resolved = 0;
        ns->got4 = 0;
        ns->got6 = 0;
index 62c8edc51225952d83780a79d5949806383af9e0..586597a69a1fd65a2ffba3b5c7aee04523f2604c 100644 (file)
@@ -101,6 +101,8 @@ struct delegpt_ns {
        uint8_t* name;
        /** length of name */
        size_t namelen;
+       /** number of cache lookups for the name */
+       int cache_lookup_count;
        /** 
         * If the name has been resolved. false if not queried for yet.
         * true if the A, AAAA queries have been generated.
index 3e13e595c63d6627dc40c25aab9c3c415116ce18..56b184a02fb893ec89536cf3a0ac809397782201 100644 (file)
@@ -1209,6 +1209,9 @@ int iter_lookup_parent_glue_from_cache(struct module_env* env,
        struct delegpt_ns* ns;
        size_t num = delegpt_count_targets(dp);
        for(ns = dp->nslist; ns; ns = ns->next) {
+               if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE)
+                       continue;
+               ns->cache_lookup_count++;
                /* get cached parentside A */
                akey = rrset_cache_lookup(env->rrset_cache, ns->name,
                        ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
index 8583fde58a443fef4edb2e627451af006623f4e7..850be96a6e1678e7ce46665b63fb7a8cc29b7beb 100644 (file)
@@ -62,6 +62,15 @@ struct ub_packed_rrset_key;
 struct module_stack;
 struct outside_network;
 
+/* max number of lookups in the cache for target nameserver names.
+ * This stops, for large delegations, N*N lookups in the cache. */
+#define ITERATOR_NAME_CACHELOOKUP_MAX  3
+/* max number of lookups in the cache for parentside glue for nameserver names
+ * This stops, for larger delegations, N*N lookups in the cache.
+ * It is a little larger than the nonpside max, so it allows a couple extra
+ * lookups of parent side glue. */
+#define ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE    5
+
 /**
  * Process config options and set iterator module state.
  * Sets default values if no config is found.
index 25e5cfee46453533be20e6b5e9fcf0798a31d181..da9b7990c5062cc664cef3d9a97a0b7171cba542 100644 (file)
@@ -1218,6 +1218,15 @@ generate_dnskey_prefetch(struct module_qstate* qstate,
                (qstate->query_flags&BIT_RD) && !(qstate->query_flags&BIT_CD)){
                return;
        }
+       /* we do not generate this prefetch when the query list is full,
+        * the query is fetched, if needed, when the validator wants it.
+        * At that time the validator waits for it, after spawning it.
+        * This means there is one state that uses cpu and a socket, the
+        * spawned while this one waits, and not several at the same time,
+        * if we had created the lookup here. And this helps to keep
+        * the total load down, but the query still succeeds to resolve. */
+       if(mesh_jostle_exceeded(qstate->env->mesh))
+               return;
 
        /* if the DNSKEY is in the cache this lookup will stop quickly */
        log_nametypeclass(VERB_ALGO, "schedule dnskey prefetch", 
@@ -1911,6 +1920,14 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
                                return 0;
                        }
                        query_count++;
+                       /* If the mesh query list is full, exit the loop here.
+                        * This makes the routine spawn one query at a time,
+                        * and this means there is no query state load
+                        * increase, because the spawned state uses cpu and a
+                        * socket while this state waits for that spawned
+                        * state. Next time we can look up further targets */
+                       if(mesh_jostle_exceeded(qstate->env->mesh))
+                               break;
                }
                /* Send the A request. */
                if(ie->supports_ipv4 &&
@@ -1925,6 +1942,9 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
                                return 0;
                        }
                        query_count++;
+                       /* If the mesh query list is full, exit the loop. */
+                       if(mesh_jostle_exceeded(qstate->env->mesh))
+                               break;
                }
 
                /* mark this target as in progress. */
@@ -2085,6 +2105,15 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
                        }
                        ns->done_pside6 = 1;
                        query_count++;
+                       if(mesh_jostle_exceeded(qstate->env->mesh)) {
+                               /* Wait for the lookup; do not spawn multiple
+                                * lookups at a time. */
+                               verbose(VERB_ALGO, "try parent-side glue lookup");
+                               iq->num_target_queries += query_count;
+                               target_count_increase(iq, query_count);
+                               qstate->ext_state[id] = module_wait_subquery;
+                               return 0;
+                       }
                }
                if(ie->supports_ipv4 && !ns->done_pside4) {
                        /* Send the A request. */
@@ -2560,7 +2589,12 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
        if(iq->depth < ie->max_dependency_depth
                && iq->num_target_queries == 0
                && (!iq->target_count || iq->target_count[TARGET_COUNT_NX]==0)
-               && iq->sent_count < TARGET_FETCH_STOP) {
+               && iq->sent_count < TARGET_FETCH_STOP
+               /* if the mesh query list is full, then do not waste cpu
+                * and sockets to fetch promiscuous targets. They can be
+                * looked up when needed. */
+               && !mesh_jostle_exceeded(qstate->env->mesh)
+               ) {
                tf_policy = ie->target_fetch_policy[iq->depth];
        }
 
index 6bca8d85fadb5f6b0a885705ba4d41127762e64d..b6e5697349c29f726fca985d23d52dab8d28642e 100644 (file)
@@ -404,6 +404,9 @@ cache_fill_missing(struct module_env* env, uint16_t qclass,
        struct ub_packed_rrset_key* akey;
        time_t now = *env->now;
        for(ns = dp->nslist; ns; ns = ns->next) {
+               if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX)
+                       continue;
+               ns->cache_lookup_count++;
                akey = rrset_cache_lookup(env->rrset_cache, ns->name, 
                        ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
                if(akey) {
index 30bcf7cda15598c75e0d544751ceecdf8e8ccdd3..2a411942663dcd9b827eedc960039376bdb3cace 100644 (file)
@@ -2240,3 +2240,10 @@ mesh_serve_expired_callback(void* arg)
                mesh_do_callback(mstate, LDNS_RCODE_NOERROR, msg->rep, c, &tv);
        }
 }
+
+int mesh_jostle_exceeded(struct mesh_area* mesh)
+{
+       if(mesh->all.count < mesh->max_reply_states)
+               return 0;
+       return 1;
+}
index 3be9b63faeddcabed3c40e1ac04d691219d487c4..25121a67b3a5595ecfe762a19cf4727bd18f9e33 100644 (file)
@@ -685,4 +685,15 @@ struct dns_msg*
 mesh_serve_expired_lookup(struct module_qstate* qstate,
        struct query_info* lookup_qinfo);
 
+/**
+ * See if the mesh has space for more queries. You can allocate queries
+ * anyway, but this checks for the allocated space.
+ * @param mesh: mesh area.
+ * @return true if the query list is full.
+ *     It checks the number of all queries, not just number of reply states,
+ *     that have a client address. So that spawned queries count too,
+ *     that were created by the iterator, or other modules.
+ */
+int mesh_jostle_exceeded(struct mesh_area* mesh);
+
 #endif /* SERVICES_MESH_H */