char b[260];
struct query_info qinfo;
struct iter_hints_stub* stub;
+ int nolock = 0;
regional_free_all(region);
qinfo.qname = nm;
qinfo.qname_len = nmlen;
"of %s\n", b))
return 0;
- lock_rw_rdlock(&worker->env.fwds->lock);
- dp = forwards_lookup(worker->env.fwds, nm, qinfo.qclass);
+ dp = forwards_lookup(worker->env.fwds, nm, qinfo.qclass, nolock);
if(dp) {
if(!ssl_printf(ssl, "forwarding request:\n")) {
lock_rw_unlock(&worker->env.fwds->lock);
lock_rw_unlock(&worker->env.fwds->lock);
return 1;
}
- lock_rw_unlock(&worker->env.fwds->lock);
while(1) {
dp = dns_cache_find_delegation(&worker->env, nm, nmlen,
continue;
}
}
- lock_rw_rdlock(&worker->env.hints->lock);
stub = hints_lookup_stub(worker->env.hints, nm, qinfo.qclass,
- dp);
+ dp, nolock);
if(stub) {
if(stub->noprime) {
if(!ssl_printf(ssl, "The noprime stub servers "
print_dp_details(ssl, worker, stub->dp);
lock_rw_unlock(&worker->env.hints->lock);
} else {
- lock_rw_unlock(&worker->env.hints->lock);
print_dp_main(ssl, dp, msg);
print_dp_details(ssl, worker, dp);
}
print_root_fwds(RES* ssl, struct iter_forwards* fwds, uint8_t* root)
{
struct delegpt* dp;
- lock_rw_rdlock(&fwds->lock);
- dp = forwards_lookup(fwds, root, LDNS_RR_CLASS_IN);
+ int nolock = 0;
+ dp = forwards_lookup(fwds, root, LDNS_RR_CLASS_IN, nolock);
if(!dp) {
- lock_rw_unlock(&fwds->lock);
return ssl_printf(ssl, "off (using root hints)\n");
}
/* if dp is returned it must be the root */
{
struct iter_forwards* fwd = worker->env.fwds;
uint8_t* root = (uint8_t*)"\000";
+ int nolock = 0;
if(!fwd) {
(void)ssl_printf(ssl, "error: structure not allocated\n");
return;
/* delete all the existing queries first */
mesh_delete_all(worker->env.mesh);
if(strcmp(args, "off") == 0) {
- lock_rw_wrlock(&fwd->lock);
- forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, root);
- lock_rw_unlock(&fwd->lock);
+ forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, root, nolock);
} else {
struct delegpt* dp;
if(!(dp = parse_delegpt(ssl, args, root)))
return;
- lock_rw_wrlock(&fwd->lock);
- if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp)) {
- lock_rw_unlock(&fwd->lock);
+ if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp, nolock)) {
(void)ssl_printf(ssl, "error out of memory\n");
return;
}
- lock_rw_unlock(&fwd->lock);
}
send_ok(ssl);
}
int insecure = 0, tls = 0;
uint8_t* nm = NULL;
struct delegpt* dp = NULL;
+ int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, &dp, &insecure, NULL, &tls))
return;
if(tls)
dp->ssl_upstream = 1;
+ /* prelock forwarders for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
if(insecure && worker->env.anchors) {
if(!anchors_add_insecure(worker->env.anchors, LDNS_RR_CLASS_IN,
return;
}
}
- if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp)) {
+ if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp, nolock)) {
lock_rw_unlock(&fwd->lock);
(void)ssl_printf(ssl, "error out of memory\n");
free(nm);
struct iter_forwards* fwd = worker->env.fwds;
int insecure = 0;
uint8_t* nm = NULL;
+ int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, NULL, &insecure, NULL, NULL))
return;
+ /* prelock forwarders for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors, LDNS_RR_CLASS_IN,
nm);
- forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, nm);
+ forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, nm, nolock);
lock_rw_unlock(&fwd->lock);
free(nm);
send_ok(ssl);
int insecure = 0, prime = 0, tls = 0;
uint8_t* nm = NULL;
struct delegpt* dp = NULL;
+ int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, &dp, &insecure, &prime, &tls))
return;
if(tls)
dp->ssl_upstream = 1;
+ /* prelock forwarders and hints for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
lock_rw_wrlock(&worker->env.hints->lock);
if(insecure && worker->env.anchors) {
return;
}
}
- if(!forwards_add_stub_hole(fwd, LDNS_RR_CLASS_IN, nm)) {
+ if(!forwards_add_stub_hole(fwd, LDNS_RR_CLASS_IN, nm, nolock)) {
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors,
LDNS_RR_CLASS_IN, nm);
free(nm);
return;
}
- if(!hints_add_stub(worker->env.hints, LDNS_RR_CLASS_IN, dp, !prime)) {
+ if(!hints_add_stub(worker->env.hints, LDNS_RR_CLASS_IN, dp, !prime,
+ nolock)) {
(void)ssl_printf(ssl, "error out of memory\n");
- forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm);
+ forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm, nolock);
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors,
LDNS_RR_CLASS_IN, nm);
struct iter_forwards* fwd = worker->env.fwds;
int insecure = 0;
uint8_t* nm = NULL;
+ int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, NULL, &insecure, NULL, NULL))
return;
+ /* prelock forwarders and hints for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
lock_rw_wrlock(&worker->env.hints->lock);
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors, LDNS_RR_CLASS_IN,
nm);
- forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm);
- hints_delete_stub(worker->env.hints, LDNS_RR_CLASS_IN, nm);
+ forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm, nolock);
+ hints_delete_stub(worker->env.hints, LDNS_RR_CLASS_IN, nm, nolock);
lock_rw_unlock(&fwd->lock);
lock_rw_unlock(&worker->env.hints->lock);
free(nm);
}
struct delegpt*
-forwards_find(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
+forwards_find(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass,
+ int nolock)
{
- rbnode_type* res = NULL;
+ struct iter_forward_zone* res;
struct iter_forward_zone key;
+ int has_dp;
key.node.key = &key;
key.dclass = qclass;
key.name = qname;
key.namelabs = dname_count_size_labels(qname, &key.namelen);
- res = rbtree_search(fwd->tree, &key);
- if(res) return ((struct iter_forward_zone*)res)->dp;
- return NULL;
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&fwd->lock); }
+ res = (struct iter_forward_zone*)rbtree_search(fwd->tree, &key);
+ has_dp = res && res->dp;
+ if(!has_dp && !nolock) { lock_rw_unlock(&fwd->lock); }
+ return has_dp?res->dp:NULL;
}
struct delegpt*
-forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
+forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass,
+ int nolock)
{
/* lookup the forward zone in the tree */
rbnode_type* res = NULL;
struct iter_forward_zone *result;
struct iter_forward_zone key;
+ int has_dp;
key.node.key = &key;
key.dclass = qclass;
key.name = qname;
key.namelabs = dname_count_size_labels(qname, &key.namelen);
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&fwd->lock); }
if(rbtree_find_less_equal(fwd->tree, &key, &res)) {
/* exact */
result = (struct iter_forward_zone*)res;
/* smaller element (or no element) */
int m;
result = (struct iter_forward_zone*)res;
- if(!result || result->dclass != qclass)
+ if(!result || result->dclass != qclass) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return NULL;
+ }
/* count number of labels matched */
(void)dname_lab_cmp(result->name, result->namelabs, key.name,
key.namelabs, &m);
result = result->parent;
}
}
- if(result)
- return result->dp;
- return NULL;
+ has_dp = result && result->dp;
+ if(!has_dp && !nolock) { lock_rw_unlock(&fwd->lock); }
+ return has_dp?result->dp:NULL;
}
struct delegpt*
-forwards_lookup_root(struct iter_forwards* fwd, uint16_t qclass)
+forwards_lookup_root(struct iter_forwards* fwd, uint16_t qclass, int nolock)
{
uint8_t root = 0;
- return forwards_lookup(fwd, &root, qclass);
+ return forwards_lookup(fwd, &root, qclass, nolock);
}
-int
-forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
+/* Finds next root item in forwards lookup tree.
+ * Caller needs to handle locking of the forwards structure. */
+static int
+next_root_locked(struct iter_forwards* fwd, uint16_t* dclass)
{
struct iter_forward_zone key;
rbnode_type* n;
}
/* root not first item? search for higher items */
*dclass = p->dclass + 1;
- return forwards_next_root(fwd, dclass);
+ return next_root_locked(fwd, dclass);
}
/* find class n in tree, we may get a direct hit, or if we don't
* this is the last item of the previous class so rbtree_next() takes
}
/* not a root node, return next higher item */
*dclass = p->dclass+1;
- return forwards_next_root(fwd, dclass);
+ return next_root_locked(fwd, dclass);
}
}
+int
+forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass, int nolock)
+{
+ int ret;
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&fwd->lock); }
+ ret = next_root_locked(fwd, dclass);
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
+ return ret;
+}
+
size_t
forwards_get_mem(struct iter_forwards* fwd)
{
}
int
-forwards_add_zone(struct iter_forwards* fwd, uint16_t c, struct delegpt* dp)
+forwards_add_zone(struct iter_forwards* fwd, uint16_t c, struct delegpt* dp,
+ int nolock)
{
struct iter_forward_zone *z;
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&fwd->lock); }
if((z=fwd_zone_find(fwd, c, dp->name)) != NULL) {
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
}
- if(!forwards_insert(fwd, c, dp))
+ if(!forwards_insert(fwd, c, dp)) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 0;
+ }
fwd_init_parents(fwd);
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1;
}
void
-forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
+forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm,
+ int nolock)
{
struct iter_forward_zone *z;
- if(!(z=fwd_zone_find(fwd, c, nm)))
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&fwd->lock); }
+ if(!(z=fwd_zone_find(fwd, c, nm))) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* nothing to do */
+ }
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
fwd_init_parents(fwd);
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
}
int
-forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
+forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm,
+ int nolock)
{
- if(fwd_zone_find(fwd, c, nm) != NULL)
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&fwd->lock); }
+ if(fwd_zone_find(fwd, c, nm) != NULL) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1; /* already a stub zone there */
+ }
if(!fwd_add_stub_hole(fwd, c, nm)) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 0;
}
fwd_init_parents(fwd);
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1;
}
void
-forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
+forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c,
+ uint8_t* nm, int nolock)
{
struct iter_forward_zone *z;
- if(!(z=fwd_zone_find(fwd, c, nm)))
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&fwd->lock); }
+ if(!(z=fwd_zone_find(fwd, c, nm))) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* nothing to do */
- if(z->dp != NULL)
+ }
+ if(z->dp != NULL) {
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* not a stub hole */
+ }
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
fwd_init_parents(fwd);
+ if(!nolock) { lock_rw_unlock(&fwd->lock); }
}
/**
* Find forward zone exactly by name
- * The return value is contents of the forwards structure, caller should
- * lock and unlock a readlock on the forwards structure.
+ * The return value is contents of the forwards structure.
+ * Caller should lock and unlock a readlock on the forwards structure if nolock
+ * is set.
+ * Otherwise caller should unlock the readlock on the forwards structure if a
+ * value was returned.
* @param fwd: forward storage.
* @param qname: The qname of the query.
* @param qclass: The qclass of the query.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point or null.
*/
struct delegpt* forwards_find(struct iter_forwards* fwd, uint8_t* qname,
- uint16_t qclass);
+ uint16_t qclass, int nolock);
/**
* Find forward zone information
* For this qname/qclass find forward zone information, returns delegation
* point with server names and addresses, or NULL if no forwarding is needed.
- * The return value is contents of the forwards structure, caller should
- * lock and unlock a readlock on the forwards structure.
+ * The return value is contents of the forwards structure.
+ * Caller should lock and unlock a readlock on the forwards structure if nolock
+ * is set.
+ * Otherwise caller should unlock the readlock on the forwards structure if a
+ * value was returned.
*
* @param fwd: forward storage.
* @param qname: The qname of the query.
* @param qclass: The qclass of the query.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point if the query has to be forwarded to that list,
* otherwise null.
*/
-struct delegpt* forwards_lookup(struct iter_forwards* fwd,
- uint8_t* qname, uint16_t qclass);
+struct delegpt* forwards_lookup(struct iter_forwards* fwd,
+ uint8_t* qname, uint16_t qclass, int nolock);
/**
* Same as forwards_lookup, but for the root only
* @param fwd: forward storage.
* @param qclass: The qclass of the query.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point if root forward exists, otherwise null.
*/
-struct delegpt* forwards_lookup_root(struct iter_forwards* fwd,
- uint16_t qclass);
+struct delegpt* forwards_lookup_root(struct iter_forwards* fwd,
+ uint16_t qclass, int nolock);
/**
* Find next root item in forwards lookup tree.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a readlock on the forwards structure.
* @param fwd: the forward storage
* @param qclass: class to look at next, or higher.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return false if none found, or if true stored in qclass.
*/
-int forwards_next_root(struct iter_forwards* fwd, uint16_t* qclass);
+int forwards_next_root(struct iter_forwards* fwd, uint16_t* qclass,
+ int nolock);
/**
* Get memory in use by forward storage
/**
* Add zone to forward structure. For external use since it recalcs
* the tree parents.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param dp: delegation point with name and target nameservers for new
* forward zone. malloced.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
-int forwards_add_zone(struct iter_forwards* fwd, uint16_t c,
- struct delegpt* dp);
+int forwards_add_zone(struct iter_forwards* fwd, uint16_t c,
+ struct delegpt* dp, int nolock);
/**
* Remove zone from forward structure. For external use since it
* recalcs the tree parents.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
+ * @param nolock: Skip locking, locking is handled by the caller.
*/
-void forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm);
+void forwards_delete_zone(struct iter_forwards* fwd, uint16_t c,
+ uint8_t* nm, int nolock);
/**
* Add stub hole (empty entry in forward table, that makes resolution skip
* a forward-zone because the stub zone should override the forward zone).
* Does not add one if not necessary.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
-int forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm);
+int forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c,
+ uint8_t* nm, int nolock);
/**
* Remove stub hole, if one exists.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
+ * @param nolock: Skip locking, locking is handled by the caller.
*/
void forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c,
- uint8_t* nm);
+ uint8_t* nm, int nolock);
#endif /* ITERATOR_ITER_FWD_H */
int
hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg)
{
+ int nolock = 1;
lock_rw_wrlock(&hints->lock);
hints_del_tree(hints);
name_tree_init(&hints->tree);
}
/* use fallback compiletime root hints */
- if(!hints_lookup_root(hints, LDNS_RR_CLASS_IN)) {
+ if(!hints_find_root(hints, LDNS_RR_CLASS_IN, nolock)) {
struct delegpt* dp = compile_time_root_prime(cfg->do_ip4,
cfg->do_ip6);
verbose(VERB_ALGO, "no config, using builtin root hints.");
return 1;
}
-struct delegpt*
-hints_lookup_root(struct iter_hints* hints, uint16_t qclass)
+struct delegpt*
+hints_find(struct iter_hints* hints, uint8_t* qname, uint16_t qclass,
+ int nolock)
{
- uint8_t rootlab = 0;
struct iter_hints_stub *stub;
+ size_t len;
+ int has_dp;
+ int labs = dname_count_size_labels(qname, &len);
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&hints->lock); }
stub = (struct iter_hints_stub*)name_tree_find(&hints->tree,
- &rootlab, 1, 1, qclass);
- if(!stub)
- return NULL;
- return stub->dp;
+ qname, len, labs, qclass);
+ has_dp = stub && stub->dp;
+ if(!has_dp && !nolock) { lock_rw_unlock(&hints->lock); }
+ return has_dp?stub->dp:NULL;
+}
+
+struct delegpt*
+hints_find_root(struct iter_hints* hints, uint16_t qclass, int nolock)
+{
+ uint8_t rootlab = 0;
+ return hints_find(hints, &rootlab, qclass, nolock);
}
struct iter_hints_stub*
-hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
- uint16_t qclass, struct delegpt* cache_dp)
+hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
+ uint16_t qclass, struct delegpt* cache_dp, int nolock)
{
size_t len;
int labs;
/* first lookup the stub */
labs = dname_count_size_labels(qname, &len);
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&hints->lock); }
r = (struct iter_hints_stub*)name_tree_lookup(&hints->tree, qname,
len, labs, qclass);
- if(!r) return NULL;
+ if(!r) {
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
+ return NULL;
+ }
/* If there is no cache (root prime situation) */
if(cache_dp == NULL) {
if(r->dp->namelabs != 1)
return r; /* no cache dp, use any non-root stub */
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
return NULL;
}
if(dname_strict_subdomain(r->dp->name, r->dp->namelabs,
cache_dp->name, cache_dp->namelabs))
return r; /* need to prime this stub */
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
return NULL;
}
-int hints_next_root(struct iter_hints* hints, uint16_t* qclass)
+int hints_next_root(struct iter_hints* hints, uint16_t* qclass, int nolock)
{
- return name_tree_next_root(&hints->tree, qclass);
+ int ret;
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_rdlock(&hints->lock); }
+ ret = name_tree_next_root(&hints->tree, qclass);
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
+ return ret;
}
size_t
int
hints_add_stub(struct iter_hints* hints, uint16_t c, struct delegpt* dp,
	int noprime, int nolock)
{
	/* Insert (or replace) a stub hint for class c.  An existing entry
	 * with the same name and class is deleted first.  Recomputes the
	 * name tree parent pointers afterwards.
	 * Takes the write lock itself unless nolock is set (caller locked).
	 * Returns false on out of memory. */
	struct iter_hints_stub *z;
	/* lock_() calls are macros that could be nothing, surround in {} */
	if(!nolock) { lock_rw_wrlock(&hints->lock); }
	if((z=(struct iter_hints_stub*)name_tree_find(&hints->tree,
		dp->name, dp->namelen, dp->namelabs, c)) != NULL) {
		/* remove the old entry so the new dp takes its place */
		(void)rbtree_delete(&hints->tree, &z->node);
		hints_stub_free(z);
	}
	if(!hints_insert(hints, c, dp, noprime)) {
		if(!nolock) { lock_rw_unlock(&hints->lock); }
		return 0;
	}
	name_tree_init_parents(&hints->tree);
	if(!nolock) { lock_rw_unlock(&hints->lock); }
	return 1;
}
void
-hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm)
+hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm,
+ int nolock)
{
struct iter_hints_stub *z;
size_t len;
int labs = dname_count_size_labels(nm, &len);
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(!nolock) { lock_rw_wrlock(&hints->lock); }
if(!(z=(struct iter_hints_stub*)name_tree_find(&hints->tree,
- nm, len, labs, c)))
+ nm, len, labs, c))) {
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
return; /* nothing to do */
+ }
(void)rbtree_delete(&hints->tree, &z->node);
hints_stub_free(z);
name_tree_init_parents(&hints->tree);
+ if(!nolock) { lock_rw_unlock(&hints->lock); }
}
int hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg);
/**
- * Find root hints for the given class.
- * The return value is contents of the hints structure, caller should
- * lock and unlock a readlock on the hints structure.
+ * Find hints for the given class.
+ * The return value is contents of the hints structure.
+ * Caller should lock and unlock a readlock on the hints structure if nolock
+ * is set.
+ * Otherwise caller should unlock the readlock on the hints structure if a
+ * value was returned.
* @param hints: hint storage.
+ * @param qname: the qname that generated the delegation point.
* @param qclass: class for which root hints are requested. host order.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return: NULL if no hints, or a ptr to stored hints.
*/
-struct delegpt* hints_lookup_root(struct iter_hints* hints, uint16_t qclass);
+struct delegpt* hints_find(struct iter_hints* hints, uint8_t* qname,
+ uint16_t qclass, int nolock);
+
+/**
+ * Same as hints_lookup, but for the root only.
+ * @param hints: hint storage.
+ * @param qclass: class for which root hints are requested. host order.
+ * @param nolock: Skip locking, locking is handled by the caller.
+ * @return: NULL if no hints, or a ptr to stored hints.
+ */
+struct delegpt* hints_find_root(struct iter_hints* hints,
+ uint16_t qclass, int nolock);
/**
* Find next root hints (to cycle through all root hints).
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a readlock on the hints structure.
* @param hints: hint storage
* @param qclass: class for which root hints are sought.
* 0 means give the first available root hints class.
* x means, give class x or a higher class if any.
* returns the found class in this variable.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return true if a root hint class is found.
* false if not root hint class is found (qclass may have been changed).
*/
-int hints_next_root(struct iter_hints* hints, uint16_t* qclass);
+int hints_next_root(struct iter_hints* hints, uint16_t* qclass, int nolock);
/**
* Given a qname/qclass combination, and the delegation point from the cache
* for this qname/qclass, determine if this combination indicates that a
* stub hint exists and must be primed.
- * The return value is contents of the hints structure, caller should
- * lock and unlock a readlock on the hints structure.
+ * The return value is contents of the hints structure.
+ * Caller should lock and unlock a readlock on the hints structure if nolock
+ * is set.
+ * Otherwise caller should unlock the readlock on the hints structure if a
+ * value was returned.
*
* @param hints: hint storage.
* @param qname: The qname that generated the delegation point.
* @param qclass: The qclass that generated the delegation point.
* @param dp: The cache generated delegation point.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return: A priming delegation point if there is a stub hint that must
* be primed, otherwise null.
*/
-struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
- uint8_t* qname, uint16_t qclass, struct delegpt* dp);
+struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
+ uint8_t* qname, uint16_t qclass, struct delegpt* dp, int nolock);
/**
* Get memory in use by hints
/**
* Add stub to hints structure. For external use since it recalcs
* the tree parents.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the hints structure.
* @param hints: the hints data structure
* @param c: class of zone
* @param dp: delegation point with name and target nameservers for new
* hints stub. malloced.
* @param noprime: set noprime option to true or false on new hint stub.
+ * @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
int hints_add_stub(struct iter_hints* hints, uint16_t c, struct delegpt* dp,
- int noprime);
+ int noprime, int nolock);
/**
* Remove stub from hints structure. For external use since it
* recalcs the tree parents.
+ * Handles its own locking unless nolock is set. In that case the caller
+ * should lock and unlock a writelock on the hints structure.
* @param hints: the hints data structure
* @param c: class of stub zone
* @param nm: name of stub zone (in uncompressed wireformat).
+ * @param nolock: Skip locking, locking is handled by the caller.
*/
-void hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm);
+void hints_delete_stub(struct iter_hints* hints, uint16_t c,
+ uint8_t* nm, int nolock);
#endif /* ITERATOR_ITER_HINTS_H */
{
uint16_t c1 = *c, c2 = *c;
int r1, r2;
+ int nolock = 1;
+ /* prelock both forwards and hints for atomic read. */
lock_rw_rdlock(&fwd->lock);
lock_rw_rdlock(&hints->lock);
- r1 = hints_next_root(hints, &c1);
- r2 = forwards_next_root(fwd, &c2);
+ r1 = hints_next_root(hints, &c1, nolock);
+ r2 = forwards_next_root(fwd, &c2, nolock);
lock_rw_unlock(&fwd->lock);
lock_rw_unlock(&hints->lock);
{
struct iter_hints_stub *stub;
struct delegpt *dp;
+ int nolock = 1;
/* Check for stub. */
+ /* Lock both forwards and hints for atomic read. */
lock_rw_rdlock(&qstate->env->fwds->lock);
lock_rw_rdlock(&qstate->env->hints->lock);
stub = hints_lookup_stub(qstate->env->hints, qinf->qname,
- qinf->qclass, NULL);
- dp = forwards_lookup(qstate->env->fwds, qinf->qname, qinf->qclass);
+ qinf->qclass, NULL, nolock);
+ dp = forwards_lookup(qstate->env->fwds, qinf->qname, qinf->qclass,
+ nolock);
/* see if forward or stub is more pertinent */
if(stub && stub->dp && dp) {
/** see if last resort is possible - does config allow queries to parent */
static int
-can_have_last_resort(struct module_env* env, uint8_t* nm, size_t nmlen,
+can_have_last_resort(struct module_env* env, uint8_t* nm, size_t ATTR_UNUSED(nmlen),
uint16_t qclass, int* have_dp, struct delegpt** retdp,
struct regional* region)
{
- struct delegpt* fwddp;
- struct iter_hints_stub* stub;
- int labs = dname_count_labels(nm);
+ struct delegpt* dp = NULL;
+ int nolock = 0;
/* do not process a last resort (the parent side) if a stub
* or forward is configured, because we do not want to go 'above'
* the configured servers */
- lock_rw_rdlock(&env->hints->lock);
- if(!dname_is_root(nm) && (stub = (struct iter_hints_stub*)
- name_tree_find(&env->hints->tree, nm, nmlen, labs, qclass)) &&
+ if(!dname_is_root(nm) &&
+ (dp = hints_find(env->hints, nm, qclass, nolock)) &&
/* has_parent side is turned off for stub_first, where we
* are allowed to go to the parent */
- stub->dp->has_parent_side_NS) {
- if(retdp) *retdp = delegpt_copy(stub->dp, region);
+ dp->has_parent_side_NS) {
+ if(retdp) *retdp = delegpt_copy(dp, region);
lock_rw_unlock(&env->hints->lock);
if(have_dp) *have_dp = 1;
return 0;
}
- lock_rw_unlock(&env->hints->lock);
- lock_rw_rdlock(&env->fwds->lock);
- if((fwddp = forwards_find(env->fwds, nm, qclass)) &&
+ if(dp) {
+ lock_rw_unlock(&env->hints->lock);
+ dp = NULL;
+ }
+ if((dp = forwards_find(env->fwds, nm, qclass, nolock)) &&
/* has_parent_side is turned off for forward_first, where
* we are allowed to go to the parent */
- fwddp->has_parent_side_NS) {
- if(retdp) *retdp = delegpt_copy(fwddp, region);
+ dp->has_parent_side_NS) {
+ if(retdp) *retdp = delegpt_copy(dp, region);
lock_rw_unlock(&env->fwds->lock);
if(have_dp) *have_dp = 1;
return 0;
}
- lock_rw_unlock(&env->fwds->lock);
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(dp) { lock_rw_unlock(&env->fwds->lock); }
return 1;
}
{
struct delegpt* dp;
struct module_qstate* subq;
+ int nolock = 0;
verbose(VERB_DETAIL, "priming . %s NS",
sldns_lookup_by_id(sldns_rr_classes, (int)qclass)?
sldns_lookup_by_id(sldns_rr_classes, (int)qclass)->name:"??");
- lock_rw_rdlock(&qstate->env->hints->lock);
- dp = hints_lookup_root(qstate->env->hints, qclass);
+ dp = hints_find_root(qstate->env->hints, qclass, nolock);
if(!dp) {
- lock_rw_unlock(&qstate->env->hints->lock);
verbose(VERB_ALGO, "Cannot prime due to lack of hints");
return 0;
}
struct iter_hints_stub* stub;
struct delegpt* stub_dp;
struct module_qstate* subq;
+ int nolock = 0;
if(!qname) return 0;
- lock_rw_rdlock(&qstate->env->hints->lock);
- stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp);
+ stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp,
+ nolock);
/* The stub (if there is one) does not need priming. */
- if(!stub) {
- lock_rw_unlock(&qstate->env->hints->lock);
- return 0;
- }
+ if(!stub) return 0;
stub_dp = stub->dp;
/* if we have an auth_zone dp, and stub is equal, don't prime stub
* yet, unless we want to fallback and avoid the auth_zone */
struct delegpt* dp;
uint8_t* delname = iq->qchase.qname;
size_t delnamelen = iq->qchase.qname_len;
+ int nolock = 0;
if(iq->refetch_glue && iq->dp) {
delname = iq->dp->name;
delnamelen = iq->dp->namelen;
if( (iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue)
&& !dname_is_root(iq->qchase.qname))
dname_remove_label(&delname, &delnamelen);
- lock_rw_rdlock(&qstate->env->fwds->lock);
- dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass);
- if(!dp) {
- lock_rw_unlock(&qstate->env->fwds->lock);
- return 0;
- }
+ dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass,
+ nolock);
+ if(!dp) return 0;
/* send recursion desired to forward addr */
iq->chase_flags |= BIT_RD;
iq->dp = delegpt_copy(dp, qstate->region);
* root priming situation. */
if(iq->dp == NULL) {
int r;
+ int nolock = 0;
/* if under auth zone, no prime needed */
if(!auth_zone_delegpt(qstate, iq, delname, delnamelen))
return error_response(qstate, id,
break; /* got noprime-stub-zone, continue */
else if(r)
return 0; /* stub prime request made */
- lock_rw_rdlock(&qstate->env->fwds->lock);
- if(forwards_lookup_root(qstate->env->fwds,
- iq->qchase.qclass)) {
+ if(forwards_lookup_root(qstate->env->fwds,
+ iq->qchase.qclass, nolock)) {
lock_rw_unlock(&qstate->env->fwds->lock);
/* forward zone root, no root prime needed */
/* fill in some dp - safety belt */
- lock_rw_rdlock(&qstate->env->hints->lock);
- iq->dp = hints_lookup_root(qstate->env->hints,
- iq->qchase.qclass);
+ iq->dp = hints_find_root(qstate->env->hints,
+ iq->qchase.qclass, nolock);
if(!iq->dp) {
- lock_rw_unlock(&qstate->env->hints->lock);
log_err("internal error: no hints dp");
errinf(qstate, "no hints for this class");
return error_response(qstate, id,
}
return next_state(iq, INIT_REQUEST_2_STATE);
}
- lock_rw_unlock(&qstate->env->fwds->lock);
/* Note that the result of this will set a new
* DelegationPoint based on the result of priming. */
if(!prime_root(qstate, iq, id, iq->qchase.qclass))
}
if(dname_is_root(iq->dp->name)) {
/* use safety belt */
+ int nolock = 0;
verbose(VERB_QUERY, "Cache has root NS but "
"no addresses. Fallback to the safety belt.");
- lock_rw_rdlock(&qstate->env->hints->lock);
- iq->dp = hints_lookup_root(qstate->env->hints,
- iq->qchase.qclass);
+ iq->dp = hints_find_root(qstate->env->hints,
+ iq->qchase.qclass, nolock);
/* note deleg_msg is from previous lookup,
* but RD is on, so it is not used */
if(!iq->dp) {
- lock_rw_unlock(&qstate->env->hints->lock);
log_err("internal error: no hints dp");
return error_response(qstate, id,
LDNS_RCODE_REFUSED);
delnamelen = iq->qchase.qname_len;
if(iq->refetch_glue) {
struct iter_hints_stub* stub;
+ int nolock = 0;
if(!iq->dp) {
log_err("internal or malloc fail: no dp for refetch");
errinf(qstate, "malloc failure, no delegation info");
}
/* Do not send queries above stub, do not set delname to dp if
* this is above stub without stub-first. */
- lock_rw_rdlock(&qstate->env->hints->lock);
stub = hints_lookup_stub(
qstate->env->hints, iq->qchase.qname, iq->qchase.qclass,
- iq->dp);
+ iq->dp, nolock);
if(!stub || !stub->dp->has_parent_side_NS ||
dname_subdomain_c(iq->dp->name, stub->dp->name)) {
delname = iq->dp->name;
delnamelen = iq->dp->namelen;
}
- lock_rw_unlock(&qstate->env->hints->lock);
+ /* lock_() calls are macros that could be nothing, surround in {} */
+ if(stub) { lock_rw_unlock(&qstate->env->hints->lock); }
}
if(iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue) {
if(!dname_is_root(delname))
return error_response_cache(qstate, id, LDNS_RCODE_SERVFAIL);
}
if(!iq->dp->has_parent_side_NS && dname_is_root(iq->dp->name)) {
- struct delegpt* p;
- lock_rw_rdlock(&qstate->env->hints->lock);
- p = hints_lookup_root(qstate->env->hints, iq->qchase.qclass);
- if(p) {
+ struct delegpt* dp;
+ int nolock = 0;
+ dp = hints_find_root(qstate->env->hints,
+ iq->qchase.qclass, nolock);
+ if(dp) {
struct delegpt_addr* a;
iq->chase_flags &= ~BIT_RD; /* go to authorities */
- for(ns = p->nslist; ns; ns=ns->next) {
+ for(ns = dp->nslist; ns; ns=ns->next) {
(void)delegpt_add_ns(iq->dp, qstate->region,
ns->name, ns->lame, ns->tls_auth_name,
ns->port);
}
- for(a = p->target_list; a; a=a->next_target) {
+ for(a = dp->target_list; a; a=a->next_target) {
(void)delegpt_add_addr(iq->dp, qstate->region,
&a->addr, a->addrlen, a->bogus,
a->lame, a->tls_auth_name, -1, NULL);
}
+ lock_rw_unlock(&qstate->env->hints->lock);
}
- lock_rw_unlock(&qstate->env->hints->lock);
iq->dp->has_parent_side_NS = 1;
} else if(!iq->dp->has_parent_side_NS) {
if(!iter_lookup_parent_NS_from_cache(qstate->env, iq->dp,
int iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
struct delegpt* dp, int supports_ipv4, int supports_ipv6, int use_nat64);
struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
- uint8_t* qname, uint16_t qclass, struct delegpt* dp);
+ uint8_t* qname, uint16_t qclass, struct delegpt* dp, int nolock);
/* Custom function to perform logic similar to the one in daemon/cachedump.c */
struct delegpt* find_delegation(struct module_qstate* qstate, char *nm, size_t nmlen);
struct query_info qinfo;
struct iter_hints_stub* stub;
uint32_t timenow = *qstate->env->now;
+ int nolock = 0;
regional_free_all(region);
qinfo.qname = (uint8_t*)nm;
dname_str((uint8_t*)nm, b);
continue;
}
- lock_rw_rdlock(&qstate->env->hints->lock);
- stub = hints_lookup_stub(qstate->env->hints, qinfo.qname, qinfo.qclass, dp);
+ stub = hints_lookup_stub(qstate->env->hints, qinfo.qname,
+ qinfo.qclass, dp, nolock);
if (stub) {
struct delegpt* stubdp = delegpt_copy(stub->dp, region);
lock_rw_unlock(&qstate->env->hints->lock);
return stubdp;
} else {
- lock_rw_unlock(&qstate->env->hints->lock);
return dp;
}
}