#ifdef HAVE_OPENSSL_BN_H
#include <openssl/bn.h>
#endif
+#ifdef HAVE_STDATOMIC_H
+#include <stdatomic.h>
+#endif
#include <ctype.h>
#include "daemon/remote.h"
#include "util/config_file.h"
#include "util/net_help.h"
#include "util/module.h"
+#include "util/ub_event.h"
#include "services/listen_dnsport.h"
#include "services/cache/rrset.h"
#include "services/cache/infra.h"
#include "validator/val_kcache.h"
#include "validator/val_kentry.h"
#include "validator/val_anchor.h"
+#include "validator/val_neg.h"
#include "iterator/iterator.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#include "iterator/iter_delegpt.h"
+#include "iterator/iter_utils.h"
+#include "iterator/iter_donotq.h"
+#include "iterator/iter_priv.h"
#include "services/outbound_list.h"
#include "services/outside_network.h"
#include "sldns/str2wire.h"
#include "sldns/wire2str.h"
#include "sldns/sbuffer.h"
#include "util/timeval_func.h"
+#include "util/tcp_conn_limit.h"
#include "util/edns.h"
#ifdef USE_CACHEDB
#include "cachedb/cachedb.h"
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
+#ifdef HAVE_POLL_H
+#include <poll.h>
+#endif
/* just for portability */
#ifdef SQ
/** Acceptable lengths of str lines */
#define MAX_CMD_STRLINE 1024
#define MAX_STDIN_STRLINE 2048
+/** What number of loop iterations is too much for ipc retries */
+#define IPC_LOOP_MAX 200
+/** Timeout in msec for ipc socket poll. */
+#define IPC_NOTIFICATION_WAIT 200
+
+static void fr_printq_delete(struct fast_reload_printq* printq);
+static void fr_main_perform_printout(struct fast_reload_thread* fr);
+static int fr_printq_empty(struct fast_reload_printq* printq);
+static void fr_printq_list_insert(struct fast_reload_printq* printq,
+ struct daemon* daemon);
+static void fr_printq_remove(struct fast_reload_printq* printq);
+static void fr_check_cmd_from_thread(struct fast_reload_thread* fr);
static int
remote_setup_ctx(struct daemon_remote* rc, struct config_file* cfg)
static void
clean_point(struct daemon_remote* rc, struct rc_state* s)
{
+ if(!s->rc) {
+ /* the state has been picked up and moved away */
+ free(s);
+ return;
+ }
state_list_remove_elem(&rc->busy_list, s->c);
rc->active --;
if(s->ssl) {
send_ok(ssl);
}
+#ifndef THREADS_DISABLED
+/** parse fast reload command options. */
+static int
+fr_parse_options(RES* ssl, char* arg, int* fr_verb, int* fr_nopause,
+ int* fr_drop_mesh)
+{
+ char* argp = arg;
+ while(*argp=='+') {
+ argp++;
+ while(*argp!=0 && *argp!=' ' && *argp!='\t') {
+ if(*argp == 'v') {
+ (*fr_verb)++;
+ } else if(*argp == 'p') {
+ (*fr_nopause) = 1;
+ } else if(*argp == 'd') {
+ (*fr_drop_mesh) = 1;
+ } else {
+ if(!ssl_printf(ssl,
+ "error: unknown option '+%c'\n",
+ *argp))
+ return 0;
+ return 0;
+ }
+ argp++;
+ }
+ argp = skipwhite(argp);
+ }
+ if(*argp!=0) {
+ if(!ssl_printf(ssl, "error: unknown option '%s'\n", argp))
+ return 0;
+ return 0;
+ }
+ return 1;
+}
+#endif /* !THREADS_DISABLED */
+
+/** do the fast_reload command */
+static void
+do_fast_reload(RES* ssl, struct worker* worker, struct rc_state* s, char* arg)
+{
+#ifdef THREADS_DISABLED
+ if(!ssl_printf(ssl, "error: no threads for fast_reload, compiled without threads.\n"))
+ return;
+ (void)worker;
+ (void)s;
+ (void)arg;
+#else
+ int fr_verb = 0, fr_nopause = 0, fr_drop_mesh = 0;
+ if(!fr_parse_options(ssl, arg, &fr_verb, &fr_nopause, &fr_drop_mesh))
+ return;
+ if(fr_verb >= 1) {
+ if(!ssl_printf(ssl, "start fast_reload\n"))
+ return;
+ }
+ fast_reload_thread_start(ssl, worker, s, fr_verb, fr_nopause,
+ fr_drop_mesh);
+#endif
+}
+
/** do the verbosity command */
static void
do_verbosity(RES* ssl, char* str)
struct view* v;
if(!find_arg2(ssl, arg, &arg2))
return;
- v = views_find_view(worker->daemon->views,
- arg, 1 /* get write lock*/);
+ v = views_find_view(worker->env.views, arg, 1 /* get write lock*/);
if(!v) {
ssl_printf(ssl,"no view with name: %s\n", arg);
return;
struct view* v;
if(!find_arg2(ssl, arg, &arg2))
return;
- v = views_find_view(worker->daemon->views,
- arg, 1 /* get write lock*/);
+ v = views_find_view(worker->env.views, arg, 1 /* get write lock*/);
if(!v) {
ssl_printf(ssl,"no view with name: %s\n", arg);
return;
struct view* v;
if(!find_arg2(ssl, arg, &arg2))
return;
- v = views_find_view(worker->daemon->views,
- arg, 1 /* get write lock*/);
+ v = views_find_view(worker->env.views, arg, 1 /* get write lock*/);
if(!v) {
ssl_printf(ssl,"no view with name: %s\n", arg);
return;
char buf[MAX_CMD_STRLINE + MAX_STDIN_STRLINE] = "view_local_data ";
size_t cmd_len;
int num = 0, line = 0;
- v = views_find_view(worker->daemon->views,
- arg, 1 /* get write lock*/);
+ v = views_find_view(worker->env.views, arg, 1 /* get write lock*/);
if(!v) {
ssl_printf(ssl,"no view with name: %s\n", arg);
return;
struct view* v;
if(!find_arg2(ssl, arg, &arg2))
return;
- v = views_find_view(worker->daemon->views,
- arg, 1 /* get write lock*/);
+ v = views_find_view(worker->env.views, arg, 1 /* get write lock*/);
if(!v) {
ssl_printf(ssl,"no view with name: %s\n", arg);
return;
char buf[MAX_CMD_STRLINE + MAX_STDIN_STRLINE] = "view_local_data_remove ";
int num = 0;
size_t cmd_len;
- v = views_find_view(worker->daemon->views,
- arg, 1 /* get write lock*/);
+ v = views_find_view(worker->env.views, arg, 1 /* get write lock*/);
if(!v) {
ssl_printf(ssl,"no view with name: %s\n", arg);
return;
static void
do_view_list_local_zones(RES* ssl, struct worker* worker, char* arg)
{
- struct view* v = views_find_view(worker->daemon->views,
+ struct view* v = views_find_view(worker->env.views,
arg, 0 /* get read lock*/);
if(!v) {
ssl_printf(ssl,"no view with name: %s\n", arg);
static void
do_view_list_local_data(RES* ssl, struct worker* worker, char* arg)
{
- struct view* v = views_find_view(worker->daemon->views,
+ struct view* v = views_find_view(worker->env.views,
arg, 0 /* get read lock*/);
if(!v) {
ssl_printf(ssl,"no view with name: %s\n", arg);
/** execute a remote control command */
static void
-execute_cmd(struct daemon_remote* rc, RES* ssl, char* cmd,
+execute_cmd(struct daemon_remote* rc, struct rc_state* s, RES* ssl, char* cmd,
struct worker* worker)
{
char* p = skipwhite(cmd);
} else if(cmdcmp(p, "reload", 6)) {
do_reload(ssl, worker, 0);
return;
+ } else if(cmdcmp(p, "fast_reload", 11)) {
+ do_fast_reload(ssl, worker, s, skipwhite(p+11));
+ return;
} else if(cmdcmp(p, "stats_noreset", 13)) {
do_stats(ssl, worker, 0);
return;
return;
}
verbose(VERB_ALGO, "remote exec distributed: %s", (char*)msg);
- execute_cmd(NULL, NULL, (char*)msg, worker);
+ execute_cmd(NULL, NULL, NULL, (char*)msg, worker);
free(msg);
}
verbose(VERB_DETAIL, "control cmd: %s", buf);
/* figure out what to do */
- execute_cmd(rc, res, buf, rc->worker);
+ execute_cmd(rc, s, res, buf, rc->worker);
}
/** handle SSL_do_handshake changes to the file descriptor to wait for later */
clean_point(rc, s);
return 0;
}
+
/**
 * This routine polls a socket for readiness.
 * @param fd: file descriptor, -1 uses no fd for a timer only.
 * @param timeout: time in msec to wait. 0 means nonblocking test,
 *	-1 waits blocking for events.
 * @param pollin: check for input event.
 * @param pollout: check for output event.
 * @param event: output variable, set to true if the event happens.
 *	It is false if there was an error or timeout.
 * @return false is system call failure, also logged.
 */
static int
sock_poll_timeout(int fd, int timeout, int pollin, int pollout, int* event)
{
	int loopcount = 0;
	/* Loop if the system call returns an errno to do so, like EINTR. */
	while(1) {
		struct pollfd p, *fds;
		int nfds, ret;
		/* Guard against spinning forever on repeated EINTR. */
		if(++loopcount > IPC_LOOP_MAX) {
			log_err("sock_poll_timeout: loop");
			if(event)
				*event = 0;
			return 0;
		}
		if(fd == -1) {
			/* No fd; poll is used only as a msec timer. */
			fds = NULL;
			nfds = 0;
		} else {
			fds = &p;
			nfds = 1;
			memset(&p, 0, sizeof(p));
			p.fd = fd;
			/* Always watch for errors. POLLHUP is not a
			 * valid input event for WSAPoll on Windows. */
			p.events = POLLERR
#ifndef USE_WINSOCK
				| POLLHUP
#endif
				;
			if(pollin)
				p.events |= POLLIN;
			if(pollout)
				p.events |= POLLOUT;
		}
#ifndef USE_WINSOCK
		ret = poll(fds, nfds, timeout);
#else
		if(fds == NULL) {
			/* WSAPoll cannot wait on zero sockets; sleep. */
			Sleep(timeout);
			ret = 0;
		} else {
			ret = WSAPoll(fds, nfds, timeout);
		}
#endif
		if(ret == -1) {
			if(
#ifndef USE_WINSOCK
				errno == EINTR || errno == EAGAIN
# ifdef EWOULDBLOCK
				|| errno == EWOULDBLOCK
# endif
#else
				WSAGetLastError() == WSAEINTR ||
				WSAGetLastError() == WSAEINPROGRESS ||
				WSAGetLastError() == WSAEWOULDBLOCK
#endif
				)
				continue; /* Try again. */
			log_err("poll: %s", sock_strerror(errno));
			if(event)
				*event = 0;
			return 0;
		} else if(ret == 0) {
			/* Timeout */
			if(event)
				*event = 0;
			return 1;
		}
		break;
	}
	if(event)
		*event = 1;
	return 1;
}
+
+/** fast reload convert fast reload notification status to string */
+static const char*
+fr_notification_to_string(enum fast_reload_notification status)
+{
+ switch(status) {
+ case fast_reload_notification_none:
+ return "none";
+ case fast_reload_notification_done:
+ return "done";
+ case fast_reload_notification_done_error:
+ return "done_error";
+ case fast_reload_notification_exit:
+ return "exit";
+ case fast_reload_notification_exited:
+ return "exited";
+ case fast_reload_notification_printout:
+ return "printout";
+ case fast_reload_notification_reload_stop:
+ return "reload_stop";
+ case fast_reload_notification_reload_ack:
+ return "reload_ack";
+ case fast_reload_notification_reload_nopause_poll:
+ return "reload_nopause_poll";
+ case fast_reload_notification_reload_start:
+ return "reload_start";
+ default:
+ break;
+ }
+ return "unknown";
+}
+
+#ifndef THREADS_DISABLED
/** fast reload, poll for notification incoming. True if quit.
 * Does a nonblocking check of the thread's side of the socketpair for
 * an exit command from the server; sets fr->need_to_quit if seen. */
static int
fr_poll_for_quit(struct fast_reload_thread* fr)
{
	int inevent, loopexit = 0, bcount = 0;
	uint32_t cmd;
	ssize_t ret;

	if(fr->need_to_quit)
		return 1;
	/* Is there data? Zero timeout makes this a nonblocking test. */
	if(!sock_poll_timeout(fr->commpair[1], 0, 1, 0, &inevent)) {
		log_err("fr_poll_for_quit: poll failed");
		return 0;
	}
	if(!inevent)
		return 0;

	/* Read the data, the uint32 command value. */
	while(1) {
		if(++loopexit > IPC_LOOP_MAX) {
			log_err("fr_poll_for_quit: recv loops %s",
				sock_strerror(errno));
			return 0;
		}
		ret = recv(fr->commpair[1], ((char*)&cmd)+bcount,
			sizeof(cmd)-bcount, 0);
		if(ret == -1) {
			if(
#ifndef USE_WINSOCK
				errno == EINTR || errno == EAGAIN
# ifdef EWOULDBLOCK
				|| errno == EWOULDBLOCK
# endif
#else
				WSAGetLastError() == WSAEINTR ||
				WSAGetLastError() == WSAEINPROGRESS ||
				WSAGetLastError() == WSAEWOULDBLOCK
#endif
				)
				continue; /* Try again. */
			log_err("fr_poll_for_quit: recv: %s",
				sock_strerror(errno));
			return 0;
		} else if(ret+(ssize_t)bcount != sizeof(cmd)) {
			/* Partial read; keep receiving until the whole
			 * command value has arrived. */
			bcount += ret;
			if((size_t)bcount < sizeof(cmd))
				continue;
		}
		break;
	}
	if(cmd == fast_reload_notification_exit) {
		fr->need_to_quit = 1;
		verbose(VERB_ALGO, "fast reload: exit notification received");
		return 1;
	}
	/* Only exit is expected on this side; anything else is logged. */
	log_err("fr_poll_for_quit: unknown notification status received: %d %s",
		cmd, fr_notification_to_string(cmd));
	return 0;
}
+
/** fast reload thread. Send notification from the fast reload thread.
 * Writes the status command over the thread's side of the socketpair.
 * Loops until it is written, but stays responsive to incoming exit
 * commands, so the server can still shut the thread down meanwhile. */
static void
fr_send_notification(struct fast_reload_thread* fr,
	enum fast_reload_notification status)
{
	int outevent, loopexit = 0, bcount = 0;
	uint32_t cmd;
	ssize_t ret;
	verbose(VERB_ALGO, "fast reload: send notification %s",
		fr_notification_to_string(status));
	/* Make a blocking attempt to send. But meanwhile stay responsive,
	 * once in a while for quit commands. In case the server has to quit. */
	/* see if there is incoming quit signals */
	if(fr_poll_for_quit(fr))
		return;
	cmd = status;
	while(1) {
		if(++loopexit > IPC_LOOP_MAX) {
			log_err("fast reload: could not send notification");
			return;
		}
		/* wait for socket to become writable */
		if(!sock_poll_timeout(fr->commpair[1], IPC_NOTIFICATION_WAIT,
			0, 1, &outevent)) {
			log_err("fast reload: poll failed");
			return;
		}
		/* Check for quit again; the poll may have waited a while. */
		if(fr_poll_for_quit(fr))
			return;
		if(!outevent)
			continue;
		ret = send(fr->commpair[1], ((char*)&cmd)+bcount,
			sizeof(cmd)-bcount, 0);
		if(ret == -1) {
			if(
#ifndef USE_WINSOCK
				errno == EINTR || errno == EAGAIN
# ifdef EWOULDBLOCK
				|| errno == EWOULDBLOCK
# endif
#else
				WSAGetLastError() == WSAEINTR ||
				WSAGetLastError() == WSAEINPROGRESS ||
				WSAGetLastError() == WSAEWOULDBLOCK
#endif
				)
				continue; /* Try again. */
			log_err("fast reload send notification: send: %s",
				sock_strerror(errno));
			return;
		} else if(ret+(ssize_t)bcount != sizeof(cmd)) {
			/* Partial send; continue with the remaining bytes. */
			bcount += ret;
			if((size_t)bcount < sizeof(cmd))
				continue;
		}
		break;
	}
}
+
/** fast reload thread queue up text string for output.
 * Appends a copy of msg to the fr_output list, under the output lock,
 * for the remote control thread to write to the client connection.
 * @param fr: fast reload thread structure.
 * @param msg: the text, it is copied.
 * @return false on allocation failure, which is logged. */
static int
fr_output_text(struct fast_reload_thread* fr, const char* msg)
{
	char* item = strdup(msg);
	if(!item) {
		log_err("fast reload output text: strdup out of memory");
		return 0;
	}
	lock_basic_lock(&fr->fr_output_lock);
	if(!cfg_strlist_append(fr->fr_output, item)) {
		lock_basic_unlock(&fr->fr_output_lock);
		/* The item is freed by cfg_strlist_append on failure. */
		log_err("fast reload output text: append out of memory");
		return 0;
	}
	lock_basic_unlock(&fr->fr_output_lock);
	return 1;
}
+
/** fast reload thread output vmsg function.
 * Formats into a stack buffer and queues it for output.
 * Output longer than the buffer is silently truncated. */
static int
fr_output_vmsg(struct fast_reload_thread* fr, const char* format, va_list args)
{
	char buf[1024];
	vsnprintf(buf, sizeof(buf), format, args);
	return fr_output_text(fr, buf);
}
+
+/** fast reload thread printout function, with printf arguments */
+static int fr_output_printf(struct fast_reload_thread* fr,
+ const char* format, ...) ATTR_FORMAT(printf, 2, 3);
+
+/** fast reload thread printout function, prints to list and signals
+ * the remote control thread to move that to get written to the socket
+ * of the remote control connection. */
+static int
+fr_output_printf(struct fast_reload_thread* fr, const char* format, ...)
+{
+ va_list args;
+ int ret;
+ va_start(args, format);
+ ret = fr_output_vmsg(fr, format, args);
+ va_end(args);
+ return ret;
+}
+
/** fast reload thread, init time counters.
 * Zeroes all the phase timestamps and records the start time. */
static void
fr_init_time(struct timeval* time_start, struct timeval* time_read,
	struct timeval* time_construct, struct timeval* time_reload,
	struct timeval* time_end)
{
	struct timeval* times[5];
	int i;
	times[0] = time_start;
	times[1] = time_read;
	times[2] = time_construct;
	times[3] = time_reload;
	times[4] = time_end;
	for(i = 0; i < 5; i++)
		memset(times[i], 0, sizeof(*times[i]));
	if(gettimeofday(time_start, NULL) < 0)
		log_err("gettimeofday: %s", strerror(errno));
}
+
/**
 * Structure with constructed elements for use during fast reload.
 * At the start it contains the tree items for the new config.
 * After the tree items are swapped into the server, the old elements
 * are kept in here. They can then be deleted.
 * The struct itself is not allocated here; fr_construct_clear only
 * deletes the contained items, not the struct.
 */
struct fast_reload_construct {
	/** construct for views */
	struct views* views;
	/** construct for auth zones */
	struct auth_zones* auth_zones;
	/** construct for forwards */
	struct iter_forwards* fwds;
	/** construct for stubs */
	struct iter_hints* hints;
	/** construct for respip_set */
	struct respip_set* respip_set;
	/** construct for access control */
	struct acl_list* acl;
	/** construct for access control interface */
	struct acl_list* acl_interface;
	/** construct for tcp connection limit */
	struct tcl_list* tcl;
	/** construct for local zones */
	struct local_zones* local_zones;
	/** if there is response ip configuration in use */
	int use_response_ip;
	/** if there is an rpz zone */
	int use_rpz;
	/** construct for edns strings */
	struct edns_strings* edns_strings;
	/** construct for trust anchors */
	struct val_anchors* anchors;
	/** construct for nsec3 key size, malloced array */
	size_t* nsec3_keysize;
	/** construct for nsec3 max iter, malloced array */
	size_t* nsec3_maxiter;
	/** construct for nsec3 keyiter count */
	int nsec3_keyiter_count;
	/** construct for target fetch policy, malloced array */
	int* target_fetch_policy;
	/** construct for max dependency depth */
	int max_dependency_depth;
	/** construct for donotquery addresses */
	struct iter_donotq* donotq;
	/** construct for private addresses and domains */
	struct iter_priv* priv;
	/** construct whitelist for capsforid names */
	struct rbtree_type* caps_white;
	/** construct for nat64 */
	struct iter_nat64 nat64;
	/** construct for wait_limits_netblock */
	struct rbtree_type wait_limits_netblock;
	/** construct for wait_limits_cookie_netblock */
	struct rbtree_type wait_limits_cookie_netblock;
	/** construct for domain limits */
	struct rbtree_type domain_limits;
	/** storage for the old configuration elements. The outer struct
	 * is allocated with malloc here, the items are from config. */
	struct config_file* oldcfg;
};
+
/** fast reload thread, read config.
 * Allocates *newcfg and reads the config file into it.
 * @param fr: fast reload thread structure.
 * @param newcfg: the new config is allocated and returned here; it is
 *	not allocated on failure.
 * @return false on failure; a printout is queued for the client.
 *	Returns true also when told to quit, so callers must check
 *	fr_poll_for_quit afterwards. */
static int
fr_read_config(struct fast_reload_thread* fr, struct config_file** newcfg)
{
	/* Create new config structure. */
	*newcfg = config_create();
	if(!*newcfg) {
		if(!fr_output_printf(fr, "config_create failed: out of memory\n"))
			return 0;
		fr_send_notification(fr, fast_reload_notification_printout);
		return 0;
	}
	if(fr_poll_for_quit(fr))
		return 1;

	/* Read new config from file */
	if(!config_read(*newcfg, fr->worker->daemon->cfgfile,
		fr->worker->daemon->chroot)) {
		config_delete(*newcfg);
		if(!fr_output_printf(fr, "config_read %s failed: %s\n",
			fr->worker->daemon->cfgfile, strerror(errno)))
			return 0;
		fr_send_notification(fr, fast_reload_notification_printout);
		return 0;
	}
	if(fr_poll_for_quit(fr))
		return 1;
	if(fr->fr_verb >= 1) {
		if(!fr_output_printf(fr, "done read config file %s\n",
			fr->worker->daemon->cfgfile))
			return 0;
		fr_send_notification(fr, fast_reload_notification_printout);
	}

	return 1;
}
+
/** Check if two taglists are equal.
 * Same length and same strings, in order. */
static int
taglist_equal(char** tagname_a, int num_tags_a, char** tagname_b,
	int num_tags_b)
{
	int idx = 0;
	if(num_tags_a != num_tags_b)
		return 0;
	while(idx < num_tags_a) {
		if(strcmp(tagname_a[idx], tagname_b[idx]) != 0)
			return 0;
		idx++;
	}
	return 1;
}
+
/** Check the change from a to b is only new entries at the end.
 * True when b is strictly longer than a and starts with exactly the
 * same tags as a, in the same order. */
static int
taglist_change_at_end(char** tagname_a, int num_tags_a, char** tagname_b,
	int num_tags_b)
{
	int i;
	if(num_tags_a < 0 || num_tags_b < 0)
		return 0;
	if(num_tags_a >= num_tags_b)
		return 0;
	/* b is longer than a; the shared prefix must be identical. */
	for(i = 0; i < num_tags_a; i++) {
		if(strcmp(tagname_a[i], tagname_b[i]) != 0)
			return 0;
	}
	return 1;
}
+
/** fast reload thread, check tag defines.
 * If the define-tag config changed incompatibly, enable drop-mesh so
 * queries holding stale tag bitlists are flushed.
 * @return false on printout failure. */
static int
fr_check_tag_defines(struct fast_reload_thread* fr, struct config_file* newcfg)
{
	/* The tags are kept in a bitlist for items. Some of them are stored
	 * in query info. If the tags change, then the old values are
	 * inaccurate. The solution is to then flush the query list.
	 * Unless the change only involves adding new tags at the end, that
	 * needs no changes. */
	if(!taglist_equal(fr->worker->daemon->cfg->tagname,
		fr->worker->daemon->cfg->num_tags, newcfg->tagname,
		newcfg->num_tags) &&
		!taglist_change_at_end(fr->worker->daemon->cfg->tagname,
		fr->worker->daemon->cfg->num_tags, newcfg->tagname,
		newcfg->num_tags)) {
		/* The tags have changed too much, the define-tag config. */
		if(fr->fr_drop_mesh)
			return 1; /* already dropping queries */
		fr->fr_drop_mesh = 1;
		fr->worker->daemon->fast_reload_drop_mesh = fr->fr_drop_mesh;
		if(!fr_output_printf(fr, "tags have changed, with "
			"'define-tag', and the queries have to be dropped "
			"for consistency, setting '+d'\n"))
			return 0;
		fr_send_notification(fr, fast_reload_notification_printout);
	}
	return 1;
}
+
/** fast reload thread, check if config item has changed, if not add to
 * the explanatory string.
 * @param cmp: nonzero when the item changed.
 * @param desc: option name to append to str.
 * @param str: accumulating, space-separated, description string.
 * @param len: total size of the str buffer.
 * If desc does not fit it is silently dropped. */
static void
fr_check_changed_cfg(int cmp, const char* desc, char* str, size_t len)
{
	size_t used, need;
	if(!cmp)
		return;
	used = strlen(str);
	if(used == 0) {
		/* First entry, no separating space. */
		snprintf(str, len, "%s", desc);
		return;
	}
	need = strlen(desc);
	if(len - used < need+2)
		return; /* It does not fit */
	snprintf(str+used, len-used, " %s", desc);
}
+
/** fast reload thread, check if config string has changed, checks NULLs.
 * NULL and non-NULL differ; two non-NULLs are compared with strcmp. */
static void
fr_check_changed_cfg_str(char* cmp1, char* cmp2, const char* desc, char* str,
	size_t len)
{
	int differs;
	if(cmp1 == NULL && cmp2 == NULL)
		differs = 0;
	else if(cmp1 == NULL || cmp2 == NULL)
		differs = 1;
	else	differs = (strcmp(cmp1, cmp2) != 0);
	if(differs)
		fr_check_changed_cfg(1, desc, str, len);
}
+
/** fast reload thread, check if config strlist has changed.
 * Walks both lists pairwise; any differing element, or differing list
 * length, marks the item changed and appends desc to str. */
static void
fr_check_changed_cfg_strlist(struct config_strlist* cmp1,
	struct config_strlist* cmp2, const char* desc, char* str, size_t len)
{
	struct config_strlist* p1 = cmp1, *p2 = cmp2;
	while(p1 && p2) {
		if((!p1->str && p2->str) ||
			(p1->str && !p2->str) ||
			(p1->str && p2->str && strcmp(p1->str, p2->str) != 0)) {
			/* The strlist is different. */
			fr_check_changed_cfg(1, desc, str, len);
			return;
		}
		p1 = p1->next;
		p2 = p2->next;
	}
	/* One list is longer than the other. */
	if((!p1 && p2) || (p1 && !p2)) {
		fr_check_changed_cfg(1, desc, str, len);
	}
}
+
/** fast reload thread, check if config str2list has changed.
 * Like fr_check_changed_cfg_strlist but compares both the str and str2
 * members of every pair of elements. */
static void
fr_check_changed_cfg_str2list(struct config_str2list* cmp1,
	struct config_str2list* cmp2, const char* desc, char* str, size_t len)
{
	struct config_str2list* p1 = cmp1, *p2 = cmp2;
	while(p1 && p2) {
		if((!p1->str && p2->str) ||
			(p1->str && !p2->str) ||
			(p1->str && p2->str && strcmp(p1->str, p2->str) != 0)) {
			/* The str2list is different. */
			fr_check_changed_cfg(1, desc, str, len);
			return;
		}
		if((!p1->str2 && p2->str2) ||
			(p1->str2 && !p2->str2) ||
			(p1->str2 && p2->str2 &&
			strcmp(p1->str2, p2->str2) != 0)) {
			/* The str2list is different. */
			fr_check_changed_cfg(1, desc, str, len);
			return;
		}
		p1 = p1->next;
		p2 = p2->next;
	}
	/* One list is longer than the other. */
	if((!p1 && p2) || (p1 && !p2)) {
		fr_check_changed_cfg(1, desc, str, len);
	}
}
+
/** fast reload thread, check compatible config items.
 * Collects the names of changed options that fast reload cannot apply
 * (mostly socket and listen setup fixed at startup) in changed_str.
 * @return false, with a printout queued, when incompatible changes
 *	are found; true when the new config is compatible. */
static int
fr_check_compat_cfg(struct fast_reload_thread* fr, struct config_file* newcfg)
{
	int i;
	char changed_str[1024];
	struct config_file* cfg = fr->worker->env.cfg;
	changed_str[0]=0;

	/* Find incompatible options, and if so, print an error. */
	fr_check_changed_cfg(cfg->num_threads != newcfg->num_threads,
		"num-threads", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->do_ip4 != newcfg->do_ip4,
		"do-ip4", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->do_ip6 != newcfg->do_ip6,
		"do-ip6", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->do_udp != newcfg->do_udp,
		"do-udp", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->do_tcp != newcfg->do_tcp,
		"do-tcp", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->port != newcfg->port,
		"port", changed_str, sizeof(changed_str));
	/* But cfg->outgoing_num_ports has been changed at startup,
	 * possibly to reduce it, so do not check it here. */
	fr_check_changed_cfg(cfg->outgoing_num_tcp != newcfg->outgoing_num_tcp,
		"outgoing-num-tcp", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->incoming_num_tcp != newcfg->incoming_num_tcp,
		"incoming-num-tcp", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->num_out_ifs != newcfg->num_out_ifs,
		"outgoing-interface", changed_str, sizeof(changed_str));
	/* Same count; compare the interface strings elementwise. */
	if(cfg->num_out_ifs == newcfg->num_out_ifs) {
		for(i=0; i<cfg->num_out_ifs; i++)
			fr_check_changed_cfg(strcmp(cfg->out_ifs[i],
				newcfg->out_ifs[i]) != 0, "outgoing-interface",
				changed_str, sizeof(changed_str));
	}
	fr_check_changed_cfg(cfg->num_ifs != newcfg->num_ifs,
		"interface", changed_str, sizeof(changed_str));
	if(cfg->num_ifs == newcfg->num_ifs) {
		for(i=0; i<cfg->num_ifs; i++)
			fr_check_changed_cfg(strcmp(cfg->ifs[i],
				newcfg->ifs[i]) != 0, "interface",
				changed_str, sizeof(changed_str));
	}
	fr_check_changed_cfg(cfg->if_automatic != newcfg->if_automatic,
		"interface-automatic", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->so_rcvbuf != newcfg->so_rcvbuf,
		"so-rcvbuf", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->so_sndbuf != newcfg->so_sndbuf,
		"so-sndbuf", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->so_reuseport != newcfg->so_reuseport,
		"so-reuseport", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->ip_transparent != newcfg->ip_transparent,
		"ip-transparent", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->ip_freebind != newcfg->ip_freebind,
		"ip-freebind", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->udp_connect != newcfg->udp_connect,
		"udp-connect", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->msg_buffer_size != newcfg->msg_buffer_size,
		"msg-buffer-size", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->do_tcp_keepalive != newcfg->do_tcp_keepalive,
		"edns-tcp-keepalive", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(
		cfg->tcp_keepalive_timeout != newcfg->tcp_keepalive_timeout,
		"edns-tcp-keepalive-timeout", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->tcp_idle_timeout != newcfg->tcp_idle_timeout,
		"tcp-idle-timeout", changed_str, sizeof(changed_str));
	/* Not changed, only if DoH is used, it is then stored in commpoints,
	 * as well as used from cfg. */
	fr_check_changed_cfg(
		cfg->harden_large_queries != newcfg->harden_large_queries,
		"harden-large-queries", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->http_max_streams != newcfg->http_max_streams,
		"http-max-streams", changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str(cfg->http_endpoint, newcfg->http_endpoint,
		"http-endpoint", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(
		cfg->http_notls_downstream != newcfg->http_notls_downstream,
		"http_notls_downstream", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->https_port != newcfg->https_port,
		"https-port", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->ssl_port != newcfg->ssl_port,
		"tls-port", changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str(cfg->ssl_service_key, newcfg->ssl_service_key,
		"tls-service-key", changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str(cfg->ssl_service_pem, newcfg->ssl_service_pem,
		"tls-service-pem", changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str(cfg->tls_cert_bundle, newcfg->tls_cert_bundle,
		"tls-cert-bundle", changed_str, sizeof(changed_str));
	fr_check_changed_cfg_strlist(cfg->proxy_protocol_port,
		newcfg->proxy_protocol_port, "proxy-protocol-port",
		changed_str, sizeof(changed_str));
	fr_check_changed_cfg_strlist(cfg->tls_additional_port,
		newcfg->tls_additional_port, "tls-additional-port",
		changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str(cfg->if_automatic_ports,
		newcfg->if_automatic_ports, "interface-automatic-ports",
		changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->udp_upstream_without_downstream !=
		newcfg->udp_upstream_without_downstream,
		"udp-upstream-without-downstream", changed_str,
		sizeof(changed_str));

	if(changed_str[0] != 0) {
		/* The new config changes some items that do not work with
		 * fast reload. */
		if(!fr_output_printf(fr, "The config changes items that are "
			"not compatible with fast_reload, perhaps do reload "
			"or restart: %s", changed_str) ||
			!fr_output_printf(fr, "\n"))
			return 0;
		fr_send_notification(fr, fast_reload_notification_printout);
		return 0;
	}
	return 1;
}
+
/** fast reload thread, check nopause config items.
 * When the +p (nopause) option is given, config items that live in
 * the iterator, validator, infra and dnstap state cannot be updated
 * without pausing the workers; detect changes to those and refuse.
 * @return false, with a printout queued, when such changes are found;
 *	true when nopause reload can proceed. */
static int
fr_check_nopause_cfg(struct fast_reload_thread* fr, struct config_file* newcfg)
{
	char changed_str[1024];
	struct config_file* cfg = fr->worker->env.cfg;
	if(!fr->fr_nopause)
		return 1; /* The nopause is not enabled, so no problem. */
	changed_str[0]=0;

	/* Check for iter_env. */
	fr_check_changed_cfg(
		cfg->outbound_msg_retry != newcfg->outbound_msg_retry,
		"outbound-msg-retry", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->max_sent_count != newcfg->max_sent_count,
		"max-sent-count", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(
		cfg->max_query_restarts != newcfg->max_query_restarts,
		"max-query-restarts", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(strcmp(cfg->target_fetch_policy,
		newcfg->target_fetch_policy) != 0,
		"target-fetch-policy", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(
		cfg->donotquery_localhost != newcfg->donotquery_localhost,
		"do-not-query-localhost", changed_str, sizeof(changed_str));
	fr_check_changed_cfg_strlist(cfg->donotqueryaddrs,
		newcfg->donotqueryaddrs, "do-not-query-localhost",
		changed_str, sizeof(changed_str));
	fr_check_changed_cfg_strlist(cfg->private_address,
		newcfg->private_address, "private-address",
		changed_str, sizeof(changed_str));
	fr_check_changed_cfg_strlist(cfg->private_domain,
		newcfg->private_domain, "private-domain",
		changed_str, sizeof(changed_str));
	fr_check_changed_cfg_strlist(cfg->caps_whitelist,
		newcfg->caps_whitelist, "caps-exempt",
		changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->do_nat64 != newcfg->do_nat64,
		"do-nat64", changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str(cfg->nat64_prefix, newcfg->nat64_prefix,
		"nat64-prefix", changed_str, sizeof(changed_str));

	/* Check for val_env. */
	fr_check_changed_cfg(cfg->bogus_ttl != newcfg->bogus_ttl,
		"val-bogus-ttl", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(
		cfg->val_date_override != newcfg->val_date_override,
		"val-date-override", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->val_sig_skew_min != newcfg->val_sig_skew_min,
		"val-sig-skew-min", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->val_sig_skew_max != newcfg->val_sig_skew_max,
		"val-sig-skew-max", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(cfg->val_max_restart != newcfg->val_max_restart,
		"val-max-restart", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(strcmp(cfg->val_nsec3_key_iterations,
		newcfg->val_nsec3_key_iterations) != 0,
		"val-nsec3-keysize-iterations", changed_str,
		sizeof(changed_str));

	/* Check for infra. */
	fr_check_changed_cfg(cfg->host_ttl != newcfg->host_ttl,
		"infra-host-ttl", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(
		cfg->infra_keep_probing != newcfg->infra_keep_probing,
		"infra-keep-probing", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(
		cfg->ratelimit != newcfg->ratelimit,
		"ratelimit", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(
		cfg->ip_ratelimit != newcfg->ip_ratelimit,
		"ip-ratelimit", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(
		cfg->ip_ratelimit_cookie != newcfg->ip_ratelimit_cookie,
		"ip-ratelimit-cookie", changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str2list(cfg->wait_limit_netblock,
		newcfg->wait_limit_netblock, "wait-limit-netblock",
		changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str2list(cfg->wait_limit_cookie_netblock,
		newcfg->wait_limit_cookie_netblock,
		"wait-limit-cookie-netblock", changed_str,
		sizeof(changed_str));
	fr_check_changed_cfg_str2list(cfg->ratelimit_below_domain,
		newcfg->ratelimit_below_domain, "ratelimit-below-domain",
		changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str2list(cfg->ratelimit_for_domain,
		newcfg->ratelimit_for_domain, "ratelimit-for-domain",
		changed_str, sizeof(changed_str));

	/* Check for dnstap. */
	fr_check_changed_cfg(
		cfg->dnstap_send_identity != newcfg->dnstap_send_identity,
		"dnstap-send-identity", changed_str, sizeof(changed_str));
	fr_check_changed_cfg(
		cfg->dnstap_send_version != newcfg->dnstap_send_version,
		"dnstap-send-version", changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str(cfg->dnstap_identity, newcfg->dnstap_identity,
		"dnstap-identity", changed_str, sizeof(changed_str));
	fr_check_changed_cfg_str(cfg->dnstap_version, newcfg->dnstap_version,
		"dnstap-version", changed_str, sizeof(changed_str));

	if(changed_str[0] != 0) {
		/* The new config changes some items that need a pause,
		 * to be able to update the variables. */
		if(!fr_output_printf(fr, "The config changes items that need "
			"the fast_reload +p option, for nopause, "
			"disabled to be reloaded: %s", changed_str) ||
			!fr_output_printf(fr, "\n"))
			return 0;
		fr_send_notification(fr, fast_reload_notification_printout);
		return 0;
	}
	return 1;
}
+
+/** fast reload thread, clear construct information, deletes items.
+ * Called both on success (to free the old state) and on any construction
+ * failure, so ct may be only partially filled; the member delete calls
+ * presumably tolerate NULL members — TODO confirm against their headers.
+ * @param ct: the construct holding the items to free; NULL is a no-op. */
+static void
+fr_construct_clear(struct fast_reload_construct* ct)
+{
+	if(!ct)
+		return;
+	auth_zones_delete(ct->auth_zones);
+	forwards_delete(ct->fwds);
+	hints_delete(ct->hints);
+	respip_set_delete(ct->respip_set);
+	local_zones_delete(ct->local_zones);
+	acl_list_delete(ct->acl);
+	acl_list_delete(ct->acl_interface);
+	tcl_list_delete(ct->tcl);
+	edns_strings_delete(ct->edns_strings);
+	anchors_delete(ct->anchors);
+	views_delete(ct->views);
+	/* Plain arrays/strings owned by the construct. */
+	free(ct->nsec3_keysize);
+	free(ct->nsec3_maxiter);
+	free(ct->target_fetch_policy);
+	donotq_delete(ct->donotq);
+	priv_delete(ct->priv);
+	caps_white_delete(ct->caps_white);
+	wait_limits_free(&ct->wait_limits_netblock);
+	wait_limits_free(&ct->wait_limits_cookie_netblock);
+	domain_limits_free(&ct->domain_limits);
+	/* Delete the log identity here so that the global value is not
+	 * reset by config_delete. */
+	if(ct->oldcfg && ct->oldcfg->log_identity) {
+		free(ct->oldcfg->log_identity);
+		ct->oldcfg->log_identity = NULL;
+	}
+	config_delete(ct->oldcfg);
+}
+
+/** Estimate the memory footprint of a config_strlist linked list,
+ * counting each node plus its string. */
+static size_t
+getmem_config_strlist(struct config_strlist* p)
+{
+	struct config_strlist* it = p;
+	size_t total = 0;
+	while(it != NULL) {
+		total += sizeof(*it) + getmem_str(it->str);
+		it = it->next;
+	}
+	return total;
+}
+
+/** Estimate the memory footprint of a config_str2list linked list,
+ * counting each node plus both of its strings. */
+static size_t
+getmem_config_str2list(struct config_str2list* p)
+{
+	struct config_str2list* it = p;
+	size_t total = 0;
+	while(it != NULL) {
+		total += sizeof(*it);
+		total += getmem_str(it->str);
+		total += getmem_str(it->str2);
+		it = it->next;
+	}
+	return total;
+}
+
+/** Estimate the memory footprint of a config_str3list linked list,
+ * counting each node plus its three strings. */
+static size_t
+getmem_config_str3list(struct config_str3list* p)
+{
+	struct config_str3list* it = p;
+	size_t total = 0;
+	while(it != NULL) {
+		total += sizeof(*it);
+		total += getmem_str(it->str);
+		total += getmem_str(it->str2);
+		total += getmem_str(it->str3);
+		it = it->next;
+	}
+	return total;
+}
+
+/** Estimate the memory footprint of a config_strbytelist linked list,
+ * counting each node, its string and its byte buffer (when present). */
+static size_t
+getmem_config_strbytelist(struct config_strbytelist* p)
+{
+	struct config_strbytelist* it = p;
+	size_t total = 0;
+	while(it != NULL) {
+		total += sizeof(*it) + getmem_str(it->str);
+		if(it->str2)
+			total += it->str2len;
+		it = it->next;
+	}
+	return total;
+}
+
+/** Estimate the memory held by an array of interface strings:
+ * the pointer array itself plus every string it references. */
+static size_t
+getmem_ifs(int numifs, char** ifs)
+{
+	size_t total = (size_t)numifs * sizeof(char*);
+	int idx;
+	for(idx = 0; idx < numifs; idx++)
+		total += getmem_str(ifs[idx]);
+	return total;
+}
+
+/** Estimate the memory footprint of a list of config_stub entries,
+ * counting each node, its name, and its host and address lists. */
+static size_t
+getmem_config_stub(struct config_stub* p)
+{
+	struct config_stub* it;
+	size_t total = 0;
+	for(it = p; it != NULL; it = it->next) {
+		total += sizeof(*it);
+		total += getmem_str(it->name);
+		total += getmem_config_strlist(it->hosts);
+		total += getmem_config_strlist(it->addrs);
+	}
+	return total;
+}
+
+/** Estimate the memory footprint of a list of config_auth entries,
+ * counting each node and its strings, string lists and taglist bytes. */
+static size_t
+getmem_config_auth(struct config_auth* p)
+{
+	struct config_auth* it;
+	size_t total = 0;
+	for(it = p; it != NULL; it = it->next) {
+		total += sizeof(*it);
+		total += getmem_str(it->name);
+		total += getmem_config_strlist(it->masters);
+		total += getmem_config_strlist(it->urls);
+		total += getmem_config_strlist(it->allow_notify);
+		total += getmem_str(it->zonefile);
+		total += it->rpz_taglistlen;
+		total += getmem_str(it->rpz_action_override);
+		total += getmem_str(it->rpz_log_name);
+		total += getmem_str(it->rpz_cname);
+	}
+	return total;
+}
+
+/** Estimate the memory footprint of a list of config_view entries,
+ * counting each node, its name, and its per-view zone and respip lists. */
+static size_t
+getmem_config_view(struct config_view* p)
+{
+	struct config_view* it;
+	size_t total = 0;
+	for(it = p; it != NULL; it = it->next) {
+		total += sizeof(*it);
+		total += getmem_str(it->name);
+		total += getmem_config_str2list(it->local_zones);
+		total += getmem_config_strlist(it->local_data);
+		total += getmem_config_strlist(it->local_zones_nodefault);
+#ifdef USE_IPSET
+		total += getmem_config_strlist(it->local_zones_ipset);
+#endif
+		total += getmem_config_str2list(it->respip_actions);
+		total += getmem_config_str2list(it->respip_data);
+	}
+	return total;
+}
+
+/** get memory used by config_file item, estimate.
+ * Sums the struct itself plus every heap-allocated member it owns,
+ * using the getmem_* helpers above. Fixed-size arrays whose sizes are
+ * kept in sync elsewhere (ifs, tagname) are counted via getmem_ifs.
+ * @param cfg: config to measure; members may be NULL (helpers return 0).
+ * @return estimated number of bytes. */
+static size_t
+config_file_getmem(struct config_file* cfg)
+{
+	size_t m = 0;
+	m += sizeof(*cfg);
+	/* Listening, TLS and HTTP settings. */
+	m += getmem_config_strlist(cfg->proxy_protocol_port);
+	m += getmem_str(cfg->ssl_service_key);
+	m += getmem_str(cfg->ssl_service_pem);
+	m += getmem_str(cfg->tls_cert_bundle);
+	m += getmem_config_strlist(cfg->tls_additional_port);
+	m += getmem_config_strlist(cfg->tls_session_ticket_keys.first);
+	m += getmem_str(cfg->tls_ciphers);
+	m += getmem_str(cfg->tls_ciphersuites);
+	m += getmem_str(cfg->http_endpoint);
+	/* The available-ports array is a full 65536-entry int array
+	 * when allocated. */
+	m += (cfg->outgoing_avail_ports?65536*sizeof(int):0);
+	m += getmem_str(cfg->target_fetch_policy);
+	m += getmem_str(cfg->if_automatic_ports);
+	m += getmem_ifs(cfg->num_ifs, cfg->ifs);
+	m += getmem_ifs(cfg->num_out_ifs, cfg->out_ifs);
+	/* Stub, forward, auth zone and view configuration. */
+	m += getmem_config_strlist(cfg->root_hints);
+	m += getmem_config_stub(cfg->stubs);
+	m += getmem_config_stub(cfg->forwards);
+	m += getmem_config_auth(cfg->auths);
+	m += getmem_config_view(cfg->views);
+	m += getmem_config_strlist(cfg->donotqueryaddrs);
+#ifdef CLIENT_SUBNET
+	m += getmem_config_strlist(cfg->client_subnet);
+	m += getmem_config_strlist(cfg->client_subnet_zone);
+#endif
+	m += getmem_config_str2list(cfg->acls);
+	m += getmem_config_str2list(cfg->tcp_connection_limits);
+	m += getmem_config_strlist(cfg->caps_whitelist);
+	m += getmem_config_strlist(cfg->private_address);
+	m += getmem_config_strlist(cfg->private_domain);
+	/* Daemon identity and logging strings. */
+	m += getmem_str(cfg->chrootdir);
+	m += getmem_str(cfg->username);
+	m += getmem_str(cfg->directory);
+	m += getmem_str(cfg->logfile);
+	m += getmem_str(cfg->pidfile);
+	m += getmem_str(cfg->log_identity);
+	m += getmem_str(cfg->identity);
+	m += getmem_str(cfg->version);
+	m += getmem_str(cfg->http_user_agent);
+	m += getmem_str(cfg->nsid_cfg_str);
+	m += (cfg->nsid?cfg->nsid_len:0);
+	/* Module and validator configuration. */
+	m += getmem_str(cfg->module_conf);
+	m += getmem_config_strlist(cfg->trust_anchor_file_list);
+	m += getmem_config_strlist(cfg->trust_anchor_list);
+	m += getmem_config_strlist(cfg->auto_trust_anchor_file_list);
+	m += getmem_config_strlist(cfg->trusted_keys_file_list);
+	m += getmem_config_strlist(cfg->domain_insecure);
+	m += getmem_str(cfg->val_nsec3_key_iterations);
+	/* Local zone and response-ip configuration. */
+	m += getmem_config_str2list(cfg->local_zones);
+	m += getmem_config_strlist(cfg->local_zones_nodefault);
+#ifdef USE_IPSET
+	m += getmem_config_strlist(cfg->local_zones_ipset);
+#endif
+	m += getmem_config_strlist(cfg->local_data);
+	m += getmem_config_str3list(cfg->local_zone_overrides);
+	m += getmem_config_strbytelist(cfg->local_zone_tags);
+	m += getmem_config_strbytelist(cfg->acl_tags);
+	m += getmem_config_str3list(cfg->acl_tag_actions);
+	m += getmem_config_str3list(cfg->acl_tag_datas);
+	m += getmem_config_str2list(cfg->acl_view);
+	m += getmem_config_str2list(cfg->interface_actions);
+	m += getmem_config_strbytelist(cfg->interface_tags);
+	m += getmem_config_str3list(cfg->interface_tag_actions);
+	m += getmem_config_str3list(cfg->interface_tag_datas);
+	m += getmem_config_str2list(cfg->interface_view);
+	m += getmem_config_strbytelist(cfg->respip_tags);
+	m += getmem_config_str2list(cfg->respip_actions);
+	m += getmem_config_str2list(cfg->respip_data);
+	m += getmem_ifs(cfg->num_tags, cfg->tagname);
+	/* Remote control settings. */
+	m += getmem_config_strlist(cfg->control_ifs.first);
+	m += getmem_str(cfg->server_key_file);
+	m += getmem_str(cfg->server_cert_file);
+	m += getmem_str(cfg->control_key_file);
+	m += getmem_str(cfg->control_cert_file);
+	m += getmem_config_strlist(cfg->python_script);
+	m += getmem_config_strlist(cfg->dynlib_file);
+	/* DNS64/NAT64 and dnstap settings. */
+	m += getmem_str(cfg->dns64_prefix);
+	m += getmem_config_strlist(cfg->dns64_ignore_aaaa);
+	m += getmem_str(cfg->nat64_prefix);
+	m += getmem_str(cfg->dnstap_socket_path);
+	m += getmem_str(cfg->dnstap_ip);
+	m += getmem_str(cfg->dnstap_tls_server_name);
+	m += getmem_str(cfg->dnstap_tls_cert_bundle);
+	m += getmem_str(cfg->dnstap_tls_client_key_file);
+	m += getmem_str(cfg->dnstap_tls_client_cert_file);
+	m += getmem_str(cfg->dnstap_identity);
+	m += getmem_str(cfg->dnstap_version);
+	m += getmem_config_str2list(cfg->ratelimit_for_domain);
+	m += getmem_config_str2list(cfg->ratelimit_below_domain);
+	m += getmem_config_str2list(cfg->edns_client_strings);
+	m += getmem_str(cfg->dnscrypt_provider);
+	m += getmem_config_strlist(cfg->dnscrypt_secret_key);
+	m += getmem_config_strlist(cfg->dnscrypt_provider_cert);
+	m += getmem_config_strlist(cfg->dnscrypt_provider_cert_rotated);
+#ifdef USE_IPSECMOD
+	m += getmem_config_strlist(cfg->ipsecmod_whitelist);
+	m += getmem_str(cfg->ipsecmod_hook);
+#endif
+#ifdef USE_CACHEDB
+	m += getmem_str(cfg->cachedb_backend);
+	m += getmem_str(cfg->cachedb_secret);
+#ifdef USE_REDIS
+	m += getmem_str(cfg->redis_server_host);
+	m += getmem_str(cfg->redis_server_path);
+	m += getmem_str(cfg->redis_server_password);
+#endif
+#endif
+#ifdef USE_IPSET
+	m += getmem_str(cfg->ipset_name_v4);
+	m += getmem_str(cfg->ipset_name_v6);
+#endif
+	return m;
+}
+
+/** fast reload thread, print memory used by construct of items.
+ * Sums the memory of the newly constructed items and the new config,
+ * and prints the total to the fast-reload client.
+ * @param fr: the fast reload thread, for quit-poll and output.
+ * @param newcfg: the newly read config file.
+ * @param ct: the construct with the newly built items.
+ * @return 0 on output failure, 1 otherwise (also when quit polled). */
+static int
+fr_printmem(struct fast_reload_thread* fr,
+	struct config_file* newcfg, struct fast_reload_construct* ct)
+{
+	size_t mem = 0;
+	if(fr_poll_for_quit(fr))
+		return 1;
+	mem += views_get_mem(ct->views);
+	mem += respip_set_get_mem(ct->respip_set);
+	mem += auth_zones_get_mem(ct->auth_zones);
+	mem += forwards_get_mem(ct->fwds);
+	mem += hints_get_mem(ct->hints);
+	mem += local_zones_get_mem(ct->local_zones);
+	mem += acl_list_get_mem(ct->acl);
+	mem += acl_list_get_mem(ct->acl_interface);
+	mem += tcl_list_get_mem(ct->tcl);
+	mem += edns_strings_get_mem(ct->edns_strings);
+	mem += anchors_get_mem(ct->anchors);
+	mem += sizeof(*ct->oldcfg);
+	mem += config_file_getmem(newcfg);
+
+	/* Print as unsigned long; casting the size_t total to int could
+	 * truncate, or print a negative number, for totals >= 2GB. */
+	if(!fr_output_printf(fr, "memory use %lu bytes\n",
+		(unsigned long)mem))
+		return 0;
+	fr_send_notification(fr, fast_reload_notification_printout);
+
+	return 1;
+}
+
+/** fast reload thread, setup the acl_interface for the ports that
+ * the server has. Resets the acl_interface and applies the acl setup
+ * for the first port set, and for every extra reuseport port set. */
+static int
+ct_acl_interface_setup_ports(struct acl_list* acl_interface,
+	struct daemon* daemon)
+{
+	size_t idx;
+	/* Start from a clean acl_interface. */
+	acl_interface_init(acl_interface);
+	if(!setup_acl_for_ports(acl_interface, daemon->ports[0]))
+		return 0;
+	if(!daemon->reuseport)
+		return 1;
+	/* With reuseport there is a port set per thread; apply each. */
+	for(idx = 1; idx < daemon->num_ports; idx++) {
+		if(!setup_acl_for_ports(acl_interface, daemon->ports[idx]))
+			return 0;
+	}
+	return 1;
+}
+
+/** fast reload, add new change to list of auth zones.
+ * Allocates a change record and pushes it on the front of the
+ * thread's auth_zone_change_list.
+ * @return 0 on allocation failure, 1 on success. */
+static int
+fr_add_auth_zone_change(struct fast_reload_thread* fr, struct auth_zone* old_z,
+	struct auth_zone* new_z, int is_deleted, int is_added, int is_changed)
+{
+	struct fast_reload_auth_change* chg = calloc(1, sizeof(*chg));
+	if(chg == NULL) {
+		log_err("malloc failure in add auth zone change");
+		return 0;
+	}
+	chg->new_z = new_z;
+	chg->old_z = old_z;
+	chg->is_changed = is_changed;
+	chg->is_added = is_added;
+	chg->is_deleted = is_deleted;
+	/* Push on the front of the list. */
+	chg->next = fr->auth_zone_change_list;
+	fr->auth_zone_change_list = chg;
+	return 1;
+}
+
+/** See if auth master is equal.
+ * Strings must both be absent or both present with equal text;
+ * flags must agree in truth value; ports must match exactly. */
+static int
+xfr_auth_master_equal(struct auth_master* m1, struct auth_master* m2)
+{
+	if(m1 == NULL || m2 == NULL)
+		return m1 == m2;
+
+	if((m1->host == NULL) != (m2->host == NULL))
+		return 0;
+	if(m1->host != NULL && strcmp(m1->host, m2->host) != 0)
+		return 0;
+
+	if((m1->file == NULL) != (m2->file == NULL))
+		return 0;
+	if(m1->file != NULL && strcmp(m1->file, m2->file) != 0)
+		return 0;
+
+	if((m1->http != 0) != (m2->http != 0))
+		return 0;
+	if((m1->ixfr != 0) != (m2->ixfr != 0))
+		return 0;
+	if((m1->allow_notify != 0) != (m2->allow_notify != 0))
+		return 0;
+	if((m1->ssl != 0) != (m2->ssl != 0))
+		return 0;
+	if(m1->port != m2->port)
+		return 0;
+	return 1;
+}
+
+/** See if list of auth masters is equal.
+ * Walks both lists in step; every pair must be equal and the lists
+ * must end at the same time. */
+static int
+xfr_masterlist_equal(struct auth_master* list1, struct auth_master* list2)
+{
+	struct auth_master* a = list1;
+	struct auth_master* b = list2;
+	for(; a != NULL && b != NULL; a = a->next, b = b->next) {
+		if(!xfr_auth_master_equal(a, b))
+			return 0;
+	}
+	return a == NULL && b == NULL;
+}
+
+/** See if the list of masters has changed.
+ * Compares both the probe and the transfer master lists of the
+ * two auth_xfer elements; NULL equals only NULL. */
+static int
+xfr_masters_equal(struct auth_xfer* xfr1, struct auth_xfer* xfr2)
+{
+	if(xfr1 == NULL || xfr2 == NULL)
+		return xfr1 == xfr2;
+	if(!xfr_masterlist_equal(xfr1->task_probe->masters,
+		xfr2->task_probe->masters))
+		return 0;
+	if(!xfr_masterlist_equal(xfr1->task_transfer->masters,
+		xfr2->task_transfer->masters))
+		return 0;
+	return 1;
+}
+
+/** Check what has changed in auth zones, like added and deleted zones.
+ * Compares the newly constructed ct->auth_zones against the current
+ * env->auth_zones and records each difference (delete/add/change) via
+ * fr_add_auth_zone_change. Lock order: the ct auth_zones lock is taken
+ * before the env auth_zones lock (see the comment below); per-zone and
+ * per-xfer locks are taken while the collection locks are held.
+ * @return 0 on allocation failure (with all locks released), 1 on
+ * success. */
+static int
+auth_zones_check_changes(struct fast_reload_thread* fr,
+	struct fast_reload_construct* ct)
+{
+	/* Check every zone in turn. */
+	struct auth_zone* new_z, *old_z;
+	struct module_env* env = &fr->worker->env;
+
+	fr->old_auth_zones = ct->auth_zones;
+	/* Nobody is using the new ct version yet.
+	 * Also the ct lock is picked up before the env lock for auth_zones. */
+	lock_rw_rdlock(&ct->auth_zones->lock);
+
+	/* Find deleted zones by looping over the current list and looking
+	 * up in the new tree. */
+	lock_rw_rdlock(&env->auth_zones->lock);
+	RBTREE_FOR(old_z, struct auth_zone*, &env->auth_zones->ztree) {
+		new_z = auth_zone_find(ct->auth_zones, old_z->name,
+			old_z->namelen, old_z->dclass);
+		if(!new_z) {
+			/* The zone has been removed. */
+			if(!fr_add_auth_zone_change(fr, old_z, NULL, 1, 0,
+				0)) {
+				lock_rw_unlock(&env->auth_zones->lock);
+				lock_rw_unlock(&ct->auth_zones->lock);
+				return 0;
+			}
+		}
+	}
+	lock_rw_unlock(&env->auth_zones->lock);
+
+	/* Find added zones by looping over new list and lookup in current. */
+	RBTREE_FOR(new_z, struct auth_zone*, &ct->auth_zones->ztree) {
+		/* The env lock is re-acquired per iteration for the lookup. */
+		lock_rw_rdlock(&env->auth_zones->lock);
+		old_z = auth_zone_find(env->auth_zones, new_z->name,
+			new_z->namelen, new_z->dclass);
+		if(!old_z) {
+			/* The zone has been added. */
+			lock_rw_unlock(&env->auth_zones->lock);
+			if(!fr_add_auth_zone_change(fr, NULL, new_z, 0, 1,
+				0)) {
+				lock_rw_unlock(&ct->auth_zones->lock);
+				return 0;
+			}
+		} else {
+			/* The zone exists in both; compare serial numbers
+			 * and master lists to detect a change. */
+			uint32_t old_serial = 0, new_serial = 0;
+			int have_old = 0, have_new = 0;
+			struct auth_xfer* old_xfr, *new_xfr;
+			lock_rw_rdlock(&new_z->lock);
+			lock_rw_rdlock(&old_z->lock);
+			new_xfr = auth_xfer_find(ct->auth_zones, new_z->name,
+				new_z->namelen, new_z->dclass);
+			old_xfr = auth_xfer_find(env->auth_zones, old_z->name,
+				old_z->namelen, old_z->dclass);
+			if(new_xfr) {
+				lock_basic_lock(&new_xfr->lock);
+			}
+			if(old_xfr) {
+				lock_basic_lock(&old_xfr->lock);
+			}
+			lock_rw_unlock(&env->auth_zones->lock);
+
+			/* Change in the auth zone can be detected. */
+			/* A change in serial number means that auth_xfer
+			 * has to be updated. */
+			have_old = (auth_zone_get_serial(old_z,
+				&old_serial)!=0);
+			have_new = (auth_zone_get_serial(new_z,
+				&new_serial)!=0);
+			if(have_old != have_new || old_serial != new_serial
+				|| !xfr_masters_equal(old_xfr, new_xfr)) {
+				/* The zone has been changed. */
+				if(!fr_add_auth_zone_change(fr, old_z, new_z,
+					0, 0, 1)) {
+					lock_rw_unlock(&old_z->lock);
+					lock_rw_unlock(&new_z->lock);
+					lock_rw_unlock(&ct->auth_zones->lock);
+					if(new_xfr) {
+						lock_basic_unlock(&new_xfr->lock);
+					}
+					if(old_xfr) {
+						lock_basic_unlock(&old_xfr->lock);
+					}
+					return 0;
+				}
+			}
+
+			if(new_xfr) {
+				lock_basic_unlock(&new_xfr->lock);
+			}
+			if(old_xfr) {
+				lock_basic_unlock(&old_xfr->lock);
+			}
+			lock_rw_unlock(&old_z->lock);
+			lock_rw_unlock(&new_z->lock);
+		}
+	}
+
+	lock_rw_unlock(&ct->auth_zones->lock);
+	return 1;
+}
+
+/** fast reload thread, construct from config the new items.
+ * Builds every reloadable item (views, acls, zones, hints, ...) from
+ * newcfg into ct, in order. On any failure the partial construct is
+ * cleared and 0 is returned. Between steps fr_poll_for_quit is checked;
+ * a requested quit returns 1 (treated as success) so the thread can
+ * stop promptly.
+ * @param fr: the fast reload thread.
+ * @param newcfg: the newly read config file.
+ * @param ct: output construct, filled with the new items.
+ * @return 0 on failure (ct cleared), 1 on success or polled quit. */
+static int
+fr_construct_from_config(struct fast_reload_thread* fr,
+	struct config_file* newcfg, struct fast_reload_construct* ct)
+{
+	int have_view_respip_cfg = 0;
+
+	/* Views. */
+	if(!(ct->views = views_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!views_apply_cfg(ct->views, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* Access control lists. */
+	if(!(ct->acl = acl_list_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!acl_list_apply_cfg(ct->acl, newcfg, ct->views)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* Per-interface access control, set up for the server ports. */
+	if(!(ct->acl_interface = acl_list_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!ct_acl_interface_setup_ports(ct->acl_interface,
+		fr->worker->daemon)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!acl_interface_apply_cfg(ct->acl_interface, newcfg, ct->views)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* TCP connection limits. */
+	if(!(ct->tcl = tcl_list_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!tcl_list_apply_cfg(ct->tcl, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	/* NOTE(review): this flag is set from the current daemon's tcl
+	 * tree, not from the new ct->tcl — presumably to mark that the
+	 * running tcl state has entries to reconcile; confirm intended. */
+	if(fr->worker->daemon->tcl->tree.count != 0)
+		fr->worker->daemon->fast_reload_tcl_has_changes = 1;
+	else fr->worker->daemon->fast_reload_tcl_has_changes = 0;
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* Auth zones, and detect added/deleted/changed zones. */
+	if(!(ct->auth_zones = auth_zones_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!auth_zones_apply_cfg(ct->auth_zones, newcfg, 1, &ct->use_rpz,
+		fr->worker->daemon->env, &fr->worker->daemon->mods)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!auth_zones_check_changes(fr, ct)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* Forwards. */
+	if(!(ct->fwds = forwards_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!forwards_apply_cfg(ct->fwds, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* Stub hints. */
+	if(!(ct->hints = hints_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!hints_apply_cfg(ct->hints, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* Local zones. */
+	if(!(ct->local_zones = local_zones_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!local_zones_apply_cfg(ct->local_zones, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* Response-ip set, global and per-view. */
+	if(!(ct->respip_set = respip_set_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!respip_global_apply_cfg(ct->respip_set, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+	if(!respip_views_apply_cfg(ct->views, newcfg, &have_view_respip_cfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	/* Response-ip is in use when the global set is nonempty or any
+	 * view has respip configuration. */
+	ct->use_response_ip = !respip_set_is_empty(ct->respip_set) ||
+		have_view_respip_cfg;
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* EDNS strings. */
+	if(!(ct->edns_strings = edns_strings_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!edns_strings_apply_cfg(ct->edns_strings, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	if(fr->worker->env.anchors) {
+		/* There are trust anchors already, so create it for reload. */
+		if(!(ct->anchors = anchors_create())) {
+			fr_construct_clear(ct);
+			return 0;
+		}
+		if(!anchors_apply_cfg(ct->anchors, newcfg)) {
+			fr_construct_clear(ct);
+			return 0;
+		}
+		if(fr_poll_for_quit(fr))
+			return 1;
+	}
+
+	/* Validator NSEC3 key size and iteration count limits. */
+	if(!val_env_parse_key_iter(newcfg->val_nsec3_key_iterations,
+		&ct->nsec3_keysize, &ct->nsec3_maxiter,
+		&ct->nsec3_keyiter_count)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* Iterator settings: fetch policy, donotquery, private
+	 * addresses, caps whitelist, nat64. */
+	if(!read_fetch_policy(&ct->target_fetch_policy,
+		&ct->max_dependency_depth, newcfg->target_fetch_policy)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!(ct->donotq = donotq_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!donotq_apply_cfg(ct->donotq, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!(ct->priv = priv_create())) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!priv_apply_cfg(ct->priv, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(newcfg->caps_whitelist) {
+		if(!(ct->caps_white = caps_white_create())) {
+			fr_construct_clear(ct);
+			return 0;
+		}
+		if(!caps_white_apply_cfg(ct->caps_white, newcfg)) {
+			fr_construct_clear(ct);
+			return 0;
+		}
+	}
+	if(!nat64_apply_cfg(&ct->nat64, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* Wait limits and domain rate limits. */
+	if(!setup_wait_limits(&ct->wait_limits_netblock,
+		&ct->wait_limits_cookie_netblock, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(!setup_domain_limits(&ct->domain_limits, newcfg)) {
+		fr_construct_clear(ct);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr))
+		return 1;
+
+	/* Placeholder that later receives the old config values. */
+	if(!(ct->oldcfg = (struct config_file*)calloc(1,
+		sizeof(*ct->oldcfg)))) {
+		fr_construct_clear(ct);
+		log_err("out of memory");
+		return 0;
+	}
+	/* With verbosity, report the memory use of the new items. */
+	if(fr->fr_verb >= 2) {
+		if(!fr_printmem(fr, newcfg, ct))
+			return 0;
+	}
+	return 1;
+}
+
+/** fast reload thread, finish timers.
+ * Stamps time_end and prints the duration of every reload phase,
+ * followed by the total, to the fast-reload client.
+ * @return 0 on output failure, 1 on success. */
+static int
+fr_finish_time(struct fast_reload_thread* fr, struct timeval* time_start,
+	struct timeval* time_read, struct timeval* time_construct,
+	struct timeval* time_reload, struct timeval* time_end)
+{
+	struct timeval elapsed;
+	if(gettimeofday(time_end, NULL) < 0)
+		log_err("gettimeofday: %s", strerror(errno));
+
+	timeval_subtract(&elapsed, time_read, time_start);
+	if(!fr_output_printf(fr, "read disk %3d.%6.6ds\n",
+		(int)elapsed.tv_sec, (int)elapsed.tv_usec))
+		return 0;
+	timeval_subtract(&elapsed, time_construct, time_read);
+	if(!fr_output_printf(fr, "construct %3d.%6.6ds\n",
+		(int)elapsed.tv_sec, (int)elapsed.tv_usec))
+		return 0;
+	timeval_subtract(&elapsed, time_reload, time_construct);
+	if(!fr_output_printf(fr, "reload %3d.%6.6ds\n",
+		(int)elapsed.tv_sec, (int)elapsed.tv_usec))
+		return 0;
+	timeval_subtract(&elapsed, time_end, time_reload);
+	if(!fr_output_printf(fr, "deletes %3d.%6.6ds\n",
+		(int)elapsed.tv_sec, (int)elapsed.tv_usec))
+		return 0;
+	timeval_subtract(&elapsed, time_end, time_start);
+	if(!fr_output_printf(fr, "total time %3d.%6.6ds\n",
+		(int)elapsed.tv_sec, (int)elapsed.tv_usec))
+		return 0;
+	fr_send_notification(fr, fast_reload_notification_printout);
+	return 1;
+}
+
+/** Swap auth zone information between the live set and the new set. */
+static void
+auth_zones_swap(struct auth_zones* az, struct auth_zones* data)
+{
+	rbtree_type tmptree;
+	int tmpflag;
+	struct auth_zone* tmprpz;
+
+	/* Swap the zone tree. */
+	tmptree = az->ztree;
+	az->ztree = data->ztree;
+	data->ztree = tmptree;
+
+	/* Swap the downstream flag. */
+	tmpflag = az->have_downstream;
+	az->have_downstream = data->have_downstream;
+	data->have_downstream = tmpflag;
+
+	/* Leave num_query_up and num_query_down, the statistics can
+	 * remain counted. */
+
+	/* Swap the first rpz zone pointer. */
+	tmprpz = az->rpz_first;
+	az->rpz_first = data->rpz_first;
+	data->rpz_first = tmprpz;
+
+	/* The xtree is not swapped. This contains the auth_xfer elements
+	 * that contain tasks in progress, like zone transfers.
+	 * The unchanged zones can keep their tasks in the tree, and thus
+	 * the xfer elements can continue to be their callbacks. */
+}
+
+#ifdef ATOMIC_POINTER_LOCK_FREE
+/** Fast reload thread, if atomics are available, copy the config items
+ * one by one with atomic store operations. */
+static void
+fr_atomic_copy_cfg(struct config_file* oldcfg, struct config_file* cfg,
+ struct config_file* newcfg)
+{
+#define COPY_VAR_int(var) oldcfg->var = cfg->var; atomic_store((_Atomic int*)&cfg->var, newcfg->var); newcfg->var = 0;
+#define COPY_VAR_ptr(var) oldcfg->var = cfg->var; atomic_store((void* _Atomic*)&cfg->var, newcfg->var); newcfg->var = 0;
+#define COPY_VAR_unsigned_int(var) oldcfg->var = cfg->var; atomic_store((_Atomic unsigned*)&cfg->var, newcfg->var); newcfg->var = 0;
+#define COPY_VAR_size_t(var) oldcfg->var = cfg->var; atomic_store((_Atomic size_t*)&cfg->var, newcfg->var); newcfg->var = 0;
+#define COPY_VAR_uint8_t(var) oldcfg->var = cfg->var; atomic_store((_Atomic uint8_t*)&cfg->var, newcfg->var); newcfg->var = 0;
+#define COPY_VAR_uint16_t(var) oldcfg->var = cfg->var; atomic_store((_Atomic uint16_t*)&cfg->var, newcfg->var); newcfg->var = 0;
+#define COPY_VAR_uint32_t(var) oldcfg->var = cfg->var; atomic_store((_Atomic uint32_t*)&cfg->var, newcfg->var); newcfg->var = 0;
+#define COPY_VAR_int32_t(var) oldcfg->var = cfg->var; atomic_store((_Atomic int32_t*)&cfg->var, newcfg->var); newcfg->var = 0;
+ /* If config file items are missing from this list, they are
+ * not updated by fast-reload +p. */
+ /* For missing items, the oldcfg item is not updated, still NULL,
+ * and the cfg stays the same. The newcfg item is untouched.
+ * The newcfg item is then deleted later. */
+ /* Items that need synchronisation are omitted from the list.
+ * Use fast-reload without +p to update them together. */
+ COPY_VAR_int(verbosity);
+ COPY_VAR_int(stat_interval);
+ COPY_VAR_int(stat_cumulative);
+ COPY_VAR_int(stat_extended);
+ COPY_VAR_int(stat_inhibit_zero);
+ COPY_VAR_int(num_threads);
+ COPY_VAR_int(port);
+ COPY_VAR_int(do_ip4);
+ COPY_VAR_int(do_ip6);
+ COPY_VAR_int(do_nat64);
+ COPY_VAR_int(prefer_ip4);
+ COPY_VAR_int(prefer_ip6);
+ COPY_VAR_int(do_udp);
+ COPY_VAR_int(do_tcp);
+ COPY_VAR_size_t(max_reuse_tcp_queries);
+ COPY_VAR_int(tcp_reuse_timeout);
+ COPY_VAR_int(tcp_auth_query_timeout);
+ COPY_VAR_int(tcp_upstream);
+ COPY_VAR_int(udp_upstream_without_downstream);
+ COPY_VAR_int(tcp_mss);
+ COPY_VAR_int(outgoing_tcp_mss);
+ COPY_VAR_int(tcp_idle_timeout);
+ COPY_VAR_int(do_tcp_keepalive);
+ COPY_VAR_int(tcp_keepalive_timeout);
+ COPY_VAR_int(sock_queue_timeout);
+ COPY_VAR_ptr(proxy_protocol_port);
+ COPY_VAR_ptr(ssl_service_key);
+ COPY_VAR_ptr(ssl_service_pem);
+ COPY_VAR_int(ssl_port);
+ COPY_VAR_int(ssl_upstream);
+ COPY_VAR_ptr(tls_cert_bundle);
+ COPY_VAR_int(tls_win_cert);
+ COPY_VAR_ptr(tls_additional_port);
+ /* The first is used to walk throught the list but last is
+ * only used during config read. */
+ COPY_VAR_ptr(tls_session_ticket_keys.first);
+ COPY_VAR_ptr(tls_session_ticket_keys.last);
+ COPY_VAR_ptr(tls_ciphers);
+ COPY_VAR_ptr(tls_ciphersuites);
+ COPY_VAR_int(tls_use_sni);
+ COPY_VAR_int(https_port);
+ COPY_VAR_ptr(http_endpoint);
+ COPY_VAR_uint32_t(http_max_streams);
+ COPY_VAR_size_t(http_query_buffer_size);
+ COPY_VAR_size_t(http_response_buffer_size);
+ COPY_VAR_int(http_nodelay);
+ COPY_VAR_int(http_notls_downstream);
+ COPY_VAR_int(outgoing_num_ports);
+ COPY_VAR_size_t(outgoing_num_tcp);
+ COPY_VAR_size_t(incoming_num_tcp);
+ COPY_VAR_ptr(outgoing_avail_ports);
+ COPY_VAR_size_t(edns_buffer_size);
+ COPY_VAR_size_t(stream_wait_size);
+ COPY_VAR_size_t(msg_buffer_size);
+ COPY_VAR_size_t(msg_cache_size);
+ COPY_VAR_size_t(msg_cache_slabs);
+ COPY_VAR_size_t(num_queries_per_thread);
+ COPY_VAR_size_t(jostle_time);
+ COPY_VAR_size_t(rrset_cache_size);
+ COPY_VAR_size_t(rrset_cache_slabs);
+ COPY_VAR_int(host_ttl);
+ COPY_VAR_size_t(infra_cache_slabs);
+ COPY_VAR_size_t(infra_cache_numhosts);
+ COPY_VAR_int(infra_cache_min_rtt);
+ COPY_VAR_int(infra_cache_max_rtt);
+ COPY_VAR_int(infra_keep_probing);
+ COPY_VAR_int(delay_close);
+ COPY_VAR_int(udp_connect);
+ COPY_VAR_ptr(target_fetch_policy);
+ COPY_VAR_int(fast_server_permil);
+ COPY_VAR_size_t(fast_server_num);
+ COPY_VAR_int(if_automatic);
+ COPY_VAR_ptr(if_automatic_ports);
+ COPY_VAR_size_t(so_rcvbuf);
+ COPY_VAR_size_t(so_sndbuf);
+ COPY_VAR_int(so_reuseport);
+ COPY_VAR_int(ip_transparent);
+ COPY_VAR_int(ip_freebind);
+ COPY_VAR_int(ip_dscp);
+ /* Not copied because the length and items could then not match.
+ num_ifs, ifs, num_out_ifs, out_ifs
+ */
+ COPY_VAR_ptr(root_hints);
+ COPY_VAR_ptr(stubs);
+ COPY_VAR_ptr(forwards);
+ COPY_VAR_ptr(auths);
+ COPY_VAR_ptr(views);
+ COPY_VAR_ptr(donotqueryaddrs);
+#ifdef CLIENT_SUBNET
+ COPY_VAR_ptr(client_subnet);
+ COPY_VAR_ptr(client_subnet_zone);
+ COPY_VAR_uint16_t(client_subnet_opcode);
+ COPY_VAR_int(client_subnet_always_forward);
+ COPY_VAR_uint8_t(max_client_subnet_ipv4);
+ COPY_VAR_uint8_t(max_client_subnet_ipv6);
+ COPY_VAR_uint8_t(min_client_subnet_ipv4);
+ COPY_VAR_uint8_t(min_client_subnet_ipv6);
+ COPY_VAR_uint32_t(max_ecs_tree_size_ipv4);
+ COPY_VAR_uint32_t(max_ecs_tree_size_ipv6);
+#endif
+ COPY_VAR_ptr(acls);
+ COPY_VAR_int(donotquery_localhost);
+ COPY_VAR_ptr(tcp_connection_limits);
+ COPY_VAR_int(harden_short_bufsize);
+ COPY_VAR_int(harden_large_queries);
+ COPY_VAR_int(harden_glue);
+ COPY_VAR_int(harden_dnssec_stripped);
+ COPY_VAR_int(harden_below_nxdomain);
+ COPY_VAR_int(harden_referral_path);
+ COPY_VAR_int(harden_algo_downgrade);
+ COPY_VAR_int(harden_unknown_additional);
+ COPY_VAR_int(use_caps_bits_for_id);
+ COPY_VAR_ptr(caps_whitelist);
+ COPY_VAR_ptr(private_address);
+ COPY_VAR_ptr(private_domain);
+ COPY_VAR_size_t(unwanted_threshold);
+ COPY_VAR_int(max_ttl);
+ COPY_VAR_int(min_ttl);
+ COPY_VAR_int(max_negative_ttl);
+ COPY_VAR_int(min_negative_ttl);
+ COPY_VAR_int(prefetch);
+ COPY_VAR_int(prefetch_key);
+ COPY_VAR_int(deny_any);
+ COPY_VAR_ptr(chrootdir);
+ COPY_VAR_ptr(username);
+ COPY_VAR_ptr(directory);
+ COPY_VAR_ptr(logfile);
+ COPY_VAR_ptr(pidfile);
+ COPY_VAR_int(use_syslog);
+ COPY_VAR_int(log_time_ascii);
+ COPY_VAR_int(log_queries);
+ COPY_VAR_int(log_replies);
+ COPY_VAR_int(log_tag_queryreply);
+ COPY_VAR_int(log_local_actions);
+ COPY_VAR_int(log_servfail);
+ COPY_VAR_ptr(log_identity);
+ COPY_VAR_int(log_destaddr);
+ COPY_VAR_int(hide_identity);
+ COPY_VAR_int(hide_version);
+ COPY_VAR_int(hide_trustanchor);
+ COPY_VAR_int(hide_http_user_agent);
+ COPY_VAR_ptr(identity);
+ COPY_VAR_ptr(version);
+ COPY_VAR_ptr(http_user_agent);
+ COPY_VAR_ptr(nsid_cfg_str);
+ /* Not copied because the length and items could then not match.
+ nsid;
+ nsid_len;
+ */
+ COPY_VAR_ptr(module_conf);
+ COPY_VAR_ptr(trust_anchor_file_list);
+ COPY_VAR_ptr(trust_anchor_list);
+ COPY_VAR_ptr(auto_trust_anchor_file_list);
+ COPY_VAR_ptr(trusted_keys_file_list);
+ COPY_VAR_ptr(domain_insecure);
+ COPY_VAR_int(trust_anchor_signaling);
+ COPY_VAR_int(root_key_sentinel);
+ COPY_VAR_int32_t(val_date_override);
+ COPY_VAR_int32_t(val_sig_skew_min);
+ COPY_VAR_int32_t(val_sig_skew_max);
+ COPY_VAR_int32_t(val_max_restart);
+ COPY_VAR_int(bogus_ttl);
+ COPY_VAR_int(val_clean_additional);
+ COPY_VAR_int(val_log_level);
+ COPY_VAR_int(val_log_squelch);
+ COPY_VAR_int(val_permissive_mode);
+ COPY_VAR_int(aggressive_nsec);
+ COPY_VAR_int(ignore_cd);
+ COPY_VAR_int(disable_edns_do);
+ COPY_VAR_int(serve_expired);
+ COPY_VAR_int(serve_expired_ttl);
+ COPY_VAR_int(serve_expired_ttl_reset);
+ COPY_VAR_int(serve_expired_reply_ttl);
+ COPY_VAR_int(serve_expired_client_timeout);
+ COPY_VAR_int(ede_serve_expired);
+ COPY_VAR_int(serve_original_ttl);
+ COPY_VAR_ptr(val_nsec3_key_iterations);
+ COPY_VAR_int(zonemd_permissive_mode);
+ COPY_VAR_unsigned_int(add_holddown);
+ COPY_VAR_unsigned_int(del_holddown);
+ COPY_VAR_unsigned_int(keep_missing);
+ COPY_VAR_int(permit_small_holddown);
+ COPY_VAR_size_t(key_cache_size);
+ COPY_VAR_size_t(key_cache_slabs);
+ COPY_VAR_size_t(neg_cache_size);
+ COPY_VAR_ptr(local_zones);
+ COPY_VAR_ptr(local_zones_nodefault);
+#ifdef USE_IPSET
+ COPY_VAR_ptr(local_zones_ipset);
+#endif
+ COPY_VAR_int(local_zones_disable_default);
+ COPY_VAR_ptr(local_data);
+ COPY_VAR_ptr(local_zone_overrides);
+ COPY_VAR_int(unblock_lan_zones);
+ COPY_VAR_int(insecure_lan_zones);
+ /* These reference tags
+ COPY_VAR_ptr(local_zone_tags);
+ COPY_VAR_ptr(acl_tags);
+ COPY_VAR_ptr(acl_tag_actions);
+ COPY_VAR_ptr(acl_tag_datas);
+ */
+ COPY_VAR_ptr(acl_view);
+ COPY_VAR_ptr(interface_actions);
+ /* These reference tags
+ COPY_VAR_ptr(interface_tags);
+ COPY_VAR_ptr(interface_tag_actions);
+ COPY_VAR_ptr(interface_tag_datas);
+ */
+ COPY_VAR_ptr(interface_view);
+ /* This references tags
+ COPY_VAR_ptr(respip_tags);
+ */
+ COPY_VAR_ptr(respip_actions);
+ COPY_VAR_ptr(respip_data);
+ /* Not copied because the length and items could then not match.
+ * also the respip module keeps a pointer to the array in its state.
+ tagname, num_tags
+ */
+ COPY_VAR_int(remote_control_enable);
+ /* The first is used to walk throught the list but last is
+ * only used during config read. */
+ COPY_VAR_ptr(control_ifs.first);
+ COPY_VAR_ptr(control_ifs.last);
+ COPY_VAR_int(control_use_cert);
+ COPY_VAR_int(control_port);
+ COPY_VAR_ptr(server_key_file);
+ COPY_VAR_ptr(server_cert_file);
+ COPY_VAR_ptr(control_key_file);
+ COPY_VAR_ptr(control_cert_file);
+ COPY_VAR_ptr(python_script);
+ COPY_VAR_ptr(dynlib_file);
+ COPY_VAR_int(use_systemd);
+ COPY_VAR_int(do_daemonize);
+ COPY_VAR_int(minimal_responses);
+ COPY_VAR_int(rrset_roundrobin);
+ COPY_VAR_int(unknown_server_time_limit);
+ COPY_VAR_int(discard_timeout);
+ COPY_VAR_int(wait_limit);
+ COPY_VAR_int(wait_limit_cookie);
+ COPY_VAR_ptr(wait_limit_netblock);
+ COPY_VAR_ptr(wait_limit_cookie_netblock);
+ COPY_VAR_size_t(max_udp_size);
+ COPY_VAR_ptr(dns64_prefix);
+ COPY_VAR_int(dns64_synthall);
+ COPY_VAR_ptr(dns64_ignore_aaaa);
+ COPY_VAR_ptr(nat64_prefix);
+ COPY_VAR_int(dnstap);
+ COPY_VAR_int(dnstap_bidirectional);
+ COPY_VAR_ptr(dnstap_socket_path);
+ COPY_VAR_ptr(dnstap_ip);
+ COPY_VAR_int(dnstap_tls);
+ COPY_VAR_ptr(dnstap_tls_server_name);
+ COPY_VAR_ptr(dnstap_tls_cert_bundle);
+ COPY_VAR_ptr(dnstap_tls_client_key_file);
+ COPY_VAR_ptr(dnstap_tls_client_cert_file);
+ COPY_VAR_int(dnstap_send_identity);
+ COPY_VAR_int(dnstap_send_version);
+ COPY_VAR_ptr(dnstap_identity);
+ COPY_VAR_ptr(dnstap_version);
+ COPY_VAR_int(dnstap_sample_rate);
+ COPY_VAR_int(dnstap_log_resolver_query_messages);
+ COPY_VAR_int(dnstap_log_resolver_response_messages);
+ COPY_VAR_int(dnstap_log_client_query_messages);
+ COPY_VAR_int(dnstap_log_client_response_messages);
+ COPY_VAR_int(dnstap_log_forwarder_query_messages);
+ COPY_VAR_int(dnstap_log_forwarder_response_messages);
+ COPY_VAR_int(disable_dnssec_lame_check);
+ COPY_VAR_int(ip_ratelimit);
+ COPY_VAR_int(ip_ratelimit_cookie);
+ COPY_VAR_size_t(ip_ratelimit_slabs);
+ COPY_VAR_size_t(ip_ratelimit_size);
+ COPY_VAR_int(ip_ratelimit_factor);
+ COPY_VAR_int(ip_ratelimit_backoff);
+ COPY_VAR_int(ratelimit);
+ COPY_VAR_size_t(ratelimit_slabs);
+ COPY_VAR_size_t(ratelimit_size);
+ COPY_VAR_ptr(ratelimit_for_domain);
+ COPY_VAR_ptr(ratelimit_below_domain);
+ COPY_VAR_int(ratelimit_factor);
+ COPY_VAR_int(ratelimit_backoff);
+ COPY_VAR_int(outbound_msg_retry);
+ COPY_VAR_int(max_sent_count);
+ COPY_VAR_int(max_query_restarts);
+ COPY_VAR_int(qname_minimisation);
+ COPY_VAR_int(qname_minimisation_strict);
+ COPY_VAR_int(shm_enable);
+ COPY_VAR_int(shm_key);
+ COPY_VAR_ptr(edns_client_strings);
+ COPY_VAR_uint16_t(edns_client_string_opcode);
+ COPY_VAR_int(dnscrypt);
+ COPY_VAR_int(dnscrypt_port);
+ COPY_VAR_ptr(dnscrypt_provider);
+ COPY_VAR_ptr(dnscrypt_secret_key);
+ COPY_VAR_ptr(dnscrypt_provider_cert);
+ COPY_VAR_ptr(dnscrypt_provider_cert_rotated);
+ COPY_VAR_size_t(dnscrypt_shared_secret_cache_size);
+ COPY_VAR_size_t(dnscrypt_shared_secret_cache_slabs);
+ COPY_VAR_size_t(dnscrypt_nonce_cache_size);
+ COPY_VAR_size_t(dnscrypt_nonce_cache_slabs);
+ COPY_VAR_int(pad_responses);
+ COPY_VAR_size_t(pad_responses_block_size);
+ COPY_VAR_int(pad_queries);
+ COPY_VAR_size_t(pad_queries_block_size);
+#ifdef USE_IPSECMOD
+ COPY_VAR_int(ipsecmod_enabled);
+ COPY_VAR_ptr(ipsecmod_whitelist);
+ COPY_VAR_ptr(ipsecmod_hook);
+ COPY_VAR_int(ipsecmod_ignore_bogus);
+ COPY_VAR_int(ipsecmod_max_ttl);
+ COPY_VAR_int(ipsecmod_strict);
+#endif
+#ifdef USE_CACHEDB
+ COPY_VAR_ptr(cachedb_backend);
+ COPY_VAR_ptr(cachedb_secret);
+ COPY_VAR_int(cachedb_no_store);
+ COPY_VAR_int(cachedb_check_when_serve_expired);
+#ifdef USE_REDIS
+ COPY_VAR_ptr(redis_server_host);
+ COPY_VAR_int(redis_server_port);
+ COPY_VAR_ptr(redis_server_path);
+ COPY_VAR_ptr(redis_server_password);
+ COPY_VAR_int(redis_timeout);
+ COPY_VAR_int(redis_expire_records);
+ COPY_VAR_int(redis_logical_db);
+#endif
+#endif
+ COPY_VAR_int(do_answer_cookie);
+ /* Not copied because the length and content could then not match.
+ cookie_secret[40], cookie_secret_len
+ */
+#ifdef USE_IPSET
+ COPY_VAR_ptr(ipset_name_v4);
+ COPY_VAR_ptr(ipset_name_v6);
+#endif
+ COPY_VAR_int(ede);
+}
+#endif /* ATOMIC_POINTER_LOCK_FREE */
+
+/** fast reload thread, adjust the cache sizes.
+ * Called after the new config has been installed in env->cfg; oldcfg holds
+ * the previous values, so only caches whose configured size actually
+ * changed are resized. */
+static void
+fr_adjust_cache(struct module_env* env, struct config_file* oldcfg)
+{
+	/* message cache */
+	if(env->cfg->msg_cache_size != oldcfg->msg_cache_size)
+		slabhash_adjust_size(env->msg_cache, env->cfg->msg_cache_size);
+	/* rrset cache */
+	if(env->cfg->rrset_cache_size != oldcfg->rrset_cache_size)
+		slabhash_adjust_size(&env->rrset_cache->table,
+			env->cfg->rrset_cache_size);
+	/* key cache; only present when env->key_cache is set up */
+	if(env->key_cache &&
+		env->cfg->key_cache_size != oldcfg->key_cache_size)
+		slabhash_adjust_size(env->key_cache->slab,
+			env->cfg->key_cache_size);
+	/* infra cache is configured as a number of hosts; convert that to
+	 * bytes using the per-entry key, data and name storage costs. */
+	if(env->cfg->infra_cache_numhosts != oldcfg->infra_cache_numhosts) {
+		size_t inframem = env->cfg->infra_cache_numhosts *
+			(sizeof(struct infra_key) + sizeof(struct infra_data)
+			+ INFRA_BYTES_NAME);
+		slabhash_adjust_size(env->infra_cache->hosts, inframem);
+	}
+	/* one ratelimit size setting covers both the domain-rate and the
+	 * client-ip-rate tables */
+	if(env->cfg->ratelimit_size != oldcfg->ratelimit_size) {
+		slabhash_adjust_size(env->infra_cache->domain_rates,
+			env->cfg->ratelimit_size);
+		slabhash_adjust_size(env->infra_cache->client_ip_rates,
+			env->cfg->ratelimit_size);
+	}
+	/* negative cache; only present when env->neg_cache is set up */
+	if(env->neg_cache &&
+		env->cfg->neg_cache_size != oldcfg->neg_cache_size) {
+		val_neg_adjust_size(env->neg_cache, env->cfg->neg_cache_size);
+	}
+}
+
+/** fast reload thread, adjust the iterator env.
+ * Installs the freshly constructed iterator items into the live iter_env
+ * and puts the old pointers back into ct, so their deletion happens later
+ * together with the rest of the construct. */
+static void
+fr_adjust_iter_env(struct module_env* env, struct fast_reload_construct* ct)
+{
+	int m;
+	struct iter_env* iter_env = NULL;
+	/* There is no comparison here to see if no options changed and thus
+	 * no swap is needed, the trees with addresses and domains can be
+	 * large and that would take too long. Instead the trees are
+	 * swapped in. */
+
+	/* Because the iterator env is not locked, the update cannot happen
+	 * when fr nopause is used. Without it the fast reload pauses the
+	 * other threads, so they are not currently using the structure. */
+	m = modstack_find(env->modstack, "iterator");
+	if(m != -1) iter_env = (struct iter_env*)env->modinfo[m];
+	if(iter_env) {
+		/* Swap the data so that the delete happens afterwards. */
+		int* oldtargetfetchpolicy = iter_env->target_fetch_policy;
+		int oldmaxdependencydepth = iter_env->max_dependency_depth;
+		struct iter_donotq* olddonotq = iter_env->donotq;
+		struct iter_priv* oldpriv = iter_env->priv;
+		struct rbtree_type* oldcapswhite = iter_env->caps_white;
+		struct iter_nat64 oldnat64 = iter_env->nat64;
+
+		/* Install the new items from the construct. */
+		iter_env->target_fetch_policy = ct->target_fetch_policy;
+		iter_env->max_dependency_depth = ct->max_dependency_depth;
+		iter_env->donotq = ct->donotq;
+		iter_env->priv = ct->priv;
+		iter_env->caps_white = ct->caps_white;
+		iter_env->nat64 = ct->nat64;
+		/* These scalars are copied straight from the new config. */
+		iter_env->outbound_msg_retry = env->cfg->outbound_msg_retry;
+		iter_env->max_sent_count = env->cfg->max_sent_count;
+		iter_env->max_query_restarts = env->cfg->max_query_restarts;
+
+		/* Hand the old items back to ct for deletion later. */
+		ct->target_fetch_policy = oldtargetfetchpolicy;
+		ct->max_dependency_depth = oldmaxdependencydepth;
+		ct->donotq = olddonotq;
+		ct->priv = oldpriv;
+		ct->caps_white = oldcapswhite;
+		ct->nat64 = oldnat64;
+	}
+}
+
+/** fast reload thread, adjust the validator env.
+ * Returns early when none of the validator-relevant options changed.
+ * Otherwise reapplies the config to the validator module state and swaps
+ * the old nsec3 arrays into ct for deletion later. */
+static void
+fr_adjust_val_env(struct module_env* env, struct fast_reload_construct* ct,
+	struct config_file* oldcfg)
+{
+	int m;
+	struct val_env* val_env = NULL;
+	if(env->cfg->bogus_ttl == oldcfg->bogus_ttl &&
+		env->cfg->val_date_override == oldcfg->val_date_override &&
+		env->cfg->val_sig_skew_min == oldcfg->val_sig_skew_min &&
+		env->cfg->val_sig_skew_max == oldcfg->val_sig_skew_max &&
+		env->cfg->val_max_restart == oldcfg->val_max_restart &&
+		strcmp(env->cfg->val_nsec3_key_iterations,
+		oldcfg->val_nsec3_key_iterations) == 0)
+		return; /* no changes */
+
+	/* Because the validator env is not locked, the update cannot happen
+	 * when fr nopause is used. Without it the fast reload pauses the
+	 * other threads, so they are not currently using the structure. */
+	m = modstack_find(env->modstack, "validator");
+	if(m != -1) val_env = (struct val_env*)env->modinfo[m];
+	if(val_env) {
+		/* Swap the arrays so that the delete happens afterwards. */
+		size_t* oldkeysize = val_env->nsec3_keysize;
+		size_t* oldmaxiter = val_env->nsec3_maxiter;
+		/* NULL them first so val_env_apply_cfg installs the
+		 * ct-provided arrays instead of the old ones. */
+		val_env->nsec3_keysize = NULL;
+		val_env->nsec3_maxiter = NULL;
+		val_env_apply_cfg(val_env, env->cfg, ct->nsec3_keysize,
+			ct->nsec3_maxiter, ct->nsec3_keyiter_count);
+		ct->nsec3_keysize = oldkeysize;
+		ct->nsec3_maxiter = oldmaxiter;
+		if(env->neg_cache) {
+			/* the neg cache is shared; update its iteration
+			 * ceiling under its lock. The last array entry is
+			 * used, like the largest configured keysize. */
+			lock_basic_lock(&env->neg_cache->lock);
+			env->neg_cache->nsec3_max_iter = val_env->
+				nsec3_maxiter[val_env->nsec3_keyiter_count-1];
+			lock_basic_unlock(&env->neg_cache->lock);
+		}
+	}
+}
+
+/** fast reload thread, adjust the infra cache parameters.
+ * Installs the newly constructed limit trees and scalar settings, and
+ * hands the previous trees back to ct so they are deleted later. */
+static void
+fr_adjust_infra(struct module_env* env, struct fast_reload_construct* ct)
+{
+	struct infra_cache* infra = env->infra_cache;
+	struct config_file* cfg = env->cfg;
+	struct rbtree_type prev_waitlim, prev_waitlim_cookie, prev_domlim;
+
+	/* Remember the trees currently in use, so that the old contents
+	 * can be deleted afterwards, with the construct. */
+	prev_waitlim = infra->wait_limits_netblock;
+	prev_waitlim_cookie = infra->wait_limits_cookie_netblock;
+	prev_domlim = infra->domain_limits;
+
+	/* Scalar settings from the new config. The size of the infra cache
+	 * and ip rates is changed in fr_adjust_cache. */
+	infra->host_ttl = cfg->host_ttl;
+	infra->infra_keep_probing = cfg->infra_keep_probing;
+	infra_dp_ratelimit = cfg->ratelimit;
+	infra_ip_ratelimit = cfg->ip_ratelimit;
+	infra_ip_ratelimit_cookie = cfg->ip_ratelimit_cookie;
+
+	/* Install the freshly constructed trees. */
+	infra->wait_limits_netblock = ct->wait_limits_netblock;
+	infra->wait_limits_cookie_netblock = ct->wait_limits_cookie_netblock;
+	infra->domain_limits = ct->domain_limits;
+
+	/* Give the previous trees back for deletion later. */
+	ct->wait_limits_netblock = prev_waitlim;
+	ct->wait_limits_cookie_netblock = prev_waitlim_cookie;
+	ct->domain_limits = prev_domlim;
+}
+
+/** fast reload thread, reload config with putting the new config items
+ * in place and swapping out the old items.
+ * On return, env->cfg holds the new options, ct holds the old elements
+ * (freed later by the caller via fr_construct_clear), and newcfg has been
+ * emptied so the caller's config_delete does not free items now in use.
+ * Returns 1 (there are no failing paths in this routine itself). */
+static int
+fr_reload_config(struct fast_reload_thread* fr, struct config_file* newcfg,
+	struct fast_reload_construct* ct)
+{
+	struct daemon* daemon = fr->worker->daemon;
+	struct module_env* env = daemon->env;
+
+	/* These are constructed in the fr_construct_from_config routine. */
+	log_assert(ct->oldcfg);
+	log_assert(ct->fwds);
+	log_assert(ct->hints);
+
+	/* Grab big locks to satisfy lock conditions. The construct's
+	 * structure and the live structure are locked pairwise, in the
+	 * same relative order as they are unlocked below. */
+	lock_rw_wrlock(&ct->views->lock);
+	lock_rw_wrlock(&env->views->lock);
+	lock_rw_wrlock(&ct->respip_set->lock);
+	lock_rw_wrlock(&env->respip_set->lock);
+	lock_rw_wrlock(&ct->local_zones->lock);
+	lock_rw_wrlock(&daemon->local_zones->lock);
+	lock_rw_wrlock(&ct->auth_zones->rpz_lock);
+	lock_rw_wrlock(&env->auth_zones->rpz_lock);
+	lock_rw_wrlock(&ct->auth_zones->lock);
+	lock_rw_wrlock(&env->auth_zones->lock);
+	lock_rw_wrlock(&ct->fwds->lock);
+	lock_rw_wrlock(&env->fwds->lock);
+	lock_rw_wrlock(&ct->hints->lock);
+	lock_rw_wrlock(&env->hints->lock);
+	if(ct->anchors) {
+		/* NOTE(review): env->anchors is only locked when the
+		 * construct has anchors; presumably both exist together. */
+		lock_basic_lock(&ct->anchors->lock);
+		lock_basic_lock(&env->anchors->lock);
+	}
+
+#ifdef ATOMIC_POINTER_LOCK_FREE
+	if(fr->fr_nopause) {
+		/* Worker threads keep running; copy option by option with
+		 * atomic stores so readers never see a torn config. */
+		fr_atomic_copy_cfg(ct->oldcfg, env->cfg, newcfg);
+	} else {
+#endif
+		/* Workers are paused; plain struct copies suffice. */
+		/* Store old config elements. */
+		*ct->oldcfg = *env->cfg;
+		/* Insert new config elements. */
+		*env->cfg = *newcfg;
+#ifdef ATOMIC_POINTER_LOCK_FREE
+	}
+#endif
+
+	if(env->cfg->log_identity || ct->oldcfg->log_identity) {
+		/* pick up new log_identity string to use for log output. */
+		log_ident_set_or_default(env->cfg->log_identity);
+	}
+	/* the newcfg elements are in env->cfg, so should not be freed here. */
+#ifdef ATOMIC_POINTER_LOCK_FREE
+	/* if used, the routine that copies the config has zeroed items. */
+	if(!fr->fr_nopause)
+#endif
+		memset(newcfg, 0, sizeof(*newcfg));
+
+	/* Quickly swap the tree roots themselves with the already allocated
+	 * elements. This is a quick swap operation on the pointer.
+	 * The other threads are stopped and locks are held, so that a
+	 * consistent view of the configuration, before, and after, exists
+	 * towards the state machine for query resolution. */
+	forwards_swap_tree(env->fwds, ct->fwds);
+	hints_swap_tree(env->hints, ct->hints);
+	views_swap_tree(env->views, ct->views);
+	acl_list_swap_tree(daemon->acl, ct->acl);
+	acl_list_swap_tree(daemon->acl_interface, ct->acl_interface);
+	tcl_list_swap_tree(daemon->tcl, ct->tcl);
+	local_zones_swap_tree(daemon->local_zones, ct->local_zones);
+	respip_set_swap_tree(env->respip_set, ct->respip_set);
+	daemon->use_response_ip = ct->use_response_ip;
+	daemon->use_rpz = ct->use_rpz;
+	auth_zones_swap(env->auth_zones, ct->auth_zones);
+	edns_strings_swap_tree(env->edns_strings, ct->edns_strings);
+	anchors_swap_tree(env->anchors, ct->anchors);
+#ifdef USE_CACHEDB
+	daemon->env->cachedb_enabled = cachedb_is_enabled(&daemon->mods,
+		daemon->env);
+#endif
+#ifdef USE_DNSTAP
+	if(env->cfg->dnstap) {
+		/* full dnstap reconfig only when workers are paused; with
+		 * nopause only the log settings are applied. */
+		if(!fr->fr_nopause)
+			dt_apply_cfg(daemon->dtenv, env->cfg);
+		else dt_apply_logcfg(daemon->dtenv, env->cfg);
+	}
+#endif
+	fr_adjust_cache(env, ct->oldcfg);
+	if(!fr->fr_nopause) {
+		/* These touch unlocked module state, see the comments in
+		 * the adjust routines; only safe with paused workers. */
+		fr_adjust_iter_env(env, ct);
+		fr_adjust_val_env(env, ct, ct->oldcfg);
+		fr_adjust_infra(env, ct);
+	}
+
+	/* Set globals with new config. */
+	config_apply(env->cfg);
+
+	lock_rw_unlock(&ct->views->lock);
+	lock_rw_unlock(&env->views->lock);
+	lock_rw_unlock(&ct->respip_set->lock);
+	lock_rw_unlock(&env->respip_set->lock);
+	lock_rw_unlock(&ct->local_zones->lock);
+	lock_rw_unlock(&daemon->local_zones->lock);
+	lock_rw_unlock(&ct->auth_zones->lock);
+	lock_rw_unlock(&env->auth_zones->lock);
+	lock_rw_unlock(&ct->auth_zones->rpz_lock);
+	lock_rw_unlock(&env->auth_zones->rpz_lock);
+	lock_rw_unlock(&ct->fwds->lock);
+	lock_rw_unlock(&env->fwds->lock);
+	lock_rw_unlock(&ct->hints->lock);
+	lock_rw_unlock(&env->hints->lock);
+	if(ct->anchors) {
+		lock_basic_unlock(&ct->anchors->lock);
+		lock_basic_unlock(&env->anchors->lock);
+	}
+
+	return 1;
+}
+
+/** fast reload, poll for ack incoming.
+ * Blocks (poll with infinite timeout) until the main side sends a 4-byte
+ * notification over commpair[1], then reads it, coping with partial
+ * reads and EINTR/EAGAIN. Sets fr->need_to_quit when an exit
+ * notification arrives instead of the expected reload ack. */
+static void
+fr_poll_for_ack(struct fast_reload_thread* fr)
+{
+	int loopexit = 0, bcount = 0;
+	uint32_t cmd;
+	ssize_t ret;
+
+	if(fr->need_to_quit)
+		return;
+	/* Is there data? Timeout -1 means wait indefinitely. */
+	if(!sock_poll_timeout(fr->commpair[1], -1, 1, 0, NULL)) {
+		log_err("fr_poll_for_ack: poll failed");
+		return;
+	}
+
+	/* Read the data; bcount accumulates bytes of the 4-byte cmd across
+	 * short reads. A closed stream (recv returns 0) is not handled
+	 * separately; the loop then ends via the IPC_LOOP_MAX guard. */
+	while(1) {
+		if(++loopexit > IPC_LOOP_MAX) {
+			log_err("fr_poll_for_ack: recv loops %s",
+				sock_strerror(errno));
+			return;
+		}
+		ret = recv(fr->commpair[1], ((char*)&cmd)+bcount,
+			sizeof(cmd)-bcount, 0);
+		if(ret == -1) {
+			if(
+#ifndef USE_WINSOCK
+				errno == EINTR || errno == EAGAIN
+#  ifdef EWOULDBLOCK
+				|| errno == EWOULDBLOCK
+#  endif
+#else
+				WSAGetLastError() == WSAEINTR ||
+				WSAGetLastError() == WSAEINPROGRESS ||
+				WSAGetLastError() == WSAEWOULDBLOCK
+#endif
+				)
+				continue; /* Try again. */
+			log_err("fr_poll_for_ack: recv: %s",
+				sock_strerror(errno));
+			return;
+		} else if(ret+(ssize_t)bcount != sizeof(cmd)) {
+			/* partial read, keep what we have and continue */
+			bcount += ret;
+			if((size_t)bcount < sizeof(cmd))
+				continue;
+		}
+		break;
+	}
+	if(cmd == fast_reload_notification_exit) {
+		fr->need_to_quit = 1;
+		verbose(VERB_ALGO, "fast reload wait for ack: "
+			"exit notification received");
+		return;
+	}
+	if(cmd != fast_reload_notification_reload_ack) {
+		/* unexpected command; log it but continue, the reload
+		 * proceeds anyway. */
+		verbose(VERB_ALGO, "fast reload wait for ack: "
+			"wrong notification %d", (int)cmd);
+	}
+}
+
+/** fast reload thread, reload ipc communication to stop and start threads.
+ * When workers must be paused (no fr_nopause), a stop notification is
+ * sent and acknowledged before the config swap, and a start notification
+ * afterwards. Returns 0 when the config reload itself failed. */
+static int
+fr_reload_ipc(struct fast_reload_thread* fr, struct config_file* newcfg,
+	struct fast_reload_construct* ct)
+{
+	int ok;
+	int pause_workers = !fr->fr_nopause;
+	if(pause_workers) {
+		/* Stop the worker threads and wait for confirmation. */
+		fr_send_notification(fr, fast_reload_notification_reload_stop);
+		fr_poll_for_ack(fr);
+	}
+	ok = fr_reload_config(fr, newcfg, ct) ? 1 : 0;
+	if(pause_workers) {
+		/* Start the worker threads again, with the new config. */
+		fr_send_notification(fr, fast_reload_notification_reload_start);
+		fr_poll_for_ack(fr);
+	}
+	return ok;
+}
+
+/** fast reload thread, load config.
+ * Runs the full reload sequence: read the config file, check it,
+ * construct the new data structures, and swap them into the server.
+ * Fills in the three timestamps for the timing printout.
+ * Returns 0 on failure (after printing output to the client), and 1 on
+ * success or on an early quit request. */
+static int
+fr_load_config(struct fast_reload_thread* fr, struct timeval* time_read,
+	struct timeval* time_construct, struct timeval* time_reload)
+{
+	struct fast_reload_construct ct;
+	struct config_file* newcfg = NULL;
+	memset(&ct, 0, sizeof(ct));
+
+	/* Read file. */
+	if(!fr_read_config(fr, &newcfg))
+		return 0;
+	if(gettimeofday(time_read, NULL) < 0)
+		log_err("gettimeofday: %s", strerror(errno));
+	/* quit checks between phases return 1, not 0; a quit is not an
+	 * error, only the allocated items are cleaned up. */
+	if(fr_poll_for_quit(fr)) {
+		config_delete(newcfg);
+		return 1;
+	}
+
+	/* Check if the config can be loaded */
+	if(!fr_check_tag_defines(fr, newcfg)) {
+		config_delete(newcfg);
+		return 0;
+	}
+	if(!fr_check_compat_cfg(fr, newcfg)) {
+		config_delete(newcfg);
+		return 0;
+	}
+	if(!fr_check_nopause_cfg(fr, newcfg)) {
+		config_delete(newcfg);
+		return 0;
+	}
+	if(fr_poll_for_quit(fr)) {
+		config_delete(newcfg);
+		return 1;
+	}
+
+	/* Construct items. */
+	if(!fr_construct_from_config(fr, newcfg, &ct)) {
+		config_delete(newcfg);
+		if(!fr_output_printf(fr, "Could not construct from the "
+			"config, check for errors with unbound-checkconf, or "
+			"out of memory. The parse errors are printed in "
+			"the log.\n"))
+			return 0;
+		fr_send_notification(fr, fast_reload_notification_printout);
+		return 0;
+	}
+	if(gettimeofday(time_construct, NULL) < 0)
+		log_err("gettimeofday: %s", strerror(errno));
+	if(fr_poll_for_quit(fr)) {
+		config_delete(newcfg);
+		fr_construct_clear(&ct);
+		return 1;
+	}
+
+	/* Reload server. */
+	if(!fr_reload_ipc(fr, newcfg, &ct)) {
+		config_delete(newcfg);
+		fr_construct_clear(&ct);
+		if(!fr_output_printf(fr, "error: reload failed\n"))
+			return 0;
+		fr_send_notification(fr, fast_reload_notification_printout);
+		return 0;
+	}
+	if(gettimeofday(time_reload, NULL) < 0)
+		log_err("gettimeofday: %s", strerror(errno));
+
+	if(fr_poll_for_quit(fr)) {
+		config_delete(newcfg);
+		fr_construct_clear(&ct);
+		return 1;
+	}
+	if(fr->fr_nopause) {
+		/* Poll every thread, with a no-work poll item over the
+		 * command pipe. This makes the worker thread surely move
+		 * to deal with that event, and thus the thread is no longer
+		 * holding, eg. a string item from the old config struct.
+		 * And then the old config struct can safely be deleted.
+		 * Only needed when nopause is used, because without that
+		 * the worker threads are already waiting on a command pipe
+		 * item. This nopause command pipe item does not take work,
+		 * it returns immediately, so it does not delay the workers.
+		 * They can be polled one at a time. But its processing causes
+		 * the worker to have released data items from old config.
+		 * This also makes sure the threads are not holding locks on
+		 * individual items in the local_zones, views, respip_set. */
+		fr_send_notification(fr,
+			fast_reload_notification_reload_nopause_poll);
+		fr_poll_for_ack(fr);
+	}
+
+	/* Delete old. After fr_reload_ipc, newcfg is emptied and ct holds
+	 * the old elements, so these frees remove the old config. */
+	config_delete(newcfg);
+	fr_construct_clear(&ct);
+	return 1;
+}
+
+/** fast reload thread. the thread main function.
+ * Drives the reload: optional timing and progress printouts, the config
+ * load/swap, and the final notifications to the main thread. Exits via
+ * 'done' on a quit request, and via 'done_error' when a step failed. */
+static void* fast_reload_thread_main(void* arg)
+{
+	struct fast_reload_thread* fast_reload_thread = (struct fast_reload_thread*)arg;
+	struct timeval time_start, time_read, time_construct, time_reload,
+		time_end;
+	log_thread_set(&fast_reload_thread->threadnum);
+
+	verbose(VERB_ALGO, "start fast reload thread");
+	if(fast_reload_thread->fr_verb >= 1) {
+		fr_init_time(&time_start, &time_read, &time_construct,
+			&time_reload, &time_end);
+		if(fr_poll_for_quit(fast_reload_thread))
+			goto done;
+	}
+
+	/* print output to the client */
+	if(fast_reload_thread->fr_verb >= 1) {
+		if(!fr_output_printf(fast_reload_thread, "thread started\n"))
+			goto done_error;
+		fr_send_notification(fast_reload_thread,
+			fast_reload_notification_printout);
+		if(fr_poll_for_quit(fast_reload_thread))
+			goto done;
+	}
+
+	/* the actual reload work */
+	if(!fr_load_config(fast_reload_thread, &time_read, &time_construct,
+		&time_reload))
+		goto done_error;
+	if(fr_poll_for_quit(fast_reload_thread))
+		goto done;
+
+	if(fast_reload_thread->fr_verb >= 1) {
+		if(!fr_finish_time(fast_reload_thread, &time_start, &time_read,
+			&time_construct, &time_reload, &time_end))
+			goto done_error;
+		if(fr_poll_for_quit(fast_reload_thread))
+			goto done;
+	}
+
+	if(!fr_output_printf(fast_reload_thread, "ok\n"))
+		goto done_error;
+	fr_send_notification(fast_reload_thread,
+		fast_reload_notification_printout);
+	verbose(VERB_ALGO, "stop fast reload thread");
+	/* If this is not an exit due to quit earlier, send regular done. */
+	if(!fast_reload_thread->need_to_quit)
+		fr_send_notification(fast_reload_thread,
+			fast_reload_notification_done);
+	/* If during the fast_reload_notification_done send,
+	 * fast_reload_notification_exit was received, ack it. If the
+	 * thread is exiting due to quit received earlier, also ack it.*/
+done:
+	if(fast_reload_thread->need_to_quit)
+		fr_send_notification(fast_reload_thread,
+			fast_reload_notification_exited);
+	return NULL;
+done_error:
+	verbose(VERB_ALGO, "stop fast reload thread with done_error");
+	fr_send_notification(fast_reload_thread,
+		fast_reload_notification_done_error);
+	return NULL;
+}
+#endif /* !THREADS_DISABLED */
+
+/** create a socketpair for bidirectional communication, false on failure.
+ * On POSIX this is a plain AF_UNIX socketpair(). On windows, where
+ * socketpair() does not exist, it is emulated with a TCP connection over
+ * 127.0.0.1: listen on an ephemeral port, connect to it, accept, and then
+ * verify that the accepted connection really is our own connect (same
+ * address, same port, and a random 16-byte nonce exchanged over it).
+ * On failure both pair entries are set to -1 and opened sockets closed. */
+static int
+create_socketpair(int* pair, struct ub_randstate* rand)
+{
+#ifndef USE_WINSOCK
+	if(socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
+		log_err("socketpair: %s", strerror(errno));
+		return 0;
+	}
+	(void)rand; /* the nonce is only needed for the windows emulation */
+#else
+	struct sockaddr_in addr, baddr, accaddr, connaddr;
+	socklen_t baddrlen, accaddrlen, connaddrlen;
+	uint8_t localhost[] = {127, 0, 0, 1};
+	uint8_t nonce[16], recvnonce[16];
+	size_t i;
+	int lst, pollin_event, bcount, loopcount;
+	int connect_poll_timeout = 200; /* msec to wait for connection */
+	ssize_t ret;
+	pair[0] = -1;
+	pair[1] = -1;
+	/* random nonce to verify the connection with, later on */
+	for(i=0; i<sizeof(nonce); i++) {
+		nonce[i] = ub_random_max(rand, 256);
+	}
+	/* listen socket on 127.0.0.1, port 0 picks an ephemeral port */
+	lst = socket(AF_INET, SOCK_STREAM, 0);
+	if(lst == -1) {
+		log_err("create_socketpair: socket: %s", sock_strerror(errno));
+		return 0;
+	}
+	memset(&addr, 0, sizeof(addr));
+	addr.sin_family = AF_INET;
+	addr.sin_port = 0;
+	memcpy(&addr.sin_addr, localhost, 4);
+	if(bind(lst, (struct sockaddr*)&addr, (socklen_t)sizeof(addr))
+		== -1) {
+		log_err("create socketpair: bind: %s", sock_strerror(errno));
+		sock_close(lst);
+		return 0;
+	}
+	if(listen(lst, 12) == -1) {
+		log_err("create socketpair: listen: %s", sock_strerror(errno));
+		sock_close(lst);
+		return 0;
+	}
+
+	/* connect pair[1] to the port the listen socket was bound to */
+	pair[1] = socket(AF_INET, SOCK_STREAM, 0);
+	if(pair[1] == -1) {
+		log_err("create socketpair: socket: %s", sock_strerror(errno));
+		sock_close(lst);
+		return 0;
+	}
+	baddrlen = (socklen_t)sizeof(baddr);
+	if(getsockname(lst, (struct sockaddr*)&baddr, &baddrlen) == -1) {
+		log_err("create socketpair: getsockname: %s",
+			sock_strerror(errno));
+		sock_close(lst);
+		sock_close(pair[1]);
+		pair[1] = -1;
+		return 0;
+	}
+	if(baddrlen > (socklen_t)sizeof(baddr)) {
+		log_err("create socketpair: getsockname returned addr too big");
+		sock_close(lst);
+		sock_close(pair[1]);
+		pair[1] = -1;
+		return 0;
+	}
+	/* the socket is blocking */
+	if(connect(pair[1], (struct sockaddr*)&baddr, baddrlen) == -1) {
+		log_err("create socketpair: connect: %s",
+			sock_strerror(errno));
+		sock_close(lst);
+		sock_close(pair[1]);
+		pair[1] = -1;
+		return 0;
+	}
+	/* wait a bounded time for the connection to arrive for accept */
+	if(!sock_poll_timeout(lst, connect_poll_timeout, 1, 0, &pollin_event)) {
+		log_err("create socketpair: poll for accept failed: %s",
+			sock_strerror(errno));
+		sock_close(lst);
+		sock_close(pair[1]);
+		pair[1] = -1;
+		return 0;
+	}
+	if(!pollin_event) {
+		log_err("create socketpair: poll timeout for accept");
+		sock_close(lst);
+		sock_close(pair[1]);
+		pair[1] = -1;
+		return 0;
+	}
+	accaddrlen = (socklen_t)sizeof(accaddr);
+	pair[0] = accept(lst, (struct sockaddr*)&accaddr, &accaddrlen);
+	if(pair[0] == -1) {
+		log_err("create socketpair: accept: %s", sock_strerror(errno));
+		sock_close(lst);
+		sock_close(pair[1]);
+		pair[1] = -1;
+		return 0;
+	}
+	if(accaddrlen > (socklen_t)sizeof(accaddr)) {
+		log_err("create socketpair: accept returned addr too big");
+		sock_close(lst);
+		sock_close(pair[0]);
+		sock_close(pair[1]);
+		pair[0] = -1;
+		pair[1] = -1;
+		return 0;
+	}
+	/* verify the accepted connection came from 127.0.0.1 */
+	if(accaddr.sin_family != AF_INET ||
+		memcmp(localhost, &accaddr.sin_addr, 4) != 0) {
+		log_err("create socketpair: accept from wrong address");
+		sock_close(lst);
+		sock_close(pair[0]);
+		sock_close(pair[1]);
+		pair[0] = -1;
+		pair[1] = -1;
+		return 0;
+	}
+	connaddrlen = (socklen_t)sizeof(connaddr);
+	if(getsockname(pair[1], (struct sockaddr*)&connaddr, &connaddrlen)
+		== -1) {
+		log_err("create socketpair: getsockname connectedaddr: %s",
+			sock_strerror(errno));
+		sock_close(lst);
+		sock_close(pair[0]);
+		sock_close(pair[1]);
+		pair[0] = -1;
+		pair[1] = -1;
+		return 0;
+	}
+	if(connaddrlen > (socklen_t)sizeof(connaddr)) {
+		log_err("create socketpair: getsockname connectedaddr returned addr too big");
+		sock_close(lst);
+		sock_close(pair[0]);
+		sock_close(pair[1]);
+		pair[0] = -1;
+		pair[1] = -1;
+		return 0;
+	}
+	if(connaddr.sin_family != AF_INET ||
+		memcmp(localhost, &connaddr.sin_addr, 4) != 0) {
+		log_err("create socketpair: getsockname connectedaddr returned wrong address");
+		sock_close(lst);
+		sock_close(pair[0]);
+		sock_close(pair[1]);
+		pair[0] = -1;
+		pair[1] = -1;
+		return 0;
+	}
+	/* the accepted peer port must match our own connect port */
+	if(accaddr.sin_port != connaddr.sin_port) {
+		log_err("create socketpair: accept from wrong port");
+		sock_close(lst);
+		sock_close(pair[0]);
+		sock_close(pair[1]);
+		pair[0] = -1;
+		pair[1] = -1;
+		return 0;
+	}
+	sock_close(lst); /* the listen socket is no longer needed */
+
+	/* send the nonce over pair[1], coping with partial sends */
+	loopcount = 0;
+	bcount = 0;
+	while(1) {
+		if(++loopcount > IPC_LOOP_MAX) {
+			log_err("create socketpair: send failed due to loop");
+			sock_close(pair[0]);
+			sock_close(pair[1]);
+			pair[0] = -1;
+			pair[1] = -1;
+			return 0;
+		}
+		ret = send(pair[1], (void*)(nonce+bcount),
+			sizeof(nonce)-bcount, 0);
+		if(ret == -1) {
+			if(
+#ifndef USE_WINSOCK
+				errno == EINTR || errno == EAGAIN
+#  ifdef EWOULDBLOCK
+				|| errno == EWOULDBLOCK
+#  endif
+#else
+				WSAGetLastError() == WSAEINTR ||
+				WSAGetLastError() == WSAEINPROGRESS ||
+				WSAGetLastError() == WSAEWOULDBLOCK
+#endif
+				)
+				continue; /* Try again. */
+			log_err("create socketpair: send: %s", sock_strerror(errno));
+			sock_close(pair[0]);
+			sock_close(pair[1]);
+			pair[0] = -1;
+			pair[1] = -1;
+			return 0;
+		} else if(ret+(ssize_t)bcount != sizeof(nonce)) {
+			/* partial send; continue with the remainder */
+			bcount += ret;
+			if((size_t)bcount < sizeof(nonce))
+				continue;
+		}
+		break;
+	}
+
+	/* wait a bounded time for the nonce to arrive on pair[0] */
+	if(!sock_poll_timeout(pair[0], connect_poll_timeout, 1, 0, &pollin_event)) {
+		log_err("create socketpair: poll failed: %s",
+			sock_strerror(errno));
+		sock_close(pair[0]);
+		sock_close(pair[1]);
+		pair[0] = -1;
+		pair[1] = -1;
+		return 0;
+	}
+	if(!pollin_event) {
+		log_err("create socketpair: poll timeout for recv");
+		sock_close(pair[0]);
+		sock_close(pair[1]);
+		pair[0] = -1;
+		pair[1] = -1;
+		return 0;
+	}
+
+	/* receive the nonce on pair[0], coping with partial reads */
+	loopcount = 0;
+	bcount = 0;
+	while(1) {
+		if(++loopcount > IPC_LOOP_MAX) {
+			log_err("create socketpair: recv failed due to loop");
+			sock_close(pair[0]);
+			sock_close(pair[1]);
+			pair[0] = -1;
+			pair[1] = -1;
+			return 0;
+		}
+		ret = recv(pair[0], (void*)(recvnonce+bcount),
+			sizeof(nonce)-bcount, 0);
+		if(ret == -1) {
+			if(
+#ifndef USE_WINSOCK
+				errno == EINTR || errno == EAGAIN
+#  ifdef EWOULDBLOCK
+				|| errno == EWOULDBLOCK
+#  endif
+#else
+				WSAGetLastError() == WSAEINTR ||
+				WSAGetLastError() == WSAEINPROGRESS ||
+				WSAGetLastError() == WSAEWOULDBLOCK
+#endif
+				)
+				continue; /* Try again. */
+			log_err("create socketpair: recv: %s", sock_strerror(errno));
+			sock_close(pair[0]);
+			sock_close(pair[1]);
+			pair[0] = -1;
+			pair[1] = -1;
+			return 0;
+		} else if(ret == 0) {
+			/* the peer closed the stream; not our connection */
+			log_err("create socketpair: stream closed");
+			sock_close(pair[0]);
+			sock_close(pair[1]);
+			pair[0] = -1;
+			pair[1] = -1;
+			return 0;
+		} else if(ret+(ssize_t)bcount != sizeof(nonce)) {
+			/* partial read; continue with the remainder */
+			bcount += ret;
+			if((size_t)bcount < sizeof(nonce))
+				continue;
+		}
+		break;
+	}
+
+	/* the received nonce must equal the one we sent */
+	if(memcmp(nonce, recvnonce, sizeof(nonce)) != 0) {
+		log_err("create socketpair: recv wrong nonce");
+		sock_close(pair[0]);
+		sock_close(pair[1]);
+		pair[0] = -1;
+		pair[1] = -1;
+		return 0;
+	}
+#endif
+	return 1;
+}
+
+/** fast reload thread. setup the thread info.
+ * Allocates daemon->fast_reload_thread, creates the two ipc socketpairs
+ * (commpair and commreload) and the output list with its lock.
+ * On failure everything allocated so far is released and
+ * daemon->fast_reload_thread is reset to NULL; returns 0. */
+static int
+fast_reload_thread_setup(struct worker* worker, int fr_verb, int fr_nopause,
+	int fr_drop_mesh)
+{
+	struct fast_reload_thread* fr;
+	int numworkers = worker->daemon->num;
+	worker->daemon->fast_reload_thread = (struct fast_reload_thread*)
+		calloc(1, sizeof(*worker->daemon->fast_reload_thread));
+	if(!worker->daemon->fast_reload_thread)
+		return 0;
+	fr = worker->daemon->fast_reload_thread;
+	fr->fr_verb = fr_verb;
+	fr->fr_nopause = fr_nopause;
+	fr->fr_drop_mesh = fr_drop_mesh;
+	worker->daemon->fast_reload_drop_mesh = fr->fr_drop_mesh;
+	/* The thread id printed in logs, numworker+1 is the dnstap thread.
+	 * This is numworkers+2. */
+	fr->threadnum = numworkers+2;
+	/* initialise to -1 so that teardown can close only what exists */
+	fr->commpair[0] = -1;
+	fr->commpair[1] = -1;
+	fr->commreload[0] = -1;
+	fr->commreload[1] = -1;
+	if(!create_socketpair(fr->commpair, worker->daemon->rand)) {
+		free(fr);
+		worker->daemon->fast_reload_thread = NULL;
+		return 0;
+	}
+	fr->worker = worker;
+	/* list that collects text output for the remote client */
+	fr->fr_output = (struct config_strlist_head*)calloc(1,
+		sizeof(*fr->fr_output));
+	if(!fr->fr_output) {
+		sock_close(fr->commpair[0]);
+		sock_close(fr->commpair[1]);
+		free(fr);
+		worker->daemon->fast_reload_thread = NULL;
+		return 0;
+	}
+	/* second ipc socketpair, for the reload coordination */
+	if(!create_socketpair(fr->commreload, worker->daemon->rand)) {
+		sock_close(fr->commpair[0]);
+		sock_close(fr->commpair[1]);
+		free(fr->fr_output);
+		free(fr);
+		worker->daemon->fast_reload_thread = NULL;
+		return 0;
+	}
+	lock_basic_init(&fr->fr_output_lock);
+	lock_protect(&fr->fr_output_lock, fr->fr_output,
+		sizeof(*fr->fr_output));
+	return 1;
+}
+
+/** fast reload, delete auth zone change list.
+ * Frees every element of the singly linked list; NULL is fine. */
+static void
+fr_auth_change_list_delete(
+	struct fast_reload_auth_change* auth_zone_change_list)
+{
+	struct fast_reload_auth_change* p = auth_zone_change_list;
+	while(p != NULL) {
+		/* save the link before the element is freed */
+		struct fast_reload_auth_change* follow = p->next;
+		free(p);
+		p = follow;
+	}
+}
+
+/** fast reload thread. desetup and delete the thread info.
+ * Removes and frees the service event, closes the ipc sockets, flushes
+ * or hands off the printout queue, and frees the remaining state.
+ * Safe to call with NULL. */
+static void
+fast_reload_thread_desetup(struct fast_reload_thread* fast_reload_thread)
+{
+	if(!fast_reload_thread)
+		return;
+	if(fast_reload_thread->service_event &&
+		fast_reload_thread->service_event_is_added) {
+		ub_event_del(fast_reload_thread->service_event);
+		fast_reload_thread->service_event_is_added = 0;
+	}
+	if(fast_reload_thread->service_event)
+		ub_event_free(fast_reload_thread->service_event);
+	sock_close(fast_reload_thread->commpair[0]);
+	sock_close(fast_reload_thread->commpair[1]);
+	sock_close(fast_reload_thread->commreload[0]);
+	sock_close(fast_reload_thread->commreload[1]);
+	if(fast_reload_thread->printq) {
+		/* try to print out the remaining queued text now */
+		fr_main_perform_printout(fast_reload_thread);
+		/* If it is empty now, there is nothing to print on fd. */
+		if(fr_printq_empty(fast_reload_thread->printq)) {
+			fr_printq_delete(fast_reload_thread->printq);
+		} else {
+			/* Keep the printq around to printout the remaining
+			 * text to the remote client. Until it is done, it
+			 * sits on a list, that is in the daemon struct.
+			 * The event can then spool the remaining text to the
+			 * remote client and eventually delete itself from the
+			 * callback. */
+			fr_printq_list_insert(fast_reload_thread->printq,
+				fast_reload_thread->worker->daemon);
+			/* ownership moved to the daemon list; do not free */
+			fast_reload_thread->printq = NULL;
+		}
+	}
+	lock_basic_destroy(&fast_reload_thread->fr_output_lock);
+	if(fast_reload_thread->fr_output) {
+		config_delstrlist(fast_reload_thread->fr_output->first);
+		free(fast_reload_thread->fr_output);
+	}
+	fr_auth_change_list_delete(fast_reload_thread->auth_zone_change_list);
+
+	free(fast_reload_thread);
+}
+
+/**
+ * Fast reload thread, send a command to the thread. Blocking on timeout.
+ * It handles received input from the thread, if any is received.
+ * The 4-byte cmd is written to commpair[0], waiting for writability with
+ * poll (indefinitely when blocking, else IPC_NOTIFICATION_WAIT msec per
+ * try) and coping with partial sends and EINTR/EAGAIN; when check_cmds is
+ * set, input from the thread is processed between tries.
+ */
+static void
+fr_send_cmd_to(struct fast_reload_thread* fr,
+	enum fast_reload_notification status, int check_cmds, int blocking)
+{
+	int outevent, loopexit = 0, bcount = 0;
+	uint32_t cmd;
+	ssize_t ret;
+	verbose(VERB_ALGO, "send notification to fast reload thread: %s",
+		fr_notification_to_string(status));
+	cmd = status;
+	while(1) {
+		/* the loop cap keeps this from spinning forever */
+		if(++loopexit > IPC_LOOP_MAX) {
+			log_err("send notification to fast reload: could not send notification: loop");
+			return;
+		}
+		if(check_cmds)
+			fr_check_cmd_from_thread(fr);
+		/* wait for socket to become writable */
+		if(!sock_poll_timeout(fr->commpair[0],
+			(blocking?-1:IPC_NOTIFICATION_WAIT),
+			0, 1, &outevent)) {
+			log_err("send notification to fast reload: poll failed");
+			return;
+		}
+		if(!outevent)
+			continue; /* timeout; poll (and check cmds) again */
+		/* keep static analyzer happy; send(-1,..) */
+		log_assert(fr->commpair[0] >= 0);
+		ret = send(fr->commpair[0], ((char*)&cmd)+bcount,
+			sizeof(cmd)-bcount, 0);
+		if(ret == -1) {
+			if(
+#ifndef USE_WINSOCK
+				errno == EINTR || errno == EAGAIN
+#  ifdef EWOULDBLOCK
+				|| errno == EWOULDBLOCK
+#  endif
+#else
+				WSAGetLastError() == WSAEINTR ||
+				WSAGetLastError() == WSAEINPROGRESS ||
+				WSAGetLastError() == WSAEWOULDBLOCK
+#endif
+				)
+				continue; /* Try again. */
+			log_err("send notification to fast reload: send: %s",
+				sock_strerror(errno));
+			return;
+		} else if(ret+(ssize_t)bcount != sizeof(cmd)) {
+			/* partial send; continue with the remainder */
+			bcount += ret;
+			if((size_t)bcount < sizeof(cmd))
+				continue;
+		}
+		break;
+	}
+}
+
+/** Fast reload thread, the main thread handles that the fast reload thread
+ * has exited: join with it, tear down its IPC state and clear the daemon
+ * reference so a new fast reload can be started. */
+static void
+fr_main_perform_done(struct fast_reload_thread* fr)
+{
+ /* Save the worker pointer first; fast_reload_thread_desetup frees
+ * the fr structure, so fr must not be used after that call. */
+ struct worker* worker = fr->worker;
+ verbose(VERB_ALGO, "join with fastreload thread");
+ ub_thread_join(fr->tid);
+ verbose(VERB_ALGO, "joined with fastreload thread");
+ fast_reload_thread_desetup(fr);
+ worker->daemon->fast_reload_thread = NULL;
+}
+
+/** Append one strlist after another strlist; splices the elements of
+ * 'more' onto the tail of 'list' without copying them. */
+static void
+cfg_strlist_append_listhead(struct config_strlist_head* list,
+ struct config_strlist_head* more)
+{
+ if(more->first == NULL)
+ return; /* nothing to append */
+ if(list->last == NULL)
+ list->first = more->first;
+ else list->last->next = more->first;
+ list->last = more->last;
+}
+
+/** Fast reload, the remote control thread handles that the fast reload
+ * thread has output to be printed, on the linked list that is locked.
+ * Moves the queued strings from the locked fr_output list to the printq
+ * and enables write events on the client comm point. */
+static void
+fr_main_perform_printout(struct fast_reload_thread* fr)
+{
+ struct config_strlist_head out;
+
+ /* Fetch the list of items to be printed */
+ lock_basic_lock(&fr->fr_output_lock);
+ out.first = fr->fr_output->first;
+ out.last = fr->fr_output->last;
+ fr->fr_output->first = NULL;
+ fr->fr_output->last = NULL;
+ lock_basic_unlock(&fr->fr_output_lock);
+
+ if(!fr->printq || !fr->printq->client_cp) {
+ /* There is no output socket, delete it. */
+ config_delstrlist(out.first);
+ return;
+ }
+
+ /* Put them on the output list, not locked because the list
+ * producer and consumer are both owned by the remote control thread,
+ * it moves the items to the list for printing in the event callback
+ * for the client_cp. */
+ cfg_strlist_append_listhead(fr->printq->to_print, &out);
+
+ /* Set the client_cp to output if not already */
+ if(!fr->printq->client_cp->event_added)
+ comm_point_listen_for_rw(fr->printq->client_cp, 0, 1);
+}
+
+/** fast reload, receive ack from workers that they are waiting, run
+ * by the mainthr after sending them reload_stop. Blocks on recv of the
+ * commreload socketpair until one byte has arrived from every other
+ * worker thread; interrupted recv calls are retried. */
+static void
+fr_read_ack_from_workers(struct fast_reload_thread* fr)
+{
+ struct daemon* daemon = fr->worker->daemon;
+ /* Every worker sends one byte, wait for num-1 bytes. */
+ int count=0, total=daemon->num-1;
+ while(count < total) {
+ uint8_t r;
+ ssize_t ret;
+ ret = recv(fr->commreload[0], (void*)&r, 1, 0);
+ if(ret == -1) {
+ if(
+#ifndef USE_WINSOCK
+ errno == EINTR || errno == EAGAIN
+# ifdef EWOULDBLOCK
+ || errno == EWOULDBLOCK
+# endif
+#else
+ WSAGetLastError() == WSAEINTR ||
+ WSAGetLastError() == WSAEINPROGRESS ||
+ WSAGetLastError() == WSAEWOULDBLOCK
+#endif
+ )
+ continue; /* Try again */
+ log_err("worker reload ack: recv failed: %s",
+ sock_strerror(errno));
+ return;
+ }
+ count++;
+ /* The byte carries the sender's identity for the log. */
+ verbose(VERB_ALGO, "worker reload ack from (uint8_t)%d",
+ (int)r);
+ }
+}
+
+/** fast reload, poll for reload_start in mainthr waiting on a notification
+ * from the fast reload thread. Blocks until a full uint32_t command has
+ * been received on commpair[0]; partial reads are accumulated in bcount.
+ * Logs a notice when the received command is not reload_start. */
+static void
+fr_poll_for_reload_start(struct fast_reload_thread* fr)
+{
+ int loopexit = 0, bcount = 0;
+ uint32_t cmd;
+ ssize_t ret;
+
+ /* Is there data? */
+ if(!sock_poll_timeout(fr->commpair[0], -1, 1, 0, NULL)) {
+ log_err("fr_poll_for_reload_start: poll failed");
+ return;
+ }
+
+ /* Read the data */
+ while(1) {
+ /* Bail out after IPC_LOOP_MAX tries to avoid spinning. */
+ if(++loopexit > IPC_LOOP_MAX) {
+ log_err("fr_poll_for_reload_start: recv loops %s",
+ sock_strerror(errno));
+ return;
+ }
+ ret = recv(fr->commpair[0], ((char*)&cmd)+bcount,
+ sizeof(cmd)-bcount, 0);
+ if(ret == -1) {
+ if(
+#ifndef USE_WINSOCK
+ errno == EINTR || errno == EAGAIN
+# ifdef EWOULDBLOCK
+ || errno == EWOULDBLOCK
+# endif
+#else
+ WSAGetLastError() == WSAEINTR ||
+ WSAGetLastError() == WSAEINPROGRESS ||
+ WSAGetLastError() == WSAEWOULDBLOCK
+#endif
+ )
+ continue; /* Try again. */
+ log_err("fr_poll_for_reload_start: recv: %s",
+ sock_strerror(errno));
+ return;
+ } else if(ret+(ssize_t)bcount != sizeof(cmd)) {
+ /* Partial read; continue until cmd is complete. */
+ bcount += ret;
+ if((size_t)bcount < sizeof(cmd))
+ continue;
+ }
+ break;
+ }
+ if(cmd != fast_reload_notification_reload_start) {
+ verbose(VERB_ALGO, "fast reload wait for ack: "
+ "wrong notification %d", (int)cmd);
+ }
+}
+
+/** Pick up the worker mesh changes, after fast reload. Copies the
+ * reloaded settings from the daemon and config into the worker's
+ * mesh area: response-ip/rpz usage, query limits and jostle timeout. */
+static void
+fr_worker_pickup_mesh(struct worker* worker)
+{
+ struct mesh_area* mesh = worker->env.mesh;
+ struct config_file* cfg = worker->env.cfg;
+ mesh->use_response_ip = worker->daemon->use_response_ip;
+ mesh->use_rpz = worker->daemon->use_rpz;
+ mesh->max_reply_states = cfg->num_queries_per_thread;
+ mesh->max_forever_states = (mesh->max_reply_states+1)/2;
+#ifndef S_SPLINT_S
+ /* jostle_time is in msec in the config; split into sec/usec. */
+ mesh->jostle_max.tv_sec = (time_t)(cfg->jostle_time / 1000);
+ mesh->jostle_max.tv_usec = (time_t)((cfg->jostle_time % 1000)*1000);
+#endif
+}
+
+/**
+ * Remove the old tcl_addr entries from the open connections.
+ * They are only incremented when an accept is performed on a tcp comm point.
+ * @param front: listening comm ports of the worker.
+ */
+static void
+tcl_remove_old(struct listen_dnsport* front)
+{
+ struct listen_list* l;
+ l = front->cps;
+ while(l) {
+ if(l->com->type == comm_tcp_accept) {
+ int i;
+ for(i=0; i<l->com->max_tcp_count; i++) {
+ if(l->com->tcp_handlers[i]->tcl_addr) {
+ /* Because the increment of the
+ * connection limit was in the old
+ * tcl list, the new list does not
+ * need a decrement. With NULL it is
+ * not decremented when the connection
+ * is done, and also there is no
+ * reference to the old connection
+ * limit structure. */
+ l->com->tcp_handlers[i]->tcl_addr =
+ NULL;
+ }
+ }
+ }
+ l = l->next;
+ }
+}
+
+/** Stop a zonemd lookup for the auth zone by removing the pending
+ * mesh callback that was registered for it. */
+static void
+auth_zone_zonemd_stop_lookup(struct auth_zone* z, struct mesh_area* mesh)
+{
+ uint16_t qflags = BIT_RD;
+ struct query_info qinfo;
+ /* Reconstruct the query used for the lookup, so the matching
+ * callback can be found and removed from the mesh. */
+ qinfo.local_alias = NULL;
+ qinfo.qname = z->name;
+ qinfo.qname_len = z->namelen;
+ qinfo.qtype = z->zonemd_callback_qtype;
+ qinfo.qclass = z->dclass;
+ mesh_remove_callback(mesh, &qinfo, qflags,
+ &auth_zonemd_dnskey_lookup_callback, z);
+}
+
+/** Pick up the auth zone locks, in a fixed order to avoid deadlock:
+ * old_auth_zones collection, current auth_zones collection, new_z, old_z,
+ * then the xfr locks. The zone name is copied out under namez's own lock
+ * first, so the xfr lookups can run without it.
+ * @param worker: the worker picking up the locks.
+ * @param namez: zone whose name/class is used for the xfr lookups; may be
+ *	the same pointer as old_z or new_z.
+ * @param old_z: old zone, write-locked if not NULL.
+ * @param new_z: new zone, write-locked if not NULL.
+ * @param xfr: if not NULL, receives the xfr from the current auth_zones,
+ *	locked if found.
+ * @param loadxfr: if not NULL, receives the xfr from the old_auth_zones,
+ *	locked if found.
+ * The caller is responsible for unlocking everything that was locked. */
+static void
+fr_pickup_auth_locks(struct worker* worker, struct auth_zone* namez,
+ struct auth_zone* old_z, struct auth_zone* new_z,
+ struct auth_xfer** xfr, struct auth_xfer** loadxfr)
+{
+ uint8_t nm[LDNS_MAX_DOMAINLEN+1];
+ size_t nmlen;
+ uint16_t dclass;
+
+ /* Copy out the name under the zone lock, then drop it before
+ * taking the collection locks. */
+ log_assert(namez->namelen <= sizeof(nm));
+ lock_rw_rdlock(&namez->lock);
+ nmlen = namez->namelen;
+ dclass = namez->dclass;
+ memmove(nm, namez->name, nmlen);
+ lock_rw_unlock(&namez->lock);
+
+ lock_rw_wrlock(&worker->daemon->fast_reload_thread->old_auth_zones->lock);
+ lock_rw_wrlock(&worker->env.auth_zones->lock);
+ if(new_z) {
+ lock_rw_wrlock(&new_z->lock);
+ }
+ if(old_z) {
+ lock_rw_wrlock(&old_z->lock);
+ }
+ if(loadxfr)
+ *loadxfr = auth_xfer_find(worker->daemon->fast_reload_thread->
+ old_auth_zones, nm, nmlen, dclass);
+ if(xfr)
+ *xfr = auth_xfer_find(worker->env.auth_zones, nm, nmlen,
+ dclass);
+ if(loadxfr && *loadxfr) {
+ lock_basic_lock(&(*loadxfr)->lock);
+ }
+ if(xfr && *xfr) {
+ lock_basic_lock(&(*xfr)->lock);
+ }
+}
+
+/** Fast reload, worker picks up deleted auth zone. Stops any zonemd
+ * lookup this worker was running for the zone, releases this worker's
+ * xfr task callbacks, and (when not part of a change operation) deletes
+ * the xfr item once no worker holds its tasks any more.
+ * @param worker: this worker.
+ * @param item: the change item with old_z, the deleted zone.
+ * @param for_change: true when called from the zone-change path; then the
+ *	xfr item is kept, since the change will reuse or recreate it. */
+static void
+fr_worker_auth_del(struct worker* worker, struct fast_reload_auth_change* item,
+ int for_change)
+{
+ int released = 0; /* Did this routine release callbacks. */
+ struct auth_xfer* xfr = NULL;
+
+ lock_rw_wrlock(&item->old_z->lock);
+ if(item->old_z->zonemd_callback_env &&
+ item->old_z->zonemd_callback_env->worker == worker){
+ /* This worker was performing a zonemd lookup,
+ * stop the lookup and remove that entry. */
+ auth_zone_zonemd_stop_lookup(item->old_z, worker->env.mesh);
+ item->old_z->zonemd_callback_env = NULL;
+ }
+ lock_rw_unlock(&item->old_z->lock);
+
+ fr_pickup_auth_locks(worker, item->old_z, item->old_z, NULL, &xfr,
+ NULL);
+ lock_rw_unlock(&worker->daemon->fast_reload_thread->old_auth_zones->lock);
+ lock_rw_unlock(&worker->env.auth_zones->lock);
+ lock_rw_unlock(&item->old_z->lock);
+ if(xfr) {
+ /* Release callbacks on the xfr, if this worker holds them. */
+ if(xfr->task_nextprobe->worker == worker ||
+ xfr->task_probe->worker == worker ||
+ xfr->task_transfer->worker == worker) {
+ released = 1;
+ xfr_disown_tasks(xfr, worker);
+ }
+ lock_basic_unlock(&xfr->lock);
+ }
+
+ if(!for_change && (released || worker->thread_num == 0)) {
+ /* See if the xfr item can be deleted. Locks are picked up
+ * again because the previous unlock let other workers run. */
+ xfr = NULL;
+ fr_pickup_auth_locks(worker, item->old_z, item->old_z, NULL,
+ &xfr, NULL);
+ lock_rw_unlock(&worker->daemon->fast_reload_thread->old_auth_zones->lock);
+ lock_rw_unlock(&item->old_z->lock);
+ if(xfr && xfr->task_nextprobe->worker == NULL &&
+ xfr->task_probe->worker == NULL &&
+ xfr->task_transfer->worker == NULL) {
+ /* No worker owns a task any more; remove and free. */
+ (void)rbtree_delete(&worker->env.auth_zones->xtree,
+ &xfr->node);
+ lock_rw_unlock(&worker->env.auth_zones->lock);
+ lock_basic_unlock(&xfr->lock);
+ auth_xfer_delete(xfr);
+ } else {
+ lock_rw_unlock(&worker->env.auth_zones->lock);
+ if(xfr) {
+ lock_basic_unlock(&xfr->lock);
+ }
+ }
+ }
+}
+
+/** Fast reload, auth xfer config is picked up. The probe and transfer
+ * master lists are swapped between the freshly loaded xfr and the xfr in
+ * current use; the loadxfr struct is deleted afterwards together with
+ * the old lists. */
+static void
+auth_xfr_pickup_config(struct auth_xfer* loadxfr, struct auth_xfer* xfr)
+{
+ struct auth_master* tmp;
+ log_assert(loadxfr->namelen == xfr->namelen);
+ log_assert(loadxfr->namelabs == xfr->namelabs);
+ log_assert(loadxfr->dclass == xfr->dclass);
+
+ /* Swap the probe master list. */
+ tmp = xfr->task_probe->masters;
+ xfr->task_probe->masters = loadxfr->task_probe->masters;
+ loadxfr->task_probe->masters = tmp;
+ /* Swap the transfer master list. */
+ tmp = xfr->task_transfer->masters;
+ xfr->task_transfer->masters = loadxfr->task_transfer->masters;
+ loadxfr->task_transfer->masters = tmp;
+}
+
+/** Fast reload, worker picks up added auth zone. Creates the xfr item
+ * for slave zones if needed, starts the initial zone transfer work, and
+ * performs the ZONEMD verification lookup for the new zone.
+ * @param worker: this worker.
+ * @param item: the change item with new_z, the added zone.
+ * @param for_change: true when called from the zone-change path; then the
+ *	serial is re-read from the zone and lookups are re-enabled. */
+static void
+fr_worker_auth_add(struct worker* worker, struct fast_reload_auth_change* item,
+ int for_change)
+{
+ struct auth_xfer* xfr = NULL, *loadxfr = NULL;
+
+ /* Start zone transfers and lookups. */
+ fr_pickup_auth_locks(worker, item->new_z, NULL, item->new_z, &xfr,
+ &loadxfr);
+ if(xfr == NULL && item->new_z->zone_is_slave) {
+ /* The xfr item needs to be created. The auth zones lock
+ * is held to make this possible. */
+ xfr = auth_xfer_create(worker->env.auth_zones, item->new_z);
+ auth_xfr_pickup_config(loadxfr, xfr);
+ /* Serial information is copied into the xfr struct. */
+ if(!xfr_find_soa(item->new_z, xfr)) {
+ xfr->serial = 0;
+ }
+ } else if(for_change && xfr) {
+ /* On change, refresh the serial from the new zone data. */
+ if(!xfr_find_soa(item->new_z, xfr)) {
+ xfr->serial = 0;
+ }
+ }
+ lock_rw_unlock(&item->new_z->lock);
+ lock_rw_unlock(&worker->env.auth_zones->lock);
+ lock_rw_unlock(&worker->daemon->fast_reload_thread->old_auth_zones->lock);
+ if(loadxfr) {
+ lock_basic_unlock(&loadxfr->lock);
+ }
+ if(xfr) {
+ auth_xfer_pickup_initial_zone(xfr, &worker->env);
+ if(for_change) {
+ xfr->task_probe->only_lookup = 0;
+ }
+ lock_basic_unlock(&xfr->lock);
+ }
+
+ /* Perform ZONEMD verification lookups. */
+ lock_rw_wrlock(&item->new_z->lock);
+ /* holding only the new_z lock */
+ auth_zone_verify_zonemd(item->new_z, &worker->env,
+ &worker->env.mesh->mods, NULL, 0, 1);
+ lock_rw_unlock(&item->new_z->lock);
+}
+
+/** Fast reload, worker picks up changed auth zone. First runs the delete
+ * path to stop old transfers and probes, then (on thread 0 only) adjusts
+ * the xfr item to the new config and runs the add path to restart work.
+ * @param worker: this worker.
+ * @param item: the change item with both old_z and new_z set. */
+static void
+fr_worker_auth_cha(struct worker* worker, struct fast_reload_auth_change* item)
+{
+ int todelete = 0;
+ struct auth_xfer* loadxfr = NULL, *xfr = NULL;
+ /* Since the zone has been changed, by rereading it from zone file,
+ * existing transfers and probes are likely for the old version.
+ * Stop them, and start new ones if needed. */
+ fr_worker_auth_del(worker, item, 1);
+
+ /* Only the first thread performs the config pickup below. */
+ if(worker->thread_num != 0)
+ return;
+
+ /* The old callbacks are stopped, tasks have been disowned. The
+ * new config contents can be picked up. SOA information is picked
+ * up in the auth_add routine, as it has the new_z ready. */
+
+ fr_pickup_auth_locks(worker, item->new_z, item->old_z, item->new_z,
+ &xfr, &loadxfr);
+
+ /* The xfr is not there any more if the zone is not set to have
+ * zone transfers. Or the xfr needs to be created if it is set to
+ * have zone transfers. */
+ if(loadxfr && xfr) {
+ /* Copy the config from loadxfr to the xfr in current use. */
+ auth_xfr_pickup_config(loadxfr, xfr);
+ } else if(!loadxfr && xfr) {
+ /* Delete the xfr. It is removed from the tree now and freed
+ * after the locks are released. */
+ (void)rbtree_delete(&worker->env.auth_zones->xtree,
+ &xfr->node);
+ todelete = 1;
+ item->new_z->zone_is_slave = 0;
+ } else if(loadxfr && !xfr) {
+ /* Create the xfr. */
+ xfr = auth_xfer_create(worker->env.auth_zones, item->new_z);
+ auth_xfr_pickup_config(loadxfr, xfr);
+ item->new_z->zone_is_slave = 1;
+ }
+ lock_rw_unlock(&item->new_z->lock);
+ lock_rw_unlock(&item->old_z->lock);
+ lock_rw_unlock(&worker->daemon->fast_reload_thread->old_auth_zones->lock);
+ lock_rw_unlock(&worker->env.auth_zones->lock);
+ if(loadxfr) {
+ lock_basic_unlock(&loadxfr->lock);
+ }
+ if(xfr) {
+ lock_basic_unlock(&xfr->lock);
+ }
+ if(todelete) {
+ auth_xfer_delete(xfr);
+ }
+
+ fr_worker_auth_add(worker, item, 1);
+}
+
+/** Fast reload, the worker picks up changes in auth zones by walking the
+ * list of deleted, added and changed zone items. */
+static void
+fr_worker_pickup_auth_changes(struct worker* worker,
+ struct fast_reload_auth_change* auth_zone_change_list)
+{
+ struct fast_reload_auth_change* p = auth_zone_change_list;
+ while(p) {
+ if(p->is_deleted)
+ fr_worker_auth_del(worker, p, 0);
+ /* Additions are only performed by the first thread. */
+ if(p->is_added && worker->thread_num == 0)
+ fr_worker_auth_add(worker, p, 0);
+ if(p->is_changed)
+ fr_worker_auth_cha(worker, p);
+ p = p->next;
+ }
+}
+
+/** Fast reload, the worker picks up changes in outside_network. Copies
+ * the reloaded config settings into the worker's outgoing network
+ * structure; delay_close (msec) is split into a sec/usec timeval. */
+static void
+fr_worker_pickup_outside_network(struct worker* worker)
+{
+ struct outside_network* outnet = worker->back;
+ struct config_file* cfg = worker->env.cfg;
+ outnet->use_caps_for_id = cfg->use_caps_bits_for_id;
+ outnet->unwanted_threshold = cfg->unwanted_threshold;
+ outnet->tls_use_sni = cfg->tls_use_sni;
+ outnet->tcp_mss = cfg->outgoing_tcp_mss;
+ outnet->ip_dscp = cfg->ip_dscp;
+ outnet->max_reuse_tcp_queries = cfg->max_reuse_tcp_queries;
+ outnet->tcp_reuse_timeout = cfg->tcp_reuse_timeout;
+ outnet->tcp_auth_query_timeout = cfg->tcp_auth_query_timeout;
+ outnet->delayclose = cfg->delay_close;
+ if(outnet->delayclose) {
+#ifndef S_SPLINT_S
+ outnet->delay_tv.tv_sec = cfg->delay_close/1000;
+ outnet->delay_tv.tv_usec = (cfg->delay_close%1000)*1000;
+#endif
+ }
+}
+
+/** A worker picks up the changes that a completed fast reload made,
+ * storing them in the worker-specific structures: mesh settings, tcp
+ * connection limit references, auth zone changes, cachedb enablement and
+ * outgoing network settings. */
+void
+fast_reload_worker_pickup_changes(struct worker* worker)
+{
+ /* The pickup of changes is called when the fast reload has
+ * a syncronized moment, and all the threads are paused and the
+ * reload has been applied. Then the worker can pick up the new
+ * changes and store them in worker-specific structs.
+ * The pickup is also called when there is no pause, and then
+ * it is called after the reload has completed, and the worker
+ * get a signal to release old information, it can then pick
+ * up the new information. But in the mean time, the reload has
+ * swapped in trees, and the worker has been running with the
+ * older information for some time. */
+ fr_worker_pickup_mesh(worker);
+
+ /* If the tcp connection limit has changed, the open connections
+ * need to remove their reference for the old tcp limits counters. */
+ if(worker->daemon->fast_reload_tcl_has_changes)
+ tcl_remove_old(worker->front);
+
+ /* If there are zonemd lookups, but the zone was deleted, the
+ * lookups should be cancelled. */
+ fr_worker_pickup_auth_changes(worker,
+ worker->daemon->fast_reload_thread->auth_zone_change_list);
+#ifdef USE_CACHEDB
+ worker->env.cachedb_enabled = worker->daemon->env->cachedb_enabled;
+#endif
+ fr_worker_pickup_outside_network(worker);
+}
+
+/** fast reload thread, handle reload_stop notification, send reload stop
+ * to other threads over IPC and collect their ack. When that is done,
+ * ack to the caller, the fast reload thread, and wait for it to send start.
+ * The sequence is: stop workers, ack to reload thread, wait for start,
+ * resume workers, pick up changes, collect acks, ack to reload thread. */
+static void
+fr_main_perform_reload_stop(struct fast_reload_thread* fr)
+{
+ struct daemon* daemon = fr->worker->daemon;
+ int i;
+
+ /* Send reload_stop to other threads. */
+ for(i=0; i<daemon->num; i++) {
+ if(i == fr->worker->thread_num)
+ continue; /* Do not send to ourselves. */
+ worker_send_cmd(daemon->workers[i], worker_cmd_reload_stop);
+ }
+
+ /* Wait for the other threads to ack. */
+ fr_read_ack_from_workers(fr);
+
+ /* Send ack to fast reload thread. */
+ fr_send_cmd_to(fr, fast_reload_notification_reload_ack, 0, 1);
+
+ /* Wait for reload_start from fast reload thread to resume. */
+ fr_poll_for_reload_start(fr);
+
+ /* Send reload_start to other threads */
+ for(i=0; i<daemon->num; i++) {
+ if(i == fr->worker->thread_num)
+ continue; /* Do not send to ourselves. */
+ worker_send_cmd(daemon->workers[i], worker_cmd_reload_start);
+ }
+
+ /* Pick up changes for this worker. */
+ if(fr->worker->daemon->fast_reload_drop_mesh) {
+ verbose(VERB_ALGO, "worker: drop mesh queries after reload");
+ mesh_delete_all(fr->worker->env.mesh);
+ }
+ fast_reload_worker_pickup_changes(fr->worker);
+
+ /* Wait for the other threads to ack. */
+ fr_read_ack_from_workers(fr);
+
+ /* Send ack to fast reload thread. */
+ fr_send_cmd_to(fr, fast_reload_notification_reload_ack, 0, 1);
+
+ verbose(VERB_ALGO, "worker resume after reload");
+}
+
+/** Fast reload, the main thread performs the nopause poll. It polls every
+ * other worker thread briefly over the command pipe ipc. The command takes
+ * no time for the worker, it can return immediately. After that it sends
+ * an acknowledgement to the fastreload thread. */
+static void
+fr_main_perform_reload_nopause_poll(struct fast_reload_thread* fr)
+{
+ struct daemon* daemon = fr->worker->daemon;
+ int i;
+
+ /* Send the reload_poll to other threads. They can respond
+ * one at a time. */
+ for(i=0; i<daemon->num; i++) {
+ if(i == fr->worker->thread_num)
+ continue; /* Do not send to ourselves. */
+ worker_send_cmd(daemon->workers[i], worker_cmd_reload_poll);
+ }
+
+ /* Wait for the other threads to ack. */
+ fr_read_ack_from_workers(fr);
+ /* This worker picks up its own changes directly. */
+ fast_reload_worker_pickup_changes(fr->worker);
+
+ /* Send ack to fast reload thread. */
+ fr_send_cmd_to(fr, fast_reload_notification_reload_ack, 0, 1);
+}
+
+/** Fast reload, perform the command received from the fast reload thread,
+ * dispatching on the notification code. */
+static void
+fr_main_perform_cmd(struct fast_reload_thread* fr,
+ enum fast_reload_notification status)
+{
+ verbose(VERB_ALGO, "main perform fast reload status: %s",
+ fr_notification_to_string(status));
+ switch(status) {
+ case fast_reload_notification_printout:
+ fr_main_perform_printout(fr);
+ break;
+ case fast_reload_notification_done:
+ case fast_reload_notification_done_error:
+ case fast_reload_notification_exited:
+ fr_main_perform_done(fr);
+ break;
+ case fast_reload_notification_reload_stop:
+ fr_main_perform_reload_stop(fr);
+ break;
+ case fast_reload_notification_reload_nopause_poll:
+ fr_main_perform_reload_nopause_poll(fr);
+ break;
+ default:
+ log_err("main received unknown status from fast reload: %d %s",
+ (int)status, fr_notification_to_string(status));
+ break;
+ }
+}
+
+/** Fast reload, handle command from fast reload to the main thread.
+ * Reads a uint32_t command from the nonblocking commpair[0] socket;
+ * partial reads are accumulated in fr->service_read_cmd_count across
+ * calls, and the command is only performed once complete. A closed
+ * connection is handled like a done_error notification. */
+static void
+fr_main_handle_cmd(struct fast_reload_thread* fr)
+{
+ enum fast_reload_notification status;
+ ssize_t ret;
+ /* keep static analyzer happy; recv(-1,..) */
+ log_assert(fr->commpair[0] >= 0);
+ ret = recv(fr->commpair[0],
+ ((char*)&fr->service_read_cmd)+fr->service_read_cmd_count,
+ sizeof(fr->service_read_cmd)-fr->service_read_cmd_count, 0);
+ if(ret == -1) {
+ if(
+#ifndef USE_WINSOCK
+ errno == EINTR || errno == EAGAIN
+# ifdef EWOULDBLOCK
+ || errno == EWOULDBLOCK
+# endif
+#else
+ WSAGetLastError() == WSAEINTR ||
+ WSAGetLastError() == WSAEINPROGRESS ||
+ WSAGetLastError() == WSAEWOULDBLOCK
+#endif
+ )
+ return; /* Continue later. */
+ log_err("read cmd from fast reload thread, recv: %s",
+ sock_strerror(errno));
+ return;
+ } else if(ret == 0) {
+ verbose(VERB_ALGO, "closed connection from fast reload thread");
+ fr->service_read_cmd_count = 0;
+ /* handle this like an error */
+ fr->service_read_cmd = fast_reload_notification_done_error;
+ } else if(ret + (ssize_t)fr->service_read_cmd_count <
+ (ssize_t)sizeof(fr->service_read_cmd)) {
+ /* Only part of the command arrived; remember the count and
+ * finish reading on a later event. */
+ fr->service_read_cmd_count += ret;
+ /* Continue later. */
+ return;
+ }
+ /* Full command received; reset the read state before acting on it,
+ * since performing the command may recurse into more reads. */
+ status = fr->service_read_cmd;
+ fr->service_read_cmd = 0;
+ fr->service_read_cmd_count = 0;
+ fr_main_perform_cmd(fr, status);
+}
+
+/** Fast reload, poll for and handle cmd from fast reload thread.
+ * Loops reading commands while data is available; stops when the poll
+ * reports no readable data, or when the fast reload thread has exited
+ * (the daemon's fast_reload_thread pointer is then NULL). */
+static void
+fr_check_cmd_from_thread(struct fast_reload_thread* fr)
+{
+ int inevent = 0;
+ struct worker* worker = fr->worker;
+ /* Stop in case the thread has exited, or there is no read event. */
+ while(worker->daemon->fast_reload_thread) {
+ /* Poll with zero timeout; do not block here. */
+ if(!sock_poll_timeout(fr->commpair[0], 0, 1, 0, &inevent)) {
+ log_err("check for cmd from fast reload thread: "
+ "poll failed");
+ return;
+ }
+ if(!inevent)
+ return;
+ fr_main_handle_cmd(fr);
+ }
+}
+
+/** Event callback on the remote servicing thread for readable data on
+ * the fast reload commpair socket; arg is the fast reload thread. Reads
+ * one command and then drains any further pending commands. */
+void fast_reload_service_cb(int ATTR_UNUSED(fd), short ATTR_UNUSED(bits),
+ void* arg)
+{
+ struct fast_reload_thread* fast_reload_thread =
+ (struct fast_reload_thread*)arg;
+ struct worker* worker = fast_reload_thread->worker;
+
+ /* Read and handle the command */
+ fr_main_handle_cmd(fast_reload_thread);
+ /* The command may have been 'done', which frees the thread struct;
+ * check the daemon pointer before using it again. */
+ if(worker->daemon->fast_reload_thread != NULL) {
+ /* If not exited, see if there are more pending statuses
+ * from the fast reload thread. */
+ fr_check_cmd_from_thread(fast_reload_thread);
+ }
+}
+
+#ifdef HAVE_SSL
+/** fast reload, send client item over SSL. Returns number of bytes
+ * printed, 0 on wait later, or -1 on failure. Sends from the current
+ * client_item starting at client_byte_count; on WANT_READ/WANT_WRITE
+ * the caller should retry when the event fires again. */
+static int
+fr_client_send_item_ssl(struct fast_reload_printq* printq)
+{
+ int r;
+ ERR_clear_error();
+ r = SSL_write(printq->remote.ssl,
+ printq->client_item+printq->client_byte_count,
+ printq->client_len - printq->client_byte_count);
+ if(r <= 0) {
+ int want = SSL_get_error(printq->remote.ssl, r);
+ if(want == SSL_ERROR_ZERO_RETURN) {
+ log_err("fast_reload print to remote client: "
+ "SSL_write says connection closed.");
+ return -1;
+ } else if(want == SSL_ERROR_WANT_READ) {
+ /* wait for read condition; record the handshake state
+ * so the callback can switch back to writing later. */
+ printq->client_cp->ssl_shake_state = comm_ssl_shake_hs_read;
+ comm_point_listen_for_rw(printq->client_cp, 1, 0);
+ return 0;
+ } else if(want == SSL_ERROR_WANT_WRITE) {
+#ifdef USE_WINSOCK
+ ub_winsock_tcp_wouldblock(comm_point_internal(printq->client_cp), UB_EV_WRITE);
+#endif
+ return 0; /* write more later */
+ } else if(want == SSL_ERROR_SYSCALL) {
+#ifdef EPIPE
+ if(errno == EPIPE && verbosity < 2) {
+ /* silence 'broken pipe' */
+ return -1;
+ }
+#endif
+ if(errno != 0)
+ log_err("fast_reload print to remote client: "
+ "SSL_write syscall: %s",
+ sock_strerror(errno));
+ return -1;
+ }
+ log_crypto_err_io("fast_reload print to remote client: "
+ "could not SSL_write", want);
+ return -1;
+ }
+ return r;
+}
+#endif /* HAVE_SSL */
+
+/** fast reload, send client item for fd, returns bytes sent, or 0 for wait
+ * later, or -1 on failure. Sends from the current client_item starting at
+ * client_byte_count on the plain (non-SSL) remote fd. */
+static int
+fr_client_send_item_fd(struct fast_reload_printq* printq)
+{
+ int r;
+ r = (int)send(printq->remote.fd,
+ printq->client_item+printq->client_byte_count,
+ printq->client_len - printq->client_byte_count, 0);
+ if(r == -1) {
+ if(
+#ifndef USE_WINSOCK
+ errno == EINTR || errno == EAGAIN
+# ifdef EWOULDBLOCK
+ || errno == EWOULDBLOCK
+# endif
+#else
+ WSAGetLastError() == WSAEINTR ||
+ WSAGetLastError() == WSAEINPROGRESS ||
+ WSAGetLastError() == WSAEWOULDBLOCK
+#endif
+ ) {
+#ifdef USE_WINSOCK
+ ub_winsock_tcp_wouldblock(comm_point_internal(printq->client_cp), UB_EV_WRITE);
+#endif
+ return 0; /* Try again. */
+ }
+ log_err("fast_reload print to remote client: send failed: %s",
+ sock_strerror(errno));
+ return -1;
+ }
+ return r;
+}
+
+/** fast reload, send current client item. false on failure or wait later.
+ * Picks SSL or plain fd output; on hard failure the printq is removed.
+ * Returns true only when the whole current item has been sent. */
+static int
+fr_client_send_item(struct fast_reload_printq* printq)
+{
+ int r;
+ /* Note the brace pairing: without HAVE_SSL only the fd branch
+ * compiles; with it, the else-brace is closed in the second ifdef. */
+#ifdef HAVE_SSL
+ if(printq->remote.ssl) {
+ r = fr_client_send_item_ssl(printq);
+ } else {
+#endif
+ r = fr_client_send_item_fd(printq);
+#ifdef HAVE_SSL
+ }
+#endif
+ if(r == 0) {
+ /* Wait for later. */
+ return 0;
+ } else if(r == -1) {
+ /* It failed, close comm point and stop sending. */
+ fr_printq_remove(printq);
+ return 0;
+ }
+ printq->client_byte_count += r;
+ if(printq->client_byte_count < printq->client_len)
+ return 0; /* Print more later. */
+ return 1;
+}
+
+/** fast reload, pick up the next item to print. Pops the first string
+ * off the to_print queue into client_item/client_len, or clears the
+ * client item state when the queue is empty. */
+static void
+fr_client_pickup_next_item(struct fast_reload_printq* printq)
+{
+ struct config_strlist* head = printq->to_print->first;
+ if(head == NULL) {
+ /* Nothing queued; clear the current item state. */
+ printq->client_item = NULL;
+ printq->client_len = 0;
+ printq->client_byte_count = 0;
+ return;
+ }
+ /* Unlink the first element from the queue. */
+ printq->to_print->first = head->next;
+ if(head->next == NULL)
+ printq->to_print->last = NULL;
+ head->next = NULL;
+ /* Take ownership of the string and free the list element. */
+ printq->client_byte_count = 0;
+ printq->client_item = head->str;
+ head->str = NULL;
+ free(head);
+ /* The len is the number of bytes to print out, and thus excludes
+ * the terminator zero; it stays 0 for a NULL string. */
+ printq->client_len = 0;
+ if(printq->client_item)
+ printq->client_len = (int)strlen(printq->client_item);
+}
+
+/** Comm point callback for the remote control client during fast reload
+ * printout; arg is the printq. Sends queued strings to the client; on
+ * error or when the queue runs dry the printq is removed (if listed) or
+ * the comm point stops listening. Always returns 0. */
+int fast_reload_client_callback(struct comm_point* ATTR_UNUSED(c), void* arg,
+ int err, struct comm_reply* ATTR_UNUSED(rep))
+{
+ struct fast_reload_printq* printq = (struct fast_reload_printq*)arg;
+ if(!printq->client_cp) {
+ fr_printq_remove(printq);
+ return 0; /* the output is closed and deleted */
+ }
+ if(err != NETEVENT_NOERROR) {
+ verbose(VERB_ALGO, "fast reload client: error, close it");
+ fr_printq_remove(printq);
+ return 0;
+ }
+#ifdef HAVE_SSL
+ if(printq->client_cp->ssl_shake_state == comm_ssl_shake_hs_read) {
+ /* read condition satisfied back to writing */
+ comm_point_listen_for_rw(printq->client_cp, 0, 1);
+ printq->client_cp->ssl_shake_state = comm_ssl_shake_none;
+ }
+#endif /* HAVE_SSL */
+
+ /* Pickup an item if there are none */
+ if(!printq->client_item) {
+ fr_client_pickup_next_item(printq);
+ }
+ if(!printq->client_item) {
+ if(printq->in_list) {
+ /* Nothing more to print, it can be removed. */
+ fr_printq_remove(printq);
+ return 0;
+ }
+ /* Done with printing for now. */
+ comm_point_stop_listening(printq->client_cp);
+ return 0;
+ }
+
+ /* Try to print out a number of items, if they can print in full. */
+ while(printq->client_item) {
+ /* Send current item, if any. */
+ if(printq->client_item && printq->client_len != 0 &&
+ printq->client_byte_count < printq->client_len) {
+ /* A zero return means wait for the next write event;
+ * partial progress was recorded in the printq. */
+ if(!fr_client_send_item(printq))
+ return 0;
+ }
+
+ /* The current item is done. */
+ if(printq->client_item) {
+ free(printq->client_item);
+ printq->client_item = NULL;
+ printq->client_len = 0;
+ printq->client_byte_count = 0;
+ }
+ if(!printq->to_print->first) {
+ if(printq->in_list) {
+ /* Nothing more to print, it can be removed. */
+ fr_printq_remove(printq);
+ return 0;
+ }
+ /* Done with printing for now. */
+ comm_point_stop_listening(printq->client_cp);
+ return 0;
+ }
+ fr_client_pickup_next_item(printq);
+ }
+
+ return 0;
+}
+
+#ifndef THREADS_DISABLED
+/** fast reload printq create; allocates the print queue and attaches it
+ * to the client comm point. Returns NULL on allocation failure. */
+static struct fast_reload_printq*
+fr_printq_create(struct comm_point* c, struct worker* worker)
+{
+ struct fast_reload_printq* pq = calloc(1, sizeof(*pq));
+ if(pq == NULL)
+ return NULL;
+ pq->to_print = calloc(1, sizeof(*pq->to_print));
+ if(pq->to_print == NULL) {
+ free(pq);
+ return NULL;
+ }
+ pq->worker = worker;
+ /* Route the comm point events to the printq callback. */
+ pq->client_cp = c;
+ pq->client_cp->callback = fast_reload_client_callback;
+ pq->client_cp->cb_arg = pq;
+ return pq;
+}
+#endif /* !THREADS_DISABLED */
+
+/** fast reload printq delete. Shuts down and frees the SSL state if
+ * present, deletes the client comm point, frees the queued strings and
+ * the printq itself. NULL is allowed. */
+static void
+fr_printq_delete(struct fast_reload_printq* printq)
+{
+ if(!printq)
+ return;
+#ifdef HAVE_SSL
+ if(printq->remote.ssl) {
+ SSL_shutdown(printq->remote.ssl);
+ SSL_free(printq->remote.ssl);
+ }
+#endif
+ comm_point_delete(printq->client_cp);
+ if(printq->to_print) {
+ config_delstrlist(printq->to_print->first);
+ free(printq->to_print);
+ }
+ free(printq);
+}
+
+/** fast reload printq, returns true if the list is empty and there is
+ * no partially printed item. */
+static int
+fr_printq_empty(struct fast_reload_printq* printq)
+{
+ return (printq->to_print->first == NULL &&
+ printq->client_item == NULL);
+}
+
+/** fast reload printq, insert at the head of the daemon's printq list;
+ * no-op when it is already on the list. */
+static void
+fr_printq_list_insert(struct fast_reload_printq* printq, struct daemon* daemon)
+{
+ struct fast_reload_printq* head = daemon->fast_reload_printq_list;
+ if(printq->in_list)
+ return; /* already listed */
+ printq->prev = NULL;
+ printq->next = head;
+ if(head)
+ head->prev = printq;
+ daemon->fast_reload_printq_list = printq;
+ printq->in_list = 1;
+}
+
+/** fast reload printq delete list; deletes every printq on the list. */
+void
+fast_reload_printq_list_delete(struct fast_reload_printq* list)
+{
+ struct fast_reload_printq* p = list;
+ while(p != NULL) {
+ struct fast_reload_printq* after = p->next;
+ fr_printq_delete(p);
+ p = after;
+ }
+}
+
+/** fast reload printq remove the item from the printq list, fixing the
+ * daemon's head pointer when the element is first on the list. */
+static void
+fr_printq_list_remove(struct fast_reload_printq* printq)
+{
+ struct daemon* daemon = printq->worker->daemon;
+ if(printq->next)
+ printq->next->prev = printq->prev;
+ if(printq->prev)
+ printq->prev->next = printq->next;
+ else daemon->fast_reload_printq_list = printq->next;
+ printq->in_list = 0;
+}
+
+/** fast reload printq, remove the printq when no longer needed,
+ * like the stream is closed. Clears the fast reload thread's reference
+ * to it, unlinks it from the daemon list if listed, and deletes it. */
+static void
+fr_printq_remove(struct fast_reload_printq* printq)
+{
+ if(!printq)
+ return;
+ if(printq->worker->daemon->fast_reload_thread &&
+ printq->worker->daemon->fast_reload_thread->printq == printq)
+ printq->worker->daemon->fast_reload_thread->printq = NULL;
+ if(printq->in_list)
+ fr_printq_list_remove(printq);
+ fr_printq_delete(printq);
+}
+
+/** fast reload thread, send stop command to the thread, from the main
+ * thread. Nonblocking with command checking, so pending output from the
+ * thread is still handled while the exit notification is delivered. */
+static void
+fr_send_stop(struct fast_reload_thread* fr)
+{
+ fr_send_cmd_to(fr, fast_reload_notification_exit, 1, 0);
+}
+
+/** Start the fast reload thread from the remote servicing thread.
+ * Sets up the thread state, the nonblocking IPC event on the commpair
+ * socket, and takes over the remote control client connection for the
+ * printout queue, then creates the thread.
+ * @param ssl: remote control connection; errors are printed to it, and
+ *	it is copied into the printq for output to the client.
+ * @param worker: the remote servicing worker.
+ * @param s: remote control state of the client connection; taken over.
+ * @param fr_verb: verbosity option passed to fast_reload_thread_setup.
+ * @param fr_nopause: nopause option passed to fast_reload_thread_setup.
+ * @param fr_drop_mesh: drop-mesh option passed to fast_reload_thread_setup.
+ */
+void
+fast_reload_thread_start(RES* ssl, struct worker* worker, struct rc_state* s,
+ int fr_verb, int fr_nopause, int fr_drop_mesh)
+{
+ if(worker->daemon->fast_reload_thread) {
+ log_err("fast reload thread already running");
+ return;
+ }
+ if(!fast_reload_thread_setup(worker, fr_verb, fr_nopause,
+ fr_drop_mesh)) {
+ if(!ssl_printf(ssl, "error could not setup thread\n"))
+ return;
+ return;
+ }
+ worker->daemon->fast_reload_thread->started = 1;
+
+#ifndef THREADS_DISABLED
+ /* Setup command listener in remote servicing thread */
+ /* The listener has to be nonblocking, so the the remote servicing
+ * thread can continue to service DNS queries, the fast reload
+ * thread is going to read the config from disk and apply it. */
+ /* The commpair[1] element can stay blocking, it is used by the
+ * fast reload thread to communicate back. The thread needs to wait
+ * at these times, when it has to check briefly it can use poll. */
+ fd_set_nonblock(worker->daemon->fast_reload_thread->commpair[0]);
+ worker->daemon->fast_reload_thread->service_event = ub_event_new(
+ comm_base_internal(worker->base),
+ worker->daemon->fast_reload_thread->commpair[0],
+ UB_EV_READ | UB_EV_PERSIST, fast_reload_service_cb,
+ worker->daemon->fast_reload_thread);
+ if(!worker->daemon->fast_reload_thread->service_event) {
+ fast_reload_thread_desetup(worker->daemon->fast_reload_thread);
+ if(!ssl_printf(ssl, "error out of memory\n"))
+ return;
+ return;
+ }
+ if(ub_event_add(worker->daemon->fast_reload_thread->service_event,
+ NULL) != 0) {
+ fast_reload_thread_desetup(worker->daemon->fast_reload_thread);
+ if(!ssl_printf(ssl, "error out of memory adding service event\n"))
+ return;
+ return;
+ }
+ worker->daemon->fast_reload_thread->service_event_is_added = 1;
+
+ /* Setup the comm point to the remote control client as an event
+ * on the remote servicing thread, which it already is.
+ * It needs a new callback to service it. */
+ log_assert(s);
+ state_list_remove_elem(&s->rc->busy_list, s->c);
+ s->rc->active --;
+ /* Set the comm point file descriptor to nonblocking. So that
+ * printout to the remote control client does not block the
+ * server thread from servicing DNS queries. */
+ fd_set_nonblock(s->c->fd);
+ worker->daemon->fast_reload_thread->printq = fr_printq_create(s->c,
+ worker);
+ if(!worker->daemon->fast_reload_thread->printq) {
+ fast_reload_thread_desetup(worker->daemon->fast_reload_thread);
+ if(!ssl_printf(ssl, "error out of memory create printq\n"))
+ return;
+ return;
+ }
+ /* Copy the RES (ssl or plain fd) so the printq owns the output. */
+ worker->daemon->fast_reload_thread->printq->remote = *ssl;
+ s->rc = NULL; /* move away the rc state */
+ /* Nothing to print right now, so no need to have it active. */
+ comm_point_stop_listening(worker->daemon->fast_reload_thread->printq->client_cp);
+
+ /* Start fast reload thread */
+ ub_thread_create(&worker->daemon->fast_reload_thread->tid,
+ fast_reload_thread_main, worker->daemon->fast_reload_thread);
+#else
+ (void)s;
+#endif
+}
+
+void
+fast_reload_thread_stop(struct fast_reload_thread* fast_reload_thread)
+{
+ struct worker* worker = fast_reload_thread->worker;
+ if(!fast_reload_thread)
+ return;
+ fr_send_stop(fast_reload_thread);
+ if(worker->daemon->fast_reload_thread != NULL) {
+ /* If it did not exit yet, join with the thread now. It is
+ * going to exit because the exit command is sent to it. */
+ fr_main_perform_done(fast_reload_thread);
+ }
+}