#include <sys/types.h>
#include <unistd.h>
-#include <isc/app.h>
#include <isc/attributes.h>
#include <isc/base64.h>
#include <isc/buffer.h>
static isc_mem_t *mctx = NULL;
static isc_log_t *lctx = NULL;
+/* Managers */
+static isc_nm_t *netmgr = NULL;
+static isc_loopmgr_t *loopmgr = NULL;
+static isc_taskmgr_t *taskmgr = NULL;
+
/* Configurables */
static char *server = NULL;
static const char *port = "53";
static dns_fixedname_t afn;
static dns_name_t *anchor_name = NULL;
+static dns_master_style_t *style = NULL;
+static dns_fixedname_t qfn;
+
/* Default bind.keys contents */
static char anchortext[] = TRUST_ANCHORS;
}
static isc_result_t
-printdata(dns_rdataset_t *rdataset, dns_name_t *owner,
- dns_master_style_t *style) {
+printdata(dns_rdataset_t *rdataset, dns_name_t *owner) {
isc_result_t result = ISC_R_SUCCESS;
static dns_trust_t trust;
static bool first = true;
}
static isc_result_t
-setup_style(dns_master_style_t **stylep) {
+setup_style(void) {
isc_result_t result;
- dns_master_style_t *style = NULL;
-
- REQUIRE(stylep != NULL && *stylep == NULL);
styleflags |= DNS_STYLEFLAG_REL_OWNER;
if (yaml) {
48, 80, 8, splitwidth, mctx);
}
- if (result == ISC_R_SUCCESS) {
- *stylep = style;
- }
return (result);
}
}
}
+static void
+resolve_cb(dns_client_t *client, const dns_name_t *query_name,
+ dns_namelist_t *namelist, isc_result_t result) {
+ char namestr[DNS_NAME_FORMATSIZE];
+ dns_rdataset_t *rdataset;
+
+ if (result != ISC_R_SUCCESS && !yaml) {
+ delv_log(ISC_LOG_ERROR, "resolution failed: %s",
+ isc_result_totext(result));
+ }
+
+ if (yaml) {
+ printf("type: DELV_RESULT\n");
+ dns_name_format(query_name, namestr, sizeof(namestr));
+ printf("query_name: %s\n", namestr);
+ printf("status: %s\n", isc_result_totext(result));
+ printf("records:\n");
+ }
+
+ for (dns_name_t *response_name = ISC_LIST_HEAD(*namelist);
+ response_name != NULL;
+ response_name = ISC_LIST_NEXT(response_name, link))
+ {
+ for (rdataset = ISC_LIST_HEAD(response_name->list);
+ rdataset != NULL; rdataset = ISC_LIST_NEXT(rdataset, link))
+ {
+ result = printdata(rdataset, response_name);
+ if (result != ISC_R_SUCCESS) {
+ delv_log(ISC_LOG_ERROR, "print data failed");
+ }
+ }
+ }
+
+ dns_client_freeresanswer(client, namelist);
+ isc_mem_put(mctx, namelist, sizeof(*namelist));
+
+ dns_client_detach(&client);
+
+ isc_loopmgr_shutdown(loopmgr);
+}
+
+static void
+resolve(void *arg) {
+ dns_client_t *client = arg;
+ dns_namelist_t *namelist;
+ unsigned int resopt;
+ isc_result_t result;
+ dns_name_t *query_name;
+
+ namelist = isc_mem_get(mctx, sizeof(*namelist));
+ ISC_LIST_INIT(*namelist);
+
+ /* Construct QNAME */
+ CHECK(convert_name(&qfn, &query_name, qname));
+
+ /* Set up resolution options */
+ resopt = DNS_CLIENTRESOPT_NOCDFLAG;
+ if (no_sigs) {
+ resopt |= DNS_CLIENTRESOPT_NODNSSEC;
+ }
+ if (!root_validation) {
+ resopt |= DNS_CLIENTRESOPT_NOVALIDATE;
+ }
+ if (cdflag) {
+ resopt &= ~DNS_CLIENTRESOPT_NOCDFLAG;
+ }
+ if (use_tcp) {
+ resopt |= DNS_CLIENTRESOPT_TCP;
+ }
+
+ /* Perform resolution */
+ result = dns_client_resolve(client, query_name, dns_rdataclass_in,
+ qtype, resopt, namelist, resolve_cb);
+
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup;
+ }
+
+ return;
+cleanup:
+ if (!yaml) {
+ delv_log(ISC_LOG_ERROR, "resolution failed: %s",
+ isc_result_totext(result));
+ }
+
+ isc_mem_put(mctx, namelist, sizeof(*namelist));
+ isc_loopmgr_shutdown(loopmgr);
+
+ dns_client_detach(&client);
+}
+
int
main(int argc, char *argv[]) {
dns_client_t *client = NULL;
isc_result_t result;
- dns_fixedname_t qfn;
- dns_name_t *query_name, *response_name;
- char namestr[DNS_NAME_FORMATSIZE];
- dns_rdataset_t *rdataset;
- dns_namelist_t namelist;
- unsigned int resopt;
- isc_loopmgr_t *loopmgr = NULL;
- isc_nm_t *netmgr = NULL;
- isc_taskmgr_t *taskmgr = NULL;
- dns_master_style_t *style = NULL;
progname = argv[0];
preparse_args(argc, argv);
argc--;
argv++;
- isc_mem_create(&mctx);
+ isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr);
result = dst_lib_init(mctx, NULL);
if (result != ISC_R_SUCCESS) {
fatal("dst_lib_init failed: %d", result);
}
- isc_managers_create(mctx, 1, 0, &loopmgr, &netmgr, &taskmgr);
-
parse_args(argc, argv);
- CHECK(setup_style(&style));
+ CHECK(setup_style());
setup_logging(stderr);
CHECK(setup_dnsseckeys(client));
- /* Construct QNAME */
- CHECK(convert_name(&qfn, &query_name, qname));
+ isc_loop_setup(isc_loop_main(loopmgr), resolve, client);
- /* Set up resolution options */
- resopt = DNS_CLIENTRESOPT_NOCDFLAG;
- if (no_sigs) {
- resopt |= DNS_CLIENTRESOPT_NODNSSEC;
- }
- if (!root_validation) {
- resopt |= DNS_CLIENTRESOPT_NOVALIDATE;
- }
- if (cdflag) {
- resopt &= ~DNS_CLIENTRESOPT_NOCDFLAG;
- }
- if (use_tcp) {
- resopt |= DNS_CLIENTRESOPT_TCP;
- }
-
- /* Perform resolution */
- ISC_LIST_INIT(namelist);
- result = dns_client_resolve(client, query_name, dns_rdataclass_in,
- qtype, resopt, &namelist);
- if (result != ISC_R_SUCCESS && !yaml) {
- delv_log(ISC_LOG_ERROR, "resolution failed: %s",
- isc_result_totext(result));
- }
-
- if (yaml) {
- printf("type: DELV_RESULT\n");
- dns_name_format(query_name, namestr, sizeof(namestr));
- printf("query_name: %s\n", namestr);
- printf("status: %s\n", isc_result_totext(result));
- printf("records:\n");
- }
-
- for (response_name = ISC_LIST_HEAD(namelist); response_name != NULL;
- response_name = ISC_LIST_NEXT(response_name, link))
- {
- for (rdataset = ISC_LIST_HEAD(response_name->list);
- rdataset != NULL; rdataset = ISC_LIST_NEXT(rdataset, link))
- {
- result = printdata(rdataset, response_name, style);
- if (result != ISC_R_SUCCESS) {
- delv_log(ISC_LOG_ERROR, "print data failed");
- }
- }
- }
-
- dns_client_freeresanswer(client, &namelist);
+ isc_loopmgr_run(loopmgr);
cleanup:
if (trust_anchor != NULL) {
if (style != NULL) {
dns_master_styledestroy(&style, mctx);
}
- if (client != NULL) {
- dns_client_detach(&client);
- }
- isc_managers_destroy(&loopmgr, &netmgr, &taskmgr);
-
- if (lctx != NULL) {
- isc_log_destroy(&lctx);
- }
- isc_mem_detach(&mctx);
+ isc_log_destroy(&lctx);
dst_lib_destroy();
+ isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr);
+
return (0);
}
#include <isc/sockaddr.h>
#include <isc/string.h>
#include <isc/task.h>
+#include <isc/timer.h>
#include <isc/types.h>
#include <isc/util.h>
fatal("can't find either v4 or v6 networking");
}
- isc_mem_create(&mctx);
- isc_mem_setname(mctx, "dig");
-
- isc_managers_create(mctx, 1, 0, &loopmgr, &netmgr, &taskmgr);
+ isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr);
isc_log_create(mctx, &lctx, &logconfig);
isc_log_setcontext(lctx);
isc_log_setdebuglevel(lctx, 0);
+ isc_mem_setname(mctx, "dig");
mainloop = isc_loop_main(loopmgr);
result = dst_lib_init(mctx, NULL);
debug("udp_ready(%p, %s, %p)", handle, isc_result_totext(eresult),
query);
- LOCK_LOOKUP;
lookup_attach(query->lookup, &l);
if (eresult == ISC_R_CANCELED || query->canceled) {
query_detach(&query);
lookup_detach(&l);
clear_current_lookup();
- UNLOCK_LOOKUP;
return;
}
nssearch_next(l, query);
check_if_done();
- UNLOCK_LOOKUP;
return;
}
}
check_if_done();
- UNLOCK_LOOKUP;
return;
}
query_detach(&query);
lookup_detach(&l);
- UNLOCK_LOOKUP;
}
/*%
* Send a UDP packet to the remote nameserver, possible starting the
- * recv action as well.
+ * recv action as well. Also make sure that the timer is running and
+ * is properly reset.
*/
static void
start_udp(dig_query_t *query) {
*/
if (l->ns_search_only && !l->trace_root) {
nssearch_next(l, query);
-
check_if_done();
- UNLOCK_LOOKUP;
return;
}
* the timeout to much longer, so brief network
* outages won't cause the XFR to abort
*/
- if (timeout != INT_MAX) {
+ if (timeout != INT_MAX && query->timer != NULL) {
unsigned int local_timeout;
if (timeout == 0) {
isc_mem_stats(mctx, stderr);
}
- isc_managers_destroy(&loopmgr, &netmgr, &taskmgr);
+ isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr);
}
#ifdef HAVE_LIBIDN2
isc_time_t time_sent;
isc_time_t time_recv;
uint64_t byte_count;
+ isc_timer_t *timer;
};
struct dig_server {
#include <time.h>
#include <unistd.h>
-#include <isc/app.h>
#include <isc/atomic.h>
#include <isc/attributes.h>
#include <isc/base32.h>
#include <isc/file.h>
#include <isc/hash.h>
#include <isc/hex.h>
+#include <isc/job.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/md.h>
#include <isc/mem.h>
#include <isc/stdio.h>
#include <isc/string.h>
#include <isc/task.h>
+#include <isc/tid.h>
#include <isc/time.h>
#include <isc/util.h>
static unsigned int nverified = 0, nverifyfailed = 0;
static const char *directory = NULL, *dsdir = NULL;
static isc_mutex_t namelock, statslock;
-static isc_loopmgr_t *loopmgr = NULL;
static isc_nm_t *netmgr = NULL;
+static isc_loopmgr_t *loopmgr = NULL;
static isc_taskmgr_t *taskmgr = NULL;
static dns_db_t *gdb; /* The database */
static dns_dbversion_t *gversion; /* The database version */
static unsigned char saltbuf[255];
static unsigned char *gsalt = saltbuf;
static size_t salt_length = 0;
-static isc_task_t *main_task = NULL;
+static isc_task_t *write_task = NULL;
static unsigned int ntasks = 0;
static atomic_bool shuttingdown;
static atomic_bool finished;
result = dns_dbiterator_current(gdbiter, &node, name);
check_dns_dbiterator_current(result);
signname(node, name);
+ LOCK(&namelock);
dumpnode(name, node);
+ UNLOCK(&namelock);
cleannode(gdb, gversion, node);
dns_db_detachnode(gdb, &node);
result = dns_dbiterator_first(gdbiter);
* lock.
*/
static void
-assignwork(isc_task_t *task, isc_task_t *worker) {
- dns_fixedname_t *fname;
- dns_name_t *name;
- dns_dbnode_t *node;
- sevent_t *sevent;
+assignwork(isc_task_t *task) {
+ dns_fixedname_t *fname = NULL;
+ dns_name_t *name = NULL;
+ dns_dbnode_t *node = NULL;
+ sevent_t *sevent = NULL;
dns_rdataset_t nsec;
bool found;
isc_result_t result;
if (atomic_load(&finished)) {
ended++;
if (ended == ntasks) {
- isc_task_detach(&task);
- isc_app_shutdown();
+ isc_task_detach(&write_task);
+ isc_loopmgr_shutdown(loopmgr);
}
goto unlock;
}
if (!found) {
ended++;
if (ended == ntasks) {
- isc_task_detach(&task);
- isc_app_shutdown();
+ isc_task_detach(&write_task);
+ isc_loopmgr_shutdown(loopmgr);
}
isc_mem_put(mctx, fname, sizeof(dns_fixedname_t));
goto unlock;
sevent->node = node;
sevent->fname = fname;
- isc_task_send(worker, ISC_EVENT_PTR(&sevent));
+ isc_task_send(task, ISC_EVENT_PTR(&sevent));
unlock:
UNLOCK(&namelock);
}
* Start a worker task
*/
static void
-startworker(isc_task_t *task, isc_event_t *event) {
- isc_task_t *worker;
+startworker(void *arg) {
+ isc_task_t **tasks = (isc_task_t **)arg;
+ isc_result_t result;
+ int tid;
- worker = (isc_task_t *)event->ev_arg;
- assignwork(task, worker);
- isc_event_free(&event);
+ REQUIRE(tasks != NULL);
+
+ tid = isc_tid();
+ result = isc_task_create(taskmgr, &tasks[tid], tid);
+ if (result != ISC_R_SUCCESS) {
+ fatal("failed to create task: %s", isc_result_totext(result));
+ }
+
+ assignwork(tasks[tid]);
+}
+
+/*%
+ * Finish a worker task
+ */
+static void
+workerdone(void *arg) {
+ isc_task_t **tasks = (isc_task_t **)arg;
+
+ isc_task_detach(&tasks[isc_tid()]);
}
/*%
*/
static void
writenode(isc_task_t *task, isc_event_t *event) {
- isc_task_t *worker;
sevent_t *sevent = (sevent_t *)event;
- worker = (isc_task_t *)event->ev_sender;
+ LOCK(&namelock);
dumpnode(dns_fixedname_name(sevent->fname), sevent->node);
+ UNLOCK(&namelock);
cleannode(gdb, gversion, sevent->node);
dns_db_detachnode(gdb, &sevent->node);
isc_mem_put(mctx, sevent->fname, sizeof(dns_fixedname_t));
- assignwork(task, worker);
+ assignwork(task);
isc_event_free(&event);
}
dns_dbnode_t *node;
sevent_t *sevent, *wevent;
+ UNUSED(task);
+
sevent = (sevent_t *)event;
node = sevent->node;
fname = sevent->fname;
isc_event_free(&event);
signname(node, dns_fixedname_name(fname));
- wevent = (sevent_t *)isc_event_allocate(mctx, task, SIGNER_EVENT_WRITE,
- writenode, NULL,
- sizeof(sevent_t));
+ wevent = (sevent_t *)isc_event_allocate(mctx, write_task,
+ SIGNER_EVENT_WRITE, writenode,
+ NULL, sizeof(sevent_t));
wevent->node = node;
wevent->fname = fname;
- isc_task_send(main_task, ISC_EVENT_PTR(&wevent));
+ isc_task_send(write_task, ISC_EVENT_PTR(&wevent));
}
/*%
int
main(int argc, char *argv[]) {
- int i, ch;
+ int ch;
char *startstr = NULL, *endstr = NULL, *classname = NULL;
char *dnskey_endstr = NULL;
char *origin = NULL, *file = NULL, *output = NULL;
masterstyle = &dns_master_style_explicitttl;
- check_result(isc_app_start(), "isc_app_start");
-
- isc_mem_create(&mctx);
-
isc_commandline_errprint = false;
-
while ((ch = isc_commandline_parse(argc, argv, CMDLINE_FLAGS)) != -1) {
switch (ch) {
case '3':
}
}
- result = dst_lib_init(mctx, engine);
- if (result != ISC_R_SUCCESS) {
- fatal("could not initialize dst: %s",
- isc_result_totext(result));
- }
-
isc_stdtime_get(&now);
if (startstr != NULL) {
}
if (ntasks == 0) {
- ntasks = isc_os_ncpus() * 2;
+ ntasks = isc_os_ncpus();
}
vbprintf(4, "using %d cpus\n", ntasks);
directory = ".";
}
+ isc_managers_create(&mctx, ntasks, &loopmgr, &netmgr, &taskmgr);
+
+ isc_task_create(taskmgr, &write_task, 0);
+
+ result = dst_lib_init(mctx, engine);
+ if (result != ISC_R_SUCCESS) {
+ fatal("could not initialize dst: %s",
+ isc_result_totext(result));
+ }
+
setup_logging(mctx, &log);
argc -= isc_commandline_index;
print_time(outfp);
print_version(outfp);
- isc_managers_create(mctx, ntasks, 0, &loopmgr, &netmgr, &taskmgr);
-
- main_task = NULL;
- result = isc_task_create(taskmgr, 0, &main_task, 0);
- if (result != ISC_R_SUCCESS) {
- fatal("failed to create task: %s", isc_result_totext(result));
- }
-
- tasks = isc_mem_get(mctx, ntasks * sizeof(isc_task_t *));
- for (i = 0; i < (int)ntasks; i++) {
- tasks[i] = NULL;
- result = isc_task_create(taskmgr, 0, &tasks[i], i);
- if (result != ISC_R_SUCCESS) {
- fatal("failed to create task: %s",
- isc_result_totext(result));
- }
- }
-
isc_mutex_init(&namelock);
if (printstats) {
* There is more work to do. Spread it out over multiple
* processors if possible.
*/
- for (i = 0; i < (int)ntasks; i++) {
- result = isc_app_onrun(mctx, main_task, startworker,
- tasks[i]);
- if (result != ISC_R_SUCCESS) {
- fatal("failed to start task: %s",
- isc_result_totext(result));
- }
- }
- (void)isc_app_run();
+ tasks = isc_mem_get(mctx, ntasks * sizeof(isc_task_t *));
+ memset(tasks, 0, ntasks * sizeof(isc_task_t *));
+
+ isc_loopmgr_setup(loopmgr, startworker, tasks);
+ isc_loopmgr_teardown(loopmgr, workerdone, tasks);
+
+ isc_loopmgr_run(loopmgr);
+
if (!atomic_load(&finished)) {
fatal("process aborted by user");
}
- } else {
- isc_task_detach(&main_task);
+
+ isc_mem_put(mctx, tasks, ntasks * sizeof(isc_task_t *));
}
atomic_store(&shuttingdown, true);
- for (i = 0; i < (int)ntasks; i++) {
- isc_task_detach(&tasks[i]);
- }
- isc_managers_destroy(&loopmgr, &netmgr, &taskmgr);
- isc_mem_put(mctx, tasks, ntasks * sizeof(isc_task_t *));
postsign();
TIME_NOW(&sign_finish);
check_result(result, "dns_master_dumptostream3");
}
- isc_mutex_destroy(&namelock);
- if (printstats) {
- isc_mutex_destroy(&statslock);
- }
-
if (!output_stdout) {
result = isc_stdio_close(outfp);
check_result(result, "isc_stdio_close");
if (verbose > 10) {
isc_mem_stats(mctx, stdout);
}
- isc_mem_destroy(&mctx);
- (void)isc_app_finish();
+ isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr);
if (printstats) {
TIME_NOW(&timer_finish);
print_stats(&timer_start, &timer_finish, &sign_start,
&sign_finish);
+ isc_mutex_destroy(&statslock);
}
+ isc_mutex_destroy(&namelock);
return (vresult == ISC_R_SUCCESS ? 0 : 1);
}
#include <stdlib.h>
#include <time.h>
-#include <isc/app.h>
#include <isc/attributes.h>
#include <isc/base32.h>
#include <isc/commandline.h>
}
}
isc_commandline_reset = true;
- check_result(isc_app_start(), "isc_app_start");
isc_mem_create(&mctx);
}
isc_mem_destroy(&mctx);
- (void)isc_app_finish();
-
return (result == ISC_R_SUCCESS ? 0 : 1);
}
#include <stdbool.h>
-#include <isc/app.h>
#include <isc/event.h>
#include <isc/lex.h>
#include <isc/mem.h>
named_smf_want_disable = 1;
}
/*
- * If named_smf_got_instance = 0, named_smf_chroot
- * is not relevant and we fall through to
- * isc_app_shutdown below.
+ * If named_smf_got_instance = 0, named_smf_chroot is
+ * not relevant and we fall through to shutdown below.
*/
#endif /* ifdef HAVE_LIBSCF */
/* Do not flush master files */
#include <inttypes.h>
#include <stdbool.h>
-#include <isc/app.h>
#include <isc/base64.h>
#include <isc/buffer.h>
#include <isc/event.h>
#include <named/config.h>
#include <named/control.h>
#include <named/log.h>
+#include <named/main.h>
#include <named/server.h>
typedef struct controlkey controlkey_t;
isc_refcount_destroy(&listener->refs);
- if (listener->sock != NULL) {
- isc_nmsocket_close(&listener->sock);
- }
+ REQUIRE(listener->sock == NULL);
free_controlkeylist(&listener->keys, listener->mctx);
}
isc_nm_stoplistening(listener->sock);
+ isc_nmsocket_close(&listener->sock);
maybe_free_listener(listener);
}
conn->sending = false;
if (conn->result == ISC_R_SHUTTINGDOWN) {
- isc_app_shutdown();
+ isc_loopmgr_shutdown(named_g_loopmgr);
goto cleanup_sendhandle;
}
#include <string.h>
#include <unistd.h>
-#include <isc/app.h>
#include <isc/condition.h>
+#include <isc/loop.h>
#include <isc/mutex.h>
#include <isc/thread.h>
#include <isc/util.h>
close(sockfd);
named_server_flushonshutdown(named_g_server,
false);
- isc_app_shutdown();
+ isc_loopmgr_shutdown(named_g_loopmgr);
return (NULL);
}
raise(SIGSTOP);
close(sockfd);
named_server_flushonshutdown(named_g_server, false);
- isc_app_shutdown();
+ isc_loopmgr_shutdown(named_g_loopmgr);
return (NULL);
}
close(listenfd);
named_server_flushonshutdown(named_g_server,
false);
- isc_app_shutdown();
+ isc_loopmgr_shutdown(named_g_loopmgr);
return (NULL);
}
raise(SIGSTOP);
close(sockfd);
close(listenfd);
named_server_flushonshutdown(named_g_server, false);
- isc_app_shutdown();
+ isc_loopmgr_shutdown(named_g_loopmgr);
#ifdef __AFL_LOOP
/*
free(buf);
close(sockfd);
named_server_flushonshutdown(named_g_server, false);
- isc_app_shutdown();
+ isc_loopmgr_shutdown(named_g_loopmgr);
return (NULL);
}
#ifdef ENABLE_AFL
if (getenv("AFL_CMIN")) {
named_server_flushonshutdown(named_g_server, false);
- isc_app_shutdown();
+ isc_loopmgr_shutdown(named_g_loopmgr);
return;
}
#include <isc/log.h>
#include <isc/magic.h>
#include <isc/quota.h>
+#include <isc/signal.h>
#include <isc/sockaddr.h>
#include <isc/tls.h>
#include <isc/types.h>
#define NAMED_EVENT_DELZONE (NAMED_EVENTCLASS + 1)
#define NAMED_EVENT_COMMAND (NAMED_EVENTCLASS + 2)
#define NAMED_EVENT_TATSEND (NAMED_EVENTCLASS + 3)
-#define NAMED_EVENT_SHUTDOWN (NAMED_EVENTCLASS + 4)
+#define NAMED_EVENT_RUN (NAMED_EVENTCLASS + 4)
+#define NAMED_EVENT_SHUTDOWN (NAMED_EVENTCLASS + 5)
/*%
* Name server state. Better here than in lots of separate global variables.
isc_tlsctx_cache_t *tlsctx_server_cache;
isc_tlsctx_cache_t *tlsctx_client_cache;
+
+ isc_signal_t *sighup;
};
#define NAMED_SERVER_MAGIC ISC_MAGIC('S', 'V', 'E', 'R')
*/
void
-named_server_reloadwanted(named_server_t *server);
+named_server_reloadwanted(void *arg, int signum);
/*%<
* Inform a server that a reload is wanted. This function
* may be called asynchronously, from outside the server's task.
named_log_init(bool safe) {
isc_result_t result;
isc_logconfig_t *lcfg = NULL;
+ isc_mem_t *log_mctx = NULL;
named_g_categories = categories;
named_g_modules = modules;
/*
* Setup a logging context.
*/
- isc_log_create(named_g_mctx, &named_g_lctx, &lcfg);
+ isc_mem_create(&log_mctx);
+ isc_log_create(log_mctx, &named_g_lctx, &lcfg);
+ isc_mem_detach(&log_mctx);
/*
* named-checktool.c:setup_logging() needs to be kept in sync.
#include <protobuf-c/protobuf-c.h>
#endif
-#include <isc/app.h>
#include <isc/attributes.h>
#include <isc/backtrace.h>
#include <isc/commandline.h>
#include <isc/print.h>
#include <isc/resource.h>
#include <isc/result.h>
+#include <isc/signal.h>
#include <isc/stdio.h>
#include <isc/string.h>
#include <isc/task.h>
static isc_result_t
create_managers(void) {
- isc_result_t result;
-
+ /*
+ * Set the default named_g_cpus if it was not set from the command line
+ */
INSIST(named_g_cpus_detected > 0);
-
if (named_g_cpus == 0) {
named_g_cpus = named_g_cpus_detected;
}
+
isc_log_write(
named_g_lctx, NAMED_LOGCATEGORY_GENERAL, NAMED_LOGMODULE_SERVER,
ISC_LOG_INFO, "found %u CPU%s, using %u worker thread%s",
"using %u UDP listener%s per interface", named_g_udpdisp,
named_g_udpdisp == 1 ? "" : "s");
- result = isc_managers_create(named_g_mctx, named_g_cpus,
- 0 /* quantum */, &named_g_loopmgr,
- &named_g_netmgr, &named_g_taskmgr);
- if (result != ISC_R_SUCCESS) {
- return (result);
- }
+ isc_managers_create(&named_g_mctx, named_g_cpus, &named_g_loopmgr,
+ &named_g_netmgr, &named_g_taskmgr);
isc_nm_maxudp(named_g_netmgr, maxudp);
return (ISC_R_SUCCESS);
}
-static void
-destroy_managers(void) {
- isc_managers_destroy(&named_g_loopmgr, &named_g_netmgr,
- &named_g_taskmgr);
-}
-
static void
setup(void) {
isc_result_t result;
named_os_daemonize();
}
- /*
- * We call isc_app_start() here as some versions of FreeBSD's fork()
- * destroys all the signal handling it sets up.
- */
- result = isc_app_start();
- if (result != ISC_R_SUCCESS) {
- named_main_earlyfatal("isc_app_start() failed: %s",
- isc_result_totext(result));
- }
-
isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
NAMED_LOGMODULE_MAIN, ISC_LOG_NOTICE,
"starting %s%s <id:%s>", PACKAGE_STRING,
static void
cleanup(void) {
- named_server_shutdown(named_g_server);
-
- destroy_managers();
-
if (named_g_mapped != NULL) {
dns_acl_detach(&named_g_mapped);
}
}
}
- isc_mem_create(&named_g_mctx);
- isc_mem_setname(named_g_mctx, "main");
-
setup();
+ isc_mem_setname(named_g_mctx, "main");
/*
- * Start things running and then wait for a shutdown request
- * or reload.
+ * Start things running
*/
- do {
- result = isc_app_run();
+ isc_signal_start(named_g_server->sighup);
- if (result == ISC_R_RELOAD) {
- named_server_reloadwanted(named_g_server);
- } else if (result != ISC_R_SUCCESS) {
- UNEXPECTED_ERROR(__FILE__, __LINE__,
- "isc_app_run(): %s",
- isc_result_totext(result));
- /*
- * Force exit.
- */
- result = ISC_R_SUCCESS;
- }
- } while (result != ISC_R_SUCCESS);
+ isc_loopmgr_run(named_g_loopmgr);
#ifdef HAVE_LIBSCF
if (named_smf_want_disable == 1) {
(void)isc_stdio_close(fp);
}
}
- isc_mem_destroy(&named_g_mctx);
+
+ isc_managers_destroy(&named_g_mctx, &named_g_loopmgr, &named_g_netmgr,
+ &named_g_taskmgr);
isc_mem_checkdestroyed(stderr);
named_main_setmemstats(NULL);
- isc_app_finish();
-
named_os_closedevnull();
named_os_shutdown();
#include <ctype.h>
#include <inttypes.h>
#include <limits.h>
+#include <signal.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/stat.h>
#endif
#include <isc/aes.h>
-#include <isc/app.h>
#include <isc/attributes.h>
#include <isc/base64.h>
#include <isc/commandline.h>
#include <isc/hex.h>
#include <isc/hmac.h>
#include <isc/httpd.h>
+#include <isc/job.h>
#include <isc/lex.h>
#include <isc/loop.h>
#include <isc/meminfo.h>
#include <isc/refcount.h>
#include <isc/resource.h>
#include <isc/result.h>
+#include <isc/signal.h>
#include <isc/siphash.h>
#include <isc/stat.h>
#include <isc/stats.h>
/* Mark view unfrozen so that zone can be added */
- result = isc_task_beginexclusive(task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(task);
dns_view_thaw(ev->view);
result = configure_zone(cfg->config, zoneobj, cfg->vconfig, ev->view,
&ev->cbd->server->viewlist,
char cname[DNS_NAME_FORMATSIZE];
const char *file;
- result = isc_task_beginexclusive(task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(task);
dns_name_format(dns_catz_entry_getname(ev->entry), cname,
DNS_NAME_FORMATSIZE);
* string or NULL, with a dynamically allocated copy of the
* null-terminated string pointed to by 'value', or NULL.
*/
-static isc_result_t
+static void
setstring(named_server_t *server, char **field, const char *value) {
char *copy;
}
*field = copy;
- return (ISC_R_SUCCESS);
}
/*
* string or NULL, with another dynamically allocated string
* or NULL if whether 'obj' is a string or void value, respectively.
*/
-static isc_result_t
+static void
setoptstring(named_server_t *server, char **field, const cfg_obj_t *obj) {
if (cfg_obj_isvoid(obj)) {
- return (setstring(server, field, NULL));
+ setstring(server, field, NULL);
} else {
- return (setstring(server, field, cfg_obj_asstring(obj)));
+ setstring(server, field, cfg_obj_asstring(obj));
}
}
dns_kasp_t *default_kasp = NULL;
dns_kasplist_t tmpkasplist, kasplist;
const cfg_obj_t *views;
- dns_view_t *view = NULL;
+
dns_view_t *view_next = NULL;
dns_viewlist_t tmpviewlist;
dns_viewlist_t viewlist, builtin_viewlist;
in_port_t listen_port, udpport_low, udpport_high;
int i, backlog;
- bool exclusive = false;
isc_interval_t interval;
isc_logconfig_t *logc = NULL;
isc_portset_t *v4portset = NULL;
uint32_t max;
uint64_t initial, idle, keepalive, advertised;
bool loadbalancesockets;
+ bool exclusive = true;
dns_aclenv_t *env =
ns_interfacemgr_getaclenv(named_g_server->interfacemgr);
ISC_LIST_INIT(cachelist);
ISC_LIST_INIT(altsecrets);
+ /* Ensure exclusive access to configuration data. */
+ isc_task_beginexclusive(server->task);
+
/* Create the ACL configuration context */
if (named_g_aclconfctx != NULL) {
cfg_aclconfctx_detach(&named_g_aclconfctx);
}
- CHECK(cfg_aclconfctx_create(named_g_mctx, &named_g_aclconfctx));
+ result = cfg_aclconfctx_create(named_g_mctx, &named_g_aclconfctx);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_exclusive;
+ }
/*
* Shut down all dyndb instances.
isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
NAMED_LOGMODULE_SERVER, ISC_LOG_INFO,
"loading configuration from '%s'", filename);
- CHECK(cfg_parser_create(named_g_mctx, named_g_lctx, &conf_parser));
+ result = cfg_parser_create(named_g_mctx, named_g_lctx, &conf_parser);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_exclusive;
+ }
+
cfg_parser_setcallback(conf_parser, directory_callback, NULL);
result = cfg_parse_file(conf_parser, filename, &cfg_type_namedconf,
&config);
-
- CHECK(result);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_conf_parser;
+ }
/*
* Check the validity of the configuration.
* checked later when the modules are actually loaded and
* registered.)
*/
- CHECK(bind9_check_namedconf(config, false, named_g_lctx, named_g_mctx));
+ result = bind9_check_namedconf(config, false, named_g_lctx,
+ named_g_mctx);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_config;
+ }
/* Let's recreate the TLS context cache */
if (server->tlsctx_server_cache != NULL) {
obj = NULL;
result = named_config_get(maps, "bindkeys-file", &obj);
INSIST(result == ISC_R_SUCCESS);
- CHECKM(setstring(server, &server->bindkeysfile, cfg_obj_asstring(obj)),
- "strdup");
+ setstring(server, &server->bindkeysfile, cfg_obj_asstring(obj));
INSIST(server->bindkeysfile != NULL);
if (access(server->bindkeysfile, R_OK) == 0) {
"from file '%s'",
server->bindkeysfile);
- CHECK(cfg_parser_create(named_g_mctx, named_g_lctx,
- &bindkeys_parser));
+ result = cfg_parser_create(named_g_mctx, named_g_lctx,
+ &bindkeys_parser);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_config;
+ }
result = cfg_parse_file(bindkeys_parser, server->bindkeysfile,
&cfg_type_bindkeys, &bindkeys);
server->bindkeysfile);
}
- /* Ensure exclusive access to configuration data. */
- if (!exclusive) {
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
- exclusive = true;
- }
-
/*
* Set process limits, which (usually) needs to be done as root.
*/
/*
* Check the process lockfile.
*/
- CHECK(check_lockfile(server, config, first_time));
+ result = check_lockfile(server, config, first_time);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_bindkeys_parser;
+ }
#if defined(HAVE_GEOIP2)
/*
"'recursive-clients %d' too low when "
"running with %d worker threads",
max, named_g_cpus);
- CHECK(ISC_R_RANGE);
+ result = ISC_R_RANGE;
+
+ goto cleanup_bindkeys_parser;
}
softquota = max - margin;
} else {
* Set "blackhole". Only legal at options level; there is
* no default.
*/
- CHECK(configure_view_acl(NULL, config, NULL, "blackhole", NULL,
- named_g_aclconfctx, named_g_mctx,
- &server->sctx->blackholeacl));
+ result = configure_view_acl(NULL, config, NULL, "blackhole", NULL,
+ named_g_aclconfctx, named_g_mctx,
+ &server->sctx->blackholeacl);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_bindkeys_parser;
+ }
+
if (server->sctx->blackholeacl != NULL) {
dns_dispatchmgr_setblackhole(named_g_dispatchmgr,
server->sctx->blackholeacl);
INSIST(result == ISC_R_SUCCESS);
env->match_mapped = cfg_obj_asboolean(obj);
- CHECKM(named_statschannels_configure(named_g_server, config,
- named_g_aclconfctx),
- "configuring statistics server(s)");
-
/*
* Configure the network manager
*/
/*
* Configure sets of UDP query source ports.
*/
- CHECKM(isc_portset_create(named_g_mctx, &v4portset), "creating UDP "
- "port set");
- CHECKM(isc_portset_create(named_g_mctx, &v6portset), "creating UDP "
- "port set");
+ result = isc_portset_create(named_g_mctx, &v4portset);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "creating UDP/IPv4 port set: %s",
+ isc_result_totext(result));
+ goto cleanup_bindkeys_parser;
+ }
+ isc_portset_create(named_g_mctx, &v6portset);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "creating UDP/IPv6 port set: %s",
+ isc_result_totext(result));
+ goto cleanup_v4portset;
+ }
usev4ports = NULL;
usev6ports = NULL;
if (usev4ports != NULL) {
portset_fromconf(v4portset, usev4ports, true);
} else {
- CHECKM(isc_net_getudpportrange(AF_INET, &udpport_low,
- &udpport_high),
- "get the default UDP/IPv4 port range");
+ result = isc_net_getudpportrange(AF_INET, &udpport_low,
+ &udpport_high);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "get the default UDP/IPv4 port range: %s",
+ isc_result_totext(result));
+ goto cleanup_v6portset;
+ }
+
if (udpport_low == udpport_high) {
isc_portset_add(v4portset, udpport_low);
} else {
if (usev6ports != NULL) {
portset_fromconf(v6portset, usev6ports, true);
} else {
- CHECKM(isc_net_getudpportrange(AF_INET6, &udpport_low,
- &udpport_high),
- "get the default UDP/IPv6 port range");
+ result = isc_net_getudpportrange(AF_INET6, &udpport_low,
+ &udpport_high);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "get the default UDP/IPv6 port range: %s",
+ isc_result_totext(result));
+ goto cleanup_v6portset;
+ }
if (udpport_low == udpport_high) {
isc_portset_add(v6portset, udpport_low);
} else {
if (named_g_port != 0) {
listen_port = named_g_port;
} else {
- CHECKM(named_config_getport(config, "port", &listen_port),
- "port");
+ result = named_config_getport(config, "port", &listen_port);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_v6portset;
+ }
}
/*
* Determining the default DSCP code point.
*/
- CHECKM(named_config_getdscp(config, &named_g_dscp), "dscp");
+ result = named_config_getdscp(config, &named_g_dscp);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR, "dscp: %s",
+ isc_result_totext(result));
+ goto cleanup_v6portset;
+ }
/*
* Find the listen queue depth.
(void)cfg_map_get(options, "listen-on", &clistenon);
}
if (clistenon != NULL) {
- CHECK(listenlist_fromconfig(
+ result = listenlist_fromconfig(
clistenon, config, named_g_aclconfctx,
named_g_mctx, AF_INET,
- server->tlsctx_server_cache, &listenon));
+ server->tlsctx_server_cache, &listenon);
} else {
/*
* Not specified, use default.
*/
- CHECK(ns_listenlist_default(named_g_mctx, listen_port,
- -1, true, AF_INET,
- &listenon));
+ result = ns_listenlist_default(named_g_mctx,
+ listen_port, -1, true,
+ AF_INET, &listenon);
}
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_v6portset;
+ }
+
if (listenon != NULL) {
ns_interfacemgr_setlistenon4(server->interfacemgr,
listenon);
(void)cfg_map_get(options, "listen-on-v6", &clistenon);
}
if (clistenon != NULL) {
- CHECK(listenlist_fromconfig(
+ result = listenlist_fromconfig(
clistenon, config, named_g_aclconfctx,
named_g_mctx, AF_INET6,
- server->tlsctx_server_cache, &listenon));
+ server->tlsctx_server_cache, &listenon);
} else {
/*
* Not specified, use default.
*/
- CHECK(ns_listenlist_default(named_g_mctx, listen_port,
- -1, true, AF_INET6,
- &listenon));
+ result = ns_listenlist_default(named_g_mctx,
+ listen_port, -1, true,
+ AF_INET6, &listenon);
+ }
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_v6portset;
}
if (listenon != NULL) {
ns_interfacemgr_setlistenon6(server->interfacemgr,
}
}
- /*
- * Rescan the interface list to pick up changes in the
- * listen-on option. It's important that we do this before we try
- * to configure the query source, since the dispatcher we use might
- * be shared with an interface.
- */
- result = ns_interfacemgr_scan(server->interfacemgr, true, true);
-
- /*
- * Check that named is able to TCP listen on at least one
- * interface. Otherwise, another named process could be running
- * and we should fail.
- */
- if (first_time && (result == ISC_R_ADDRINUSE)) {
- isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
- NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
- "unable to listen on any configured interfaces");
- result = ISC_R_FAILURE;
- goto cleanup;
- }
-
/*
* Arrange for further interface scanning to occur periodically
* as specified by the "interface-interval" option.
cfg_obj_t *kconfig = cfg_listelt_value(element);
kasp = NULL;
- CHECK(cfg_kasp_fromconfig(kconfig, default_kasp, named_g_mctx,
- named_g_lctx, &kasplist, &kasp));
+ result = cfg_kasp_fromconfig(kconfig, default_kasp,
+ named_g_mctx, named_g_lctx,
+ &kasplist, &kasp);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_kasplist;
+ }
INSIST(kasp != NULL);
dns_kasp_freeze(kasp);
{
cfg_obj_t *kconfig = cfg_listelt_value(element);
kasp = NULL;
- CHECK(cfg_kasp_fromconfig(kconfig, default_kasp, named_g_mctx,
- named_g_lctx, &kasplist, &kasp));
+ result = cfg_kasp_fromconfig(kconfig, default_kasp,
+ named_g_mctx, named_g_lctx,
+ &kasplist, &kasp);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_kasplist;
+ }
INSIST(kasp != NULL);
dns_kasp_freeze(kasp);
dns_kasp_detach(&kasp);
element = cfg_list_next(element))
{
cfg_obj_t *vconfig = cfg_listelt_value(element);
+ dns_view_t *view = NULL;
- view = NULL;
-
- CHECK(create_view(vconfig, &viewlist, &view));
+ result = create_view(vconfig, &viewlist, &view);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_viewlist;
+ }
INSIST(view != NULL);
- CHECK(setup_newzones(view, config, vconfig, conf_parser,
- named_g_aclconfctx));
-
+ result = setup_newzones(view, config, vconfig, conf_parser,
+ named_g_aclconfctx);
dns_view_detach(&view);
+
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_viewlist;
+ }
}
/*
* view here.
*/
if (views == NULL) {
- CHECK(create_view(NULL, &viewlist, &view));
+ dns_view_t *view = NULL;
+
+ result = create_view(NULL, &viewlist, &view);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_viewlist;
+ }
INSIST(view != NULL);
- CHECK(setup_newzones(view, config, NULL, conf_parser,
- named_g_aclconfctx));
+ result = setup_newzones(view, config, NULL, conf_parser,
+ named_g_aclconfctx);
dns_view_detach(&view);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_viewlist;
+ }
}
/*
element = cfg_list_next(element))
{
cfg_obj_t *vconfig = cfg_listelt_value(element);
+ dns_view_t *view = NULL;
view = NULL;
- CHECK(find_view(vconfig, &viewlist, &view));
- CHECK(configure_view(view, &viewlist, config, vconfig,
- &cachelist, &server->kasplist, bindkeys,
- named_g_mctx, named_g_aclconfctx, true));
+ result = find_view(vconfig, &viewlist, &view);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_cachelist;
+ }
+
+ result = configure_view(view, &viewlist, config, vconfig,
+ &cachelist, &server->kasplist, bindkeys,
+ named_g_mctx, named_g_aclconfctx, true);
+ if (result != ISC_R_SUCCESS) {
+ dns_view_detach(&view);
+ goto cleanup_cachelist;
+ }
dns_view_freeze(view);
dns_view_detach(&view);
}
* were no explicit views.
*/
if (views == NULL) {
- view = NULL;
- CHECK(find_view(NULL, &viewlist, &view));
- CHECK(configure_view(view, &viewlist, config, NULL, &cachelist,
- &server->kasplist, bindkeys, named_g_mctx,
- named_g_aclconfctx, true));
+ dns_view_t *view = NULL;
+ result = find_view(NULL, &viewlist, &view);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_cachelist;
+ }
+ result = configure_view(view, &viewlist, config, NULL,
+ &cachelist, &server->kasplist, bindkeys,
+ named_g_mctx, named_g_aclconfctx, true);
+ if (result != ISC_R_SUCCESS) {
+ dns_view_detach(&view);
+ goto cleanup_cachelist;
+ }
dns_view_freeze(view);
dns_view_detach(&view);
}
element = cfg_list_next(element))
{
cfg_obj_t *vconfig = cfg_listelt_value(element);
+ dns_view_t *view = NULL;
- CHECK(create_view(vconfig, &builtin_viewlist, &view));
- CHECK(configure_view(view, &viewlist, config, vconfig,
- &cachelist, &server->kasplist, bindkeys,
- named_g_mctx, named_g_aclconfctx, false));
+ result = create_view(vconfig, &builtin_viewlist, &view);
+ if (result != ISC_R_SUCCESS) {
+ goto cleanup_cachelist;
+ }
+
+ result = configure_view(view, &viewlist, config, vconfig,
+ &cachelist, &server->kasplist, bindkeys,
+ named_g_mctx, named_g_aclconfctx,
+ false);
+ if (result != ISC_R_SUCCESS) {
+ dns_view_detach(&view);
+ goto cleanup_cachelist;
+ }
dns_view_freeze(view);
dns_view_detach(&view);
- view = NULL;
}
/* Now combine the two viewlists into one */
* Commit any dns_zone_setview() calls on all zones in the new
* view.
*/
- for (view = ISC_LIST_HEAD(viewlist); view != NULL;
+ for (dns_view_t *view = ISC_LIST_HEAD(viewlist); view != NULL;
view = ISC_LIST_NEXT(view, link))
{
dns_view_setviewcommit(view);
viewlist = tmpviewlist;
/* Make the view list available to each of the views */
- view = ISC_LIST_HEAD(server->viewlist);
- while (view != NULL) {
+ for (dns_view_t *view = ISC_LIST_HEAD(server->viewlist); view != NULL;
+ view = ISC_LIST_NEXT(view, link))
+ {
view->viewlist = &server->viewlist;
- view = ISC_LIST_NEXT(view, link);
}
/* Swap our new cache list with the production one. */
/* Load the TKEY information from the configuration. */
if (options != NULL) {
- dns_tkeyctx_t *t = NULL;
- CHECKM(named_tkeyctx_fromconfig(options, named_g_mctx, &t),
- "configuring TKEY");
+ dns_tkeyctx_t *tkeyctx = NULL;
+
+ result = named_tkeyctx_fromconfig(options, named_g_mctx,
+ &tkeyctx);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "configuring TKEY: %s",
+ isc_result_totext(result));
+ goto cleanup_cachelist;
+ }
if (server->sctx->tkeyctx != NULL) {
dns_tkeyctx_destroy(&server->sctx->tkeyctx);
}
- server->sctx->tkeyctx = t;
+ server->sctx->tkeyctx = tkeyctx;
}
- /*
- * Bind the control port(s).
- */
- CHECKM(named_controls_configure(named_g_server->controls, config,
- named_g_aclconfctx),
- "binding control channel(s)");
-
#ifdef HAVE_LMDB
/*
* If we're using LMDB, we may have created newzones databases
* after relinquishing privileges them.
*/
if (first_time) {
- for (view = ISC_LIST_HEAD(server->viewlist); view != NULL;
- view = ISC_LIST_NEXT(view, link))
+ for (dns_view_t *view = ISC_LIST_HEAD(server->viewlist);
+ view != NULL; view = ISC_LIST_NEXT(view, link))
{
nzd_env_close(view);
}
NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
"the working directory is not writable");
result = ISC_R_NOPERM;
- goto cleanup;
+ goto cleanup_cachelist;
}
#ifdef HAVE_LMDB
* Reopen NZD databases.
*/
if (first_time) {
- for (view = ISC_LIST_HEAD(server->viewlist); view != NULL;
- view = ISC_LIST_NEXT(view, link))
+ for (dns_view_t *view = ISC_LIST_HEAD(server->viewlist);
+ view != NULL; view = ISC_LIST_NEXT(view, link))
{
nzd_env_reopen(view);
}
"checking logging configuration "
"failed: %s",
isc_result_totext(result));
- goto cleanup;
+ goto cleanup_cachelist;
}
}
} else {
logobj = NULL;
(void)cfg_map_get(config, "logging", &logobj);
if (logobj != NULL) {
- CHECKM(named_logconfig(logc, logobj),
- "configuring logging");
+ result = named_logconfig(logc, logobj);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(
+ named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "configuring logging: %s",
+ isc_result_totext(result));
+ goto cleanup_logc;
+ }
} else {
named_log_setdefaultchannels(logc);
named_log_setdefaultsslkeylogfile(logc);
- CHECKM(named_log_setunmatchedcategory(logc),
- "setting up default 'category unmatched'");
- CHECKM(named_log_setdefaultcategory(logc),
- "setting up default 'category default'");
+ result = named_log_setunmatchedcategory(logc);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(
+ named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "setting up default 'category "
+ "unmatched': %s",
+ isc_result_totext(result));
+ goto cleanup_logc;
+ }
+ result = named_log_setdefaultcategory(logc);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(
+ named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "setting up default 'category "
+ "default': %s",
+ isc_result_totext(result));
+ goto cleanup_logc;
+ }
}
isc_logconfig_use(named_g_lctx, logc);
obj = NULL;
result = named_config_get(maps, "statistics-file", &obj);
INSIST(result == ISC_R_SUCCESS);
- CHECKM(setstring(server, &server->statsfile, cfg_obj_asstring(obj)),
- "strdup");
+ setstring(server, &server->statsfile, cfg_obj_asstring(obj));
obj = NULL;
result = named_config_get(maps, "dump-file", &obj);
INSIST(result == ISC_R_SUCCESS);
- CHECKM(setstring(server, &server->dumpfile, cfg_obj_asstring(obj)),
- "strdup");
+ setstring(server, &server->dumpfile, cfg_obj_asstring(obj));
obj = NULL;
result = named_config_get(maps, "secroots-file", &obj);
INSIST(result == ISC_R_SUCCESS);
- CHECKM(setstring(server, &server->secrootsfile, cfg_obj_asstring(obj)),
- "strdup");
+ setstring(server, &server->secrootsfile, cfg_obj_asstring(obj));
obj = NULL;
result = named_config_get(maps, "recursing-file", &obj);
INSIST(result == ISC_R_SUCCESS);
- CHECKM(setstring(server, &server->recfile, cfg_obj_asstring(obj)),
- "strdup");
+ setstring(server, &server->recfile, cfg_obj_asstring(obj));
obj = NULL;
result = named_config_get(maps, "version", &obj);
if (result == ISC_R_SUCCESS) {
- CHECKM(setoptstring(server, &server->version, obj), "strdup");
+ setoptstring(server, &server->version, obj);
server->version_set = true;
} else {
server->version_set = false;
obj = NULL;
result = named_config_get(maps, "hostname", &obj);
if (result == ISC_R_SUCCESS) {
- CHECKM(setoptstring(server, &server->hostname, obj), "strdup");
+ setoptstring(server, &server->hostname, obj);
server->hostname_set = true;
} else {
server->hostname_set = false;
result = isc_hex_decodestring(str, &b);
if (result != ISC_R_SUCCESS &&
result != ISC_R_NOSPACE) {
- goto cleanup;
+ goto cleanup_altsecrets;
}
first = false;
} else {
isc_mem_put(server->sctx->mctx,
altsecret,
sizeof(*altsecret));
- goto cleanup;
+ goto cleanup_altsecrets;
}
ISC_LIST_INITANDAPPEND(altsecrets, altsecret,
link);
case ns_cookiealg_siphash24:
expectedlength = ISC_SIPHASH24_KEY_LENGTH;
if (usedlength != expectedlength) {
- CHECKM(ISC_R_RANGE, "SipHash-2-4 "
- "cookie-secret "
- "must be 128 bits");
+ result = ISC_R_RANGE;
+ isc_log_write(
+ named_g_lctx,
+ NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER,
+ ISC_LOG_ERROR,
+ "SipHash-2-4 cookie-secret "
+ "must be 128 bits: %s",
+ isc_result_totext(result));
+ goto cleanup_altsecrets;
}
break;
case ns_cookiealg_aes:
expectedlength = ISC_AES128_KEYLENGTH;
if (usedlength != expectedlength) {
- CHECKM(ISC_R_RANGE, "AES cookie-secret "
- "must be 128 bits");
+ result = ISC_R_RANGE;
+ isc_log_write(
+ named_g_lctx,
+ NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER,
+ ISC_LOG_ERROR,
+ "AES cookie-secret must be 128 "
+ "bits: %s",
+ isc_result_totext(result));
+ goto cleanup_altsecrets;
}
break;
}
result = dns_dnsrps_connect(view->rpzs);
if (result != ISC_R_SUCCESS) {
view = NULL;
- goto cleanup;
+ goto cleanup_altsecrets;
}
}
#endif /* ifdef USE_DNSRPS */
- result = ISC_R_SUCCESS;
-
-cleanup:
- if (logc != NULL) {
- isc_logconfig_destroy(&logc);
+ /*
+ * Record the time of most recent configuration
+ */
+ tresult = isc_time_now(&named_g_configtime);
+ if (tresult != ISC_R_SUCCESS) {
+ named_main_earlyfatal("isc_time_now() failed: %s",
+ isc_result_totext(result));
}
- if (v4portset != NULL) {
- isc_portset_destroy(named_g_mctx, &v4portset);
+ isc_task_endexclusive(server->task);
+ exclusive = false;
+
+ /* Configure the statistics channel(s) */
+ result = named_statschannels_configure(named_g_server, config,
+ named_g_aclconfctx);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "configuring statistics server(s): %s",
+ isc_result_totext(result));
+ goto cleanup_altsecrets;
}
- if (v6portset != NULL) {
- isc_portset_destroy(named_g_mctx, &v6portset);
+ /*
+ * Bind the control port(s).
+ */
+ result = named_controls_configure(named_g_server->controls, config,
+ named_g_aclconfctx);
+ if (result != ISC_R_SUCCESS) {
+ isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "binding control channel(s): %s",
+ isc_result_totext(result));
+ goto cleanup_altsecrets;
}
- if (conf_parser != NULL) {
- if (config != NULL) {
- cfg_obj_destroy(conf_parser, &config);
- }
- cfg_parser_destroy(&conf_parser);
+ /*
+ * Rescan the interface list to pick up changes in the
+ * listen-on option. It's important that we do this before we try
+ * to configure the query source, since the dispatcher we use might
+ * be shared with an interface.
+ */
+ result = ns_interfacemgr_scan(server->interfacemgr, true, true);
+
+ /*
+ * Check that named is able to TCP listen on at least one
+ * interface. Otherwise, another named process could be running
+ * and we should fail.
+ */
+ if (first_time && (result == ISC_R_ADDRINUSE)) {
+ isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+ NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+ "unable to listen on any configured interfaces");
+ result = ISC_R_FAILURE;
+ goto cleanup_altsecrets;
}
- if (bindkeys_parser != NULL) {
- if (bindkeys != NULL) {
- cfg_obj_destroy(bindkeys_parser, &bindkeys);
- }
- cfg_parser_destroy(&bindkeys_parser);
+ /*
+ * These cleans up either the old production view list
+ * or our temporary list depending on whether they
+ * were swapped above or not.
+ */
+cleanup_altsecrets:
+ while ((altsecret = ISC_LIST_HEAD(altsecrets)) != NULL) {
+ ISC_LIST_UNLINK(altsecrets, altsecret, link);
+ isc_mem_put(server->sctx->mctx, altsecret, sizeof(*altsecret));
}
- if (view != NULL) {
- dns_view_detach(&view);
+cleanup_logc:
+ if (logc != NULL) {
+ isc_logconfig_destroy(&logc);
}
- if (kasp != NULL) {
- dns_kasp_detach(&kasp);
+cleanup_cachelist:
+ while ((nsc = ISC_LIST_HEAD(cachelist)) != NULL) {
+ ISC_LIST_UNLINK(cachelist, nsc, link);
+ dns_cache_detach(&nsc->cache);
+ isc_mem_put(server->mctx, nsc, sizeof(*nsc));
}
ISC_LIST_APPENDLIST(viewlist, builtin_viewlist, link);
- /*
- * This cleans up either the old production view list
- * or our temporary list depending on whether they
- * were swapped above or not.
- */
- for (view = ISC_LIST_HEAD(viewlist); view != NULL; view = view_next) {
+cleanup_viewlist:
+ for (dns_view_t *view = ISC_LIST_HEAD(viewlist); view != NULL;
+ view = view_next) {
view_next = ISC_LIST_NEXT(view, link);
ISC_LIST_UNLINK(viewlist, view, link);
if (result == ISC_R_SUCCESS && strcmp(view->name, "_bind") != 0)
dns_view_detach(&view);
}
- /*
- * Same cleanup for kasp list.
- */
+cleanup_kasplist:
for (kasp = ISC_LIST_HEAD(kasplist); kasp != NULL; kasp = kasp_next) {
kasp_next = ISC_LIST_NEXT(kasp, link);
ISC_LIST_UNLINK(kasplist, kasp, link);
dns_kasp_detach(&kasp);
}
- /* Same cleanup for cache list. */
- while ((nsc = ISC_LIST_HEAD(cachelist)) != NULL) {
- ISC_LIST_UNLINK(cachelist, nsc, link);
- dns_cache_detach(&nsc->cache);
- isc_mem_put(server->mctx, nsc, sizeof(*nsc));
- }
+cleanup_v6portset:
+ isc_portset_destroy(named_g_mctx, &v6portset);
- /* Cleanup for altsecrets list. */
- while ((altsecret = ISC_LIST_HEAD(altsecrets)) != NULL) {
- ISC_LIST_UNLINK(altsecrets, altsecret, link);
- isc_mem_put(server->sctx->mctx, altsecret, sizeof(*altsecret));
- }
+cleanup_v4portset:
+ isc_portset_destroy(named_g_mctx, &v4portset);
- /*
- * Record the time of most recent configuration
- */
- tresult = isc_time_now(&named_g_configtime);
- if (tresult != ISC_R_SUCCESS) {
- named_main_earlyfatal("isc_time_now() failed: %s",
- isc_result_totext(result));
+cleanup_bindkeys_parser:
+
+ if (bindkeys_parser != NULL) {
+ if (bindkeys != NULL) {
+ cfg_obj_destroy(bindkeys_parser, &bindkeys);
+ }
+ cfg_parser_destroy(&bindkeys_parser);
}
- /* Relinquish exclusive access to configuration data. */
+cleanup_config:
+ cfg_obj_destroy(conf_parser, &config);
+
+cleanup_conf_parser:
+ cfg_parser_destroy(&conf_parser);
+
+cleanup_exclusive:
if (exclusive) {
isc_task_endexclusive(server->task);
}
static isc_result_t
load_zones(named_server_t *server, bool reconfig) {
- isc_result_t result;
+ isc_result_t result = ISC_R_SUCCESS;
ns_zoneload_t *zl = NULL;
dns_view_t *view = NULL;
zl->server = server;
zl->reconfig = reconfig;
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(server->task);
isc_refcount_init(&zl->refs, 1);
run_server(isc_task_t *task, isc_event_t *event) {
isc_result_t result;
named_server_t *server = (named_server_t *)event->ev_arg;
- dns_geoip_databases_t *geoip;
+ dns_geoip_databases_t *geoip = NULL;
+
+ fprintf(stderr, "%s\n", __func__);
INSIST(task == server->task);
#endif /* if defined(HAVE_GEOIP2) */
CHECKFATAL(ns_interfacemgr_create(
- named_g_mctx, server->sctx, named_g_taskmgr,
- named_g_loopmgr, named_g_netmgr, named_g_dispatchmgr,
+ named_g_mctx, server->sctx, named_g_loopmgr,
+ named_g_taskmgr, named_g_netmgr, named_g_dispatchmgr,
server->task, geoip, true, &server->interfacemgr),
"creating interface manager");
#endif /* ifdef ENABLE_AFL */
}
+static void
+launch_server(void *arg) {
+ named_server_t *server = (named_server_t *)arg;
+ isc_event_t *event = isc_event_allocate(named_g_mctx, server->task,
+ NAMED_EVENT_RUN, run_server,
+ server, sizeof(*event));
+ isc_task_send(server->task, &event);
+}
+
void
named_server_flushonshutdown(named_server_t *server, bool flush) {
REQUIRE(NAMED_SERVER_VALID(server));
static void
shutdown_server(isc_task_t *task, isc_event_t *event) {
- isc_result_t result;
dns_view_t *view, *view_next = NULL;
dns_kasp_t *kasp, *kasp_next = NULL;
named_server_t *server = (named_server_t *)event->ev_arg;
bool flush = server->flushonshutdown;
named_cache_t *nsc;
- UNUSED(task);
INSIST(task == server->task);
+ isc_event_free(&event);
+
/*
* We need to shutdown the interface before going
* exclusive (which would pause the netmgr).
*/
ns_interfacemgr_shutdown(server->interfacemgr);
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ named_controls_shutdown(server->controls);
+
+ named_statschannels_shutdown(server);
+
+ isc_task_beginexclusive(server->task);
isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
NAMED_LOGMODULE_SERVER, ISC_LOG_INFO, "shutting down%s",
flush ? ": flushing changes" : "");
- named_statschannels_shutdown(server);
- named_controls_shutdown(server->controls);
end_reserved_dispatches(server, true);
cleanup_session_key(server, server->mctx);
isc_task_endexclusive(server->task);
isc_task_detach(&server->task);
+}
- isc_event_free(&event);
+static void
+close_server(void *arg) {
+ named_server_t *server = arg;
+
+ /*
+ * Cleanup loopmgr resources directly, because shuttingdown the server
+ * happens on async task
+ */
+ isc_signal_stop(server->sighup);
+ isc_signal_destroy(&server->sighup);
+
+ isc_event_t *event = isc_event_allocate(
+ named_g_mctx, server->task, NAMED_EVENT_SHUTDOWN,
+ shutdown_server, server, sizeof(*event));
+ isc_task_send(server->task, &event);
}
void
* startup and shutdown of the server, as well as all exclusive
* tasks.
*/
- CHECKFATAL(isc_task_create(named_g_taskmgr, 0, &server->task, 0),
+ CHECKFATAL(isc_task_create(named_g_taskmgr, &server->task, 0),
"creating server task");
isc_task_setname(server->task, "server", server);
isc_taskmgr_setexcltask(named_g_taskmgr, server->task);
named_g_mainloop = isc_loop_main(named_g_loopmgr);
- CHECKFATAL(
- isc_app_onrun(named_g_mctx, server->task, run_server, server),
- "isc_app_onrun");
+ isc_loop_setup(named_g_mainloop, launch_server, server);
+ isc_loop_teardown(named_g_mainloop, close_server, server);
+
+ /* Add SIGHUP reload handler */
+ server->sighup = isc_signal_new(
+ named_g_loopmgr, named_server_reloadwanted, server, SIGHUP);
server->interface_timer = NULL;
server->heartbeat_timer = NULL;
* function and any other OpenSSL calls from other tasks
* by requesting exclusive access to the task manager.
*/
- (void)isc_task_beginexclusive(server->task);
+ isc_task_beginexclusive(server->task);
}
isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
NAMED_LOGMODULE_SERVER, ISC_LOG_CRITICAL, "%s: %s", msg,
named_server_t *server = (named_server_t *)event->ev_sender;
INSIST(task == server->task);
- UNUSED(task);
isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
NAMED_LOGMODULE_SERVER, ISC_LOG_INFO,
}
void
-named_server_reloadwanted(named_server_t *server) {
+named_server_reloadwanted(void *arg, int signum) {
+ named_server_t *server = (named_server_t *)arg;
+
+ REQUIRE(signum == SIGHUP);
+
isc_event_t *event = isc_event_allocate(
named_g_mctx, server, NAMED_EVENT_RELOAD, named_server_reload,
NULL, sizeof(isc_event_t));
/* Look for the view name. */
ptr = next_token(lex, text);
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(server->task);
for (view = ISC_LIST_HEAD(server->viewlist); view != NULL;
view = ISC_LIST_NEXT(view, link))
{
/* Look for the view name. */
ptr = next_token(lex, NULL);
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(server->task);
flushed = true;
found = false;
/* Look for the view name. */
viewname = next_token(lex, NULL);
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(server->task);
flushed = true;
found = false;
for (view = ISC_LIST_HEAD(server->viewlist); view != NULL;
viewname = next_token(lex, text);
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(server->task);
for (view = ISC_LIST_HEAD(server->viewlist); view != NULL;
view = ISC_LIST_NEXT(view, link))
{
}
if (zone == NULL) {
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(server->task);
tresult = ISC_R_SUCCESS;
for (view = ISC_LIST_HEAD(server->viewlist); view != NULL;
view = ISC_LIST_NEXT(view, link))
return (tresult);
}
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(server->task);
result = synczone(zone, &cleanup);
isc_task_endexclusive(server->task);
return (result);
}
if (mayberaw == NULL) {
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(server->task);
tresult = ISC_R_SUCCESS;
for (view = ISC_LIST_HEAD(server->viewlist); view != NULL;
view = ISC_LIST_NEXT(view, link))
return (DNS_R_NOTDYNAMIC);
}
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(server->task);
frozen = dns_zone_getupdatedisabled(mayberaw);
if (freeze) {
if (frozen) {
goto cleanup;
}
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(server->task);
#ifndef HAVE_LMDB
/*
isc_result_t result, tresult;
dns_zone_t *zone = NULL;
bool added;
- bool exclusive = false;
#ifndef HAVE_LMDB
FILE *fp = NULL;
cfg_obj_t *z;
}
#endif /* ifndef HAVE_LMDB */
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
- exclusive = true;
+ isc_task_beginexclusive(server->task);
#ifndef HAVE_LMDB
/* Make sure we can open the configuration save file */
TCHECK(putstr(text, view->new_zone_file));
TCHECK(putstr(text, "': "));
TCHECK(putstr(text, isc_result_totext(result)));
+ isc_task_endexclusive(server->task);
goto cleanup;
}
(void)isc_stdio_close(fp);
TCHECK(putstr(text, view->new_zone_db));
TCHECK(putstr(text, "'"));
result = ISC_R_FAILURE;
+ isc_task_endexclusive(server->task);
goto cleanup;
}
#endif /* HAVE_LMDB */
true, false, true);
dns_view_freeze(view);
- exclusive = false;
isc_task_endexclusive(server->task);
if (result != ISC_R_SUCCESS) {
}
cleanup:
- if (exclusive) {
- isc_task_endexclusive(server->task);
- }
#ifndef HAVE_LMDB
if (fp != NULL) {
const dns_name_t *ntaname;
dns_name_t *fname;
dns_ttl_t ntattl;
- bool ttlset = false, excl = false, viewfound = false;
+ bool ttlset = false, viewfound = false;
dns_rdataclass_t rdclass = dns_rdataclass_in;
bool first = true;
isc_stdtime_get(&now);
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
- excl = true;
+ isc_task_beginexclusive(server->task);
for (view = ISC_LIST_HEAD(server->viewlist); view != NULL;
view = ISC_LIST_NEXT(view, link))
{
} else if (result == ISC_R_NOTFOUND) {
wasremoved = false;
} else {
- goto cleanup;
+ goto cleanup_exclusive;
}
if (!first) {
if (!viewfound) {
msg = "No such view";
- CHECK(ISC_R_NOTFOUND);
+ result = ISC_R_NOTFOUND;
+ } else {
+ (void)putnull(text);
}
- (void)putnull(text);
+cleanup_exclusive:
+ isc_task_endexclusive(server->task);
cleanup:
+
if (msg != NULL) {
(void)putstr(text, msg);
(void)putnull(text);
}
- if (excl) {
- isc_task_endexclusive(server->task);
- }
if (ntatable != NULL) {
dns_ntatable_detach(&ntatable);
}
mkey_destroy(named_server_t *server, dns_view_t *view, isc_buffer_t **text) {
isc_result_t result;
char msg[DNS_NAME_FORMATSIZE + 500] = "";
- bool exclusive = false;
const char *file = NULL;
dns_db_t *dbp = NULL;
dns_zone_t *mkzone = NULL;
view->name);
CHECK(putstr(text, msg));
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
- exclusive = true;
+ isc_task_beginexclusive(server->task);
/* Remove and clean up managed keys zone from view */
mkzone = view->managed_keys;
result = ISC_R_SUCCESS;
cleanup:
- if (exclusive) {
- isc_task_endexclusive(server->task);
- }
+ isc_task_endexclusive(server->task);
return (result);
}
CHECK(ISC_R_RANGE);
}
- result = isc_task_beginexclusive(named_g_server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(named_g_server->task);
isc_nm_settimeouts(named_g_netmgr, initial, idle, keepalive,
advertised);
dns_stale_answer_t staleanswersok = dns_stale_answer_conf;
bool wantstatus = false;
isc_result_t result = ISC_R_SUCCESS;
- bool exclusive = false;
REQUIRE(text != NULL);
}
}
- result = isc_task_beginexclusive(server->task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
- exclusive = true;
+ isc_task_beginexclusive(server->task);
for (view = ISC_LIST_HEAD(server->viewlist); view != NULL;
view = ISC_LIST_NEXT(view, link))
switch (view->staleanswersok) {
case dns_stale_answer_yes:
if (stale_ttl > 0) {
- CHECK(putstr(text, "stale cache enabled; stale "
+ CHECK(putstr(text, "stale cache "
+ "enabled; stale "
"answers enabled"));
} else {
- CHECK(putstr(text,
- "stale cache disabled; stale "
- "answers unavailable"));
+ CHECK(putstr(text, "stale cache disabled; "
+ "stale "
+ "answers unavailable"));
}
break;
case dns_stale_answer_no:
if (stale_ttl > 0) {
- CHECK(putstr(text, "stale cache enabled; stale "
+ CHECK(putstr(text, "stale cache "
+ "enabled; stale "
"answers disabled"));
} else {
- CHECK(putstr(text,
- "stale cache disabled; stale "
- "answers unavailable"));
+ CHECK(putstr(text, "stale cache disabled; "
+ "stale "
+ "answers unavailable"));
}
break;
case dns_stale_answer_conf:
if (view->staleanswersenable && stale_ttl > 0) {
- CHECK(putstr(text, "stale cache enabled; stale "
+ CHECK(putstr(text, "stale cache "
+ "enabled; stale "
"answers enabled"));
} else if (stale_ttl > 0) {
- CHECK(putstr(text, "stale cache enabled; stale "
+ CHECK(putstr(text, "stale cache "
+ "enabled; stale "
"answers disabled"));
} else {
- CHECK(putstr(text,
- "stale cache disabled; stale "
- "answers unavailable"));
+ CHECK(putstr(text, "stale cache disabled; "
+ "stale "
+ "answers unavailable"));
}
break;
}
if (stale_ttl > 0) {
snprintf(msg, sizeof(msg),
- " (stale-answer-ttl=%u max-stale-ttl=%u "
+ " (stale-answer-ttl=%u "
+ "max-stale-ttl=%u "
"stale-refresh-time=%u)",
view->staleanswerttl, stale_ttl,
stale_refresh);
}
cleanup:
- if (exclusive) {
- isc_task_endexclusive(named_g_server->task);
- }
+ isc_task_endexclusive(named_g_server->task);
if (isc_buffer_usedlength(*text) > 0) {
(void)putnull(text);
#include <stdlib.h>
#include <unistd.h>
-#include <isc/app.h>
#include <isc/attributes.h>
#include <isc/base64.h>
#include <isc/buffer.h>
#include <isc/event.h>
#include <isc/file.h>
#include <isc/hash.h>
+#include <isc/job.h>
#include <isc/lex.h>
#include <isc/log.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
static bool tried_other_gsstsig = false;
static bool local_only = false;
static isc_nm_t *netmgr = NULL;
-static isc_loopmgr_t *loopmgr = NULL;
static isc_taskmgr_t *taskmgr = NULL;
+static isc_loopmgr_t *loopmgr = NULL;
static isc_task_t *global_task = NULL;
-static isc_event_t *global_event = NULL;
static isc_log_t *glctx = NULL;
static isc_mem_t *gmctx = NULL;
static dns_dispatchmgr_t *dispatchmgr = NULL;
static bool checknames = true;
static const char *resolvconf = RESOLV_CONF;
+bool done = false;
+
typedef struct nsu_requestinfo {
dns_message_t *msg;
isc_sockaddr_t *addr;
static void
send_update(dns_name_t *zonename, isc_sockaddr_t *primary);
+static void
+getinput(void *arg);
+
noreturn static void
fatal(const char *format, ...) ISC_FORMAT_PRINTF(1, 2);
static void
doshutdown(void) {
- isc_task_detach(&global_task);
-
/*
* The isc_mem_put of primary_servers must be before the
* isc_mem_put of servers as it sets the servers pointer
}
static void
-shutdown_program(void) {
+shutdown_program(void *arg) {
+ UNUSED(arg);
+
ddebug("shutdown_program()");
shuttingdown = true;
irs_resconf_destroy(&resconf);
- result = isc_managers_create(gmctx, 1, 0, &loopmgr, &netmgr, &taskmgr);
- check_result(result, "isc_managers_create");
-
result = dns_dispatchmgr_create(gmctx, netmgr, &dispatchmgr);
check_result(result, "dns_dispatchmgr_create");
- result = isc_task_create(taskmgr, 0, &global_task, 0);
+ result = isc_task_create(taskmgr, &global_task, 0);
check_result(result, "isc_task_create");
result = dst_lib_init(gmctx, NULL);
int count = 0;
isc_result_t result;
- isc_app_block();
+ isc_loopmgr_blocking(loopmgr);
result = bind9_getaddresses(host, port, sockaddr, naddrs, &count);
- isc_app_unblock();
+ isc_loopmgr_nonblocking(loopmgr);
if (result != ISC_R_SUCCESS) {
error("couldn't get address for '%s': %s", host,
isc_result_totext(result));
char cmdlinebuf[MAXCMD];
char *cmdline = NULL, *ptr = NULL;
- isc_app_block();
if (interactive) {
cmdline = ptr = readline("> ");
if (ptr != NULL && *ptr != 0) {
} else {
cmdline = fgets(cmdlinebuf, MAXCMD, input);
}
- isc_app_unblock();
if (cmdline != NULL) {
char *tmp = cmdline;
static void
done_update(void) {
- isc_event_t *event = global_event;
ddebug("done_update()");
- isc_task_send(global_task, &event);
+
+ isc_job_run(loopmgr, getinput, NULL);
}
static void
}
UNLOCK(&answer_lock);
- ddebug("Shutting down managers");
- isc_managers_destroy(&loopmgr, &netmgr, &taskmgr);
-
#if HAVE_GSSAPI
if (tsigkey != NULL) {
ddebug("detach tsigkey x%p", tsigkey);
dst_key_free(&sig0key);
}
- ddebug("Destroying event");
- isc_event_free(&global_event);
-
#ifdef HAVE_GSSAPI
/*
* Cleanup GSSAPI resources after taskmgr has been destroyed.
if (memdebugging) {
isc_mem_stats(gmctx, stderr);
}
- isc_mem_destroy(&gmctx);
isc_mutex_destroy(&answer_lock);
dst_lib_destroy();
is_dst_up = false;
}
+
+ ddebug("Shutting down managers");
+ isc_managers_destroy(&gmctx, &loopmgr, &netmgr, &taskmgr);
}
static void
-getinput(isc_task_t *task, isc_event_t *event) {
+getinput(void *arg) {
bool more;
- UNUSED(task);
+ UNUSED(arg);
if (shuttingdown) {
maybeshutdown();
return;
}
- if (global_event == NULL) {
- global_event = event;
- }
-
reset_system();
+ isc_loopmgr_blocking(loopmgr);
more = user_interaction();
+ isc_loopmgr_nonblocking(loopmgr);
if (!more) {
- isc_app_shutdown();
+ isc_task_detach(&global_task);
+ isc_loopmgr_shutdown(loopmgr);
return;
}
+
+ done = false;
start_update();
- return;
}
int
main(int argc, char **argv) {
- isc_result_t result;
style = &dns_master_style_debug;
input = stdin;
interactive = isatty(0);
- isc_app_start();
-
if (isc_net_probeipv4() == ISC_R_SUCCESS) {
have_ipv4 = true;
}
pre_parse_args(argc, argv);
- isc_mem_create(&gmctx);
+ isc_managers_create(&gmctx, 1, &loopmgr, &netmgr, &taskmgr);
parse_args(argc, argv);
setup_system();
- result = isc_app_onrun(gmctx, global_task, getinput, NULL);
- check_result(result, "isc_app_onrun");
-
- (void)isc_app_run();
-
- shutdown_program();
+ isc_loopmgr_setup(loopmgr, getinput, NULL);
+ isc_loopmgr_teardown(loopmgr, shutdown_program, NULL);
+ isc_loopmgr_run(loopmgr);
cleanup();
- isc_app_finish();
-
if (seenerror) {
return (2);
} else {
#include <stdbool.h>
#include <stdlib.h>
-#include <isc/app.h>
#include <isc/atomic.h>
#include <isc/attributes.h>
#include <isc/buffer.h>
#include <isc/commandline.h>
-#include <isc/event.h>
#include <isc/file.h>
#include <isc/log.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/mem.h>
#include <isc/net.h>
bool verbose;
static isc_nm_t *netmgr = NULL;
-static isc_loopmgr_t *loopmgr = NULL;
static isc_taskmgr_t *taskmgr = NULL;
+static isc_loopmgr_t *loopmgr = NULL;
static isc_task_t *rndc_task = NULL;
static const char *admin_conffile = NULL;
atomic_load_acquire(&recvs) == 0)
{
shuttingdown = true;
- isc_app_shutdown();
+ isc_task_detach(&rndc_task);
+ isc_loopmgr_shutdown(loopmgr);
}
}
atomic_fetch_sub_release(&recvs, 1) == 1)
{
shuttingdown = true;
- isc_app_shutdown();
+ isc_task_detach(&rndc_task);
+ isc_loopmgr_shutdown(loopmgr);
}
}
}
static void
-rndc_start(isc_task_t *task, isc_event_t *event) {
- isc_event_free(&event);
-
- UNUSED(task);
+rndc_start(void *arg) {
+ UNUSED(arg);
currentaddr = 0;
rndc_startconnect(&serveraddrs[currentaddr]);
isc_sockaddr_any(&local4);
isc_sockaddr_any6(&local6);
- result = isc_app_start();
- if (result != ISC_R_SUCCESS) {
- fatal("isc_app_start() failed: %s", isc_result_totext(result));
- }
-
isc_commandline_errprint = false;
preparse_args(argc, argv);
serial = isc_random32();
- isc_mem_create(&rndc_mctx);
- isc_managers_create(rndc_mctx, 1, 0, &loopmgr, &netmgr, &taskmgr);
- DO("create task", isc_task_create(taskmgr, 0, &rndc_task, 0));
+ isc_managers_create(&rndc_mctx, 1, &loopmgr, &netmgr, &taskmgr);
+ isc_loopmgr_setup(loopmgr, rndc_start, rndc_task);
+
+ DO("create task", isc_task_create(taskmgr, &rndc_task, 0));
isc_log_create(rndc_mctx, &log, &logconfig);
isc_log_setcontext(log);
isc_log_settag(logconfig, progname);
get_addresses(servername, (in_port_t)remoteport);
}
- DO("post event", isc_app_onrun(rndc_mctx, rndc_task, rndc_start, NULL));
-
- result = isc_app_run();
- if (result != ISC_R_SUCCESS) {
- fatal("isc_app_run() failed: %s", isc_result_totext(result));
- }
-
- isc_task_detach(&rndc_task);
- isc_managers_destroy(&loopmgr, &netmgr, &taskmgr);
+ isc_loopmgr_run(loopmgr);
/*
* Note: when TCP connections are shut down, there will be a final
isc_mem_stats(rndc_mctx, stderr);
}
- isc_mem_destroy(&rndc_mctx);
+ isc_managers_destroy(&rndc_mctx, &loopmgr, &netmgr, &taskmgr);
if (failed) {
return (1);
db.c \
driver.c \
instance.c \
- lock.c \
log.c \
syncptr.c \
zone.c \
db.h \
instance.h \
- lock.h \
log.h \
syncptr.h \
util.h \
+++ /dev/null
-/*
- * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
- *
- * SPDX-License-Identifier: MPL-2.0 AND ISC
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at https://mozilla.org/MPL/2.0/.
- *
- * See the COPYRIGHT file distributed with this work for additional
- * information regarding copyright ownership.
- */
-
-/*
- * Copyright (C) Red Hat
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND AUTHORS DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "lock.h"
-
-#include <isc/task.h>
-#include <isc/util.h>
-
-/*
- * Lock BIND dispatcher and allow only single task to run.
- *
- * @warning
- * All calls to isc_task_beginexclusive() have to operate on the same task
- * otherwise it would not be possible to distinguish recursive locking
- * from real conflict on the dispatcher lock.
- * For this reason this wrapper function always works with inst->task.
- * As a result, this function have to be be called only from inst->task.
- *
- * Recursive locking is allowed. Auxiliary variable pointed to by "statep"
- * stores information if last run_exclusive_enter() operation really locked
- * something or if the lock was called recursively and was no-op.
- *
- * The pair (inst, state) used for run_exclusive_enter() has to be
- * used for run_exclusive_exit().
- *
- * @param[in] inst The instance with the only task which is allowed to
- * run.
- * @param[in,out] statep Lock state: ISC_R_SUCCESS or ISC_R_LOCKBUSY
- */
-void
-run_exclusive_enter(sample_instance_t *inst, isc_result_t *statep) {
- REQUIRE(statep != NULL);
- REQUIRE(*statep == ISC_R_IGNORE);
-
- *statep = isc_task_beginexclusive(inst->task);
- RUNTIME_CHECK(*statep == ISC_R_SUCCESS || *statep == ISC_R_LOCKBUSY);
-}
-
-/*
- * Exit task-exclusive mode.
- *
- * @param[in] inst The instance used for previous run_exclusive_enter() call.
- * @param[in] state Lock state as returned by run_exclusive_enter().
- */
-void
-run_exclusive_exit(sample_instance_t *inst, isc_result_t state) {
- if (state == ISC_R_SUCCESS) {
- isc_task_endexclusive(inst->task);
- } else {
- /* Unlocking recursive lock or the lock was never locked. */
- INSIST(state == ISC_R_LOCKBUSY || state == ISC_R_IGNORE);
- }
-
- return;
-}
+++ /dev/null
-/*
- * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
- *
- * SPDX-License-Identifier: MPL-2.0 AND ISC
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at https://mozilla.org/MPL/2.0/.
- *
- * See the COPYRIGHT file distributed with this work for additional
- * information regarding copyright ownership.
- */
-
-/*
- * Copyright (C) Red Hat
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND AUTHORS DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-#pragma once
-
-#include "instance.h"
-#include "util.h"
-
-void
-run_exclusive_enter(sample_instance_t *inst, isc_result_t *statep);
-
-void
-run_exclusive_exit(sample_instance_t *inst, isc_result_t state);
#include <inttypes.h>
#include <stdbool.h>
+#include <isc/task.h>
#include <isc/util.h>
#include <dns/dyndb.h>
#include <dns/zone.h>
#include "instance.h"
-#include "lock.h"
#include "log.h"
#include "util.h"
bool freeze = false;
dns_zone_t *zone_in_view = NULL;
dns_view_t *view_in_zone = NULL;
- isc_result_t lock_state = ISC_R_IGNORE;
REQUIRE(inst != NULL);
REQUIRE(zone != NULL);
CLEANUP_WITH(ISC_R_UNEXPECTED);
}
- run_exclusive_enter(inst, &lock_state);
+ isc_task_beginexclusive(inst->task);
if (inst->view->frozen) {
freeze = true;
dns_view_thaw(inst->view);
if (freeze) {
dns_view_freeze(inst->view);
}
- run_exclusive_exit(inst, lock_state);
+ isc_task_endexclusive(inst->task);
return (result);
}
#include <string.h>
#include <unistd.h>
-#include <isc/app.h>
#include <isc/base64.h>
#include <isc/commandline.h>
#include <isc/hash.h>
#include <isc/log.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/mem.h>
#include <isc/net.h>
static isc_mem_t *mctx = NULL;
static dns_requestmgr_t *requestmgr = NULL;
+static isc_loopmgr_t *loopmgr = NULL;
static bool have_src = false;
static isc_sockaddr_t srcaddr;
static isc_sockaddr_t dstaddr;
isc_event_free(&event);
if (--onfly == 0) {
- isc_app_shutdown();
+ isc_task_detach(&task);
+ isc_loopmgr_shutdown(loopmgr);
}
return;
}
}
static void
-sendqueries(isc_task_t *task, isc_event_t *event) {
+sendqueries(void *arg) {
+ isc_task_t *task = (isc_task_t *)arg;
isc_result_t result;
- isc_event_free(&event);
-
do {
result = sendquery(task);
} while (result == ISC_R_SUCCESS);
if (onfly == 0) {
- isc_app_shutdown();
+ isc_task_detach(&task);
+ isc_loopmgr_shutdown(loopmgr);
}
return;
}
uint16_t port = PORT;
int c;
- RUNCHECK(isc_app_start());
-
isc_commandline_errprint = false;
while ((c = isc_commandline_parse(argc, argv, "p:r:")) != -1) {
switch (c) {
}
isc_sockaddr_fromin(&dstaddr, &inaddr, port);
- isc_mem_create(&mctx);
+ isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr);
isc_log_create(mctx, &lctx, &lcfg);
RUNCHECK(dst_lib_init(mctx, NULL));
- isc_managers_create(mctx, 1, 0, &netmgr, &taskmgr, NULL);
- RUNCHECK(isc_task_create(taskmgr, 0, &task, 0));
+ RUNCHECK(isc_task_create(taskmgr, &task, 0));
RUNCHECK(dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr));
RUNCHECK(dns_dispatch_createudp(
NULL, &requestmgr));
RUNCHECK(dns_view_create(mctx, 0, "_test", &view));
- RUNCHECK(isc_app_onrun(mctx, task, sendqueries, NULL));
- (void)isc_app_run();
+ isc_loopmgr_setup(loopmgr, sendqueries, task);
+ isc_loopmgr_run(loopmgr);
dns_view_detach(&view);
dns_dispatch_detach(&dispatchv4);
dns_dispatchmgr_detach(&dispatchmgr);
- isc_task_detach(&task);
-
- isc_managers_destroy(&netmgr, &taskmgr, NULL);
-
dst_lib_destroy();
isc_log_destroy(&lctx);
- isc_mem_destroy(&mctx);
-
- isc_app_finish();
+ isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr);
return (0);
}
#include <sys/types.h>
#include <unistd.h>
-#include <isc/app.h>
#include <isc/attributes.h>
#include <isc/base64.h>
#include <isc/buffer.h>
#include <isc/commandline.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
*/
isc_mem_t *ctxs_mctx = NULL;
+isc_loopmgr_t *ctxs_loopmgr = NULL;
isc_nm_t *ctxs_netmgr = NULL;
isc_taskmgr_t *ctxs_taskmgr = NULL;
-isc_timermgr_t *ctxs_timermgr = NULL;
-
-static void
-ctxs_destroy(void) {
- isc_managers_destroy(&ctxs_netmgr, &ctxs_taskmgr, &ctxs_timermgr);
-
- isc_mem_destroy(&ctxs_mctx);
-}
-
-static void
-ctxs_init(void) {
- isc_mem_create(&ctxs_mctx);
-
- isc_managers_create(ctxs_mctx, 1, 0, &ctxs_netmgr, &ctxs_taskmgr,
- &ctxs_timermgr);
-}
static char *algname = NULL;
result = dns_name_fromtext(name, &b, dns_rootname, 0, NULL);
if (result != ISC_R_SUCCESS) {
fprintf(stderr, "failed to convert qname: %u\n",
- result);
+ (unsigned int)result);
exit(1);
}
}
result = dns_client_setservers(client, dns_rdataclass_in, name,
&servers);
if (result != ISC_R_SUCCESS) {
- fprintf(stderr, "set server failed: %u\n", result);
+ fprintf(stderr, "set server failed: %u\n",
+ (unsigned int)result);
exit(1);
}
}
+static dns_name_t *qname = NULL;
+static unsigned int resopt = 0;
+static dns_rdatatype_t type = dns_rdatatype_a;
+
+static void
+resolve_cb(dns_client_t *client, const dns_name_t *query_name,
+ dns_namelist_t *namelist, isc_result_t result) {
+ UNUSED(query_name);
+
+ if (result != ISC_R_SUCCESS) {
+ fprintf(stderr, "resolution failed: %s\n",
+ isc_result_totext(result));
+ goto cleanup;
+ }
+
+ for (dns_name_t *name = ISC_LIST_HEAD(*namelist); name != NULL;
+ name = ISC_LIST_NEXT(name, link))
+ {
+ for (dns_rdataset_t *rdataset = ISC_LIST_HEAD(name->list);
+ rdataset != NULL; rdataset = ISC_LIST_NEXT(rdataset, link))
+ {
+ if (printdata(rdataset, name) != ISC_R_SUCCESS) {
+ fprintf(stderr, "print data failed\n");
+ }
+ }
+ }
+
+cleanup:
+ dns_client_freeresanswer(client, namelist);
+
+ dns_client_detach(&client);
+
+ isc_mem_put(ctxs_mctx, namelist, sizeof(*namelist));
+
+ isc_loopmgr_shutdown(ctxs_loopmgr);
+}
+
+static void
+resolve(void *arg) {
+ dns_client_t *client = (void *)arg;
+ dns_namelist_t *namelist = isc_mem_get(ctxs_mctx, sizeof(*namelist));
+ isc_result_t result;
+
+ ISC_LIST_INIT(*namelist);
+ result = dns_client_resolve(client, qname, dns_rdataclass_in, type,
+ resopt, namelist, resolve_cb);
+
+ if (result != ISC_R_SUCCESS) {
+ fprintf(stderr, "resolution failed: %s\n",
+ isc_result_totext(result));
+ isc_mem_put(ctxs_mctx, namelist, sizeof(*namelist));
+ isc_loopmgr_shutdown(ctxs_loopmgr);
+ }
+}
+
int
main(int argc, char *argv[]) {
int ch;
isc_buffer_t b;
dns_fixedname_t qname0;
unsigned int namelen;
- dns_name_t *qname = NULL, *name = NULL;
- dns_rdatatype_t type = dns_rdatatype_a;
- dns_rdataset_t *rdataset = NULL;
- dns_namelist_t namelist;
- unsigned int clientopt, resopt = 0;
+ unsigned int clientopt;
bool is_sep = false;
const char *port = "53";
struct in_addr in4;
altserveraddr = cp + 1;
}
- ctxs_init();
+ isc_managers_create(&ctxs_mctx, 1, &ctxs_loopmgr, &ctxs_netmgr,
+ &ctxs_taskmgr);
result = dst_lib_init(ctxs_mctx, NULL);
if (result != ISC_R_SUCCESS) {
- fprintf(stderr, "dst_lib_init failed: %u\n", result);
+ fprintf(stderr, "dst_lib_init failed: %u\n",
+ (unsigned int)result);
exit(1);
}
clientopt = 0;
- result = dns_client_create(ctxs_mctx, ctxs_taskmgr, ctxs_netmgr,
- ctxs_timermgr, clientopt, &client, addr4,
+ result = dns_client_create(ctxs_mctx, ctxs_loopmgr, ctxs_taskmgr,
+ ctxs_netmgr, clientopt, &client, addr4,
addr6);
if (result != ISC_R_SUCCESS) {
- fprintf(stderr, "dns_client_create failed: %u, %s\n", result,
- isc_result_totext(result));
+ fprintf(stderr, "dns_client_create failed: %u, %s\n",
+ (unsigned int)result, isc_result_totext(result));
exit(1);
}
&resconf);
if (result != ISC_R_SUCCESS && result != ISC_R_FILENOTFOUND) {
fprintf(stderr, "irs_resconf_load failed: %u\n",
- result);
+ (unsigned int)result);
exit(1);
}
nameservers = irs_resconf_getnameservers(resconf);
if (result != ISC_R_SUCCESS) {
irs_resconf_destroy(&resconf);
fprintf(stderr, "dns_client_setservers failed: %u\n",
- result);
+ (unsigned int)result);
exit(1);
}
irs_resconf_destroy(&resconf);
qname = dns_fixedname_initname(&qname0);
result = dns_name_fromtext(qname, &b, dns_rootname, 0, NULL);
if (result != ISC_R_SUCCESS) {
- fprintf(stderr, "failed to convert qname: %u\n", result);
+ fprintf(stderr, "failed to convert qname: %u\n",
+ (unsigned int)result);
+ exit(1);
}
/* Perform resolution */
if (keynamestr == NULL) {
resopt |= DNS_CLIENTRESOPT_NODNSSEC;
}
- ISC_LIST_INIT(namelist);
- result = dns_client_resolve(client, qname, dns_rdataclass_in, type,
- resopt, &namelist);
- if (result != ISC_R_SUCCESS) {
- fprintf(stderr, "resolution failed: %s\n",
- isc_result_totext(result));
- }
- for (name = ISC_LIST_HEAD(namelist); name != NULL;
- name = ISC_LIST_NEXT(name, link))
- {
- for (rdataset = ISC_LIST_HEAD(name->list); rdataset != NULL;
- rdataset = ISC_LIST_NEXT(rdataset, link))
- {
- if (printdata(rdataset, name) != ISC_R_SUCCESS) {
- fprintf(stderr, "print data failed\n");
- }
- }
- }
- dns_client_freeresanswer(client, &namelist);
+ isc_loopmgr_setup(ctxs_loopmgr, resolve, client);
- /* Cleanup */
- dns_client_detach(&client);
+ isc_loopmgr_run(ctxs_loopmgr);
- ctxs_destroy();
dst_lib_destroy();
+ isc_managers_destroy(&ctxs_mctx, &ctxs_loopmgr, &ctxs_netmgr,
+ &ctxs_taskmgr);
+
return (0);
}
# grep "<h2>Glue cache statistics</h2>" xsltproc.out.${n} >/dev/null || ret=1
grep "<h3>View _default" xsltproc.out.${n} >/dev/null || ret=1
grep "<h4>Zone example" xsltproc.out.${n} >/dev/null || ret=1
- grep "<h2>Task Manager Configuration</h2>" xsltproc.out.${n} >/dev/null || ret=1
+ # grep "<h2>Task Manager Configuration</h2>" xsltproc.out.${n} >/dev/null || ret=1
grep "<h2>Tasks</h2>" xsltproc.out.${n} >/dev/null || ret=1
grep "<h2>Memory Usage Summary</h2>" xsltproc.out.${n} >/dev/null || ret=1
grep "<h2>Memory Contexts</h2>" xsltproc.out.${n} >/dev/null || ret=1
#include <stdlib.h>
#include <string.h>
-#include <isc/app.h>
#include <isc/base64.h>
#include <isc/hash.h>
#include <isc/log.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
static dst_key_t *ourkey = NULL;
static isc_mem_t *mctx = NULL;
+static isc_loopmgr_t *loopmgr = NULL;
static dns_tsigkey_t *tsigkey = NULL, *initialkey = NULL;
static dns_tsig_keyring_t *ring = NULL;
static unsigned char noncedata[16];
dns_message_detach(&response);
dns_request_destroy(&reqev->request);
isc_event_free(&event);
- isc_app_shutdown();
- return;
+ isc_task_detach(&task);
+ isc_loopmgr_shutdown(loopmgr);
}
static void
-sendquery(isc_task_t *task, isc_event_t *event) {
+sendquery(void *arg) {
+ isc_task_t *task = (isc_task_t *)arg;
struct in_addr inaddr;
isc_sockaddr_t address;
isc_region_t r;
dns_request_t *request = NULL;
static char keystr[] = "0123456789ab";
- isc_event_free(&event);
-
result = ISC_R_FAILURE;
if (inet_pton(AF_INET, ip_address, &inaddr) != 1) {
CHECK("inet_pton", result);
isc_result_t result;
int type;
- RUNCHECK(isc_app_start());
-
if (argc < 4) {
fprintf(stderr, "I:no DH key provided\n");
exit(-1);
}
isc_mem_debugging = ISC_MEM_DEBUGRECORD;
- isc_mem_create(&mctx);
+
+ isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr);
isc_log_create(mctx, &log, &logconfig);
RUNCHECK(dst_lib_init(mctx, NULL));
- isc_managers_create(mctx, 1, 0, &netmgr, &taskmgr, NULL);
-
- RUNCHECK(isc_task_create(taskmgr, 0, &task, 0));
+ RUNCHECK(isc_task_create(taskmgr, &task, 0));
RUNCHECK(dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr));
isc_sockaddr_any(&bind_any);
dns_view_setkeyring(view, ring);
dns_tsigkeyring_detach(&ring);
- RUNCHECK(isc_app_onrun(mctx, task, sendquery, NULL));
-
type = DST_TYPE_PUBLIC | DST_TYPE_PRIVATE | DST_TYPE_KEY;
result = dst_key_fromnamedfile(ourkeyname, NULL, type, mctx, &ourkey);
CHECK("dst_key_fromnamedfile", result);
isc_nonce_buf(noncedata, sizeof(noncedata));
isc_buffer_add(&nonce, sizeof(noncedata));
- (void)isc_app_run();
+ isc_loopmgr_setup(loopmgr, sendquery, task);
+ isc_loopmgr_run(loopmgr);
dns_requestmgr_shutdown(requestmgr);
dns_requestmgr_detach(&requestmgr);
dns_dispatch_detach(&dispatchv4);
dns_dispatchmgr_detach(&dispatchmgr);
- isc_task_detach(&task);
- isc_managers_destroy(&netmgr, &taskmgr, NULL);
dst_key_free(&ourkey);
dns_tsigkey_detach(&initialkey);
dst_lib_destroy();
- isc_mem_destroy(&mctx);
-
- isc_app_finish();
+ isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr);
return (0);
}
#include <stdlib.h>
#include <string.h>
-#include <isc/app.h>
#include <isc/base64.h>
#include <isc/hash.h>
#include <isc/log.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
static char *ip_address = NULL;
static int port;
static isc_mem_t *mctx = NULL;
+static isc_loopmgr_t *loopmgr = NULL;
static dns_tsigkey_t *tsigkey = NULL;
static dns_tsig_keyring_t *ring = NULL;
static dns_requestmgr_t *requestmgr = NULL;
dns_message_detach(&response);
dns_request_destroy(&reqev->request);
isc_event_free(&event);
- isc_app_shutdown();
- return;
+ isc_task_detach(&task);
+ isc_loopmgr_shutdown(loopmgr);
}
static void
-sendquery(isc_task_t *task, isc_event_t *event) {
+sendquery(void *arg) {
+ isc_task_t *task = (isc_task_t *)arg;
struct in_addr inaddr;
isc_sockaddr_t address;
isc_result_t result;
dns_message_t *query = NULL;
dns_request_t *request = NULL;
- isc_event_free(&event);
-
result = ISC_R_FAILURE;
if (inet_pton(AF_INET, ip_address, &inaddr) != 1) {
CHECK("inet_pton", result);
isc_result_t result;
int type;
- RUNCHECK(isc_app_start());
-
if (argc < 4) {
fprintf(stderr, "I:no key to delete\n");
exit(-1);
port = atoi(argv[2]);
keyname = argv[3];
- isc_mem_create(&mctx);
+ isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr);
isc_log_create(mctx, &log, &logconfig);
RUNCHECK(dst_lib_init(mctx, NULL));
- isc_managers_create(mctx, 1, 0, &netmgr, &taskmgr, NULL);
-
- RUNCHECK(isc_task_create(taskmgr, 0, &task, 0));
+ RUNCHECK(isc_task_create(taskmgr, &task, 0));
RUNCHECK(dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr));
isc_sockaddr_any(&bind_any);
RUNCHECK(dns_dispatch_createudp(dispatchmgr, &bind_any, &dispatchv4));
RUNCHECK(dns_view_create(mctx, 0, "_test", &view));
dns_view_setkeyring(view, ring);
- RUNCHECK(isc_app_onrun(mctx, task, sendquery, NULL));
-
type = DST_TYPE_PUBLIC | DST_TYPE_PRIVATE | DST_TYPE_KEY;
result = dst_key_fromnamedfile(keyname, NULL, type, mctx, &dstkey);
CHECK("dst_key_fromnamedfile", result);
dst_key_free(&dstkey);
CHECK("dns_tsigkey_createfromkey", result);
- (void)isc_app_run();
+ isc_loopmgr_setup(loopmgr, sendquery, task);
+ isc_loopmgr_run(loopmgr);
dns_requestmgr_shutdown(requestmgr);
dns_requestmgr_detach(&requestmgr);
dns_dispatch_detach(&dispatchv4);
dns_dispatchmgr_detach(&dispatchmgr);
- isc_task_detach(&task);
- isc_managers_destroy(&netmgr, &taskmgr, NULL);
dns_tsigkeyring_detach(&ring);
dst_lib_destroy();
- isc_mem_destroy(&mctx);
-
- isc_app_finish();
+ isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr);
return (0);
}
#include <sys/types.h>
#include <unistd.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/mem.h>
#include <isc/netaddr.h>
"http-plain-get" };
static isc_mem_t *mctx = NULL;
-static isc_nm_t *netmgr = NULL;
static isc_loopmgr_t *loopmgr = NULL;
+static isc_nm_t *netmgr = NULL;
+static isc_taskmgr_t *taskmgr = NULL;
static protocol_t protocol;
static const char *address;
printf(" to %s, %d workers\n", buf, workers);
}
-static void
-_signal(int sig, void (*handler)(int)) {
- struct sigaction sa = { .sa_handler = handler };
-
- RUNTIME_CHECK(sigfillset(&sa.sa_mask) == 0);
- RUNTIME_CHECK(sigaction(sig, &sa, NULL) >= 0);
-}
-
static void
setup(void) {
- sigset_t sset;
-
- _signal(SIGPIPE, SIG_IGN);
- _signal(SIGHUP, SIG_DFL);
- _signal(SIGTERM, SIG_DFL);
- _signal(SIGINT, SIG_DFL);
-
- RUNTIME_CHECK(sigemptyset(&sset) == 0);
- RUNTIME_CHECK(sigaddset(&sset, SIGHUP) == 0);
- RUNTIME_CHECK(sigaddset(&sset, SIGINT) == 0);
- RUNTIME_CHECK(sigaddset(&sset, SIGTERM) == 0);
- RUNTIME_CHECK(pthread_sigmask(SIG_BLOCK, &sset, NULL) == 0);
-
- isc_mem_create(&mctx);
-
- isc_managers_create(mctx, workers, 0, &loopmgr, &netmgr, NULL);
+ isc_managers_create(&mctx, workers, &loopmgr, &netmgr, &taskmgr);
}
static void
close(out);
}
- isc_managers_destroy(&loopmgr, &netmgr, NULL);
- isc_mem_destroy(&mctx);
if (tls_ctx) {
isc_tlsctx_free(&tls_ctx);
}
+
+ isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr);
}
static void
static const char *protocols[] = { "udp", "tcp", "dot", "https", "http-plain" };
static isc_mem_t *mctx = NULL;
-static isc_nm_t *netmgr = NULL;
static isc_loopmgr_t *loopmgr = NULL;
+static isc_nm_t *netmgr = NULL;
+static isc_taskmgr_t *taskmgr = NULL;
static protocol_t protocol;
static in_port_t port;
workers);
}
-static void
-_signal(int sig, void (*handler)(int)) {
- struct sigaction sa = { .sa_handler = handler };
-
- RUNTIME_CHECK(sigfillset(&sa.sa_mask) == 0);
- RUNTIME_CHECK(sigaction(sig, &sa, NULL) >= 0);
-}
-
static void
setup(void) {
- sigset_t sset;
-
- _signal(SIGPIPE, SIG_IGN);
- _signal(SIGHUP, SIG_DFL);
- _signal(SIGTERM, SIG_DFL);
- _signal(SIGINT, SIG_DFL);
-
- RUNTIME_CHECK(sigemptyset(&sset) == 0);
- RUNTIME_CHECK(sigaddset(&sset, SIGHUP) == 0);
- RUNTIME_CHECK(sigaddset(&sset, SIGINT) == 0);
- RUNTIME_CHECK(sigaddset(&sset, SIGTERM) == 0);
- RUNTIME_CHECK(pthread_sigmask(SIG_BLOCK, &sset, NULL) == 0);
-
- isc_mem_create(&mctx);
-
- isc_managers_create(mctx, workers, 0, &loopmgr, &netmgr, NULL);
+ isc_managers_create(&mctx, workers, &loopmgr, &netmgr, &taskmgr);
}
static void
teardown(void) {
- isc_managers_destroy(&loopmgr, &netmgr, NULL);
- isc_mem_destroy(&mctx);
if (tls_ctx) {
isc_tlsctx_free(&tls_ctx);
}
+
+ isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr);
}
static void
#include <string.h>
#include <unistd.h>
-#include <isc/app.h>
#include <isc/attributes.h>
#include <isc/base64.h>
#include <isc/hash.h>
#include <isc/hex.h>
#include <isc/log.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/mem.h>
#include <isc/net.h>
#define US_PER_MS 1000 /*%< Microseconds per millisecond. */
static isc_mem_t *mctx = NULL;
+static isc_task_t *global_task = NULL;
+static isc_loopmgr_t *loopmgr = NULL;
static dns_requestmgr_t *requestmgr = NULL;
static const char *batchname = NULL;
static FILE *batchfp = NULL;
return (totext.deconsttext);
}
-/* receive response event handler */
static void
recvresponse(isc_task_t *task, isc_event_t *event) {
dns_requestevent_t *reqev = (dns_requestevent_t *)event;
isc_event_free(&event);
if (--onfly == 0) {
- isc_app_shutdown();
+ isc_task_detach(&global_task);
+ isc_loopmgr_shutdown(loopmgr);
}
return;
}
}
static isc_result_t
-sendquery(struct query *query, isc_task_t *task) {
+sendquery(struct query *query) {
dns_request_t *request = NULL;
dns_message_t *message = NULL;
dns_name_t *qname = NULL;
isc_result_t result;
dns_fixedname_t queryname;
isc_buffer_t buf;
- unsigned int options;
+ unsigned int options = 0;
onfly++;
add_opt(message, query->udpsize, query->edns, flags, opts, i);
}
- options = 0;
if (tcp_mode) {
options |= DNS_REQUESTOPT_TCP;
}
- request = NULL;
+
result = dns_request_createvia(
requestmgr, message, have_src ? &srcaddr : NULL, &dstaddr, dscp,
options, NULL, query->timeout, query->udptimeout,
- query->udpretries, task, recvresponse, message, &request);
+ query->udpretries, global_task, recvresponse, message,
+ &request);
CHECK("dns_request_createvia", result);
return (ISC_R_SUCCESS);
}
static void
-sendqueries(isc_task_t *task, isc_event_t *event) {
- struct query *query = (struct query *)event->ev_arg;
-
- isc_event_free(&event);
+sendqueries(void *arg) {
+ struct query *query = (struct query *)arg;
while (query != NULL) {
struct query *next = ISC_LIST_NEXT(query, link);
- sendquery(query, task);
+ sendquery(query);
query = next;
}
if (onfly == 0) {
- isc_app_shutdown();
+ isc_task_detach(&global_task);
+ isc_loopmgr_shutdown(loopmgr);
}
- return;
}
noreturn static void
isc_sockaddr_t bind_any;
isc_log_t *lctx = NULL;
isc_logconfig_t *lcfg = NULL;
- isc_loopmgr_t *loopmgr = NULL;
isc_nm_t *netmgr = NULL;
isc_taskmgr_t *taskmgr = NULL;
- isc_task_t *task = NULL;
dns_dispatchmgr_t *dispatchmgr = NULL;
dns_dispatch_t *dispatchvx = NULL;
dns_view_t *view = NULL;
unsigned int i;
int ns;
- RUNCHECK(isc_app_start());
-
if (isc_net_probeipv4() == ISC_R_SUCCESS) {
have_ipv4 = true;
}
preparse_args(argc, argv);
- isc_mem_create(&mctx);
+ isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr);
isc_log_create(mctx, &lctx, &lcfg);
RUNCHECK(dst_lib_init(mctx, NULL));
fatal("can't choose between IPv4 and IPv6");
}
- isc_managers_create(mctx, 1, 0, &loopmgr, &netmgr, &taskmgr);
- RUNCHECK(isc_task_create(taskmgr, 0, &task, 0));
+ RUNCHECK(isc_task_create(taskmgr, &global_task, 0));
RUNCHECK(dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr));
set_source_ports(dispatchmgr);
RUNCHECK(dns_view_create(mctx, 0, "_test", &view));
query = ISC_LIST_HEAD(queries);
- RUNCHECK(isc_app_onrun(mctx, task, sendqueries, query));
+ isc_loopmgr_setup(loopmgr, sendqueries, query);
/*
* Stall to the start of a new second.
} while (1);
}
- (void)isc_app_run();
+ isc_loopmgr_run(loopmgr);
dns_view_detach(&view);
dns_dispatch_detach(&dispatchvx);
dns_dispatchmgr_detach(&dispatchmgr);
- isc_task_detach(&task);
-
- isc_managers_destroy(&loopmgr, &netmgr, &taskmgr);
-
dst_lib_destroy();
isc_log_destroy(&lctx);
isc_mem_free(mctx, default_query.ecs_addr);
}
- isc_mem_destroy(&mctx);
-
- isc_app_finish();
-
+ isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr);
return (0);
}
Initiate transfer of the zone from the given server or the
primary servers listed in the zone structure.
- dns_zone_maintenance(dns_zone_t *zone);
-
- Perform any maintenance operations required on the zone
- * initiate up to date checks
- * expire zones
- * initiate ixfr version expire consolidation
-
dns_zone_locateprimary(dns_zone_t *zone);
Working from the root zone locate the primary server for the zone.
* seen, it is ignored. If more than 'addrsize' addresses are seen, the
* first 'addrsize' are returned and the remainder silently truncated.
*
- * This routine may block. If called by a program using the isc_app
- * framework, it should be surrounded by isc_app_block()/isc_app_unblock().
+ * This routine may block. If called by a program using the isc_loopmgr
+ * framework, it should be surrounded by isc_loopmgr_blocking() and
+ * isc_loopmgr_nonblocking().
*
* Requires:
*\li 'hostname' is not NULL.
/*
* Allocate an internal task.
*/
- result = isc_task_create(adb->taskmgr, 0, &adb->task, 0);
+ result = isc_task_create(adb->taskmgr, &adb->task, 0);
if (result != ISC_R_SUCCESS) {
goto free_lock;
}
dns_cache_t *cache;
isc_task_t *task;
+ isc_event_t *shutdown_event;
isc_event_t *resched_event; /*% Sent by cleaner task to
* itself to reschedule */
isc_event_t *overmem_event;
}
if (taskmgr != NULL) {
dbtask = NULL;
- result = isc_task_create(taskmgr, 1, &dbtask, 0);
+ result = isc_task_create(taskmgr, &dbtask, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup_db;
}
isc_event_free(&cache->cleaner.resched_event);
}
+ if (cache->cleaner.shutdown_event != NULL) {
+ isc_event_free(&cache->cleaner.shutdown_event);
+ }
+
if (cache->cleaner.iterator != NULL) {
dns_dbiterator_destroy(&cache->cleaner.iterator);
}
if (isc_refcount_decrement(&cache->references) == 1) {
cache->cleaner.overmem = false;
- /*
- * If the cleaner task exists, let it free the cache.
- */
if (isc_refcount_decrement(&cache->live_tasks) > 1) {
- isc_event_t *event = isc_event_allocate(
- cache->mctx, &cache->cleaner,
- DNS_EVENT_CACHESHUTDOWN,
- cleaner_shutdown_action, &cache->cleaner,
- sizeof(*event));
- isc_task_send(cache->cleaner.task, &event);
+ isc_task_send(cache->cleaner.task,
+ &cache->cleaner.shutdown_event);
} else {
cache_free(cache);
}
cleaner->replaceiterator = false;
cleaner->task = NULL;
+ cleaner->shutdown_event = NULL;
cleaner->resched_event = NULL;
cleaner->overmem_event = NULL;
result = dns_db_createiterator(cleaner->cache->db, false,
&cleaner->iterator);
if (result != ISC_R_SUCCESS) {
- goto cleanup;
+ goto cleanup_mutex;
}
if (taskmgr != NULL) {
- result = isc_task_create(taskmgr, 1, &cleaner->task, 0);
+ result = isc_task_create(taskmgr, &cleaner->task, 0);
if (result != ISC_R_SUCCESS) {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"isc_task_create() failed: %s",
isc_result_totext(result));
result = ISC_R_UNEXPECTED;
- goto cleanup;
+ goto cleanup_iterator;
}
isc_refcount_increment(&cleaner->cache->live_tasks);
isc_task_setname(cleaner->task, "cachecleaner", cleaner);
+ cleaner->shutdown_event = isc_event_allocate(
+ cache->mctx, cleaner, DNS_EVENT_CACHESHUTDOWN,
+ cleaner_shutdown_action, cleaner, sizeof(isc_event_t));
+
cleaner->resched_event = isc_event_allocate(
cache->mctx, cleaner, DNS_EVENT_CACHECLEAN,
incremental_cleaning_action, cleaner,
return (ISC_R_SUCCESS);
-cleanup:
- if (cleaner->overmem_event != NULL) {
- isc_event_free(&cleaner->overmem_event);
- }
- if (cleaner->resched_event != NULL) {
- isc_event_free(&cleaner->resched_event);
- }
- if (cleaner->task != NULL) {
- isc_task_detach(&cleaner->task);
- }
- if (cleaner->iterator != NULL) {
- dns_dbiterator_destroy(&cleaner->iterator);
- }
+cleanup_iterator:
+ dns_dbiterator_destroy(&cleaner->iterator);
+cleanup_mutex:
isc_mutex_destroy(&cleaner->lock);
return (result);
isc_event_free(&event);
}
- /* Make sure we don't reschedule anymore. */
- (void)isc_task_purgeevent(task, cache->cleaner.resched_event);
+ /* FIXME: Make sure we don't reschedule anymore. */
+ /* (void)isc_task_purgeevent(task, cache->cleaner.resched_event); */
isc_refcount_decrementz(&cache->live_tasks);
new_zones->loopmgr = loopmgr;
new_zones->taskmgr = taskmgr;
- result = isc_task_create(taskmgr, 0, &new_zones->updater, 0);
+ result = isc_task_create(taskmgr, &new_zones->updater, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup_ht;
}
#include <stdbool.h>
#include <stddef.h>
-#include <isc/app.h>
#include <isc/buffer.h>
#include <isc/md.h>
#include <isc/mem.h>
#include <isc/mutex.h>
+#include <isc/netmgr.h>
#include <isc/portset.h>
#include <isc/refcount.h>
#include <isc/result.h>
/* Unlocked */
unsigned int magic;
unsigned int attributes;
- isc_mutex_t lock;
isc_mem_t *mctx;
- bool readydone;
- isc_mutex_t readylock;
- isc_condition_t ready;
isc_taskmgr_t *taskmgr;
isc_task_t *task;
isc_nm_t *nm;
isc_refcount_t references;
- /* Locked */
- dns_viewlist_t viewlist;
+ dns_view_t *view;
ISC_LIST(struct resctx) resctxs;
};
* Internal state for a single name resolution procedure
*/
typedef struct resctx {
- /* Unlocked */
unsigned int magic;
- isc_mutex_t lock;
dns_client_t *client;
bool want_dnssec;
bool want_validation;
bool want_cdflag;
bool want_tcp;
- /* Locked */
ISC_LINK(struct resctx) link;
isc_task_t *task;
dns_view_t *view;
dns_namelist_t namelist;
isc_result_t result;
dns_clientresevent_t *event;
- bool canceled;
dns_rdataset_t *rdataset;
dns_rdataset_t *sigrdataset;
} resctx_t;
* Argument of an internal event for synchronous name resolution.
*/
typedef struct resarg {
- /* Unlocked */
- isc_appctx_t *actx;
+ isc_mem_t *mctx;
dns_client_t *client;
- isc_mutex_t lock;
+ const dns_name_t *name;
- /* Locked */
isc_result_t result;
isc_result_t vresult;
dns_namelist_t *namelist;
dns_clientrestrans_t *trans;
- bool canceled;
+ dns_client_resolve_cb resolve_cb;
} resarg_t;
static void
client_resfind(resctx_t *rctx, dns_fetchevent_t *event);
static void
-cancelresolve(dns_clientrestrans_t *trans);
-static void
destroyrestrans(dns_clientrestrans_t **transp);
/*
}
static isc_result_t
-createview(isc_mem_t *mctx, dns_rdataclass_t rdclass, isc_taskmgr_t *taskmgr,
- isc_nm_t *nm, isc_loopmgr_t *loopmgr, dns_dispatchmgr_t *dispatchmgr,
+createview(isc_mem_t *mctx, dns_rdataclass_t rdclass, isc_loopmgr_t *loopmgr,
+ isc_taskmgr_t *taskmgr, isc_nm_t *nm, dns_dispatchmgr_t *dispatchmgr,
dns_dispatch_t *dispatchv4, dns_dispatch_t *dispatchv6,
dns_view_t **viewp) {
isc_result_t result;
/* Initialize view security roots */
result = dns_view_initsecroots(view, mctx);
if (result != ISC_R_SUCCESS) {
- dns_view_detach(&view);
- return (result);
+ goto cleanup_view;
}
result = dns_view_createresolver(view, loopmgr, taskmgr, 1, nm, 0,
dispatchmgr, dispatchv4, dispatchv6);
if (result != ISC_R_SUCCESS) {
- dns_view_detach(&view);
- return (result);
+ goto cleanup_view;
}
result = dns_db_create(mctx, "rbt", dns_rootname, dns_dbtype_cache,
rdclass, 0, NULL, &view->cachedb);
if (result != ISC_R_SUCCESS) {
- dns_view_detach(&view);
- return (result);
+ goto cleanup_view;
}
*viewp = view;
return (ISC_R_SUCCESS);
+
+cleanup_view:
+ dns_view_detach(&view);
+ return (result);
}
isc_result_t
REQUIRE(mctx != NULL);
REQUIRE(taskmgr != NULL);
- REQUIRE(loopmgr != NULL);
REQUIRE(nm != NULL);
REQUIRE(clientp != NULL && *clientp == NULL);
.nm = nm,
};
- isc_mutex_init(&client->lock);
-
- isc_mutex_init(&client->readylock);
- isc_condition_init(&client->ready);
-
- result = isc_task_create(client->taskmgr, 0, &client->task, 0);
+ result = isc_task_create(client->taskmgr, &client->task, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup_lock;
}
isc_refcount_init(&client->references, 1);
/* Create the default view for class IN */
- result = createview(mctx, dns_rdataclass_in, taskmgr, nm, loopmgr,
+ result = createview(mctx, dns_rdataclass_in, loopmgr, taskmgr, nm,
client->dispatchmgr, dispatchv4, dispatchv6, &view);
if (result != ISC_R_SUCCESS) {
goto cleanup_references;
}
- ISC_LIST_INIT(client->viewlist);
- ISC_LIST_APPEND(client->viewlist, view, link);
+ client->view = view;
dns_view_freeze(view); /* too early? */
cleanup_task:
isc_task_detach(&client->task);
cleanup_lock:
- isc_mutex_destroy(&client->lock);
isc_mem_put(mctx, client, sizeof(*client));
return (result);
static void
destroyclient(dns_client_t *client) {
- dns_view_t *view = NULL;
-
isc_refcount_destroy(&client->references);
- while ((view = ISC_LIST_HEAD(client->viewlist)) != NULL) {
- ISC_LIST_UNLINK(client->viewlist, view, link);
- dns_view_detach(&view);
- }
+ dns_view_detach(&client->view);
if (client->dispatchv4 != NULL) {
dns_dispatch_detach(&client->dispatchv4);
isc_task_detach(&client->task);
- isc_condition_destroy(&client->ready);
- isc_mutex_destroy(&client->readylock);
-
- isc_mutex_destroy(&client->lock);
client->magic = 0;
isc_mem_putanddetach(&client->mctx, client, sizeof(*client));
dns_client_setservers(dns_client_t *client, dns_rdataclass_t rdclass,
const dns_name_t *name_space, isc_sockaddrlist_t *addrs) {
isc_result_t result;
- dns_view_t *view = NULL;
REQUIRE(DNS_CLIENT_VALID(client));
REQUIRE(addrs != NULL);
+ REQUIRE(rdclass == dns_rdataclass_in);
if (name_space == NULL) {
name_space = dns_rootname;
}
- LOCK(&client->lock);
- result = dns_viewlist_find(&client->viewlist, DNS_CLIENTVIEW_NAME,
- rdclass, &view);
- if (result != ISC_R_SUCCESS) {
- UNLOCK(&client->lock);
- return (result);
- }
- UNLOCK(&client->lock);
-
- result = dns_fwdtable_add(view->fwdtable, name_space, addrs,
+ result = dns_fwdtable_add(client->view->fwdtable, name_space, addrs,
dns_fwdpolicy_only);
- dns_view_detach(&view);
-
return (result);
}
dns_client_clearservers(dns_client_t *client, dns_rdataclass_t rdclass,
const dns_name_t *name_space) {
isc_result_t result;
- dns_view_t *view = NULL;
REQUIRE(DNS_CLIENT_VALID(client));
+ REQUIRE(rdclass == dns_rdataclass_in);
if (name_space == NULL) {
name_space = dns_rootname;
}
- LOCK(&client->lock);
- result = dns_viewlist_find(&client->viewlist, DNS_CLIENTVIEW_NAME,
- rdclass, &view);
- if (result != ISC_R_SUCCESS) {
- UNLOCK(&client->lock);
- return (result);
- }
- UNLOCK(&client->lock);
-
- result = dns_fwdtable_delete(view->fwdtable, name_space);
-
- dns_view_detach(&view);
+ result = dns_fwdtable_delete(client->view->fwdtable, name_space);
return (result);
}
REQUIRE(RCTX_VALID(rctx));
- LOCK(&rctx->lock);
-
mctx = rctx->view->mctx;
name = dns_fixedname_name(&rctx->name);
rctx->restarts++;
want_restart = false;
- if (event == NULL && !rctx->canceled) {
+ if (event == NULL) {
fname = dns_fixedname_initname(&foundname);
INSIST(!dns_rdataset_isassociated(rctx->rdataset));
INSIST(rctx->sigrdataset == NULL ||
}
/*
- * If we've been canceled, forget about the result.
+ * Get some resource for copying the
+ * result.
*/
- if (rctx->canceled) {
- result = ISC_R_CANCELED;
- } else {
- /*
- * Otherwise, get some resource for copying the
- * result.
- */
- dns_name_t *aname = dns_fixedname_name(&rctx->name);
+ dns_name_t *aname = dns_fixedname_name(&rctx->name);
- ansname = isc_mem_get(mctx, sizeof(*ansname));
- dns_name_init(ansname, NULL);
+ ansname = isc_mem_get(mctx, sizeof(*ansname));
+ dns_name_init(ansname, NULL);
- dns_name_dup(aname, mctx, ansname);
- }
+ dns_name_dup(aname, mctx, ansname);
switch (result) {
case ISC_R_SUCCESS:
rctx->event->ev_sender = rctx;
isc_task_sendanddetach(&task, ISC_EVENT_PTR(&rctx->event));
}
-
- UNLOCK(&rctx->lock);
}
static void
resarg_t *resarg = event->ev_arg;
dns_clientresevent_t *rev = (dns_clientresevent_t *)event;
dns_name_t *name = NULL;
- dns_client_t *client = resarg->client;
+ isc_result_t result;
UNUSED(task);
- LOCK(&resarg->lock);
-
resarg->result = rev->result;
resarg->vresult = rev->vresult;
while ((name = ISC_LIST_HEAD(rev->answerlist)) != NULL) {
ISC_LIST_APPEND(*resarg->namelist, name, link);
}
- destroyrestrans(&resarg->trans);
isc_event_free(&event);
- resarg->client = NULL;
- if (!resarg->canceled) {
- UNLOCK(&resarg->lock);
+ destroyrestrans(&resarg->trans);
+ result = resarg->result;
+
+ if (result != ISC_R_SUCCESS && resarg->vresult != ISC_R_SUCCESS) {
/*
- * Signal that the entire process is done.
- */
- LOCK(&client->readylock);
- client->readydone = true;
- SIGNAL(&client->ready);
- UNLOCK(&client->readylock);
- } else {
- /*
- * We have already exited from the loop (due to some
- * unexpected event). Just clean the arg up.
+ * If this lookup failed due to some error in DNSSEC
+ * validation, return the validation error code.
+ * XXX: or should we pass the validation result separately?
*/
- UNLOCK(&resarg->lock);
- isc_mutex_destroy(&resarg->lock);
- isc_mem_put(client->mctx, resarg, sizeof(*resarg));
+ result = resarg->vresult;
}
- dns_client_detach(&client);
+ resarg->resolve_cb(resarg->client, resarg->name, resarg->namelist,
+ result);
+
+ dns_client_detach(&resarg->client);
+
+ isc_mem_putanddetach(&resarg->mctx, resarg, sizeof(*resarg));
}
isc_result_t
dns_client_resolve(dns_client_t *client, const dns_name_t *name,
dns_rdataclass_t rdclass, dns_rdatatype_t type,
- unsigned int options, dns_namelist_t *namelist) {
+ unsigned int options, dns_namelist_t *namelist,
+ dns_client_resolve_cb resolve_cb) {
isc_result_t result;
resarg_t *resarg = NULL;
REQUIRE(DNS_CLIENT_VALID(client));
REQUIRE(namelist != NULL && ISC_LIST_EMPTY(*namelist));
+ REQUIRE(rdclass == dns_rdataclass_in);
resarg = isc_mem_get(client->mctx, sizeof(*resarg));
*resarg = (resarg_t){
.client = client,
+ .name = name,
.result = DNS_R_SERVFAIL,
.namelist = namelist,
+ .resolve_cb = resolve_cb,
};
- isc_mutex_init(&resarg->lock);
+ isc_mem_attach(client->mctx, &resarg->mctx);
result = dns_client_startresolve(client, name, rdclass, type, options,
client->task, resolve_done, resarg,
&resarg->trans);
if (result != ISC_R_SUCCESS) {
- isc_mutex_destroy(&resarg->lock);
isc_mem_put(client->mctx, resarg, sizeof(*resarg));
return (result);
}
- /*
- * Block until the entire process is completed.
- */
- LOCK(&client->readylock);
- if (!client->readydone) {
- WAIT(&client->ready, &client->readylock);
- }
- UNLOCK(&client->readylock);
-
- LOCK(&resarg->lock);
- if (result == ISC_R_SUCCESS) {
- result = resarg->result;
- }
- if (result != ISC_R_SUCCESS && resarg->vresult != ISC_R_SUCCESS) {
- /*
- * If this lookup failed due to some error in DNSSEC
- * validation, return the validation error code.
- * XXX: or should we pass the validation result separately?
- */
- result = resarg->vresult;
- }
- if (resarg->trans != NULL) {
- /*
- * Unusual termination (perhaps due to signal). We need some
- * tricky cleanup process.
- */
- resarg->canceled = true;
- cancelresolve(resarg->trans);
-
- UNLOCK(&resarg->lock);
-
- /* resarg will be freed in the event handler. */
- } else {
- UNLOCK(&resarg->lock);
-
- isc_mutex_destroy(&resarg->lock);
- isc_mem_put(client->mctx, resarg, sizeof(*resarg));
- }
-
return (result);
}
unsigned int options, isc_task_t *task,
isc_taskaction_t action, void *arg,
dns_clientrestrans_t **transp) {
- dns_view_t *view = NULL;
dns_clientresevent_t *event = NULL;
resctx_t *rctx = NULL;
isc_task_t *tclone = NULL;
REQUIRE(DNS_CLIENT_VALID(client));
REQUIRE(transp != NULL && *transp == NULL);
-
- LOCK(&client->lock);
- result = dns_viewlist_find(&client->viewlist, DNS_CLIENTVIEW_NAME,
- rdclass, &view);
- UNLOCK(&client->lock);
- if (result != ISC_R_SUCCESS) {
- return (result);
- }
+ REQUIRE(rdclass == dns_rdataclass_in);
mctx = client->mctx;
rdataset = NULL;
ISC_LIST_INIT(event->answerlist);
rctx = isc_mem_get(mctx, sizeof(*rctx));
- isc_mutex_init(&rctx->lock);
+ *rctx = (resctx_t){
+ .client = client,
+ .task = client->task,
+ .event = event,
+ .type = type,
+ .want_dnssec = want_dnssec,
+ .want_validation = want_validation,
+ .want_cdflag = want_cdflag,
+ .want_tcp = want_tcp,
+ };
result = getrdataset(mctx, &rdataset);
if (result != ISC_R_SUCCESS) {
dns_fixedname_init(&rctx->name);
dns_name_copy(name, dns_fixedname_name(&rctx->name));
- rctx->client = client;
ISC_LINK_INIT(rctx, link);
- rctx->canceled = false;
- rctx->task = client->task;
- rctx->type = type;
- rctx->view = view;
- rctx->restarts = 0;
- rctx->fetch = NULL;
- rctx->want_dnssec = want_dnssec;
- rctx->want_validation = want_validation;
- rctx->want_cdflag = want_cdflag;
- rctx->want_tcp = want_tcp;
+ dns_view_attach(client->view, &rctx->view);
ISC_LIST_INIT(rctx->namelist);
- rctx->event = event;
rctx->magic = RCTX_MAGIC;
isc_refcount_increment(&client->references);
- LOCK(&client->lock);
ISC_LIST_APPEND(client->resctxs, rctx, link);
- UNLOCK(&client->lock);
*transp = (dns_clientrestrans_t *)rctx;
client_resfind(rctx, NULL);
if (sigrdataset != NULL) {
putrdataset(client->mctx, &sigrdataset);
}
- isc_mutex_destroy(&rctx->lock);
isc_mem_put(mctx, rctx, sizeof(*rctx));
isc_event_free(ISC_EVENT_PTR(&event));
isc_task_detach(&tclone);
- dns_view_detach(&view);
return (result);
}
-/*%<
- * Cancel an ongoing resolution procedure started via
- * dns_client_startresolve().
- *
- * If the resolution procedure has not completed, post its CLIENTRESDONE
- * event with a result code of #ISC_R_CANCELED.
- */
-static void
-cancelresolve(dns_clientrestrans_t *trans) {
- resctx_t *rctx = NULL;
-
- REQUIRE(trans != NULL);
- rctx = (resctx_t *)trans;
- REQUIRE(RCTX_VALID(rctx));
-
- LOCK(&rctx->lock);
-
- if (!rctx->canceled) {
- rctx->canceled = true;
- if (rctx->fetch != NULL) {
- dns_resolver_cancelfetch(rctx->fetch);
- }
- }
-
- UNLOCK(&rctx->lock);
-}
-
void
dns_client_freeresanswer(dns_client_t *client, dns_namelist_t *namelist) {
dns_name_t *name;
* Wait for the lock in client_resfind to be released before
* destroying the lock.
*/
- LOCK(&rctx->lock);
- UNLOCK(&rctx->lock);
-
- LOCK(&client->lock);
INSIST(ISC_LINK_LINKED(rctx, link));
ISC_LIST_UNLINK(client->resctxs, rctx, link);
- UNLOCK(&client->lock);
-
INSIST(ISC_LIST_EMPTY(rctx->namelist));
- isc_mutex_destroy(&rctx->lock);
rctx->magic = 0;
isc_mem_put(mctx, rctx, sizeof(*rctx));
dns_rdatatype_t rdtype, const dns_name_t *keyname,
isc_buffer_t *databuf) {
isc_result_t result;
- dns_view_t *view = NULL;
dns_keytable_t *secroots = NULL;
dns_name_t *name = NULL;
char rdatabuf[DST_KEY_MAXSIZE];
isc_buffer_t b;
REQUIRE(DNS_CLIENT_VALID(client));
+ REQUIRE(rdclass == dns_rdataclass_in);
- LOCK(&client->lock);
- result = dns_viewlist_find(&client->viewlist, DNS_CLIENTVIEW_NAME,
- rdclass, &view);
- UNLOCK(&client->lock);
- CHECK(result);
-
- CHECK(dns_view_getsecroots(view, &secroots));
+ CHECK(dns_view_getsecroots(client->view, &secroots));
DE_CONST(keyname, name);
CHECK(dns_keytable_add(secroots, false, false, name, &ds, NULL, NULL));
cleanup:
- if (view != NULL) {
- dns_view_detach(&view);
- }
if (secroots != NULL) {
dns_keytable_detach(&secroots);
}
/*
* Run in task-exclusive mode.
*/
- result = isc_task_beginexclusive(env->reopen_task);
- RUNTIME_CHECK(result == ISC_R_SUCCESS);
+ isc_task_beginexclusive(env->reopen_task);
/*
* Check that we can create a new fw object.
*
*\li 'nm' is a valid network manager.
*
- *\li 'loopmgr' is a valid loop manager.
+ *\li 'timermgr' is a valid timer manager.
*
*\li clientp != NULL && *clientp == NULL.
*
*\li Anything else Failure.
*/
+typedef void (*dns_client_resolve_cb)(dns_client_t *client,
+ const dns_name_t *name,
+ dns_namelist_t *namelist,
+ isc_result_t result);
+
isc_result_t
dns_client_resolve(dns_client_t *client, const dns_name_t *name,
dns_rdataclass_t rdclass, dns_rdatatype_t type,
- unsigned int options, dns_namelist_t *namelist);
+ unsigned int options, dns_namelist_t *namelist,
+ dns_client_resolve_cb resolve_cb);
isc_result_t
dns_client_startresolve(dns_client_t *client, const dns_name_t *name,
#include <isc/time.h>
#include <isc/types.h>
#include <isc/util.h>
+#include <isc/work.h>
#include <dns/db.h>
#include <dns/dbiterator.h>
* This will run in a network/task manager thread when the dump is complete.
*/
static void
-master_dump_done_cb(void *data, isc_result_t result) {
+master_dump_done_cb(void *data) {
dns_dumpctx_t *dctx = data;
- if (result == ISC_R_SUCCESS && dctx->result != ISC_R_SUCCESS) {
- result = dctx->result;
- }
-
- (dctx->done)(dctx->done_arg, result);
+ (dctx->done)(dctx->done_arg, dctx->result);
dns_dumpctx_detach(&dctx);
}
static void
setup_dump(isc_task_t *task, isc_event_t *event) {
dns_dumpctx_t *dctx = NULL;
+ isc_loopmgr_t *loopmgr = isc_task_getloopmgr(task);
+ isc_loop_t *loop = isc_loop_current(loopmgr);
- REQUIRE(isc_nm_tid() >= 0);
REQUIRE(event != NULL);
dctx = event->ev_arg;
REQUIRE(DNS_DCTX_VALID(dctx));
- isc_nm_work_offload(isc_task_getnetmgr(task), master_dump_cb,
- master_dump_done_cb, dctx);
+ isc_work_enqueue(loop, master_dump_cb, master_dump_done_cb, dctx);
isc_event_free(&event);
}
ntatable = isc_mem_get(view->mctx, sizeof(*ntatable));
ntatable->task = NULL;
- result = isc_task_create(taskmgr, 0, &ntatable->task, 0);
+ result = isc_task_create(taskmgr, &ntatable->task, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup_ntatable;
}
* Since we have a pool of tasks we bind them to task
* queues to spread the load evenly
*/
- result = isc_task_create(taskmgr, 0, &res->tasks[i], i);
+ result = isc_task_create(taskmgr, &res->tasks[i], i);
if (result != ISC_R_SUCCESS) {
goto cleanup_tasks;
}
goto cleanup_rbt;
}
- result = isc_task_create(taskmgr, 0, &rpzs->updater, 0);
+ result = isc_task_create(taskmgr, &rpzs->updater, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup_task;
}
REQUIRE(!view->frozen);
REQUIRE(view->resolver == NULL);
- result = isc_task_create(taskmgr, 0, &view->task, 0);
+ result = isc_task_create(taskmgr, &view->task, 0);
if (result != ISC_R_SUCCESS) {
return (result);
}
zmgr->mctx, zmgr->workers * sizeof(zmgr->zonetasks[0]));
memset(zmgr->zonetasks, 0, zmgr->workers * sizeof(zmgr->zonetasks[0]));
for (size_t i = 0; i < zmgr->workers; i++) {
- result = isc_task_create(zmgr->taskmgr, 2, &zmgr->zonetasks[i],
- i);
+ result = isc_task_create(zmgr->taskmgr, &zmgr->zonetasks[i], i);
INSIST(result == ISC_R_SUCCESS);
if (result != ISC_R_SUCCESS) {
INSIST(result == ISC_R_SUCCESS);
zmgr->mctx, zmgr->workers * sizeof(zmgr->loadtasks[0]));
memset(zmgr->loadtasks, 0, zmgr->workers * sizeof(zmgr->loadtasks[0]));
for (size_t i = 0; i < zmgr->workers; i++) {
- result = isc_task_create(zmgr->taskmgr, UINT_MAX,
- &zmgr->loadtasks[i], i);
+ result = isc_task_create(zmgr->taskmgr, &zmgr->loadtasks[i], i);
INSIST(result == ISC_R_SUCCESS);
if (result != ISC_R_SUCCESS) {
goto free_loadtasks;
void
isc_httpdmgr_shutdown(isc_httpdmgr_t **httpdmgrp) {
- isc_httpdmgr_t *httpdmgr;
- isc_httpd_t *httpd;
+ isc_httpdmgr_t *httpdmgr = NULL;
+ isc_httpd_t *httpd = NULL;
REQUIRE(httpdmgrp != NULL);
REQUIRE(VALID_HTTPDMGR(*httpdmgrp));
}
UNLOCK(&httpdmgr->lock);
+ isc_nmsocket_close(&httpdmgr->sock);
+
httpdmgr_detach(&httpdmgr);
}
#pragma once
+#include <isc/loop.h>
#include <isc/netmgr.h>
#include <isc/result.h>
#include <isc/task.h>
typedef struct isc_managers isc_managers_t;
-isc_result_t
-isc_managers_create(isc_mem_t *mctx, size_t workers, size_t quantum,
+void
+isc_managers_create(isc_mem_t **mctx, uint32_t workers,
isc_loopmgr_t **loopmgrp, isc_nm_t **netmgrp,
isc_taskmgr_t **taskmgrp);
void
-isc_managers_destroy(isc_loopmgr_t **loopmgr, isc_nm_t **netmgrp,
- isc_taskmgr_t **taskmgrp);
+isc_managers_destroy(isc_mem_t **mctx, isc_loopmgr_t **loopmgrp,
+ isc_nm_t **netmgrp, isc_taskmgr_t **taskmgrp);
* callbacks.
*/
-typedef void (*isc_nm_workcb_t)(void *arg);
-typedef void (*isc_nm_after_workcb_t)(void *arg, isc_result_t result);
+void
+isc_netmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, isc_nm_t **netgmrp);
+/*%<
+ * Creates a new network manager and starts it running when loopmgr is started.
+ */
+
+void
+isc_netmgr_destroy(isc_nm_t **netmgrp);
/*%<
- * Callback functions for libuv threadpool work (see uv_work_t)
+ * Similar to isc_nm_detach(), but requires all other references to be gone.
*/
void
/*%<
* Attach/detach a network manager. When all references have been
* released, the network manager is shut down, freeing all resources.
- * Destroy is working the same way as detach, but it actively waits
- * for all other references to be gone.
*/
-#define ISC_NETMGR_TID_UNKNOWN -1
-
-/* Return thread ID of current thread, or ISC_NETMGR_TID_UNKNOWN */
-int
-isc_nm_tid(void);
-
void
isc_nmsocket_close(isc_nmsocket_t **sockp);
/*%<
* \li 'handle' is a valid netmgr handle object.
*/
-void
-isc_nm_task_enqueue(isc_nm_t *mgr, isc_task_t *task, int tid);
-/*%<
- * Enqueue the 'task' onto the netmgr ievents queue.
- *
- * Requires:
- * \li 'mgr' is a valid netmgr object
- * \li 'task' is a valid task
- * \li 'tid' is either the preferred netmgr tid or -1, in which case
- * tid will be picked randomly. The tid is capped (by modulo) to
- * maximum number of 'workers' as specifed in isc_nm_start()
- */
-
-void
-isc_nm_work_offload(isc_nm_t *mgr, isc_nm_workcb_t work_cb,
- isc_nm_after_workcb_t after_work_cb, void *data);
-/*%<
- * Schedules a job to be handled by the libuv thread pool (see uv_work_t).
- * The function specified in `work_cb` will be run by a thread in the
- * thread pool; when complete, the `after_work_cb` function will run.
- *
- * Requires:
- * \li 'mgr' is a valid netmgr object.
- * \li We are currently running in a network manager thread.
- */
-
void
isc__nm_force_tid(int tid);
/*%<
* tests and should not be used in any production code.
*/
-uint32_t
-isc_nm_getnworkers(const isc_nm_t *);
-/*%<
- * Return the number of active workers
- */
-
void
isc_nmhandle_setwritetimeout(isc_nmhandle_t *handle, uint64_t write_timeout);
#include <isc/eventclass.h>
#include <isc/lang.h>
#include <isc/netmgr.h>
+#include <isc/refcount.h>
#include <isc/stdtime.h>
#include <isc/types.h>
#include <isc/util.h>
*** Types
***/
-#define isc_task_create(m, q, t, i) \
- isc__task_create(m, q, t, i ISC__TASKFILELINE)
+#define isc_task_create(manager, taskp, tid) \
+ isc__task_create(manager, taskp, tid ISC__TASKFILELINE)
isc_result_t
-isc__task_create(isc_taskmgr_t *manager, unsigned int quantum,
- isc_task_t **taskp, int tid ISC__TASKFLARG);
+isc__task_create(isc_taskmgr_t *manager, isc_task_t **taskp,
+ int tid ISC__TASKFLARG);
/*%<
* Create a task, bound to a particular thread id.
*
- * Notes:
- *
- *\li If 'quantum' is non-zero, then only that many events can be dispatched
- * before the task must yield to other tasks waiting to execute. If
- * quantum is zero, then the default quantum of the task manager will
- * be used.
- *
- *\li The 'quantum' option may be removed from isc_task_create() in the
- * future. If this happens, isc_task_getquantum() and
- * isc_task_setquantum() will be provided.
- *
* Requires:
*
*\li 'manager' is a valid task manager.
*\li #ISC_R_SHUTTINGDOWN
*/
-void
-isc_task_ready(isc_task_t *task);
-/*%<
- * Enqueue the task onto netmgr queue.
- */
-
-isc_result_t
-isc_task_run(isc_task_t *task);
-/*%<
- * Run all the queued events for the 'task', returning
- * when the queue is empty or the number of events executed
- * exceeds the 'quantum' specified when the task was created.
- *
- * Requires:
- *
- *\li 'task' is a valid task.
- *
- * Returns:
- *
- *\li #ISC_R_SUCCESS
- *\li #ISC_R_QUOTA
- */
-
-void
-isc_task_attach(isc_task_t *source, isc_task_t **targetp);
-/*%<
- * Attach *targetp to source.
- *
- * Requires:
- *
- *\li 'source' is a valid task.
- *
- *\li 'targetp' points to a NULL isc_task_t *.
- *
- * Ensures:
- *
- *\li *targetp is attached to source.
- */
-
-void
-isc_task_detach(isc_task_t **taskp);
-/*%<
- * Detach *taskp from its task.
- *
- * Requires:
- *
- *\li '*taskp' is a valid task.
- *
- * Ensures:
- *
- *\li *taskp is NULL.
- *
- *\li If '*taskp' is the last reference to the task, the task is idle (has
- * an empty event queue), and has not been shutdown, the task will be
- * shutdown.
- *
- *\li If '*taskp' is the last reference to the task and
- * the task has been shutdown,
- * all resources used by the task will be freed.
- */
+ISC_REFCOUNT_DECL(isc_task);
void
isc_task_send(isc_task_t *task, isc_event_t **eventp);
* all resources used by the task will be freed.
*/
-bool
-isc_task_purgeevent(isc_task_t *task, isc_event_t *event);
-/*%<
- * Purge 'event' from a task's event queue.
- *
- * Notes:
- *
- *\li If 'event' is on the task's event queue, it will be purged. 'event'
- * does not have to be on the task's event queue; in fact, it can even be
- * an invalid pointer. Purging only occurs if the event is actually on the
- * task's event queue.
- *
- * \li Purging never changes the state of the task.
- *
- * Requires:
- *
- *\li 'task' is a valid task.
- *
- * Ensures:
- *
- *\li 'event' is not in the event queue for 'task'.
- *
- * Returns:
- *
- *\li #true The event was purged.
- *\li #false The event was not in the event queue.
- */
-
void
isc_task_setname(isc_task_t *task, const char *name, void *tag);
/*%<
*\li 'task' is a valid task.
*/
+isc_loopmgr_t *
+isc_task_getloopmgr(isc_task_t *task);
+
const char *
isc_task_getname(isc_task_t *task);
/*%<
*
*/
-isc_nm_t *
-isc_task_getnetmgr(isc_task_t *task);
-
void *
isc_task_gettag(isc_task_t *task);
/*%<
*/
void
-isc_task_setquantum(isc_task_t *task, unsigned int quantum);
-/*%<
- * Set future 'task' quantum to 'quantum'. The current 'task' quantum will be
- * kept for the current isc_task_run() loop, and will be changed for the next
- * run. Therefore, the function is save to use from the event callback as it
- * will not affect the current event loop processing.
- */
-
-isc_result_t
isc_task_beginexclusive(isc_task_t *task);
/*%<
* Request exclusive access for 'task', which must be the calling
void
isc_task_endexclusive(isc_task_t *task);
/*%<
- * Relinquish the exclusive access obtained by isc_task_beginexclusive(),
- * allowing other tasks to execute.
+ * Relinquish the exclusive access obtained by
+ *isc_task_beginexclusive(), allowing other tasks to execute.
*
* Requires:
*\li 'task' is the calling task, and has obtained
***** Task Manager.
*****/
+void
+isc_taskmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr,
+ isc_taskmgr_t **managerp);
+/*%<
+ * Create a new task manager.
+ *
+ * Notes:
+ *
+ *\li This is meant to be called from isc_managers_create().
+ *
+ * Requires:
+ *
+ *\li 'mctx' is a valid memory context.
+
+ *\li 'loopmgr' is a valid loop manager.
+ *
+ *\li managerp != NULL && *managerp == NULL
+ *
+ * Ensures:
+ *
+ *\li On success, '*managerp' will be attached to the newly created task
+ * manager.
+ *
+ * Returns:
+ *
+ *\li #ISC_R_SUCCESS
+ *\li #ISC_R_NOMEMORY
+ *\li #ISC_R_NOTHREADS No threads could be created.
+ *\li #ISC_R_UNEXPECTED An unexpected error occurred.
+ *\li #ISC_R_SHUTTINGDOWN The non-threaded, shared, task
+ * manager shutting down.
+ */
+
+void
+isc_taskmgr_destroy(isc_taskmgr_t **managerp);
+/*%<
+ * Destroy '*managerp'.
+ *
+ * Notes:
+ *
+ *\li Calling isc__taskmgr_destroy() will shut down all tasks managed by
+ * *managerp that haven't already been shutdown. The call will block
+ * until all tasks have entered the done state.
+ *
+ *\li isc__taskmgr_destroy() must not be called by a task event action,
+ * because it would block forever waiting for the event action to
+ * complete. An event action that wants to cause task manager shutdown
+ * should request some non-event action thread of execution to do the
+ * shutdown, e.g. by signaling a condition variable or using
+ * isc_loopmgr_shutdown().
+ *
+ *\li The task manager is reference counted and will be destroyed when
+ * the last reference is detached. The only difference between this
+ * function and isc_task_detach() is that this one will assert if
+ * more than 1 reference is held. This function is only meant to be
+ * called from isc_managers_destroy(), by which time all other
+ * references should have been detached. If any are still being held,
+ * it's a programming error, and we want to crash.
+ *
+ * Requires:
+ *
+ *\li '*managerp' is a valid task manager.
+ *
+ *\li No other references to the task manager are being held.
+ */
+
void
isc_taskmgr_attach(isc_taskmgr_t *, isc_taskmgr_t **);
void
typedef struct isc_sockaddr isc_sockaddr_t; /*%< Socket Address */
typedef ISC_LIST(isc_sockaddr_t) isc_sockaddrlist_t; /*%< Socket Address List
* */
-typedef struct isc_stats isc_stats_t; /*%< Statistics */
-typedef int_fast64_t isc_statscounter_t;
-typedef struct isc_symtab isc_symtab_t; /*%< Symbol Table */
-typedef struct isc_task isc_task_t; /*%< Task */
-typedef ISC_LIST(isc_task_t) isc_tasklist_t; /*%< Task List */
+typedef struct isc_stats isc_stats_t; /*%< Statistics */
+typedef int_fast64_t isc_statscounter_t;
+typedef struct isc_symtab isc_symtab_t; /*%< Symbol Table */
+typedef struct isc_task isc_task_t; /*%< Task */
typedef struct isc_taskmgr isc_taskmgr_t; /*%< Task Manager */
typedef struct isc_textregion isc_textregion_t; /*%< Text Region */
typedef struct isc_time isc_time_t; /*%< Time */
typedef struct isc_timer isc_timer_t; /*%< Timer */
-typedef struct isc_timermgr isc_timermgr_t; /*%< Timer Manager */
typedef struct isc_work isc_work_t; /*%< Work offloaded to an
* external thread */
isc_rwlock_unlock((lp), (t)); \
}
+#define RDLOCK(lp) RWLOCK(lp, isc_rwlocktype_read)
+#define RDUNLOCK(lp) RWUNLOCK(lp, isc_rwlocktype_read)
+#define WRLOCK(lp) RWLOCK(lp, isc_rwlocktype_write)
+#define WRUNLOCK(lp) RWUNLOCK(lp, isc_rwlocktype_write)
+
/*
* List Macros.
*/
#define LCFG_MAGIC ISC_MAGIC('L', 'c', 'f', 'g')
#define VALID_CONFIG(lcfg) ISC_MAGIC_VALID(lcfg, LCFG_MAGIC)
-#define RDLOCK(lp) RWLOCK(lp, isc_rwlocktype_read);
-#define WRLOCK(lp) RWLOCK(lp, isc_rwlocktype_write);
-#define RDUNLOCK(lp) RWUNLOCK(lp, isc_rwlocktype_read);
-#define WRUNLOCK(lp) RWUNLOCK(lp, isc_rwlocktype_write);
-
static thread_local bool forcelog = false;
/*
* information regarding copyright ownership.
*/
-#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/util.h>
-#include "netmgr_p.h"
-#include "task_p.h"
-
-isc_result_t
-isc_managers_create(isc_mem_t *mctx, size_t workers, size_t quantum,
+void
+isc_managers_create(isc_mem_t **mctxp, uint32_t workers,
isc_loopmgr_t **loopmgrp, isc_nm_t **netmgrp,
isc_taskmgr_t **taskmgrp) {
- isc_result_t result;
- isc_nm_t *netmgr = NULL;
- isc_taskmgr_t *taskmgr = NULL;
-
- REQUIRE(netmgrp != NULL && *netmgrp == NULL);
- isc__netmgr_create(mctx, workers, &netmgr);
- *netmgrp = netmgr;
- INSIST(netmgr != NULL);
-
- REQUIRE(taskmgrp == NULL || *taskmgrp == NULL);
- if (taskmgrp != NULL) {
- INSIST(netmgr != NULL);
- result = isc__taskmgr_create(mctx, quantum, netmgr, &taskmgr);
- if (result != ISC_R_SUCCESS) {
- UNEXPECTED_ERROR(__FILE__, __LINE__,
- "isc_taskmgr_create() failed: %s",
- isc_result_totext(result));
- goto fail;
- }
- *taskmgrp = taskmgr;
- }
+ REQUIRE(mctxp != NULL && *mctxp == NULL);
+ isc_mem_create(mctxp);
+ INSIST(*mctxp != NULL);
- isc_loopmgr_create(mctx, workers, loopmgrp);
+ REQUIRE(loopmgrp != NULL && *loopmgrp == NULL);
+ isc_loopmgr_create(*mctxp, workers, loopmgrp);
+ INSIST(*loopmgrp != NULL);
- return (ISC_R_SUCCESS);
-fail:
- isc_managers_destroy(loopmgrp, netmgrp, taskmgrp);
+ REQUIRE(netmgrp != NULL && *netmgrp == NULL);
+ isc_netmgr_create(*mctxp, *loopmgrp, netmgrp);
+ INSIST(*netmgrp != NULL);
- return (result);
+ REQUIRE(taskmgrp != NULL && *taskmgrp == NULL);
+ isc_taskmgr_create(*mctxp, *loopmgrp, taskmgrp);
+ INSIST(*taskmgrp != NULL);
}
void
-isc_managers_destroy(isc_loopmgr_t **loopmgrp, isc_nm_t **netmgrp,
- isc_taskmgr_t **taskmgrp) {
- /*
- * If we have a taskmgr to clean up, then we must also have a netmgr.
- */
- REQUIRE(taskmgrp == NULL || netmgrp != NULL);
+isc_managers_destroy(isc_mem_t **mctxp, isc_loopmgr_t **loopmgrp,
+ isc_nm_t **netmgrp, isc_taskmgr_t **taskmgrp) {
+ REQUIRE(mctxp != NULL && *mctxp != NULL);
+ REQUIRE(loopmgrp != NULL && *loopmgrp != NULL);
+ REQUIRE(netmgrp != NULL && *netmgrp != NULL);
+ REQUIRE(taskmgrp != NULL && *taskmgrp != NULL);
/*
* The sequence of operations here is important:
- *
- * 1. Initiate shutdown of the taskmgr, sending shutdown events to
- * all tasks that are not already shutting down.
- */
- if (taskmgrp != NULL) {
- INSIST(*taskmgrp != NULL);
- isc__taskmgr_shutdown(*taskmgrp);
- }
-
- /*
- * 2. Initiate shutdown of the network manager, freeing clients
- * and other resources and preventing new connections, but do
- * not stop processing of existing events.
- */
- if (netmgrp != NULL) {
- INSIST(*netmgrp != NULL);
- isc__netmgr_shutdown(*netmgrp);
- }
-
- /*
- * 3. Finish destruction of the task manager when all tasks
- * have completed.
- */
- if (taskmgrp != NULL) {
- isc__taskmgr_destroy(taskmgrp);
- }
-
- /*
- * 4. Finish destruction of the netmgr, and wait until all
- * references have been released.
*/
- if (netmgrp != NULL) {
- isc__netmgr_destroy(netmgrp);
- }
+ isc_taskmgr_destroy(taskmgrp);
+ isc_netmgr_destroy(netmgrp);
isc_loopmgr_destroy(loopmgrp);
+ isc_mem_destroy(mctxp);
}
static isc_result_t
new_http_cstream(isc_nmsocket_t *sock, http_cstream_t **streamp) {
- isc_mem_t *mctx = sock->mgr->mctx;
+ isc_mem_t *mctx = sock->worker->mctx;
const char *uri = NULL;
bool post;
http_cstream_t *stream = NULL;
on_server_data_chunk_recv_callback(int32_t stream_id, const uint8_t *data,
size_t len, isc_nm_http_session_t *session) {
isc_nmsocket_h2_t *h2 = ISC_LIST_HEAD(session->sstreams);
+ isc_mem_t *mctx = h2->psock->worker->mctx;
+
while (h2 != NULL) {
if (stream_id == h2->stream_id) {
if (isc_buffer_base(&h2->rbuf) == NULL) {
isc_buffer_init(
&h2->rbuf,
- isc_mem_allocate(session->mctx,
+ isc_mem_allocate(mctx,
h2->content_length),
MAX_DNS_MESSAGE_SIZE);
}
while (cbreq != NULL) {
isc__nm_uvreq_t *next = ISC_LIST_NEXT(cbreq, link);
ISC_LIST_UNLINK(pending_callbacks, cbreq, link);
- isc__nm_sendcb(cbreq->handle->sock, cbreq, result, false);
+ isc__nm_sendcb(cbreq->handle->sock, cbreq, result, true);
cbreq = next;
}
}
return (false);
}
- /* We need to attach to the session->handle earlier because as an
+ /*
+ * We need to attach to the session->handle earlier because as an
* indirect result of the nghttp2_session_mem_send() the session
* might get closed and the handle detached. However, there is
* still some outgoing data to handle and we need to call it
* anyway if only to get the write callback passed here to get
- * called properly. */
+ * called properly.
+ */
isc_nmhandle_attach(session->handle, &transphandle);
while (nghttp2_session_want_write(session->ngsession)) {
nghttp2_session_mem_send(session->ngsession, &data);
const size_t new_total = total + pending;
- /* Sometimes nghttp2_session_mem_send() does not return any
+ /*
+ * Sometimes nghttp2_session_mem_send() does not return any
* data to send even though nghttp2_session_want_write()
- * returns success. */
+ * returns success.
+ */
if (pending == 0 || data == NULL) {
break;
}
isc_buffer_usedlength(session->pending_write_data);
}
- /* Here we are trying to flush the pending writes buffer earlier
+ /*
+ * Here we are trying to flush the pending writes buffer earlier
* to avoid hitting unnecessary limitations on a TLS record size
- * within some tools (e.g. flamethrower). */
+ * within some tools (e.g. flamethrower).
+ */
if (max_total_write_size >= FLUSH_HTTP_WRITE_BUFFER_AFTER) {
- /* Case 1: We have equal or more than
- * FLUSH_HTTP_WRITE_BUFFER_AFTER bytes to send. Let's flush it.
+ /*
+ * Case 1: We have at least FLUSH_HTTP_WRITE_BUFFER_AFTER
+ * bytes to send. Let's flush it.
*/
total = max_total_write_size;
} else if (session->sending > 0 && total > 0) {
- /* Case 2: There is one or more write requests in flight and
+ /*
+ * Case 2: There is one or more write requests in flight and
* we have some new data form nghttp2 to send. Let's put the
* write callback (if any) into the pending write callbacks
* list. Then let's return from the function: as soon as the
* "in-flight" write callback get's called or we have reached
* FLUSH_HTTP_WRITE_BUFFER_AFTER bytes in the write buffer, we
- * will flush the buffer. */
+ * will flush the buffer.
+ */
if (cb != NULL) {
isc__nm_uvreq_t *newcb = NULL;
INSIST(VALID_NMHANDLE(httphandle));
- newcb = isc__nm_uvreq_get(httphandle->sock->mgr,
+ newcb = isc__nm_uvreq_get(httphandle->sock->worker,
httphandle->sock);
newcb->cb.send = cb;
newcb->cbarg = cbarg;
} else if (session->sending == 0 && total == 0 &&
session->pending_write_data != NULL)
{
- /* Case 3: There is no write in flight and we haven't got
+ /*
+ * Case 3: There is no write in flight and we haven't got
* anything new from nghttp2, but there is some data pending
- * in the write buffer. Let's flush the buffer. */
+ * in the write buffer. Let's flush the buffer.
+ */
isc_region_t region = { 0 };
total = isc_buffer_usedlength(session->pending_write_data);
INSIST(total > 0);
isc_buffer_usedregion(session->pending_write_data, ®ion);
INSIST(total == region.length);
} else {
- /* The other cases are, uninteresting, fall-through ones. */
- /* In the following cases (4-6) we will just bail out. */
- /* Case 4: There is nothing new to send, nor anything in the
- * write buffer. */
- /* Case 5: There is nothing new to send and there is write
- * request(s) in flight. */
- /* Case 6: There is nothing new to send nor there are any
- * write requests in flight. */
-
- /* Case 7: There is some new data to send and there are no any
- * write requests in flight: Let's send the data.*/
+ /*
+ * The other cases are uninteresting, fall-through ones.
+ * In the following cases (4-6) we will just bail out:
+ *
+ * Case 4: There is nothing new to send, nor anything in the
+ * write buffer.
+ * Case 5: There is nothing new to send and there are write
+ * request(s) in flight.
+ * Case 6: There is nothing new to send nor are there any
+ * write requests in flight.
+ *
+ * Case 7: There is some new data to send and there are no
+ * write requests in flight: Let's send the data.
+ */
INSIST((total == 0 && session->pending_write_data == NULL) ||
(total == 0 && session->sending > 0) ||
(total == 0 && session->sending == 0) ||
goto nothing_to_send;
}
- /* If we have reached the point it means that we need to send some
- * data and flush the outgoing buffer. The code below does that. */
+ /*
+ * If we have reached this point it means that we need to send some
+ * data and flush the outgoing buffer. The code below does that.
+ */
send = isc_mem_get(session->mctx, sizeof(*send));
*send = (isc_http_send_req_t){ .pending_write_data =
isc_buffer_usedregion(send->pending_write_data, &send_data);
isc_nm_send(transphandle, &send_data, http_writecb, send);
return (true);
+
nothing_to_send:
isc_nmhandle_detach(&transphandle);
return (false);
REQUIRE(sock->connect_cb != NULL);
if (result == ISC_R_SUCCESS) {
- req = isc__nm_uvreq_get(sock->mgr, sock);
+ req = isc__nm_uvreq_get(sock->worker, sock);
req->cb.connect = sock->connect_cb;
req->cbarg = sock->connect_cbarg;
if (session != NULL) {
REQUIRE(VALID_NMSOCK(transp_sock));
- mctx = transp_sock->mgr->mctx;
+ mctx = transp_sock->worker->mctx;
INSIST(http_sock->h2.connect.uri != NULL);
- http_sock->tid = transp_sock->tid;
http_sock->h2.connect.tls_peer_verify_string =
isc_nm_verify_tls_peer_result_string(handle);
if (result != ISC_R_SUCCESS) {
http_call_connect_cb(http_sock, session, result);
if (http_sock->h2.connect.uri != NULL) {
- isc_mem_free(mctx, http_sock->h2.connect.uri);
+ isc_mem_free(http_sock->worker->mctx,
+ http_sock->h2.connect.uri);
}
isc__nmsocket_prep_destroy(http_sock);
unsigned int timeout) {
isc_sockaddr_t local_interface;
isc_nmsocket_t *sock = NULL;
+ isc__networker_t *worker = &mgr->workers[isc_tid()];
REQUIRE(VALID_NM(mgr));
REQUIRE(cb != NULL);
local = &local_interface;
}
- sock = isc_mem_get(mgr->mctx, sizeof(*sock));
- isc__nmsocket_init(sock, mgr, isc_nm_httpsocket, local);
+ sock = isc_mem_get(worker->mctx, sizeof(*sock));
+ isc__nmsocket_init(sock, worker, isc_nm_httpsocket, local);
sock->connect_timeout = timeout;
- sock->result = ISC_R_UNSET;
sock->connect_cb = cb;
sock->connect_cbarg = cbarg;
atomic_init(&sock->client, true);
- if (isc__nm_closing(sock)) {
- isc__nm_uvreq_t *req = isc__nm_uvreq_get(mgr, sock);
+ if (isc__nm_closing(worker)) {
+ isc__nm_uvreq_t *req = isc__nm_uvreq_get(worker, sock);
req->cb.connect = cb;
req->cbarg = cbarg;
req->local = *local;
req->handle = isc__nmhandle_get(sock, &req->peer, &sock->iface);
- if (isc__nm_in_netthread()) {
- sock->tid = isc_nm_tid();
- }
-
isc__nmsocket_clearcb(sock);
isc__nm_connectcb(sock, req, ISC_R_SHUTTINGDOWN, true);
isc__nmsocket_prep_destroy(sock);
return;
}
- sock->h2 = (isc_nmsocket_h2_t){ .connect.uri = isc_mem_strdup(mgr->mctx,
- uri),
+ sock->h2 = (isc_nmsocket_h2_t){ .connect.uri = isc_mem_strdup(
+ sock->worker->mctx, uri),
.connect.post = post,
.connect.tlsctx = tlsctx };
ISC_LINK_INIT(&sock->h2, link);
client_send(isc_nmhandle_t *handle, const isc_region_t *region) {
isc_result_t result = ISC_R_SUCCESS;
isc_nmsocket_t *sock = handle->sock;
- isc_mem_t *mctx = sock->mgr->mctx;
+ isc_mem_t *mctx = sock->worker->mctx;
isc_nm_http_session_t *session = sock->h2.session;
http_cstream_t *cstream = sock->h2.connect.cstream;
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
- REQUIRE(handle->sock->tid == isc_nm_tid());
+ REQUIRE(handle->sock->tid == isc_tid());
REQUIRE(atomic_load(&handle->sock->client));
REQUIRE(cb != NULL);
const nghttp2_frame *frame, void *user_data) {
isc_nm_http_session_t *session = (isc_nm_http_session_t *)user_data;
isc_nmsocket_t *socket = NULL;
+ isc__networker_t *worker = NULL;
if (frame->hd.type != NGHTTP2_HEADERS ||
frame->headers.cat != NGHTTP2_HCAT_REQUEST)
return (NGHTTP2_ERR_CALLBACK_FAILURE);
}
- socket = isc_mem_get(session->mctx, sizeof(isc_nmsocket_t));
- isc__nmsocket_init(socket, session->serversocket->mgr,
- isc_nm_httpsocket,
+ INSIST(session->handle->sock->tid == isc_tid());
+
+ worker = session->handle->sock->worker;
+ socket = isc_mem_get(worker->mctx, sizeof(isc_nmsocket_t));
+ isc__nmsocket_init(socket, worker, isc_nm_httpsocket,
(isc_sockaddr_t *)&session->handle->sock->iface);
socket->peer = session->handle->sock->peer;
socket->h2 = (isc_nmsocket_h2_t){
isc_buffer_initnull(&socket->h2.wbuf);
session->nsstreams++;
isc__nm_httpsession_attach(session, &socket->h2.session);
- socket->tid = session->handle->sock->tid;
ISC_LINK_INIT(&socket->h2, link);
ISC_LIST_APPEND(session->sstreams, &socket->h2, link);
}
if (socket->h2.request_path != NULL) {
- isc_mem_free(socket->mgr->mctx, socket->h2.request_path);
+ isc_mem_free(socket->worker->mctx, socket->h2.request_path);
}
socket->h2.request_path = isc_mem_strndup(
- socket->mgr->mctx, (const char *)value, vlen + 1);
+ socket->worker->mctx, (const char *)value, vlen + 1);
if (!isc_nm_http_path_isvalid(socket->h2.request_path)) {
- isc_mem_free(socket->mgr->mctx, socket->h2.request_path);
+ isc_mem_free(socket->worker->mctx, socket->h2.request_path);
socket->h2.request_path = NULL;
return (ISC_HTTP_ERROR_BAD_REQUEST);
}
socket->h2.cb = handler->cb;
socket->h2.cbarg = handler->cbarg;
} else {
- isc_mem_free(socket->mgr->mctx, socket->h2.request_path);
+ isc_mem_free(socket->worker->mctx, socket->h2.request_path);
socket->h2.request_path = NULL;
return (ISC_HTTP_ERROR_NOT_FOUND);
}
const size_t decoded_size = dns_value_len / 4 * 3;
if (decoded_size <= MAX_DNS_MESSAGE_SIZE) {
if (socket->h2.query_data != NULL) {
- isc_mem_free(socket->mgr->mctx,
+ isc_mem_free(socket->worker->mctx,
socket->h2.query_data);
}
socket->h2.query_data =
isc__nm_base64url_to_base64(
- socket->mgr->mctx, dns_value,
+ socket->worker->mctx, dns_value,
dns_value_len,
&socket->h2.query_data_len);
} else {
sock = handle->sock;
REQUIRE(VALID_NMSOCK(sock));
+ REQUIRE(sock->tid == isc_tid());
- uvreq = isc__nm_uvreq_get(sock->mgr, sock);
+ uvreq = isc__nm_uvreq_get(sock->worker, sock);
isc_nmhandle_attach(handle, &uvreq->handle);
uvreq->cb.send = cb;
uvreq->cbarg = cbarg;
uvreq->uvbuf.base = (char *)region->base;
uvreq->uvbuf.len = region->length;
- ievent = isc__nm_get_netievent_httpsend(sock->mgr, sock, uvreq);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_httpsend(sock->worker, sock, uvreq);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
static void
return;
}
- INSIST(handle->httpsession->handle->sock->tid == isc_nm_tid());
+ INSIST(handle->sock->tid == isc_tid());
INSIST(VALID_NMHANDLE(handle->httpsession->handle));
INSIST(VALID_NMSOCK(handle->httpsession->handle->sock));
http_transpost_tcp_nodelay(handle);
- new_session(httplistensock->mgr->mctx, NULL, &session);
+ new_session(handle->sock->worker->mctx, NULL, &session);
session->max_concurrent_streams =
atomic_load(&httplistensock->h2.max_concurrent_streams);
initialize_nghttp2_server_session(session);
isc_nmsocket_t **sockp) {
isc_nmsocket_t *sock = NULL;
isc_result_t result;
+ isc__networker_t *worker = &mgr->workers[isc_tid()];
REQUIRE(!ISC_LIST_EMPTY(eps->handlers));
REQUIRE(!ISC_LIST_EMPTY(eps->handler_cbargs));
REQUIRE(atomic_load(&eps->in_use) == false);
+ REQUIRE(isc_tid() == 0);
- sock = isc_mem_get(mgr->mctx, sizeof(*sock));
- isc__nmsocket_init(sock, mgr, isc_nm_httplistener, iface);
+ sock = isc_mem_get(worker->mctx, sizeof(*sock));
+ isc__nmsocket_init(sock, worker, isc_nm_httplistener, iface);
atomic_init(&sock->h2.max_concurrent_streams,
NGHTTP2_INITIAL_MAX_CONCURRENT_STREAMS);
isc__nmsocket_attach(sock, &sock->outer->h2.httpserver);
sock->nchildren = sock->outer->nchildren;
- sock->result = ISC_R_UNSET;
- sock->tid = 0;
sock->fd = (uv_os_sock_t)-1;
atomic_store(&sock->listening, true);
UNREACHABLE();
}
- if (!isc__nm_in_netthread()) {
- isc__netievent_httpstop_t *ievent =
- isc__nm_get_netievent_httpstop(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
- } else {
- REQUIRE(isc_nm_tid() == sock->tid);
- isc__netievent_httpstop_t ievent = { .sock = sock };
- isc__nm_async_httpstop(NULL, (isc__netievent_t *)&ievent);
- }
+ REQUIRE(isc_tid() == sock->tid);
+ isc__netievent_httpstop_t ievent = { .sock = sock };
+ isc__nm_async_httpstop(NULL, (isc__netievent_t *)&ievent);
}
void
}
if (sock->h2.session != NULL && sock->h2.session->closed &&
- sock->tid == isc_nm_tid())
+ sock->tid == isc_tid())
{
isc__nm_httpsession_detach(&sock->h2.session);
destroy = true;
- } else if (sock->h2.session == NULL && sock->tid == isc_nm_tid()) {
+ } else if (sock->h2.session == NULL && sock->tid == isc_tid()) {
destroy = true;
}
}
isc__netievent_httpclose_t *ievent =
- isc__nm_get_netievent_httpclose(sock->mgr, sock);
+ isc__nm_get_netievent_httpclose(sock->worker, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
isc_nmsocket_t *sock = ievent->sock;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
UNUSED(worker);
void
isc_nm_http_set_endpoints(isc_nmsocket_t *listener,
isc_nm_http_endpoints_t *eps) {
+ isc_loopmgr_t *loopmgr = NULL;
+
REQUIRE(VALID_NMSOCK(listener));
REQUIRE(listener->type == isc_nm_httplistener);
REQUIRE(VALID_HTTP_ENDPOINTS(eps));
+ loopmgr = listener->worker->netmgr->loopmgr;
+
atomic_store(&eps->in_use, true);
- for (size_t i = 0; i < isc_nm_getnworkers(listener->mgr); i++) {
+ for (size_t i = 0; i < isc_loopmgr_nloops(loopmgr); i++) {
isc__netievent__http_eps_t *ievent =
- isc__nm_get_netievent_httpendpoints(listener->mgr,
- listener, eps);
- isc__nm_enqueue_ievent(&listener->mgr->workers[i],
+ isc__nm_get_netievent_httpendpoints(
+ &listener->worker->netmgr->workers[i], listener,
+ eps);
+ isc__nm_enqueue_ievent(&listener->worker->netmgr->workers[i],
(isc__netievent_t *)ievent);
}
}
void
isc__nm_async_httpendpoints(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__netievent__http_eps_t *ievent = (isc__netievent__http_eps_t *)ev0;
- const int tid = isc_nm_tid();
+ const int tid = isc_tid();
isc_nmsocket_t *listener = ievent->sock;
isc_nm_http_endpoints_t *eps = ievent->endpoints;
+
UNUSED(worker);
isc_nm_http_endpoints_detach(&listener->h2.listener_endpoints[tid]);
http_init_listener_endpoints(isc_nmsocket_t *listener,
isc_nm_http_endpoints_t *epset) {
size_t nworkers;
+ isc_loopmgr_t *loopmgr = NULL;
REQUIRE(VALID_NMSOCK(listener));
- REQUIRE(VALID_NM(listener->mgr));
+ REQUIRE(listener->worker != NULL && VALID_NM(listener->worker->netmgr));
REQUIRE(VALID_HTTP_ENDPOINTS(epset));
- nworkers = (size_t)isc_nm_getnworkers(listener->mgr);
+ loopmgr = listener->worker->netmgr->loopmgr;
+ nworkers = (size_t)isc_loopmgr_nloops(loopmgr);
INSIST(nworkers > 0);
listener->h2.listener_endpoints =
- isc_mem_get(listener->mgr->mctx,
+ isc_mem_get(listener->worker->mctx,
sizeof(isc_nm_http_endpoints_t *) * nworkers);
listener->h2.n_listener_endpoints = nworkers;
for (size_t i = 0; i < nworkers; i++) {
static void
http_cleanup_listener_endpoints(isc_nmsocket_t *listener) {
- REQUIRE(VALID_NM(listener->mgr));
+ REQUIRE(listener->worker != NULL && VALID_NM(listener->worker->netmgr));
if (listener->h2.listener_endpoints == NULL) {
return;
isc_nm_http_endpoints_detach(
&listener->h2.listener_endpoints[i]);
}
- isc_mem_put(listener->mgr->mctx, listener->h2.listener_endpoints,
+ isc_mem_put(listener->worker->mctx, listener->h2.listener_endpoints,
sizeof(isc_nm_http_endpoints_t *) *
listener->h2.n_listener_endpoints);
listener->h2.n_listener_endpoints = 0;
}
if (sock->h2.request_path != NULL) {
- isc_mem_free(sock->mgr->mctx, sock->h2.request_path);
+ isc_mem_free(sock->worker->mctx, sock->h2.request_path);
sock->h2.request_path = NULL;
}
if (sock->h2.query_data != NULL) {
- isc_mem_free(sock->mgr->mctx, sock->h2.query_data);
+ isc_mem_free(sock->worker->mctx, sock->h2.query_data);
sock->h2.query_data = NULL;
}
if (isc_buffer_base(&sock->h2.rbuf) != NULL) {
void *base = isc_buffer_base(&sock->h2.rbuf);
- isc_mem_free(sock->mgr->mctx, base);
+ isc_mem_free(sock->worker->mctx, base);
isc_buffer_initnull(&sock->h2.rbuf);
}
}
sock->h2.session != NULL)
{
if (sock->h2.connect.uri != NULL) {
- isc_mem_free(sock->mgr->mctx, sock->h2.connect.uri);
+ isc_mem_free(sock->worker->mctx, sock->h2.connect.uri);
sock->h2.connect.uri = NULL;
}
isc__nm_httpsession_detach(&sock->h2.session);
#include <isc/sockaddr.h>
#include <isc/stats.h>
#include <isc/thread.h>
+#include <isc/tid.h>
#include <isc/tls.h>
#include <isc/util.h>
#include <isc/uv.h>
-/* Must be different from ISC_NETMGR_TID_UNKNOWN */
-#define ISC_NETMGR_NON_INTERLOCKED -2
+#include "../loop_p.h"
+
+#define ISC_NETMGR_TID_UNKNOWN -1
/*
* Receive buffers
#ifdef NETMGR_TRACE_VERBOSE
#define NETMGR_TRACE_LOG(format, ...) \
fprintf(stderr, "%" PRIu32 ":%d:%s:%u:%s:" format, gettid(), \
- isc_nm_tid(), file, line, func, __VA_ARGS__)
+ isc_tid(), file, line, func, __VA_ARGS__)
#else
#define NETMGR_TRACE_LOG(format, ...) \
(void)file; \
#define isc__nmsocket_prep_destroy(sock) isc___nmsocket_prep_destroy(sock)
#endif
-/*
- * Queue types in the order of processing priority.
- */
-typedef enum {
- NETIEVENT_PRIORITY = 0,
- NETIEVENT_TASK = 1,
- NETIEVENT_NORMAL = 2,
- NETIEVENT_MAX = 3,
-} netievent_type_t;
-
typedef struct isc__nm_uvreq isc__nm_uvreq_t;
typedef struct isc__netievent isc__netievent_t;
-typedef ISC_LIST(isc__netievent_t) isc__netievent_list_t;
-
-typedef struct ievent {
- isc_mutex_t lock;
- isc_condition_t cond;
- isc__netievent_list_t list;
-} ievent_t;
-
/*
* Single network event loop worker.
*/
typedef struct isc__networker {
- isc_nm_t *mgr;
- int id; /* thread id */
- uv_loop_t loop; /* libuv loop structure */
- uv_async_t async; /* async channel to send
- * data to this networker */
- bool paused;
- bool finished;
- isc_thread_t thread;
- ievent_t ievents[NETIEVENT_MAX];
-
+ isc_mem_t *mctx;
isc_refcount_t references;
- atomic_int_fast64_t pktcount;
+ isc_loop_t *loop;
+ isc_nm_t *netmgr;
+ bool shuttingdown;
+
char *recvbuf;
char *sendbuf;
bool recvbuf_inuse;
} isc__networker_t;
+ISC_REFCOUNT_DECL(isc__networker);
+
/*
* A general handle for a connection bound to a networker. For UDP
* connections we have peer address here, so both TCP and UDP can be
};
typedef enum isc__netievent_type {
- netievent_udpconnect,
- netievent_udpclose,
- netievent_udpsend,
- netievent_udpread,
netievent_udpcancel,
- netievent_routeconnect,
-
netievent_tcpconnect,
netievent_tcpclose,
netievent_tcpsend,
netievent_httpsend,
netievent_httpendpoints,
- netievent_shutdown,
- netievent_stop,
- netievent_pause,
-
netievent_connectcb,
netievent_readcb,
netievent_sendcb,
- netievent_task,
-
netievent_settlsctx,
- /*
- * event type values higher than this will be treated
- * as high-priority events, which can be processed
- * while the netmgr is pausing or paused.
- */
- netievent_prio = 0xff,
-
netievent_udplisten,
netievent_udpstop,
+ netievent_udpread,
+
netievent_tcplisten,
netievent_tcpstop,
netievent_tcpdnslisten,
netievent_tlsdnsstop,
netievent_httpstop,
- netievent_resume,
netievent_detach,
- netievent_close,
} isc__netievent_type;
typedef union {
uv_connect_t connect;
uv_udp_send_t udp_send;
uv_fs_t fs;
- uv_work_t work;
} uv_req;
ISC_LINK(isc__nm_uvreq_t) link;
};
void *
-isc__nm_get_netievent(isc_nm_t *mgr, isc__netievent_type type);
+isc__nm_get_netievent(isc__networker_t *worker, isc__netievent_type type);
/*%<
* Allocate an ievent and set the type.
*/
void
-isc__nm_put_netievent(isc_nm_t *mgr, void *ievent);
+isc__nm_put_netievent(isc__networker_t *worker, void *ievent);
/*
* The macros here are used to simulate the "inheritance" in C, there's the base
#define NETIEVENT__SOCKET \
isc__netievent_type type; \
ISC_LINK(isc__netievent_t) link; \
+ isc__networker_t *worker; \
isc_nmsocket_t *sock; \
const char *file; \
unsigned int line; \
#define NETIEVENT_SOCKET_TYPE(type) \
typedef isc__netievent__socket_t isc__netievent_##type##_t;
-#define NETIEVENT_SOCKET_DECL(type) \
- isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock); \
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+#define NETIEVENT_SOCKET_DECL(type) \
+ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
+ isc__networker_t *worker, isc_nmsocket_t *sock); \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent);
#define NETIEVENT_SOCKET_DEF(type) \
isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock) { \
+ isc__networker_t *worker, isc_nmsocket_t *sock) { \
isc__netievent_##type##_t *ievent = \
- isc__nm_get_netievent(nm, netievent_##type); \
+ isc__nm_get_netievent(worker, netievent_##type); \
isc__nmsocket_attach(sock, &ievent->sock); \
\
return (ievent); \
} \
\
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent) { \
isc__nmsocket_detach(&ievent->sock); \
- isc__nm_put_netievent(nm, ievent); \
+ isc__nm_put_netievent(worker, ievent); \
}
typedef struct isc__netievent__socket_req {
#define NETIEVENT_SOCKET_REQ_TYPE(type) \
typedef isc__netievent__socket_req_t isc__netievent_##type##_t;
-#define NETIEVENT_SOCKET_REQ_DECL(type) \
- isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req); \
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+#define NETIEVENT_SOCKET_REQ_DECL(type) \
+ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc__nm_uvreq_t *req); \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent);
#define NETIEVENT_SOCKET_REQ_DEF(type) \
isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc__nm_uvreq_t *req) { \
isc__netievent_##type##_t *ievent = \
- isc__nm_get_netievent(nm, netievent_##type); \
+ isc__nm_get_netievent(worker, netievent_##type); \
isc__nmsocket_attach(sock, &ievent->sock); \
ievent->req = req; \
\
return (ievent); \
} \
\
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent) { \
isc__nmsocket_detach(&ievent->sock); \
- isc__nm_put_netievent(nm, ievent); \
+ isc__nm_put_netievent(worker, ievent); \
}
typedef struct isc__netievent__socket_req_result {
#define NETIEVENT_SOCKET_REQ_RESULT_TYPE(type) \
typedef isc__netievent__socket_req_result_t isc__netievent_##type##_t;
-#define NETIEVENT_SOCKET_REQ_RESULT_DECL(type) \
- isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req, \
- isc_result_t result); \
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+#define NETIEVENT_SOCKET_REQ_RESULT_DECL(type) \
+ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc__nm_uvreq_t *req, isc_result_t result); \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent);
#define NETIEVENT_SOCKET_REQ_RESULT_DEF(type) \
isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req, \
- isc_result_t result) { \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc__nm_uvreq_t *req, isc_result_t result) { \
isc__netievent_##type##_t *ievent = \
- isc__nm_get_netievent(nm, netievent_##type); \
+ isc__nm_get_netievent(worker, netievent_##type); \
isc__nmsocket_attach(sock, &ievent->sock); \
ievent->req = req; \
ievent->result = result; \
return (ievent); \
} \
\
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent) { \
isc__nmsocket_detach(&ievent->sock); \
- isc__nm_put_netievent(nm, ievent); \
+ isc__nm_put_netievent(worker, ievent); \
}
typedef struct isc__netievent__socket_handle {
#define NETIEVENT_SOCKET_HANDLE_TYPE(type) \
typedef isc__netievent__socket_handle_t isc__netievent_##type##_t;
-#define NETIEVENT_SOCKET_HANDLE_DECL(type) \
- isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, isc_nmhandle_t *handle); \
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+#define NETIEVENT_SOCKET_HANDLE_DECL(type) \
+ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc_nmhandle_t *handle); \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent);
#define NETIEVENT_SOCKET_HANDLE_DEF(type) \
isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, isc_nmhandle_t *handle) { \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc_nmhandle_t *handle) { \
isc__netievent_##type##_t *ievent = \
- isc__nm_get_netievent(nm, netievent_##type); \
+ isc__nm_get_netievent(worker, netievent_##type); \
isc__nmsocket_attach(sock, &ievent->sock); \
isc_nmhandle_attach(handle, &ievent->handle); \
\
return (ievent); \
} \
\
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent) { \
isc__nmsocket_detach(&ievent->sock); \
isc_nmhandle_detach(&ievent->handle); \
- isc__nm_put_netievent(nm, ievent); \
+ isc__nm_put_netievent(worker, ievent); \
}
typedef struct isc__netievent__socket_quota {
#define NETIEVENT_SOCKET_QUOTA_TYPE(type) \
typedef isc__netievent__socket_quota_t isc__netievent_##type##_t;
-#define NETIEVENT_SOCKET_QUOTA_DECL(type) \
- isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, isc_quota_t *quota); \
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+#define NETIEVENT_SOCKET_QUOTA_DECL(type) \
+ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc_quota_t *quota); \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent);
#define NETIEVENT_SOCKET_QUOTA_DEF(type) \
isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, isc_quota_t *quota) { \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc_quota_t *quota) { \
isc__netievent_##type##_t *ievent = \
- isc__nm_get_netievent(nm, netievent_##type); \
+ isc__nm_get_netievent(worker, netievent_##type); \
isc__nmsocket_attach(sock, &ievent->sock); \
ievent->quota = quota; \
\
return (ievent); \
} \
\
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent) { \
isc__nmsocket_detach(&ievent->sock); \
- isc__nm_put_netievent(nm, ievent); \
- }
-
-typedef struct isc__netievent__task {
- isc__netievent_type type;
- ISC_LINK(isc__netievent_t) link;
- isc_task_t *task;
-} isc__netievent__task_t;
-
-#define NETIEVENT_TASK_TYPE(type) \
- typedef isc__netievent__task_t isc__netievent_##type##_t;
-
-#define NETIEVENT_TASK_DECL(type) \
- isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_task_t *task); \
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
- isc__netievent_##type##_t *ievent);
-
-#define NETIEVENT_TASK_DEF(type) \
- isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_task_t *task) { \
- isc__netievent_##type##_t *ievent = \
- isc__nm_get_netievent(nm, netievent_##type); \
- ievent->task = task; \
- \
- return (ievent); \
- } \
- \
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
- isc__netievent_##type##_t *ievent) { \
- ievent->task = NULL; \
- isc__nm_put_netievent(nm, ievent); \
+ isc__nm_put_netievent(worker, ievent); \
}
-typedef struct isc__netievent_udpsend {
- NETIEVENT__SOCKET;
- isc_sockaddr_t peer;
- isc__nm_uvreq_t *req;
-} isc__netievent_udpsend_t;
-
typedef struct isc__netievent_tlsconnect {
NETIEVENT__SOCKET;
SSL_CTX *ctx;
typedef struct isc__netievent {
isc__netievent_type type;
ISC_LINK(isc__netievent_t) link;
+ isc__networker_t *worker;
} isc__netievent_t;
#define NETIEVENT_TYPE(type) typedef isc__netievent_t isc__netievent_##type##_t;
-#define NETIEVENT_DECL(type) \
- isc__netievent_##type##_t *isc__nm_get_netievent_##type(isc_nm_t *nm); \
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+#define NETIEVENT_DECL(type) \
+ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
+ isc__networker_t *worker); \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent);
#define NETIEVENT_DEF(type) \
isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm) { \
+ isc__networker_t *worker) { \
isc__netievent_##type##_t *ievent = \
- isc__nm_get_netievent(nm, netievent_##type); \
+ isc__nm_get_netievent(worker, netievent_##type); \
\
return (ievent); \
} \
\
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent) { \
- isc__nm_put_netievent(nm, ievent); \
+ isc__nm_put_netievent(worker, ievent); \
}
typedef struct isc__netievent__tlsctx {
#define NETIEVENT_SOCKET_TLSCTX_TYPE(type) \
typedef isc__netievent__tlsctx_t isc__netievent_##type##_t;
-#define NETIEVENT_SOCKET_TLSCTX_DECL(type) \
- isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, isc_tlsctx_t *tlsctx); \
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+#define NETIEVENT_SOCKET_TLSCTX_DECL(type) \
+ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc_tlsctx_t *tlsctx); \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent);
#define NETIEVENT_SOCKET_TLSCTX_DEF(type) \
isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, isc_tlsctx_t *tlsctx) { \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc_tlsctx_t *tlsctx) { \
isc__netievent_##type##_t *ievent = \
- isc__nm_get_netievent(nm, netievent_##type); \
+ isc__nm_get_netievent(worker, netievent_##type); \
isc__nmsocket_attach(sock, &ievent->sock); \
isc_tlsctx_attach(tlsctx, &ievent->tlsctx); \
\
return (ievent); \
} \
\
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent) { \
isc_tlsctx_free(&ievent->tlsctx); \
isc__nmsocket_detach(&ievent->sock); \
- isc__nm_put_netievent(nm, ievent); \
+ isc__nm_put_netievent(worker, ievent); \
}
#ifdef HAVE_LIBNGHTTP2
#define NETIEVENT_SOCKET_HTTP_EPS_TYPE(type) \
typedef isc__netievent__http_eps_t isc__netievent_##type##_t;
-#define NETIEVENT_SOCKET_HTTP_EPS_DECL(type) \
- isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, \
- isc_nm_http_endpoints_t *endpoints); \
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+#define NETIEVENT_SOCKET_HTTP_EPS_DECL(type) \
+ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
+ isc_nm_http_endpoints_t *endpoints); \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent);
#define NETIEVENT_SOCKET_HTTP_EPS_DEF(type) \
isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
- isc_nm_t *nm, isc_nmsocket_t *sock, \
+ isc__networker_t *worker, isc_nmsocket_t *sock, \
isc_nm_http_endpoints_t *endpoints) { \
isc__netievent_##type##_t *ievent = \
- isc__nm_get_netievent(nm, netievent_##type); \
+ isc__nm_get_netievent(worker, netievent_##type); \
isc__nmsocket_attach(sock, &ievent->sock); \
isc_nm_http_endpoints_attach(endpoints, &ievent->endpoints); \
\
return (ievent); \
} \
\
- void isc__nm_put_netievent_##type(isc_nm_t *nm, \
+ void isc__nm_put_netievent_##type(isc__networker_t *worker, \
isc__netievent_##type##_t *ievent) { \
isc_nm_http_endpoints_detach(&ievent->endpoints); \
isc__nmsocket_detach(&ievent->sock); \
- isc__nm_put_netievent(nm, ievent); \
+ isc__nm_put_netievent(worker, ievent); \
}
#endif /* HAVE_LIBNGHTTP2 */
isc__netievent_t ni;
isc__netievent__socket_t nis;
isc__netievent__socket_req_t nisr;
- isc__netievent_udpsend_t nius;
isc__netievent__socket_quota_t nisq;
isc__netievent_tlsconnect_t nitc;
isc__netievent__tlsctx_t nitls;
#endif /* HAVE_LIBNGHTTP2 */
} isc__netievent_storage_t;
-/*
- * Work item for a uv_work threadpool.
- */
-typedef struct isc__nm_work {
- isc_nm_t *netmgr;
- uv_work_t req;
- isc_nm_workcb_t cb;
- isc_nm_after_workcb_t after_cb;
- void *data;
-} isc__nm_work_t;
-
/*
* Network manager
*/
int magic;
isc_refcount_t references;
isc_mem_t *mctx;
- int nworkers;
+ isc_loopmgr_t *loopmgr;
+ uint32_t nloops;
isc_mutex_t lock;
- isc_condition_t wkstatecond;
- isc_condition_t wkpausecond;
isc__networker_t *workers;
isc_stats_t *stats;
- uint_fast32_t workers_running;
- atomic_uint_fast32_t workers_paused;
atomic_uint_fast32_t maxudp;
bool load_balance_sockets;
- atomic_bool paused;
-
/*
* Active connections are being closed and new connections are
* no longer allowed.
*/
- atomic_bool closing;
-
- /*
- * A worker is actively waiting for other workers, for example to
- * stop listening; that means no other thread can do the same thing
- * or pause, or we'll deadlock. We have to either re-enqueue our
- * event or wait for the other one to finish if we want to pause.
- */
- atomic_int interlocked;
+ atomic_bool shuttingdown;
/*
* Timeout values for TCP connections, corresponding to
atomic_uint_fast32_t keepalive;
atomic_uint_fast32_t advertised;
- isc_barrier_t pausing;
- isc_barrier_t resuming;
-
/*
* Socket SO_RCVBUF and SO_SNDBUF values
*/
struct isc_nmsocket {
/*% Unlocked, RO */
int magic;
- int tid;
+ uint32_t tid;
isc_nmsocket_type type;
- isc_nm_t *mgr;
+ isc__networker_t *worker;
+
+ isc_mutex_t lock;
+ isc_barrier_t barrier;
/*% Parent socket for multithreaded listeners */
isc_nmsocket_t *parent;
/*% Self socket */
isc_nmsocket_t *self;
- isc_barrier_t startlistening;
- isc_barrier_t stoplistening;
-
/*% TLS stuff */
struct tls {
isc_tls_t *tls;
isc_astack_t *inactivehandles;
isc_astack_t *inactivereqs;
- /*%
- * Used to wait for TCP listening events to complete, and
- * for the number of running children to reach zero during
- * shutdown.
- *
- * We use two condition variables to prevent the race where the netmgr
- * threads would be able to finish and destroy the socket before it's
- * unlocked by the isc_nm_listen<proto>() function. So, the flow is as
- * follows:
- *
- * 1. parent thread creates all children sockets and passes then to
- * netthreads, looks at the signaling variable and WAIT(cond) until
- * the childrens are done initializing
- *
- * 2. the events get picked by netthreads, calls the libuv API (and
- * either succeeds or fails) and WAIT(scond) until all other
- * children sockets in netthreads are initialized and the listening
- * socket lock is unlocked
- *
- * 3. the control is given back to the parent thread which now either
- * returns success or shutdowns the listener if an error has
- * occured in the children netthread
- *
- * NOTE: The other approach would be doing an extra attach to the parent
- * listening socket, and then detach it in the parent thread, but that
- * breaks the promise that once the libuv socket is initialized on the
- * nmsocket, the nmsocket needs to be handled only by matching
- * netthread, so in fact that would add a complexity in a way that
- * isc__nmsocket_detach would have to be converted to use an
- * asynchrounous netievent.
- */
- isc_mutex_t lock;
- isc_condition_t cond;
- isc_condition_t scond;
-
/*%
* Used to pass a result back from listen or connect events.
*/
int backtrace_size;
LINK(isc_nmsocket_t) active_link;
ISC_LIST(isc_nmhandle_t) active_handles;
+ isc_mutex_t tracelock;
#endif
};
-bool
-isc__nm_in_netthread(void);
+void
+isc__nm_process_ievent(isc__networker_t *worker, isc__netievent_t *event);
/*%<
- * Returns 'true' if we're in the network thread.
+ * If the call knows it's in the matching loop, process the netievent directly.
*/
void
*/
isc__nm_uvreq_t *
-isc___nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock FLARG);
+isc___nm_uvreq_get(isc__networker_t *worker, isc_nmsocket_t *sock FLARG);
/*%<
* Get a UV request structure for the socket 'sock', allocating a
* new one if there isn't one available in 'sock->inactivereqs'.
*/
void
-isc___nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type,
- isc_sockaddr_t *iface FLARG);
+isc___nmsocket_init(isc_nmsocket_t *sock, isc__networker_t *worker,
+ isc_nmsocket_type type, isc_sockaddr_t *iface FLARG);
/*%<
* Initialize socket 'sock', attach it to 'mgr', and set it to type 'type'
* and its interface to 'iface'.
void
isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0);
void
-isc__nm_async_udpconnect(isc__networker_t *worker, isc__netievent_t *ev0);
-void
isc__nm_async_udpstop(isc__networker_t *worker, isc__netievent_t *ev0);
void
-isc__nm_async_udpsend(isc__networker_t *worker, isc__netievent_t *ev0);
-void
-isc__nm_async_udpread(isc__networker_t *worker, isc__netievent_t *ev0);
-void
isc__nm_async_udpcancel(isc__networker_t *worker, isc__netievent_t *ev0);
void
-isc__nm_async_udpclose(isc__networker_t *worker, isc__netievent_t *ev0);
+isc__nm_async_udpread(isc__networker_t *worker, isc__netievent_t *ev0);
/*%<
* Callback handlers for asynchronous UDP events (listen, stoplisten, send).
*/
-void
-isc__nm_async_routeconnect(isc__networker_t *worker, isc__netievent_t *ev0);
-/*%<
- * Callback handler for route socket events.
- */
-
void
isc__nm_tcp_send(isc_nmhandle_t *handle, const isc_region_t *region,
isc_nm_cb_t cb, void *cbarg);
isc__nm_http_set_max_streams(isc_nmsocket_t *listener,
const uint32_t max_concurrent_streams);
-#endif
-
void
isc__nm_async_settlsctx(isc__networker_t *worker, isc__netievent_t *ev0);
-bool
-isc__nm_acquire_interlocked(isc_nm_t *mgr);
-/*%<
- * Try to acquire interlocked state; return true if successful.
- */
-
-void
-isc__nm_drop_interlocked(isc_nm_t *mgr);
-/*%<
- * Drop interlocked state; signal waiters.
- */
-
-void
-isc__nm_acquire_interlocked_force(isc_nm_t *mgr);
-/*%<
- * Actively wait for interlocked state.
- */
+#endif
void
isc__nm_incstats(isc_nmsocket_t *sock, isc__nm_statid_t id);
* typedef all the netievent types
*/
-NETIEVENT_SOCKET_TYPE(close);
NETIEVENT_SOCKET_TYPE(tcpclose);
NETIEVENT_SOCKET_TYPE(tcplisten);
NETIEVENT_SOCKET_TYPE(tcppauseread);
NETIEVENT_SOCKET_TYPE(tlsdobio);
NETIEVENT_SOCKET_TYPE(tlsstartread);
NETIEVENT_SOCKET_HANDLE_TYPE(tlscancel);
-NETIEVENT_SOCKET_TYPE(udpclose);
NETIEVENT_SOCKET_TYPE(udplisten);
-NETIEVENT_SOCKET_TYPE(udpread);
-/* NETIEVENT_SOCKET_TYPE(udpsend); */ /* unique type, defined independently */
NETIEVENT_SOCKET_TYPE(udpstop);
+NETIEVENT_SOCKET_TYPE(udpread);
NETIEVENT_SOCKET_TYPE(tcpdnsclose);
NETIEVENT_SOCKET_TYPE(tcpdnsread);
NETIEVENT_SOCKET_REQ_TYPE(tcpsend);
NETIEVENT_SOCKET_TYPE(tcpstartread);
NETIEVENT_SOCKET_REQ_TYPE(tlssend);
-NETIEVENT_SOCKET_REQ_TYPE(udpconnect);
-
-NETIEVENT_SOCKET_REQ_TYPE(routeconnect);
NETIEVENT_SOCKET_REQ_RESULT_TYPE(connectcb);
NETIEVENT_SOCKET_REQ_RESULT_TYPE(readcb);
NETIEVENT_SOCKET_QUOTA_TYPE(tcpaccept);
-NETIEVENT_TYPE(pause);
-NETIEVENT_TYPE(resume);
-NETIEVENT_TYPE(shutdown);
-NETIEVENT_TYPE(stop);
-
-NETIEVENT_TASK_TYPE(task);
-
NETIEVENT_SOCKET_TLSCTX_TYPE(settlsctx);
/* Now declared the helper functions */
-NETIEVENT_SOCKET_DECL(close);
NETIEVENT_SOCKET_DECL(tcpclose);
NETIEVENT_SOCKET_DECL(tcplisten);
NETIEVENT_SOCKET_DECL(tcppauseread);
NETIEVENT_SOCKET_DECL(tlsdobio);
NETIEVENT_SOCKET_DECL(tlsstartread);
NETIEVENT_SOCKET_HANDLE_DECL(tlscancel);
-NETIEVENT_SOCKET_DECL(udpclose);
NETIEVENT_SOCKET_DECL(udplisten);
-NETIEVENT_SOCKET_DECL(udpread);
-NETIEVENT_SOCKET_DECL(udpsend);
NETIEVENT_SOCKET_DECL(udpstop);
+NETIEVENT_SOCKET_DECL(udpread);
NETIEVENT_SOCKET_DECL(tcpdnsclose);
NETIEVENT_SOCKET_DECL(tcpdnsread);
NETIEVENT_SOCKET_REQ_DECL(tcpconnect);
NETIEVENT_SOCKET_REQ_DECL(tcpsend);
NETIEVENT_SOCKET_REQ_DECL(tlssend);
-NETIEVENT_SOCKET_REQ_DECL(udpconnect);
-
-NETIEVENT_SOCKET_REQ_DECL(routeconnect);
NETIEVENT_SOCKET_REQ_RESULT_DECL(connectcb);
NETIEVENT_SOCKET_REQ_RESULT_DECL(readcb);
NETIEVENT_SOCKET_REQ_RESULT_DECL(sendcb);
-NETIEVENT_SOCKET_HANDLE_DECL(udpcancel);
NETIEVENT_SOCKET_HANDLE_DECL(tcpcancel);
+NETIEVENT_SOCKET_HANDLE_DECL(udpcancel);
NETIEVENT_SOCKET_DECL(detach);
NETIEVENT_SOCKET_QUOTA_DECL(tcpaccept);
-NETIEVENT_DECL(pause);
-NETIEVENT_DECL(resume);
-NETIEVENT_DECL(shutdown);
-NETIEVENT_DECL(stop);
-
-NETIEVENT_TASK_DECL(task);
-
NETIEVENT_SOCKET_TLSCTX_DECL(settlsctx);
void
bool
isc__nmsocket_closing(isc_nmsocket_t *sock);
bool
-isc__nm_closing(isc_nmsocket_t *sock);
+isc__nm_closing(isc__networker_t *worker);
void
isc__nm_alloc_dnsbuf(isc_nmsocket_t *sock, size_t len);
* information regarding copyright ownership.
*/
+#include <assert.h>
#include <inttypes.h>
#include <unistd.h>
+#include <isc/async.h>
#include <isc/atomic.h>
#include <isc/backtrace.h>
#include <isc/barrier.h>
#include <isc/errno.h>
#include <isc/list.h>
#include <isc/log.h>
+#include <isc/loop.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
#include <isc/strerr.h>
#include <isc/task.h>
#include <isc/thread.h>
+#include <isc/tid.h>
#include <isc/tls.h>
#include <isc/util.h>
#include <isc/uv.h>
+#include "../loop_p.h"
#include "netmgr-int.h"
-#include "netmgr_p.h"
#include "openssl_shim.h"
#include "trampoline_p.h"
};
#endif /* if 0 */
-/*
- * libuv is not thread safe, but has mechanisms to pass messages
- * between threads. Each socket is owned by a thread. For UDP
- * sockets we have a set of sockets for each interface and we can
- * choose a sibling and send the message directly. For TCP, or if
- * we're calling from a non-networking thread, we need to pass the
- * request using async_cb.
- */
-
-static thread_local int isc__nm_tid_v = ISC_NETMGR_TID_UNKNOWN;
-
/*
* Set by the -T dscp option on the command line. If set to a value
* other than -1, we check to make sure DSCP values match it, and
nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG);
static void
nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle);
-static isc_threadresult_t
-nm_thread(isc_threadarg_t worker0);
-static void
-async_cb(uv_async_t *handle);
-static bool
-process_netievent(isc__networker_t *worker, isc__netievent_t *ievent);
-static isc_result_t
-process_queue(isc__networker_t *worker, netievent_type_t type);
static void
-wait_for_priority_queue(isc__networker_t *worker);
-static void
-drain_queue(isc__networker_t *worker, netievent_type_t type);
+process_netievent(void *arg);
-static void
-isc__nm_async_stop(isc__networker_t *worker, isc__netievent_t *ev0);
-static void
-isc__nm_async_pause(isc__networker_t *worker, isc__netievent_t *ev0);
-static void
-isc__nm_async_resume(isc__networker_t *worker, isc__netievent_t *ev0);
static void
isc__nm_async_detach(isc__networker_t *worker, isc__netievent_t *ev0);
-static void
-isc__nm_async_close(isc__networker_t *worker, isc__netievent_t *ev0);
-
-static void
-isc__nm_threadpool_initialize(uint32_t workers);
-static void
-isc__nm_work_cb(uv_work_t *req);
-static void
-isc__nm_after_work_cb(uv_work_t *req, int status);
/*%<
* Issue a 'handle closed' callback on the socket.
static void
nmhandle_detach_cb(isc_nmhandle_t **handlep FLARG);
-int
-isc_nm_tid(void) {
- return (isc__nm_tid_v);
-}
+static void
+shutdown_walk_cb(uv_handle_t *handle, void *arg);
-bool
-isc__nm_in_netthread(void) {
- return (isc__nm_tid_v >= 0);
-}
+static void
+networker_teardown(void *arg) {
+ isc__networker_t *worker = arg;
+ isc_loop_t *loop = worker->loop;
-void
-isc__nm_force_tid(int tid) {
- isc__nm_tid_v = tid;
+ worker->shuttingdown = true;
+
+ isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_NETMGR,
+ ISC_LOG_DEBUG(1),
+ "Shutting down network manager worker on loop %p(%d)",
+ loop, isc_tid());
+
+ uv_walk(&loop->loop, shutdown_walk_cb, NULL);
+
+ isc__networker_detach(&worker);
}
static void
-isc__nm_threadpool_initialize(uint32_t workers) {
- char buf[11];
- int r = uv_os_getenv("UV_THREADPOOL_SIZE", buf,
- &(size_t){ sizeof(buf) });
- if (r == UV_ENOENT) {
- snprintf(buf, sizeof(buf), "%" PRIu32, workers);
- uv_os_setenv("UV_THREADPOOL_SIZE", buf);
+netmgr_teardown(void *arg) {
+ isc_nm_t *netmgr = (void *)arg;
+
+ if (atomic_compare_exchange_strong(&netmgr->shuttingdown,
+ &(bool){ false }, true))
+ {
+ isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
+ ISC_LOGMODULE_NETMGR, ISC_LOG_DEBUG(1),
+ "Shutting down network manager");
}
}
#endif
void
-isc__netmgr_create(isc_mem_t *mctx, uint32_t workers, isc_nm_t **netmgrp) {
- isc_nm_t *mgr = NULL;
- char name[32];
-
- REQUIRE(workers > 0);
+isc_netmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, isc_nm_t **netmgrp) {
+ isc_nm_t *netmgr = NULL;
if (uv_version() < MINIMAL_UV_VERSION) {
isc_error_fatal(__FILE__, __LINE__,
uv_version_string(), UV_VERSION_STRING);
}
- isc__nm_threadpool_initialize(workers);
-
- mgr = isc_mem_get(mctx, sizeof(*mgr));
- *mgr = (isc_nm_t){ .nworkers = workers };
-
- isc_mem_attach(mctx, &mgr->mctx);
- isc_mutex_init(&mgr->lock);
- isc_condition_init(&mgr->wkstatecond);
- isc_condition_init(&mgr->wkpausecond);
- isc_refcount_init(&mgr->references, 1);
- atomic_init(&mgr->maxudp, 0);
- atomic_init(&mgr->interlocked, ISC_NETMGR_NON_INTERLOCKED);
- atomic_init(&mgr->workers_paused, 0);
- atomic_init(&mgr->paused, false);
- atomic_init(&mgr->closing, false);
- atomic_init(&mgr->recv_tcp_buffer_size, 0);
- atomic_init(&mgr->send_tcp_buffer_size, 0);
- atomic_init(&mgr->recv_udp_buffer_size, 0);
- atomic_init(&mgr->send_udp_buffer_size, 0);
+ netmgr = isc_mem_get(mctx, sizeof(*netmgr));
+ *netmgr = (isc_nm_t){
+ .loopmgr = loopmgr,
+ .nloops = isc_loopmgr_nloops(loopmgr),
+ };
+
+ isc_mem_attach(mctx, &netmgr->mctx);
+ isc_mutex_init(&netmgr->lock);
+ isc_refcount_init(&netmgr->references, 1);
+ atomic_init(&netmgr->maxudp, 0);
+ atomic_init(&netmgr->shuttingdown, false);
+ atomic_init(&netmgr->recv_tcp_buffer_size, 0);
+ atomic_init(&netmgr->send_tcp_buffer_size, 0);
+ atomic_init(&netmgr->recv_udp_buffer_size, 0);
+ atomic_init(&netmgr->send_udp_buffer_size, 0);
#if HAVE_SO_REUSEPORT_LB
- mgr->load_balance_sockets = true;
+ netmgr->load_balance_sockets = true;
#else
- mgr->load_balance_sockets = false;
+ netmgr->load_balance_sockets = false;
#endif
#ifdef NETMGR_TRACE
- ISC_LIST_INIT(mgr->active_sockets);
+ ISC_LIST_INIT(netmgr->active_sockets);
#endif
/*
* Default TCP timeout values.
* May be updated by isc_nm_tcptimeouts().
*/
- atomic_init(&mgr->init, 30000);
- atomic_init(&mgr->idle, 30000);
- atomic_init(&mgr->keepalive, 30000);
- atomic_init(&mgr->advertised, 30000);
+ atomic_init(&netmgr->init, 30000);
+ atomic_init(&netmgr->idle, 30000);
+ atomic_init(&netmgr->keepalive, 30000);
+ atomic_init(&netmgr->advertised, 30000);
- isc_barrier_init(&mgr->pausing, workers);
- isc_barrier_init(&mgr->resuming, workers);
+ netmgr->workers =
+ isc_mem_get(mctx, netmgr->nloops * sizeof(netmgr->workers[0]));
- mgr->workers = isc_mem_get(mctx, workers * sizeof(isc__networker_t));
- for (size_t i = 0; i < workers; i++) {
- isc__networker_t *worker = &mgr->workers[i];
- int r;
+ isc_loopmgr_teardown(loopmgr, netmgr_teardown, netmgr);
- *worker = (isc__networker_t){
- .mgr = mgr,
- .id = i,
- };
+ netmgr->magic = NM_MAGIC;
- r = uv_loop_init(&worker->loop);
- UV_RUNTIME_CHECK(uv_loop_init, r);
+ for (size_t i = 0; i < netmgr->nloops; i++) {
+ isc_loop_t *loop = isc_loop_get(netmgr->loopmgr, i);
+ isc__networker_t *worker = &netmgr->workers[i];
- worker->loop.data = &mgr->workers[i];
-
- r = uv_async_init(&worker->loop, &worker->async, async_cb);
- UV_RUNTIME_CHECK(uv_async_init, r);
+ *worker = (isc__networker_t){
+ .recvbuf = isc_mem_get(loop->mctx,
+ ISC_NETMGR_RECVBUF_SIZE),
+ .sendbuf = isc_mem_get(loop->mctx,
+ ISC_NETMGR_SENDBUF_SIZE),
+ };
- for (size_t type = 0; type < NETIEVENT_MAX; type++) {
- isc_mutex_init(&worker->ievents[type].lock);
- isc_condition_init(&worker->ievents[type].cond);
- ISC_LIST_INIT(worker->ievents[type].list);
- }
+ isc_nm_attach(netmgr, &worker->netmgr);
- worker->recvbuf = isc_mem_get(mctx, ISC_NETMGR_RECVBUF_SIZE);
- worker->sendbuf = isc_mem_get(mctx, ISC_NETMGR_SENDBUF_SIZE);
+ isc_mem_attach(loop->mctx, &worker->mctx);
- /*
- * We need to do this here and not in nm_thread to avoid a
- * race - we could exit isc_nm_start, launch nm_destroy,
- * and nm_thread would still not be up.
- */
- mgr->workers_running++;
- isc_thread_create(nm_thread, &mgr->workers[i], &worker->thread);
-
- snprintf(name, sizeof(name), "isc-net-%04zu", i);
- isc_thread_setname(worker->thread, name);
+ isc_loop_attach(loop, &worker->loop);
+ isc_loop_teardown(loop, networker_teardown, worker);
+ isc_refcount_init(&worker->references, 1);
}
- mgr->magic = NM_MAGIC;
- *netmgrp = mgr;
+ *netmgrp = netmgr;
}
/*
static void
nm_destroy(isc_nm_t **mgr0) {
REQUIRE(VALID_NM(*mgr0));
- REQUIRE(!isc__nm_in_netthread());
isc_nm_t *mgr = *mgr0;
*mgr0 = NULL;
mgr->magic = 0;
- for (int i = 0; i < mgr->nworkers; i++) {
- isc__networker_t *worker = &mgr->workers[i];
- isc__netievent_t *event = isc__nm_get_netievent_stop(mgr);
- isc__nm_enqueue_ievent(worker, event);
- }
-
- LOCK(&mgr->lock);
- while (mgr->workers_running > 0) {
- WAIT(&mgr->wkstatecond, &mgr->lock);
- }
- UNLOCK(&mgr->lock);
-
- for (int i = 0; i < mgr->nworkers; i++) {
- isc__networker_t *worker = &mgr->workers[i];
- int r;
-
- r = uv_loop_close(&worker->loop);
- UV_RUNTIME_CHECK(uv_loop_close, r);
-
- for (size_t type = 0; type < NETIEVENT_MAX; type++) {
- INSIST(ISC_LIST_EMPTY(worker->ievents[type].list));
- isc_condition_destroy(&worker->ievents[type].cond);
- isc_mutex_destroy(&worker->ievents[type].lock);
- }
-
- isc_mem_put(mgr->mctx, worker->sendbuf,
- ISC_NETMGR_SENDBUF_SIZE);
- isc_mem_put(mgr->mctx, worker->recvbuf,
- ISC_NETMGR_RECVBUF_SIZE);
- isc_thread_join(worker->thread, NULL);
- }
-
if (mgr->stats != NULL) {
isc_stats_detach(&mgr->stats);
}
- isc_barrier_destroy(&mgr->resuming);
- isc_barrier_destroy(&mgr->pausing);
-
- isc_condition_destroy(&mgr->wkstatecond);
- isc_condition_destroy(&mgr->wkpausecond);
isc_mutex_destroy(&mgr->lock);
isc_mem_put(mgr->mctx, mgr->workers,
- mgr->nworkers * sizeof(isc__networker_t));
+ mgr->nloops * sizeof(mgr->workers[0]));
isc_mem_putanddetach(&mgr->mctx, mgr, sizeof(*mgr));
}
-static void
-enqueue_pause(isc__networker_t *worker) {
- isc__netievent_pause_t *event =
- isc__nm_get_netievent_pause(worker->mgr);
- isc__nm_enqueue_ievent(worker, (isc__netievent_t *)event);
-}
-
-static void
-isc__nm_async_pause(isc__networker_t *worker, isc__netievent_t *ev0) {
- UNUSED(ev0);
- REQUIRE(worker->paused == false);
-
- worker->paused = true;
- uv_stop(&worker->loop);
-}
-
-void
-isc_nm_pause(isc_nm_t *mgr) {
- REQUIRE(VALID_NM(mgr));
- REQUIRE(!atomic_load(&mgr->paused));
-
- isc__nm_acquire_interlocked_force(mgr);
-
- if (isc__nm_in_netthread()) {
- REQUIRE(isc_nm_tid() == 0);
- }
-
- for (int i = 0; i < mgr->nworkers; i++) {
- isc__networker_t *worker = &mgr->workers[i];
- if (i == isc_nm_tid()) {
- isc__nm_async_pause(worker, NULL);
- } else {
- enqueue_pause(worker);
- }
- }
-
- if (isc__nm_in_netthread()) {
- atomic_fetch_add(&mgr->workers_paused, 1);
- isc_barrier_wait(&mgr->pausing);
- }
-
- LOCK(&mgr->lock);
- while (atomic_load(&mgr->workers_paused) != mgr->workers_running) {
- WAIT(&mgr->wkstatecond, &mgr->lock);
- }
- UNLOCK(&mgr->lock);
-
- atomic_compare_exchange_enforced(&mgr->paused, &(bool){ false }, true);
-}
-
-static void
-enqueue_resume(isc__networker_t *worker) {
- isc__netievent_resume_t *event =
- isc__nm_get_netievent_resume(worker->mgr);
- isc__nm_enqueue_ievent(worker, (isc__netievent_t *)event);
-}
-
-static void
-isc__nm_async_resume(isc__networker_t *worker, isc__netievent_t *ev0) {
- UNUSED(ev0);
- REQUIRE(worker->paused == true);
-
- worker->paused = false;
-}
-
-void
-isc_nm_resume(isc_nm_t *mgr) {
- REQUIRE(VALID_NM(mgr));
- REQUIRE(atomic_load(&mgr->paused));
-
- if (isc__nm_in_netthread()) {
- REQUIRE(isc_nm_tid() == 0);
- drain_queue(&mgr->workers[isc_nm_tid()], NETIEVENT_PRIORITY);
- }
-
- for (int i = 0; i < mgr->nworkers; i++) {
- isc__networker_t *worker = &mgr->workers[i];
- if (i == isc_nm_tid()) {
- isc__nm_async_resume(worker, NULL);
- } else {
- enqueue_resume(worker);
- }
- }
-
- if (isc__nm_in_netthread()) {
- atomic_fetch_sub(&mgr->workers_paused, 1);
- isc_barrier_wait(&mgr->resuming);
- }
-
- LOCK(&mgr->lock);
- while (atomic_load(&mgr->workers_paused) != 0) {
- WAIT(&mgr->wkstatecond, &mgr->lock);
- }
- UNLOCK(&mgr->lock);
-
- atomic_compare_exchange_enforced(&mgr->paused, &(bool){ true }, false);
-
- isc__nm_drop_interlocked(mgr);
-}
-
void
isc_nm_attach(isc_nm_t *mgr, isc_nm_t **dst) {
REQUIRE(VALID_NM(mgr));
}
void
-isc__netmgr_shutdown(isc_nm_t *mgr) {
- REQUIRE(VALID_NM(mgr));
-
- atomic_store(&mgr->closing, true);
- for (int i = 0; i < mgr->nworkers; i++) {
- isc__netievent_t *event = NULL;
- event = isc__nm_get_netievent_shutdown(mgr);
- isc__nm_enqueue_ievent(&mgr->workers[i], event);
- }
-}
-
-void
-isc__netmgr_destroy(isc_nm_t **netmgrp) {
+isc_netmgr_destroy(isc_nm_t **netmgrp) {
isc_nm_t *mgr = NULL;
- int counter = 0;
REQUIRE(VALID_NM(*netmgrp));
mgr = *netmgrp;
+ *netmgrp = NULL;
- /*
- * Close active connections.
- */
- isc__netmgr_shutdown(mgr);
-
- /*
- * Wait for the manager to be dereferenced elsewhere.
- */
- while (isc_refcount_current(&mgr->references) > 1 && counter++ < 1000) {
- uv_sleep(10);
- }
-
-#ifdef NETMGR_TRACE
- if (isc_refcount_current(&mgr->references) > 1) {
- isc__nm_dump_active(mgr);
- UNREACHABLE();
- }
-#endif
-
- /*
- * Now just patiently wait
- */
- while (isc_refcount_current(&mgr->references) > 1) {
- uv_sleep(10);
- }
-
- /*
- * Detach final reference.
- */
- isc_nm_detach(netmgrp);
+ REQUIRE(isc_refcount_decrement(&mgr->references) == 1);
+ nm_destroy(&mgr);
}
void
isc_nmhandle_setwritetimeout(isc_nmhandle_t *handle, uint64_t write_timeout) {
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
- REQUIRE(handle->sock->tid == isc_nm_tid());
+ REQUIRE(handle->sock->tid == isc_tid());
switch (handle->sock->type) {
case isc_nm_tcpsocket:
}
}
-/*
- * nm_thread is a single worker thread, that runs uv_run event loop
- * until asked to stop.
- *
- * There are four queues for asynchronous events:
- *
- * 1. priority queue - netievents on the priority queue are run even when
- * the taskmgr enters exclusive mode and the netmgr is paused. This
- * is needed to properly start listening on the interfaces, free
- * resources on shutdown, or resume from a pause.
- *
- * 2. task queue - only (traditional) tasks are scheduled here, and this queue
- * is processed when the netmgr workers are finishing. This is needed to
- * process the task shutdown events.
- *
- * 3. normal queue - this is the queue with netmgr events, e.g. reading,
- * sending, callbacks, etc.
- */
-
-static isc_threadresult_t
-nm_thread(isc_threadarg_t worker0) {
- isc__networker_t *worker = (isc__networker_t *)worker0;
- isc_nm_t *mgr = worker->mgr;
-
- isc__nm_tid_v = worker->id;
-
- while (true) {
- /*
- * uv_run() runs async_cb() in a loop, which processes
- * all four event queues until a "pause" or "stop" event
- * is encountered. On pause, we process only priority
- * events until resuming.
- */
- int r = uv_run(&worker->loop, UV_RUN_DEFAULT);
- INSIST(r > 0 || worker->finished);
-
- if (worker->paused) {
- INSIST(atomic_load(&mgr->interlocked) != isc_nm_tid());
-
- atomic_fetch_add(&mgr->workers_paused, 1);
- if (isc_barrier_wait(&mgr->pausing) != 0) {
- LOCK(&mgr->lock);
- SIGNAL(&mgr->wkstatecond);
- UNLOCK(&mgr->lock);
- }
-
- while (worker->paused) {
- wait_for_priority_queue(worker);
- }
-
- atomic_fetch_sub(&mgr->workers_paused, 1);
- if (isc_barrier_wait(&mgr->resuming) != 0) {
- LOCK(&mgr->lock);
- SIGNAL(&mgr->wkstatecond);
- UNLOCK(&mgr->lock);
- }
- }
-
- if (r == 0) {
- INSIST(worker->finished);
- break;
- }
-
- INSIST(!worker->finished);
- }
-
- /*
- * We are shutting down. Drain the queues.
- */
- drain_queue(worker, NETIEVENT_TASK);
-
- for (size_t type = 0; type < NETIEVENT_MAX; type++) {
- LOCK(&worker->ievents[type].lock);
- INSIST(ISC_LIST_EMPTY(worker->ievents[type].list));
- UNLOCK(&worker->ievents[type].lock);
- }
-
- LOCK(&mgr->lock);
- mgr->workers_running--;
- SIGNAL(&mgr->wkstatecond);
- UNLOCK(&mgr->lock);
-
- return ((isc_threadresult_t)0);
-}
-
-static bool
-process_all_queues(isc__networker_t *worker) {
- bool reschedule = false;
- /*
- * The queue processing functions will return false when the
- * system is pausing or stopping and we don't want to process
- * the other queues in such case, but we need the async event
- * to be rescheduled in the next uv_run().
- */
- for (size_t type = 0; type < NETIEVENT_MAX; type++) {
- isc_result_t result = process_queue(worker, type);
- switch (result) {
- case ISC_R_SUSPEND:
- reschedule = true;
- break;
- case ISC_R_EMPTY:
- /* empty queue */
- break;
- case ISC_R_SUCCESS:
- reschedule = true;
- break;
- default:
- UNREACHABLE();
- }
- }
-
- return (reschedule);
-}
-
-/*
- * async_cb() is a universal callback for 'async' events sent to event loop.
- * It's the only way to safely pass data to the libuv event loop. We use a
- * single async event and a set of lockless queues of 'isc__netievent_t'
- * structures passed from other threads.
- */
-static void
-async_cb(uv_async_t *handle) {
- isc__networker_t *worker = (isc__networker_t *)handle->loop->data;
-
- if (process_all_queues(worker)) {
- /*
- * If we didn't process all the events, we need to enqueue
- * async_cb to be run in the next iteration of the uv_loop
- */
- uv_async_send(handle);
- }
-}
-
-static void
-isc__nm_async_stop(isc__networker_t *worker, isc__netievent_t *ev0) {
- UNUSED(ev0);
- worker->finished = true;
- /* Close the async handler */
- uv_close((uv_handle_t *)&worker->async, NULL);
-}
-
-void
-isc_nm_task_enqueue(isc_nm_t *nm, isc_task_t *task, int tid) {
- isc__netievent_t *event = NULL;
- isc__networker_t *worker = NULL;
- REQUIRE(tid >= 0 && tid < nm->nworkers);
-
- worker = &nm->workers[tid];
-
- event = (isc__netievent_t *)isc__nm_get_netievent_task(nm, task);
-
- isc__nm_enqueue_ievent(worker, event);
-}
-
-static void
-isc__nm_async_task(isc__networker_t *worker, isc__netievent_t *ev0) {
- isc__netievent_task_t *ievent = (isc__netievent_task_t *)ev0;
- isc_result_t result;
-
- UNUSED(worker);
-
- result = isc_task_run(ievent->task);
-
- switch (result) {
- case ISC_R_QUOTA:
- isc_task_ready(ievent->task);
- return;
- case ISC_R_SUCCESS:
- return;
- default:
- UNREACHABLE();
- }
-}
-
-static void
-wait_for_priority_queue(isc__networker_t *worker) {
- isc_condition_t *cond = &worker->ievents[NETIEVENT_PRIORITY].cond;
- isc_mutex_t *lock = &worker->ievents[NETIEVENT_PRIORITY].lock;
- isc__netievent_list_t *list =
- &(worker->ievents[NETIEVENT_PRIORITY].list);
-
- LOCK(lock);
- while (ISC_LIST_EMPTY(*list)) {
- WAIT(cond, lock);
- }
- UNLOCK(lock);
-
- drain_queue(worker, NETIEVENT_PRIORITY);
-}
-
-static void
-drain_queue(isc__networker_t *worker, netievent_type_t type) {
- bool empty = false;
- while (!empty) {
- if (process_queue(worker, type) == ISC_R_EMPTY) {
- LOCK(&worker->ievents[type].lock);
- empty = ISC_LIST_EMPTY(worker->ievents[type].list);
- UNLOCK(&worker->ievents[type].lock);
- }
- }
-}
-
/*
* The two macros here generate the individual cases for the process_netievent()
* function. The NETIEVENT_CASE(type) macro is the common case, and
* process_queue() to stop, e.g. it's only used for the netievent that
* stops/pauses processing the enqueued netievents.
*/
-#define NETIEVENT_CASE(type) \
- case netievent_##type: { \
- isc__nm_async_##type(worker, ievent); \
- isc__nm_put_netievent_##type( \
- worker->mgr, (isc__netievent_##type##_t *)ievent); \
- return (true); \
+#define NETIEVENT_CASE(type) \
+ case netievent_##type: { \
+ isc__nm_async_##type(worker, ievent); \
+ isc__nm_put_netievent_##type( \
+ worker, (isc__netievent_##type##_t *)ievent); \
+ return; \
}
-#define NETIEVENT_CASE_NOMORE(type) \
- case netievent_##type: { \
- isc__nm_async_##type(worker, ievent); \
- isc__nm_put_netievent_##type(worker->mgr, ievent); \
- return (false); \
- }
-
-static bool
-process_netievent(isc__networker_t *worker, isc__netievent_t *ievent) {
- REQUIRE(worker->id == isc_nm_tid());
+static void
+process_netievent(void *arg) {
+ isc__netievent_t *ievent = (isc__netievent_t *)arg;
+ isc__networker_t *worker = ievent->worker;
switch (ievent->type) {
- /* Don't process more ievents when we are stopping */
- NETIEVENT_CASE_NOMORE(stop);
-
- NETIEVENT_CASE(task);
-
- NETIEVENT_CASE(udpconnect);
NETIEVENT_CASE(udplisten);
NETIEVENT_CASE(udpstop);
- NETIEVENT_CASE(udpsend);
- NETIEVENT_CASE(udpread);
NETIEVENT_CASE(udpcancel);
- NETIEVENT_CASE(udpclose);
-
- NETIEVENT_CASE(routeconnect);
+ NETIEVENT_CASE(udpread);
NETIEVENT_CASE(tcpaccept);
NETIEVENT_CASE(tcpconnect);
NETIEVENT_CASE(httpsend);
NETIEVENT_CASE(httpclose);
NETIEVENT_CASE(httpendpoints);
-#endif
NETIEVENT_CASE(settlsctx);
+#endif
NETIEVENT_CASE(connectcb);
NETIEVENT_CASE(readcb);
NETIEVENT_CASE(sendcb);
- NETIEVENT_CASE(close);
NETIEVENT_CASE(detach);
-
- NETIEVENT_CASE(shutdown);
- NETIEVENT_CASE(resume);
- NETIEVENT_CASE_NOMORE(pause);
default:
UNREACHABLE();
}
- return (true);
-}
-
-static isc_result_t
-process_queue(isc__networker_t *worker, netievent_type_t type) {
- isc__netievent_t *ievent = NULL;
- isc__netievent_list_t list;
-
- ISC_LIST_INIT(list);
-
- LOCK(&worker->ievents[type].lock);
- ISC_LIST_MOVE(list, worker->ievents[type].list);
- UNLOCK(&worker->ievents[type].lock);
-
- ievent = ISC_LIST_HEAD(list);
- if (ievent == NULL) {
- /* There's nothing scheduled */
- return (ISC_R_EMPTY);
- }
-
- while (ievent != NULL) {
- isc__netievent_t *next = ISC_LIST_NEXT(ievent, link);
- ISC_LIST_DEQUEUE(list, ievent, link);
-
- if (!process_netievent(worker, ievent)) {
- /* The netievent told us to stop */
- if (!ISC_LIST_EMPTY(list)) {
- /*
- * Reschedule the rest of the unprocessed
- * events.
- */
- LOCK(&worker->ievents[type].lock);
- ISC_LIST_PREPENDLIST(worker->ievents[type].list,
- list, link);
- UNLOCK(&worker->ievents[type].lock);
- }
- return (ISC_R_SUSPEND);
- }
-
- ievent = next;
- }
-
- /* We processed at least one */
- return (ISC_R_SUCCESS);
}
void *
-isc__nm_get_netievent(isc_nm_t *mgr, isc__netievent_type type) {
- isc__netievent_storage_t *event = isc_mem_get(mgr->mctx,
+isc__nm_get_netievent(isc__networker_t *worker, isc__netievent_type type) {
+ isc__netievent_storage_t *event = isc_mem_get(worker->mctx,
sizeof(*event));
*event = (isc__netievent_storage_t){ .ni.type = type };
ISC_LINK_INIT(&(event->ni), link);
+
+ isc__networker_ref(worker);
+
return (event);
}
void
-isc__nm_put_netievent(isc_nm_t *mgr, void *ievent) {
- isc_mem_put(mgr->mctx, ievent, sizeof(isc__netievent_storage_t));
+isc__nm_put_netievent(isc__networker_t *worker, void *ievent) {
+ isc_mem_put(worker->mctx, ievent, sizeof(isc__netievent_storage_t));
+ isc__networker_unref(worker);
}
NETIEVENT_SOCKET_DEF(tcpclose);
NETIEVENT_SOCKET_DEF(tlsdobio);
NETIEVENT_SOCKET_DEF(tlsstartread);
NETIEVENT_SOCKET_HANDLE_DEF(tlscancel);
-NETIEVENT_SOCKET_DEF(udpclose);
NETIEVENT_SOCKET_DEF(udplisten);
-NETIEVENT_SOCKET_DEF(udpread);
-NETIEVENT_SOCKET_DEF(udpsend);
NETIEVENT_SOCKET_DEF(udpstop);
+NETIEVENT_SOCKET_HANDLE_DEF(udpcancel);
+NETIEVENT_SOCKET_DEF(udpread);
NETIEVENT_SOCKET_DEF(tcpdnsclose);
NETIEVENT_SOCKET_DEF(tcpdnsread);
NETIEVENT_SOCKET_REQ_DEF(tcpconnect);
NETIEVENT_SOCKET_REQ_DEF(tcpsend);
NETIEVENT_SOCKET_REQ_DEF(tlssend);
-NETIEVENT_SOCKET_REQ_DEF(udpconnect);
-NETIEVENT_SOCKET_REQ_DEF(routeconnect);
NETIEVENT_SOCKET_REQ_RESULT_DEF(connectcb);
NETIEVENT_SOCKET_REQ_RESULT_DEF(readcb);
NETIEVENT_SOCKET_REQ_RESULT_DEF(sendcb);
NETIEVENT_SOCKET_DEF(detach);
NETIEVENT_SOCKET_HANDLE_DEF(tcpcancel);
-NETIEVENT_SOCKET_HANDLE_DEF(udpcancel);
NETIEVENT_SOCKET_QUOTA_DEF(tcpaccept);
-NETIEVENT_SOCKET_DEF(close);
-NETIEVENT_DEF(pause);
-NETIEVENT_DEF(resume);
-NETIEVENT_DEF(shutdown);
-NETIEVENT_DEF(stop);
-
-NETIEVENT_TASK_DEF(task);
-
NETIEVENT_SOCKET_TLSCTX_DEF(settlsctx);
+void
+isc__nm_process_ievent(isc__networker_t *worker, isc__netievent_t *event) {
+ event->worker = worker;
+ process_netievent(event);
+}
+
void
isc__nm_maybe_enqueue_ievent(isc__networker_t *worker,
isc__netievent_t *event) {
* If we are already in the matching nmthread, process the ievent
* directly.
*/
- if (worker->id == isc_nm_tid()) {
- process_netievent(worker, event);
+ if (worker->loop == isc_loop_current(worker->netmgr->loopmgr)) {
+ isc__nm_process_ievent(worker, event);
return;
}
void
isc__nm_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event) {
- netievent_type_t type;
-
- if (event->type > netievent_prio) {
- type = NETIEVENT_PRIORITY;
- } else {
- switch (event->type) {
- case netievent_prio:
- UNREACHABLE();
- break;
- case netievent_task:
- type = NETIEVENT_TASK;
- break;
- default:
- type = NETIEVENT_NORMAL;
- break;
- }
- }
-
- /*
- * We need to make sure this signal will be delivered and
- * the queue will be processed.
- */
- LOCK(&worker->ievents[type].lock);
- ISC_LIST_ENQUEUE(worker->ievents[type].list, event, link);
- if (type == NETIEVENT_PRIORITY) {
- SIGNAL(&worker->ievents[type].cond);
- }
- UNLOCK(&worker->ievents[type].lock);
+ event->worker = worker;
- uv_async_send(&worker->async);
+ isc_async_run(worker->loop, process_netievent, event);
}
bool
}
}
- /*
- * This was a parent socket: destroy the listening
- * barriers that synchronized the children.
- */
- isc_barrier_destroy(&sock->startlistening);
- isc_barrier_destroy(&sock->stoplistening);
-
/*
* Now free them.
*/
- isc_mem_put(sock->mgr->mctx, sock->children,
+ isc_mem_put(sock->worker->mctx, sock->children,
sock->nchildren * sizeof(*sock));
sock->children = NULL;
sock->nchildren = 0;
}
if (sock->buf != NULL) {
- isc_mem_put(sock->mgr->mctx, sock->buf, sock->buf_size);
+ isc_mem_put(sock->worker->mctx, sock->buf, sock->buf_size);
}
if (sock->quota != NULL) {
isc_astack_destroy(sock->inactivehandles);
while ((uvreq = isc_astack_pop(sock->inactivereqs)) != NULL) {
- isc_mem_put(sock->mgr->mctx, uvreq, sizeof(*uvreq));
+ isc_mem_put(sock->worker->mctx, uvreq, sizeof(*uvreq));
}
isc_astack_destroy(sock->inactivereqs);
- sock->magic = 0;
- isc_condition_destroy(&sock->scond);
- isc_condition_destroy(&sock->cond);
- isc_mutex_destroy(&sock->lock);
isc__nm_tlsdns_cleanup_data(sock);
#if HAVE_LIBNGHTTP2
isc__nm_tls_cleanup_data(sock);
isc__nm_http_cleanup_data(sock);
#endif
+
+ sock->magic = 0;
+
#ifdef NETMGR_TRACE
- LOCK(&sock->mgr->lock);
- ISC_LIST_UNLINK(sock->mgr->active_sockets, sock, active_link);
- UNLOCK(&sock->mgr->lock);
+ LOCK(&sock->worker->netmgr->lock);
+ ISC_LIST_UNLINK(sock->worker->netmgr->active_sockets, sock,
+ active_link);
+ UNLOCK(&sock->worker->netmgr->lock);
+ isc_mutex_destroy(&sock->tracelock);
#endif
+
+ isc_mutex_destroy(&sock->lock);
+
if (dofree) {
- isc_nm_t *mgr = sock->mgr;
- isc_mem_put(mgr->mctx, sock, sizeof(*sock));
- isc_nm_detach(&mgr);
+ isc__networker_t *worker = sock->worker;
+ isc_mem_put(worker->mctx, sock, sizeof(*sock));
+ isc__networker_detach(&worker);
} else {
- isc_nm_detach(&sock->mgr);
+ isc__networker_detach(&sock->worker);
}
}
* children have active handles before deciding whether to
* accept destruction.
*/
- LOCK(&sock->lock);
if (atomic_load(&sock->active) || atomic_load(&sock->destroying) ||
!atomic_load(&sock->closed) || atomic_load(&sock->references) != 0)
{
- UNLOCK(&sock->lock);
return;
}
active_handles = atomic_load(&sock->ah);
if (sock->children != NULL) {
for (size_t i = 0; i < sock->nchildren; i++) {
- LOCK(&sock->children[i].lock);
active_handles += atomic_load(&sock->children[i].ah);
- UNLOCK(&sock->children[i].lock);
}
}
if (destroy) {
atomic_store(&sock->destroying, true);
- UNLOCK(&sock->lock);
nmsocket_cleanup(sock, true FLARG_PASS);
- } else {
- UNLOCK(&sock->lock);
}
}
}
void
-isc___nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type,
- isc_sockaddr_t *iface FLARG) {
+isc___nmsocket_init(isc_nmsocket_t *sock, isc__networker_t *worker,
+ isc_nmsocket_type type, isc_sockaddr_t *iface FLARG) {
uint16_t family;
REQUIRE(sock != NULL);
- REQUIRE(mgr != NULL);
-
- *sock = (isc_nmsocket_t){ .type = type,
- .fd = -1,
- .inactivehandles = isc_astack_new(
- mgr->mctx, ISC_NM_HANDLES_STACK_SIZE),
- .inactivereqs = isc_astack_new(
- mgr->mctx, ISC_NM_REQS_STACK_SIZE) };
+ REQUIRE(worker != NULL);
+
+ *sock = (isc_nmsocket_t){
+ .type = type,
+ .tid = worker->loop->tid,
+ .fd = -1,
+ .inactivehandles = isc_astack_new(worker->mctx,
+ ISC_NM_HANDLES_STACK_SIZE),
+ .inactivereqs = isc_astack_new(worker->mctx,
+ ISC_NM_REQS_STACK_SIZE),
+ .result = ISC_R_UNSET,
+ };
ISC_LIST_INIT(sock->tls.sendreqs);
+ isc_mutex_init(&sock->lock);
if (iface != NULL) {
family = iface->type.sa.sa_family;
sock->backtrace_size = isc_backtrace(sock->backtrace, TRACE_SIZE);
ISC_LINK_INIT(sock, active_link);
ISC_LIST_INIT(sock->active_handles);
- LOCK(&mgr->lock);
- ISC_LIST_APPEND(mgr->active_sockets, sock, active_link);
- UNLOCK(&mgr->lock);
+ LOCK(&worker->netmgr->lock);
+ ISC_LIST_APPEND(worker->netmgr->active_sockets, sock, active_link);
+ UNLOCK(&worker->netmgr->lock);
+ isc_mutex_init(&sock->tracelock);
#endif
- isc_nm_attach(mgr, &sock->mgr);
+ isc__networker_attach(worker, &sock->worker);
sock->uv_handle.handle.data = sock;
ISC_LINK_INIT(&sock->quotacb, link);
break;
}
- isc_mutex_init(&sock->lock);
- isc_condition_init(&sock->cond);
- isc_condition_init(&sock->scond);
isc_refcount_init(&sock->references, 1);
#if HAVE_LIBNGHTTP2
void
isc__nmsocket_clearcb(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(!isc__nm_in_netthread() || sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
sock->recv_cb = NULL;
sock->recv_cbarg = NULL;
void
isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf) {
- isc__networker_t *worker = NULL;
-
REQUIRE(VALID_NMSOCK(sock));
- worker = &sock->mgr->workers[sock->tid];
- REQUIRE(buf->base == worker->recvbuf);
+ REQUIRE(buf->base == sock->worker->recvbuf);
- worker->recvbuf_inuse = false;
+ sock->worker->recvbuf_inuse = false;
}
static isc_nmhandle_t *
alloc_handle(isc_nmsocket_t *sock) {
- isc_nmhandle_t *handle = isc_mem_get(sock->mgr->mctx,
+ isc_nmhandle_t *handle = isc_mem_get(sock->worker->mctx,
sizeof(isc_nmhandle_t));
*handle = (isc_nmhandle_t){ .magic = NMHANDLE_MAGIC };
(void)atomic_fetch_add(&sock->ah, 1);
#ifdef NETMGR_TRACE
- LOCK(&sock->lock);
+ LOCK(&sock->tracelock);
ISC_LIST_APPEND(sock->active_handles, handle, active_link);
- UNLOCK(&sock->lock);
+ UNLOCK(&sock->tracelock);
#endif
switch (sock->type) {
*handle = (isc_nmhandle_t){ .magic = 0 };
- isc_mem_put(sock->mgr->mctx, handle, sizeof(isc_nmhandle_t));
+ isc_mem_put(sock->worker->mctx, handle, sizeof(isc_nmhandle_t));
}
static void
* destruction. We have to do this now, because at this point the
* socket is either unused or still attached to event->sock.
*/
- LOCK(&sock->lock);
-
#ifdef NETMGR_TRACE
ISC_LIST_UNLINK(sock->active_handles, handle, active_link);
#endif
if (!reuse) {
nmhandle_free(sock, handle);
}
- UNLOCK(&sock->lock);
}
void
* ensure correct ordering of the isc__nm_process_sock_buffer().
*/
sock = handle->sock;
- if (sock->tid == isc_nm_tid() && sock->closehandle_cb == NULL) {
+ if (sock->tid == isc_tid() && sock->closehandle_cb == NULL) {
nmhandle_detach_cb(&handle FLARG_PASS);
} else {
isc__netievent_detach_t *event =
- isc__nm_get_netievent_detach(sock->mgr, sock);
+ isc__nm_get_netievent_detach(sock->worker, sock);
/*
* we are using implicit "attach" as the last reference
* need to be destroyed explicitly in the async callback
*/
event->handle = handle;
FLARG_IEVENT_PASS(event);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)event);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)event);
}
}
/*
* The handle is gone now. If the socket has a callback configured
* for that (e.g., to perform cleanup after request processing),
- * call it now, or schedule it to run asynchronously.
+ * call it now..
*/
if (sock->closehandle_cb != NULL) {
- if (sock->tid == isc_nm_tid()) {
- sock->closehandle_cb(sock);
- } else {
- isc__netievent_close_t *event =
- isc__nm_get_netievent_close(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)event);
- }
+ sock->closehandle_cb(sock);
}
if (handle == sock->statichandle) {
if (sock->buf == NULL) {
/* We don't have the buffer at all */
size_t alloc_len = len < NM_REG_BUF ? NM_REG_BUF : NM_BIG_BUF;
- sock->buf = isc_mem_get(sock->mgr->mctx, alloc_len);
+ sock->buf = isc_mem_get(sock->worker->mctx, alloc_len);
sock->buf_size = alloc_len;
} else {
/* We have the buffer but it's too small */
- sock->buf = isc_mem_reget(sock->mgr->mctx, sock->buf,
+ sock->buf = isc_mem_reget(sock->worker->mctx, sock->buf,
sock->buf_size, NM_BIG_BUF);
sock->buf_size = NM_BIG_BUF;
}
isc_result_t eresult, bool async) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(req));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(req->cb.connect != NULL);
isc__nm_incstats(sock, STATID_CONNECTFAIL);
isc__nm_uvreq_t *req = uv_handle_get_data((uv_handle_t *)uvreq);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->connecting));
REQUIRE(VALID_UVREQ(req));
REQUIRE(VALID_NMHANDLE(req->handle));
isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)timer);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->reading));
if (atomic_load(&sock->client)) {
isc__nm_get_read_req(isc_nmsocket_t *sock, isc_sockaddr_t *sockaddr) {
isc__nm_uvreq_t *req = NULL;
- req = isc__nm_uvreq_get(sock->mgr, sock);
+ req = isc__nm_uvreq_get(sock->worker, sock);
req->cb.recv = sock->recv_cb;
req->cbarg = sock->recv_cbarg;
isc__networker_t *worker = NULL;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(isc__nm_in_netthread());
/*
* The size provided by libuv is only suggested size, and it always
* defaults to 64 * 1024 in the current versions of libuv (see
*/
UNUSED(size);
- worker = &sock->mgr->workers[sock->tid];
+ worker = sock->worker;
INSIST(!worker->recvbuf_inuse);
INSIST(worker->recvbuf != NULL);
}
bool
-isc__nm_closing(isc_nmsocket_t *sock) {
- return (atomic_load(&sock->mgr->closing));
+isc__nm_closing(isc__networker_t *worker) {
+ return (worker->shuttingdown);
}
bool
isc__nmsocket_closing(isc_nmsocket_t *sock) {
return (!isc__nmsocket_active(sock) || atomic_load(&sock->closing) ||
- isc__nm_closing(sock) ||
+ isc__nm_closing(sock->worker) ||
(sock->server != NULL && !isc__nmsocket_active(sock->server)));
}
isc_nmsocket_t *sock = (isc_nmsocket_t *)arg;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(!atomic_load(&sock->client));
if (isc__nmsocket_closing(sock)) {
void
isc_nmhandle_keepalive(isc_nmhandle_t *handle, bool value) {
isc_nmsocket_t *sock = NULL;
+ isc_nm_t *netmgr = NULL;
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
sock = handle->sock;
+ netmgr = sock->worker->netmgr;
switch (sock->type) {
case isc_nm_tcpsocket:
case isc_nm_tcpdnssocket:
case isc_nm_tlsdnssocket:
atomic_store(&sock->keepalive, value);
- sock->read_timeout = value ? atomic_load(&sock->mgr->keepalive)
- : atomic_load(&sock->mgr->idle);
- sock->write_timeout = value ? atomic_load(&sock->mgr->keepalive)
- : atomic_load(&sock->mgr->idle);
+ sock->read_timeout = value ? atomic_load(&netmgr->keepalive)
+ : atomic_load(&netmgr->idle);
+ sock->write_timeout = value ? atomic_load(&netmgr->keepalive)
+ : atomic_load(&netmgr->idle);
break;
#if HAVE_LIBNGHTTP2
case isc_nm_tlssocket:
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
- return (handle->sock->mgr);
+ return (handle->sock->worker->netmgr);
}
+/* FIXME: Use per-worker mempool */
isc__nm_uvreq_t *
-isc___nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock FLARG) {
+isc___nm_uvreq_get(isc__networker_t *worker, isc_nmsocket_t *sock FLARG) {
isc__nm_uvreq_t *req = NULL;
- REQUIRE(VALID_NM(mgr));
+ REQUIRE(worker != NULL);
REQUIRE(VALID_NMSOCK(sock));
if (sock != NULL && isc__nmsocket_active(sock)) {
}
if (req == NULL) {
- req = isc_mem_get(mgr->mctx, sizeof(*req));
+ req = isc_mem_get(worker->mctx, sizeof(*req));
}
*req = (isc__nm_uvreq_t){
#if !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__
if (!isc__nmsocket_active(sock) ||
!isc_astack_trypush(sock->inactivereqs, req)) {
- isc_mem_put(sock->mgr->mctx, req, sizeof(*req));
+ isc_mem_put(sock->worker->mctx, req, sizeof(*req));
}
#else /* !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__ */
- isc_mem_put(sock->mgr->mctx, req, sizeof(*req));
+ isc_mem_put(sock->worker->mctx, req, sizeof(*req));
#endif /* !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__ */
if (handle != NULL) {
isc__nm_async_connectcb(NULL, (isc__netievent_t *)&ievent);
} else {
isc__netievent_connectcb_t *ievent =
- isc__nm_get_netievent_connectcb(sock->mgr, sock, uvreq,
- eresult);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
+ isc__nm_get_netievent_connectcb(sock->worker, sock,
+ uvreq, eresult);
+ isc__nm_enqueue_ievent(sock->worker,
(isc__netievent_t *)ievent);
}
}
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(uvreq));
REQUIRE(VALID_NMHANDLE(uvreq->handle));
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
REQUIRE(uvreq->cb.connect != NULL);
uvreq->cb.connect(uvreq->handle, eresult, uvreq->cbarg);
REQUIRE(VALID_NMHANDLE(uvreq->handle));
if (eresult == ISC_R_SUCCESS || eresult == ISC_R_TIMEDOUT) {
- isc__netievent_readcb_t ievent = { .sock = sock,
+ isc__netievent_readcb_t ievent = { .type = netievent_readcb,
+ .sock = sock,
.req = uvreq,
.result = eresult };
isc__nm_async_readcb(NULL, (isc__netievent_t *)&ievent);
} else {
isc__netievent_readcb_t *ievent = isc__nm_get_netievent_readcb(
- sock->mgr, sock, uvreq, eresult);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
+ sock->worker, sock, uvreq, eresult);
+ isc__nm_enqueue_ievent(sock->worker,
(isc__netievent_t *)ievent);
}
}
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(uvreq));
REQUIRE(VALID_NMHANDLE(uvreq->handle));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
region.base = (unsigned char *)uvreq->uvbuf.base;
region.length = uvreq->uvbuf.len;
return;
}
- isc__netievent_sendcb_t *ievent =
- isc__nm_get_netievent_sendcb(sock->mgr, sock, uvreq, eresult);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__netievent_sendcb_t *ievent = isc__nm_get_netievent_sendcb(
+ sock->worker, sock, uvreq, eresult);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(uvreq));
REQUIRE(VALID_NMHANDLE(uvreq->handle));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
uvreq->cb.send(uvreq->handle, eresult, uvreq->cbarg);
isc__nm_uvreq_put(&uvreq, sock);
}
-static void
-isc__nm_async_close(isc__networker_t *worker, isc__netievent_t *ev0) {
- isc__netievent_close_t *ievent = (isc__netievent_close_t *)ev0;
- isc_nmsocket_t *sock = ievent->sock;
-
- REQUIRE(VALID_NMSOCK(ievent->sock));
- REQUIRE(sock->tid == isc_nm_tid());
- REQUIRE(sock->closehandle_cb != NULL);
-
- UNUSED(worker);
-
- ievent->sock->closehandle_cb(sock);
-}
-
void
isc__nm_async_detach(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__netievent_detach_t *ievent = (isc__netievent_detach_t *)ev0;
REQUIRE(VALID_NMSOCK(ievent->sock));
REQUIRE(VALID_NMHANDLE(ievent->handle));
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
UNUSED(worker);
static void
shutdown_walk_cb(uv_handle_t *handle, void *arg) {
- isc_nmsocket_t *sock = uv_handle_get_data(handle);
+ isc_nmsocket_t *sock = NULL;
UNUSED(arg);
if (uv_is_closing(handle)) {
return;
}
+ sock = uv_handle_get_data(handle);
+
switch (handle->type) {
case UV_UDP:
isc__nmsocket_shutdown(sock);
}
}
-void
-isc__nm_async_shutdown(isc__networker_t *worker, isc__netievent_t *ev0) {
- UNUSED(ev0);
-
- uv_walk(&worker->loop, shutdown_walk_cb, NULL);
-}
-
-bool
-isc__nm_acquire_interlocked(isc_nm_t *mgr) {
- if (!isc__nm_in_netthread()) {
- return (false);
- }
-
- LOCK(&mgr->lock);
- bool success = atomic_compare_exchange_strong(
- &mgr->interlocked, &(int){ ISC_NETMGR_NON_INTERLOCKED },
- isc_nm_tid());
-
- UNLOCK(&mgr->lock);
- return (success);
-}
-
-void
-isc__nm_drop_interlocked(isc_nm_t *mgr) {
- if (!isc__nm_in_netthread()) {
- return;
- }
-
- LOCK(&mgr->lock);
- int tid = atomic_exchange(&mgr->interlocked,
- ISC_NETMGR_NON_INTERLOCKED);
- INSIST(tid != ISC_NETMGR_NON_INTERLOCKED);
- BROADCAST(&mgr->wkstatecond);
- UNLOCK(&mgr->lock);
-}
-
-void
-isc__nm_acquire_interlocked_force(isc_nm_t *mgr) {
- if (!isc__nm_in_netthread()) {
- return;
- }
-
- LOCK(&mgr->lock);
- while (!atomic_compare_exchange_strong(
- &mgr->interlocked, &(int){ ISC_NETMGR_NON_INTERLOCKED },
- isc_nm_tid()))
- {
- WAIT(&mgr->wkstatecond, &mgr->lock);
- }
- UNLOCK(&mgr->lock);
-}
-
void
isc_nm_setstats(isc_nm_t *mgr, isc_stats_t *stats) {
REQUIRE(VALID_NM(mgr));
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(id < STATID_MAX);
- if (sock->statsindex != NULL && sock->mgr->stats != NULL) {
- isc_stats_increment(sock->mgr->stats, sock->statsindex[id]);
+ if (sock->statsindex != NULL && sock->worker->netmgr->stats != NULL) {
+ isc_stats_increment(sock->worker->netmgr->stats,
+ sock->statsindex[id]);
}
}
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(id < STATID_MAX);
- if (sock->statsindex != NULL && sock->mgr->stats != NULL) {
- isc_stats_decrement(sock->mgr->stats, sock->statsindex[id]);
+ if (sock->statsindex != NULL && sock->worker->netmgr->stats != NULL) {
+ isc_stats_decrement(sock->worker->netmgr->stats,
+ sock->statsindex[id]);
}
}
}
}
-static isc_threadresult_t
-isc__nm_work_run(isc_threadarg_t arg) {
- isc__nm_work_t *work = (isc__nm_work_t *)arg;
-
- work->cb(work->data);
-
- return ((isc_threadresult_t)0);
-}
-
-static void
-isc__nm_work_cb(uv_work_t *req) {
- isc__nm_work_t *work = uv_req_get_data((uv_req_t *)req);
-
- if (isc_tid_v == SIZE_MAX) {
- isc__trampoline_t *trampoline_arg =
- isc__trampoline_get(isc__nm_work_run, work);
- (void)isc__trampoline_run(trampoline_arg);
- } else {
- (void)isc__nm_work_run((isc_threadarg_t)work);
- }
-}
-
-static void
-isc__nm_after_work_cb(uv_work_t *req, int status) {
- isc_result_t result = ISC_R_SUCCESS;
- isc__nm_work_t *work = uv_req_get_data((uv_req_t *)req);
- isc_nm_t *netmgr = work->netmgr;
-
- if (status != 0) {
- result = isc_uverr2result(status);
- }
-
- work->after_cb(work->data, result);
-
- isc_mem_put(netmgr->mctx, work, sizeof(*work));
-
- isc_nm_detach(&netmgr);
-}
-
-void
-isc_nm_work_offload(isc_nm_t *netmgr, isc_nm_workcb_t work_cb,
- isc_nm_after_workcb_t after_work_cb, void *data) {
- isc__networker_t *worker = NULL;
- isc__nm_work_t *work = NULL;
- int r;
-
- REQUIRE(isc__nm_in_netthread());
- REQUIRE(VALID_NM(netmgr));
-
- worker = &netmgr->workers[isc_nm_tid()];
-
- work = isc_mem_get(netmgr->mctx, sizeof(*work));
- *work = (isc__nm_work_t){
- .cb = work_cb,
- .after_cb = after_work_cb,
- .data = data,
- };
-
- isc_nm_attach(netmgr, &work->netmgr);
-
- uv_req_set_data((uv_req_t *)&work->req, work);
-
- r = uv_queue_work(&worker->loop, &work->req, isc__nm_work_cb,
- isc__nm_after_work_cb);
- UV_RUNTIME_CHECK(uv_queue_work, r);
-}
-
void
isc_nm_bad_request(isc_nmhandle_t *handle) {
isc_nmsocket_t *sock = NULL;
bool
isc_nm_xfr_allowed(isc_nmhandle_t *handle) {
- isc_nmsocket_t *sock;
+ isc_nmsocket_t *sock = NULL;
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
return (false);
}
-uint32_t
-isc_nm_getnworkers(const isc_nm_t *netmgr) {
- REQUIRE(VALID_NM(netmgr));
-
- return (netmgr->nworkers);
-}
-
const char *
isc_nm_verify_tls_peer_result_string(const isc_nmhandle_t *handle) {
- isc_nmsocket_t *sock;
+ isc_nmsocket_t *sock = NULL;
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
void
isc__nm_async_settlsctx(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__netievent__tlsctx_t *ev_tlsctx = (isc__netievent__tlsctx_t *)ev0;
- const int tid = isc_nm_tid();
+ const int tid = isc_tid();
isc_nmsocket_t *listener = ev_tlsctx->sock;
isc_tlsctx_t *tlsctx = ev_tlsctx->tlsctx;
static void
set_tlsctx_workers(isc_nmsocket_t *listener, isc_tlsctx_t *tlsctx) {
+ uint32_t nloops = isc_loopmgr_nloops(listener->worker->netmgr->loopmgr);
/* Update the TLS context reference for every worker thread. */
- for (size_t i = 0; i < isc_nm_getnworkers(listener->mgr); i++) {
+ for (size_t i = 0; i < nloops; i++) {
isc__netievent__tlsctx_t *ievent =
- isc__nm_get_netievent_settlsctx(listener->mgr, listener,
- tlsctx);
- isc__nm_enqueue_ievent(&listener->mgr->workers[i],
+ isc__nm_get_netievent_settlsctx(listener->worker,
+ listener, tlsctx);
+ isc__nm_enqueue_ievent(listener->worker,
(isc__netievent_t *)ievent);
}
}
client_sabuf, local_sabuf);
}
+static void
+isc__networker_destroy(isc__networker_t *worker) {
+ isc_nm_t *netmgr = worker->netmgr;
+ worker->netmgr = NULL;
+
+ isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_NETMGR,
+ ISC_LOG_DEBUG(1),
+ "Destroying down network manager worker on loop %p(%d)",
+ worker->loop, isc_tid());
+
+ isc_loop_detach(&worker->loop);
+
+ isc_mem_put(worker->mctx, worker->sendbuf, ISC_NETMGR_SENDBUF_SIZE);
+ isc_mem_putanddetach(&worker->mctx, worker->recvbuf,
+ ISC_NETMGR_RECVBUF_SIZE);
+ isc_nm_detach(&netmgr);
+}
+
+ISC_REFCOUNT_IMPL(isc__networker, isc__networker_destroy);
+
#ifdef NETMGR_TRACE
/*
* Dump all active sockets in netmgr. We output to stderr
nmsocket_dump(isc_nmsocket_t *sock) {
isc_nmhandle_t *handle = NULL;
- LOCK(&sock->lock);
+ LOCK(&sock->tracelock);
fprintf(stderr, "\n=================\n");
fprintf(stderr, "Active %s socket %p, type %s, refs %" PRIuFAST32 "\n",
atomic_load(&sock->client) ? "client" : "server", sock,
}
fprintf(stderr, "\n");
- UNLOCK(&sock->lock);
+ UNLOCK(&sock->tracelock);
}
void
#include <isc/util.h>
#include <isc/uv.h>
+#include "../loop_p.h"
#include "netmgr-int.h"
static atomic_uint_fast32_t last_tcpquota_log = 0;
static void
failed_accept_cb(isc_nmsocket_t *sock, isc_result_t eresult);
-static void
-stop_tcp_parent(isc_nmsocket_t *sock);
-static void
-stop_tcp_child(isc_nmsocket_t *sock);
-
static void
failed_accept_cb(isc_nmsocket_t *sock, isc_result_t eresult) {
REQUIRE(atomic_load(&sock->accepting));
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(req));
- REQUIRE(isc__nm_in_netthread());
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
- worker = &sock->mgr->workers[sock->tid];
+ worker = sock->worker;
atomic_store(&sock->connecting, true);
result = isc__nm_socket_connectiontimeout(sock->fd, 120 * 1000);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
- r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp);
+ r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp);
UV_RUNTIME_CHECK(uv_tcp_init, r);
uv_handle_set_data(&sock->uv_handle.handle, sock);
- r = uv_timer_init(&worker->loop, &sock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &sock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
if (r != 0) {
isc__nm_closesocket(sock->fd);
isc__nm_incstats(sock, STATID_OPENFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
isc__nm_incstats(sock, STATID_OPEN);
r = uv_tcp_bind(&sock->uv_handle.tcp, &req->local.type.sa, 0);
if (r != 0) {
isc__nm_incstats(sock, STATID_BINDFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
}
- isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle);
+ isc__nm_set_network_buffers(sock->worker->netmgr,
+ &sock->uv_handle.handle);
uv_handle_set_data(&req->uv_req.handle, req);
r = uv_tcp_connect(&req->uv_req.connect, &sock->uv_handle.tcp,
&req->peer.type.sa, tcp_connect_cb);
if (r != 0) {
isc__nm_incstats(sock, STATID_CONNECTFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
uv_handle_set_data((uv_handle_t *)&sock->read_timer,
atomic_store(&sock->connected, true);
-done:
- result = isc_uverr2result(r);
- LOCK(&sock->lock);
- sock->result = result;
- SIGNAL(&sock->cond);
- if (!atomic_load(&sock->active)) {
- WAIT(&sock->scond, &sock->lock);
- }
- INSIST(atomic_load(&sock->active));
- UNLOCK(&sock->lock);
-
- return (result);
+ return (ISC_R_SUCCESS);
}
void
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tcpsocket);
REQUIRE(sock->parent == NULL);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
result = tcp_connect_direct(sock, req);
if (result != ISC_R_SUCCESS) {
isc__nm_uvreq_t *req = NULL;
isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)uvreq->handle);
struct sockaddr_storage ss;
+ isc__networker_t *worker = NULL;
int r;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
+
+ worker = sock->worker;
req = uv_handle_get_data((uv_handle_t *)uvreq);
*/
isc__nm_uvreq_put(&req, sock);
return;
- } else if (isc__nm_closing(sock)) {
+ } else if (isc__nm_closing(worker)) {
/* Network manager shutting down */
result = ISC_R_SHUTTINGDOWN;
goto error;
isc__netievent_tcpconnect_t *ievent = NULL;
isc__nm_uvreq_t *req = NULL;
sa_family_t sa_family;
+ isc__networker_t *worker = &mgr->workers[isc_tid()];
+ uv_os_sock_t fd = -1;
REQUIRE(VALID_NM(mgr));
REQUIRE(local != NULL);
REQUIRE(peer != NULL);
+ if (isc__nm_closing(worker)) {
+ cb(NULL, ISC_R_SHUTTINGDOWN, cbarg);
+ return;
+ }
+
sa_family = peer->type.sa.sa_family;
- sock = isc_mem_get(mgr->mctx, sizeof(*sock));
- isc__nmsocket_init(sock, mgr, isc_nm_tcpsocket, local);
+ result = isc__nm_socket(sa_family, SOCK_STREAM, 0, &fd);
+ if (result != ISC_R_SUCCESS) {
+ cb(NULL, result, cbarg);
+ return;
+ }
+
+ sock = isc_mem_get(worker->mctx, sizeof(*sock));
+ isc__nmsocket_init(sock, worker, isc_nm_tcpsocket, local);
sock->connect_timeout = timeout;
- sock->result = ISC_R_UNSET;
- sock->fd = (uv_os_sock_t)-1;
+ sock->fd = fd;
atomic_init(&sock->client, true);
- req = isc__nm_uvreq_get(mgr, sock);
+ req = isc__nm_uvreq_get(worker, sock);
req->cb.connect = cb;
req->cbarg = cbarg;
req->peer = *peer;
req->local = *local;
req->handle = isc__nmhandle_get(sock, &req->peer, &sock->iface);
- result = isc__nm_socket(sa_family, SOCK_STREAM, 0, &sock->fd);
- if (result != ISC_R_SUCCESS) {
- if (isc__nm_in_netthread()) {
- sock->tid = isc_nm_tid();
- isc__nmsocket_clearcb(sock);
- isc__nm_connectcb(sock, req, result, false);
- } else {
- isc__nmsocket_clearcb(sock);
- sock->tid = isc_random_uniform(mgr->nworkers);
- isc__nm_connectcb(sock, req, result, true);
- }
- atomic_store(&sock->closed, true);
- isc__nmsocket_detach(&sock);
- return;
- }
-
(void)isc__nm_socket_min_mtu(sock->fd, sa_family);
(void)isc__nm_socket_tcp_maxseg(sock->fd, NM_MAXSEG);
- ievent = isc__nm_get_netievent_tcpconnect(mgr, sock, req);
+ ievent = isc__nm_get_netievent_tcpconnect(worker, sock, req);
+
+ atomic_store(&sock->active, true);
+ isc__nm_async_tcpconnect(&mgr->workers[sock->tid],
+ (isc__netievent_t *)ievent);
+ isc__nm_put_netievent_tcpconnect(worker, ievent);
- if (isc__nm_in_netthread()) {
- atomic_store(&sock->active, true);
- sock->tid = isc_nm_tid();
- isc__nm_async_tcpconnect(&mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
- isc__nm_put_netievent_tcpconnect(mgr, ievent);
- } else {
- atomic_init(&sock->active, false);
- sock->tid = isc_random_uniform(mgr->nworkers);
- isc__nm_enqueue_ievent(&mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
- }
- LOCK(&sock->lock);
- while (sock->result == ISC_R_UNSET) {
- WAIT(&sock->cond, &sock->lock);
- }
atomic_store(&sock->active, true);
- BROADCAST(&sock->scond);
- UNLOCK(&sock->lock);
}
static uv_os_sock_t
uv_os_sock_t fd, int tid) {
isc__netievent_tcplisten_t *ievent = NULL;
isc_nmsocket_t *csock = &sock->children[tid];
+ isc__networker_t *worker = &mgr->workers[tid];
- isc__nmsocket_init(csock, mgr, isc_nm_tcpsocket, iface);
+ isc__nmsocket_init(csock, worker, isc_nm_tcpsocket, iface);
csock->parent = sock;
csock->accept_cb = sock->accept_cb;
csock->accept_cbarg = sock->accept_cbarg;
csock->backlog = sock->backlog;
- csock->tid = tid;
+
/*
* We don't attach to quota, just assign - to avoid
* increasing quota unnecessarily.
}
REQUIRE(csock->fd >= 0);
- ievent = isc__nm_get_netievent_tcplisten(mgr, csock);
- isc__nm_maybe_enqueue_ievent(&mgr->workers[tid],
- (isc__netievent_t *)ievent);
-}
+ ievent = isc__nm_get_netievent_tcplisten(csock->worker, csock);
-static void
-enqueue_stoplistening(isc_nmsocket_t *sock) {
- isc__netievent_tcpstop_t *ievent =
- isc__nm_get_netievent_tcpstop(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ if (tid == 0) {
+ isc__nm_process_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ } else {
+ isc__nm_enqueue_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ }
}
isc_result_t
isc_nm_listentcp(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface,
isc_nm_accept_cb_t accept_cb, void *accept_cbarg, int backlog,
isc_quota_t *quota, isc_nmsocket_t **sockp) {
- isc_result_t result = ISC_R_SUCCESS;
isc_nmsocket_t *sock = NULL;
size_t children_size = 0;
uv_os_sock_t fd = -1;
+ isc_result_t result = ISC_R_UNSET;
+ isc__networker_t *worker = &mgr->workers[0];
REQUIRE(VALID_NM(mgr));
+ REQUIRE(isc_tid() == 0);
- sock = isc_mem_get(mgr->mctx, sizeof(*sock));
- isc__nmsocket_init(sock, mgr, isc_nm_tcplistener, iface);
+ if (workers == 0) {
+ workers = mgr->nloops;
+ }
+ REQUIRE(workers <= mgr->nloops);
+
+ sock = isc_mem_get(worker->mctx, sizeof(*sock));
+ isc__nmsocket_init(sock, worker, isc_nm_tcplistener, iface);
atomic_init(&sock->rchildren, 0);
- sock->nchildren = (workers == ISC_NM_LISTEN_ALL)
- ? (uint32_t)mgr->nworkers
- : workers;
+ sock->nchildren = (workers == ISC_NM_LISTEN_ALL) ? (uint32_t)mgr->nloops
+ : workers;
children_size = sock->nchildren * sizeof(sock->children[0]);
- sock->children = isc_mem_get(mgr->mctx, children_size);
+ sock->children = isc_mem_get(worker->mctx, children_size);
memset(sock->children, 0, children_size);
- sock->result = ISC_R_UNSET;
+ isc_barrier_init(&sock->barrier, sock->nchildren);
sock->accept_cb = accept_cb;
sock->accept_cbarg = accept_cbarg;
sock->backlog = backlog;
sock->pquota = quota;
- sock->tid = 0;
- sock->fd = -1;
-
if (!mgr->load_balance_sockets) {
fd = isc__nm_tcp_lb_socket(mgr, iface->type.sa.sa_family);
}
- isc_barrier_init(&sock->startlistening, sock->nchildren);
-
- for (size_t i = 0; i < sock->nchildren; i++) {
- if ((int)i == isc_nm_tid()) {
- continue;
- }
+ for (size_t i = 1; i < sock->nchildren; i++) {
start_tcp_child(mgr, iface, sock, fd, i);
}
- if (isc__nm_in_netthread()) {
- start_tcp_child(mgr, iface, sock, fd, isc_nm_tid());
- }
+ start_tcp_child(mgr, iface, sock, fd, 0);
if (!mgr->load_balance_sockets) {
isc__nm_closesocket(fd);
}
LOCK(&sock->lock);
- while (atomic_load(&sock->rchildren) != sock->nchildren) {
- WAIT(&sock->cond, &sock->lock);
- }
result = sock->result;
- atomic_store(&sock->active, true);
UNLOCK(&sock->lock);
-
INSIST(result != ISC_R_UNSET);
- if (result == ISC_R_SUCCESS) {
- REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren);
- *sockp = sock;
- } else {
+ atomic_store(&sock->active, true);
+
+ if (result != ISC_R_SUCCESS) {
atomic_store(&sock->active, false);
- enqueue_stoplistening(sock);
+ isc__nm_tcp_stoplistening(sock);
isc_nmsocket_close(&sock);
+
+ return (result);
}
- return (result);
+ REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren);
+ *sockp = sock;
+ return (ISC_R_SUCCESS);
}
void
int r;
int flags = 0;
isc_nmsocket_t *sock = NULL;
- isc_result_t result;
- isc_nm_t *mgr;
+ isc_result_t result = ISC_R_UNSET;
REQUIRE(VALID_NMSOCK(ievent->sock));
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
REQUIRE(VALID_NMSOCK(ievent->sock->parent));
sock = ievent->sock;
sa_family = sock->iface.type.sa.sa_family;
- mgr = sock->mgr;
REQUIRE(sock->type == isc_nm_tcpsocket);
REQUIRE(sock->parent != NULL);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
(void)isc__nm_socket_min_mtu(sock->fd, sa_family);
(void)isc__nm_socket_tcp_maxseg(sock->fd, NM_MAXSEG);
- r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp);
+ r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp);
UV_RUNTIME_CHECK(uv_tcp_init, r);
-
uv_handle_set_data(&sock->uv_handle.handle, sock);
/* This keeps the socket alive after everything else is gone */
isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL });
- r = uv_timer_init(&worker->loop, &sock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &sock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
- LOCK(&sock->parent->lock);
-
r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd);
if (r < 0) {
isc__nm_closesocket(sock->fd);
flags = UV_TCP_IPV6ONLY;
}
- if (mgr->load_balance_sockets) {
+ if (sock->worker->netmgr->load_balance_sockets) {
r = isc__nm_tcp_freebind(&sock->uv_handle.tcp,
&sock->iface.type.sa, flags);
if (r < 0) {
goto done;
}
} else {
+ LOCK(&sock->parent->lock);
if (sock->parent->fd == -1) {
r = isc__nm_tcp_freebind(&sock->uv_handle.tcp,
&sock->iface.type.sa, flags);
if (r < 0) {
isc__nm_incstats(sock, STATID_BINDFAIL);
+ UNLOCK(&sock->parent->lock);
goto done;
}
sock->parent->uv_handle.tcp.flags =
sock->uv_handle.tcp.flags =
sock->parent->uv_handle.tcp.flags;
}
+ UNLOCK(&sock->parent->lock);
}
- isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle);
+ isc__nm_set_network_buffers(sock->worker->netmgr,
+ &sock->uv_handle.handle);
/*
* The callback will run in the same thread uv_listen() was called
done:
result = isc_uverr2result(r);
+ atomic_fetch_add(&sock->parent->rchildren, 1);
+
if (result != ISC_R_SUCCESS) {
sock->pquota = NULL;
}
- atomic_fetch_add(&sock->parent->rchildren, 1);
+ LOCK(&sock->parent->lock);
if (sock->parent->result == ISC_R_UNSET) {
sock->parent->result = result;
+ } else {
+ REQUIRE(sock->parent->result == result);
}
- SIGNAL(&sock->parent->cond);
UNLOCK(&sock->parent->lock);
- isc_barrier_wait(&sock->parent->startlistening);
+ REQUIRE(!worker->loop->paused);
+ isc_barrier_wait(&sock->parent->barrier);
}
static void
}
REQUIRE(VALID_NMSOCK(ssock));
- REQUIRE(ssock->tid == isc_nm_tid());
+ REQUIRE(ssock->tid == isc_tid());
if (isc__nmsocket_closing(ssock)) {
result = ISC_R_CANCELED;
isc__nm_accept_connection_log(result, can_log_tcp_quota());
}
+static void
+stop_tcp_child(isc_nmsocket_t *sock, uint32_t tid) {
+ isc_nmsocket_t *csock = NULL;
+ isc__netievent_tcpstop_t *ievent = NULL;
+
+ csock = &sock->children[tid];
+ REQUIRE(VALID_NMSOCK(csock));
+
+ atomic_store(&csock->active, false);
+ ievent = isc__nm_get_netievent_tcpstop(csock->worker, csock);
+
+ if (tid == 0) {
+ isc__nm_process_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ } else {
+ isc__nm_enqueue_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ }
+}
+
+static void
+stop_tcp_parent(isc_nmsocket_t *sock) {
+ /* Stop the parent */
+ atomic_store(&sock->closed, true);
+ isc__nmsocket_prep_destroy(sock);
+}
+
void
isc__nm_tcp_stoplistening(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tcplistener);
+ REQUIRE(sock->tid == isc_tid());
+ REQUIRE(sock->tid == 0);
- if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
- true)) {
- UNREACHABLE();
- }
+ RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing,
+ &(bool){ false }, true));
- if (!isc__nm_in_netthread()) {
- enqueue_stoplistening(sock);
- } else {
- stop_tcp_parent(sock);
+ for (size_t i = 1; i < sock->nchildren; i++) {
+ stop_tcp_child(sock, i);
}
+
+ stop_tcp_child(sock, 0);
+
+ stop_tcp_parent(sock);
}
void
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
+ REQUIRE(sock->parent != NULL);
+ REQUIRE(sock->type == isc_nm_tcpsocket);
- if (sock->parent != NULL) {
- stop_tcp_child(sock);
- return;
- }
+ RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing,
+ &(bool){ false }, true));
- stop_tcp_parent(sock);
+ tcp_close_direct(sock);
+
+ (void)atomic_fetch_sub(&sock->parent->rchildren, 1);
+
+ REQUIRE(!worker->loop->paused);
+ isc_barrier_wait(&sock->parent->barrier);
}
void
isc_nmsocket_t *sock = handle->sock;
isc__netievent_tcpstartread_t *ievent = NULL;
+ isc_nm_t *netmgr = sock->worker->netmgr;
REQUIRE(sock->type == isc_nm_tcpsocket);
REQUIRE(sock->statichandle == handle);
sock->recv_cbarg = cbarg;
sock->recv_read = true;
if (sock->read_timeout == 0) {
- sock->read_timeout =
- (atomic_load(&sock->keepalive)
- ? atomic_load(&sock->mgr->keepalive)
- : atomic_load(&sock->mgr->idle));
+ sock->read_timeout = (atomic_load(&sock->keepalive)
+ ? atomic_load(&netmgr->keepalive)
+ : atomic_load(&netmgr->idle));
}
- ievent = isc__nm_get_netievent_tcpstartread(sock->mgr, sock);
+ ievent = isc__nm_get_netievent_tcpstartread(sock->worker, sock);
/*
* This MUST be done asynchronously, no matter which thread we're
* isc_nm_read() again; if we tried to do that synchronously
* we'd clash in processbuffer() and grow the stack indefinitely.
*/
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
return;
}
isc_result_t result;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
UNUSED(worker);
if (isc__nmsocket_closing(sock)) {
return;
}
- ievent = isc__nm_get_netievent_tcppauseread(sock->mgr, sock);
+ ievent = isc__nm_get_netievent_tcppauseread(sock->worker, sock);
- isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
return;
}
isc_nmsocket_t *sock = ievent->sock;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
UNUSED(worker);
isc__nmsocket_timer_stop(sock);
isc__netievent_tcpstartread_t *ievent = NULL;
isc_nmsocket_t *sock = handle->sock;
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
if (sock->recv_cb == NULL) {
/* We are no longer reading */
return;
}
- ievent = isc__nm_get_netievent_tcpstartread(sock->mgr, sock);
+ ievent = isc__nm_get_netievent_tcpstartread(sock->worker, sock);
- isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
isc__nm_tcp_read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) {
isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)stream);
isc__nm_uvreq_t *req = NULL;
+ isc_nm_t *netmgr = NULL;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->reading));
REQUIRE(buf != NULL);
+ netmgr = sock->worker->netmgr;
+
if (isc__nmsocket_closing(sock)) {
isc__nm_tcp_failed_read_cb(sock, ISC_R_CANCELED);
goto free;
req->uvbuf.len = nread;
if (!atomic_load(&sock->client)) {
- sock->read_timeout =
- (atomic_load(&sock->keepalive)
- ? atomic_load(&sock->mgr->keepalive)
- : atomic_load(&sock->mgr->idle));
+ sock->read_timeout = (atomic_load(&sock->keepalive)
+ ? atomic_load(&netmgr->keepalive)
+ : atomic_load(&netmgr->idle));
}
isc__nm_readcb(sock, req, ISC_R_SUCCESS);
/*
* Create a tcpaccept event and pass it using the async channel.
*/
- ievent = isc__nm_get_netievent_tcpaccept(sock->mgr, sock, quota);
- isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tcpaccept(sock->worker, sock, quota);
+ isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
/*
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
result = accept_connection(sock, ievent->quota);
isc__nm_accept_connection_log(result, can_log_tcp_quota());
isc_nmhandle_t *handle = NULL;
REQUIRE(VALID_NMSOCK(ssock));
- REQUIRE(ssock->tid == isc_nm_tid());
+ REQUIRE(ssock->tid == isc_tid());
if (isc__nmsocket_closing(ssock)) {
if (quota != NULL) {
return (ISC_R_CANCELED);
}
- csock = isc_mem_get(ssock->mgr->mctx, sizeof(isc_nmsocket_t));
- isc__nmsocket_init(csock, ssock->mgr, isc_nm_tcpsocket, &ssock->iface);
- csock->tid = ssock->tid;
+ csock = isc_mem_get(ssock->worker->mctx, sizeof(isc_nmsocket_t));
+ isc__nmsocket_init(csock, ssock->worker, isc_nm_tcpsocket,
+ &ssock->iface);
isc__nmsocket_attach(ssock, &csock->server);
csock->recv_cb = ssock->recv_cb;
csock->recv_cbarg = ssock->recv_cbarg;
csock->quota = quota;
atomic_init(&csock->accepting, true);
- worker = &csock->mgr->workers[isc_nm_tid()];
+ worker = csock->worker;
- r = uv_tcp_init(&worker->loop, &csock->uv_handle.tcp);
+ r = uv_tcp_init(&worker->loop->loop, &csock->uv_handle.tcp);
UV_RUNTIME_CHECK(uv_tcp_init, r);
uv_handle_set_data(&csock->uv_handle.handle, csock);
- r = uv_timer_init(&worker->loop, &csock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &csock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&csock->read_timer, csock);
isc__nm_incstats(csock, STATID_ACCEPT);
- csock->read_timeout = atomic_load(&csock->mgr->init);
+ csock->read_timeout = atomic_load(&csock->worker->netmgr->init);
atomic_fetch_add(&ssock->parent->active_child_connections, 1);
REQUIRE(sock->type == isc_nm_tcpsocket);
- uvreq = isc__nm_uvreq_get(sock->mgr, sock);
+ uvreq = isc__nm_uvreq_get(sock->worker, sock);
uvreq->uvbuf.base = (char *)region->base;
uvreq->uvbuf.len = region->length;
uvreq->cb.send = cb;
uvreq->cbarg = cbarg;
- ievent = isc__nm_get_netievent_tcpsend(sock->mgr, sock, uvreq);
- isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tcpsend(sock->worker, sock, uvreq);
+ isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
return;
}
isc__netievent_tcpsend_t *ievent = (isc__netievent_tcpsend_t *)ev0;
isc_nmsocket_t *sock = ievent->sock;
isc__nm_uvreq_t *uvreq = ievent->req;
+ isc_nm_t *netmgr = sock->worker->netmgr;
REQUIRE(sock->type == isc_nm_tcpsocket);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
UNUSED(worker);
if (sock->write_timeout == 0) {
- sock->write_timeout =
- (atomic_load(&sock->keepalive)
- ? atomic_load(&sock->mgr->keepalive)
- : atomic_load(&sock->mgr->idle));
+ sock->write_timeout = (atomic_load(&sock->keepalive)
+ ? atomic_load(&netmgr->keepalive)
+ : atomic_load(&netmgr->idle));
}
result = tcp_send_direct(sock, uvreq);
tcp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(req));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(sock->type == isc_nm_tcpsocket);
int r;
uv_handle_set_data(handle, NULL);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
+ REQUIRE(sock->type == isc_nm_tcpsocket);
if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false },
true)) {
static void
tcp_close_sock(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false },
}
}
-static void
-stop_tcp_child(isc_nmsocket_t *sock) {
- REQUIRE(sock->type == isc_nm_tcpsocket);
- REQUIRE(sock->tid == isc_nm_tid());
-
- if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
- true)) {
- return;
- }
-
- tcp_close_direct(sock);
-
- atomic_fetch_sub(&sock->parent->rchildren, 1);
-
- isc_barrier_wait(&sock->parent->stoplistening);
-}
-
-static void
-stop_tcp_parent(isc_nmsocket_t *sock) {
- isc_nmsocket_t *csock = NULL;
-
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
- REQUIRE(sock->type == isc_nm_tcplistener);
-
- isc_barrier_init(&sock->stoplistening, sock->nchildren);
-
- for (size_t i = 0; i < sock->nchildren; i++) {
- csock = &sock->children[i];
- REQUIRE(VALID_NMSOCK(csock));
-
- if ((int)i == isc_nm_tid()) {
- /*
- * We need to schedule closing the other sockets first
- */
- continue;
- }
-
- atomic_store(&csock->active, false);
- enqueue_stoplistening(csock);
- }
-
- csock = &sock->children[isc_nm_tid()];
- atomic_store(&csock->active, false);
- stop_tcp_child(csock);
-
- atomic_store(&sock->closed, true);
- isc__nmsocket_prep_destroy(sock);
-}
-
static void
tcp_close_direct(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
if (sock->server != NULL) {
return;
}
- if (sock->tid == isc_nm_tid()) {
+ if (sock->tid == isc_tid()) {
tcp_close_direct(sock);
} else {
/*
* We need to create an event and pass it using async channel
*/
isc__netievent_tcpclose_t *ievent =
- isc__nm_get_netievent_tcpclose(sock->mgr, sock);
+ isc__nm_get_netievent_tcpclose(sock->worker, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
+ isc__nm_enqueue_ievent(sock->worker,
(isc__netievent_t *)ievent);
}
}
isc_nmsocket_t *sock = ievent->sock;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(isc__nm_in_netthread());
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
isc__nmsocket_prep_destroy(sock);
isc__nmsocket_detach(&sock);
void
isc__nm_tcp_shutdown(isc_nmsocket_t *sock) {
+ isc__networker_t *worker = NULL;
+
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(sock->type == isc_nm_tcpsocket);
+ worker = sock->worker;
+
/*
* If the socket is active, mark it inactive and
* continue. If it isn't active, stop now.
}
if (sock->statichandle != NULL) {
- if (isc__nm_closing(sock)) {
+ if (isc__nm_closing(worker)) {
isc__nm_failed_read_cb(sock, ISC_R_SHUTTINGDOWN, false);
} else {
isc__nm_failed_read_cb(sock, ISC_R_CANCELED, false);
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tcpsocket);
- ievent = isc__nm_get_netievent_tcpcancel(sock->mgr, sock, handle);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tcpcancel(sock->worker, sock, handle);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
isc_nmsocket_t *sock = ievent->sock;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
UNUSED(worker);
uv_timer_stop(&sock->read_timer);
static void
quota_accept_cb(isc_quota_t *quota, void *sock0);
-static void
-stop_tcpdns_parent(isc_nmsocket_t *sock);
-static void
-stop_tcpdns_child(isc_nmsocket_t *sock);
-
static isc_result_t
tcpdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
isc__networker_t *worker = NULL;
- isc_result_t result = ISC_R_UNSET;
int r;
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(req));
- REQUIRE(isc__nm_in_netthread());
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
- worker = &sock->mgr->workers[sock->tid];
+ worker = sock->worker;
atomic_store(&sock->connecting, true);
- r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp);
+ r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp);
UV_RUNTIME_CHECK(uv_tcp_init, r);
uv_handle_set_data(&sock->uv_handle.handle, sock);
- r = uv_timer_init(&worker->loop, &sock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &sock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
- if (isc__nm_closing(sock)) {
- result = ISC_R_SHUTTINGDOWN;
- goto error;
+ if (isc__nm_closing(worker)) {
+ return (ISC_R_SHUTTINGDOWN);
}
r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd);
if (r != 0) {
isc__nm_closesocket(sock->fd);
isc__nm_incstats(sock, STATID_OPENFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
isc__nm_incstats(sock, STATID_OPEN);
*/
if (r != 0 && r != UV_EINVAL) {
isc__nm_incstats(sock, STATID_BINDFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
}
- isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle);
+ isc__nm_set_network_buffers(sock->worker->netmgr,
+ &sock->uv_handle.handle);
uv_handle_set_data(&req->uv_req.handle, req);
r = uv_tcp_connect(&req->uv_req.connect, &sock->uv_handle.tcp,
&req->peer.type.sa, tcpdns_connect_cb);
if (r != 0) {
isc__nm_incstats(sock, STATID_CONNECTFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
uv_handle_set_data((uv_handle_t *)&sock->read_timer,
atomic_store(&sock->connected, true);
-done:
- result = isc_uverr2result(r);
-error:
- LOCK(&sock->lock);
- sock->result = result;
- SIGNAL(&sock->cond);
- if (!atomic_load(&sock->active)) {
- WAIT(&sock->scond, &sock->lock);
- }
- INSIST(atomic_load(&sock->active));
- UNLOCK(&sock->lock);
-
- return (result);
+ return (ISC_R_SUCCESS);
}
void
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tcpdnssocket);
REQUIRE(sock->parent == NULL);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
result = tcpdns_connect_direct(sock, req);
if (result != ISC_R_SUCCESS) {
isc__nm_uvreq_t *req = NULL;
isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)uvreq->handle);
struct sockaddr_storage ss;
+ isc__networker_t *worker = NULL;
int r;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
+
+ worker = sock->worker;
req = uv_handle_get_data((uv_handle_t *)uvreq);
if (atomic_load(&sock->timedout)) {
result = ISC_R_TIMEDOUT;
goto error;
- } else if (isc__nm_closing(sock)) {
+ } else if (isc__nm_closing(worker)) {
/* Network manager shutting down */
result = ISC_R_SHUTTINGDOWN;
goto error;
isc__netievent_tcpdnsconnect_t *ievent = NULL;
isc__nm_uvreq_t *req = NULL;
sa_family_t sa_family;
+ isc__networker_t *worker = &mgr->workers[isc_tid()];
REQUIRE(VALID_NM(mgr));
REQUIRE(local != NULL);
sa_family = peer->type.sa.sa_family;
- sock = isc_mem_get(mgr->mctx, sizeof(*sock));
- isc__nmsocket_init(sock, mgr, isc_nm_tcpdnssocket, local);
+ sock = isc_mem_get(worker->mctx, sizeof(*sock));
+ isc__nmsocket_init(sock, worker, isc_nm_tcpdnssocket, local);
sock->connect_timeout = timeout;
- sock->result = ISC_R_UNSET;
atomic_init(&sock->client, true);
- req = isc__nm_uvreq_get(mgr, sock);
+ req = isc__nm_uvreq_get(worker, sock);
req->cb.connect = cb;
req->cbarg = cbarg;
req->peer = *peer;
result = isc__nm_socket(sa_family, SOCK_STREAM, 0, &sock->fd);
if (result != ISC_R_SUCCESS) {
- if (isc__nm_in_netthread()) {
- sock->tid = isc_nm_tid();
- }
isc__nmsocket_clearcb(sock);
isc__nm_connectcb(sock, req, result, true);
atomic_store(&sock->closed, true);
result = isc__nm_socket_connectiontimeout(sock->fd, 120 * 1000);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
- ievent = isc__nm_get_netievent_tcpdnsconnect(mgr, sock, req);
+ ievent = isc__nm_get_netievent_tcpdnsconnect(sock->worker, sock, req);
- if (isc__nm_in_netthread()) {
- atomic_store(&sock->active, true);
- sock->tid = isc_nm_tid();
- isc__nm_async_tcpdnsconnect(&mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
- isc__nm_put_netievent_tcpdnsconnect(mgr, ievent);
- } else {
- atomic_init(&sock->active, false);
- sock->tid = isc_random_uniform(mgr->nworkers);
- isc__nm_enqueue_ievent(&mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
- }
+ atomic_store(&sock->active, true);
+ isc__nm_async_tcpdnsconnect(sock->worker, (isc__netievent_t *)ievent);
+ isc__nm_put_netievent_tcpdnsconnect(sock->worker, ievent);
- LOCK(&sock->lock);
- while (sock->result == ISC_R_UNSET) {
- WAIT(&sock->cond, &sock->lock);
- }
atomic_store(&sock->active, true);
- BROADCAST(&sock->scond);
- UNLOCK(&sock->lock);
}
static uv_os_sock_t
return (sock);
}
-static void
-enqueue_stoplistening(isc_nmsocket_t *sock) {
- isc__netievent_tcpdnsstop_t *ievent =
- isc__nm_get_netievent_tcpdnsstop(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
-}
-
static void
start_tcpdns_child(isc_nm_t *mgr, isc_sockaddr_t *iface, isc_nmsocket_t *sock,
uv_os_sock_t fd, int tid) {
isc__netievent_tcpdnslisten_t *ievent = NULL;
isc_nmsocket_t *csock = &sock->children[tid];
+ isc__networker_t *worker = &mgr->workers[tid];
- isc__nmsocket_init(csock, mgr, isc_nm_tcpdnssocket, iface);
+ isc__nmsocket_init(csock, worker, isc_nm_tcpdnssocket, iface);
csock->parent = sock;
csock->accept_cb = sock->accept_cb;
csock->accept_cbarg = sock->accept_cbarg;
csock->recv_cb = sock->recv_cb;
csock->recv_cbarg = sock->recv_cbarg;
csock->backlog = sock->backlog;
- csock->tid = tid;
/*
* We don't attach to quota, just assign - to avoid
* increasing quota unnecessarily.
isc_quota_cb_init(&csock->quotacb, quota_accept_cb, csock);
if (mgr->load_balance_sockets) {
- UNUSED(fd);
+ REQUIRE(fd == -1);
csock->fd = isc__nm_tcpdns_lb_socket(mgr,
iface->type.sa.sa_family);
} else {
}
REQUIRE(csock->fd >= 0);
- ievent = isc__nm_get_netievent_tcpdnslisten(mgr, csock);
- isc__nm_maybe_enqueue_ievent(&mgr->workers[tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tcpdnslisten(csock->worker, csock);
+
+ if (tid == 0) {
+ isc__nm_process_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ } else {
+ isc__nm_enqueue_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ }
}
+
isc_result_t
isc_nm_listentcpdns(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface,
isc_nm_recv_cb_t recv_cb, void *recv_cbarg,
isc_nm_accept_cb_t accept_cb, void *accept_cbarg,
int backlog, isc_quota_t *quota, isc_nmsocket_t **sockp) {
- isc_result_t result = ISC_R_SUCCESS;
isc_nmsocket_t *sock = NULL;
size_t children_size = 0;
uv_os_sock_t fd = -1;
+ isc_result_t result = ISC_R_UNSET;
+ isc__networker_t *worker = &mgr->workers[0];
REQUIRE(VALID_NM(mgr));
+ REQUIRE(isc_tid() == 0);
+
+ if (workers == 0) {
+ workers = mgr->nloops;
+ }
+ REQUIRE(workers <= mgr->nloops);
- sock = isc_mem_get(mgr->mctx, sizeof(*sock));
- isc__nmsocket_init(sock, mgr, isc_nm_tcpdnslistener, iface);
+ sock = isc_mem_get(worker->mctx, sizeof(*sock));
+ isc__nmsocket_init(sock, worker, isc_nm_tcpdnslistener, iface);
atomic_init(&sock->rchildren, 0);
- sock->nchildren = (workers == ISC_NM_LISTEN_ALL)
- ? (uint32_t)mgr->nworkers
- : workers;
+ sock->nchildren = (workers == ISC_NM_LISTEN_ALL) ? (uint32_t)mgr->nloops
+ : workers;
children_size = sock->nchildren * sizeof(sock->children[0]);
- sock->children = isc_mem_get(mgr->mctx, children_size);
+ sock->children = isc_mem_get(worker->mctx, children_size);
memset(sock->children, 0, children_size);
- sock->result = ISC_R_UNSET;
+ isc_barrier_init(&sock->barrier, sock->nchildren);
+
sock->accept_cb = accept_cb;
sock->accept_cbarg = accept_cbarg;
sock->recv_cb = recv_cb;
sock->backlog = backlog;
sock->pquota = quota;
- sock->tid = 0;
- sock->fd = -1;
-
if (!mgr->load_balance_sockets) {
fd = isc__nm_tcpdns_lb_socket(mgr, iface->type.sa.sa_family);
}
- isc_barrier_init(&sock->startlistening, sock->nchildren);
-
- for (size_t i = 0; i < sock->nchildren; i++) {
- if ((int)i == isc_nm_tid()) {
- continue;
- }
+ for (size_t i = 1; i < sock->nchildren; i++) {
start_tcpdns_child(mgr, iface, sock, fd, i);
}
- if (isc__nm_in_netthread()) {
- start_tcpdns_child(mgr, iface, sock, fd, isc_nm_tid());
- }
+ start_tcpdns_child(mgr, iface, sock, fd, 0);
if (!mgr->load_balance_sockets) {
isc__nm_closesocket(fd);
}
LOCK(&sock->lock);
- while (atomic_load(&sock->rchildren) != sock->nchildren) {
- WAIT(&sock->cond, &sock->lock);
- }
result = sock->result;
- atomic_store(&sock->active, true);
UNLOCK(&sock->lock);
-
INSIST(result != ISC_R_UNSET);
- if (result == ISC_R_SUCCESS) {
- REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren);
- *sockp = sock;
- } else {
+ atomic_store(&sock->active, true);
+
+ if (result != ISC_R_SUCCESS) {
atomic_store(&sock->active, false);
- enqueue_stoplistening(sock);
+ isc__nm_tcpdns_stoplistening(sock);
isc_nmsocket_close(&sock);
+
+ return (result);
}
- return (result);
+ REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren);
+ *sockp = sock;
+ return (ISC_R_SUCCESS);
}
void
isc_nm_t *mgr = NULL;
REQUIRE(VALID_NMSOCK(ievent->sock));
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
REQUIRE(VALID_NMSOCK(ievent->sock->parent));
sock = ievent->sock;
sa_family = sock->iface.type.sa.sa_family;
- mgr = sock->mgr;
+ mgr = sock->worker->netmgr;
REQUIRE(sock->type == isc_nm_tcpdnssocket);
REQUIRE(sock->parent != NULL);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
(void)isc__nm_socket_min_mtu(sock->fd, sa_family);
(void)isc__nm_socket_tcp_maxseg(sock->fd, NM_MAXSEG);
- r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp);
+ r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp);
UV_RUNTIME_CHECK(uv_tcp_init, r);
uv_handle_set_data(&sock->uv_handle.handle, sock);
/* This keeps the socket alive after everything else is gone */
isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL });
- r = uv_timer_init(&worker->loop, &sock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &sock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
- LOCK(&sock->parent->lock);
-
r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd);
if (r < 0) {
isc__nm_closesocket(sock->fd);
goto done;
}
} else {
+ LOCK(&sock->parent->lock);
if (sock->parent->fd == -1) {
r = isc__nm_tcp_freebind(&sock->uv_handle.tcp,
&sock->iface.type.sa, flags);
if (r < 0) {
isc__nm_incstats(sock, STATID_BINDFAIL);
+ UNLOCK(&sock->parent->lock);
goto done;
}
sock->parent->uv_handle.tcp.flags =
sock->uv_handle.tcp.flags =
sock->parent->uv_handle.tcp.flags;
}
+ UNLOCK(&sock->parent->lock);
}
- isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle);
+ isc__nm_set_network_buffers(sock->worker->netmgr,
+ &sock->uv_handle.handle);
/*
* The callback will run in the same thread uv_listen() was called
done:
result = isc_uverr2result(r);
+ atomic_fetch_add(&sock->parent->rchildren, 1);
+
if (result != ISC_R_SUCCESS) {
sock->pquota = NULL;
}
- atomic_fetch_add(&sock->parent->rchildren, 1);
+ LOCK(&sock->parent->lock);
if (sock->parent->result == ISC_R_UNSET) {
sock->parent->result = result;
+ } else {
+ REQUIRE(sock->parent->result == result);
}
- SIGNAL(&sock->parent->cond);
UNLOCK(&sock->parent->lock);
- isc_barrier_wait(&sock->parent->startlistening);
+ REQUIRE(!worker->loop->paused);
+ isc_barrier_wait(&sock->parent->barrier);
}
static void
}
REQUIRE(VALID_NMSOCK(ssock));
- REQUIRE(ssock->tid == isc_nm_tid());
+ REQUIRE(ssock->tid == isc_tid());
if (isc__nmsocket_closing(ssock)) {
result = ISC_R_CANCELED;
isc__nm_accept_connection_log(result, can_log_tcpdns_quota());
}
+static void
+stop_tcpdns_child(isc_nmsocket_t *sock, uint32_t tid) {
+ isc_nmsocket_t *csock = NULL;
+ isc__netievent_tcpstop_t *ievent = NULL;
+
+ csock = &sock->children[tid];
+ REQUIRE(VALID_NMSOCK(csock));
+
+ atomic_store(&csock->active, false);
+ ievent = isc__nm_get_netievent_tcpdnsstop(csock->worker, csock);
+
+ if (tid == 0) {
+ isc__nm_process_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ } else {
+ isc__nm_enqueue_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ }
+}
+
+static void
+stop_tcpdns_parent(isc_nmsocket_t *sock) {
+ /* Stop the parent */
+ atomic_store(&sock->closed, true);
+ isc__nmsocket_prep_destroy(sock);
+}
+
void
isc__nm_tcpdns_stoplistening(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tcpdnslistener);
- if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
- true)) {
- UNREACHABLE();
- }
+ RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing,
+ &(bool){ false }, true));
- if (!isc__nm_in_netthread()) {
- enqueue_stoplistening(sock);
- } else {
- stop_tcpdns_parent(sock);
+ for (size_t i = 1; i < sock->nchildren; i++) {
+ stop_tcpdns_child(sock, i);
}
+
+ stop_tcpdns_child(sock, 0);
+
+ stop_tcpdns_parent(sock);
}
void
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
+ REQUIRE(sock->parent != NULL);
- if (sock->parent != NULL) {
- stop_tcpdns_child(sock);
- return;
- }
+ RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing,
+ &(bool){ false }, true));
- stop_tcpdns_parent(sock);
+ tcpdns_close_direct(sock);
+
+ (void)atomic_fetch_sub(&sock->parent->rchildren, 1);
+
+ REQUIRE(!worker->loop->paused);
+ isc_barrier_wait(&sock->parent->barrier);
}
void
isc_nmsocket_t *sock = handle->sock;
isc__netievent_tcpdnsread_t *ievent = NULL;
+ isc_nm_t *netmgr = sock->worker->netmgr;
REQUIRE(sock->type == isc_nm_tcpdnssocket);
REQUIRE(sock->statichandle == handle);
sock->recv_cbarg = cbarg;
sock->recv_read = true;
if (sock->read_timeout == 0) {
- sock->read_timeout =
- (atomic_load(&sock->keepalive)
- ? atomic_load(&sock->mgr->keepalive)
- : atomic_load(&sock->mgr->idle));
+ sock->read_timeout = (atomic_load(&sock->keepalive)
+ ? atomic_load(&netmgr->keepalive)
+ : atomic_load(&netmgr->idle));
}
- ievent = isc__nm_get_netievent_tcpdnsread(sock->mgr, sock);
+ ievent = isc__nm_get_netievent_tcpdnsread(sock->worker, sock);
/*
* This MUST be done asynchronously, no matter which thread we're
* isc_nm_read() again; if we tried to do that synchronously
* we'd clash in processbuffer() and grow the stack indefinitely.
*/
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
return;
}
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
if (isc__nmsocket_closing(sock)) {
result = ISC_R_CANCELED;
isc_nmhandle_t *handle = NULL;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
if (isc__nmsocket_closing(sock)) {
return (ISC_R_CANCELED);
isc_result_t result;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->reading));
REQUIRE(buf != NULL);
sock->buf_len += len;
if (!atomic_load(&sock->client)) {
- sock->read_timeout = atomic_load(&sock->mgr->idle);
+ sock->read_timeout = atomic_load(&sock->worker->netmgr->idle);
}
result = isc__nm_process_sock_buffer(sock);
*/
isc__netievent_tcpdnsaccept_t *ievent =
- isc__nm_get_netievent_tcpdnsaccept(sock->mgr, sock, quota);
- isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_get_netievent_tcpdnsaccept(sock->worker, sock, quota);
+ isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
/*
UNUSED(worker);
REQUIRE(VALID_NMSOCK(ievent->sock));
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
result = accept_connection(ievent->sock, ievent->quota);
isc__nm_accept_connection_log(result, can_log_tcpdns_quota());
isc_nmhandle_t *handle = NULL;
REQUIRE(VALID_NMSOCK(ssock));
- REQUIRE(ssock->tid == isc_nm_tid());
+ REQUIRE(ssock->tid == isc_tid());
if (isc__nmsocket_closing(ssock)) {
if (quota != NULL) {
REQUIRE(ssock->accept_cb != NULL);
- csock = isc_mem_get(ssock->mgr->mctx, sizeof(isc_nmsocket_t));
- isc__nmsocket_init(csock, ssock->mgr, isc_nm_tcpdnssocket,
+ csock = isc_mem_get(ssock->worker->mctx, sizeof(isc_nmsocket_t));
+ isc__nmsocket_init(csock, ssock->worker, isc_nm_tcpdnssocket,
&ssock->iface);
- csock->tid = ssock->tid;
isc__nmsocket_attach(ssock, &csock->server);
csock->recv_cb = ssock->recv_cb;
csock->recv_cbarg = ssock->recv_cbarg;
csock->quota = quota;
atomic_init(&csock->accepting, true);
- worker = &csock->mgr->workers[csock->tid];
+ worker = csock->worker;
- r = uv_tcp_init(&worker->loop, &csock->uv_handle.tcp);
+ r = uv_tcp_init(&worker->loop->loop, &csock->uv_handle.tcp);
UV_RUNTIME_CHECK(uv_tcp_init, r);
uv_handle_set_data(&csock->uv_handle.handle, csock);
- r = uv_timer_init(&worker->loop, &csock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &csock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&csock->read_timer, csock);
isc__nm_incstats(csock, STATID_ACCEPT);
- csock->read_timeout = atomic_load(&csock->mgr->init);
+ csock->read_timeout = atomic_load(&csock->worker->netmgr->init);
csock->closehandle_cb = isc__nm_resume_processing;
* The initial timer has been set, update the read timeout for the next
* reads.
*/
- csock->read_timeout = (atomic_load(&csock->keepalive)
- ? atomic_load(&csock->mgr->keepalive)
- : atomic_load(&csock->mgr->idle));
+ csock->read_timeout =
+ (atomic_load(&csock->keepalive)
+ ? atomic_load(&csock->worker->netmgr->keepalive)
+ : atomic_load(&csock->worker->netmgr->idle));
isc_nmhandle_detach(&handle);
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tcpdnssocket);
- uvreq = isc__nm_uvreq_get(sock->mgr, sock);
+ uvreq = isc__nm_uvreq_get(sock->worker, sock);
*(uint16_t *)uvreq->tcplen = htons(region->length);
uvreq->uvbuf.base = (char *)region->base;
uvreq->uvbuf.len = region->length;
uvreq->cb.send = cb;
uvreq->cbarg = cbarg;
- ievent = isc__nm_get_netievent_tcpdnssend(sock->mgr, sock, uvreq);
- isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tcpdnssend(sock->worker, sock, uvreq);
+ isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
return;
}
REQUIRE(VALID_UVREQ(ievent->req));
REQUIRE(VALID_NMSOCK(ievent->sock));
REQUIRE(ievent->sock->type == isc_nm_tcpdnssocket);
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
sock = ievent->sock;
uvreq = ievent->req;
if (sock->write_timeout == 0) {
sock->write_timeout =
(atomic_load(&sock->keepalive)
- ? atomic_load(&sock->mgr->keepalive)
- : atomic_load(&sock->mgr->idle));
+ ? atomic_load(&sock->worker->netmgr->keepalive)
+ : atomic_load(&sock->worker->netmgr->idle));
}
uv_buf_t bufs[2] = { { .base = uvreq->tcplen, .len = 2 },
isc_nmsocket_t *sock = uv_handle_get_data(handle);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
uv_handle_set_data(handle, NULL);
static void
tcpdns_close_sock(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false },
}
}
-static void
-stop_tcpdns_child(isc_nmsocket_t *sock) {
- REQUIRE(sock->type == isc_nm_tcpdnssocket);
- REQUIRE(sock->tid == isc_nm_tid());
-
- if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
- true)) {
- return;
- }
-
- tcpdns_close_direct(sock);
-
- atomic_fetch_sub(&sock->parent->rchildren, 1);
-
- isc_barrier_wait(&sock->parent->stoplistening);
-}
-
-static void
-stop_tcpdns_parent(isc_nmsocket_t *sock) {
- isc_nmsocket_t *csock = NULL;
-
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
- REQUIRE(sock->type == isc_nm_tcpdnslistener);
-
- isc_barrier_init(&sock->stoplistening, sock->nchildren);
-
- for (size_t i = 0; i < sock->nchildren; i++) {
- csock = &sock->children[i];
- REQUIRE(VALID_NMSOCK(csock));
-
- if ((int)i == isc_nm_tid()) {
- /*
- * We need to schedule closing the other sockets first
- */
- continue;
- }
-
- atomic_store(&csock->active, false);
- enqueue_stoplistening(csock);
- }
-
- csock = &sock->children[isc_nm_tid()];
- atomic_store(&csock->active, false);
- stop_tcpdns_child(csock);
-
- atomic_store(&sock->closed, true);
- isc__nmsocket_prep_destroy(sock);
-}
-
static void
tcpdns_close_direct(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
if (sock->quota != NULL) {
return;
}
- if (sock->tid == isc_nm_tid()) {
+ if (sock->tid == isc_tid()) {
tcpdns_close_direct(sock);
} else {
/*
* We need to create an event and pass it using async channel
*/
isc__netievent_tcpdnsclose_t *ievent =
- isc__nm_get_netievent_tcpdnsclose(sock->mgr, sock);
+ isc__nm_get_netievent_tcpdnsclose(sock->worker, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
+ isc__nm_enqueue_ievent(sock->worker,
(isc__netievent_t *)ievent);
}
}
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
tcpdns_close_direct(sock);
}
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(isc__nm_in_netthread());
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
isc__nmsocket_prep_destroy(sock);
isc__nmsocket_detach(&sock);
void
isc__nm_tcpdns_shutdown(isc_nmsocket_t *sock) {
+ isc__networker_t *worker = NULL;
+
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(sock->type == isc_nm_tcpdnssocket);
+ worker = sock->worker;
+
/*
* If the socket is active, mark it inactive and
* continue. If it isn't active, stop now.
}
if (sock->statichandle != NULL) {
- if (isc__nm_closing(sock)) {
+ if (isc__nm_closing(worker)) {
isc__nm_failed_read_cb(sock, ISC_R_SHUTTINGDOWN, false);
} else {
isc__nm_failed_read_cb(sock, ISC_R_CANCELED, false);
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tcpdnssocket);
- ievent = isc__nm_get_netievent_tcpdnscancel(sock->mgr, sock, handle);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tcpdnscancel(sock->worker, sock, handle);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
isc__nm_failed_read_cb(sock, ISC_R_EOF, false);
}
isc_nm_timer_t *timer = NULL;
int r;
- REQUIRE(isc__nm_in_netthread());
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
sock = handle->sock;
- worker = &sock->mgr->workers[isc_nm_tid()];
+ worker = sock->worker;
/* TODO: per-loop object cache */
- timer = isc_mem_get(sock->mgr->mctx, sizeof(*timer));
+ timer = isc_mem_get(worker->mctx, sizeof(*timer));
*timer = (isc_nm_timer_t){ .cb = cb, .cbarg = cbarg };
isc_refcount_init(&timer->references, 1);
isc_nmhandle_attach(handle, &timer->handle);
- r = uv_timer_init(&worker->loop, &timer->timer);
+ r = uv_timer_init(&worker->loop->loop, &timer->timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
-
uv_handle_set_data((uv_handle_t *)&timer->timer, timer);
*timerp = timer;
timer_destroy(uv_handle_t *uvhandle) {
isc_nm_timer_t *timer = uv_handle_get_data(uvhandle);
isc_nmhandle_t *handle = timer->handle;
- isc_mem_t *mctx = timer->handle->sock->mgr->mctx;
+ isc_mem_t *mctx = timer->handle->sock->worker->mctx;
isc_mem_put(mctx, timer, sizeof(*timer));
handle = timer->handle;
- REQUIRE(isc__nm_in_netthread());
REQUIRE(VALID_NMHANDLE(handle));
REQUIRE(VALID_NMSOCK(handle->sock));
static void
quota_accept_cb(isc_quota_t *quota, void *sock0);
-static void
-stop_tlsdns_parent(isc_nmsocket_t *sock);
-static void
-stop_tlsdns_child(isc_nmsocket_t *sock);
-
static void
async_tlsdns_cycle(isc_nmsocket_t *sock) __attribute__((unused));
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(req));
- REQUIRE(isc__nm_in_netthread());
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
- worker = &sock->mgr->workers[sock->tid];
+ worker = sock->worker;
atomic_store(&sock->connecting, true);
result = isc__nm_socket_connectiontimeout(sock->fd, 120 * 1000);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
- r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp);
+ r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp);
UV_RUNTIME_CHECK(uv_tcp_init, r);
uv_handle_set_data(&sock->uv_handle.handle, sock);
- r = uv_timer_init(&worker->loop, &sock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &sock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
- if (isc__nm_closing(sock)) {
- result = ISC_R_SHUTTINGDOWN;
- goto error;
+ if (isc__nm_closing(worker)) {
+ return (ISC_R_SHUTTINGDOWN);
}
r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd);
if (r != 0) {
isc__nm_closesocket(sock->fd);
isc__nm_incstats(sock, STATID_OPENFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
isc__nm_incstats(sock, STATID_OPEN);
*/
if (r != 0 && r != UV_EINVAL) {
isc__nm_incstats(sock, STATID_BINDFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
}
- isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle);
+ isc__nm_set_network_buffers(sock->worker->netmgr,
+ &sock->uv_handle.handle);
uv_handle_set_data(&req->uv_req.handle, req);
r = uv_tcp_connect(&req->uv_req.connect, &sock->uv_handle.tcp,
&req->peer.type.sa, tlsdns_connect_cb);
if (r != 0) {
isc__nm_incstats(sock, STATID_CONNECTFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
uv_handle_set_data((uv_handle_t *)&sock->read_timer,
atomic_store(&sock->connected, true);
-done:
- result = isc_uverr2result(r);
-error:
- LOCK(&sock->lock);
- sock->result = result;
- SIGNAL(&sock->cond);
- if (!atomic_load(&sock->active)) {
- WAIT(&sock->scond, &sock->lock);
- }
- INSIST(atomic_load(&sock->active));
- UNLOCK(&sock->lock);
-
- return (result);
+ return (ISC_R_SUCCESS);
}
void
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tlsdnssocket);
REQUIRE(sock->parent == NULL);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
result = tlsdns_connect_direct(sock, req);
if (result != ISC_R_SUCCESS) {
isc__nm_uvreq_t *req = NULL;
isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)uvreq->handle);
struct sockaddr_storage ss;
+ isc__networker_t *worker = NULL;
int r;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
+
+ worker = sock->worker;
req = uv_handle_get_data((uv_handle_t *)uvreq);
if (atomic_load(&sock->timedout)) {
result = ISC_R_TIMEDOUT;
goto error;
- } else if (isc__nm_closing(sock)) {
+ } else if (isc__nm_closing(worker)) {
/* Network manager shutting down */
result = ISC_R_SHUTTINGDOWN;
goto error;
isc__netievent_tlsdnsconnect_t *ievent = NULL;
isc__nm_uvreq_t *req = NULL;
sa_family_t sa_family;
+ isc__networker_t *worker = &mgr->workers[isc_tid()];
REQUIRE(VALID_NM(mgr));
REQUIRE(local != NULL);
sa_family = peer->type.sa.sa_family;
- sock = isc_mem_get(mgr->mctx, sizeof(*sock));
- isc__nmsocket_init(sock, mgr, isc_nm_tlsdnssocket, local);
+ sock = isc_mem_get(worker->mctx, sizeof(*sock));
+ isc__nmsocket_init(sock, worker, isc_nm_tlsdnssocket, local);
sock->connect_timeout = timeout;
- sock->result = ISC_R_UNSET;
isc_tlsctx_attach(sslctx, &sock->tls.ctx);
atomic_init(&sock->client, true);
atomic_init(&sock->connecting, true);
- req = isc__nm_uvreq_get(mgr, sock);
+ req = isc__nm_uvreq_get(sock->worker, sock);
req->cb.connect = cb;
req->cbarg = cbarg;
req->peer = *peer;
goto failure;
}
- if (isc__nm_closing(sock)) {
+ if (isc__nm_closing(worker)) {
result = ISC_R_SHUTTINGDOWN;
goto failure;
}
result = isc__nm_socket_connectiontimeout(sock->fd, 120 * 1000);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
- ievent = isc__nm_get_netievent_tlsdnsconnect(mgr, sock, req);
+ ievent = isc__nm_get_netievent_tlsdnsconnect(sock->worker, sock, req);
- if (isc__nm_in_netthread()) {
- atomic_store(&sock->active, true);
- sock->tid = isc_nm_tid();
- isc__nm_async_tlsdnsconnect(&mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
- isc__nm_put_netievent_tlsdnsconnect(mgr, ievent);
- } else {
- atomic_init(&sock->active, false);
- sock->tid = isc_random_uniform(mgr->nworkers);
- isc__nm_enqueue_ievent(&mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
- }
- LOCK(&sock->lock);
- while (sock->result == ISC_R_UNSET) {
- WAIT(&sock->cond, &sock->lock);
- }
atomic_store(&sock->active, true);
- BROADCAST(&sock->scond);
- UNLOCK(&sock->lock);
+ isc__nm_async_tlsdnsconnect(sock->worker, (isc__netievent_t *)ievent);
+ isc__nm_put_netievent_tlsdnsconnect(sock->worker, ievent);
+
+ atomic_store(&sock->active, true);
+
return;
failure:
- if (isc__nm_in_netthread()) {
- sock->tid = isc_nm_tid();
- }
-
atomic_compare_exchange_enforced(&sock->connecting, &(bool){ true },
false);
isc__nmsocket_clearcb(sock);
uv_os_sock_t fd, int tid) {
isc__netievent_tlsdnslisten_t *ievent = NULL;
isc_nmsocket_t *csock = &sock->children[tid];
+ isc__networker_t *worker = &mgr->workers[tid];
- isc__nmsocket_init(csock, mgr, isc_nm_tlsdnssocket, iface);
+ isc__nmsocket_init(csock, worker, isc_nm_tlsdnssocket, iface);
csock->parent = sock;
csock->accept_cb = sock->accept_cb;
csock->accept_cbarg = sock->accept_cbarg;
csock->recv_cb = sock->recv_cb;
csock->recv_cbarg = sock->recv_cbarg;
csock->backlog = sock->backlog;
- csock->tid = tid;
isc_tlsctx_attach(sock->tls.ctx, &csock->tls.ctx);
/*
}
REQUIRE(csock->fd >= 0);
- ievent = isc__nm_get_netievent_tlsdnslisten(mgr, csock);
- isc__nm_maybe_enqueue_ievent(&mgr->workers[tid],
- (isc__netievent_t *)ievent);
-}
+ ievent = isc__nm_get_netievent_tlsdnslisten(csock->worker, csock);
-static void
-enqueue_stoplistening(isc_nmsocket_t *sock) {
- isc__netievent_tlsdnsstop_t *ievent =
- isc__nm_get_netievent_tlsdnsstop(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ if (tid == 0) {
+ isc__nm_process_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ } else {
+ isc__nm_enqueue_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ }
}
isc_result_t
isc_nm_accept_cb_t accept_cb, void *accept_cbarg,
int backlog, isc_quota_t *quota, isc_tlsctx_t *sslctx,
isc_nmsocket_t **sockp) {
- isc_result_t result = ISC_R_SUCCESS;
isc_nmsocket_t *sock = NULL;
size_t children_size = 0;
uv_os_sock_t fd = -1;
+ isc_result_t result = ISC_R_UNSET;
+ isc__networker_t *worker = &mgr->workers[isc_tid()];
REQUIRE(VALID_NM(mgr));
+ REQUIRE(isc_tid() == 0);
+
+ if (workers == 0) {
+ workers = mgr->nloops;
+ }
+ REQUIRE(workers <= mgr->nloops);
- sock = isc_mem_get(mgr->mctx, sizeof(*sock));
- isc__nmsocket_init(sock, mgr, isc_nm_tlsdnslistener, iface);
+ sock = isc_mem_get(worker->mctx, sizeof(*sock));
+ isc__nmsocket_init(sock, worker, isc_nm_tlsdnslistener, iface);
atomic_init(&sock->rchildren, 0);
- sock->nchildren = (workers == ISC_NM_LISTEN_ALL)
- ? (uint32_t)mgr->nworkers
- : workers;
+ sock->nchildren = (workers == ISC_NM_LISTEN_ALL) ? (uint32_t)mgr->nloops
+ : workers;
children_size = sock->nchildren * sizeof(sock->children[0]);
- sock->children = isc_mem_get(mgr->mctx, children_size);
+ sock->children = isc_mem_get(worker->mctx, children_size);
memset(sock->children, 0, children_size);
- sock->result = ISC_R_UNSET;
+ isc_barrier_init(&sock->barrier, sock->nchildren);
+
sock->accept_cb = accept_cb;
sock->accept_cbarg = accept_cbarg;
sock->recv_cb = recv_cb;
isc_tlsctx_attach(sslctx, &sock->tls.ctx);
- sock->tid = 0;
- sock->fd = -1;
-
if (!mgr->load_balance_sockets) {
fd = isc__nm_tlsdns_lb_socket(mgr, iface->type.sa.sa_family);
}
- isc_barrier_init(&sock->startlistening, sock->nchildren);
-
- for (size_t i = 0; i < sock->nchildren; i++) {
- if ((int)i == isc_nm_tid()) {
- continue;
- }
+ for (size_t i = 1; i < sock->nchildren; i++) {
start_tlsdns_child(mgr, iface, sock, fd, i);
}
- if (isc__nm_in_netthread()) {
- start_tlsdns_child(mgr, iface, sock, fd, isc_nm_tid());
- }
+ start_tlsdns_child(mgr, iface, sock, fd, 0);
if (!mgr->load_balance_sockets) {
isc__nm_closesocket(fd);
}
LOCK(&sock->lock);
- while (atomic_load(&sock->rchildren) != sock->nchildren) {
- WAIT(&sock->cond, &sock->lock);
- }
result = sock->result;
- atomic_store(&sock->active, true);
UNLOCK(&sock->lock);
-
INSIST(result != ISC_R_UNSET);
- if (result == ISC_R_SUCCESS) {
- REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren);
- *sockp = sock;
- } else {
+ atomic_store(&sock->active, true);
+
+ if (result != ISC_R_SUCCESS) {
atomic_store(&sock->active, false);
- enqueue_stoplistening(sock);
+ isc__nm_tcpdns_stoplistening(sock);
isc_nmsocket_close(&sock);
+
+ return (result);
}
- return (result);
+ REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren);
+ *sockp = sock;
+ return (ISC_R_SUCCESS);
}
void
int flags = 0;
isc_nmsocket_t *sock = NULL;
isc_result_t result = ISC_R_UNSET;
- isc_nm_t *mgr;
REQUIRE(VALID_NMSOCK(ievent->sock));
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
REQUIRE(VALID_NMSOCK(ievent->sock->parent));
sock = ievent->sock;
sa_family = sock->iface.type.sa.sa_family;
- mgr = sock->mgr;
REQUIRE(sock->type == isc_nm_tlsdnssocket);
REQUIRE(sock->parent != NULL);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
(void)isc__nm_socket_min_mtu(sock->fd, sa_family);
(void)isc__nm_socket_tcp_maxseg(sock->fd, NM_MAXSEG);
- r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp);
+ r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp);
UV_RUNTIME_CHECK(uv_tcp_init, r);
uv_handle_set_data(&sock->uv_handle.handle, sock);
/* This keeps the socket alive after everything else is gone */
isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL });
- r = uv_timer_init(&worker->loop, &sock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &sock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
- LOCK(&sock->parent->lock);
-
r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd);
if (r < 0) {
isc__nm_closesocket(sock->fd);
flags = UV_TCP_IPV6ONLY;
}
- if (mgr->load_balance_sockets) {
+ if (sock->worker->netmgr->load_balance_sockets) {
r = isc__nm_tcp_freebind(&sock->uv_handle.tcp,
&sock->iface.type.sa, flags);
if (r < 0) {
goto done;
}
} else {
+ LOCK(&sock->parent->lock);
if (sock->parent->fd == -1) {
r = isc__nm_tcp_freebind(&sock->uv_handle.tcp,
&sock->iface.type.sa, flags);
if (r < 0) {
isc__nm_incstats(sock, STATID_BINDFAIL);
+ UNLOCK(&sock->parent->lock);
goto done;
}
sock->parent->uv_handle.tcp.flags =
sock->uv_handle.tcp.flags =
sock->parent->uv_handle.tcp.flags;
}
+ UNLOCK(&sock->parent->lock);
}
- isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle);
+ isc__nm_set_network_buffers(sock->worker->netmgr,
+ &sock->uv_handle.handle);
/*
* The callback will run in the same thread uv_listen() was
done:
result = isc_uverr2result(r);
+ atomic_fetch_add(&sock->parent->rchildren, 1);
+
if (result != ISC_R_SUCCESS) {
sock->pquota = NULL;
}
- atomic_fetch_add(&sock->parent->rchildren, 1);
+ LOCK(&sock->parent->lock);
if (sock->parent->result == ISC_R_UNSET) {
sock->parent->result = result;
+ } else {
+ REQUIRE(sock->parent->result == result);
}
- SIGNAL(&sock->parent->cond);
UNLOCK(&sock->parent->lock);
- isc_barrier_wait(&sock->parent->startlistening);
+ REQUIRE(!worker->loop->paused);
+ isc_barrier_wait(&sock->parent->barrier);
}
static void
}
REQUIRE(VALID_NMSOCK(ssock));
- REQUIRE(ssock->tid == isc_nm_tid());
+ REQUIRE(ssock->tid == isc_tid());
if (isc__nmsocket_closing(ssock)) {
result = ISC_R_CANCELED;
isc__nm_accept_connection_log(result, can_log_tlsdns_quota());
}
+static void
+stop_tlsdns_child(isc_nmsocket_t *sock, uint32_t tid) {
+ isc_nmsocket_t *csock = NULL;
+ isc__netievent_tcpstop_t *ievent = NULL;
+
+ csock = &sock->children[tid];
+ REQUIRE(VALID_NMSOCK(csock));
+
+ atomic_store(&csock->active, false);
+ ievent = isc__nm_get_netievent_tlsdnsstop(csock->worker, csock);
+
+ if (tid == 0) {
+ isc__nm_process_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ } else {
+ isc__nm_enqueue_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ }
+}
+
+static void
+stop_tlsdns_parent(isc_nmsocket_t *sock) {
+ /* Stop the parent */
+ atomic_store(&sock->closed, true);
+ isc__nmsocket_prep_destroy(sock);
+}
+
void
isc__nm_tlsdns_stoplistening(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tlsdnslistener);
- if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
- true)) {
- UNREACHABLE();
- }
+ RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing,
+ &(bool){ false }, true));
- if (!isc__nm_in_netthread()) {
- enqueue_stoplistening(sock);
- } else {
- stop_tlsdns_parent(sock);
+ for (size_t i = 1; i < sock->nchildren; i++) {
+ stop_tlsdns_child(sock, i);
}
+
+ stop_tlsdns_child(sock, 0);
+
+ stop_tlsdns_parent(sock);
}
static void
REQUIRE(VALID_NMSOCK(sock));
isc__netievent_tlsdnsshutdown_t *ievent =
- isc__nm_get_netievent_tlsdnsshutdown(sock->mgr, sock);
- isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_get_netievent_tlsdnsshutdown(sock->worker, sock);
+ isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
+}
+
+void
+isc__nm_async_tlsdnsstop(isc__networker_t *worker, isc__netievent_t *ev0) {
+ isc__netievent_tlsdnsstop_t *ievent =
+ (isc__netievent_tlsdnsstop_t *)ev0;
+ isc_nmsocket_t *sock = ievent->sock;
+
+ UNUSED(worker);
+
+ REQUIRE(VALID_NMSOCK(sock));
+ REQUIRE(sock->tid == isc_tid());
+ REQUIRE(sock->parent != NULL);
+
+ RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing,
+ &(bool){ false }, true));
+
+ tlsdns_close_direct(sock);
+
+ (void)atomic_fetch_sub(&sock->parent->rchildren, 1);
+
+ REQUIRE(!worker->loop->paused);
+ isc_barrier_wait(&sock->parent->barrier);
}
void
return;
}
-void
-isc__nm_async_tlsdnsstop(isc__networker_t *worker, isc__netievent_t *ev0) {
- isc__netievent_tlsdnsstop_t *ievent =
- (isc__netievent_tlsdnsstop_t *)ev0;
- isc_nmsocket_t *sock = ievent->sock;
-
- UNUSED(worker);
-
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
-
- if (sock->parent != NULL) {
- stop_tlsdns_child(sock);
- return;
- }
-
- stop_tlsdns_parent(sock);
-}
-
void
isc__nm_tlsdns_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result,
bool async) {
isc_nmsocket_t *sock = handle->sock;
isc__netievent_tlsdnsread_t *ievent = NULL;
+ isc_nm_t *netmgr = sock->worker->netmgr;
REQUIRE(sock->type == isc_nm_tlsdnssocket);
REQUIRE(sock->statichandle == handle);
sock->recv_cbarg = cbarg;
sock->recv_read = true;
if (sock->read_timeout == 0) {
- sock->read_timeout =
- (atomic_load(&sock->keepalive)
- ? atomic_load(&sock->mgr->keepalive)
- : atomic_load(&sock->mgr->idle));
+ sock->read_timeout = (atomic_load(&sock->keepalive)
+ ? atomic_load(&netmgr->keepalive)
+ : atomic_load(&netmgr->idle));
}
- ievent = isc__nm_get_netievent_tlsdnsread(sock->mgr, sock);
+ ievent = isc__nm_get_netievent_tlsdnsread(sock->worker, sock);
/*
* This MUST be done asynchronously, no matter which thread
* we'd clash in processbuffer() and grow the stack
* indefinitely.
*/
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
return;
}
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
if (isc__nmsocket_closing(sock)) {
atomic_store(&sock->reading, true);
isc_nmhandle_t *handle = NULL;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
if (isc__nmsocket_closing(sock)) {
return (ISC_R_CANCELED);
REQUIRE(sock->tls.senddata.base != NULL);
REQUIRE(sock->tls.senddata.length > 0);
- isc_mem_put(sock->mgr->mctx, sock->tls.senddata.base,
+ isc_mem_put(sock->worker->mctx, sock->tls.senddata.base,
sock->tls.senddata.length);
sock->tls.senddata.base = NULL;
sock->tls.senddata.length = 0;
isc__nm_uvreq_put(&uvreq, sock);
if (status != 0) {
- tls_error(sock, result);
+ tls_error(sock, isc_uverr2result(status));
return;
}
pending = (int)ISC_NETMGR_TCP_RECVBUF_SIZE;
}
- sock->tls.senddata.base = isc_mem_get(sock->mgr->mctx, pending);
+ sock->tls.senddata.base = isc_mem_get(sock->worker->mctx,
+ pending);
sock->tls.senddata.length = pending;
/* It's a bit misnomer here, but it does the right thing */
return;
}
- ievent = isc__nm_get_netievent_tlsdnscycle(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tlsdnscycle(sock->worker, sock);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
UNUSED(worker);
REQUIRE(VALID_NMSOCK(ievent->sock));
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
sock = ievent->sock;
int rv;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->reading));
REQUIRE(buf != NULL);
}
if (!atomic_load(&sock->client)) {
- sock->read_timeout = atomic_load(&sock->mgr->idle);
+ sock->read_timeout = atomic_load(&sock->worker->netmgr->idle);
}
/*
*/
isc__netievent_tlsdnsaccept_t *ievent =
- isc__nm_get_netievent_tlsdnsaccept(sock->mgr, sock, quota);
- isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_get_netievent_tlsdnsaccept(sock->worker, sock, quota);
+ isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
/*
UNUSED(worker);
REQUIRE(VALID_NMSOCK(ievent->sock));
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
result = accept_connection(ievent->sock, ievent->quota);
isc__nm_accept_connection_log(result, can_log_tlsdns_quota());
isc_sockaddr_t local;
REQUIRE(VALID_NMSOCK(ssock));
- REQUIRE(ssock->tid == isc_nm_tid());
+ REQUIRE(ssock->tid == isc_tid());
if (isc__nmsocket_closing(ssock)) {
if (quota != NULL) {
REQUIRE(ssock->accept_cb != NULL);
- csock = isc_mem_get(ssock->mgr->mctx, sizeof(isc_nmsocket_t));
- isc__nmsocket_init(csock, ssock->mgr, isc_nm_tlsdnssocket,
+ csock = isc_mem_get(ssock->worker->mctx, sizeof(isc_nmsocket_t));
+ isc__nmsocket_init(csock, ssock->worker, isc_nm_tlsdnssocket,
&ssock->iface);
- csock->tid = ssock->tid;
isc__nmsocket_attach(ssock, &csock->server);
csock->accept_cb = ssock->accept_cb;
csock->accept_cbarg = ssock->accept_cbarg;
csock->quota = quota;
atomic_init(&csock->accepting, true);
- worker = &csock->mgr->workers[csock->tid];
+ worker = csock->worker;
- r = uv_tcp_init(&worker->loop, &csock->uv_handle.tcp);
+ r = uv_tcp_init(&worker->loop->loop, &csock->uv_handle.tcp);
UV_RUNTIME_CHECK(uv_tcp_init, r);
uv_handle_set_data(&csock->uv_handle.handle, csock);
- r = uv_timer_init(&worker->loop, &csock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &csock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&csock->read_timer, csock);
isc__nm_incstats(csock, STATID_ACCEPT);
- csock->read_timeout = atomic_load(&csock->mgr->init);
+ csock->read_timeout = atomic_load(&csock->worker->netmgr->init);
csock->closehandle_cb = isc__nm_resume_processing;
* The initial timer has been set, update the read timeout for
* the next reads.
*/
- csock->read_timeout = (atomic_load(&csock->keepalive)
- ? atomic_load(&csock->mgr->keepalive)
- : atomic_load(&csock->mgr->idle));
+ csock->read_timeout =
+ (atomic_load(&csock->keepalive)
+ ? atomic_load(&csock->worker->netmgr->keepalive)
+ : atomic_load(&csock->worker->netmgr->idle));
result = isc__nm_process_sock_buffer(csock);
if (result != ISC_R_SUCCESS) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tlsdnssocket);
- uvreq = isc__nm_uvreq_get(sock->mgr, sock);
+ uvreq = isc__nm_uvreq_get(sock->worker, sock);
*(uint16_t *)uvreq->tcplen = htons(region->length);
uvreq->uvbuf.base = (char *)region->base;
uvreq->uvbuf.len = region->length;
uvreq->cb.send = cb;
uvreq->cbarg = cbarg;
- ievent = isc__nm_get_netievent_tlsdnssend(sock->mgr, sock, uvreq);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tlsdnssend(sock->worker, sock, uvreq);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
return;
}
UNUSED(worker);
REQUIRE(sock->type == isc_nm_tlsdnssocket);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
if (sock->write_timeout == 0) {
sock->write_timeout =
(atomic_load(&sock->keepalive)
- ? atomic_load(&sock->mgr->keepalive)
- : atomic_load(&sock->mgr->idle));
+ ? atomic_load(&sock->worker->netmgr->keepalive)
+ : atomic_load(&sock->worker->netmgr->idle));
}
result = tlsdns_send_direct(sock, uvreq);
static void
tlsdns_send_enqueue(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
isc__netievent_tlsdnssend_t *ievent =
- isc__nm_get_netievent_tlsdnssend(sock->mgr, sock, req);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_get_netievent_tlsdnssend(sock->worker, sock, req);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
static isc_result_t
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(req));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(sock->type == isc_nm_tlsdnssocket);
result = tls_pop_error(sock);
* There's no SSL_writev(), so we need to use a local buffer to
* assemble the whole message
*/
- worker = &sock->mgr->workers[sock->tid];
+ worker = sock->worker;
sendlen = req->uvbuf.len + sizeof(uint16_t);
memmove(worker->sendbuf, req->tcplen, sizeof(uint16_t));
memmove(worker->sendbuf + sizeof(uint16_t), req->uvbuf.base,
isc_nmsocket_t *sock = uv_handle_get_data(handle);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
uv_handle_set_data(handle, NULL);
static void
tlsdns_close_sock(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false },
}
}
-static void
-stop_tlsdns_child(isc_nmsocket_t *sock) {
- REQUIRE(sock->type == isc_nm_tlsdnssocket);
- REQUIRE(sock->tid == isc_nm_tid());
-
- if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
- true)) {
- return;
- }
-
- tlsdns_close_direct(sock);
-
- atomic_fetch_sub(&sock->parent->rchildren, 1);
-
- isc_barrier_wait(&sock->parent->stoplistening);
-}
-
-static void
-stop_tlsdns_parent(isc_nmsocket_t *sock) {
- isc_nmsocket_t *csock = NULL;
-
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
- REQUIRE(sock->type == isc_nm_tlsdnslistener);
-
- isc_barrier_init(&sock->stoplistening, sock->nchildren);
-
- for (size_t i = 0; i < sock->nchildren; i++) {
- csock = &sock->children[i];
-
- REQUIRE(VALID_NMSOCK(csock));
-
- if ((int)i == isc_nm_tid()) {
- /*
- * We need to schedule closing the other sockets first
- */
- continue;
- }
-
- atomic_store(&csock->active, false);
- enqueue_stoplistening(csock);
- }
-
- csock = &sock->children[isc_nm_tid()];
- atomic_store(&csock->active, false);
- stop_tlsdns_child(csock);
-
- atomic_store(&sock->closed, true);
- isc__nmsocket_prep_destroy(sock);
-}
-
static void
tlsdns_close_direct(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
REQUIRE(sock->tls.pending_req == NULL);
return;
}
- if (sock->tid == isc_nm_tid()) {
+ if (sock->tid == isc_tid()) {
tlsdns_close_direct(sock);
} else {
/*
* channel
*/
isc__netievent_tlsdnsclose_t *ievent =
- isc__nm_get_netievent_tlsdnsclose(sock->mgr, sock);
+ isc__nm_get_netievent_tlsdnsclose(sock->worker, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
+ isc__nm_enqueue_ievent(sock->worker,
(isc__netievent_t *)ievent);
}
}
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
tlsdns_close_direct(sock);
}
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(isc__nm_in_netthread());
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
isc__nmsocket_prep_destroy(sock);
isc__nmsocket_detach(&sock);
void
isc__nm_tlsdns_shutdown(isc_nmsocket_t *sock) {
+ isc__networker_t *worker = NULL;
+
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(sock->type == isc_nm_tlsdnssocket);
+ worker = sock->worker;
+
/*
* If the socket is active, mark it inactive and
* continue. If it isn't active, stop now.
}
if (sock->statichandle != NULL) {
- if (isc__nm_closing(sock)) {
+ if (isc__nm_closing(worker)) {
isc__nm_failed_read_cb(sock, ISC_R_SHUTTINGDOWN, false);
} else {
isc__nm_failed_read_cb(sock, ISC_R_CANCELED, false);
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tlsdnssocket);
- ievent = isc__nm_get_netievent_tlsdnscancel(sock->mgr, sock, handle);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tlsdnscancel(sock->worker, sock, handle);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
isc__nm_failed_read_cb(sock, ISC_R_EOF, false);
}
* Ensure that the isc_tls_t is being accessed from
* within the worker thread the socket is bound to.
*/
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
if (sock->tls.client_sess_cache != NULL &&
sock->tls.client_session_saved == false)
{
atomic_load(&sock->outerhandle->sock->closing) ||
(sock->listener != NULL &&
!isc__nmsocket_active(sock->listener)) ||
- isc__nm_closing(sock));
+ isc__nm_closing(sock->worker));
}
static void
* requests. See the mirroring code in the tls_send_outgoing()
* function. */
if (send_req->data.length > sizeof(send_req->smallbuf)) {
- isc_mem_put(handle->sock->mgr->mctx, send_req->data.base,
+ isc_mem_put(handle->sock->worker->mctx, send_req->data.base,
send_req->data.length);
} else {
INSIST(&send_req->smallbuf[0] == send_req->data.base);
}
- isc_mem_put(handle->sock->mgr->mctx, send_req, sizeof(*send_req));
+ isc_mem_put(handle->sock->worker->mctx, send_req, sizeof(*send_req));
tlssock->tlsstream.nsending--;
if (finish && eresult == ISC_R_SUCCESS) {
} else if (sock->recv_cb != NULL && sock->statichandle != NULL) {
isc__nm_uvreq_t *req = NULL;
INSIST(VALID_NMHANDLE(sock->statichandle));
- req = isc__nm_uvreq_get(sock->mgr, sock);
+ req = isc__nm_uvreq_get(sock->worker, sock);
req->cb.recv = sock->recv_cb;
req->cbarg = sock->recv_cbarg;
isc_nmhandle_attach(sock->statichandle, &req->handle);
static void
async_tls_do_bio(isc_nmsocket_t *sock) {
isc__netievent_tlsdobio_t *ievent =
- isc__nm_get_netievent_tlsdobio(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_get_netievent_tlsdobio(sock->worker, sock);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
static int
pending = TLS_BUF_SIZE;
}
- send_req = isc_mem_get(sock->mgr->mctx, sizeof(*send_req));
+ send_req = isc_mem_get(sock->worker->mctx, sizeof(*send_req));
*send_req = (isc_nmsocket_tls_send_req_t){ .finish = finish,
.data.length = pending };
/* Let's try to avoid a memory allocation for small write requests */
if ((size_t)pending > sizeof(send_req->smallbuf)) {
- send_req->data.base = isc_mem_get(sock->mgr->mctx, pending);
+ send_req->data.base = isc_mem_get(sock->worker->mctx, pending);
} else {
send_req->data.base = &send_req->smallbuf[0];
}
int saved_errno = 0;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
/* We will resume read if TLS layer wants us to */
if (sock->tlsstream.reading && sock->outerhandle) {
REQUIRE(VALID_NMSOCK(tlssock));
REQUIRE(VALID_NMHANDLE(handle));
- REQUIRE(tlssock->tid == isc_nm_tid());
+ REQUIRE(tlssock->tid == isc_tid());
if (result != ISC_R_SUCCESS) {
tls_failed_read_cb(tlssock, result);
return;
+ } else if (isc__nmsocket_closing(handle->sock)) {
+ tls_failed_read_cb(tlssock, ISC_R_CANCELED);
+ return;
}
tls_do_bio(tlssock, region, NULL, false);
static isc_result_t
initialize_tls(isc_nmsocket_t *sock, bool server) {
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
sock->tlsstream.bio_in = BIO_new(BIO_s_mem());
if (sock->tlsstream.bio_in == NULL) {
isc_nmsocket_t *tlslistensock = (isc_nmsocket_t *)cbarg;
isc_nmsocket_t *tlssock = NULL;
isc_tlsctx_t *tlsctx = NULL;
- int tid;
/* If accept() was unsuccessful we can't do anything */
if (result != ISC_R_SUCCESS) {
/*
* We need to create a 'wrapper' tlssocket for this connection.
*/
- tlssock = isc_mem_get(handle->sock->mgr->mctx, sizeof(*tlssock));
- isc__nmsocket_init(tlssock, handle->sock->mgr, isc_nm_tlssocket,
+ tlssock = isc_mem_get(handle->sock->worker->mctx, sizeof(*tlssock));
+ isc__nmsocket_init(tlssock, handle->sock->worker, isc_nm_tlssocket,
&handle->sock->iface);
- tid = isc_nm_tid();
/* We need to initialize SSL now to reference SSL_CTX properly */
- tlsctx = tls_get_listener_tlsctx(tlslistensock, tid);
+ tlsctx = tls_get_listener_tlsctx(tlslistensock, isc_tid());
RUNTIME_CHECK(tlsctx != NULL);
isc_tlsctx_attach(tlsctx, &tlssock->tlsstream.ctx);
tlssock->tlsstream.tls = isc_tls_create(tlssock->tlsstream.ctx);
isc__nmsocket_attach(tlslistensock, &tlssock->listener);
isc_nmhandle_attach(handle, &tlssock->outerhandle);
tlssock->peer = handle->sock->peer;
- tlssock->read_timeout = atomic_load(&handle->sock->mgr->init);
- tlssock->tid = tid;
+ tlssock->read_timeout =
+ atomic_load(&handle->sock->worker->netmgr->init);
/*
* Hold a reference to tlssock in the TCP socket: it will
isc_result_t result;
isc_nmsocket_t *tlssock = NULL;
isc_nmsocket_t *tsock = NULL;
+ isc__networker_t *worker = &mgr->workers[isc_tid()];
REQUIRE(VALID_NM(mgr));
- if (atomic_load(&mgr->closing)) {
+ REQUIRE(isc_tid() == 0);
+
+ if (isc__nm_closing(worker)) {
return (ISC_R_SHUTTINGDOWN);
}
- tlssock = isc_mem_get(mgr->mctx, sizeof(*tlssock));
+ if (workers == 0) {
+ workers = mgr->nloops;
+ }
+ REQUIRE(workers <= mgr->nloops);
+
+ tlssock = isc_mem_get(worker->mctx, sizeof(*tlssock));
- isc__nmsocket_init(tlssock, mgr, isc_nm_tlslistener, iface);
- tlssock->result = ISC_R_UNSET;
+ isc__nmsocket_init(tlssock, worker, isc_nm_tlslistener, iface);
tlssock->accept_cb = accept_cb;
tlssock->accept_cbarg = accept_cbarg;
tls_init_listener_tlsctx(tlssock, sslctx);
isc__nm_uvreq_t *req = ievent->req;
REQUIRE(VALID_UVREQ(req));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
UNUSED(worker);
return;
}
- uvreq = isc__nm_uvreq_get(sock->mgr, sock);
+ uvreq = isc__nm_uvreq_get(sock->worker, sock);
isc_nmhandle_attach(handle, &uvreq->handle);
uvreq->cb.send = cb;
uvreq->cbarg = cbarg;
/*
* We need to create an event and pass it using async channel
*/
- ievent = isc__nm_get_netievent_tlssend(sock->mgr, sock, uvreq);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tlssend(sock->worker, sock, uvreq);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
(isc__netievent_tlsstartread_t *)ev0;
isc_nmsocket_t *sock = ievent->sock;
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
UNUSED(worker);
sock = handle->sock;
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->statichandle == handle);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(sock->recv_cb == NULL);
if (inactive(sock)) {
sock->recv_cb = cb;
sock->recv_cbarg = cbarg;
- ievent = isc__nm_get_netievent_tlsstartread(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tlsstartread(sock->worker, sock);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
static void
tls_close_direct(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
/*
* At this point we're certain that there are no
* external references, we can close everything.
return;
}
- ievent = isc__nm_get_netievent_tlsclose(sock->mgr, sock);
- isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ ievent = isc__nm_get_netievent_tlsclose(sock->worker, sock);
+ isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
isc__netievent_tlsclose_t *ievent = (isc__netievent_tlsclose_t *)ev0;
isc_nmsocket_t *sock = ievent->sock;
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
UNUSED(worker);
isc_tlsctx_client_session_cache_t *client_sess_cache,
unsigned int timeout) {
isc_nmsocket_t *nsock = NULL;
-#if defined(NETMGR_TRACE) && defined(NETMGR_TRACE_VERBOSE)
- fprintf(stderr, "TLS: isc_nm_tlsconnect(): in net thread: %s\n",
- isc__nm_in_netthread() ? "yes" : "no");
-#endif /* NETMGR_TRACE */
+ isc__networker_t *worker = &mgr->workers[isc_tid()];
REQUIRE(VALID_NM(mgr));
- if (atomic_load(&mgr->closing)) {
+ if (isc__nm_closing(worker)) {
cb(NULL, ISC_R_SHUTTINGDOWN, cbarg);
return;
}
- nsock = isc_mem_get(mgr->mctx, sizeof(*nsock));
- isc__nmsocket_init(nsock, mgr, isc_nm_tlssocket, local);
- nsock->result = ISC_R_UNSET;
+ nsock = isc_mem_get(worker->mctx, sizeof(*nsock));
+ isc__nmsocket_init(nsock, worker, isc_nm_tlssocket, local);
nsock->connect_cb = cb;
nsock->connect_cbarg = cbarg;
nsock->connect_timeout = timeout;
tcp_connected(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
isc_nmsocket_t *tlssock = (isc_nmsocket_t *)cbarg;
isc_nmhandle_t *tlshandle = NULL;
+ isc__networker_t *worker = NULL;
REQUIRE(VALID_NMSOCK(tlssock));
- tlssock->tid = isc_nm_tid();
+ worker = tlssock->worker;
+
if (result != ISC_R_SUCCESS) {
goto error;
}
tlssock->iface = handle->sock->iface;
tlssock->peer = handle->sock->peer;
- if (isc__nm_closing(tlssock)) {
+ if (isc__nm_closing(worker)) {
result = ISC_R_SHUTTINGDOWN;
goto error;
}
REQUIRE(sock->type == isc_nm_tlssocket);
- if (sock->tid == isc_nm_tid()) {
+ if (sock->tid == isc_tid()) {
tls_cancelread(sock);
} else {
- ievent = isc__nm_get_netievent_tlscancel(sock->mgr, sock,
+ ievent = isc__nm_get_netievent_tlscancel(sock->worker, sock,
handle);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
+ isc__nm_enqueue_ievent(sock->worker,
(isc__netievent_t *)ievent);
}
}
isc_nmsocket_t *sock = ievent->sock;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(worker->id == sock->tid);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
UNUSED(worker);
tls_cancelread(sock);
tls_init_listener_tlsctx(isc_nmsocket_t *listener, isc_tlsctx_t *ctx) {
size_t nworkers;
- REQUIRE(VALID_NM(listener->mgr));
+ REQUIRE(VALID_NMSOCK(listener));
REQUIRE(ctx != NULL);
- nworkers = (size_t)isc_nm_getnworkers(listener->mgr);
+ nworkers =
+ (size_t)isc_loopmgr_nloops(listener->worker->netmgr->loopmgr);
INSIST(nworkers > 0);
listener->tlsstream.listener_tls_ctx = isc_mem_get(
- listener->mgr->mctx, sizeof(isc_tlsctx_t *) * nworkers);
+ listener->worker->mctx, sizeof(isc_tlsctx_t *) * nworkers);
listener->tlsstream.n_listener_tls_ctx = nworkers;
for (size_t i = 0; i < nworkers; i++) {
listener->tlsstream.listener_tls_ctx[i] = NULL;
static void
tls_cleanup_listener_tlsctx(isc_nmsocket_t *listener) {
- REQUIRE(VALID_NM(listener->mgr));
+ REQUIRE(VALID_NMSOCK(listener));
if (listener->tlsstream.listener_tls_ctx == NULL) {
return;
for (size_t i = 0; i < listener->tlsstream.n_listener_tls_ctx; i++) {
isc_tlsctx_free(&listener->tlsstream.listener_tls_ctx[i]);
}
- isc_mem_put(listener->mgr->mctx, listener->tlsstream.listener_tls_ctx,
+ isc_mem_put(listener->worker->mctx,
+ listener->tlsstream.listener_tls_ctx,
sizeof(isc_tlsctx_t *) *
listener->tlsstream.n_listener_tls_ctx);
listener->tlsstream.n_listener_tls_ctx = 0;
static isc_tlsctx_t *
tls_get_listener_tlsctx(isc_nmsocket_t *listener, const int tid) {
- REQUIRE(VALID_NM(listener->mgr));
+ REQUIRE(VALID_NMSOCK(listener));
REQUIRE(tid >= 0);
if (listener->tlsstream.listener_tls_ctx == NULL) {
* Ensure that the isc_tls_t is being accessed from
* within the worker thread the socket is bound to.
*/
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
if (sock->tlsstream.client_sess_cache != NULL &&
sock->tlsstream.client_session_saved == false)
{
#include <isc/util.h>
#include <isc/uv.h>
+#include "../loop_p.h"
#include "netmgr-int.h"
#ifdef HAVE_NET_ROUTE_H
#endif /* if defined(HAVE_LINUX_NETLINK_H) && defined(HAVE_LINUX_RTNETLINK_H) \
*/
-static isc_result_t
-udp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
- isc_sockaddr_t *peer);
-
static void
udp_recv_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf,
const struct sockaddr *addr, unsigned flags);
static void
read_timer_close_cb(uv_handle_t *handle);
-static void
-udp_close_direct(isc_nmsocket_t *sock);
-
-static void
-stop_udp_parent(isc_nmsocket_t *sock);
-static void
-stop_udp_child(isc_nmsocket_t *sock);
-
static uv_os_sock_t
isc__nm_udp_lb_socket(isc_nm_t *mgr, sa_family_t sa_family) {
isc_result_t result;
uv_os_sock_t fd, int tid) {
isc_nmsocket_t *csock;
isc__netievent_udplisten_t *ievent = NULL;
+ isc__networker_t *worker = &mgr->workers[tid];
csock = &sock->children[tid];
- isc__nmsocket_init(csock, mgr, isc_nm_udpsocket, iface);
+ isc__nmsocket_init(csock, worker, isc_nm_udpsocket, iface);
csock->parent = sock;
- csock->iface = sock->iface;
- atomic_init(&csock->reading, true);
csock->recv_cb = sock->recv_cb;
csock->recv_cbarg = sock->recv_cbarg;
- csock->tid = tid;
+
+ atomic_init(&csock->reading, true);
if (mgr->load_balance_sockets) {
UNUSED(fd);
}
REQUIRE(csock->fd >= 0);
- ievent = isc__nm_get_netievent_udplisten(mgr, csock);
- isc__nm_maybe_enqueue_ievent(&mgr->workers[tid],
- (isc__netievent_t *)ievent);
-}
+ ievent = isc__nm_get_netievent_udplisten(worker, csock);
-static void
-enqueue_stoplistening(isc_nmsocket_t *sock) {
- isc__netievent_udpstop_t *ievent =
- isc__nm_get_netievent_udpstop(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ if (tid == 0) {
+ isc__nm_process_ievent(&mgr->workers[tid],
+ (isc__netievent_t *)ievent);
+ } else {
+ isc__nm_enqueue_ievent(&mgr->workers[tid],
+ (isc__netievent_t *)ievent);
+ }
}
isc_result_t
isc_nm_listenudp(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface,
isc_nm_recv_cb_t cb, void *cbarg, isc_nmsocket_t **sockp) {
- isc_result_t result = ISC_R_SUCCESS;
+ isc_result_t result = ISC_R_UNSET;
isc_nmsocket_t *sock = NULL;
size_t children_size = 0;
- REQUIRE(VALID_NM(mgr));
uv_os_sock_t fd = -1;
+ isc__networker_t *worker = &mgr->workers[0];
- /*
- * We are creating mgr->nworkers duplicated sockets, one
- * socket for each worker thread.
- */
- sock = isc_mem_get(mgr->mctx, sizeof(isc_nmsocket_t));
- isc__nmsocket_init(sock, mgr, isc_nm_udplistener, iface);
+ REQUIRE(VALID_NM(mgr));
+ REQUIRE(isc_tid() == 0);
+
+ if (isc__nm_closing(worker)) {
+ return (ISC_R_SHUTTINGDOWN);
+ }
+
+ if (workers == 0) {
+ workers = mgr->nloops;
+ }
+ REQUIRE(workers <= mgr->nloops);
+
+ sock = isc_mem_get(worker->mctx, sizeof(isc_nmsocket_t));
+ isc__nmsocket_init(sock, worker, isc_nm_udplistener, iface);
atomic_init(&sock->rchildren, 0);
- sock->nchildren = (workers == ISC_NM_LISTEN_ALL)
- ? (uint32_t)mgr->nworkers
- : workers;
+ sock->nchildren = (workers == ISC_NM_LISTEN_ALL) ? (uint32_t)mgr->nloops
+ : workers;
children_size = sock->nchildren * sizeof(sock->children[0]);
- sock->children = isc_mem_get(mgr->mctx, children_size);
+ sock->children = isc_mem_get(worker->mctx, children_size);
memset(sock->children, 0, children_size);
+ isc_barrier_init(&sock->barrier, sock->nchildren);
+
sock->recv_cb = cb;
sock->recv_cbarg = cbarg;
- sock->result = ISC_R_UNSET;
-
- sock->tid = 0;
- sock->fd = -1;
if (!mgr->load_balance_sockets) {
fd = isc__nm_udp_lb_socket(mgr, iface->type.sa.sa_family);
}
- isc_barrier_init(&sock->startlistening, sock->nchildren);
-
- for (size_t i = 0; i < sock->nchildren; i++) {
- if ((int)i == isc_nm_tid()) {
- continue;
- }
+ for (size_t i = 1; i < sock->nchildren; i++) {
start_udp_child(mgr, iface, sock, fd, i);
}
- if (isc__nm_in_netthread()) {
- start_udp_child(mgr, iface, sock, fd, isc_nm_tid());
- }
+ start_udp_child(mgr, iface, sock, fd, 0);
if (!mgr->load_balance_sockets) {
isc__nm_closesocket(fd);
}
LOCK(&sock->lock);
- while (atomic_load(&sock->rchildren) != sock->nchildren) {
- WAIT(&sock->cond, &sock->lock);
- }
result = sock->result;
- atomic_store(&sock->active, true);
UNLOCK(&sock->lock);
-
INSIST(result != ISC_R_UNSET);
- if (result == ISC_R_SUCCESS) {
- REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren);
- *sockp = sock;
- } else {
+ atomic_store(&sock->active, true);
+
+ if (result != ISC_R_SUCCESS) {
atomic_store(&sock->active, false);
- enqueue_stoplistening(sock);
+ isc__nm_udp_stoplistening(sock);
isc_nmsocket_close(&sock);
- }
- return (result);
+ return (result);
+ }
+ REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren);
+ *sockp = sock;
+ return (ISC_R_SUCCESS);
}
#ifdef USE_ROUTE_SOCKET
static isc_result_t
route_connect_direct(isc_nmsocket_t *sock) {
isc__networker_t *worker = NULL;
- isc_result_t result = ISC_R_UNSET;
int r;
- REQUIRE(isc__nm_in_netthread());
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
- worker = &sock->mgr->workers[isc_nm_tid()];
+ worker = sock->worker;
atomic_store(&sock->connecting, true);
- r = uv_udp_init(&worker->loop, &sock->uv_handle.udp);
+ r = uv_udp_init(&worker->loop->loop, &sock->uv_handle.udp);
UV_RUNTIME_CHECK(uv_udp_init, r);
uv_handle_set_data(&sock->uv_handle.handle, sock);
- r = uv_timer_init(&worker->loop, &sock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &sock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
- if (isc__nm_closing(sock)) {
- result = ISC_R_SHUTTINGDOWN;
- goto error;
+ if (isc__nm_closing(worker)) {
+ return (ISC_R_SHUTTINGDOWN);
}
r = uv_udp_open(&sock->uv_handle.udp, sock->fd);
if (r != 0) {
- goto done;
+ return (isc_uverr2result(r));
}
- isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle);
+ isc__nm_set_network_buffers(sock->worker->netmgr,
+ &sock->uv_handle.handle);
atomic_store(&sock->connecting, false);
atomic_store(&sock->connected, true);
-done:
- result = isc_uverr2result(r);
-error:
-
- LOCK(&sock->lock);
- sock->result = result;
- SIGNAL(&sock->cond);
- if (!atomic_load(&sock->active)) {
- WAIT(&sock->scond, &sock->lock);
- }
- INSIST(atomic_load(&sock->active));
- UNLOCK(&sock->lock);
-
- return (result);
+ return (ISC_R_SUCCESS);
}
-/*
- * Asynchronous 'udpconnect' call handler: open a new UDP socket and
- * call the 'open' callback with a handle.
- */
-void
-isc__nm_async_routeconnect(isc__networker_t *worker, isc__netievent_t *ev0) {
- isc__netievent_routeconnect_t *ievent =
- (isc__netievent_routeconnect_t *)ev0;
- isc_nmsocket_t *sock = ievent->sock;
- isc__nm_uvreq_t *req = ievent->req;
- isc_result_t result;
-
- UNUSED(worker);
-
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->type == isc_nm_udpsocket);
- REQUIRE(sock->parent == NULL);
- REQUIRE(sock->tid == isc_nm_tid());
-
- result = route_connect_direct(sock);
- if (result != ISC_R_SUCCESS) {
- atomic_store(&sock->active, false);
- isc__nm_udp_close(sock);
- isc__nm_connectcb(sock, req, result, true);
- } else {
- /*
- * The callback has to be called after the socket has been
- * initialized
- */
- isc__nm_connectcb(sock, req, ISC_R_SUCCESS, true);
- }
-
- /*
- * The sock is now attached to the handle.
- */
- isc__nmsocket_detach(&sock);
-}
#endif /* USE_ROUTE_SOCKET */
isc_result_t
#ifdef USE_ROUTE_SOCKET
isc_result_t result = ISC_R_SUCCESS;
isc_nmsocket_t *sock = NULL;
- isc__netievent_udpconnect_t *event = NULL;
isc__nm_uvreq_t *req = NULL;
+ isc__networker_t *worker = &mgr->workers[isc_tid()];
+ uv_os_sock_t fd = -1;
REQUIRE(VALID_NM(mgr));
+ REQUIRE(isc_tid() == 0);
+
+ if (isc__nm_closing(worker)) {
+ return (ISC_R_SHUTTINGDOWN);
+ }
- sock = isc_mem_get(mgr->mctx, sizeof(*sock));
- isc__nmsocket_init(sock, mgr, isc_nm_udpsocket, NULL);
+ result = route_socket(&fd);
+ if (result != ISC_R_SUCCESS) {
+ return (result);
+ }
+
+ sock = isc_mem_get(worker->mctx, sizeof(*sock));
+ isc__nmsocket_init(sock, worker, isc_nm_udpsocket, NULL);
sock->connect_cb = cb;
sock->connect_cbarg = cbarg;
- sock->result = ISC_R_UNSET;
atomic_init(&sock->client, true);
sock->route_sock = true;
+ sock->fd = fd;
- req = isc__nm_uvreq_get(mgr, sock);
+ req = isc__nm_uvreq_get(worker, sock);
req->cb.connect = cb;
req->cbarg = cbarg;
req->handle = isc__nmhandle_get(sock, NULL, NULL);
- result = route_socket(&sock->fd);
+ atomic_store(&sock->active, true);
+
+ result = route_connect_direct(sock);
if (result != ISC_R_SUCCESS) {
- if (isc__nm_in_netthread()) {
- sock->tid = isc_nm_tid();
- }
- isc__nmsocket_clearcb(sock);
- isc__nm_connectcb(sock, req, result, true);
- atomic_store(&sock->closed, true);
- isc__nmsocket_detach(&sock);
- return (result);
+ atomic_store(&sock->active, false);
+ isc__nm_udp_close(sock);
}
- event = isc__nm_get_netievent_routeconnect(mgr, sock, req);
+ isc__nm_connectcb(sock, req, result, true);
- if (isc__nm_in_netthread()) {
- atomic_store(&sock->active, true);
- sock->tid = isc_nm_tid();
- isc__nm_async_routeconnect(&mgr->workers[sock->tid],
- (isc__netievent_t *)event);
- isc__nm_put_netievent_routeconnect(mgr, event);
- } else {
- atomic_init(&sock->active, false);
- sock->tid = 0;
- isc__nm_enqueue_ievent(&mgr->workers[sock->tid],
- (isc__netievent_t *)event);
- }
- LOCK(&sock->lock);
- while (sock->result == ISC_R_UNSET) {
- WAIT(&sock->cond, &sock->lock);
- }
- atomic_store(&sock->active, true);
- BROADCAST(&sock->scond);
- UNLOCK(&sock->lock);
+ isc__nmsocket_detach(&sock);
- return (sock->result);
+ return (ISC_R_SUCCESS);
#else /* USE_ROUTE_SOCKET */
UNUSED(mgr);
UNUSED(cb);
isc_nm_t *mgr = NULL;
REQUIRE(VALID_NMSOCK(ievent->sock));
- REQUIRE(ievent->sock->tid == isc_nm_tid());
+ REQUIRE(ievent->sock->tid == isc_tid());
REQUIRE(VALID_NMSOCK(ievent->sock->parent));
sock = ievent->sock;
sa_family = sock->iface.type.sa.sa_family;
- mgr = sock->mgr;
+ mgr = sock->worker->netmgr;
REQUIRE(sock->type == isc_nm_udpsocket);
REQUIRE(sock->parent != NULL);
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
(void)isc__nm_socket_min_mtu(sock->fd, sa_family);
#if HAVE_DECL_UV_UDP_RECVMMSG
uv_init_flags |= UV_UDP_RECVMMSG;
#endif
- r = uv_udp_init_ex(&worker->loop, &sock->uv_handle.udp, uv_init_flags);
+ r = uv_udp_init_ex(&worker->loop->loop, &sock->uv_handle.udp,
+ uv_init_flags);
UV_RUNTIME_CHECK(uv_udp_init_ex, r);
uv_handle_set_data(&sock->uv_handle.handle, sock);
/* This keeps the socket alive after everything else is gone */
isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL });
- r = uv_timer_init(&worker->loop, &sock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &sock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
- LOCK(&sock->parent->lock);
-
r = uv_udp_open(&sock->uv_handle.udp, sock->fd);
if (r < 0) {
isc__nm_closesocket(sock->fd);
goto done;
}
} else {
+ LOCK(&sock->parent->lock);
if (sock->parent->fd == -1) {
/* This thread is first, bind the socket */
r = isc__nm_udp_freebind(&sock->uv_handle.udp,
uv_bind_flags);
if (r < 0) {
isc__nm_incstats(sock, STATID_BINDFAIL);
+ UNLOCK(&sock->parent->lock);
goto done;
}
sock->parent->uv_handle.udp.flags =
sock->uv_handle.udp.flags =
sock->parent->uv_handle.udp.flags;
}
+ UNLOCK(&sock->parent->lock);
}
- isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle);
+ isc__nm_set_network_buffers(mgr, &sock->uv_handle.handle);
r = uv_udp_recv_start(&sock->uv_handle.udp, isc__nm_alloc_cb,
udp_recv_cb);
done:
result = isc_uverr2result(r);
atomic_fetch_add(&sock->parent->rchildren, 1);
+
+ LOCK(&sock->parent->lock);
if (sock->parent->result == ISC_R_UNSET) {
sock->parent->result = result;
+ } else {
+ REQUIRE(sock->parent->result == result);
}
- SIGNAL(&sock->parent->cond);
UNLOCK(&sock->parent->lock);
- isc_barrier_wait(&sock->parent->startlistening);
+ REQUIRE(!worker->loop->paused);
+ isc_barrier_wait(&sock->parent->barrier);
+}
+
+static void
+stop_udp_child(isc_nmsocket_t *sock, uint32_t tid) {
+ isc_nmsocket_t *csock = NULL;
+ isc__netievent_udpstop_t *ievent = NULL;
+
+ csock = &sock->children[tid];
+ REQUIRE(VALID_NMSOCK(csock));
+
+ atomic_store(&csock->active, false);
+ ievent = isc__nm_get_netievent_udpstop(csock->worker, csock);
+
+ if (tid == 0) {
+ isc__nm_process_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ } else {
+ isc__nm_enqueue_ievent(csock->worker,
+ (isc__netievent_t *)ievent);
+ }
+}
+
+static void
+stop_udp_parent(isc_nmsocket_t *sock) {
+ /* Stop the parent */
+ atomic_store(&sock->closed, true);
+ isc__nmsocket_prep_destroy(sock);
}
void
isc__nm_udp_stoplistening(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_udplistener);
+ REQUIRE(sock->tid == isc_tid());
+ REQUIRE(sock->tid == 0);
- if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
- true)) {
- UNREACHABLE();
- }
+ RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing,
+ &(bool){ false }, true));
- if (!isc__nm_in_netthread()) {
- enqueue_stoplistening(sock);
- } else {
- stop_udp_parent(sock);
+ /* Stop all the children */
+ for (size_t i = 1; i < sock->nchildren; i++) {
+ stop_udp_child(sock, i);
}
+
+ stop_udp_child(sock, 0);
+
+ stop_udp_parent(sock);
}
/*
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
+ REQUIRE(sock->parent != NULL);
- if (sock->parent != NULL) {
- stop_udp_child(sock);
- return;
- }
+ isc__nm_udp_close(sock);
- stop_udp_parent(sock);
+ (void)atomic_fetch_sub(&sock->parent->rchildren, 1);
+
+ REQUIRE(!worker->loop->paused);
+ isc_barrier_wait(&sock->parent->barrier);
}
/*
isc_sockaddr_t sockaddr, *sa = NULL;
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
- REQUIRE(atomic_load(&sock->reading));
+ REQUIRE(sock->tid == isc_tid());
/*
* When using recvmmsg(2), if no errors occur, there will be a final
#else
UNUSED(flags);
#endif
-
/*
+ * Possible reasons to return now without processing:
+ *
* - If we're simulating a firewall blocking UDP packets
* bigger than 'maxudp' bytes for testing purposes.
*/
- maxudp = atomic_load(&sock->mgr->maxudp);
+ maxudp = atomic_load(&sock->worker->netmgr->maxudp);
if ((maxudp != 0 && (uint32_t)nrecv > maxudp)) {
/*
* We need to keep the read_cb intact in case, so the
goto free;
}
+ /*
+ * - If the network manager is shutting down
+ */
+ if (isc__nm_closing(sock->worker)) {
+ isc__nm_failed_read_cb(sock, ISC_R_SHUTTINGDOWN, false);
+ goto free;
+ }
+
/*
* - If the socket is no longer active.
*/
isc__nm_free_uvbuf(sock, buf);
}
+static void
+udp_send_cb(uv_udp_send_t *req, int status) {
+ isc_result_t result = ISC_R_SUCCESS;
+ isc__nm_uvreq_t *uvreq = uv_handle_get_data((uv_handle_t *)req);
+ isc_nmsocket_t *sock = NULL;
+
+ REQUIRE(VALID_UVREQ(uvreq));
+ REQUIRE(VALID_NMHANDLE(uvreq->handle));
+
+ sock = uvreq->sock;
+
+ REQUIRE(VALID_NMSOCK(sock));
+ REQUIRE(sock->tid == isc_tid());
+
+ if (status < 0) {
+ result = isc_uverr2result(status);
+ isc__nm_incstats(sock, STATID_SENDFAIL);
+ }
+
+ isc__nm_sendcb(sock, uvreq, result, false);
+}
+
/*
* Send the data in 'region' to a peer via a UDP socket. We try to find
* a proper sibling/child socket so that we won't have to jump to
isc__nm_udp_send(isc_nmhandle_t *handle, const isc_region_t *region,
isc_nm_cb_t cb, void *cbarg) {
isc_nmsocket_t *sock = handle->sock;
- isc_nmsocket_t *rsock = NULL;
- isc_sockaddr_t *peer = &handle->peer;
+ const isc_sockaddr_t *peer = &handle->peer;
+ const struct sockaddr *sa = &peer->type.sa;
isc__nm_uvreq_t *uvreq = NULL;
- uint32_t maxudp = atomic_load(&sock->mgr->maxudp);
- int ntid;
+ isc__networker_t *worker = NULL;
+ uint32_t maxudp;
+ int r;
- INSIST(sock->type == isc_nm_udpsocket);
+ REQUIRE(VALID_NMSOCK(sock));
+ REQUIRE(sock->type == isc_nm_udpsocket);
+ REQUIRE(sock->tid == isc_tid());
+
+ worker = sock->worker;
+ maxudp = atomic_load(&worker->netmgr->maxudp);
/*
* We're simulating a firewall blocking UDP packets bigger than
return;
}
- if (atomic_load(&sock->client)) {
- /*
- * When we are sending from the client socket, we directly use
- * the socket provided.
- */
- rsock = sock;
- goto send;
- } else {
- /*
- * When we are sending from the server socket, we either use the
- * socket associated with the network thread we are in, or we
- * use the thread from the socket associated with the handle.
- */
- INSIST(sock->parent != NULL);
+ if (isc__nm_closing(worker)) {
+ cb(handle, ISC_R_SHUTTINGDOWN, cbarg);
+ return;
+ }
- if (isc__nm_in_netthread()) {
- ntid = isc_nm_tid();
- } else {
- ntid = sock->tid;
- }
- rsock = &sock->parent->children[ntid];
+ if (isc__nmsocket_closing(sock)) {
+ cb(handle, ISC_R_CANCELED, cbarg);
+ return;
}
-send:
- uvreq = isc__nm_uvreq_get(rsock->mgr, rsock);
+ uvreq = isc__nm_uvreq_get(sock->worker, sock);
uvreq->uvbuf.base = (char *)region->base;
uvreq->uvbuf.len = region->length;
uvreq->cb.send = cb;
uvreq->cbarg = cbarg;
- if (isc_nm_tid() == rsock->tid) {
- REQUIRE(rsock->tid == isc_nm_tid());
- isc__netievent_udpsend_t ievent = { .sock = rsock,
- .req = uvreq,
- .peer = *peer };
-
- isc__nm_async_udpsend(NULL, (isc__netievent_t *)&ievent);
- } else {
- isc__netievent_udpsend_t *ievent =
- isc__nm_get_netievent_udpsend(sock->mgr, rsock);
- ievent->peer = *peer;
- ievent->req = uvreq;
-
- isc__nm_enqueue_ievent(&sock->mgr->workers[rsock->tid],
- (isc__netievent_t *)ievent);
- }
-}
-
-/*
- * Asynchronous 'udpsend' event handler: send a packet on a UDP socket.
- */
-void
-isc__nm_async_udpsend(isc__networker_t *worker, isc__netievent_t *ev0) {
- isc_result_t result;
- isc__netievent_udpsend_t *ievent = (isc__netievent_udpsend_t *)ev0;
- isc_nmsocket_t *sock = ievent->sock;
- isc__nm_uvreq_t *uvreq = ievent->req;
-
- REQUIRE(sock->type == isc_nm_udpsocket);
- REQUIRE(sock->tid == isc_nm_tid());
- UNUSED(worker);
-
- if (isc__nmsocket_closing(sock)) {
- isc__nm_failed_send_cb(sock, uvreq, ISC_R_CANCELED);
- return;
- }
-
- result = udp_send_direct(sock, uvreq, &ievent->peer);
- if (result != ISC_R_SUCCESS) {
- isc__nm_incstats(sock, STATID_SENDFAIL);
- isc__nm_failed_send_cb(sock, uvreq, result);
- }
-}
-
-static void
-udp_send_cb(uv_udp_send_t *req, int status) {
- isc_result_t result = ISC_R_SUCCESS;
- isc__nm_uvreq_t *uvreq = uv_handle_get_data((uv_handle_t *)req);
- isc_nmsocket_t *sock = NULL;
-
- REQUIRE(VALID_UVREQ(uvreq));
- REQUIRE(VALID_NMHANDLE(uvreq->handle));
-
- sock = uvreq->sock;
-
- REQUIRE(sock->tid == isc_nm_tid());
-
- if (status < 0) {
- result = isc_uverr2result(status);
- isc__nm_incstats(sock, STATID_SENDFAIL);
- }
-
- isc__nm_sendcb(sock, uvreq, result, false);
-}
-
-/*
- * udp_send_direct sends buf to a peer on a socket. Sock has to be in
- * the same thread as the callee.
- */
-static isc_result_t
-udp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
- isc_sockaddr_t *peer) {
- const struct sockaddr *sa = &peer->type.sa;
- int r;
-
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(VALID_UVREQ(req));
- REQUIRE(sock->tid == isc_nm_tid());
- REQUIRE(sock->type == isc_nm_udpsocket);
-
- if (isc__nmsocket_closing(sock)) {
- return (ISC_R_CANCELED);
- }
-
#if UV_VERSION_HEX >= UV_VERSION(1, 27, 0)
/*
* If we used uv_udp_connect() (and not the shim version for
}
#endif
- r = uv_udp_send(&req->uv_req.udp_send, &sock->uv_handle.udp,
- &req->uvbuf, 1, sa, udp_send_cb);
+ r = uv_udp_send(&uvreq->uv_req.udp_send, &sock->uv_handle.udp,
+ &uvreq->uvbuf, 1, sa, udp_send_cb);
if (r < 0) {
- return (isc_uverr2result(r));
+ isc__nm_incstats(sock, STATID_SENDFAIL);
+ isc__nm_failed_send_cb(sock, uvreq, isc_uverr2result(r));
}
-
- return (ISC_R_SUCCESS);
}
static isc_result_t
udp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
- isc__networker_t *worker = NULL;
int uv_bind_flags = UV_UDP_REUSEADDR;
- isc_result_t result = ISC_R_UNSET;
int r;
+ isc__networker_t *worker = sock->worker;
- REQUIRE(isc__nm_in_netthread());
- REQUIRE(sock->tid == isc_nm_tid());
-
- worker = &sock->mgr->workers[isc_nm_tid()];
-
- atomic_store(&sock->connecting, true);
-
- r = uv_udp_init(&worker->loop, &sock->uv_handle.udp);
+ r = uv_udp_init(&worker->loop->loop, &sock->uv_handle.udp);
UV_RUNTIME_CHECK(uv_udp_init, r);
uv_handle_set_data(&sock->uv_handle.handle, sock);
- r = uv_timer_init(&worker->loop, &sock->read_timer);
+ r = uv_timer_init(&worker->loop->loop, &sock->read_timer);
UV_RUNTIME_CHECK(uv_timer_init, r);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
- if (isc__nm_closing(sock)) {
- result = ISC_R_SHUTTINGDOWN;
- goto error;
- }
-
r = uv_udp_open(&sock->uv_handle.udp, sock->fd);
if (r != 0) {
isc__nm_incstats(sock, STATID_OPENFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
isc__nm_incstats(sock, STATID_OPEN);
uv_bind_flags);
if (r != 0) {
isc__nm_incstats(sock, STATID_BINDFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
- isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle);
+ isc__nm_set_network_buffers(sock->worker->netmgr,
+ &sock->uv_handle.handle);
/*
* On FreeBSD the UDP connect() call sometimes results in a
} while (r == UV_EADDRINUSE && --req->connect_tries > 0);
if (r != 0) {
isc__nm_incstats(sock, STATID_CONNECTFAIL);
- goto done;
+ return (isc_uverr2result(r));
}
isc__nm_incstats(sock, STATID_CONNECT);
- atomic_store(&sock->connecting, false);
- atomic_store(&sock->connected, true);
-
-done:
- result = isc_uverr2result(r);
-error:
-
- LOCK(&sock->lock);
- sock->result = result;
- SIGNAL(&sock->cond);
- if (!atomic_load(&sock->active)) {
- WAIT(&sock->scond, &sock->lock);
- }
- INSIST(atomic_load(&sock->active));
- UNLOCK(&sock->lock);
-
- return (result);
-}
-
-/*
- * Asynchronous 'udpconnect' call handler: open a new UDP socket and
- * call the 'open' callback with a handle.
- */
-void
-isc__nm_async_udpconnect(isc__networker_t *worker, isc__netievent_t *ev0) {
- isc__netievent_udpconnect_t *ievent =
- (isc__netievent_udpconnect_t *)ev0;
- isc_nmsocket_t *sock = ievent->sock;
- isc__nm_uvreq_t *req = ievent->req;
- isc_result_t result;
-
- UNUSED(worker);
-
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->type == isc_nm_udpsocket);
- REQUIRE(sock->parent == NULL);
- REQUIRE(sock->tid == isc_nm_tid());
-
- result = udp_connect_direct(sock, req);
- if (result != ISC_R_SUCCESS) {
- atomic_store(&sock->active, false);
- isc__nm_udp_close(sock);
- isc__nm_connectcb(sock, req, result, true);
- } else {
- /*
- * The callback has to be called after the socket has been
- * initialized
- */
- isc__nm_connectcb(sock, req, ISC_R_SUCCESS, true);
- }
-
- /*
- * The sock is now attached to the handle.
- */
- isc__nmsocket_detach(&sock);
+ return (ISC_R_SUCCESS);
}
void
isc_nm_cb_t cb, void *cbarg, unsigned int timeout) {
isc_result_t result = ISC_R_SUCCESS;
isc_nmsocket_t *sock = NULL;
- isc__netievent_udpconnect_t *event = NULL;
isc__nm_uvreq_t *req = NULL;
sa_family_t sa_family;
+ isc__networker_t *worker = &mgr->workers[isc_tid()];
+ uv_os_sock_t fd = -1;
REQUIRE(VALID_NM(mgr));
REQUIRE(local != NULL);
REQUIRE(peer != NULL);
+ if (isc__nm_closing(worker)) {
+ cb(NULL, ISC_R_SHUTTINGDOWN, cbarg);
+ return;
+ }
+
sa_family = peer->type.sa.sa_family;
- sock = isc_mem_get(mgr->mctx, sizeof(isc_nmsocket_t));
- isc__nmsocket_init(sock, mgr, isc_nm_udpsocket, local);
+ result = isc__nm_socket(sa_family, SOCK_DGRAM, 0, &fd);
+ if (result != ISC_R_SUCCESS) {
+ cb(NULL, result, cbarg);
+ return;
+ }
+
+ /* Initialize the new socket */
+ /* FIXME: Use per-worker mempool for new sockets */
+ sock = isc_mem_get(worker->mctx, sizeof(isc_nmsocket_t));
+ isc__nmsocket_init(sock, worker, isc_nm_udpsocket, local);
sock->connect_cb = cb;
sock->connect_cbarg = cbarg;
sock->read_timeout = timeout;
sock->peer = *peer;
- sock->result = ISC_R_UNSET;
atomic_init(&sock->client, true);
- req = isc__nm_uvreq_get(mgr, sock);
- req->cb.connect = cb;
- req->cbarg = cbarg;
- req->peer = *peer;
- req->local = *local;
- req->handle = isc__nmhandle_get(sock, &req->peer, &sock->iface);
-
- result = isc__nm_socket(sa_family, SOCK_DGRAM, 0, &sock->fd);
- if (result != ISC_R_SUCCESS) {
- if (isc__nm_in_netthread()) {
- sock->tid = isc_nm_tid();
- }
- isc__nmsocket_clearcb(sock);
- isc__nm_connectcb(sock, req, result, true);
- atomic_store(&sock->closed, true);
- isc__nmsocket_detach(&sock);
- return;
- }
-
+ sock->fd = fd;
result = isc__nm_socket_reuse(sock->fd);
RUNTIME_CHECK(result == ISC_R_SUCCESS ||
result == ISC_R_NOTIMPLEMENTED);
(void)isc__nm_socket_min_mtu(sock->fd, sa_family);
- event = isc__nm_get_netievent_udpconnect(mgr, sock, req);
+ /* Initialize the request */
+ req = isc__nm_uvreq_get(worker, sock);
+ req->cb.connect = cb;
+ req->cbarg = cbarg;
+ req->peer = *peer;
+ req->local = *local;
+ req->handle = isc__nmhandle_get(sock, &req->peer, &sock->iface);
+
+ atomic_store(&sock->active, true);
+ atomic_store(&sock->connecting, true);
- if (isc__nm_in_netthread()) {
- atomic_store(&sock->active, true);
- sock->tid = isc_nm_tid();
- isc__nm_async_udpconnect(&mgr->workers[sock->tid],
- (isc__netievent_t *)event);
- isc__nm_put_netievent_udpconnect(mgr, event);
- } else {
- atomic_init(&sock->active, false);
- sock->tid = isc_random_uniform(mgr->nworkers);
- isc__nm_enqueue_ievent(&mgr->workers[sock->tid],
- (isc__netievent_t *)event);
- }
- LOCK(&sock->lock);
- while (sock->result == ISC_R_UNSET) {
- WAIT(&sock->cond, &sock->lock);
+ result = udp_connect_direct(sock, req);
+ if (result != ISC_R_SUCCESS) {
+ atomic_store(&sock->active, false);
+ isc__nm_failed_connect_cb(sock, req, result, true);
+ isc__nmsocket_detach(&sock);
+ return;
}
- atomic_store(&sock->active, true);
- BROADCAST(&sock->scond);
- UNLOCK(&sock->lock);
+
+ atomic_store(&sock->connecting, false);
+ atomic_store(&sock->connected, true);
+
+ isc__nm_connectcb(sock, req, ISC_R_SUCCESS, true);
+ isc__nmsocket_detach(&sock);
}
void
const struct sockaddr *addr, unsigned flags) {
isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)handle);
REQUIRE(VALID_NMSOCK(sock));
+ REQUIRE(atomic_load(&sock->client));
+ REQUIRE(sock->parent == NULL);
- udp_recv_cb(handle, nrecv, buf, addr, flags);
/*
- * If a caller calls isc_nm_read() on a listening socket, we can
- * get here, but we MUST NOT stop reading from the listener
- * socket. The only difference between listener and connected
- * sockets is that the former has sock->parent set and later
- * does not.
+ * This function can only be reached when calling isc_nm_read() on
+ * a UDP client socket. There's no point calling isc_nm_read() on
+ * a UDP listener socket; those are always reading.
+ *
+ * The reason why we stop the timer and the reading after calling the
+ * callback is because there's a time window where a second UDP packet
+ * might be received between isc__nm_stop_reading() call and
+ * isc_nm_read() call from the callback and such UDP datagram would be
+ * lost like tears in the rain.
*/
- if (!sock->parent) {
- isc__nmsocket_timer_stop(sock);
- isc__nm_stop_reading(sock);
- }
+ udp_recv_cb(handle, nrecv, buf, addr, flags);
+
+ isc__nmsocket_timer_stop(sock);
+ isc__nm_stop_reading(sock);
}
void
isc__nm_udp_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(result != ISC_R_SUCCESS);
+ REQUIRE(sock->tid == isc_tid());
if (atomic_load(&sock->client)) {
isc__nmsocket_timer_stop(sock);
isc__nm_stop_reading(sock);
+ /* Nobody expects the callback if isc_nm_read() wasn't called */
if (!sock->recv_read) {
goto destroy;
}
UNUSED(worker);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
- if (isc__nm_closing(sock)) {
+ if (isc__nm_closing(worker)) {
result = ISC_R_SHUTTINGDOWN;
- } else if (isc__nmsocket_closing(sock)) {
+ goto fail;
+ }
+
+ if (isc__nmsocket_closing(sock)) {
result = ISC_R_CANCELED;
- } else {
- result = isc__nm_start_reading(sock);
+ goto fail;
}
+ result = isc__nm_start_reading(sock);
if (result != ISC_R_SUCCESS) {
- atomic_store(&sock->reading, true);
- isc__nm_failed_read_cb(sock, result, false);
- return;
+ goto fail;
}
isc__nmsocket_timer_start(sock);
+ return;
+
+fail:
+ atomic_store(&sock->reading, true); /* required by the next call */
+ isc__nm_failed_read_cb(sock, result, false);
}
void
isc__nm_udp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
+ isc_nmsocket_t *sock = NULL;
+
REQUIRE(VALID_NMHANDLE(handle));
- REQUIRE(VALID_NMSOCK(handle->sock));
- isc_nmsocket_t *sock = handle->sock;
+ sock = handle->sock;
+ REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_udpsocket);
REQUIRE(sock->statichandle == handle);
REQUIRE(!sock->recv_read);
sock->recv_cbarg = cbarg;
sock->recv_read = true;
- if (!atomic_load(&sock->reading) && sock->tid == isc_nm_tid()) {
+ if (!atomic_load(&sock->reading) && sock->tid == isc_tid()) {
isc__netievent_udpread_t ievent = { .sock = sock };
- isc__nm_async_udpread(NULL, (isc__netievent_t *)&ievent);
+ isc__nm_async_udpread(sock->worker,
+ (isc__netievent_t *)&ievent);
} else {
isc__netievent_udpread_t *ievent =
- isc__nm_get_netievent_udpread(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
+ isc__nm_get_netievent_udpread(sock->worker, sock);
+ isc__nm_enqueue_ievent(sock->worker,
(isc__netievent_t *)ievent);
}
}
-static void
-udp_stop_cb(uv_handle_t *handle) {
- isc_nmsocket_t *sock = uv_handle_get_data(handle);
- uv_handle_set_data(handle, NULL);
-
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
- REQUIRE(atomic_load(&sock->closing));
-
- if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false },
- true)) {
- UNREACHABLE();
- }
-
- isc__nm_incstats(sock, STATID_CLOSE);
-
- atomic_store(&sock->listening, false);
-
- isc__nmsocket_detach(&sock);
-}
-
static void
udp_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL);
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false },
isc__nm_incstats(sock, STATID_CLOSE);
if (sock->server != NULL) {
+ /* server socket (accept) */
isc__nmsocket_detach(&sock->server);
}
- atomic_store(&sock->connected, false);
- atomic_store(&sock->listening, false);
-
- isc__nmsocket_prep_destroy(sock);
+ if (sock->parent != NULL) {
+ /* listening socket (listen) */
+ atomic_store(&sock->listening, false);
+ isc__nmsocket_detach(&sock);
+ } else {
+ /* client and server sockets */
+ atomic_store(&sock->connected, false);
+ atomic_store(&sock->listening, false);
+ isc__nmsocket_prep_destroy(sock);
+ }
}
static void
isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL);
- if (sock->parent) {
- uv_close(&sock->uv_handle.handle, udp_stop_cb);
- } else {
- uv_close(&sock->uv_handle.handle, udp_close_cb);
- }
-}
-
-static void
-stop_udp_child(isc_nmsocket_t *sock) {
- REQUIRE(sock->type == isc_nm_udpsocket);
- REQUIRE(sock->tid == isc_nm_tid());
-
- if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
- true)) {
- return;
- }
-
- udp_close_direct(sock);
-
- atomic_fetch_sub(&sock->parent->rchildren, 1);
-
- isc_barrier_wait(&sock->parent->stoplistening);
-}
-
-static void
-stop_udp_parent(isc_nmsocket_t *sock) {
- isc_nmsocket_t *csock = NULL;
-
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
- REQUIRE(sock->type == isc_nm_udplistener);
-
- isc_barrier_init(&sock->stoplistening, sock->nchildren);
-
- for (size_t i = 0; i < sock->nchildren; i++) {
- csock = &sock->children[i];
- REQUIRE(VALID_NMSOCK(csock));
-
- if ((int)i == isc_nm_tid()) {
- /*
- * We need to schedule closing the other sockets first
- */
- continue;
- }
-
- atomic_store(&csock->active, false);
- enqueue_stoplistening(csock);
- }
-
- csock = &sock->children[isc_nm_tid()];
- atomic_store(&csock->active, false);
- stop_udp_child(csock);
-
- atomic_store(&sock->closed, true);
- isc__nmsocket_prep_destroy(sock);
-}
-
-static void
-udp_close_direct(isc_nmsocket_t *sock) {
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
-
- uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
- uv_close((uv_handle_t *)&sock->read_timer, read_timer_close_cb);
-}
-
-void
-isc__nm_async_udpclose(isc__networker_t *worker, isc__netievent_t *ev0) {
- isc__netievent_udpclose_t *ievent = (isc__netievent_udpclose_t *)ev0;
- isc_nmsocket_t *sock = ievent->sock;
-
- REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
- UNUSED(worker);
-
- udp_close_direct(sock);
+ uv_close(&sock->uv_handle.handle, udp_close_cb);
}
void
isc__nm_udp_close(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_udpsocket);
- REQUIRE(!isc__nmsocket_active(sock));
+ REQUIRE(sock->tid == isc_tid());
if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
true)) {
return;
}
- if (sock->tid == isc_nm_tid()) {
- udp_close_direct(sock);
- } else {
- isc__netievent_udpclose_t *ievent =
- isc__nm_get_netievent_udpclose(sock->mgr, sock);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
- }
+ uv_close((uv_handle_t *)&sock->read_timer, read_timer_close_cb);
}
void
isc__nm_udp_shutdown(isc_nmsocket_t *sock) {
+ isc__networker_t *worker = NULL;
+
REQUIRE(VALID_NMSOCK(sock));
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(sock->type == isc_nm_udpsocket);
+ worker = sock->worker;
+
/*
* If the socket is active, mark it inactive and
* continue. If it isn't active, stop now.
* interested in the callback.
*/
if (sock->statichandle != NULL) {
- if (isc__nm_closing(sock)) {
+ if (isc__nm_closing(worker)) {
isc__nm_failed_read_cb(sock, ISC_R_SHUTTINGDOWN, false);
} else {
isc__nm_failed_read_cb(sock, ISC_R_CANCELED, false);
}
/*
- * Otherwise, we just send the socket to abyss...
+ * Ignore the listening sockets
*/
- if (sock->parent == NULL) {
- isc__nmsocket_prep_destroy(sock);
+ if (sock->parent != NULL) {
+ return;
}
+
+ /*
+ * Otherwise, we just send the socket to abyss...
+ */
+ isc__nmsocket_prep_destroy(sock);
}
void
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_udpsocket);
- ievent = isc__nm_get_netievent_udpcancel(sock->mgr, sock, handle);
+ ievent = isc__nm_get_netievent_udpcancel(sock->worker, sock, handle);
- isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
- (isc__netievent_t *)ievent);
+ isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
}
void
sock = ievent->sock;
- REQUIRE(sock->tid == isc_nm_tid());
+ REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->client));
isc__nm_failed_read_cb(sock, ISC_R_EOF, false);
+++ /dev/null
-/*
- * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
- *
- * SPDX-License-Identifier: MPL-2.0
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at https://mozilla.org/MPL/2.0/.
- *
- * See the COPYRIGHT file distributed with this work for additional
- * information regarding copyright ownership.
- */
-
-#pragma once
-
-#include <isc/mem.h>
-#include <isc/result.h>
-
-void
-isc__netmgr_create(isc_mem_t *mctx, uint32_t workers, isc_nm_t **netgmrp);
-/*%<
- * Creates a new network manager with 'workers' worker threads,
- * and starts it running.
- */
-
-void
-isc__netmgr_destroy(isc_nm_t **netmgrp);
-/*%<
- * Similar to isc_nm_detach(), but actively waits for all other references
- * to be gone before returning.
- */
-
-void
-isc__netmgr_shutdown(isc_nm_t *mgr);
-/*%<
- * Shut down all active connections, freeing associated resources;
- * prevent new connections from being established.
- */
/*! \file */
-/*
- * XXXRTH Need to document the states a task can be in, and the rules
- * for changing states.
- */
-
#include <stdbool.h>
#include <unistd.h>
-#include <isc/app.h>
+#include <isc/async.h>
#include <isc/atomic.h>
#include <isc/backtrace.h>
#include <isc/condition.h>
#include <isc/event.h>
+#include <isc/job.h>
#include <isc/log.h>
+#include <isc/loop.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/once.h>
#include <isc/string.h>
#include <isc/task.h>
#include <isc/thread.h>
+#include <isc/tid.h>
#include <isc/time.h>
#include <isc/util.h>
#include <isc/uv.h>
#include <json_object.h>
#endif /* HAVE_JSON_C */
-#include "task_p.h"
+#include "loop_p.h"
/*
* Task manager is built around 'as little locking as possible' concept.
*/
#ifdef ISC_TASK_TRACE
-#define XTRACE(m) \
- fprintf(stderr, "task %p thread %zu: %s\n", task, isc_tid_v, (m))
+#define XTRACE(m) \
+ fprintf(stderr, "task %p.tid %zu thread %zu: %s\n", task, \
+ (size_t)task->tid, (size_t)task->tid, (m))
#define XTTRACE(t, m) \
- fprintf(stderr, "task %p thread %zu: %s\n", (t), isc_tid_v, (m))
-#define XTHREADTRACE(m) fprintf(stderr, "thread %zu: %s\n", isc_tid_v, (m))
+ fprintf(stderr, "task %p thread %zu: %s\n", (t), (size_t)isc_tid(), (m))
+#define XTHREADTRACE(m) \
+ fprintf(stderr, "thread %zu: %s\n", (size_t)isc_tid(), (m))
#else /* ifdef ISC_TASK_TRACE */
#define XTRACE(m)
#define XTTRACE(t, m)
isc_taskmgr_t *manager;
isc_mutex_t lock;
/* Locked by task lock. */
- int tid;
+ isc_loop_t *loop;
+ uint32_t tid;
task_state_t state;
isc_refcount_t references;
isc_eventlist_t events;
unsigned int nevents;
- unsigned int quantum;
isc_stdtime_t now;
isc_time_t tnow;
char name[16];
void *tag;
- /* Protected by atomics */
- atomic_bool shuttingdown;
/* Locked by task manager lock. */
#if TASKMGR_TRACE
char func[PATH_MAX];
void *backtrace[ISC__TASKTRACE_SIZE];
int backtrace_size;
#endif
+ LINK(isc_task_t) qlink;
LINK(isc_task_t) link;
};
-#define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
-#define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
+#define TASK_SHUTTINGDOWN(t) (atomic_load_acquire(&(t)->manager->shuttingdown))
+
+#define TASK_TASKMGR_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
+#define VALID_TASKMGR(m) ISC_MAGIC_VALID(m, TASK_TASKMGR_MAGIC)
+
+typedef ISC_LIST(isc_task_t) isc_tasklist_t;
struct isc_taskmgr {
/* Not locked. */
isc_refcount_t references;
isc_mem_t *mctx;
isc_mutex_t lock;
- atomic_uint_fast32_t tasks_count;
- isc_nm_t *netmgr;
- uint32_t nworkers;
+ isc_loopmgr_t *loopmgr;
+ uint32_t nloops;
/* Locked by task manager lock. */
- unsigned int default_quantum;
- LIST(isc_task_t) tasks;
+ isc_mutex_t *locks;
+ isc_tasklist_t *tasks;
atomic_uint_fast32_t mode;
- atomic_bool exclusive_req;
- bool exiting;
+ uint32_t exclusive_req;
+ atomic_bool shuttingdown;
isc_task_t *excl;
};
-#define DEFAULT_DEFAULT_QUANTUM 25
-
-/*%
- * The following are intended for internal use (indicated by "isc__"
- * prefix) but are not declared as static, allowing direct access from
- * unit tests etc.
- */
-
-bool
-isc_task_purgeevent(isc_task_t *task, isc_event_t *event);
-void
-isc_taskmgr_setexcltask(isc_taskmgr_t *mgr, isc_task_t *task);
-isc_result_t
-isc_taskmgr_excltask(isc_taskmgr_t *mgr, isc_task_t **taskp);
+static void
+task_setstate(isc_task_t *task, task_state_t state);
/***
*** Tasks.
***/
static void
-task_destroy(isc_task_t *task) {
- isc_taskmgr_t *manager = task->manager;
- isc_mem_t *mctx = manager->mctx;
+task_destroy(void *arg) {
+ isc_task_t *task = arg;
+ isc_loop_t *loop = task->loop;
+ isc_taskmgr_t *taskmgr = task->manager;
REQUIRE(EMPTY(task->events));
- REQUIRE(task->nevents == 0);
- REQUIRE(task->state == task_state_done);
XTRACE("task_finished");
+ task_setstate(task, task_state_done);
+
isc_refcount_destroy(&task->references);
- LOCK(&manager->lock);
- UNLINK(manager->tasks, task, link);
- atomic_fetch_sub(&manager->tasks_count, 1);
- UNLOCK(&manager->lock);
+ LOCK(&taskmgr->locks[task->tid]);
+ UNLINK(taskmgr->tasks[task->tid], task, link);
+ UNLOCK(&taskmgr->locks[task->tid]);
isc_mutex_destroy(&task->lock);
task->magic = 0;
- isc_mem_put(mctx, task, sizeof(*task));
- isc_taskmgr_detach(&manager);
+ isc_mem_put(loop->mctx, task, sizeof(*task));
+
+ isc_taskmgr_detach(&taskmgr);
+
+ isc_loop_detach(&loop);
}
+ISC_REFCOUNT_IMPL(isc_task, task_destroy);
+
+static isc_result_t
+task_run(isc_task_t *task);
+static void
+task_ready(isc_task_t *task);
+static void
+task__run(void *arg);
isc_result_t
-isc__task_create(isc_taskmgr_t *manager, unsigned int quantum,
- isc_task_t **taskp, int tid ISC__TASKFLARG) {
+isc__task_create(isc_taskmgr_t *taskmgr, isc_task_t **taskp,
+ int tid ISC__TASKFLARG) {
isc_task_t *task = NULL;
- bool exiting;
+ isc_loop_t *loop = NULL;
- REQUIRE(VALID_MANAGER(manager));
+ REQUIRE(VALID_TASKMGR(taskmgr));
REQUIRE(taskp != NULL && *taskp == NULL);
- REQUIRE(tid >= 0 && tid < (int)manager->nworkers);
+ REQUIRE(tid >= 0 && tid < (int)taskmgr->nloops);
+
+ if (atomic_load(&taskmgr->shuttingdown)) {
+ return (ISC_R_SHUTTINGDOWN);
+ }
- XTRACE("isc_task_create");
+ loop = isc_loop_get(taskmgr->loopmgr, tid);
- task = isc_mem_get(manager->mctx, sizeof(*task));
+ task = isc_mem_get(loop->mctx, sizeof(*task));
*task = (isc_task_t){
- .state = task_state_idle,
.tid = tid,
+ .state = task_state_idle,
};
+ isc_loop_attach(loop, &task->loop);
+
#if TASKMGR_TRACE
strlcpy(task->func, func, sizeof(task->func));
strlcpy(task->file, file, sizeof(task->file));
ISC__TASKTRACE_SIZE);
#endif
- isc_taskmgr_attach(manager, &task->manager);
+ isc_taskmgr_attach(taskmgr, &task->manager);
isc_mutex_init(&task->lock);
isc_refcount_init(&task->references, 1);
+
INIT_LIST(task->events);
- task->quantum = (quantum > 0) ? quantum : manager->default_quantum;
- atomic_init(&task->shuttingdown, false);
+
isc_time_settoepoch(&task->tnow);
- memset(task->name, 0, sizeof(task->name));
+
INIT_LINK(task, link);
- task->magic = TASK_MAGIC;
+ INIT_LINK(task, qlink);
- LOCK(&manager->lock);
- exiting = manager->exiting;
- if (!exiting) {
- APPEND(manager->tasks, task, link);
- atomic_fetch_add(&manager->tasks_count, 1);
- }
- UNLOCK(&manager->lock);
+ task->magic = TASK_MAGIC;
- if (exiting) {
- isc_refcount_decrement(&task->references);
- isc_refcount_destroy(&task->references);
- isc_mutex_destroy(&task->lock);
- isc_taskmgr_detach(&task->manager);
- isc_mem_put(manager->mctx, task, sizeof(*task));
- return (ISC_R_SHUTTINGDOWN);
- }
+ LOCK(&taskmgr->locks[task->tid]);
+ APPEND(taskmgr->tasks[task->tid], task, link);
+ UNLOCK(&taskmgr->locks[task->tid]);
*taskp = task;
return (ISC_R_SUCCESS);
}
-void
-isc_task_attach(isc_task_t *source, isc_task_t **targetp) {
- /*
- * Attach *targetp to source.
- */
-
- REQUIRE(VALID_TASK(source));
- REQUIRE(targetp != NULL && *targetp == NULL);
+static void
+task_setstate(isc_task_t *task, task_state_t state) {
+ switch (state) {
+ case task_state_idle:
+ INSIST(task->state == task_state_running);
+ break;
+ case task_state_ready:
+ if (task->state == task_state_idle) {
+ INSIST(EMPTY(task->events));
+ } else {
+ INSIST(task->state == task_state_running);
+ }
+ break;
+ case task_state_running:
+ INSIST(task->state == task_state_ready);
+ break;
+ case task_state_done:
+ INSIST(task->state == task_state_ready ||
+ task->state == task_state_running ||
+ task->state == task_state_idle);
+ break;
+ default:
+ UNREACHABLE();
+ }
- XTTRACE(source, "isc_task_attach");
+ task->state = state;
+}
- isc_refcount_increment(&source->references);
+static void
+task__run(void *arg) {
+ isc_task_t *task = arg;
+ isc_result_t result = task_run(task);
- *targetp = source;
+ switch (result) {
+ case ISC_R_QUOTA:
+ task_ready(task);
+ break;
+ case ISC_R_SUCCESS:
+ case ISC_R_NOMORE:
+ break;
+ default:
+ UNREACHABLE();
+ }
}
/*
*/
static void
task_ready(isc_task_t *task) {
- isc_taskmgr_t *manager = task->manager;
- REQUIRE(VALID_MANAGER(manager));
-
- XTRACE("task_ready");
-
- isc_task_attach(task, &(isc_task_t *){ NULL });
- LOCK(&task->lock);
- isc_nm_task_enqueue(manager->netmgr, task, task->tid);
- UNLOCK(&task->lock);
-}
-
-void
-isc_task_ready(isc_task_t *task) {
- task_ready(task);
-}
-
-void
-isc_task_detach(isc_task_t **taskp) {
- isc_task_t *task;
-
- REQUIRE(taskp != NULL);
- REQUIRE(VALID_TASK(*taskp));
-
- task = *taskp;
- *taskp = NULL;
-
- XTRACE("isc_task_detach");
-
- if (isc_refcount_decrement(&task->references) == 1) {
- LOCK(&task->lock);
- task->state = task_state_done;
- UNLOCK(&task->lock);
-
- task_destroy(task);
- }
+ isc_async_run(task->loop, task__run, task);
}
static bool
if (task->state == task_state_idle) {
was_idle = true;
- INSIST(EMPTY(task->events));
- task->state = task_state_ready;
+ task_setstate(task, task_state_ready);
+ isc_task_attach(task, &(isc_task_t *){ NULL });
}
INSIST(task->state == task_state_ready ||
task->state == task_state_running);
ENQUEUE(task->events, event, ev_link);
- task->nevents++;
return (was_idle);
}
void
isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
- isc_task_t *task;
+ isc_task_t *task = NULL;
REQUIRE(taskp != NULL);
+
task = *taskp;
*taskp = NULL;
+
REQUIRE(VALID_TASK(task));
XTRACE("isc_task_sendanddetach");
isc_task_detach(&task);
}
-bool
-isc_task_purgeevent(isc_task_t *task, isc_event_t *event) {
- bool found = false;
-
- /*
- * Purge 'event' from a task's event queue.
- */
-
- REQUIRE(VALID_TASK(task));
-
- /*
- * If 'event' is on the task's event queue, it will be purged, 'event'
- * does not have to be on the task's event queue; in fact, it can even
- * be an invalid pointer. Purging only occurs if the event is actually
- * on the task's event queue.
- *
- * Purging never changes the state of the task.
- */
-
- LOCK(&task->lock);
- if (ISC_LINK_LINKED(event, ev_link)) {
- DEQUEUE(task->events, event, ev_link);
- task->nevents--;
- found = true;
- }
- UNLOCK(&task->lock);
-
- if (!found) {
- return (false);
- }
-
- isc_event_free(&event);
-
- return (true);
-}
-
void
isc_task_setname(isc_task_t *task, const char *name, void *tag) {
/*
UNLOCK(&task->lock);
}
+isc_loopmgr_t *
+isc_task_getloopmgr(isc_task_t *task) {
+ REQUIRE(VALID_TASK(task));
+
+ return (task->manager->loopmgr);
+}
+
const char *
isc_task_getname(isc_task_t *task) {
REQUIRE(VALID_TASK(task));
return (task->tag);
}
-isc_nm_t *
-isc_task_getnetmgr(isc_task_t *task) {
- REQUIRE(VALID_TASK(task));
-
- return (task->manager->netmgr);
-}
-
-void
-isc_task_setquantum(isc_task_t *task, unsigned int quantum) {
- REQUIRE(VALID_TASK(task));
-
- LOCK(&task->lock);
- task->quantum = (quantum > 0) ? quantum
- : task->manager->default_quantum;
- UNLOCK(&task->lock);
-}
-
/***
*** Task Manager.
***/
static isc_result_t
task_run(isc_task_t *task) {
- unsigned int dispatch_count = 0;
isc_event_t *event = NULL;
- isc_result_t result = ISC_R_SUCCESS;
- uint32_t quantum;
+ isc_result_t result = ISC_R_UNSET;
+ isc_eventlist_t events;
REQUIRE(VALID_TASK(task));
LOCK(&task->lock);
- quantum = task->quantum;
- if (task->state != task_state_ready) {
- goto done;
- }
+ ISC_LIST_INIT(events);
+ ISC_LIST_MOVE(events, task->events);
- INSIST(task->state == task_state_ready);
- task->state = task_state_running;
+ REQUIRE(task->state == task_state_ready);
+
+ task_setstate(task, task_state_running);
XTRACE("running");
XTRACE(task->name);
TIME_NOW(&task->tnow);
task->now = isc_time_seconds(&task->tnow);
+ UNLOCK(&task->lock);
- while (true) {
- if (!EMPTY(task->events)) {
- event = HEAD(task->events);
- DEQUEUE(task->events, event, ev_link);
- task->nevents--;
-
- /*
- * Execute the event action.
- */
- XTRACE("execute action");
- XTRACE(task->name);
- if (event->ev_action != NULL) {
- UNLOCK(&task->lock);
- (event->ev_action)(task, event);
- LOCK(&task->lock);
- }
- XTRACE("execution complete");
- dispatch_count++;
- }
+ event = ISC_LIST_HEAD(events);
+ while (event != NULL) {
+ isc_event_t *next = ISC_LIST_NEXT(event, ev_link);
+ ISC_LIST_UNLINK(events, event, ev_link);
- if (EMPTY(task->events)) {
- /*
- * Nothing else to do for this task right now.
- */
- XTRACE("empty");
- if (isc_refcount_current(&task->references) == 0) {
- /*
- * The task is done.
- */
- XTRACE("done");
- task->state = task_state_done;
- } else if (task->state == task_state_running) {
- XTRACE("idling");
- task->state = task_state_idle;
- }
- break;
- } else if (dispatch_count >= quantum) {
- /*
- * Our quantum has expired, but there is more work to be
- * done. We'll requeue it to the ready queue later.
- *
- * We don't check quantum until dispatching at least one
- * event, so the minimum quantum is one.
- */
- XTRACE("quantum");
- task->state = task_state_ready;
- result = ISC_R_QUOTA;
- break;
+ /*
+ * Execute the event action.
+ */
+ XTRACE("execute action");
+ XTRACE(task->name);
+ if (event->ev_action != NULL) {
+ (event->ev_action)(task, event);
}
+ XTRACE("execution complete");
+
+ event = next;
}
-done:
+ LOCK(&task->lock);
+ if (EMPTY(task->events)) {
+ /*
+ * Nothing else to do for this task right now.
+ */
+ XTRACE("empty");
+ XTRACE("idling");
+ task_setstate(task, task_state_idle);
+
+ result = ISC_R_SUCCESS;
+ } else {
+ /*
+ * More tasks were scheduled.
+ */
+ XTRACE("quantum");
+ task_setstate(task, task_state_ready);
+ result = ISC_R_QUOTA;
+ }
UNLOCK(&task->lock);
- isc_task_detach(&task);
- return (result);
-}
+ if (result == ISC_R_SUCCESS) {
+ isc_task_detach(&task);
+ }
-isc_result_t
-isc_task_run(isc_task_t *task) {
- return (task_run(task));
+ return (result);
}
static void
-manager_free(isc_taskmgr_t *manager) {
- isc_refcount_destroy(&manager->references);
- isc_nm_detach(&manager->netmgr);
+taskmgr_destroy(isc_taskmgr_t *taskmgr) {
+ taskmgr->magic = 0;
- isc_mutex_destroy(&manager->lock);
- manager->magic = 0;
- isc_mem_putanddetach(&manager->mctx, manager, sizeof(*manager));
+ for (size_t tid = 0; tid < taskmgr->nloops; tid++) {
+ INSIST(EMPTY(taskmgr->tasks[tid]));
+ isc_mutex_destroy(&taskmgr->locks[tid]);
+ }
+
+ isc_mem_put(taskmgr->mctx, taskmgr->tasks,
+ taskmgr->nloops * sizeof(taskmgr->tasks[0]));
+ isc_mem_put(taskmgr->mctx, taskmgr->locks,
+ taskmgr->nloops * sizeof(taskmgr->locks[0]));
+
+ isc_refcount_destroy(&taskmgr->references);
+ isc_mutex_destroy(&taskmgr->lock);
+ isc_mem_putanddetach(&taskmgr->mctx, taskmgr, sizeof(*taskmgr));
}
void
isc_taskmgr_attach(isc_taskmgr_t *source, isc_taskmgr_t **targetp) {
- REQUIRE(VALID_MANAGER(source));
+ REQUIRE(VALID_TASKMGR(source));
REQUIRE(targetp != NULL && *targetp == NULL);
isc_refcount_increment(&source->references);
void
isc_taskmgr_detach(isc_taskmgr_t **managerp) {
REQUIRE(managerp != NULL);
- REQUIRE(VALID_MANAGER(*managerp));
+ REQUIRE(VALID_TASKMGR(*managerp));
isc_taskmgr_t *manager = *managerp;
*managerp = NULL;
if (isc_refcount_decrement(&manager->references) == 1) {
- manager_free(manager);
+ taskmgr_destroy(manager);
}
}
-isc_result_t
-isc__taskmgr_create(isc_mem_t *mctx, unsigned int default_quantum, isc_nm_t *nm,
- isc_taskmgr_t **managerp) {
- isc_taskmgr_t *manager;
-
- /*
- * Create a new task manager.
- */
+static void
+taskmgr_teardown(void *arg) {
+ isc_taskmgr_t *taskmgr = (void *)arg;
+ uint32_t tid = isc_tid();
+ isc_task_t *excl = NULL;
- REQUIRE(managerp != NULL && *managerp == NULL);
- REQUIRE(nm != NULL);
+ REQUIRE(VALID_TASKMGR(taskmgr));
- manager = isc_mem_get(mctx, sizeof(*manager));
- *manager = (isc_taskmgr_t){ .magic = TASK_MANAGER_MAGIC };
+ atomic_store(&taskmgr->shuttingdown, true);
- isc_mutex_init(&manager->lock);
+ isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_NETMGR,
+ ISC_LOG_DEBUG(1), "Shutting down task manager");
- if (default_quantum == 0) {
- default_quantum = DEFAULT_DEFAULT_QUANTUM;
+ LOCK(&taskmgr->lock);
+ if (taskmgr->excl != NULL && taskmgr->excl->tid == tid) {
+ XTTRACE(taskmgr->excl, "taskmgr_teardown: excl");
+ excl = taskmgr->excl;
+ taskmgr->excl = NULL;
}
- manager->default_quantum = default_quantum;
-
- isc_nm_attach(nm, &manager->netmgr);
- manager->nworkers = isc_nm_getnworkers(nm);
+ UNLOCK(&taskmgr->lock);
+ if (excl != NULL) {
+ isc_task_detach(&excl);
+ }
+}
- INIT_LIST(manager->tasks);
- atomic_init(&manager->exclusive_req, false);
- atomic_init(&manager->tasks_count, 0);
+void
+isc_taskmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr,
+ isc_taskmgr_t **taskmgrp) {
+ isc_taskmgr_t *taskmgr = NULL;
- isc_mem_attach(mctx, &manager->mctx);
+ /*
+ * Create a new task manager.
+ */
- isc_refcount_init(&manager->references, 1);
+ REQUIRE(taskmgrp != NULL && *taskmgrp == NULL);
- *managerp = manager;
+ taskmgr = isc_mem_get(mctx, sizeof(*taskmgr));
+ *taskmgr = (isc_taskmgr_t){
+ .loopmgr = loopmgr,
+ .magic = TASK_TASKMGR_MAGIC,
+ .nloops = isc_loopmgr_nloops(loopmgr),
+ };
- return (ISC_R_SUCCESS);
-}
+ isc_mem_attach(mctx, &taskmgr->mctx);
-void
-isc__taskmgr_shutdown(isc_taskmgr_t *manager) {
- isc_task_t *task = NULL;
+ isc_mutex_init(&taskmgr->lock);
- REQUIRE(VALID_MANAGER(manager));
+ taskmgr->tasks = isc_mem_get(
+ taskmgr->mctx, taskmgr->nloops * sizeof(taskmgr->tasks[0]));
+ taskmgr->locks = isc_mem_get(
+ taskmgr->mctx, taskmgr->nloops * sizeof(taskmgr->locks[0]));
- XTHREADTRACE("isc_taskmgr_shutdown");
- /*
- * Only one non-worker thread may ever call this routine.
- * If a worker thread wants to initiate shutdown of the
- * task manager, it should ask some non-worker thread to call
- * isc_taskmgr_destroy(), e.g. by signalling a condition variable
- * that the startup thread is sleeping on.
- */
- LOCK(&manager->lock);
- if (manager->excl != NULL) {
- task = manager->excl;
- manager->excl = NULL;
+ for (size_t tid = 0; tid < taskmgr->nloops; tid++) {
+ isc_mutex_init(&taskmgr->locks[tid]);
+ ISC_LIST_INIT(taskmgr->tasks[tid]);
}
- /*
- * Make sure we only get called once.
- */
- INSIST(manager->exiting == false);
- manager->exiting = true;
+ isc_loopmgr_teardown(loopmgr, taskmgr_teardown, taskmgr);
- UNLOCK(&manager->lock);
+ isc_refcount_init(&taskmgr->references, 1);
- if (task != NULL) {
- isc_task_detach(&task);
- }
+ *taskmgrp = taskmgr;
}
void
-isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
- REQUIRE(managerp != NULL && VALID_MANAGER(*managerp));
+isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
+ isc_taskmgr_t *manager = NULL;
+ uint_fast32_t refs;
+
+ REQUIRE(managerp != NULL && VALID_TASKMGR(*managerp));
XTHREADTRACE("isc_taskmgr_destroy");
- int counter = 0;
- while (isc_refcount_current(&(*managerp)->references) > 1 &&
- counter++ < 1000) {
- uv_sleep(10);
- }
+ manager = *managerp;
+ *managerp = NULL;
+ /*
+ * The isc_loopmgr is not running, there's nothing that can finish now
+ */
+ refs = isc_refcount_decrement(&manager->references);
#if TASKMGR_TRACE
- if (isc_refcount_current(&(*managerp)->references) > 1) {
+ if (refs > 1) {
isc__taskmgr_dump_active(*managerp);
}
- INSIST(isc_refcount_current(&(*managerp)->references) == 1);
#endif
-
- while (isc_refcount_current(&(*managerp)->references) > 1) {
- uv_sleep(10);
- }
-
- isc_taskmgr_detach(managerp);
+ INSIST(refs == 1);
+ taskmgr_destroy(manager);
}
void
isc_taskmgr_setexcltask(isc_taskmgr_t *mgr, isc_task_t *task) {
- REQUIRE(VALID_MANAGER(mgr));
+ REQUIRE(VALID_TASKMGR(mgr));
REQUIRE(VALID_TASK(task));
LOCK(&task->lock);
isc_taskmgr_excltask(isc_taskmgr_t *mgr, isc_task_t **taskp) {
isc_result_t result;
- REQUIRE(VALID_MANAGER(mgr));
+ REQUIRE(VALID_TASKMGR(mgr));
REQUIRE(taskp != NULL && *taskp == NULL);
+ if (atomic_load(&mgr->shuttingdown)) {
+ return (ISC_R_SHUTTINGDOWN);
+ }
+
LOCK(&mgr->lock);
if (mgr->excl != NULL) {
isc_task_attach(mgr->excl, taskp);
result = ISC_R_SUCCESS;
- } else if (mgr->exiting) {
- result = ISC_R_SHUTTINGDOWN;
} else {
result = ISC_R_NOTFOUND;
}
return (result);
}
-isc_result_t
+void
isc_task_beginexclusive(isc_task_t *task) {
isc_taskmgr_t *manager;
+ bool first;
REQUIRE(VALID_TASK(task));
LOCK(&manager->lock);
REQUIRE(task == manager->excl ||
- (manager->exiting && manager->excl == NULL));
+ (atomic_load(&manager->shuttingdown) && manager->excl == NULL));
+ first = (manager->exclusive_req++ == 0);
UNLOCK(&manager->lock);
- if (!atomic_compare_exchange_strong(&manager->exclusive_req,
- &(bool){ false }, true))
- {
- return (ISC_R_LOCKBUSY);
+ if (!first) {
+ return;
}
if (isc_log_wouldlog(isc_lctx, ISC_LOG_DEBUG(1))) {
"exclusive task mode: %s", "starting");
}
- isc_nm_pause(manager->netmgr);
+ isc_loopmgr_pause(manager->loopmgr);
if (isc_log_wouldlog(isc_lctx, ISC_LOG_DEBUG(1))) {
isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
ISC_LOGMODULE_OTHER, ISC_LOG_DEBUG(1),
"exclusive task mode: %s", "started");
}
-
- return (ISC_R_SUCCESS);
}
void
isc_task_endexclusive(isc_task_t *task) {
isc_taskmgr_t *manager = NULL;
+ bool last;
REQUIRE(VALID_TASK(task));
REQUIRE(task->state == task_state_running);
manager = task->manager;
+ LOCK(&manager->lock);
+ INSIST(manager->exclusive_req > 0);
+ last = (--manager->exclusive_req == 0);
+ UNLOCK(&manager->lock);
+
+ if (!last) {
+ return;
+ }
+
if (isc_log_wouldlog(isc_lctx, ISC_LOG_DEBUG(1))) {
isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
ISC_LOGMODULE_OTHER, ISC_LOG_DEBUG(1),
"exclusive task mode: %s", "ending");
}
- isc_nm_resume(manager->netmgr);
+ isc_loopmgr_resume(manager->loopmgr);
if (isc_log_wouldlog(isc_lctx, ISC_LOG_DEBUG(1))) {
isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
ISC_LOGMODULE_OTHER, ISC_LOG_DEBUG(1),
"exclusive task mode: %s", "ended");
}
-
- atomic_compare_exchange_enforced(&manager->exclusive_req,
- &(bool){ true }, false);
}
#ifdef HAVE_LIBXML2
LOCK(&mgr->lock);
- /*
- * Write out the thread-model, and some details about each depending
- * on which type is enabled.
- */
- TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model"));
- TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
- TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded"));
- TRY0(xmlTextWriterEndElement(writer)); /* type */
-
- TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum"));
- TRY0(xmlTextWriterWriteFormatString(writer, "%d",
- mgr->default_quantum));
- TRY0(xmlTextWriterEndElement(writer)); /* default-quantum */
-
- TRY0(xmlTextWriterEndElement(writer)); /* thread-model */
-
TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks"));
- task = ISC_LIST_HEAD(mgr->tasks);
- while (task != NULL) {
- LOCK(&task->lock);
- TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "task"));
-
- if (task->name[0] != 0) {
+ for (size_t tid = 0; tid < mgr->nloops; tid++) {
+ for (task = ISC_LIST_HEAD(mgr->tasks[tid]); task != NULL;
+ task = ISC_LIST_NEXT(task, link))
+ {
+ LOCK(&task->lock);
TRY0(xmlTextWriterStartElement(writer,
- ISC_XMLCHAR "name"));
- TRY0(xmlTextWriterWriteFormatString(writer, "%s",
- task->name));
- TRY0(xmlTextWriterEndElement(writer)); /* name */
- }
+ ISC_XMLCHAR "task"));
+
+ if (task->name[0] != 0) {
+ TRY0(xmlTextWriterStartElement(
+ writer, ISC_XMLCHAR "name"));
+ TRY0(xmlTextWriterWriteFormatString(
+ writer, "%s", task->name));
+ TRY0(xmlTextWriterEndElement(writer)); /* name
+ */
+ }
- TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "reference"
+ TRY0(xmlTextWriterStartElement(writer,
+ ISC_XMLCHAR "reference"
"s"));
- TRY0(xmlTextWriterWriteFormatString(
- writer, "%" PRIuFAST32,
- isc_refcount_current(&task->references)));
- TRY0(xmlTextWriterEndElement(writer)); /* references */
-
- TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "id"));
- TRY0(xmlTextWriterWriteFormatString(writer, "%p", task));
- TRY0(xmlTextWriterEndElement(writer)); /* id */
+ TRY0(xmlTextWriterWriteFormatString(
+ writer, "%" PRIuFAST32,
+ isc_refcount_current(&task->references)));
+ TRY0(xmlTextWriterEndElement(writer)); /* references */
- TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "state"));
- TRY0(xmlTextWriterWriteFormatString(writer, "%s",
- statenames[task->state]));
- TRY0(xmlTextWriterEndElement(writer)); /* state */
-
- TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum"));
- TRY0(xmlTextWriterWriteFormatString(writer, "%d",
- task->quantum));
- TRY0(xmlTextWriterEndElement(writer)); /* quantum */
+ TRY0(xmlTextWriterStartElement(writer,
+ ISC_XMLCHAR "id"));
+ TRY0(xmlTextWriterWriteFormatString(writer, "%p",
+ task));
+ TRY0(xmlTextWriterEndElement(writer)); /* id */
- TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "events"));
- TRY0(xmlTextWriterWriteFormatString(writer, "%d",
- task->nevents));
- TRY0(xmlTextWriterEndElement(writer)); /* events */
+ TRY0(xmlTextWriterStartElement(writer,
+ ISC_XMLCHAR "state"));
+ TRY0(xmlTextWriterWriteFormatString(
+ writer, "%s", statenames[task->state]));
+ TRY0(xmlTextWriterEndElement(writer)); /* state */
- TRY0(xmlTextWriterEndElement(writer));
+ TRY0(xmlTextWriterEndElement(writer));
- UNLOCK(&task->lock);
- task = ISC_LIST_NEXT(task, link);
+ UNLOCK(&task->lock);
+ }
}
TRY0(xmlTextWriterEndElement(writer)); /* tasks */
LOCK(&mgr->lock);
- /*
- * Write out the thread-model, and some details about each depending
- * on which type is enabled.
- */
- obj = json_object_new_string("threaded");
- CHECKMEM(obj);
- json_object_object_add(tasks, "thread-model", obj);
-
- obj = json_object_new_int(mgr->default_quantum);
- CHECKMEM(obj);
- json_object_object_add(tasks, "default-quantum", obj);
-
array = json_object_new_array();
CHECKMEM(array);
- for (task = ISC_LIST_HEAD(mgr->tasks); task != NULL;
- task = ISC_LIST_NEXT(task, link))
- {
- char buf[255];
-
- LOCK(&task->lock);
+ for (size_t tid = 0; tid < mgr->nloops; tid++) {
+ for (task = ISC_LIST_HEAD(mgr->tasks[tid]); task != NULL;
+ task = ISC_LIST_NEXT(task, link))
+ {
+ char buf[255];
- taskobj = json_object_new_object();
- CHECKMEM(taskobj);
- json_object_array_add(array, taskobj);
+ LOCK(&task->lock);
- snprintf(buf, sizeof(buf), "%p", task);
- obj = json_object_new_string(buf);
- CHECKMEM(obj);
- json_object_object_add(taskobj, "id", obj);
+ taskobj = json_object_new_object();
+ CHECKMEM(taskobj);
+ json_object_array_add(array, taskobj);
- if (task->name[0] != 0) {
- obj = json_object_new_string(task->name);
+ snprintf(buf, sizeof(buf), "%p", task);
+ obj = json_object_new_string(buf);
CHECKMEM(obj);
- json_object_object_add(taskobj, "name", obj);
- }
-
- obj = json_object_new_int(
- isc_refcount_current(&task->references));
- CHECKMEM(obj);
- json_object_object_add(taskobj, "references", obj);
+ json_object_object_add(taskobj, "id", obj);
- obj = json_object_new_string(statenames[task->state]);
- CHECKMEM(obj);
- json_object_object_add(taskobj, "state", obj);
+ if (task->name[0] != 0) {
+ obj = json_object_new_string(task->name);
+ CHECKMEM(obj);
+ json_object_object_add(taskobj, "name", obj);
+ }
- obj = json_object_new_int(task->quantum);
- CHECKMEM(obj);
- json_object_object_add(taskobj, "quantum", obj);
+ obj = json_object_new_int(
+ isc_refcount_current(&task->references));
+ CHECKMEM(obj);
+ json_object_object_add(taskobj, "references", obj);
- obj = json_object_new_int(task->nevents);
- CHECKMEM(obj);
- json_object_object_add(taskobj, "events", obj);
+ obj = json_object_new_string(statenames[task->state]);
+ CHECKMEM(obj);
+ json_object_object_add(taskobj, "state", obj);
- UNLOCK(&task->lock);
+ UNLOCK(&task->lock);
+ }
}
json_object_object_add(tasks, "tasks", array);
LOCK(&taskmgr->lock);
fprintf(stderr, "- taskmgr: %p\n", taskmgr);
- for (isc_task_t *task = ISC_LIST_HEAD(taskmgr->tasks); task != NULL;
- task = ISC_LIST_NEXT(task, link))
- {
- task_dump(task);
+ for (size_t tid = 0; tid < taskmgr->nloops; tid++) {
+ for (isc_task_t *task = ISC_LIST_HEAD(taskmgr->tasks[tid]);
+ task != NULL; task = ISC_LIST_NEXT(task, link))
+ {
+ task_dump(task);
+ }
}
UNLOCK(&taskmgr->lock);
+++ /dev/null
-/*
- * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
- *
- * SPDX-License-Identifier: MPL-2.0
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, you can obtain one at https://mozilla.org/MPL/2.0/.
- *
- * See the COPYRIGHT file distributed with this work for additional
- * information regarding copyright ownership.
- */
-
-#pragma once
-
-#include <isc/mem.h>
-#include <isc/result.h>
-#include <isc/task.h>
-
-isc_result_t
-isc__taskmgr_create(isc_mem_t *mctx, unsigned int default_quantum, isc_nm_t *nm,
- isc_taskmgr_t **managerp);
-/*%<
- * Create a new task manager.
- *
- * Notes:
- *
- *\li If 'default_quantum' is non-zero, then it will be used as the default
- * quantum value when tasks are created. If zero, then an implementation
- * defined default quantum will be used.
- *
- *\li If 'nm' is set then netmgr is paused when an exclusive task mode
- * is requested.
- *
- * Requires:
- *
- *\li 'mctx' is a valid memory context.
- *
- *\li managerp != NULL && *managerp == NULL
- *
- * Ensures:
- *
- *\li On success, '*managerp' will be attached to the newly created task
- * manager.
- *
- * Returns:
- *
- *\li #ISC_R_SUCCESS
- *\li #ISC_R_NOMEMORY
- *\li #ISC_R_NOTHREADS No threads could be created.
- *\li #ISC_R_UNEXPECTED An unexpected error occurred.
- *\li #ISC_R_SHUTTINGDOWN The non-threaded, shared, task
- * manager shutting down.
- */
-
-void
-isc__taskmgr_destroy(isc_taskmgr_t **managerp);
-/*%<
- * Destroy '*managerp'.
- *
- * Notes:
- *
- *\li Calling isc_taskmgr_destroy() will shutdown all tasks managed by
- * *managerp that haven't already been shutdown. The call will block
- * until all tasks have entered the done state.
- *
- *\li isc_taskmgr_destroy() must not be called by a task event action,
- * because it would block forever waiting for the event action to
- * complete. An event action that wants to cause task manager shutdown
- * should request some non-event action thread of execution to do the
- * shutdown, e.g. by signaling a condition variable or using
- * isc_app_shutdown().
- *
- *\li Task manager references are not reference counted, so the caller
- * must ensure that no attempt will be made to use the manager after
- * isc_taskmgr_destroy() returns.
- *
- * Requires:
- *
- *\li '*managerp' is a valid task manager.
- *
- *\li 'isc__taskmgr_shutdown()' and isc__netmgr_shutdown() have been
- * called.
- */
-
-void
-isc__taskmgr_shutdown(isc_taskmgr_t *manager);
-/*%>
- * Shutdown 'manager'.
- *
- * Notes:
- *
- *\li Calling isc__taskmgr_shutdown() will shut down all tasks managed by
- * *managerp that haven't already been shut down.
- *
- * Requires:
- *
- *\li 'manager' is a valid task manager.
- *
- *\li isc_taskmgr_destroy() has not be called previously on '*managerp'.
- *
- * Ensures:
- *
- *\li All resources used by the task manager, and any tasks it managed,
- * have been freed.
- */
isc_timer_create(isc_loop_t *loop, isc_job_cb cb, void *cbarg,
isc_timer_t **timerp) {
int r;
- isc_timer_t *timer = NULL;
+ isc_timer_t *timer;
isc_loopmgr_t *loopmgr = NULL;
REQUIRE(cb != NULL);
#include <stdbool.h>
#include <isc/aes.h>
+#include <isc/async.h>
#include <isc/atomic.h>
#include <isc/formatcheck.h>
#include <isc/fuzz.h>
#include <isc/string.h>
#include <isc/task.h>
#include <isc/thread.h>
+#include <isc/tid.h>
+#include <isc/timer.h>
#include <isc/util.h>
#include <dns/adb.h>
static void
clientmgr_detach(ns_clientmgr_t **mp);
static void
-clientmgr_destroy(ns_clientmgr_t *manager);
+clientmgr_destroy_cb(void *arg);
static void
ns_client_dumpmessage(ns_client_t *client, const char *reason);
static void
ns_interfacemgr_getclientmgr(ifp->mgr);
INSIST(VALID_MANAGER(clientmgr));
- INSIST(clientmgr->tid == isc_nm_tid());
+ INSIST(clientmgr->tid == isc_tid());
client = isc_mem_get(clientmgr->mctx, sizeof(*client));
if (new) {
REQUIRE(VALID_MANAGER(mgr));
REQUIRE(client != NULL);
- REQUIRE(mgr->tid == isc_nm_tid());
+ REQUIRE(mgr->tid == isc_tid());
*client = (ns_client_t){ .magic = 0 };
}
} else {
REQUIRE(NS_CLIENT_VALID(client));
- REQUIRE(client->manager->tid == isc_nm_tid());
+ REQUIRE(client->manager->tid == isc_tid());
/*
* Retain these values from the existing client, but
}
static void
-clientmgr_detach(ns_clientmgr_t **mp) {
- int32_t oldrefs;
- ns_clientmgr_t *mgr = *mp;
- *mp = NULL;
-
- oldrefs = isc_refcount_decrement(&mgr->references);
- isc_log_write(ns_lctx, NS_LOGCATEGORY_CLIENT, NS_LOGMODULE_CLIENT,
- ISC_LOG_DEBUG(3), "clientmgr @%p detach: %d", mgr,
- oldrefs - 1);
- if (oldrefs == 1) {
- clientmgr_destroy(mgr);
- }
-}
-
-static void
-clientmgr_destroy(ns_clientmgr_t *manager) {
+clientmgr_destroy_cb(void *arg) {
+ ns_clientmgr_t *manager = (ns_clientmgr_t *)arg;
MTRACE("clientmgr_destroy");
isc_refcount_destroy(&manager->references);
isc_mem_putanddetach(&manager->mctx, manager, sizeof(*manager));
}
+static void
+clientmgr_detach(ns_clientmgr_t **mp) {
+ int32_t oldrefs;
+ ns_clientmgr_t *mgr = *mp;
+ *mp = NULL;
+
+ oldrefs = isc_refcount_decrement(&mgr->references);
+ isc_log_write(ns_lctx, NS_LOGCATEGORY_CLIENT, NS_LOGMODULE_CLIENT,
+ ISC_LOG_DEBUG(3), "clientmgr @%p detach: %d", mgr,
+ oldrefs - 1);
+ if (oldrefs == 1) {
+ isc_loop_t *loop = isc_loop_get(mgr->loopmgr, mgr->tid);
+
+ /* FIXME: Use isc_loopmgr_teardown() function instead? */
+ isc_async_run(loop, clientmgr_destroy_cb, mgr);
+ }
+}
+
isc_result_t
ns_clientmgr_create(ns_server_t *sctx, isc_taskmgr_t *taskmgr,
isc_loopmgr_t *loopmgr, dns_aclenv_t *aclenv, int tid,
dns_aclenv_attach(aclenv, &manager->aclenv);
- result = isc_task_create(manager->taskmgr, 20, &manager->task,
+ result = isc_task_create(manager->taskmgr, &manager->task,
manager->tid);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
isc_task_setname(manager->task, "clientmgr", NULL);
void
ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
- ns_clientmgr_t *manager;
-
REQUIRE(managerp != NULL);
REQUIRE(VALID_MANAGER(*managerp));
- manager = *managerp;
- *managerp = NULL;
-
MTRACE("destroy");
- if (isc_refcount_decrement(&manager->references) == 1) {
- clientmgr_destroy(manager);
- }
+ clientmgr_detach(managerp);
}
isc_sockaddr_t *
isc_taskmgr_t *taskmgr;
isc_loopmgr_t *loopmgr;
isc_refcount_t references;
- int tid;
+ uint32_t tid;
/* Attached by clients, needed for e.g. recursion */
isc_task_t *task;
#include <stdbool.h>
+#include <isc/loop.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
#include <isc/refcount.h>
#include <isc/result.h>
+#include <isc/sockaddr.h>
#include <dns/geoip.h>
isc_result_t
ns_interfacemgr_create(isc_mem_t *mctx, ns_server_t *sctx,
- isc_taskmgr_t *taskmgr, isc_loopmgr_t *loopmgr,
+ isc_loopmgr_t *loopmgr, isc_taskmgr_t *taskmgr,
isc_nm_t *nm, dns_dispatchmgr_t *dispatchmgr,
isc_task_t *task, dns_geoip_databases_t *geoip,
bool scan, ns_interfacemgr_t **mgrp);
#include <stdbool.h>
#include <isc/interfaceiter.h>
+#include <isc/loop.h>
#include <isc/netmgr.h>
#include <isc/os.h>
#include <isc/random.h>
#include <isc/string.h>
#include <isc/task.h>
+#include <isc/tid.h>
#include <isc/util.h>
#include <dns/acl.h>
isc_mutex_t lock;
isc_mem_t *mctx; /*%< Memory context */
ns_server_t *sctx; /*%< Server context */
+ isc_loopmgr_t *loopmgr; /*%< Loop manager */
isc_taskmgr_t *taskmgr; /*%< Task manager */
isc_task_t *task; /*%< Task */
- isc_loopmgr_t *loopmgr; /*%< Loop manager */
isc_nm_t *nm; /*%< Net manager */
uint32_t ncpus; /*%< Number of workers */
dns_dispatchmgr_t *dispatchmgr;
isc_result_t
ns_interfacemgr_create(isc_mem_t *mctx, ns_server_t *sctx,
- isc_taskmgr_t *taskmgr, isc_loopmgr_t *loopmgr,
+ isc_loopmgr_t *loopmgr, isc_taskmgr_t *taskmgr,
isc_nm_t *nm, dns_dispatchmgr_t *dispatchmgr,
isc_task_t *task, dns_geoip_databases_t *geoip,
bool scan, ns_interfacemgr_t **mgrp) {
mgr = isc_mem_get(mctx, sizeof(*mgr));
*mgr = (ns_interfacemgr_t){
- .taskmgr = taskmgr,
.loopmgr = loopmgr,
+ .taskmgr = taskmgr,
.nm = nm,
.dispatchmgr = dispatchmgr,
.generation = 1,
- .ncpus = isc_nm_getnworkers(nm),
+ .ncpus = isc_loopmgr_nloops(loopmgr),
};
isc_mem_attach(mctx, &mgr->mctx);
isc_mutex_init(&mgr->lock);
- result = isc_task_create(taskmgr, 0, &mgr->task, 0);
+ result = isc_task_create(taskmgr, &mgr->task, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup_lock;
}
ns_interfacemgr_attach(mgr, &imgr);
result = isc_nm_routeconnect(nm, route_connected, imgr);
- if (result == ISC_R_NOTIMPLEMENTED) {
- ns_interfacemgr_detach(&imgr);
- }
if (result != ISC_R_SUCCESS) {
isc_log_write(IFMGR_COMMON_LOGARGS, ISC_LOG_INFO,
"unable to open route socket: %s",
isc_result_totext(result));
+ ns_interfacemgr_detach(&imgr);
}
}
bool purge = true;
REQUIRE(NS_INTERFACEMGR_VALID(mgr));
- REQUIRE(isc_nm_tid() == 0);
+ REQUIRE(isc_tid() == 0);
mgr->generation++; /* Increment the generation count. */
ns_clientmgr_t *
ns_interfacemgr_getclientmgr(ns_interfacemgr_t *mgr) {
- int tid = isc_nm_tid();
+ int tid = isc_tid();
REQUIRE(NS_INTERFACEMGR_VALID(mgr));
REQUIRE(tid >= 0);
/zone.data
/testdata/dnstap/dnstap.file
+/testdata/master/master18.data
#include <isc/buffer.h>
#include <isc/managers.h>
#include <isc/refcount.h>
-#include <isc/task.h>
#include <isc/util.h>
#include <isc/uv.h>
#include <tests/dns.h>
-uv_sem_t sem;
-
/* Timeouts in miliseconds */
#define T_SERVER_INIT 5000
#define T_SERVER_IDLE 5000
#define T_CLIENT_CONNECT 1000
-dns_dispatchmgr_t *dispatchmgr = NULL;
-dns_dispatchset_t *dset = NULL;
-isc_nm_t *connect_nm = NULL;
+/* dns_dispatchset_t *dset = NULL; */
static isc_sockaddr_t udp_server_addr;
static isc_sockaddr_t udp_connect_addr;
static isc_sockaddr_t tcp_server_addr;
static isc_sockaddr_t tcp_connect_addr;
+static dns_dispatchmgr_t *dispatchmgr = NULL;
+static dns_dispatch_t *dispatch = NULL;
+static isc_nmsocket_t *sock = NULL;
+
+static isc_nm_t *connect_nm = NULL;
+
const struct in6_addr in6addr_blackhole = { { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1 } } };
+struct {
+ uint8_t rbuf[12];
+ isc_region_t region;
+ uint8_t message[12];
+} testdata;
+
static int
setup_ephemeral_port(isc_sockaddr_t *addr, sa_family_t family) {
socklen_t addrlen = sizeof(*addr);
- uv_os_sock_t fd = -1;
+ uv_os_sock_t fd;
int r;
isc_sockaddr_fromin6(addr, &in6addr_loopback, 0);
return (fd);
}
-static void
-reset_testdata(void);
-
static int
-_setup(void **state) {
- uv_os_sock_t sock = -1;
- int r;
+setup_test(void **state) {
+ uv_os_sock_t socket = -1;
+
+ setup_loopmgr(state);
+ setup_netmgr(state);
+
+ isc_netmgr_create(mctx, loopmgr, &connect_nm);
udp_connect_addr = (isc_sockaddr_t){ .length = 0 };
isc_sockaddr_fromin6(&udp_connect_addr, &in6addr_loopback, 0);
isc_sockaddr_fromin6(&tcp_connect_addr, &in6addr_loopback, 0);
udp_server_addr = (isc_sockaddr_t){ .length = 0 };
- sock = setup_ephemeral_port(&udp_server_addr, SOCK_DGRAM);
- if (sock < 0) {
+ socket = setup_ephemeral_port(&udp_server_addr, SOCK_DGRAM);
+ if (socket < 0) {
return (-1);
}
- close(sock);
+ close(socket);
tcp_server_addr = (isc_sockaddr_t){ .length = 0 };
- sock = setup_ephemeral_port(&tcp_server_addr, SOCK_STREAM);
- if (sock < 0) {
+ socket = setup_ephemeral_port(&tcp_server_addr, SOCK_STREAM);
+ if (socket < 0) {
return (-1);
}
- close(sock);
-
- setup_managers(state);
-
- /* Create a secondary network manager */
- isc_managers_create(mctx, workers, 0, &connect_nm, NULL, NULL);
+ close(socket);
isc_nm_settimeouts(netmgr, T_SERVER_INIT, T_SERVER_IDLE,
T_SERVER_KEEPALIVE, T_SERVER_ADVERTISED);
isc_nm_settimeouts(connect_nm, T_CLIENT_INIT, T_CLIENT_IDLE,
T_CLIENT_KEEPALIVE, T_CLIENT_ADVERTISED);
- r = uv_sem_init(&sem, 0);
- assert_int_equal(r, 0);
-
- reset_testdata();
+ memset(testdata.rbuf, 0, sizeof(testdata.rbuf));
+ testdata.region.base = testdata.rbuf;
+ testdata.region.length = sizeof(testdata.rbuf);
+ memset(testdata.message, 0, sizeof(testdata.message));
return (0);
}
static int
-_teardown(void **state) {
- uv_sem_destroy(&sem);
-
- isc_managers_destroy(&connect_nm, NULL, NULL);
- assert_null(connect_nm);
+teardown_test(void **state) {
+ isc_netmgr_destroy(&connect_nm);
- teardown_managers(state);
+ teardown_netmgr(state);
+ teardown_loopmgr(state);
return (0);
}
static isc_result_t
-make_dispatchset(unsigned int ndisps) {
+make_dispatchset(unsigned int ndisps, dns_dispatchset_t **dsetp) {
isc_result_t result;
isc_sockaddr_t any;
dns_dispatch_t *disp = NULL;
- result = dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr);
- if (result != ISC_R_SUCCESS) {
- return (result);
- }
-
isc_sockaddr_any(&any);
result = dns_dispatch_createudp(dispatchmgr, &any, &disp);
if (result != ISC_R_SUCCESS) {
return (result);
}
- result = dns_dispatchset_create(mctx, disp, &dset, ndisps);
+ result = dns_dispatchset_create(mctx, disp, dsetp, ndisps);
dns_dispatch_detach(&disp);
return (result);
}
-static void
-reset(void) {
- if (dset != NULL) {
- dns_dispatchset_destroy(&dset);
- }
- if (dispatchmgr != NULL) {
- dns_dispatchmgr_detach(&dispatchmgr);
- }
-}
-
/* create dispatch set */
-ISC_RUN_TEST_IMPL(dispatchset_create) {
+ISC_LOOP_TEST_IMPL(dispatchset_create) {
+ dns_dispatchset_t *dset = NULL;
isc_result_t result;
- UNUSED(state);
+ UNUSED(arg);
+
+ result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr);
+ assert_int_equal(result, ISC_R_SUCCESS);
- result = make_dispatchset(1);
+ result = make_dispatchset(1, &dset);
assert_int_equal(result, ISC_R_SUCCESS);
- reset();
+ dns_dispatchset_destroy(&dset);
- result = make_dispatchset(10);
+ result = make_dispatchset(10, &dset);
assert_int_equal(result, ISC_R_SUCCESS);
- reset();
+ dns_dispatchset_destroy(&dset);
+
+ dns_dispatchmgr_detach(&dispatchmgr);
+
+ isc_loopmgr_shutdown(loopmgr);
}
/* test dispatch set round-robin */
-ISC_RUN_TEST_IMPL(dispatchset_get) {
+ISC_LOOP_TEST_IMPL(dispatchset_get) {
isc_result_t result;
+ dns_dispatchset_t *dset = NULL;
dns_dispatch_t *d1, *d2, *d3, *d4, *d5;
- UNUSED(state);
+ UNUSED(arg);
+
+ result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr);
+ assert_int_equal(result, ISC_R_SUCCESS);
- result = make_dispatchset(1);
+ result = make_dispatchset(1, &dset);
assert_int_equal(result, ISC_R_SUCCESS);
d1 = dns_dispatchset_get(dset);
assert_ptr_equal(d3, d4);
assert_ptr_equal(d4, d5);
- reset();
+ dns_dispatchset_destroy(&dset);
- result = make_dispatchset(4);
+ result = make_dispatchset(4, &dset);
assert_int_equal(result, ISC_R_SUCCESS);
d1 = dns_dispatchset_get(dset);
assert_ptr_not_equal(d3, d4);
assert_ptr_not_equal(d4, d5);
- reset();
+ dns_dispatchset_destroy(&dset);
+ dns_dispatchmgr_detach(&dispatchmgr);
+ isc_loopmgr_shutdown(loopmgr);
}
-struct {
- atomic_uint_fast32_t responses;
- atomic_uint_fast32_t result;
-} testdata;
-
-static dns_dispatch_t *dispatch = NULL;
static dns_dispentry_t *dispentry = NULL;
static atomic_bool first = true;
-static void
-reset_testdata(void) {
- atomic_init(&testdata.responses, 0);
- atomic_init(&testdata.result, ISC_R_UNSET);
-}
-
static void
server_senddone(isc_nmhandle_t *handle, isc_result_t eresult, void *cbarg) {
UNUSED(handle);
static void
nameserver(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region,
void *cbarg) {
- isc_region_t response;
+ isc_region_t response1, response2;
static unsigned char buf1[16];
static unsigned char buf2[16];
/*
* send message to be discarded.
*/
- response.base = buf1;
- response.length = sizeof(buf1);
- isc_nm_send(handle, &response, server_senddone, NULL);
+ response1.base = buf1;
+ response1.length = sizeof(buf1);
+ isc_nm_send(handle, &response1, server_senddone, NULL);
/*
* send nextitem message.
*/
- response.base = buf2;
- response.length = sizeof(buf2);
- isc_nm_send(handle, &response, server_senddone, NULL);
+ response2.base = buf2;
+ response2.length = sizeof(buf2);
+ isc_nm_send(handle, &response2, server_senddone, NULL);
}
static isc_result_t
UNUSED(region);
UNUSED(arg);
- atomic_fetch_add_relaxed(&testdata.responses, 1);
-
if (atomic_compare_exchange_strong(&first, &(bool){ true }, false)) {
result = dns_dispatch_getnext(dispentry);
assert_int_equal(result, ISC_R_SUCCESS);
} else {
- uv_sem_post(&sem);
+ dns_dispatch_done(&dispentry);
+ isc_loopmgr_shutdown(loopmgr);
}
}
UNUSED(region);
UNUSED(arg);
- switch (eresult) {
- case ISC_R_EOF:
- case ISC_R_CANCELED:
- case ISC_R_SHUTTINGDOWN:
- break;
- default:
- atomic_fetch_add_relaxed(&testdata.responses, 1);
- atomic_store_relaxed(&testdata.result, eresult);
- }
+ assert_int_equal(eresult, ISC_R_SUCCESS);
- uv_sem_post(&sem);
+ dns_dispatch_done(&dispentry);
+ isc_loopmgr_shutdown(loopmgr);
}
static void
UNUSED(region);
UNUSED(arg);
- atomic_store_relaxed(&testdata.result, eresult);
+ assert_int_equal(eresult, ISC_R_TIMEDOUT);
- uv_sem_post(&sem);
+ dns_dispatch_done(&dispentry);
+ isc_loopmgr_shutdown(loopmgr);
}
static void
UNUSED(eresult);
UNUSED(region);
UNUSED(cbarg);
-
- return;
}
static void
UNUSED(region);
UNUSED(cbarg);
- atomic_store_relaxed(&testdata.result, eresult);
+ if (eresult == ISC_R_ADDRNOTAVAIL || eresult == ISC_R_CONNREFUSED) {
+ /* FIXME: Skip */
+ } else {
+ assert_int_equal(eresult, ISC_R_TIMEDOUT);
+ }
+
+ dns_dispatch_done(&dispentry);
- uv_sem_post(&sem);
+ isc_loopmgr_shutdown(loopmgr);
}
-ISC_RUN_TEST_IMPL(dispatch_timeout_tcp_connect) {
+ISC_LOOP_TEST_IMPL(dispatch_timeout_tcp_connect) {
isc_result_t result;
- isc_region_t region;
- unsigned char rbuf[12] = { 0 };
- unsigned char message[12] = { 0 };
uint16_t id;
- UNUSED(state);
-
+ /* Client */
tcp_connect_addr = (isc_sockaddr_t){ .length = 0 };
isc_sockaddr_fromin6(&tcp_connect_addr, &in6addr_blackhole, 0);
+ testdata.region.base = testdata.message;
+ testdata.region.length = sizeof(testdata.message);
+
result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr);
assert_int_equal(result, ISC_R_SUCCESS);
result = dns_dispatch_createtcp(dispatchmgr, &tcp_connect_addr,
&tcp_server_addr, -1, &dispatch);
assert_int_equal(result, ISC_R_SUCCESS);
-
- region.base = rbuf;
- region.length = sizeof(rbuf);
+ dns_dispatchmgr_detach(&dispatchmgr);
result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT,
&tcp_server_addr, timeout_connected,
- client_senddone, response, ®ion, &id,
- &dispentry);
+ client_senddone, response_timeout,
+ &testdata.region, &id, &dispentry);
assert_int_equal(result, ISC_R_SUCCESS);
+ dns_dispatch_detach(&dispatch);
- memset(message, 0, sizeof(message));
- message[0] = (id >> 8) & 0xff;
- message[1] = id & 0xff;
-
- region.base = message;
- region.length = sizeof(message);
+ testdata.message[0] = (id >> 8) & 0xff;
+ testdata.message[1] = id & 0xff;
dns_dispatch_connect(dispentry);
+}
- uv_sem_wait(&sem);
-
- dns_dispatch_done(&dispentry);
-
- dns_dispatch_detach(&dispatch);
- dns_dispatchmgr_detach(&dispatchmgr);
-
- /* Skip if the IPv6 is not available or not blackholed */
-
- result = atomic_load_acquire(&testdata.result);
- if (result == ISC_R_ADDRNOTAVAIL || result == ISC_R_CONNREFUSED) {
- skip();
- return;
- }
+static void
+stop_listening(void *arg) {
+ UNUSED(arg);
- assert_int_equal(result, ISC_R_TIMEDOUT);
+ isc_nm_stoplistening(sock);
+ isc_nmsocket_close(&sock);
+ assert_null(sock);
}
-ISC_RUN_TEST_IMPL(dispatch_timeout_tcp_response) {
+ISC_LOOP_TEST_IMPL(dispatch_timeout_tcp_response) {
isc_result_t result;
- isc_region_t region;
- unsigned char rbuf[12] = { 0 };
- unsigned char message[12] = { 0 };
uint16_t id;
- isc_nmsocket_t *sock = NULL;
- UNUSED(state);
+ /* Server */
+ result = isc_nm_listentcpdns(netmgr, ISC_NM_LISTEN_ONE,
+ &tcp_server_addr, noop_nameserver, NULL,
+ accept_cb, NULL, 0, NULL, &sock);
+ assert_int_equal(result, ISC_R_SUCCESS);
- tcp_connect_addr = (isc_sockaddr_t){ .length = 0 };
- isc_sockaddr_fromin6(&tcp_connect_addr, &in6addr_loopback, 0);
+ /* ensure we stop listening after the test is done */
+ isc_loop_teardown(isc_loop_main(loopmgr), stop_listening, sock);
+ /* Client */
result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr);
assert_int_equal(result, ISC_R_SUCCESS);
result = dns_dispatch_createtcp(dispatchmgr, &tcp_connect_addr,
&tcp_server_addr, -1, &dispatch);
assert_int_equal(result, ISC_R_SUCCESS);
-
- result = isc_nm_listentcpdns(netmgr, ISC_NM_LISTEN_ONE,
- &tcp_server_addr, noop_nameserver, NULL,
- accept_cb, NULL, 0, NULL, &sock);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- region.base = rbuf;
- region.length = sizeof(rbuf);
+ dns_dispatchmgr_detach(&dispatchmgr);
result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT,
&tcp_server_addr, connected, client_senddone,
- response_timeout, ®ion, &id, &dispentry);
+ response_timeout, &testdata.region, &id,
+ &dispentry);
assert_int_equal(result, ISC_R_SUCCESS);
-
- memset(message, 0, sizeof(message));
- message[0] = (id >> 8) & 0xff;
- message[1] = id & 0xff;
-
- region.base = message;
- region.length = sizeof(message);
+ dns_dispatch_detach(&dispatch);
dns_dispatch_connect(dispentry);
-
- uv_sem_wait(&sem);
-
- assert_int_equal(atomic_load_acquire(&testdata.result), ISC_R_TIMEDOUT);
-
- isc_nm_stoplistening(sock);
- isc_nmsocket_close(&sock);
- assert_null(sock);
-
- dns_dispatch_done(&dispentry);
-
- dns_dispatch_detach(&dispatch);
- dns_dispatchmgr_detach(&dispatchmgr);
}
-ISC_RUN_TEST_IMPL(dispatch_tcp_response) {
+ISC_LOOP_TEST_IMPL(dispatch_tcp_response) {
isc_result_t result;
- isc_region_t region;
- unsigned char rbuf[12] = { 0 };
- unsigned char message[12] = { 0 };
uint16_t id;
- isc_nmsocket_t *sock = NULL;
- UNUSED(state);
+ /* Server */
+ result = isc_nm_listentcpdns(netmgr, ISC_NM_LISTEN_ONE,
+ &tcp_server_addr, nameserver, NULL,
+ accept_cb, NULL, 0, NULL, &sock);
+ assert_int_equal(result, ISC_R_SUCCESS);
+
+ isc_loop_teardown(isc_loop_main(loopmgr), stop_listening, sock);
- tcp_connect_addr = (isc_sockaddr_t){ .length = 0 };
- isc_sockaddr_fromin6(&tcp_connect_addr, &in6addr_loopback, 0);
+ /* Client */
+ testdata.region.base = testdata.message;
+ testdata.region.length = sizeof(testdata.message);
result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr);
assert_int_equal(result, ISC_R_SUCCESS);
result = dns_dispatch_createtcp(dispatchmgr, &tcp_connect_addr,
&tcp_server_addr, -1, &dispatch);
assert_int_equal(result, ISC_R_SUCCESS);
-
- result = isc_nm_listentcpdns(netmgr, ISC_NM_LISTEN_ONE,
- &tcp_server_addr, nameserver, NULL,
- accept_cb, NULL, 0, NULL, &sock);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- region.base = rbuf;
- region.length = sizeof(rbuf);
+ dns_dispatchmgr_detach(&dispatchmgr);
result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT,
&tcp_server_addr, connected, client_senddone,
- response, ®ion, &id, &dispentry);
+ response, &testdata.region, &id, &dispentry);
assert_int_equal(result, ISC_R_SUCCESS);
+ dns_dispatch_detach(&dispatch);
- memset(message, 0, sizeof(message));
- message[0] = (id >> 8) & 0xff;
- message[1] = id & 0xff;
-
- region.base = message;
- region.length = sizeof(message);
+ testdata.message[0] = (id >> 8) & 0xff;
+ testdata.message[1] = id & 0xff;
dns_dispatch_connect(dispentry);
-
- uv_sem_wait(&sem);
-
- assert_in_range(atomic_load_acquire(&testdata.responses), 1, 2);
- assert_int_equal(atomic_load_acquire(&testdata.result), ISC_R_SUCCESS);
-
- /* Cleanup */
-
- isc_nm_stoplistening(sock);
- isc_nmsocket_close(&sock);
- assert_null(sock);
-
- dns_dispatch_done(&dispentry);
-
- dns_dispatch_detach(&dispatch);
- dns_dispatchmgr_detach(&dispatchmgr);
}
-ISC_RUN_TEST_IMPL(dispatch_timeout_udp_response) {
+ISC_LOOP_TEST_IMPL(dispatch_timeout_udp_response) {
isc_result_t result;
- isc_region_t region;
- unsigned char rbuf[12] = { 0 };
- unsigned char message[12] = { 0 };
uint16_t id;
- isc_nmsocket_t *sock = NULL;
-
- UNUSED(state);
-
- udp_connect_addr = (isc_sockaddr_t){ .length = 0 };
- isc_sockaddr_fromin6(&udp_connect_addr, &in6addr_loopback, 0);
+ /* Server */
result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr);
assert_int_equal(result, ISC_R_SUCCESS);
- result = dns_dispatch_createudp(dispatchmgr, &tcp_connect_addr,
- &dispatch);
- assert_int_equal(result, ISC_R_SUCCESS);
-
result = isc_nm_listenudp(netmgr, ISC_NM_LISTEN_ONE, &udp_server_addr,
noop_nameserver, NULL, &sock);
assert_int_equal(result, ISC_R_SUCCESS);
- region.base = rbuf;
- region.length = sizeof(rbuf);
+ /* ensure we stop listening after the test is done */
+ isc_loop_teardown(isc_loop_main(loopmgr), stop_listening, sock);
+
+ /* Client */
+ result = dns_dispatch_createudp(dispatchmgr, &udp_connect_addr,
+ &dispatch);
+ assert_int_equal(result, ISC_R_SUCCESS);
+ dns_dispatchmgr_detach(&dispatchmgr);
result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT,
&udp_server_addr, connected, client_senddone,
- response_timeout, ®ion, &id, &dispentry);
+ response_timeout, &testdata.region, &id,
+ &dispentry);
assert_int_equal(result, ISC_R_SUCCESS);
-
- memset(message, 0, sizeof(message));
- message[0] = (id >> 8) & 0xff;
- message[1] = id & 0xff;
-
- region.base = message;
- region.length = sizeof(message);
+ dns_dispatch_detach(&dispatch);
dns_dispatch_connect(dispentry);
-
- uv_sem_wait(&sem);
-
- assert_int_equal(atomic_load_acquire(&testdata.result), ISC_R_TIMEDOUT);
-
- isc_nm_stoplistening(sock);
- isc_nmsocket_close(&sock);
- assert_null(sock);
-
- dns_dispatch_done(&dispentry);
-
- dns_dispatch_detach(&dispatch);
- dns_dispatchmgr_detach(&dispatchmgr);
}
/* test dispatch getnext */
-ISC_RUN_TEST_IMPL(dispatch_getnext) {
+ISC_LOOP_TEST_IMPL(dispatch_getnext) {
isc_result_t result;
- isc_region_t region;
- isc_nmsocket_t *sock = NULL;
- unsigned char message[12] = { 0 };
- unsigned char rbuf[12] = { 0 };
uint16_t id;
- UNUSED(state);
+ /* Server */
+ result = isc_nm_listenudp(netmgr, ISC_NM_LISTEN_ONE, &udp_server_addr,
+ nameserver, NULL, &sock);
+ assert_int_equal(result, ISC_R_SUCCESS);
+
+ isc_loop_teardown(isc_loop_main(loopmgr), stop_listening, sock);
+
+ /* Client */
+ testdata.region.base = testdata.message;
+ testdata.region.length = sizeof(testdata.message);
result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr);
assert_int_equal(result, ISC_R_SUCCESS);
result = dns_dispatch_createudp(dispatchmgr, &udp_connect_addr,
&dispatch);
assert_int_equal(result, ISC_R_SUCCESS);
+ dns_dispatchmgr_detach(&dispatchmgr);
- /*
- * Create a local udp nameserver on the loopback.
- */
- result = isc_nm_listenudp(netmgr, ISC_NM_LISTEN_ONE, &udp_server_addr,
- nameserver, NULL, &sock);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- region.base = rbuf;
- region.length = sizeof(rbuf);
result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT,
&udp_server_addr, connected, client_senddone,
- response_getnext, ®ion, &id, &dispentry);
+ response_getnext, &testdata.region, &id,
+ &dispentry);
assert_int_equal(result, ISC_R_SUCCESS);
+ dns_dispatch_detach(&dispatch);
- memset(message, 0, sizeof(message));
- message[0] = (id >> 8) & 0xff;
- message[1] = id & 0xff;
-
- region.base = message;
- region.length = sizeof(message);
+ testdata.message[0] = (id >> 8) & 0xff;
+ testdata.message[1] = id & 0xff;
dns_dispatch_connect(dispentry);
-
- uv_sem_wait(&sem);
-
- assert_int_equal(atomic_load_acquire(&testdata.responses), 2);
-
- /* Cleanup */
- isc_nm_stoplistening(sock);
- isc_nmsocket_close(&sock);
- assert_null(sock);
-
- dns_dispatch_done(&dispentry);
- dns_dispatch_detach(&dispatch);
- dns_dispatchmgr_detach(&dispatchmgr);
}
ISC_TEST_LIST_START
-
-ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_tcp_connect, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_tcp_response, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dispatch_tcp_response, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_udp_response, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dispatchset_create, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dispatchset_get, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dispatch_getnext, _setup, _teardown)
-
+ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_udp_response, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(dispatchset_create, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(dispatchset_get, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_tcp_response, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_tcp_connect, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(dispatch_tcp_response, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(dispatch_getnext, setup_test, teardown_test)
ISC_TEST_LIST_END
ISC_TEST_MAIN
#include <tests/dns.h>
+static int
+setup_test(void **state) {
+ setup_loopmgr(state);
+ setup_taskmgr(state);
+
+ return (0);
+}
+
+static int
+teardown_test(void **state) {
+ teardown_taskmgr(state);
+ teardown_loopmgr(state);
+
+ return (0);
+}
+
dns_keytable_t *keytable = NULL;
dns_ntatable_t *ntatable = NULL;
ISC_R_SUCCESS);
assert_int_equal(dns_keytable_create(mctx, &keytable), ISC_R_SUCCESS);
- assert_int_equal(
- dns_ntatable_create(view, taskmgr, timermgr, &ntatable),
- ISC_R_SUCCESS);
+ assert_int_equal(dns_ntatable_create(view, taskmgr, loopmgr, &ntatable),
+ ISC_R_SUCCESS);
/* Add a normal key */
dns_test_namefromstring("example.com", &fn);
}
/* add keys to the keytable */
-ISC_RUN_TEST_IMPL(dns_keytable_add) {
+ISC_LOOP_TEST_IMPL(add) {
dns_keynode_t *keynode = NULL;
dns_keynode_t *null_keynode = NULL;
unsigned char digest[ISC_MAX_MD_SIZE];
dns_fixedname_t fn;
dns_name_t *keyname = dns_fixedname_name(&fn);
- UNUSED(state);
+ UNUSED(arg);
create_tables();
dns_keytable_detachkeynode(keytable, &keynode);
destroy_tables();
+
+ isc_loopmgr_shutdown(loopmgr);
}
/* delete keys from the keytable */
-ISC_RUN_TEST_IMPL(dns_keytable_delete) {
+ISC_LOOP_TEST_IMPL(delete) {
+ UNUSED(arg);
+
create_tables();
/* dns_keytable_delete requires exact match */
ISC_R_SUCCESS);
destroy_tables();
+
+ isc_loopmgr_shutdown(loopmgr);
}
/* delete key nodes from the keytable */
-ISC_RUN_TEST_IMPL(dns_keytable_deletekey) {
+ISC_LOOP_TEST_IMPL(deletekey) {
dns_rdata_dnskey_t dnskey;
dns_fixedname_t fn;
dns_name_t *keyname = dns_fixedname_name(&fn);
- UNUSED(state);
+ UNUSED(arg);
create_tables();
dns_rdata_freestruct(&dnskey);
destroy_tables();
+
+ isc_loopmgr_shutdown(loopmgr);
}
/* check find-variant operations */
-ISC_RUN_TEST_IMPL(dns_keytable_find) {
+ISC_LOOP_TEST_IMPL(find) {
dns_keynode_t *keynode = NULL;
dns_fixedname_t fname;
dns_name_t *name;
- UNUSED(state);
+ UNUSED(arg);
create_tables();
assert_true(dns_name_equal(name, str2name("null.example")));
destroy_tables();
+
+ isc_loopmgr_shutdown(loopmgr);
}
/* check issecuredomain() */
-ISC_RUN_TEST_IMPL(dns_keytable_issecuredomain) {
+ISC_LOOP_TEST_IMPL(issecuredomain) {
bool issecure;
const char **n;
const char *names[] = { "example.com", "sub.example.com",
"null.example", "sub.null.example", NULL };
- UNUSED(state);
+ UNUSED(arg);
create_tables();
/*
assert_false(issecure);
destroy_tables();
+
+ isc_loopmgr_shutdown(loopmgr);
}
/* check dns_keytable_dump() */
-ISC_RUN_TEST_IMPL(dns_keytable_dump) {
+ISC_LOOP_TEST_IMPL(dump) {
FILE *f = fopen("/dev/null", "w");
- UNUSED(state);
+ UNUSED(arg);
create_tables();
fclose(f);
destroy_tables();
+
+ isc_loopmgr_shutdown(loopmgr);
}
/* check negative trust anchors */
-ISC_RUN_TEST_IMPL(dns_keytable_nta) {
+ISC_LOOP_TEST_IMPL(nta) {
isc_result_t result;
bool issecure, covered;
dns_fixedname_t fn;
dns_view_t *myview = NULL;
isc_stdtime_t now;
- UNUSED(state);
-
result = dns_test_makeview("view", false, &myview);
assert_int_equal(result, ISC_R_SUCCESS);
- result = isc_task_create(taskmgr, 0, &myview->task, 0);
+ result = isc_task_create(taskmgr, &myview->task, 0);
assert_int_equal(result, ISC_R_SUCCESS);
result = dns_view_initsecroots(myview, mctx);
result = dns_view_getsecroots(myview, &keytable);
assert_int_equal(result, ISC_R_SUCCESS);
- result = dns_view_initntatable(myview, taskmgr, timermgr);
+ result = dns_view_initntatable(myview, taskmgr, loopmgr);
assert_int_equal(result, ISC_R_SUCCESS);
result = dns_view_getntatable(myview, &ntatable);
assert_int_equal(result, ISC_R_SUCCESS);
dns_ntatable_detach(&ntatable);
dns_keytable_detach(&keytable);
dns_view_detach(&myview);
+
+ isc_loopmgr_shutdown(loopmgr);
}
ISC_TEST_LIST_START
-
-ISC_TEST_ENTRY_CUSTOM(dns_keytable_add, setup_managers, teardown_managers)
-ISC_TEST_ENTRY_CUSTOM(dns_keytable_delete, setup_managers, teardown_managers)
-ISC_TEST_ENTRY_CUSTOM(dns_keytable_deletekey, setup_managers, teardown_managers)
-ISC_TEST_ENTRY_CUSTOM(dns_keytable_find, setup_managers, teardown_managers)
-ISC_TEST_ENTRY_CUSTOM(dns_keytable_issecuredomain, setup_managers,
- teardown_managers)
-ISC_TEST_ENTRY_CUSTOM(dns_keytable_dump, setup_managers, teardown_managers)
-ISC_TEST_ENTRY_CUSTOM(dns_keytable_nta, setup_managers, teardown_managers)
-
+ISC_TEST_ENTRY_CUSTOM(add, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(delete, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(deletekey, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(find, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(issecuredomain, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(dump, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(nta, setup_test, teardown_test)
ISC_TEST_LIST_END
ISC_TEST_MAIN
#define UNIT_TESTING
#include <cmocka.h>
-#include <isc/app.h>
#include <isc/buffer.h>
#include <isc/net.h>
#include <isc/print.h>
static dns_view_t *view = NULL;
static int
-_setup(void **state) {
+setup_test(void **state) {
isc_result_t result;
isc_sockaddr_t local;
result = dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr);
assert_int_equal(result, ISC_R_SUCCESS);
- result = dns_test_makeview("view", true, &view);
+ result = dns_test_makeview("view", false, &view);
assert_int_equal(result, ISC_R_SUCCESS);
isc_sockaddr_any(&local);
}
static int
-_teardown(void **state) {
+teardown_test(void **state) {
dns_dispatch_detach(&dispatch);
dns_view_detach(&view);
dns_dispatchmgr_detach(&dispatchmgr);
-
teardown_managers(state);
return (0);
mkres(dns_resolver_t **resolverp) {
isc_result_t result;
- result = dns_resolver_create(view, taskmgr, 1, netmgr, timermgr, 0,
+ result = dns_resolver_create(view, loopmgr, taskmgr, 1, netmgr, 0,
dispatchmgr, dispatch, NULL, resolverp);
assert_int_equal(result, ISC_R_SUCCESS);
}
}
/* dns_resolver_create */
-ISC_RUN_TEST_IMPL(dns_resolver_create) {
+ISC_LOOP_TEST_IMPL(create) {
dns_resolver_t *resolver = NULL;
- UNUSED(state);
-
mkres(&resolver);
destroy_resolver(&resolver);
+ isc_loopmgr_shutdown(loopmgr);
}
/* dns_resolver_gettimeout */
-ISC_RUN_TEST_IMPL(dns_resolver_gettimeout) {
+ISC_LOOP_TEST_IMPL(gettimeout) {
dns_resolver_t *resolver = NULL;
unsigned int timeout;
- UNUSED(state);
-
mkres(&resolver);
timeout = dns_resolver_gettimeout(resolver);
assert_true(timeout > 0);
destroy_resolver(&resolver);
+ isc_loopmgr_shutdown(loopmgr);
}
/* dns_resolver_settimeout */
-ISC_RUN_TEST_IMPL(dns_resolver_settimeout) {
+ISC_LOOP_TEST_IMPL(settimeout) {
dns_resolver_t *resolver = NULL;
unsigned int default_timeout, timeout;
- UNUSED(state);
-
mkres(&resolver);
default_timeout = dns_resolver_gettimeout(resolver);
assert_true(timeout == default_timeout + 1);
destroy_resolver(&resolver);
+ isc_loopmgr_shutdown(loopmgr);
}
/* dns_resolver_settimeout */
-ISC_RUN_TEST_IMPL(dns_resolver_settimeout_default) {
+ISC_LOOP_TEST_IMPL(settimeout_default) {
dns_resolver_t *resolver = NULL;
unsigned int default_timeout, timeout;
- UNUSED(state);
-
mkres(&resolver);
default_timeout = dns_resolver_gettimeout(resolver);
assert_int_equal(timeout, default_timeout);
destroy_resolver(&resolver);
+ isc_loopmgr_shutdown(loopmgr);
}
/* dns_resolver_settimeout below minimum */
-ISC_RUN_TEST_IMPL(dns_resolver_settimeout_belowmin) {
+ISC_LOOP_TEST_IMPL(settimeout_belowmin) {
dns_resolver_t *resolver = NULL;
unsigned int default_timeout, timeout;
- UNUSED(state);
-
mkres(&resolver);
default_timeout = dns_resolver_gettimeout(resolver);
assert_int_equal(timeout, default_timeout);
destroy_resolver(&resolver);
+ isc_loopmgr_shutdown(loopmgr);
}
/* dns_resolver_settimeout over maximum */
-ISC_RUN_TEST_IMPL(dns_resolver_settimeout_overmax) {
+ISC_LOOP_TEST_IMPL(settimeout_overmax) {
dns_resolver_t *resolver = NULL;
unsigned int timeout;
- UNUSED(state);
-
mkres(&resolver);
dns_resolver_settimeout(resolver, 4000000);
timeout = dns_resolver_gettimeout(resolver);
assert_in_range(timeout, 0, 3999999);
destroy_resolver(&resolver);
+ isc_loopmgr_shutdown(loopmgr);
}
ISC_TEST_LIST_START
-
-ISC_TEST_ENTRY_CUSTOM(dns_resolver_create, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dns_resolver_gettimeout, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dns_resolver_settimeout, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dns_resolver_settimeout_default, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dns_resolver_settimeout_belowmin, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dns_resolver_settimeout_overmax, _setup, _teardown)
-
+ISC_TEST_ENTRY_CUSTOM(create, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(gettimeout, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(settimeout, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(settimeout_default, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(settimeout_belowmin, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(settimeout_overmax, setup_test, teardown_test)
ISC_TEST_LIST_END
ISC_TEST_MAIN
#define TEST_ORIGIN "test"
+#define CHECK(r) \
+ { \
+ result = (r); \
+ if (result != ISC_R_SUCCESS) { \
+ goto cleanup; \
+ } \
+ }
+
static int debug = 0;
static int
#include <tests/dns.h>
+static int
+setup_test(void **state) {
+ setup_loopmgr(state);
+ setup_taskmgr(state);
+ setup_netmgr(state);
+
+ return (0);
+}
+
+static int
+teardown_test(void **state) {
+ teardown_netmgr(state);
+ teardown_taskmgr(state);
+ teardown_loopmgr(state);
+
+ return (0);
+}
+
/* create zone manager */
-ISC_RUN_TEST_IMPL(dns_zonemgr_create) {
+ISC_LOOP_TEST_IMPL(zonemgr_create) {
dns_zonemgr_t *myzonemgr = NULL;
isc_result_t result;
- UNUSED(state);
+ UNUSED(arg);
- result = dns_zonemgr_create(mctx, taskmgr, timermgr, netmgr,
- &myzonemgr);
+ result = dns_zonemgr_create(mctx, loopmgr, taskmgr, netmgr, &myzonemgr);
assert_int_equal(result, ISC_R_SUCCESS);
dns_zonemgr_shutdown(myzonemgr);
dns_zonemgr_detach(&myzonemgr);
assert_null(myzonemgr);
+
+ isc_loopmgr_shutdown(loopmgr);
}
/* manage and release a zone */
-ISC_RUN_TEST_IMPL(dns_zonemgr_managezone) {
+ISC_LOOP_TEST_IMPL(zonemgr_managezone) {
dns_zonemgr_t *myzonemgr = NULL;
dns_zone_t *zone = NULL;
isc_result_t result;
- UNUSED(state);
+ UNUSED(arg);
- result = dns_zonemgr_create(mctx, taskmgr, timermgr, netmgr,
- &myzonemgr);
+ result = dns_zonemgr_create(mctx, loopmgr, taskmgr, netmgr, &myzonemgr);
assert_int_equal(result, ISC_R_SUCCESS);
result = dns_test_makezone("foo", &zone, NULL, false);
dns_zonemgr_shutdown(myzonemgr);
dns_zonemgr_detach(&myzonemgr);
assert_null(myzonemgr);
+
+ isc_loopmgr_shutdown(loopmgr);
}
/* create and release a zone */
-ISC_RUN_TEST_IMPL(dns_zonemgr_createzone) {
+ISC_LOOP_TEST_IMPL(zonemgr_createzone) {
dns_zonemgr_t *myzonemgr = NULL;
dns_zone_t *zone = NULL;
isc_result_t result;
- UNUSED(state);
+ UNUSED(arg);
- result = dns_zonemgr_create(mctx, taskmgr, timermgr, netmgr,
- &myzonemgr);
+ result = dns_zonemgr_create(mctx, loopmgr, taskmgr, netmgr, &myzonemgr);
assert_int_equal(result, ISC_R_SUCCESS);
result = dns_zonemgr_createzone(myzonemgr, &zone);
dns_zonemgr_shutdown(myzonemgr);
dns_zonemgr_detach(&myzonemgr);
assert_null(myzonemgr);
+
+ isc_loopmgr_shutdown(loopmgr);
}
/* manage and release a zone */
-ISC_RUN_TEST_IMPL(dns_zonemgr_unreachable) {
+ISC_LOOP_TEST_IMPL(zonemgr_unreachable) {
dns_zonemgr_t *myzonemgr = NULL;
dns_zone_t *zone = NULL;
isc_sockaddr_t addr1, addr2;
isc_result_t result;
isc_time_t now;
- UNUSED(state);
+ UNUSED(arg);
TIME_NOW(&now);
- result = dns_zonemgr_create(mctx, taskmgr, timermgr, netmgr,
- &myzonemgr);
+ result = dns_zonemgr_create(mctx, loopmgr, taskmgr, netmgr, &myzonemgr);
assert_int_equal(result, ISC_R_SUCCESS);
result = dns_test_makezone("foo", &zone, NULL, false);
dns_zonemgr_shutdown(myzonemgr);
dns_zonemgr_detach(&myzonemgr);
assert_null(myzonemgr);
-}
-/*
- * XXX:
- * dns_zonemgr API calls that are not yet part of this unit test:
- *
- * - dns_zonemgr_attach
- * - dns_zonemgr_forcemaint
- * - dns_zonemgr_resumexfrs
- * - dns_zonemgr_shutdown
- * - dns_zonemgr_settransfersin
- * - dns_zonemgr_getttransfersin
- * - dns_zonemgr_settransfersperns
- * - dns_zonemgr_getttransfersperns
- * - dns_zonemgr_setiolimit
- * - dns_zonemgr_getiolimit
- * - dns_zonemgr_dbdestroyed
- * - dns_zonemgr_setserialqueryrate
- * - dns_zonemgr_getserialqueryrate
- */
+ isc_loopmgr_shutdown(loopmgr);
+}
ISC_TEST_LIST_START
-
-ISC_TEST_ENTRY_CUSTOM(dns_zonemgr_create, setup_managers, teardown_managers)
-ISC_TEST_ENTRY_CUSTOM(dns_zonemgr_managezone, setup_managers, teardown_managers)
-ISC_TEST_ENTRY_CUSTOM(dns_zonemgr_createzone, setup_managers, teardown_managers)
-ISC_TEST_ENTRY_CUSTOM(dns_zonemgr_unreachable, setup_managers,
- teardown_managers)
-
+ISC_TEST_ENTRY_CUSTOM(zonemgr_create, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(zonemgr_managezone, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(zonemgr_createzone, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(zonemgr_unreachable, setup_test, teardown_test)
ISC_TEST_LIST_END
ISC_TEST_MAIN
#define UNIT_TESTING
#include <cmocka.h>
-#include <isc/app.h>
#include <isc/atomic.h>
#include <isc/buffer.h>
+#include <isc/loop.h>
#include <isc/print.h>
#include <isc/task.h>
#include <isc/timer.h>
#include <tests/dns.h>
-static int
-_setup(void **state) {
- isc_app_start();
- setup_managers(state);
-
- return (0);
-}
-
-static int
-_teardown(void **state) {
- teardown_managers(state);
- isc_app_finish();
-
- return (0);
-}
-
-struct args {
- void *arg1;
- void *arg2;
- bool arg3;
-};
+static dns_db_t *db = NULL;
+static FILE *zonefile, *origfile;
+static dns_view_t *view = NULL;
static isc_result_t
count_zone(dns_zone_t *zone, void *uap) {
return (ISC_R_SUCCESS);
}
-static isc_result_t
-load_done(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) {
- /* We treat zt as a pointer to a boolean for testing purposes */
- atomic_bool *done = (atomic_bool *)zt;
-
- UNUSED(zone);
- UNUSED(task);
-
- atomic_store(done, true);
- isc_app_shutdown();
- return (ISC_R_SUCCESS);
-}
-
-static isc_result_t
-all_done(void *arg) {
- atomic_bool *done = (atomic_bool *)arg;
-
- atomic_store(done, true);
- isc_app_shutdown();
- return (ISC_R_SUCCESS);
-}
-
-static void
-start_zt_asyncload(isc_task_t *task, isc_event_t *event) {
- struct args *args = (struct args *)(event->ev_arg);
-
- UNUSED(task);
-
- dns_zt_asyncload(args->arg1, false, all_done, args->arg2);
-
- isc_event_free(&event);
-}
-
-static void
-start_zone_asyncload(isc_task_t *task, isc_event_t *event) {
- struct args *args = (struct args *)(event->ev_arg);
-
- UNUSED(task);
-
- dns_zone_asyncload(args->arg1, args->arg3, load_done, args->arg2);
- isc_event_free(&event);
-}
-
/* apply a function to a zone table */
-ISC_RUN_TEST_IMPL(dns_zt_apply) {
+ISC_LOOP_TEST_IMPL(apply) {
isc_result_t result;
dns_zone_t *zone = NULL;
- dns_view_t *view = NULL;
int nzones = 0;
- UNUSED(state);
-
result = dns_test_makezone("foo", &zone, NULL, true);
assert_int_equal(result, ISC_R_SUCCESS);
/* The view was left attached in dns_test_makezone() */
dns_view_detach(&view);
dns_zone_detach(&zone);
+ isc_loopmgr_shutdown(loopmgr);
+}
+
+static isc_result_t
+load_done_last(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) {
+ isc_result_t result;
+
+ UNUSED(zt);
+ UNUSED(zone);
+ UNUSED(task);
+
+ /* The zone should now be loaded; test it */
+ result = dns_zone_getdb(zone, &db);
+ assert_int_equal(result, ISC_R_SUCCESS);
+
+ assert_non_null(db);
+ if (db != NULL) {
+ dns_db_detach(&db);
+ }
+
+ dns_test_releasezone(zone);
+ dns_test_closezonemgr();
+
+ dns_zone_detach(&zone);
+ dns_view_detach(&view);
+
+ isc_loopmgr_shutdown(loopmgr);
+
+ return (ISC_R_SUCCESS);
+}
+
+static isc_result_t
+load_done_new_only(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) {
+ isc_result_t result;
+
+ UNUSED(zt);
+ UNUSED(zone);
+ UNUSED(task);
+
+ /* The zone should now be loaded; test it */
+ result = dns_zone_getdb(zone, &db);
+ assert_int_equal(result, ISC_R_SUCCESS);
+ dns_db_detach(&db);
+
+ dns_zone_asyncload(zone, true, load_done_last, NULL);
+
+ return (ISC_R_SUCCESS);
+}
+
+static isc_result_t
+load_done_first(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) {
+ atomic_bool *done = (atomic_bool *)zt;
+ isc_result_t result;
+
+ UNUSED(zone);
+ UNUSED(task);
+
+ /* The zone should now be loaded; test it */
+ result = dns_zone_getdb(zone, &db);
+ assert_int_equal(result, ISC_R_SUCCESS);
+ dns_db_detach(&db);
+
+ /*
+ * Add something to zone file, reload zone with newonly - it should
+ * not be reloaded.
+ */
+ fprintf(zonefile, "\nb in b 1.2.3.4\n");
+ fflush(zonefile);
+ fclose(zonefile);
+
+ dns_zone_asyncload(zone, true, load_done_new_only, &done);
+
+ return (ISC_R_SUCCESS);
}
/* asynchronous zone load */
-ISC_RUN_TEST_IMPL(dns_zt_asyncload_zone) {
+ISC_LOOP_TEST_IMPL(asyncload_zone) {
isc_result_t result;
int n;
dns_zone_t *zone = NULL;
- dns_view_t *view = NULL;
- dns_db_t *db = NULL;
- FILE *zonefile, *origfile;
char buf[4096];
atomic_bool done;
- int i = 0;
- struct args args;
-
- UNUSED(state);
atomic_init(&done, false);
dns_zone_setfile(zone, "./zone.data", dns_masterformat_text,
&dns_master_style_default);
- args.arg1 = zone;
- args.arg2 = &done;
- args.arg3 = false;
- isc_app_onrun(mctx, maintask, start_zone_asyncload, &args);
-
- isc_app_run();
- while (dns__zone_loadpending(zone) && i++ < 5000) {
- dns_test_nap(1000);
- }
- assert_true(atomic_load(&done));
- /* The zone should now be loaded; test it */
- result = dns_zone_getdb(zone, &db);
- assert_int_equal(result, ISC_R_SUCCESS);
- dns_db_detach(&db);
- /*
- * Add something to zone file, reload zone with newonly - it should
- * not be reloaded.
- */
- fprintf(zonefile, "\nb in b 1.2.3.4\n");
- fflush(zonefile);
- fclose(zonefile);
+ dns_zone_asyncload(zone, false, load_done_first, &done);
+}
- args.arg1 = zone;
- args.arg2 = &done;
- args.arg3 = true;
- isc_app_onrun(mctx, maintask, start_zone_asyncload, &args);
+dns_zone_t *zone1 = NULL, *zone2 = NULL, *zone3 = NULL;
- isc_app_run();
+static isc_result_t
+all_done(void *arg __attribute__((__unused__))) {
+ isc_result_t result;
- while (dns__zone_loadpending(zone) && i++ < 5000) {
- dns_test_nap(1000);
- }
- assert_true(atomic_load(&done));
- /* The zone should now be loaded; test it */
- result = dns_zone_getdb(zone, &db);
+ /* Both zones should now be loaded; test them */
+ result = dns_zone_getdb(zone1, &db);
assert_int_equal(result, ISC_R_SUCCESS);
- dns_db_detach(&db);
-
- /* Now reload it without newonly - it should be reloaded */
- args.arg1 = zone;
- args.arg2 = &done;
- args.arg3 = false;
- isc_app_onrun(mctx, maintask, start_zone_asyncload, &args);
-
- isc_app_run();
-
- while (dns__zone_loadpending(zone) && i++ < 5000) {
- dns_test_nap(1000);
+ assert_non_null(db);
+ if (db != NULL) {
+ dns_db_detach(&db);
}
- assert_true(atomic_load(&done));
- /* The zone should now be loaded; test it */
- result = dns_zone_getdb(zone, &db);
- assert_int_equal(result, ISC_R_SUCCESS);
+ result = dns_zone_getdb(zone2, &db);
+ assert_int_equal(result, ISC_R_SUCCESS);
assert_non_null(db);
if (db != NULL) {
dns_db_detach(&db);
}
- dns_test_releasezone(zone);
+ dns_test_releasezone(zone3);
+ dns_test_releasezone(zone2);
+ dns_test_releasezone(zone1);
dns_test_closezonemgr();
- dns_zone_detach(&zone);
+ dns_zone_detach(&zone1);
+ dns_zone_detach(&zone2);
+ dns_zone_detach(&zone3);
dns_view_detach(&view);
+
+ isc_loopmgr_shutdown(loopmgr);
+ return (ISC_R_SUCCESS);
}
/* asynchronous zone table load */
-ISC_RUN_TEST_IMPL(dns_zt_asyncload_zt) {
+ISC_LOOP_TEST_IMPL(asyncload_zt) {
isc_result_t result;
- dns_zone_t *zone1 = NULL, *zone2 = NULL, *zone3 = NULL;
- dns_view_t *view;
dns_zt_t *zt = NULL;
- dns_db_t *db = NULL;
atomic_bool done;
- int i = 0;
- struct args args;
-
- UNUSED(state);
atomic_init(&done, false);
assert_false(dns__zone_loadpending(zone2));
assert_false(atomic_load(&done));
- args.arg1 = zt;
- args.arg2 = &done;
- isc_app_onrun(mctx, maintask, start_zt_asyncload, &args);
-
- isc_app_run();
- while (!atomic_load(&done) && i++ < 5000) {
- dns_test_nap(1000);
- }
- assert_true(atomic_load(&done));
-
- /* Both zones should now be loaded; test them */
- result = dns_zone_getdb(zone1, &db);
- assert_int_equal(result, ISC_R_SUCCESS);
- assert_non_null(db);
- if (db != NULL) {
- dns_db_detach(&db);
- }
-
- result = dns_zone_getdb(zone2, &db);
- assert_int_equal(result, ISC_R_SUCCESS);
- assert_non_null(db);
- if (db != NULL) {
- dns_db_detach(&db);
- }
-
- dns_test_releasezone(zone3);
- dns_test_releasezone(zone2);
- dns_test_releasezone(zone1);
- dns_test_closezonemgr();
-
- dns_zone_detach(&zone1);
- dns_zone_detach(&zone2);
- dns_zone_detach(&zone3);
- dns_view_detach(&view);
+ dns_zt_asyncload(zt, false, all_done, NULL);
}
ISC_TEST_LIST_START
-
-ISC_TEST_ENTRY_CUSTOM(dns_zt_apply, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dns_zt_asyncload_zone, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(dns_zt_asyncload_zt, _setup, _teardown)
-
+ISC_TEST_ENTRY_CUSTOM(apply, setup_managers, teardown_managers)
+ISC_TEST_ENTRY_CUSTOM(asyncload_zone, setup_managers, teardown_managers)
+ISC_TEST_ENTRY_CUSTOM(asyncload_zt, setup_managers, teardown_managers)
ISC_TEST_LIST_END
ISC_TEST_MAIN
#include <isc/buffer.h>
#include <isc/hash.h>
#include <isc/log.h>
+#include <isc/loop.h>
#include <isc/mem.h>
#include <isc/result.h>
#include <isc/string.h>
#include <inttypes.h>
#include <stdbool.h>
-#include <uv.h>
#include <isc/buffer.h>
#include <isc/hash.h>
#include <isc/task.h>
#include <isc/timer.h>
#include <isc/util.h>
+#include <isc/uv.h>
-#include "netmgr_p.h"
-#include "task_p.h"
-#include "timer_p.h"
-
-#define CHECK(r) \
- do { \
- result = (r); \
- if (result != ISC_R_SUCCESS) \
- goto cleanup; \
- } while (0)
-
-extern isc_mem_t *mctx;
-extern isc_nm_t *netmgr;
-extern isc_loopmgr_t *loopmgr;
-extern isc_loop_t *mainloop;
-extern isc_taskmgr_t *taskmgr;
-extern isc_timermgr_t *timermgr;
-extern unsigned int workers;
-extern isc_task_t *maintask;
-
-#define isc_test_nap(ms) uv_sleep(ms)
+extern isc_mem_t *mctx;
+extern isc_log_t *lctx;
+extern isc_loop_t *mainloop;
+extern isc_loopmgr_t *loopmgr;
+extern isc_taskmgr_t *taskmgr;
+extern isc_nm_t *netmgr;
+extern int ncpus;
+extern unsigned int workers;
int
setup_mctx(void **state);
int
teardown_loopmgr(void **state);
+int
+setup_taskmgr(void **state);
+int
+teardown_taskmgr(void **state);
+
+int
+setup_netmgr(void **state);
+int
+teardown_netmgr(void **state);
+
int
setup_managers(void **state);
int
#define ISC_TEARDOWN_TEST_DECLARE(name) \
int teardown_test_##name(void **state __attribute__((unused)))
+#define ISC_LOOP_TEST_CUSTOM_DECLARE(name, setup, teardown) \
+ void run_test_##name(void **state __attribute__((__unused__))); \
+ void loop_test_##name(void *arg __attribute__((__unused__)));
+
+#define ISC_LOOP_TEST_DECLARE(name) \
+ ISC_LOOP_TEST_CUSTOM_DECLARE(name, NULL, NULL)
+
+#define ISC_LOOP_TEST_SETUP_DECLARE(name) \
+ ISC_LOOP_TEST_CUSTOM_DECLARE(name, setup_loop_##name, NULL)
+
+#define ISC_LOOP_TEST_SETUP_TEARDOWN_DECLARE(name) \
+ ISC_LOOP_TEST_CUSTOM_DECLARE(name, setup_loop_##name, \
+ teardown_loop_##name)
+
+#define ISC_LOOP_TEST_TEARDOWN_DECLARE(name) \
+ ISC_LOOP_TEST_CUSTOM_DECLARE(name, NULL, teardown_loop_##name)
+
+#define ISC_LOOP_SETUP_DECLARE(name) \
+ void setup_loop_##name(void *arg __attribute__((__unused__)));
+
#define ISC_SETUP_TEST_IMPL(name) \
int setup_test_##name(void **state __attribute__((unused))); \
int setup_test_##name(void **state __attribute__((unused)))
int teardown_test_##name(void **state __attribute__((unused))); \
int teardown_test_##name(void **state __attribute__((unused)))
+#define ISC_TEST_LIST_START const struct CMUnitTest tests[] = {
+#define ISC_TEST_LIST_END \
+ } \
+ ;
+
+#define ISC_LOOP_TEST_CUSTOM_IMPL(name, setup, teardown) \
+ void run_test_##name(void **state __attribute__((__unused__))); \
+ void loop_test_##name(void *arg __attribute__((__unused__))); \
+ void run_test_##name(void **state __attribute__((__unused__))) { \
+ isc_job_cb setup_loop = setup; \
+ isc_job_cb teardown_loop = teardown; \
+ if (setup_loop != NULL) { \
+ setup_loop(state); \
+ } \
+ isc_loop_setup(mainloop, loop_test_##name, state); \
+ isc_loopmgr_run(loopmgr); \
+ if (teardown_loop != NULL) { \
+ teardown_loop(state); \
+ } \
+ } \
+ void loop_test_##name(void *arg __attribute__((__unused__)))
+
+#define ISC_LOOP_TEST_IMPL(name) ISC_LOOP_TEST_CUSTOM_IMPL(name, NULL, NULL)
+
+#define ISC_LOOP_TEST_SETUP_IMPL(name) \
+ ISC_LOOP_TEST_CUSTOM_IMPL(name, setup_loop_##name, NULL)
+
+#define ISC_LOOP_TEST_SETUP_TEARDOWN_IMPL(name) \
+ ISC_LOOP_TEST_CUSTOM_IMPL(name, setup_loop_##name, teardown_loop_##name)
+
+#define ISC_LOOP_TEST_TEARDOWN_IMPL(name) \
+ ISC_LOOP_TEST_CUSTOM_IMPL(name, NULL, teardown_loop_##name)
+
+#define ISC_LOOP_SETUP_IMPL(name) \
+ void setup_loop_##name(void *arg __attribute__((__unused__))); \
+ void setup_loop_##name(void *arg __attribute__((__unused__)))
+
+#define ISC_LOOP_TEARDOWN_IMPL(name) \
+ void teardown_loop_##name(void *arg __attribute__((__unused__))); \
+ void teardown_loop_##name(void *arg __attribute__((__unused__)))
+
+#define ISC_TEST_DECLARE(name) void run_test_##name(void **state);
+
#define ISC_TEST_LIST_START const struct CMUnitTest tests[] = {
#define ISC_TEST_LIST_END \
} \
#include <isc/buffer.h>
#include <isc/hash.h>
#include <isc/log.h>
+#include <isc/loop.h>
#include <isc/mem.h>
#include <isc/result.h>
#include <isc/string.h>
.description = desc, .lineno = __LINE__ \
}
-#define CHECK(r) \
- do { \
- result = (r); \
- if (result != ISC_R_SUCCESS) \
- goto cleanup; \
- } while (0)
-
extern dns_dispatchmgr_t *dispatchmgr;
-extern ns_clientmgr_t *clientmgr;
extern ns_interfacemgr_t *interfacemgr;
extern ns_server_t *sctx;
+extern atomic_uint_fast32_t client_refs[32];
+extern atomic_uintptr_t client_addrs[32];
+
#ifdef NETMGR_TRACE
#define FLARG \
, const char *file __attribute__((unused)), \
setup_server(void **state);
int
teardown_server(void **state);
+void
+shutdown_interfacemgr(void *arg __attribute__((unused)));
/*%
* Load data for zone "zonename" from file "filename" and start serving it to
md_test \
mem_test \
netaddr_test \
- netmgr_test \
parse_test \
quota_test \
radix_test \
$(LDADD) \
$(OPENSSL_LIBS)
-netmgr_test_CPPFLAGS = \
- $(AM_CPPFLAGS) \
- $(OPENSSL_CFLAGS)
-
-netmgr_test_LDADD = \
- $(LDADD) \
- $(OPENSSL_LIBS)
-
-netmgr_test_SOURCES = \
- netmgr_test.c \
- uv_wrap.h
-
random_test_LDADD = \
$(LDADD) \
-lm
#define UNIT_TESTING
#include <cmocka.h>
+#include <isc/async.h>
#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/condition.h>
#include "netmgr/http.c"
#include "netmgr/netmgr-int.h"
#include "netmgr/socket.c"
-#include "netmgr_p.h"
#include <tests/isc.h>
static atomic_int_fast64_t ctimeouts = 0;
static atomic_int_fast64_t total_sends = 0;
-static atomic_bool was_error = false;
+static int expected_ssends;
+static int expected_sreads;
+static int expected_csends;
+static int expected_cconnects;
+static int expected_creads;
+static int expected_ctimeouts;
+
+#define have_expected_ssends(v) ((v) >= expected_ssends && expected_ssends >= 0)
+#define have_expected_sreads(v) ((v) >= expected_sreads && expected_sreads >= 0)
+#define have_expected_csends(v) ((v) >= expected_csends && expected_csends >= 0)
+#define have_expected_cconnects(v) \
+ ((v) >= expected_cconnects && expected_cconnects >= 0)
+#define have_expected_creads(v) ((v) >= expected_creads && expected_creads >= 0)
+#define have_expected_ctimeouts(v) \
+ ((v) >= expected_ctimeouts && expected_ctimeouts >= 0)
+
+static atomic_bool test_was_error = false;
-static bool reuse_supported = true;
static bool noanswer = false;
static atomic_bool POST = true;
static isc_nm_http_endpoints_t *endpoints = NULL;
-static bool skip_long_tests = false;
+static isc_nm_t **nm = NULL;
/* Timeout for soft-timeout tests (0.05 seconds) */
#define T_SOFT 50
#define X(v)
#endif
-#define SKIP_IN_CI \
- if (skip_long_tests) { \
- skip(); \
- return; \
- }
-
typedef struct csdata {
+ isc_mem_t *mctx;
isc_nm_recv_cb_t reply_cb;
void *cb_arg;
isc_region_t region;
(void)atomic_fetch_sub(&active_cconnects, 1);
memmove(&data, arg, sizeof(data));
- isc_mem_put(handle->sock->mgr->mctx, arg, sizeof(data));
+ isc_mem_put(data.mctx, arg, sizeof(data));
if (result != ISC_R_SUCCESS) {
goto error;
}
goto error;
}
- isc_mem_put(handle->sock->mgr->mctx, data.region.base,
- data.region.length);
+ isc_mem_putanddetach(&data.mctx, data.region.base, data.region.length);
return;
error:
data.reply_cb(handle, result, NULL, data.cb_arg);
- isc_mem_put(handle->sock->mgr->mctx, data.region.base,
- data.region.length);
+ isc_mem_putanddetach(&data.mctx, data.region.base, data.region.length);
if (result == ISC_R_TOOMANYOPENFILES) {
atomic_store(&slowdown, true);
} else {
- atomic_store(&was_error, true);
+ atomic_store(&test_was_error, true);
}
}
memmove(copy.base, region->base, region->length);
data = isc_mem_get(mgr->mctx, sizeof(*data));
*data = (csdata_t){ .reply_cb = cb, .cb_arg = cbarg, .region = copy };
+ isc_mem_attach(mgr->mctx, &data->mctx);
if (tls) {
ctx = client_tlsctx;
}
close(fd);
return (-1);
}
- if (result == ISC_R_NOTIMPLEMENTED) {
- reuse_supported = false;
- }
#if IPV6_RECVERR
#define setsockopt_on(socket, level, name) \
static int
setup_test(void **state) {
char *env_workers = getenv("ISC_TASK_WORKERS");
- size_t nworkers;
uv_os_sock_t tcp_listen_sock = -1;
- isc_nm_t **nm = NULL;
tcp_listen_addr = (isc_sockaddr_t){ .length = 0 };
tcp_listen_sock = setup_ephemeral_port(&tcp_listen_addr, SOCK_STREAM);
workers = isc_os_ncpus();
}
INSIST(workers > 0);
- nworkers = ISC_MAX(ISC_MIN(workers, 32), 1);
-
- if (!reuse_supported || getenv("CI") != NULL) {
- skip_long_tests = true;
- }
atomic_store(&total_sends, NSENDS * NWRITES);
atomic_store(&nsends, atomic_load(&total_sends));
atomic_store(&ctimeouts, 0);
atomic_store(&active_cconnects, 0);
- atomic_store(&was_error, false);
+ expected_cconnects = -1;
+ expected_csends = -1;
+ expected_creads = -1;
+ expected_sreads = -1;
+ expected_ssends = -1;
+ expected_ctimeouts = -1;
+
+ atomic_store(&test_was_error, false);
atomic_store(&POST, false);
atomic_store(&use_TLS, false);
return (-1);
}
+ setup_loopmgr(state);
+
nm = isc_mem_get(mctx, MAX_NM * sizeof(nm[0]));
for (size_t i = 0; i < MAX_NM; i++) {
- isc__netmgr_create(mctx, nworkers, &nm[i]);
+ isc_netmgr_create(mctx, loopmgr, &nm[i]);
assert_non_null(nm[i]);
}
}
static int
-teardown_test(void **state) {
- isc_nm_t **nm = (isc_nm_t **)*state;
-
+teardown_test(void **state __attribute__((__unused__))) {
for (size_t i = 0; i < MAX_NM; i++) {
- isc__netmgr_destroy(&nm[i]);
+ isc_netmgr_destroy(&nm[i]);
assert_null(nm[i]);
}
isc_mem_put(mctx, nm, MAX_NM * sizeof(nm[0]));
+ teardown_loopmgr(state);
+
if (server_tlsctx != NULL) {
isc_tlsctx_free(&server_tlsctx);
}
if (eresult == ISC_R_SUCCESS) {
(void)atomic_fetch_sub(&nsends, 1);
- atomic_fetch_add(&csends, 1);
- atomic_fetch_add(&creads, 1);
+ if (have_expected_csends(atomic_fetch_add(&csends, 1) + 1) ||
+ have_expected_creads(atomic_fetch_add(&creads, 1) + 1))
+ {
+ isc_loopmgr_shutdown(loopmgr);
+ }
} else {
/* We failed to connect; try again */
- atomic_store(&was_error, true);
+ atomic_store(&test_was_error, true);
+ isc_loopmgr_shutdown(loopmgr);
}
}
assert_non_null(handle);
if (eresult != ISC_R_SUCCESS) {
- atomic_store(&was_error, true);
+ atomic_store(&test_was_error, true);
return;
}
}
}
-ISC_RUN_TEST_IMPL(mock_doh_uv_tcp_bind) {
- isc_nm_t **nm = (isc_nm_t **)*state;
+ISC_LOOP_TEST_IMPL(mock_doh_uv_tcp_bind) {
isc_nm_t *listen_nm = nm[0];
isc_result_t result = ISC_R_SUCCESS;
isc_nmsocket_t *listen_sock = NULL;
assert_null(listen_sock);
RESET_RETURN;
+
+ isc_loopmgr_shutdown(loopmgr);
}
static void
-doh_noop(void **state) {
- isc_nm_t **nm = (isc_nm_t **)*state;
+doh_noop(void *arg __attribute__((__unused__))) {
isc_nm_t *listen_nm = nm[0];
isc_nm_t *connect_nm = nm[1];
isc_result_t result = ISC_R_SUCCESS;
.length = send_msg.len },
noop_read_cb, NULL, atomic_load(&use_TLS), 30000);
- isc__netmgr_shutdown(connect_nm);
+ isc_loopmgr_shutdown(loopmgr);
assert_int_equal(0, atomic_load(&csends));
assert_int_equal(0, atomic_load(&creads));
assert_int_equal(0, atomic_load(&ssends));
}
-ISC_RUN_TEST_IMPL(doh_noop_POST) {
+ISC_LOOP_TEST_IMPL(doh_noop_POST) {
atomic_store(&POST, true);
- doh_noop(state);
+ doh_noop(arg);
}
-ISC_RUN_TEST_IMPL(doh_noop_GET) {
+ISC_LOOP_TEST_IMPL(doh_noop_GET) {
atomic_store(&POST, false);
- doh_noop(state);
+ doh_noop(arg);
}
static void
-doh_noresponse(void **state) {
- isc_nm_t **nm = (isc_nm_t **)*state;
+doh_noresponse(void *arg __attribute__((__unused__))) {
isc_nm_t *listen_nm = nm[0];
isc_nm_t *connect_nm = nm[1];
isc_result_t result = ISC_R_SUCCESS;
isc_nm_stoplistening(listen_sock);
isc_nmsocket_close(&listen_sock);
assert_null(listen_sock);
- isc__netmgr_shutdown(connect_nm);
+ isc_loopmgr_shutdown(loopmgr);
}
-ISC_RUN_TEST_IMPL(doh_noresponse_POST) {
+ISC_LOOP_TEST_IMPL(doh_noresponse_POST) {
atomic_store(&POST, true);
- doh_noresponse(state);
+ doh_noresponse(arg);
}
-ISC_RUN_TEST_IMPL(doh_noresponse_GET) {
+ISC_LOOP_TEST_IMPL(doh_noresponse_GET) {
atomic_store(&POST, false);
- doh_noresponse(state);
+ doh_noresponse(arg);
}
static void
static void
timeout_retry_cb(isc_nmhandle_t *handle, isc_result_t eresult,
- isc_region_t *region, void *arg) {
- UNUSED(region);
- UNUSED(arg);
-
+ isc_region_t *region __attribute__((__unused__)),
+ void *arg __attribute__((__unused__))) {
assert_non_null(handle);
atomic_fetch_add(&ctimeouts, 1);
}
isc_nmhandle_detach(&handle);
+ isc_loopmgr_shutdown(loopmgr);
}
static void
return;
error:
- atomic_store(&was_error, true);
+ atomic_store(&test_was_error, true);
+}
+
+static void
+listen_sock_close(void *arg) {
+ isc_nmsocket_t *listen_sock = arg;
+
+ isc_nm_stoplistening(listen_sock);
+ isc_nmsocket_close(&listen_sock);
+ assert_null(listen_sock);
}
static void
-doh_timeout_recovery(void **state) {
- isc_nm_t **nm = (isc_nm_t **)*state;
+doh_timeout_recovery(void *arg __attribute__((__unused__))) {
isc_nm_t *listen_nm = nm[0];
+ isc_nmsocket_t *listen_sock = NULL;
isc_nm_t *connect_nm = nm[1];
isc_result_t result = ISC_R_SUCCESS;
- isc_nmsocket_t *listen_sock = NULL;
isc_tlsctx_t *ctx = atomic_load(&use_TLS) ? server_tlsctx : NULL;
char req_url[256];
&tcp_listen_addr, 0, NULL, NULL, endpoints,
0, &listen_sock);
assert_int_equal(result, ISC_R_SUCCESS);
+ isc_loop_teardown(mainloop, listen_sock_close, listen_sock);
/*
* Accept connections but don't send responses, forcing client
isc_nm_httpconnect(connect_nm, NULL, &tcp_listen_addr, req_url,
atomic_load(&POST), timeout_request_cb, NULL, ctx,
client_sess_cache, T_SOFT);
+}
- /*
- * Sleep until sends reaches 5.
- */
- for (size_t i = 0; i < 1000; i++) {
- if (atomic_load(&ctimeouts) == 5) {
- break;
- }
- isc_test_nap(1);
- }
+static int
+doh_timeout_recovery_teardown(void **state) {
assert_true(atomic_load(&ctimeouts) == 5);
-
- isc_nm_stoplistening(listen_sock);
- isc_nmsocket_close(&listen_sock);
- assert_null(listen_sock);
- isc__netmgr_shutdown(connect_nm);
+ return (teardown_test(state));
}
-ISC_RUN_TEST_IMPL(doh_timeout_recovery_POST) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_timeout_recovery_POST) {
atomic_store(&POST, true);
- doh_timeout_recovery(state);
+ doh_timeout_recovery(arg);
}
-ISC_RUN_TEST_IMPL(doh_timeout_recovery_GET) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_timeout_recovery_GET) {
atomic_store(&POST, false);
- doh_timeout_recovery(state);
+ doh_timeout_recovery(arg);
}
+static void
+doh_connect_thread(void *arg);
+
static void
doh_receive_send_reply_cb(isc_nmhandle_t *handle, isc_result_t eresult,
isc_region_t *region, void *cbarg) {
isc_nmhandle_t *thandle = NULL;
+ isc_nm_t *connect_nm = (isc_nm_t *)cbarg;
+
assert_non_null(handle);
UNUSED(region);
int_fast64_t sends = atomic_fetch_sub(&nsends, 1);
atomic_fetch_add(&csends, 1);
atomic_fetch_add(&creads, 1);
- if (sends > 0 && cbarg == NULL) {
+ if (sends > 0 && connect_nm != NULL) {
size_t i;
for (i = 0; i < NWRITES / 2; i++) {
eresult = isc__nm_http_request(
&(isc_region_t){
.base = (uint8_t *)send_msg.base,
.length = send_msg.len },
- doh_receive_send_reply_cb, (void *)1);
+ doh_receive_send_reply_cb, NULL);
if (eresult == ISC_R_CANCELED) {
break;
}
assert_true(eresult == ISC_R_SUCCESS);
}
+
+ isc_job_run(loopmgr, doh_connect_thread, connect_nm);
+ }
+ if (sends <= 0) {
+ isc_loopmgr_shutdown(loopmgr);
}
} else {
- atomic_store(&was_error, true);
+ atomic_store(&test_was_error, true);
+ isc_loopmgr_shutdown(loopmgr);
}
isc_nmhandle_detach(&thandle);
}
-static isc_threadresult_t
-doh_connect_thread(isc_threadarg_t arg) {
+static void
+doh_connect_thread(void *arg) {
isc_nm_t *connect_nm = (isc_nm_t *)arg;
char req_url[256];
int64_t sends = atomic_load(&nsends);
sockaddr_to_url(&tcp_listen_addr, atomic_load(&use_TLS), req_url,
sizeof(req_url), ISC_NM_HTTP_DEFAULT_PATH);
- while (sends > 0) {
- /*
- * We need to back off and slow down if we start getting
- * errors, to prevent a thundering herd problem.
- */
- int_fast64_t active = atomic_fetch_add(&active_cconnects, 1);
- if (atomic_load(&slowdown) || active > workers) {
- isc_test_nap(active - workers);
- atomic_store(&slowdown, false);
- }
- connect_send_request(
- connect_nm, req_url, atomic_load(&POST),
- &(isc_region_t){ .base = (uint8_t *)send_msg.base,
- .length = send_msg.len },
- doh_receive_send_reply_cb, NULL, atomic_load(&use_TLS),
- 30000);
- sends = atomic_load(&nsends);
+ /*
+ * We need to back off and slow down if we start getting
+ * errors, to prevent a thundering herd problem.
+ */
+ int_fast64_t active = atomic_fetch_add(&active_cconnects, 1);
+ if (atomic_load(&slowdown) || active > workers) {
+ goto next;
}
+ connect_send_request(connect_nm, req_url, atomic_load(&POST),
+ &(isc_region_t){ .base = (uint8_t *)send_msg.base,
+ .length = send_msg.len },
+ doh_receive_send_reply_cb, connect_nm,
+ atomic_load(&use_TLS), 30000);
- return ((isc_threadresult_t)0);
+ if (sends <= 0) {
+ isc_loopmgr_shutdown(loopmgr);
+ }
+
+next : {}
}
static void
-doh_recv_one(void **state) {
- isc_nm_t **nm = (isc_nm_t **)*state;
+doh_recv_one(void *arg __attribute__((__unused__))) {
isc_nm_t *listen_nm = nm[0];
isc_nm_t *connect_nm = nm[1];
isc_result_t result = ISC_R_SUCCESS;
isc_quota_t *quotap = init_listener_quota(workers);
atomic_store(&total_sends, 1);
+ expected_creads = 1;
atomic_store(&nsends, atomic_load(&total_sends));
doh_receive_reply_cb, NULL, atomic_load(&use_TLS),
30000);
- while (atomic_load(&nsends) > 0) {
- if (atomic_load(&was_error)) {
- break;
- }
- isc_thread_yield();
- }
-
- while (atomic_load(&ssends) != 1 || atomic_load(&sreads) != 1 ||
- atomic_load(&csends) != 1)
- {
- if (atomic_load(&was_error)) {
- break;
- }
- isc_thread_yield();
- }
-
- isc_nm_stoplistening(listen_sock);
- isc_nmsocket_close(&listen_sock);
- assert_null(listen_sock);
- isc__netmgr_shutdown(connect_nm);
+ isc_loop_teardown(mainloop, listen_sock_close, listen_sock);
+}
+static int
+doh_recv_one_teardown(void **state) {
X(total_sends);
X(csends);
X(creads);
assert_int_equal(atomic_load(&creads), 1);
assert_int_equal(atomic_load(&sreads), 1);
assert_int_equal(atomic_load(&ssends), 1);
-}
-ISC_RUN_TEST_IMPL(doh_recv_one_POST) {
- SKIP_IN_CI;
+ return (teardown_test(state));
+}
+ISC_LOOP_TEST_IMPL(doh_recv_one_POST) {
atomic_store(&POST, true);
- doh_recv_one(state);
+ doh_recv_one(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_one_GET) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_one_GET) {
atomic_store(&POST, false);
- doh_recv_one(state);
+ doh_recv_one(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_one_POST_TLS) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_one_POST_TLS) {
atomic_store(&use_TLS, true);
atomic_store(&POST, true);
- doh_recv_one(state);
+ doh_recv_one(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_one_GET_TLS) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_one_GET_TLS) {
atomic_store(&use_TLS, true);
atomic_store(&POST, false);
- doh_recv_one(state);
+ doh_recv_one(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_one_POST_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_one_POST_quota) {
atomic_store(&POST, true);
atomic_store(&check_listener_quota, true);
- doh_recv_one(state);
+ doh_recv_one(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_one_GET_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_one_GET_quota) {
atomic_store(&POST, false);
atomic_store(&check_listener_quota, true);
- doh_recv_one(state);
+ doh_recv_one(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_one_POST_TLS_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_one_POST_TLS_quota) {
atomic_store(&use_TLS, true);
atomic_store(&POST, true);
atomic_store(&check_listener_quota, true);
- doh_recv_one(state);
+ doh_recv_one(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_one_GET_TLS_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_one_GET_TLS_quota) {
atomic_store(&use_TLS, true);
atomic_store(&POST, false);
atomic_store(&check_listener_quota, true);
- doh_recv_one(state);
+ doh_recv_one(arg);
}
static void
}
return;
error:
- atomic_store(&was_error, true);
+ atomic_store(&test_was_error, true);
}
static void
-doh_recv_two(void **state) {
- isc_nm_t **nm = (isc_nm_t **)*state;
+doh_recv_two(void *arg __attribute__((__unused__))) {
isc_nm_t *listen_nm = nm[0];
isc_nm_t *connect_nm = nm[1];
isc_result_t result = ISC_R_SUCCESS;
isc_quota_t *quotap = init_listener_quota(workers);
atomic_store(&total_sends, 2);
+ expected_creads = 2;
atomic_store(&nsends, atomic_load(&total_sends));
atomic_load(&POST), doh_connect_send_two_requests_cb,
NULL, ctx, client_sess_cache, 5000);
- while (atomic_load(&nsends) > 0) {
- if (atomic_load(&was_error)) {
- break;
- }
- isc_thread_yield();
- }
-
- while (atomic_load(&ssends) != 2 || atomic_load(&sreads) != 2 ||
- atomic_load(&csends) != 2)
- {
- if (atomic_load(&was_error)) {
- break;
- }
- isc_thread_yield();
- }
-
- isc_nm_stoplistening(listen_sock);
- isc_nmsocket_close(&listen_sock);
- assert_null(listen_sock);
- isc__netmgr_shutdown(connect_nm);
+ isc_loop_teardown(mainloop, listen_sock_close, listen_sock);
+}
+static int
+doh_recv_two_teardown(void **state) {
X(total_sends);
X(csends);
X(creads);
assert_int_equal(atomic_load(&creads), 2);
assert_int_equal(atomic_load(&sreads), 2);
assert_int_equal(atomic_load(&ssends), 2);
-}
-ISC_RUN_TEST_IMPL(doh_recv_two_POST) {
- SKIP_IN_CI;
+ return (teardown_test(state));
+}
+ISC_LOOP_TEST_IMPL(doh_recv_two_POST) {
atomic_store(&POST, true);
- doh_recv_two(state);
+ doh_recv_two(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_two_GET) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_two_GET) {
atomic_store(&POST, false);
- doh_recv_two(state);
+ doh_recv_two(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_two_POST_TLS) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_two_POST_TLS) {
atomic_store(&use_TLS, true);
atomic_store(&POST, true);
- doh_recv_two(state);
+ doh_recv_two(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_two_GET_TLS) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_two_GET_TLS) {
atomic_store(&use_TLS, true);
atomic_store(&POST, false);
- doh_recv_two(state);
+ doh_recv_two(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_two_POST_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_two_POST_quota) {
atomic_store(&POST, true);
atomic_store(&check_listener_quota, true);
- doh_recv_two(state);
+ doh_recv_two(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_two_GET_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_two_GET_quota) {
atomic_store(&POST, false);
atomic_store(&check_listener_quota, true);
- doh_recv_two(state);
+ doh_recv_two(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_two_POST_TLS_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_two_POST_TLS_quota) {
atomic_store(&use_TLS, true);
atomic_store(&POST, true);
atomic_store(&check_listener_quota, true);
- doh_recv_two(state);
+ doh_recv_two(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_two_GET_TLS_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_two_GET_TLS_quota) {
atomic_store(&use_TLS, true);
atomic_store(&POST, false);
atomic_store(&check_listener_quota, true);
- doh_recv_two(state);
+ doh_recv_two(arg);
}
static void
-doh_recv_send(void **state) {
- isc_nm_t **nm = (isc_nm_t **)*state;
+doh_recv_send(void *arg __attribute__((__unused__))) {
isc_nm_t *listen_nm = nm[0];
isc_nm_t *connect_nm = nm[1];
isc_result_t result = ISC_R_SUCCESS;
isc_nmsocket_t *listen_sock = NULL;
- size_t nthreads = ISC_MAX(ISC_MIN(workers, 32), 1);
- isc_thread_t threads[32] = { 0 };
+ size_t nthreads = isc_loopmgr_nloops(loopmgr);
isc_quota_t *quotap = init_listener_quota(workers);
+ atomic_store(&total_sends, 1000);
+ atomic_store(&nsends, 1000);
+
result = isc_nm_http_endpoints_add(endpoints, ISC_NM_HTTP_DEFAULT_PATH,
doh_receive_request_cb, NULL);
assert_int_equal(result, ISC_R_SUCCESS);
assert_int_equal(result, ISC_R_SUCCESS);
for (size_t i = 0; i < nthreads; i++) {
- isc_thread_create(doh_connect_thread, connect_nm, &threads[i]);
- }
-
- /* wait for the all responses from the server */
- while (atomic_load(&ssends) < atomic_load(&total_sends)) {
- if (atomic_load(&was_error)) {
- break;
- }
- isc_test_nap(1);
+ isc_async_run(isc_loop_get(loopmgr, i), doh_connect_thread,
+ connect_nm);
}
- for (size_t i = 0; i < nthreads; i++) {
- isc_thread_join(threads[i], NULL);
- }
+ isc_loop_teardown(mainloop, listen_sock_close, listen_sock);
+}
- isc__netmgr_shutdown(connect_nm);
- isc_nm_stoplistening(listen_sock);
- isc_nmsocket_close(&listen_sock);
- assert_null(listen_sock);
+static int
+doh_recv_send_teardown(void **state) {
+ int res = teardown_test(state);
X(total_sends);
X(csends);
CHECK_RANGE_FULL(creads);
CHECK_RANGE_FULL(sreads);
CHECK_RANGE_FULL(ssends);
-}
-
-ISC_RUN_TEST_IMPL(doh_recv_send_POST) {
- SKIP_IN_CI;
-
- atomic_store(&POST, true);
- doh_recv_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_recv_send_GET) {
- SKIP_IN_CI;
-
- atomic_store(&POST, false);
- doh_recv_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_recv_send_POST_TLS) {
- SKIP_IN_CI;
-
- atomic_store(&POST, true);
- atomic_store(&use_TLS, true);
- doh_recv_send(state);
-}
-ISC_RUN_TEST_IMPL(doh_recv_send_GET_TLS) {
- SKIP_IN_CI;
-
- atomic_store(&POST, false);
- atomic_store(&use_TLS, true);
- doh_recv_send(state);
+ return (res);
}
-ISC_RUN_TEST_IMPL(doh_recv_send_POST_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_send_POST) {
atomic_store(&POST, true);
- atomic_store(&check_listener_quota, true);
- doh_recv_send(state);
+ doh_recv_send(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_send_GET_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_send_GET) {
atomic_store(&POST, false);
- atomic_store(&check_listener_quota, true);
- doh_recv_send(state);
+ doh_recv_send(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_send_POST_TLS_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_send_POST_TLS) {
atomic_store(&POST, true);
atomic_store(&use_TLS, true);
- atomic_store(&check_listener_quota, true);
- doh_recv_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_recv_send_GET_TLS_quota) {
- SKIP_IN_CI;
-
- atomic_store(&POST, false);
- atomic_store(&use_TLS, true);
- atomic_store(&check_listener_quota, true);
- doh_recv_send(state);
-}
-
-static void
-doh_recv_half_send(void **state) {
- isc_nm_t **nm = (isc_nm_t **)*state;
- isc_nm_t *listen_nm = nm[0];
- isc_nm_t *connect_nm = nm[1];
- isc_result_t result = ISC_R_SUCCESS;
- isc_nmsocket_t *listen_sock = NULL;
- size_t nthreads = ISC_MAX(ISC_MIN(workers, 32), 1);
- isc_thread_t threads[32] = { 0 };
- isc_quota_t *quotap = init_listener_quota(workers);
-
- atomic_store(&total_sends, atomic_load(&total_sends) / 2);
-
- atomic_store(&nsends, atomic_load(&total_sends));
-
- result = isc_nm_http_endpoints_add(endpoints, ISC_NM_HTTP_DEFAULT_PATH,
- doh_receive_request_cb, NULL);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- result = isc_nm_listenhttp(listen_nm, ISC_NM_LISTEN_ALL,
- &tcp_listen_addr, 0, quotap,
- atomic_load(&use_TLS) ? server_tlsctx : NULL,
- endpoints, 0, &listen_sock);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- for (size_t i = 0; i < nthreads; i++) {
- isc_thread_create(doh_connect_thread, connect_nm, &threads[i]);
- }
-
- while (atomic_load(&nsends) > 0) {
- isc_thread_yield();
- }
-
- isc__netmgr_shutdown(connect_nm);
-
- for (size_t i = 0; i < nthreads; i++) {
- isc_thread_join(threads[i], NULL);
- }
-
- isc_nm_stoplistening(listen_sock);
- isc_nmsocket_close(&listen_sock);
- assert_null(listen_sock);
-
- X(total_sends);
- X(csends);
- X(creads);
- X(sreads);
- X(ssends);
-
- CHECK_RANGE_HALF(csends);
- CHECK_RANGE_HALF(creads);
- CHECK_RANGE_HALF(sreads);
- CHECK_RANGE_HALF(ssends);
-}
-
-ISC_RUN_TEST_IMPL(doh_recv_half_send_POST) {
- SKIP_IN_CI;
-
- atomic_store(&POST, true);
- doh_recv_half_send(state);
+ doh_recv_send(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_half_send_GET) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_send_GET_TLS) {
atomic_store(&POST, false);
- doh_recv_half_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_recv_half_send_POST_TLS) {
- SKIP_IN_CI;
-
- atomic_store(&use_TLS, true);
- atomic_store(&POST, true);
- doh_recv_half_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_recv_half_send_GET_TLS) {
- SKIP_IN_CI;
-
atomic_store(&use_TLS, true);
- atomic_store(&POST, false);
- doh_recv_half_send(state);
+ doh_recv_send(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_half_send_POST_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_send_POST_quota) {
atomic_store(&POST, true);
atomic_store(&check_listener_quota, true);
- doh_recv_half_send(state);
+ doh_recv_send(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_half_send_GET_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_send_GET_quota) {
atomic_store(&POST, false);
atomic_store(&check_listener_quota, true);
- doh_recv_half_send(state);
+ doh_recv_send(arg);
}
-ISC_RUN_TEST_IMPL(doh_recv_half_send_POST_TLS_quota) {
- SKIP_IN_CI;
-
- atomic_store(&use_TLS, true);
+ISC_LOOP_TEST_IMPL(doh_recv_send_POST_TLS_quota) {
atomic_store(&POST, true);
- atomic_store(&check_listener_quota, true);
- doh_recv_half_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_recv_half_send_GET_TLS_quota) {
- SKIP_IN_CI;
-
atomic_store(&use_TLS, true);
- atomic_store(&POST, false);
atomic_store(&check_listener_quota, true);
- doh_recv_half_send(state);
+ doh_recv_send(arg);
}
-static void
-doh_half_recv_send(void **state) {
- isc_nm_t **nm = (isc_nm_t **)*state;
- isc_nm_t *listen_nm = nm[0];
- isc_nm_t *connect_nm = nm[1];
- isc_result_t result = ISC_R_SUCCESS;
- isc_nmsocket_t *listen_sock = NULL;
- size_t nthreads = ISC_MAX(ISC_MIN(workers, 32), 1);
- isc_thread_t threads[32] = { 0 };
- isc_quota_t *quotap = init_listener_quota(workers);
-
- atomic_store(&total_sends, atomic_load(&total_sends) / 2);
-
- atomic_store(&nsends, atomic_load(&total_sends));
-
- result = isc_nm_http_endpoints_add(endpoints, ISC_NM_HTTP_DEFAULT_PATH,
- doh_receive_request_cb, NULL);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- result = isc_nm_listenhttp(listen_nm, ISC_NM_LISTEN_ALL,
- &tcp_listen_addr, 0, quotap,
- atomic_load(&use_TLS) ? server_tlsctx : NULL,
- endpoints, 0, &listen_sock);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- for (size_t i = 0; i < nthreads; i++) {
- isc_thread_create(doh_connect_thread, connect_nm, &threads[i]);
- }
-
- while (atomic_load(&nsends) > 0) {
- isc_thread_yield();
- }
-
- isc_nm_stoplistening(listen_sock);
- isc_nmsocket_close(&listen_sock);
- assert_null(listen_sock);
-
- for (size_t i = 0; i < nthreads; i++) {
- isc_thread_join(threads[i], NULL);
- }
-
- isc__netmgr_shutdown(connect_nm);
-
- X(total_sends);
- X(csends);
- X(creads);
- X(sreads);
- X(ssends);
-
- CHECK_RANGE_HALF(csends);
- CHECK_RANGE_HALF(creads);
- CHECK_RANGE_HALF(sreads);
- CHECK_RANGE_HALF(ssends);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_send_POST) {
- SKIP_IN_CI;
-
- atomic_store(&POST, true);
- doh_half_recv_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_send_GET) {
- SKIP_IN_CI;
-
- atomic_store(&POST, false);
- doh_half_recv_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_send_POST_TLS) {
- SKIP_IN_CI;
-
- atomic_store(&use_TLS, true);
- atomic_store(&POST, true);
- doh_half_recv_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_send_GET_TLS) {
- SKIP_IN_CI;
-
- atomic_store(&use_TLS, true);
- atomic_store(&POST, false);
- doh_half_recv_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_send_POST_quota) {
- SKIP_IN_CI;
-
- atomic_store(&POST, true);
- atomic_store(&check_listener_quota, true);
- doh_half_recv_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_send_GET_quota) {
- SKIP_IN_CI;
-
+ISC_LOOP_TEST_IMPL(doh_recv_send_GET_TLS_quota) {
atomic_store(&POST, false);
- atomic_store(&check_listener_quota, true);
- doh_half_recv_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_send_POST_TLS_quota) {
- SKIP_IN_CI;
-
atomic_store(&use_TLS, true);
- atomic_store(&POST, true);
atomic_store(&check_listener_quota, true);
- doh_half_recv_send(state);
+ doh_recv_send(arg);
}
-ISC_RUN_TEST_IMPL(doh_half_recv_send_GET_TLS_quota) {
- SKIP_IN_CI;
-
- atomic_store(&use_TLS, true);
- atomic_store(&POST, false);
- atomic_store(&check_listener_quota, true);
- doh_half_recv_send(state);
-}
-
-static void
-doh_half_recv_half_send(void **state) {
- isc_nm_t **nm = (isc_nm_t **)*state;
- isc_nm_t *listen_nm = nm[0];
- isc_nm_t *connect_nm = nm[1];
- isc_result_t result = ISC_R_SUCCESS;
- isc_nmsocket_t *listen_sock = NULL;
- size_t nthreads = ISC_MAX(ISC_MIN(workers, 32), 1);
- isc_thread_t threads[32] = { 0 };
- isc_quota_t *quotap = init_listener_quota(workers);
-
- atomic_store(&total_sends, atomic_load(&total_sends) / 2);
-
- atomic_store(&nsends, atomic_load(&total_sends));
-
- result = isc_nm_http_endpoints_add(endpoints, ISC_NM_HTTP_DEFAULT_PATH,
- doh_receive_request_cb, NULL);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- result = isc_nm_listenhttp(listen_nm, ISC_NM_LISTEN_ALL,
- &tcp_listen_addr, 0, quotap,
- atomic_load(&use_TLS) ? server_tlsctx : NULL,
- endpoints, 0, &listen_sock);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- for (size_t i = 0; i < nthreads; i++) {
- isc_thread_create(doh_connect_thread, connect_nm, &threads[i]);
- }
-
- while (atomic_load(&nsends) > 0) {
- isc_thread_yield();
- }
-
- isc__netmgr_shutdown(connect_nm);
- isc_nm_stoplistening(listen_sock);
- isc_nmsocket_close(&listen_sock);
- assert_null(listen_sock);
-
- for (size_t i = 0; i < nthreads; i++) {
- isc_thread_join(threads[i], NULL);
- }
-
+static int
+doh_bad_connect_uri_teardown(void **state) {
X(total_sends);
X(csends);
X(creads);
X(sreads);
X(ssends);
- CHECK_RANGE_HALF(csends);
- CHECK_RANGE_HALF(creads);
- CHECK_RANGE_HALF(sreads);
- CHECK_RANGE_HALF(ssends);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_half_send_POST) {
- SKIP_IN_CI;
-
- atomic_store(&POST, true);
- doh_half_recv_half_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_half_send_GET) {
- SKIP_IN_CI;
-
- atomic_store(&POST, false);
- doh_half_recv_half_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_half_send_POST_TLS) {
- SKIP_IN_CI;
-
- atomic_store(&use_TLS, true);
- atomic_store(&POST, true);
- doh_half_recv_half_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_half_send_GET_TLS) {
- SKIP_IN_CI;
-
- atomic_store(&use_TLS, true);
- atomic_store(&POST, false);
- doh_half_recv_half_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_half_send_POST_quota) {
- SKIP_IN_CI;
-
- atomic_store(&POST, true);
- atomic_store(&check_listener_quota, true);
- doh_half_recv_half_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_half_send_GET_quota) {
- SKIP_IN_CI;
-
- atomic_store(&POST, false);
- atomic_store(&check_listener_quota, true);
- doh_half_recv_half_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_half_send_POST_TLS_quota) {
- SKIP_IN_CI;
-
- atomic_store(&use_TLS, true);
- atomic_store(&POST, true);
- atomic_store(&check_listener_quota, true);
- doh_half_recv_half_send(state);
-}
-
-ISC_RUN_TEST_IMPL(doh_half_recv_half_send_GET_TLS_quota) {
- SKIP_IN_CI;
+ /* As we used an ill-formed URI, there ought to be an error. */
+ assert_true(atomic_load(&test_was_error));
+ assert_int_equal(atomic_load(&csends), 0);
+ assert_int_equal(atomic_load(&creads), 0);
+ assert_int_equal(atomic_load(&sreads), 0);
+ assert_int_equal(atomic_load(&ssends), 0);
- atomic_store(&use_TLS, true);
- atomic_store(&POST, false);
- atomic_store(&check_listener_quota, true);
- doh_half_recv_half_send(state);
+ return (teardown_test(state));
}
/* See: GL #2858, !5319 */
-ISC_RUN_TEST_IMPL(doh_bad_connect_uri) {
- isc_nm_t **nm = (isc_nm_t **)*state;
+ISC_LOOP_TEST_IMPL(doh_bad_connect_uri) {
isc_nm_t *listen_nm = nm[0];
isc_nm_t *connect_nm = nm[1];
isc_result_t result = ISC_R_SUCCESS;
.length = send_msg.len },
doh_receive_reply_cb, NULL, true, 30000);
- while (atomic_load(&nsends) > 0) {
- if (atomic_load(&was_error)) {
- break;
- }
- isc_thread_yield();
- }
-
- isc_nm_stoplistening(listen_sock);
- isc_nmsocket_close(&listen_sock);
- assert_null(listen_sock);
- isc__netmgr_shutdown(connect_nm);
-
- X(total_sends);
- X(csends);
- X(creads);
- X(sreads);
- X(ssends);
-
- /* As we used an ill-formed URI, there ought to be an error. */
- assert_true(atomic_load(&was_error));
- assert_int_equal(atomic_load(&csends), 0);
- assert_int_equal(atomic_load(&creads), 0);
- assert_int_equal(atomic_load(&sreads), 0);
- assert_int_equal(atomic_load(&ssends), 0);
+ isc_loop_teardown(mainloop, listen_sock_close, listen_sock);
}
ISC_RUN_TEST_IMPL(doh_parse_GET_query_string) {
- UNUSED(state);
/* valid */
{
bool ret;
}
ISC_RUN_TEST_IMPL(doh_base64url_to_base64) {
- UNUSED(state);
char *res;
size_t res_len = 0;
/* valid */
ISC_RUN_TEST_IMPL(doh_base64_to_base64url) {
char *res;
size_t res_len = 0;
- UNUSED(state);
/* valid */
{
char res_test[] = "YW55IGNhcm5hbCBwbGVhc3VyZS4";
}
ISC_RUN_TEST_IMPL(doh_path_validation) {
- UNUSED(state);
-
assert_true(isc_nm_http_path_isvalid("/"));
assert_true(isc_nm_http_path_isvalid(ISC_NM_HTTP_DEFAULT_PATH));
assert_false(isc_nm_http_path_isvalid("laaaa"));
struct in_addr localhostv4 = { .s_addr = ntohl(INADDR_LOOPBACK) };
isc_sockaddr_t sa;
char uri[256];
- UNUSED(state);
/* Firstly, test URI generation using isc_sockaddr_t */
isc_sockaddr_fromin(&sa, &localhostv4, 0);
ISC_TEST_ENTRY_CUSTOM(doh_noop_GET, setup_test, teardown_test)
ISC_TEST_ENTRY_CUSTOM(doh_noresponse_POST, setup_test, teardown_test)
ISC_TEST_ENTRY_CUSTOM(doh_noresponse_GET, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_timeout_recovery_POST, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_timeout_recovery_GET, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_TLS, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_TLS, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_TLS_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_TLS_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_TLS, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_TLS, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_TLS_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_TLS_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_TLS, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_TLS, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_TLS_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_TLS_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_GET, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_POST, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_GET_TLS, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_POST_TLS, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_GET_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_POST_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_GET_TLS_quota, setup_test,
- teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_POST_TLS_quota, setup_test,
- teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_GET, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_POST, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_GET_TLS, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_POST_TLS, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_GET_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_POST_quota, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_GET_TLS_quota, setup_test,
- teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_POST_TLS_quota, setup_test,
- teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_GET, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_POST, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_GET_TLS, setup_test,
- teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_POST_TLS, setup_test,
- teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_GET_quota, setup_test,
- teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_POST_quota, setup_test,
- teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_GET_TLS_quota, setup_test,
- teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_POST_TLS_quota, setup_test,
- teardown_test)
-ISC_TEST_ENTRY_CUSTOM(doh_bad_connect_uri, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(doh_timeout_recovery_POST, setup_test,
+ doh_timeout_recovery_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_timeout_recovery_GET, setup_test,
+ doh_timeout_recovery_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST, setup_test, doh_recv_one_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET, setup_test, doh_recv_one_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_TLS, setup_test, doh_recv_one_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_TLS, setup_test, doh_recv_one_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_quota, setup_test,
+ doh_recv_one_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_quota, setup_test, doh_recv_one_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_TLS_quota, setup_test,
+ doh_recv_one_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_TLS_quota, setup_test,
+ doh_recv_one_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST, setup_test, doh_recv_two_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET, setup_test, doh_recv_two_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_TLS, setup_test, doh_recv_two_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_TLS, setup_test, doh_recv_two_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_quota, setup_test,
+ doh_recv_two_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_quota, setup_test, doh_recv_two_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_TLS_quota, setup_test,
+ doh_recv_two_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_TLS_quota, setup_test,
+ doh_recv_two_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET, setup_test, doh_recv_send_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST, setup_test, doh_recv_send_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_TLS, setup_test, doh_recv_send_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_TLS, setup_test,
+ doh_recv_send_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_quota, setup_test,
+ doh_recv_send_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_quota, setup_test,
+ doh_recv_send_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_TLS_quota, setup_test,
+ doh_recv_send_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_TLS_quota, setup_test,
+ doh_recv_send_teardown)
+ISC_TEST_ENTRY_CUSTOM(doh_bad_connect_uri, setup_test,
+ doh_bad_connect_uri_teardown)
ISC_TEST_LIST_END
ISC_TEST_MAIN
ISC_TEST_ENTRY(lex_setline)
ISC_TEST_ENTRY(lex_string)
ISC_TEST_ENTRY(lex_qstring)
-
ISC_TEST_LIST_END
ISC_TEST_MAIN
#include <isc/cmocka.h>
#include <isc/commandline.h>
#include <isc/condition.h>
+#include <isc/event.h>
+#include <isc/job.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/mem.h>
+#include <isc/os.h>
#include <isc/print.h>
#include <isc/task.h>
#include <isc/time.h>
#include <isc/timer.h>
#include <isc/util.h>
-#include <isc/uv.h>
+#include <isc/work.h>
#include <tests/isc.h>
-/* Set to true (or use -v option) for verbose output */
-static bool verbose = false;
-
-static isc_mutex_t lock;
-static isc_condition_t cv;
-
-atomic_int_fast32_t counter;
+static atomic_int_fast32_t counter;
static int active[10];
-static atomic_bool done;
-
-static int
-_setup(void **state) {
- isc_mutex_init(&lock);
- isc_condition_init(&cv);
-
- workers = 0;
- setup_managers(state);
-
- return (0);
-}
-
-static int
-_setup2(void **state) {
- isc_mutex_init(&lock);
- isc_condition_init(&cv);
-
- /* Two worker threads */
- workers = 2;
- setup_managers(state);
-
- return (0);
-}
-
-static int
-_setup4(void **state) {
- isc_mutex_init(&lock);
- isc_condition_init(&cv);
+static atomic_bool done = false;
- /* Four worker threads */
- workers = 4;
- setup_managers(state);
-
- return (0);
-}
-
-static int
-_teardown(void **state) {
- teardown_managers(state);
-
- isc_condition_destroy(&cv);
- isc_mutex_destroy(&lock);
-
- return (0);
-}
+atomic_int_fast32_t set_a, set_b;
static void
set(isc_task_t *task, isc_event_t *event) {
isc_event_free(&event);
atomic_store(value, atomic_fetch_add(&counter, 1));
+
+ if ((atomic_load(&set_a) != 0 && atomic_load(&set_b) != 0)) {
+ isc_loopmgr_shutdown(loopmgr);
+ }
}
#include <isc/thread.h>
-/* Create a task */
-ISC_RUN_TEST_IMPL(create_task) {
+ISC_LOOP_TEST_IMPL(create_task) {
isc_result_t result;
isc_task_t *task = NULL;
- UNUSED(state);
-
- result = isc_task_create(taskmgr, 0, &task, 0);
+ result = isc_task_create(taskmgr, &task, 0);
assert_int_equal(result, ISC_R_SUCCESS);
isc_task_detach(&task);
assert_null(task);
+
+ isc_loopmgr_shutdown(loopmgr);
}
-/* Process events */
-ISC_RUN_TEST_IMPL(all_events) {
+ISC_LOOP_SETUP_IMPL(all_events) {
+ atomic_init(&set_a, 0);
+ atomic_init(&set_b, 0);
+}
+
+ISC_LOOP_TEARDOWN_IMPL(all_events) {
+ assert_int_not_equal(atomic_load(&set_a), 0);
+ assert_int_not_equal(atomic_load(&set_b), 0);
+}
+
+ISC_LOOP_TEST_SETUP_TEARDOWN_IMPL(all_events) {
isc_result_t result;
isc_task_t *task = NULL;
isc_event_t *event = NULL;
- atomic_int_fast32_t a, b;
- int i = 0;
-
- UNUSED(state);
atomic_init(&counter, 1);
- atomic_init(&a, 0);
- atomic_init(&b, 0);
- result = isc_task_create(taskmgr, 0, &task, 0);
+ result = isc_task_create(taskmgr, &task, 0);
assert_int_equal(result, ISC_R_SUCCESS);
/* First event */
- event = isc_event_allocate(mctx, task, ISC_TASKEVENT_TEST, set, &a,
+ event = isc_event_allocate(mctx, task, ISC_TASKEVENT_TEST, set, &set_a,
sizeof(isc_event_t));
assert_non_null(event);
- assert_int_equal(atomic_load(&a), 0);
+ assert_int_equal(atomic_load(&set_a), 0);
isc_task_send(task, &event);
- event = isc_event_allocate(mctx, task, ISC_TASKEVENT_TEST, set, &b,
+ event = isc_event_allocate(mctx, task, ISC_TASKEVENT_TEST, set, &set_b,
sizeof(isc_event_t));
assert_non_null(event);
- assert_int_equal(atomic_load(&b), 0);
+ assert_int_equal(atomic_load(&set_b), 0);
isc_task_send(task, &event);
- while ((atomic_load(&a) == 0 || atomic_load(&b) == 0) && i++ < 5000) {
- uv_sleep(1);
- }
-
- assert_int_not_equal(atomic_load(&a), 0);
- assert_int_not_equal(atomic_load(&b), 0);
-
isc_task_detach(&task);
assert_null(task);
}
}
UNUSED(j);
-
- if (verbose) {
- print_message("# task %s\n", (char *)event->ev_arg);
- }
-
isc_event_free(&event);
}
static void
-basic_tick(isc_task_t *task, isc_event_t *event) {
- UNUSED(task);
-
- if (verbose) {
- print_message("# %s\n", (char *)event->ev_arg);
- }
-
- isc_event_free(&event);
+basic_tick(void *arg __attribute__((__unused__))) {
+ /* no-op */
}
static char one[] = "1";
static char tick[] = "tick";
static char tock[] = "tock";
-ISC_RUN_TEST_IMPL(basic) {
- isc_result_t result;
- isc_task_t *task1 = NULL;
- isc_task_t *task2 = NULL;
- isc_task_t *task3 = NULL;
- isc_task_t *task4 = NULL;
- isc_event_t *event = NULL;
- isc_timer_t *ti1 = NULL;
- isc_timer_t *ti2 = NULL;
- isc_interval_t interval;
+isc_task_t *task1 = NULL;
+isc_task_t *task2 = NULL;
+isc_task_t *task3 = NULL;
+isc_task_t *task4 = NULL;
+isc_timer_t *ti1 = NULL;
+isc_timer_t *ti2 = NULL;
+
+static void
+basic_work(void *arg __attribute__((__unused__))) {
char *testarray[] = { one, one, one, one, one, one, one, one,
one, two, three, four, two, three, four, NULL };
- int i;
-
- UNUSED(state);
-
- result = isc_task_create(taskmgr, 0, &task1, 0);
- assert_int_equal(result, ISC_R_SUCCESS);
- result = isc_task_create(taskmgr, 0, &task2, 0);
- assert_int_equal(result, ISC_R_SUCCESS);
- result = isc_task_create(taskmgr, 0, &task3, 0);
- assert_int_equal(result, ISC_R_SUCCESS);
- result = isc_task_create(taskmgr, 0, &task4, 0);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- isc_interval_set(&interval, 1, 0);
- isc_timer_create(timermgr, task1, basic_tick, tick, &ti1);
- result = isc_timer_reset(ti1, isc_timertype_ticker, &interval, false);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- ti2 = NULL;
- isc_interval_set(&interval, 1, 0);
- isc_timer_create(timermgr, task2, basic_tick, tock, &ti2);
- result = isc_timer_reset(ti2, isc_timertype_ticker, &interval, false);
- assert_int_equal(result, ISC_R_SUCCESS);
-
sleep(2);
- for (i = 0; testarray[i] != NULL; i++) {
+ for (size_t i = 0; testarray[i] != NULL; i++) {
/*
* Note: (void *)1 is used as a sender here, since some
* compilers don't like casting a function pointer to a
* structure (socket, timer, task, etc) but this is just a
* test program.
*/
- event = isc_event_allocate(mctx, (void *)1, 1, basic_cb,
- testarray[i], sizeof(*event));
+ isc_event_t *event = isc_event_allocate(mctx, &task1, 1,
+ basic_cb, testarray[i],
+ sizeof(*event));
assert_non_null(event);
isc_task_send(task1, &event);
}
+}
+
+static void
+basic_after_work(void *arg) {
+ UNUSED(arg);
+
+ sleep(5);
isc_task_detach(&task1);
isc_task_detach(&task2);
isc_task_detach(&task3);
isc_task_detach(&task4);
- sleep(10);
+ sleep(5);
+
isc_timer_destroy(&ti1);
isc_timer_destroy(&ti2);
+
+ isc_loopmgr_shutdown(loopmgr);
+}
+
+ISC_LOOP_TEST_IMPL(basic) {
+ isc_result_t result;
+ isc_interval_t interval;
+
+ UNUSED(arg);
+
+ result = isc_task_create(taskmgr, &task1, 0);
+ assert_int_equal(result, ISC_R_SUCCESS);
+ result = isc_task_create(taskmgr, &task2, 0);
+ assert_int_equal(result, ISC_R_SUCCESS);
+ result = isc_task_create(taskmgr, &task3, 0);
+ assert_int_equal(result, ISC_R_SUCCESS);
+ result = isc_task_create(taskmgr, &task4, 0);
+ assert_int_equal(result, ISC_R_SUCCESS);
+
+ isc_interval_set(&interval, 1, 0);
+ isc_timer_create(mainloop, basic_tick, tick, &ti1);
+ isc_timer_start(ti1, isc_timertype_ticker, &interval);
+
+ isc_interval_set(&interval, 1, 0);
+ isc_timer_create(mainloop, basic_tick, tock, &ti2);
+ isc_timer_start(ti2, isc_timertype_ticker, &interval);
+
+ isc_work_enqueue(mainloop, basic_work, basic_after_work, NULL);
}
/*
* When one task enters exclusive mode, all other active
* tasks complete first.
*/
-static int
-spin(int n) {
- int i;
- int r = 0;
- for (i = 0; i < n; i++) {
- r += i;
- if (r > 1000000) {
- r = 0;
- }
- }
- return (r);
-}
static void
exclusive_cb(isc_task_t *task, isc_event_t *event) {
int taskno = *(int *)(event->ev_arg);
- if (verbose) {
- print_message("# task enter %d\n", taskno);
- }
-
/* task chosen from the middle of the range */
if (taskno == 6) {
- isc_result_t result;
int i;
- result = isc_task_beginexclusive(task);
- assert_int_equal(result, ISC_R_SUCCESS);
+ isc_task_beginexclusive(task);
for (i = 0; i < 10; i++) {
assert_int_equal(active[i], 0);
atomic_store(&done, true);
} else {
active[taskno]++;
- (void)spin(10000000);
+ isc_thread_yield();
active[taskno]--;
}
- if (verbose) {
- print_message("# task exit %d\n", taskno);
- }
-
if (atomic_load(&done)) {
isc_mem_put(event->ev_destroy_arg, event->ev_arg, sizeof(int));
isc_event_free(&event);
atomic_fetch_sub(&counter, 1);
+ isc_loopmgr_shutdown(loopmgr);
} else {
isc_task_send(task, &event);
}
}
-ISC_RUN_TEST_IMPL(task_exclusive) {
- isc_task_t *tasks[10];
- isc_result_t result;
- int i;
+isc_task_t *tasks[10] = { NULL };
- UNUSED(state);
+ISC_LOOP_SETUP_IMPL(task_exclusive) {
+ isc_result_t result;
atomic_init(&counter, 0);
+ atomic_init(&done, false);
- for (i = 0; i < 10; i++) {
- isc_event_t *event = NULL;
- int *v;
-
- tasks[i] = NULL;
+ for (size_t i = 0; i < 10; i++) {
+ uint32_t tid = i % isc_loopmgr_nloops(loopmgr);
if (i == 6) {
/* task chosen from the middle of the range */
- result = isc_task_create(taskmgr, 0, &tasks[i], 0);
+ tid = 0;
+ result = isc_task_create(taskmgr, &tasks[i], tid);
assert_int_equal(result, ISC_R_SUCCESS);
- isc_taskmgr_setexcltask(taskmgr, tasks[6]);
+ isc_taskmgr_setexcltask(taskmgr, tasks[i]);
} else {
- result = isc_task_create(taskmgr, 0, &tasks[i], 0);
+ result = isc_task_create(taskmgr, &tasks[i], tid);
assert_int_equal(result, ISC_R_SUCCESS);
}
+ }
+}
+
+ISC_LOOP_TEST_SETUP_IMPL(task_exclusive) {
+ UNUSED(arg);
+
+ for (size_t i = 0; i < 10; i++) {
+ isc_event_t *event = NULL;
+ int *v;
v = isc_mem_get(mctx, sizeof *v);
assert_non_null(v);
isc_task_send(tasks[i], &event);
atomic_fetch_add(&counter, 1);
- }
-
- for (i = 0; i < 10; i++) {
isc_task_detach(&tasks[i]);
}
-
- while (atomic_load(&counter) > 0) {
- uv_sleep(1);
- }
}
-/*
- * Max tasks test:
- * The task system can create and execute many tasks. Tests with 10000.
- */
-
static void
maxtask_cb(isc_task_t *task, isc_event_t *event) {
isc_result_t result;
uintptr_t ntasks = (uintptr_t)event->ev_arg;
- if (ntasks-- > 0) {
- task = NULL;
+ if (event->ev_arg != NULL) {
+ isc_task_t *newtask = NULL;
- event->ev_arg = (void *)ntasks;
+ event->ev_arg = (void *)(ntasks - 1);
/*
* Create a new task and forward the message.
*/
- result = isc_task_create(taskmgr, 0, &task, 0);
+ result = isc_task_create(taskmgr, &newtask, 0);
assert_int_equal(result, ISC_R_SUCCESS);
- isc_task_send(task, &event);
- isc_task_detach(&task);
+ isc_task_send(newtask, &event);
} else {
isc_event_free(&event);
+ isc_loopmgr_shutdown(loopmgr);
+ }
- LOCK(&lock);
- atomic_store(&done, true);
- SIGNAL(&cv);
- UNLOCK(&lock);
+ if (task != NULL) {
+ isc_task_detach(&task);
}
}
-ISC_RUN_TEST_IMPL(manytasks) {
+ISC_LOOP_TEST_IMPL(manytasks) {
isc_event_t *event = NULL;
- uintptr_t ntasks = 2; /* 0000; */
-
- UNUSED(state);
+ uintptr_t ntasks = 10000;
- if (verbose) {
- print_message("# Testing with %lu tasks\n",
- (unsigned long)ntasks);
- }
+ UNUSED(arg);
- atomic_init(&done, false);
-
- event = isc_event_allocate(mctx, NULL, 1, maxtask_cb, (void *)ntasks,
- sizeof(*event));
+ event = isc_event_allocate(mctx, (void *)1, 1, maxtask_cb,
+ (void *)ntasks, sizeof(*event));
assert_non_null(event);
- LOCK(&lock);
maxtask_cb(NULL, event);
- while (!atomic_load(&done)) {
- WAIT(&cv, &lock);
- }
- UNLOCK(&lock);
-}
-
-/*
- * Helper for the purge tests below:
- */
-
-#define SENDERCNT 3
-#define TYPECNT 4
-#define TAGCNT 5
-#define NEVENTS (SENDERCNT * TYPECNT * TAGCNT)
-
-static int eventcnt;
-
-atomic_bool started;
-
-/*
- * Helpers for purge event tests
- */
-static void
-pge_event1(isc_task_t *task, isc_event_t *event) {
- UNUSED(task);
-
- LOCK(&lock);
- while (!atomic_load(&started)) {
- WAIT(&cv, &lock);
- }
- UNLOCK(&lock);
-
- LOCK(&lock);
- atomic_store(&done, true);
- SIGNAL(&cv);
- UNLOCK(&lock);
-
- isc_event_free(&event);
-}
-
-static void
-pge_event2(isc_task_t *task, isc_event_t *event) {
- UNUSED(task);
-
- ++eventcnt;
- isc_event_free(&event);
-}
-
-static void
-try_purgeevent(void) {
- isc_result_t result;
- isc_task_t *task = NULL;
- bool purged;
- isc_event_t *event1 = NULL;
- isc_event_t *event2 = NULL;
- isc_event_t *event2_clone = NULL;
- isc_time_t now;
- isc_interval_t interval;
-
- atomic_init(&started, false);
- atomic_init(&done, false);
- eventcnt = 0;
-
- result = isc_task_create(taskmgr, 0, &task, 0);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- /*
- * Block the task on cv.
- */
- event1 = isc_event_allocate(mctx, (void *)1, (isc_eventtype_t)1,
- pge_event1, NULL, sizeof(*event1));
- assert_non_null(event1);
- isc_task_send(task, &event1);
-
- event2 = isc_event_allocate(mctx, (void *)1, (isc_eventtype_t)1,
- pge_event2, NULL, sizeof(*event2));
- assert_non_null(event2);
-
- event2_clone = event2;
-
- isc_task_send(task, &event2);
-
- purged = isc_task_purgeevent(task, event2_clone);
-
- assert_true(purged);
-
- /*
- * Unblock the task, allowing event processing.
- */
- LOCK(&lock);
- atomic_store(&started, true);
- SIGNAL(&cv);
-
- isc_interval_set(&interval, 5, 0);
-
- /*
- * Wait for shutdown processing to complete.
- */
- while (!atomic_load(&done)) {
- result = isc_time_nowplusinterval(&now, &interval);
- assert_int_equal(result, ISC_R_SUCCESS);
-
- WAITUNTIL(&cv, &lock, &now);
- }
- UNLOCK(&lock);
-
- isc_task_detach(&task);
-}
-
-/*
- * Purge event test:
- * When the event is marked as purgeable, a call to
- * isc_task_purgeevent(task, event) purges the event 'event' from the
- * task's queue and returns true.
- */
-
-ISC_RUN_TEST_IMPL(purgeevent) {
- UNUSED(state);
-
- try_purgeevent();
}
ISC_TEST_LIST_START
-ISC_TEST_ENTRY_CUSTOM(manytasks, _setup4, _teardown)
-ISC_TEST_ENTRY_CUSTOM(all_events, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(basic, _setup2, _teardown)
-ISC_TEST_ENTRY_CUSTOM(create_task, _setup, _teardown)
-ISC_TEST_ENTRY_CUSTOM(purgeevent, _setup2, _teardown)
-ISC_TEST_ENTRY_CUSTOM(task_exclusive, _setup4, _teardown)
+ISC_TEST_ENTRY_CUSTOM(manytasks, setup_managers, teardown_managers)
+ISC_TEST_ENTRY_CUSTOM(all_events, setup_managers, teardown_managers)
+ISC_TEST_ENTRY_CUSTOM(basic, setup_managers, teardown_managers)
+ISC_TEST_ENTRY_CUSTOM(create_task, setup_managers, teardown_managers)
+ISC_TEST_ENTRY_CUSTOM(task_exclusive, setup_managers, teardown_managers)
ISC_TEST_LIST_END
#include <tests/isc.h>
-isc_log_t *lctx = NULL;
static isc_logcategory_t categories[] = { { "", 0 },
{ "client", 0 },
{ "network", 0 },
#include <tests/isc.h>
-isc_log_t *lctx = NULL;
static isc_logcategory_t categories[] = { { "", 0 },
{ "client", 0 },
{ "network", 0 },
}
if (with_cache) {
- result = dns_cache_create(mctx, mctx, taskmgr, timermgr,
+ result = dns_cache_create(mctx, mctx, taskmgr,
dns_rdataclass_in, "", "rbt", 0, NULL,
&cache);
if (result != ISC_R_SUCCESS) {
isc_result_t result;
REQUIRE(zonemgr == NULL);
- result = dns_zonemgr_create(mctx, taskmgr, timermgr, netmgr, &zonemgr);
+ result = dns_zonemgr_create(mctx, loopmgr, taskmgr, netmgr, &zonemgr);
return (result);
}
#include <isc/timer.h>
#include <isc/util.h>
-#include "netmgr_p.h"
-#include "task_p.h"
-#include "timer_p.h"
-
#include <tests/isc.h>
isc_mem_t *mctx = NULL;
-isc_loopmgr_t *loopmgr = NULL;
+isc_log_t *lctx = NULL;
isc_loop_t *mainloop = NULL;
+isc_loopmgr_t *loopmgr = NULL;
isc_taskmgr_t *taskmgr = NULL;
-isc_timermgr_t *timermgr = NULL;
isc_nm_t *netmgr = NULL;
-unsigned int workers = 0;
-isc_task_t *maintask = NULL;
+unsigned int workers = -1;
int
setup_mctx(void **state __attribute__((__unused__))) {
}
int
-setup_managers(void **state) {
- isc_result_t result;
+setup_taskmgr(void **state __attribute__((__unused__))) {
+ REQUIRE(loopmgr != NULL);
- UNUSED(state);
+ isc_taskmgr_create(mctx, loopmgr, &taskmgr);
- REQUIRE(mctx != NULL);
+ return (0);
+}
- if (workers == 0) {
- char *env_workers = getenv("ISC_TASK_WORKERS");
- if (env_workers != NULL) {
- workers = atoi(env_workers);
- } else {
- workers = isc_os_ncpus();
- }
- INSIST(workers > 0);
- }
+int
+teardown_taskmgr(void **state __attribute__((__unused__))) {
+ isc_taskmgr_destroy(&taskmgr);
- result = isc_managers_create(mctx, workers, 0, &netmgr, &taskmgr,
- &timermgr);
- if (result != ISC_R_SUCCESS) {
- return (-1);
- }
+ return (0);
+}
- result = isc_task_create(taskmgr, 0, &maintask, 0);
- if (result != ISC_R_SUCCESS) {
- return (-1);
- }
+int
+setup_netmgr(void **state __attribute__((__unused__))) {
+ REQUIRE(loopmgr != NULL);
- isc_taskmgr_setexcltask(taskmgr, maintask);
+ isc_netmgr_create(mctx, loopmgr, &netmgr);
return (0);
}
int
-teardown_managers(void **state) {
- UNUSED(state);
+teardown_netmgr(void **state __attribute__((__unused__))) {
+ REQUIRE(loopmgr != NULL);
+
+ isc_netmgr_destroy(&netmgr);
- isc_task_detach(&maintask);
- isc_managers_destroy(&netmgr, &taskmgr, &timermgr);
+ return (0);
+}
+
+int
+setup_managers(void **state) {
+ setup_loopmgr(state);
+ setup_taskmgr(state);
+ setup_netmgr(state);
+
+ return (0);
+}
+
+int
+teardown_managers(void **state) {
+ teardown_netmgr(state);
+ teardown_taskmgr(state);
+ teardown_loopmgr(state);
return (0);
}
#include <isc/buffer.h>
#include <isc/file.h>
#include <isc/hash.h>
+#include <isc/job.h>
+#include <isc/loop.h>
#include <isc/managers.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
#include <tests/ns.h>
+isc_task_t *maintask = NULL;
dns_dispatchmgr_t *dispatchmgr = NULL;
-ns_clientmgr_t *clientmgr = NULL;
ns_interfacemgr_t *interfacemgr = NULL;
ns_server_t *sctx = NULL;
-bool debug_mem_record = true;
static isc_result_t
matchview(isc_netaddr_t *srcaddr, isc_netaddr_t *destaddr,
return (ISC_R_NOTIMPLEMENTED);
}
+static void
+scan_interfaces(void *arg) {
+ UNUSED(arg);
+ ns_interfacemgr_scan(interfacemgr, true, false);
+}
+
int
setup_server(void **state) {
isc_result_t result;
result = dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr);
if (result != ISC_R_SUCCESS) {
- return (-1);
+ goto cleanup;
}
- result = ns_interfacemgr_create(mctx, sctx, taskmgr, timermgr, netmgr,
+ result = ns_interfacemgr_create(mctx, sctx, loopmgr, taskmgr, netmgr,
dispatchmgr, maintask, NULL, false,
&interfacemgr);
if (result != ISC_R_SUCCESS) {
- return (-1);
+ goto cleanup;
}
result = ns_listenlist_default(mctx, port, -1, true, AF_INET,
&listenon);
if (result != ISC_R_SUCCESS) {
- return (-1);
+ goto cleanup;
}
ns_interfacemgr_setlistenon4(interfacemgr, listenon);
ns_listenlist_detach(&listenon);
- clientmgr = ns_interfacemgr_getclientmgr(interfacemgr);
+ isc_loop_setup(mainloop, scan_interfaces, NULL);
return (0);
+
+cleanup:
+ teardown_server(state);
+ return (-1);
}
-int
-teardown_server(void **state) {
+void
+shutdown_interfacemgr(void *arg __attribute__((__unused__))) {
if (interfacemgr != NULL) {
ns_interfacemgr_shutdown(interfacemgr);
ns_interfacemgr_detach(&interfacemgr);
}
+}
+
+int
+teardown_server(void **state) {
+ shutdown_interfacemgr(NULL);
if (dispatchmgr != NULL) {
dns_dispatchmgr_detach(&dispatchmgr);
static dns_zone_t *served_zone = NULL;
-/*
- * We don't want to use netmgr-based client accounting, we need to emulate it.
- */
-atomic_uint_fast32_t client_refs[32];
-atomic_uintptr_t client_addrs[32];
-
-void
-isc__nmhandle_attach(isc_nmhandle_t *source, isc_nmhandle_t **targetp FLARG) {
- ns_client_t *client = (ns_client_t *)source;
- int i;
-
- for (i = 0; i < 32; i++) {
- if (atomic_load(&client_addrs[i]) == (uintptr_t)client) {
- break;
- }
- }
- INSIST(i < 32);
- INSIST(atomic_load(&client_refs[i]) > 0);
-
- atomic_fetch_add(&client_refs[i], 1);
-
- *targetp = source;
- return;
-}
-
-void
-isc__nmhandle_detach(isc_nmhandle_t **handlep FLARG) {
- isc_nmhandle_t *handle = *handlep;
- ns_client_t *client = (ns_client_t *)handle;
- int i;
-
- *handlep = NULL;
-
- for (i = 0; i < 32; i++) {
- if (atomic_load(&client_addrs[i]) == (uintptr_t)client) {
- break;
- }
- }
- INSIST(i < 32);
-
- if (atomic_fetch_sub(&client_refs[i], 1) == 1) {
- dns_view_detach(&client->view);
- client->state = 4;
- ns__client_reset_cb(client);
- ns__client_put_cb(client);
- atomic_store(&client_addrs[i], (uintptr_t)NULL);
- }
-
- return;
-}
-
isc_result_t
ns_test_serve_zone(const char *zonename, const char *filename,
dns_view_t *view) {
isc_result_t
ns_test_getclient(ns_interface_t *ifp0, bool tcp, ns_client_t **clientp) {
isc_result_t result;
- ns_client_t *client = isc_mem_get(clientmgr->mctx, sizeof(*client));
+ ns_client_t *client;
+ ns_clientmgr_t *clientmgr;
int i;
UNUSED(ifp0);
UNUSED(tcp);
+ clientmgr = ns_interfacemgr_getclientmgr(interfacemgr);
+
+ client = isc_mem_get(clientmgr->mctx, sizeof(*client));
result = ns__client_setup(client, clientmgr, true);
for (i = 0; i < 32; i++) {
continue;
}
if (len % 2 != 0U) {
- CHECK(ISC_R_UNEXPECTEDEND);
+ result = ISC_R_UNEXPECTEDEND;
+ goto cleanup;
}
if (len > bufsiz * 2) {
- CHECK(ISC_R_NOSPACE);
+ result = ISC_R_NOSPACE;
+ goto cleanup;
}
rp = s;
for (i = 0; i < len; i += 2) {
plugin_test \
query_test
+notify_test_SOURCES = \
+ notify_test.c \
+ netmgr_wrap.c
+
+query_test_SOURCES = \
+ query_test.c \
+ netmgr_wrap.c
+
EXTRA_DIST = testdata
include $(top_srcdir)/Makefile.tests
#include <tests/ns.h>
-static int
-_setup(void **state) {
- isc__nm_force_tid(0);
-
- setup_managers(state);
-
- return (0);
-}
-
-static int
-_teardown(void **state) {
- isc__nm_force_tid(-1);
-
- teardown_managers(state);
-
- return (0);
-}
-
/* test that ns_listenlist_default() works */
ISC_RUN_TEST_IMPL(ns_listenlist_default) {
isc_result_t result;
}
ISC_TEST_LIST_START
-
-ISC_TEST_ENTRY_CUSTOM(ns_listenlist_default, _setup, _teardown)
-
+ISC_TEST_ENTRY(ns_listenlist_default)
ISC_TEST_LIST_END
ISC_TEST_MAIN
--- /dev/null
+/*
+ * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
+ *
+ * SPDX-License-Identifier: MPL-2.0
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, you can obtain one at https://mozilla.org/MPL/2.0/.
+ *
+ * See the COPYRIGHT file distributed with this work for additional
+ * information regarding copyright ownership.
+ */
+
+/*! \file */
+
+#include <isc/atomic.h>
+#include <isc/netmgr.h>
+#include <isc/util.h>
+
+#include <dns/view.h>
+
+#include <ns/client.h>
+
+#ifdef NETMGR_TRACE
+#define FLARG \
+ , const char *file __attribute__((unused)), \
+ unsigned int line __attribute__((unused)), \
+ const char *func __attribute__((unused))
+#else
+#define FLARG
+#endif
+
+/*
+ * We don't want to use netmgr-based client accounting, we need to emulate it.
+ */
+atomic_uint_fast32_t client_refs[32];
+atomic_uintptr_t client_addrs[32];
+
+void
+isc__nmhandle_attach(isc_nmhandle_t *source, isc_nmhandle_t **targetp FLARG) {
+ ns_client_t *client = (ns_client_t *)source;
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ if (atomic_load(&client_addrs[i]) == (uintptr_t)client) {
+ break;
+ }
+ }
+ INSIST(i < 32);
+ INSIST(atomic_load(&client_refs[i]) > 0);
+
+ atomic_fetch_add(&client_refs[i], 1);
+#if 0
+ fprintf(stderr, "%s:%s:%s:%d -> %ld\n", __func__, func, file, line,
+ client_refs[i]);
+#endif
+
+ *targetp = source;
+ return;
+}
+
+void
+isc__nmhandle_detach(isc_nmhandle_t **handlep FLARG) {
+ isc_nmhandle_t *handle = *handlep;
+ ns_client_t *client = (ns_client_t *)handle;
+ int i;
+
+ *handlep = NULL;
+
+ for (i = 0; i < 32; i++) {
+ if (atomic_load(&client_addrs[i]) == (uintptr_t)client) {
+ break;
+ }
+ }
+ INSIST(i < 32);
+
+ if (atomic_fetch_sub(&client_refs[i], 1) == 1) {
+ dns_view_detach(&client->view);
+ client->state = 4;
+ ns__client_reset_cb(client);
+ ns__client_put_cb(client);
+ atomic_store(&client_addrs[i], (uintptr_t)NULL);
+ }
+#if 0
+ fprintf(stderr, "%s:%s:%s:%d -> %ld\n", __func__, func, file, line,
+ client_refs[i]);
+#endif
+
+ return;
+}
#include <ns/client.h>
#include <ns/notify.h>
-#include <tests/dns.h>
#include <tests/ns.h>
-static int
-setup_test(void **state) {
- isc__nm_force_tid(0);
- return (setup_server(state));
-}
-
-static int
-teardown_test(void **state) {
- isc__nm_force_tid(-1);
- return (teardown_server(state));
-}
-
static void
check_response(isc_buffer_t *buf) {
isc_result_t result;
}
/* test ns_notify_start() */
-ISC_RUN_TEST_IMPL(ns_notify_start) {
+ISC_LOOP_TEST_IMPL(notify_start) {
isc_result_t result;
ns_client_t *client = NULL;
isc_nmhandle_t *handle = NULL;
isc_buffer_t nbuf;
size_t nsize;
- UNUSED(state);
-
result = ns_test_getclient(NULL, false, &client);
assert_int_equal(result, ISC_R_SUCCESS);
handle = client->handle;
isc_nmhandle_detach(&client->handle);
isc_nmhandle_detach(&handle);
+
+ isc_loop_teardown(mainloop, shutdown_interfacemgr, NULL);
+ isc_loopmgr_shutdown(loopmgr);
}
ISC_TEST_LIST_START
-ISC_TEST_ENTRY_CUSTOM(ns_notify_start, setup_test, teardown_test)
+ISC_TEST_ENTRY_CUSTOM(notify_start, setup_server, teardown_server)
ISC_TEST_LIST_END
ISC_TEST_MAIN
#include <isc/types.h>
#include <isc/util.h>
-noreturn void
-_fail(const char *const file, const int line);
-
#include <ns/hooks.h>
#include <tests/ns.h>
}
ISC_TEST_LIST_START
-
-ISC_TEST_ENTRY_CUSTOM(ns_plugin_expandpath, setup_managers, teardown_managers)
-
+ISC_TEST_ENTRY(ns_plugin_expandpath)
ISC_TEST_LIST_END
ISC_TEST_MAIN
#include <tests/ns.h>
-static int
-setup_test(void **state) {
- isc__nm_force_tid(0);
- setup_server(state);
- return (0);
-}
-
-static int
-teardown_test(void **state) {
- isc__nm_force_tid(-1);
- teardown_server(state);
- return (0);
-}
-
/* can be used for client->sendcb to avoid disruption on sending a response */
static void
send_noop(isc_buffer_t *buffer) {
}
/*****
-***** ns__query_sfcache() tests
-*****/
+ ***** ns__query_sfcache() tests
+ *****/
/*%
* Structure containing parameters for ns__query_sfcache_test().
}
/* test ns__query_sfcache() */
-ISC_RUN_TEST_IMPL(ns_query_sfcache) {
- size_t i;
-
+ISC_LOOP_TEST_IMPL(ns__query_sfcache) {
const ns__query_sfcache_test_params_t tests[] = {
/*
* Sanity check for an empty SERVFAIL cache.
},
};
- UNUSED(state);
-
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
run_sfcache_test(&tests[i]);
}
+
+ isc_loop_teardown(mainloop, shutdown_interfacemgr, NULL);
+ isc_loopmgr_shutdown(loopmgr);
}
/*****
/*
* Interrupt execution if query_lookup() or ns_query_done() is called.
*/
-
ns_hooktable_create(mctx, &query_hooks);
ns_hook_add(query_hooks, mctx, NS_QUERY_LOOKUP_BEGIN, &hook);
ns_hook_add(query_hooks, mctx, NS_QUERY_DONE_BEGIN, &hook);
}
/* test ns__query_start() */
-ISC_RUN_TEST_IMPL(ns_query_start) {
+ISC_LOOP_TEST_IMPL(ns__query_start) {
size_t i;
const ns__query_start_test_params_t tests[] = {
},
};
- UNUSED(state);
-
for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
run_start_test(&tests[i]);
}
+
+ isc_loop_teardown(mainloop, shutdown_interfacemgr, NULL);
+ isc_loopmgr_shutdown(loopmgr);
}
/*****
}
}
-ISC_RUN_TEST_IMPL(ns_query_hookasync) {
+ISC_LOOP_TEST_IMPL(ns__query_hookasync) {
size_t i;
- UNUSED(state);
-
const ns__query_hookasync_test_params_t tests[] = {
{
NS_TEST_ID("normal case"),
for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
run_hookasync_test(&tests[i]);
}
+
+ isc_loop_teardown(mainloop, shutdown_interfacemgr, NULL);
+ isc_loopmgr_shutdown(loopmgr);
}
/*****
ns_hooktable_free(mctx, (void **)&ns__hook_table);
}
-ISC_RUN_TEST_IMPL(ns_query_hookasync_e2e) {
- UNUSED(state);
-
+ISC_LOOP_TEST_IMPL(ns__query_hookasync_e2e) {
const ns__query_hookasync_e2e_test_params_t tests[] = {
{
NS_TEST_ID("positive answer"),
for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
run_hookasync_e2e_test(&tests[i]);
}
+
+ isc_loop_teardown(mainloop, shutdown_interfacemgr, NULL);
+ isc_loopmgr_shutdown(loopmgr);
}
ISC_TEST_LIST_START
-
-ISC_TEST_ENTRY_CUSTOM(ns_query_sfcache, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(ns_query_start, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(ns_query_hookasync, setup_test, teardown_test)
-ISC_TEST_ENTRY_CUSTOM(ns_query_hookasync_e2e, setup_test, teardown_test)
-
+ISC_TEST_ENTRY_CUSTOM(ns__query_sfcache, setup_server, teardown_server)
+ISC_TEST_ENTRY_CUSTOM(ns__query_start, setup_server, teardown_server)
+ISC_TEST_ENTRY_CUSTOM(ns__query_hookasync, setup_server, teardown_server)
+ISC_TEST_ENTRY_CUSTOM(ns__query_hookasync_e2e, setup_server, teardown_server)
ISC_TEST_LIST_END
+
ISC_TEST_MAIN