sock = *sockp;
if (sock != NULL) {
result = isc_socket_open(sock);
- if (result != ISC_R_SUCCESS)
+ if (result != ISC_R_SUCCESS) {
return (result);
- } else if (dup_socket != NULL && (!isc_socket_hasreuseport() || duponly)) {
+ }
+ } else if (dup_socket != NULL &&
+ (!isc_socket_hasreuseport() || duponly))
+ {
result = isc_socket_dup(dup_socket, &sock);
- if (result != ISC_R_SUCCESS)
+ if (result != ISC_R_SUCCESS) {
return (result);
+ }
isc_socket_setname(sock, "dispatcher", NULL);
*sockp = sock;
} else {
result = isc_socket_create(mgr, isc_sockaddr_pf(local),
isc_sockettype_udp, &sock);
- if (result != ISC_R_SUCCESS)
+ if (result != ISC_R_SUCCESS) {
return (result);
+ }
}
isc_socket_setname(sock, "dispatcher", NULL);
#endif
result = isc_socket_bind(sock, local, options);
if (result != ISC_R_SUCCESS) {
- if (*sockp == NULL)
+ if (*sockp == NULL) {
isc_socket_detach(&sock);
- else {
+ } else {
isc_socket_close(sock);
}
return (result);
dns_dispatch_t *disp;
isc_socket_t *sock = NULL;
int i = 0;
-
bool duponly = ((attributes & DNS_DISPATCHATTR_CANREUSE) == 0);
+
/* This is an attribute needed only at creation time */
attributes &= ~DNS_DISPATCHATTR_CANREUSE;
/*
*/
disp = NULL;
result = dispatch_allocate(mgr, maxrequests, &disp);
- if (result != ISC_R_SUCCESS)
+ if (result != ISC_R_SUCCESS) {
return (result);
+ }
disp->socktype = isc_sockettype_udp;
if ((attributes & DNS_DISPATCHATTR_EXCLUSIVE) == 0) {
result = get_udpsocket(mgr, disp, sockmgr, localaddr, &sock,
dup_socket, duponly);
- if (result != ISC_R_SUCCESS)
+ if (result != ISC_R_SUCCESS) {
goto deallocate_dispatch;
+ }
if (isc_log_wouldlog(dns_lctx, 90)) {
char addrbuf[ISC_SOCKADDR_FORMATSIZE];
*/
isc_sockaddr_anyofpf(&sa_any, isc_sockaddr_pf(localaddr));
if (!isc_sockaddr_eqaddr(&sa_any, localaddr)) {
- result = open_socket(sockmgr, localaddr, 0, &sock, NULL, false);
- if (sock != NULL)
+ result = open_socket(sockmgr, localaddr, 0,
+ &sock, NULL, false);
+ if (sock != NULL) {
isc_socket_detach(&sock);
- if (result != ISC_R_SUCCESS)
+ }
+ if (result != ISC_R_SUCCESS) {
goto deallocate_dispatch;
+ }
}
disp->port_table = isc_mem_get(mgr->mctx,
sizeof(disp->port_table[0]) *
DNS_DISPATCH_PORTTABLESIZE);
- if (disp->port_table == NULL)
+ if (disp->port_table == NULL) {
goto deallocate_dispatch;
- for (i = 0; i < DNS_DISPATCH_PORTTABLESIZE; i++)
+ }
+ for (i = 0; i < DNS_DISPATCH_PORTTABLESIZE; i++) {
ISC_LIST_INIT(disp->port_table[i]);
+ }
result = isc_mempool_create(mgr->mctx, sizeof(dispportentry_t),
&disp->portpool);
- if (result != ISC_R_SUCCESS)
+ if (result != ISC_R_SUCCESS) {
goto deallocate_dispatch;
+ }
isc_mempool_setname(disp->portpool, "disp_portpool");
isc_mempool_setfreemax(disp->portpool, 128);
}
disp->socket = sock;
disp->local = *localaddr;
- if ((attributes & DNS_DISPATCHATTR_EXCLUSIVE) != 0)
+ if ((attributes & DNS_DISPATCHATTR_EXCLUSIVE) != 0) {
disp->ntasks = MAX_INTERNAL_TASKS;
- else
+ } else {
disp->ntasks = 1;
+ }
for (i = 0; i < disp->ntasks; i++) {
disp->task[i] = NULL;
result = isc_task_create(taskmgr, 50, &disp->task[i]);
}
result = isc_mutex_init(&disp->sepool_lock);
- if (result != ISC_R_SUCCESS)
+ if (result != ISC_R_SUCCESS) {
goto kill_sepool;
+ }
isc_mempool_setname(disp->sepool, "disp_sepool");
isc_mempool_setmaxalloc(disp->sepool, 32768);
mgr_log(mgr, LVL(90), "created UDP dispatcher %p", disp);
dispatch_log(disp, LVL(90), "created task %p", disp->task[0]); /* XXX */
- if (disp->socket != NULL)
+ if (disp->socket != NULL) {
dispatch_log(disp, LVL(90), "created socket %p", disp->socket);
+ }
*dispp = disp;
kill_ctlevent:
isc_event_free(&disp->ctlevent);
kill_task:
- for (i = 0; i < disp->ntasks; i++)
+ for (i = 0; i < disp->ntasks; i++) {
isc_task_detach(&disp->task[i]);
+ }
kill_socket:
- if (disp->socket != NULL)
+ if (disp->socket != NULL) {
isc_socket_detach(&disp->socket);
+ }
deallocate_dispatch:
dispatch_free(&disp);
static inline void
wake_all_queues(isc__taskmgr_t *manager) {
- for (unsigned i=0; i < manager->workers; i++) {
+ for (unsigned int i = 0; i < manager->workers; i++) {
LOCK(&manager->queues[i].lock);
BROADCAST(&manager->queues[i].work_available);
UNLOCK(&manager->queues[i].lock);
XTRACE("task_ready");
LOCK(&manager->queues[task->threadid].lock);
push_readyq(manager, task, task->threadid);
- if (manager->mode == isc_taskmgrmode_normal || has_privilege)
+ if (manager->mode == isc_taskmgrmode_normal || has_privilege) {
SIGNAL(&manager->queues[task->threadid].work_available);
+ }
UNLOCK(&manager->queues[task->threadid].lock);
}
pop_readyq(isc__taskmgr_t *manager, int c) {
isc__task_t *task;
- if (manager->mode == isc_taskmgrmode_normal)
+ if (manager->mode == isc_taskmgrmode_normal) {
task = HEAD(manager->queues[c].ready_tasks);
- else
+ } else {
task = HEAD(manager->queues[c].ready_priority_tasks);
+ }
if (task != NULL) {
DEQUEUE(manager->queues[c].ready_tasks, task, ready_link);
- if (ISC_LINK_LINKED(task, ready_priority_link))
+ if (ISC_LINK_LINKED(task, ready_priority_link)) {
DEQUEUE(manager->queues[c].ready_priority_tasks, task,
ready_priority_link);
+ }
}
return (task);
static inline void
push_readyq(isc__taskmgr_t *manager, isc__task_t *task, int c) {
ENQUEUE(manager->queues[c].ready_tasks, task, ready_link);
- if ((task->flags & TASK_F_PRIVILEGED) != 0)
+ if ((task->flags & TASK_F_PRIVILEGED) != 0) {
ENQUEUE(manager->queues[c].ready_priority_tasks, task,
ready_priority_link);
+ }
atomic_fetch_add_explicit(&manager->tasks_ready, 1,
memory_order_acquire);
}
* If a pause has been requested, don't do any work
* until it's been released.
*/
- while ((empty_readyq(manager, threadid) && !manager->pause_requested &&
- !manager->exclusive_requested) && !FINISHED(manager))
+ while ((empty_readyq(manager, threadid) &&
+ !manager->pause_requested &&
+ !manager->exclusive_requested) &&
+ !FINISHED(manager))
{
XTHREADTRACE(isc_msgcat_get(isc_msgcat,
ISC_MSGSET_GENERAL,
ISC_MSG_WAIT, "wait"));
XTHREADTRACE(isc_msgcat_get(isc_msgcat,
ISC_MSGSET_GENERAL,
- ISC_MSG_WAIT, manager->pause_requested ? "paused" : "notpaused"));
+ ISC_MSG_WAIT,
+ manager->pause_requested
+ ? "paused" : "notpaused"));
XTHREADTRACE(isc_msgcat_get(isc_msgcat,
ISC_MSGSET_GENERAL,
- ISC_MSG_WAIT, manager->exclusive_requested ? "excreq" : "notexcreq"));
- WAIT(&manager->queues[threadid].work_available, &manager->queues[threadid].lock);
+ ISC_MSG_WAIT,
+ manager->exclusive_requested
+ ? "excreq" : "notexcreq"));
+ WAIT(&manager->queues[threadid].work_available,
+ &manager->queues[threadid].lock);
XTHREADTRACE(isc_msgcat_get(isc_msgcat,
ISC_MSGSET_TASK,
ISC_MSG_AWAKE, "awake"));
ISC_MSG_WORKING, "halting"));
/*
- * Switching to exclusive mode is done as a 2-phase-lock,
- * checking if we have to switch is done without any locks
- * on pause_requested and exclusive_requested to save time -
- * the worst thing that can happen is that we'll launch one task
- * more and exclusive task will be postponed a bit.
+ * Switching to exclusive mode is done as a
+ * 2-phase-lock, checking if we have to switch is
+ * done without any locks on pause_requested and
+ * exclusive_requested to save time - the worst
+ * thing that can happen is that we'll launch one
+ * task more and exclusive task will be postponed a
+ * bit.
*
- * Broadcasting on halt_cond seems suboptimal, but exclusive tasks
- * are rare enought that we don't care.
+ * Broadcasting on halt_cond seems suboptimal, but
+ * exclusive tasks are rare enough that we don't
+ * care.
*/
LOCK(&manager->halt_lock);
manager->halted++;
BROADCAST(&manager->halt_cond);
- while (manager->pause_requested || manager->exclusive_requested) {
+ while (manager->pause_requested ||
+ manager->exclusive_requested)
+ {
WAIT(&manager->halt_cond, &manager->halt_lock);
}
manager->halted--;
* lock before exiting the 'if (task != NULL)' block.
*/
UNLOCK(&manager->queues[threadid].lock);
- RUNTIME_CHECK(atomic_fetch_sub_explicit(&manager->tasks_ready,
- 1, memory_order_release) > 0);
+ RUNTIME_CHECK(
+ atomic_fetch_sub_explicit(&manager->tasks_ready,
+ 1, memory_order_release) > 0);
atomic_fetch_add_explicit(&manager->tasks_running, 1,
memory_order_acquire);
if (finished)
task_finished(task);
- RUNTIME_CHECK(atomic_fetch_sub_explicit(&manager->tasks_running,
+ RUNTIME_CHECK(
+ atomic_fetch_sub_explicit(&manager->tasks_running,
1, memory_order_release) > 0);
LOCK(&manager->queues[threadid].lock);
if (requeue) {
{
bool empty = true;
unsigned int i;
- for (i=0; i<manager->workers && empty; i++)
+ for (i = 0; i < manager->workers && empty; i++)
{
LOCK(&manager->queues[i].lock);
empty &= empty_readyq(manager, i);
static void
manager_free(isc__taskmgr_t *manager) {
- for (unsigned int i=0; i < manager->workers; i++) {
+ for (unsigned int i = 0; i < manager->workers; i++) {
DESTROYLOCK(&manager->queues[i].lock);
}
DESTROYLOCK(&manager->lock);
RUNTIME_CHECK(isc_mutex_init(&manager->lock) == ISC_R_SUCCESS);
RUNTIME_CHECK(isc_mutex_init(&manager->excl_lock) == ISC_R_SUCCESS);
- RUNTIME_CHECK(isc_mutex_init(&manager->halt_lock)
- == ISC_R_SUCCESS);
- RUNTIME_CHECK(isc_condition_init(&manager->halt_cond)
- == ISC_R_SUCCESS);
+ RUNTIME_CHECK(isc_mutex_init(&manager->halt_lock) == ISC_R_SUCCESS);
+ RUNTIME_CHECK(isc_condition_init(&manager->halt_cond) == ISC_R_SUCCESS);
manager->workers = workers;
- if (default_quantum == 0)
+ if (default_quantum == 0) {
default_quantum = DEFAULT_DEFAULT_QUANTUM;
+ }
manager->default_quantum = default_quantum;
INIT_LIST(manager->tasks);
- manager->queues = isc_mem_get(mctx, workers *
- sizeof(isc__taskqueue_t));
+ manager->queues = isc_mem_get(mctx, workers * sizeof(isc__taskqueue_t));
RUNTIME_CHECK(manager->queues != NULL);
manager->tasks_running = 0;
RUNTIME_CHECK(isc_mutex_init(&manager->queues[i].lock)
== ISC_R_SUCCESS);
RUNTIME_CHECK(isc_condition_init(
- &manager->queues[i].work_available)
+ &manager->queues[i].work_available)
== ISC_R_SUCCESS);
manager->queues[i].manager = manager;
manager->queues[i].threadid = i;
const char *fmt, ...) ISC_FORMAT_PRINTF(5, 6);
static void
thread_log(isc__socketthread_t *thread,
- isc_logcategory_t *category, isc_logmodule_t *module, int level,
- const char *fmt, ...)
+ isc_logcategory_t *category, isc_logmodule_t *module, int level,
+ const char *fmt, ...)
{
char msgbuf[2048];
va_list ap;
- if (! isc_log_wouldlog(isc_lctx, level))
+ if (! isc_log_wouldlog(isc_lctx, level)) {
return;
+ }
va_start(ap, fmt);
vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
va_end(ap);
isc_log_write(isc_lctx, category, module, level,
- "sockmgr %p thread %d: %s", thread->manager, thread->threadid, msgbuf);
+ "sockmgr %p thread %d: %s",
+ thread->manager, thread->threadid, msgbuf);
}
static void
struct kevent evchange;
memset(&evchange, 0, sizeof(evchange));
- if (msg == SELECT_POKE_READ)
+ if (msg == SELECT_POKE_READ) {
evchange.filter = EVFILT_READ;
- else
+ } else {
evchange.filter = EVFILT_WRITE;
+ }
evchange.flags = EV_ADD;
evchange.ident = fd;
- if (kevent(thread->kqueue_fd, &evchange, 1, NULL, 0, NULL) != 0)
+ if (kevent(thread->kqueue_fd, &evchange, 1, NULL, 0, NULL) != 0) {
result = isc__errno2result(errno);
+ }
return (result);
#elif defined(USE_EPOLL)
int op;
oldevents = thread->epoll_events[fd];
- if (msg == SELECT_POKE_READ)
+ if (msg == SELECT_POKE_READ) {
thread->epoll_events[fd] |= EPOLLIN;
- else
+ } else {
thread->epoll_events[fd] |= EPOLLOUT;
+ }
event.events = thread->epoll_events[fd];
memset(&event.data, 0, sizeof(event.data));
op = (oldevents == 0U) ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
ret = epoll_ctl(thread->epoll_fd, op, fd, &event);
if (ret == -1) {
- if (errno == EEXIST)
+ if (errno == EEXIST) {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"epoll_ctl(ADD/MOD) returned "
"EEXIST for fd %d", fd);
+ }
result = isc__errno2result(errno);
}
int lockid = FDLOCK_ID(fd);
memset(&pfd, 0, sizeof(pfd));
- if (msg == SELECT_POKE_READ)
+ if (msg == SELECT_POKE_READ) {
pfd.events = POLLIN;
- else
+ } else {
pfd.events = POLLOUT;
+ }
pfd.fd = fd;
pfd.revents = 0;
- if (write(thread->devpoll_fd, &pfd, sizeof(pfd)) == -1)
+ if (write(thread->devpoll_fd, &pfd, sizeof(pfd)) == -1) {
result = isc__errno2result(errno);
- else {
- if (msg == SELECT_POKE_READ)
+ } else {
+ if (msg == SELECT_POKE_READ) {
thread->fdpollinfo[fd].want_read = 1;
- else
+ } else {
thread->fdpollinfo[fd].want_write = 1;
+ }
}
return (result);
#elif defined(USE_SELECT)
LOCK(&thread->manager->lock);
- if (msg == SELECT_POKE_READ)
+ if (msg == SELECT_POKE_READ) {
FD_SET(fd, thread->read_fds);
- if (msg == SELECT_POKE_WRITE)
+ }
+ if (msg == SELECT_POKE_WRITE) {
FD_SET(fd, thread->write_fds);
+ }
UNLOCK(&thread->manager->lock);
return (result);
struct kevent evchange;
memset(&evchange, 0, sizeof(evchange));
- if (msg == SELECT_POKE_READ)
+ if (msg == SELECT_POKE_READ) {
evchange.filter = EVFILT_READ;
- else
+ } else {
evchange.filter = EVFILT_WRITE;
+ }
evchange.flags = EV_DELETE;
evchange.ident = fd;
- if (kevent(thread->kqueue_fd, &evchange, 1, NULL, 0, NULL) != 0)
+ if (kevent(thread->kqueue_fd, &evchange, 1, NULL, 0, NULL) != 0) {
result = isc__errno2result(errno);
+ }
return (result);
#elif defined(USE_EPOLL)
* socket for the other operation.
*/
if (msg == SELECT_POKE_READ &&
- thread->fdpollinfo[fd].want_write == 1) {
+ thread->fdpollinfo[fd].want_write == 1)
+ {
pfds[1].events = POLLOUT;
pfds[1].fd = fd;
writelen += sizeof(pfds[1]);
}
if (msg == SELECT_POKE_WRITE &&
- thread->fdpollinfo[fd].want_read == 1) {
+ thread->fdpollinfo[fd].want_read == 1)
+ {
pfds[1].events = POLLIN;
pfds[1].fd = fd;
writelen += sizeof(pfds[1]);
}
- if (write(thread->devpoll_fd, pfds, writelen) == -1)
+ if (write(thread->devpoll_fd, pfds, writelen) == -1) {
result = isc__errno2result(errno);
- else {
- if (msg == SELECT_POKE_READ)
+ } else {
+ if (msg == SELECT_POKE_READ) {
thread->fdpollinfo[fd].want_read = 0;
- else
+ } else {
thread->fdpollinfo[fd].want_write = 0;
+ }
}
return (result);
#elif defined(USE_SELECT)
LOCK(&thread->manager->lock);
- if (msg == SELECT_POKE_READ)
+ if (msg == SELECT_POKE_READ) {
FD_CLR(fd, thread->read_fds);
- else if (msg == SELECT_POKE_WRITE)
+ } else if (msg == SELECT_POKE_WRITE) {
FD_CLR(fd, thread->write_fds);
+ }
UNLOCK(&thread->manager->lock);
return (result);
buf[1] = msg;
do {
- cc = write(mgr->threads[threadid].pipe_fds[1], buf, sizeof(buf));
+ cc = write(mgr->threads[threadid].pipe_fds[1],
+ buf, sizeof(buf));
#ifdef ENOSR
/*
* Treat ENOSR as EAGAIN but loop slowly as it is
inc_stats(thread->manager->stats, sock->statsindex[STATID_CLOSE]);
if (sock->active == 1) {
- dec_stats(thread->manager->stats, sock->statsindex[STATID_ACTIVE]);
+ dec_stats(thread->manager->stats,
+ sock->statsindex[STATID_ACTIVE]);
sock->active = 0;
}
}
UNLOCK(&thread->fdlock[lockid]);
}
- if (thread->maxfd < thread->pipe_fds[0])
+ if (thread->maxfd < thread->pipe_fds[0]) {
thread->maxfd = thread->pipe_fds[0];
+ }
}
UNLOCK(&thread->manager->lock);
task = (*dev)->ev_sender;
(*dev)->ev_sender = sock;
- if (ISC_LINK_LINKED(*dev, ev_link))
+ if (ISC_LINK_LINKED(*dev, ev_link)) {
ISC_LIST_DEQUEUE(sock->connect_list, *dev, ev_link);
+ }
isc_task_sendtoanddetach(&task, (isc_event_t **)dev, sock->threadid);
}
}
finish:
- if (ISC_LIST_EMPTY(sock->recv_list))
+ if (ISC_LIST_EMPTY(sock->recv_list)) {
unwatch_fd(&sock->manager->threads[sock->threadid], sock->fd,
SELECT_POKE_READ);
+ }
UNLOCK(&sock->lock);
}
}
finish:
- if (ISC_LIST_EMPTY(sock->send_list))
+ if (ISC_LIST_EMPTY(sock->send_list)) {
unwatch_fd(&sock->manager->threads[sock->threadid],
sock->fd, SELECT_POKE_WRITE);
+ }
UNLOCK(&sock->lock);
}
isc_refcount_increment(&sock->references);
if (readable) {
- if (sock->listener)
+ if (sock->listener) {
internal_accept(sock);
- else
+ } else {
internal_recv(sock);
+ }
}
if (writeable) {
- if (sock->connecting)
+ if (sock->connecting) {
internal_connect(sock);
- else
+ } else {
internal_send(sock);
+ }
}
unlock_fd:
result = isc_resource_getcurlimit(
isc_resource_openfiles,
&thread->open_max);
- if (result != ISC_R_SUCCESS)
+ if (result != ISC_R_SUCCESS) {
thread->open_max = 64;
+ }
thread->calls = 0;
}
for (pass = 0; pass < 2; pass++) {
dvp.dp_fds = thread->events;
dvp.dp_nfds = thread->nevents;
- if (dvp.dp_nfds >= thread->open_max)
+ if (dvp.dp_nfds >= thread->open_max) {
dvp.dp_nfds = thread->open_max - 1;
+ }
#ifndef ISC_SOCKET_USE_POLLWATCH
dvp.dp_timeout = -1;
#else
- if (pollstate == poll_idle)
+ if (pollstate == poll_idle) {
dvp.dp_timeout = -1;
- else
+ } else {
dvp.dp_timeout =
ISC_SOCKET_POLLWATCH_TIMEOUT;
+ }
#endif /* ISC_SOCKET_USE_POLLWATCH */
cc = ioctl(thread->devpoll_fd, DP_POLL, &dvp);
if (cc == -1 && errno == EINVAL) {
result = isc_resource_getcurlimit(
isc_resource_openfiles,
&thread->open_max);
- if (result != ISC_R_SUCCESS)
+ if (result != ISC_R_SUCCESS) {
thread->open_max = 64;
- } else
+ }
+ } else {
break;
+ }
}
#elif defined(USE_SELECT)
/*
#if defined(USE_DEVPOLL) && defined(ISC_SOCKET_USE_POLLWATCH)
if (cc == 0) {
- if (pollstate == poll_active)
+ if (pollstate == poll_active) {
pollstate = poll_checking;
- else if (pollstate == poll_checking)
+ } else if (pollstate == poll_checking) {
pollstate = poll_idle;
+ }
} else if (cc > 0) {
if (pollstate == poll_checking) {
/*
/*
* Process reads on internal, control fd.
*/
- if (FD_ISSET(ctlfd, thread->read_fds_copy))
+ if (FD_ISSET(ctlfd, thread->read_fds_copy)) {
done = process_ctlfd(thread);
+ }
#endif
}
isc_result_t result = ISC_R_SUCCESS;
int i;
char strbuf[ISC_STRERRORSIZE];
+
REQUIRE(thread != NULL);
REQUIRE(VALID_MANAGER(thread->manager));
- REQUIRE(thread->threadid >= 0 && thread->threadid < thread->manager->nthreads);
+ REQUIRE(thread->threadid >= 0 &&
+ thread->threadid < thread->manager->nthreads);
+
thread->fds = isc_mem_get(thread->manager->mctx,
- thread->manager->maxsocks * sizeof(isc__socket_t *));
+ thread->manager->maxsocks *
+ sizeof(isc__socket_t *));
if (thread->fds == NULL) {
- result = ISC_R_NOMEMORY;
- return (result); // TODO
+ return (ISC_R_NOMEMORY);
}
- memset(thread->fds, 0, thread->manager->maxsocks * sizeof(isc_socket_t *));
- thread->fdstate = isc_mem_get(thread->manager->mctx, thread->manager->maxsocks * sizeof(int));
+ memset(thread->fds, 0,
+ thread->manager->maxsocks * sizeof(isc_socket_t *));
+
+ thread->fdstate = isc_mem_get(thread->manager->mctx,
+ thread->manager->maxsocks * sizeof(int));
if (thread ->fdstate == NULL) {
- result = ISC_R_NOMEMORY;
- return (result); // TODO
+ return (ISC_R_NOMEMORY);
}
+
memset(thread->fdstate, 0, thread->manager->maxsocks * sizeof(int));
- thread->fdlock = isc_mem_get(thread->manager->mctx, FDLOCK_COUNT * sizeof(isc_mutex_t));
+ thread->fdlock = isc_mem_get(thread->manager->mctx,
+ FDLOCK_COUNT * sizeof(isc_mutex_t));
if (thread->fdlock == NULL) {
- result = ISC_R_NOMEMORY;
- return (result);
+ return (ISC_R_NOMEMORY);
}
for (i = 0; i < FDLOCK_COUNT; i++) {
#ifdef USE_KQUEUE
thread->nevents = ISC_SOCKET_MAXEVENTS;
- thread->events = isc_mem_get(thread->manager->mctx, sizeof(struct kevent) *
- thread->nevents);
- if (thread->events == NULL)
+ thread->events = isc_mem_get(thread->manager->mctx,
+ sizeof(struct kevent) * thread->nevents);
+ if (thread->events == NULL) {
return (ISC_R_NOMEMORY);
+ }
+
thread->kqueue_fd = kqueue();
if (thread->kqueue_fd == -1) {
result = isc__errno2result(errno);
thread->nevents = ISC_SOCKET_MAXEVENTS;
thread->epoll_events = isc_mem_get(thread->manager->mctx,
(thread->manager->maxsocks *
- sizeof(uint32_t)));
+ sizeof(uint32_t)));
if (thread->epoll_events == NULL) {
return (ISC_R_NOMEMORY);
}
+
memset(thread->epoll_events, 0,
thread->manager->maxsocks * sizeof(uint32_t));
thread->events = isc_mem_get(thread->manager->mctx,
sizeof(struct epoll_event) *
- thread->nevents);
+ thread->nevents);
if (thread->events == NULL) {
return (ISC_R_NOMEMORY);
}
return (result);
}
+
result = watch_fd(thread, thread->pipe_fds[0], SELECT_POKE_READ);
return (result);
thread->open_max = 64;
thread->calls = 0;
thread->events = isc_mem_get(thread->manager->mctx,
- sizeof(struct pollfd) *
- thread->nevents);
- if (thread->events == NULL)
+ sizeof(struct pollfd) * thread->nevents);
+ if (thread->events == NULL) {
return (ISC_R_NOMEMORY);
+ }
+
/*
* Note: fdpollinfo should be able to support all possible FDs, so
* it must have maxsocks entries (not nevents).
*/
thread->fdpollinfo = isc_mem_get(thread->manager->mctx,
sizeof(pollinfo_t) *
- thread->manager->maxsocks);
+ thread->manager->maxsocks);
if (thread->fdpollinfo == NULL) {
isc_mem_put(thread->manager->mctx, thread->events,
sizeof(struct pollfd) * thread->nevents);
return (ISC_R_NOMEMORY);
}
+
memset(thread->fdpollinfo, 0, sizeof(pollinfo_t) *
thread->manager->maxsocks);
thread->devpoll_fd = open("/dev/poll", O_RDWR);
* FD_SETSIZE, but we separate the cases to avoid possible portability
* issues regarding howmany() and the actual representation of fd_set.
*/
- thread->fd_bufsize = howmany(manager->maxsocks, NFDBITS) *
- sizeof(fd_mask);
+ thread->fd_bufsize =
+ howmany(manager->maxsocks, NFDBITS) * sizeof(fd_mask);
#else
thread->fd_bufsize = sizeof(fd_set);
#endif
thread->write_fds = NULL;
thread->write_fds_copy = NULL;
- thread->read_fds = isc_mem_get(thread->manager->mctx, thread->fd_bufsize);
- if (thread->read_fds != NULL)
+ thread->read_fds = isc_mem_get(thread->manager->mctx,
+ thread->fd_bufsize);
+ if (thread->read_fds != NULL) {
thread->read_fds_copy = isc_mem_get(thread->manager->mctx,
thread->fd_bufsize);
- if (thread->read_fds_copy != NULL)
+ }
+ if (thread->read_fds_copy != NULL) {
thread->write_fds = isc_mem_get(thread->manager->mctx,
thread->fd_bufsize);
+ }
if (thread->write_fds != NULL) {
thread->write_fds_copy = isc_mem_get(thread->manager->mctx,
- thread->fd_bufsize);
+ thread->fd_bufsize);
}
if (thread->write_fds_copy == NULL) {
if (thread->write_fds != NULL) {
- isc_mem_put(thread->manager->mctx, thread->write_fds,
- thread->fd_bufsize);
+ isc_mem_put(thread->manager->mctx,
+ thread->write_fds, thread->fd_bufsize);
}
if (thread->read_fds_copy != NULL) {
isc_mem_put(thread->manager->mctx,
- thread->read_fds_copy,
- thread->fd_bufsize);
+ thread->read_fds_copy, thread->fd_bufsize);
}
if (thread->read_fds != NULL) {
- isc_mem_put(thread->manager->mctx, thread->read_fds,
- thread->fd_bufsize);
+ isc_mem_put(thread->manager->mctx,
+ thread->read_fds, thread->fd_bufsize);
}
return (ISC_R_NOMEMORY);
}
isc_mem_put(mctx, thread->fdpollinfo,
sizeof(pollinfo_t) * thread->manager->maxsocks);
#elif defined(USE_SELECT)
- if (thread->read_fds != NULL)
+ if (thread->read_fds != NULL) {
isc_mem_put(mctx, thread->read_fds, thread->fd_bufsize);
- if (thread->read_fds_copy != NULL)
+ }
+ if (thread->read_fds_copy != NULL) {
isc_mem_put(mctx, thread->read_fds_copy, thread->fd_bufsize);
- if (thread->write_fds != NULL)
+ }
+ if (thread->write_fds != NULL) {
isc_mem_put(mctx, thread->write_fds, thread->fd_bufsize);
- if (thread->write_fds_copy != NULL)
+ }
+ if (thread->write_fds_copy != NULL) {
isc_mem_put(mctx, thread->write_fds_copy, thread->fd_bufsize);
+ }
#endif /* USE_KQUEUE */
- for (i = 0; i < (int)thread->manager->maxsocks; i++)
- if (thread->fdstate[i] == CLOSE_PENDING) /* no need to lock */
+ for (i = 0; i < (int)thread->manager->maxsocks; i++) {
+ if (thread->fdstate[i] == CLOSE_PENDING) {
+ /* no need to lock */
(void)close(i);
+ }
+ }
#if defined(USE_EPOLL)
isc_mem_put(thread->manager->mctx, thread->epoll_events,
if (thread->fdlock != NULL) {
- for (i = 0; i < FDLOCK_COUNT; i++)
+ for (i = 0; i < FDLOCK_COUNT; i++) {
DESTROYLOCK(&thread->fdlock[i]);
+ }
isc_mem_put(thread->manager->mctx, thread->fdlock,
FDLOCK_COUNT * sizeof(isc_mutex_t));
}
isc_result_t
isc_socketmgr_create2(isc_mem_t *mctx, isc_socketmgr_t **managerp,
- unsigned int maxsocks, int nthreads)
+ unsigned int maxsocks, int nthreads)
{
int i;
isc__socketmgr_t *manager;
/*
* Start up the select/poll thread.
*/
- manager->threads = isc_mem_get(mctx, sizeof(isc__socketthread_t) * manager->nthreads);
+ manager->threads = isc_mem_get(mctx, sizeof(isc__socketthread_t)
+ * manager->nthreads);
RUNTIME_CHECK(manager->threads != NULL);
isc_mem_attach(mctx, &manager->mctx);
manager->threads[i].manager = manager;
manager->threads[i].threadid = i;
setup_thread(&manager->threads[i]);
- if (isc_thread_create(netthread, &manager->threads[i], &manager->threads[i].thread) !=
- ISC_R_SUCCESS) {
+ result = isc_thread_create(netthread, &manager->threads[i],
+ &manager->threads[i].thread);
+ if (result != ISC_R_SUCCESS) {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"isc_thread_create() %s",
- isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
- ISC_MSG_FAILED, "failed"));
+ isc_msgcat_get(isc_msgcat,
+ ISC_MSGSET_GENERAL,
+ ISC_MSG_FAILED,
+ "failed"));
return (ISC_R_UNEXPECTED);
}
char tname[1024];
void
isc_socketmgr_destroy(isc_socketmgr_t **managerp) {
isc__socketmgr_t *manager;
- int i;
isc_mem_t *mctx;
+ int i;
/*
* Destroy a socket manager.
* Wait for thread to exit.
*/
for (i = 0; i < manager->nthreads; i++) {
- if (isc_thread_join(manager->threads[i].thread, NULL) != ISC_R_SUCCESS)
+ isc_result_t result;
+ result = isc_thread_join(manager->threads[i].thread, NULL);
+ if (result != ISC_R_SUCCESS) {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"isc_thread_join() %s",
- isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
- ISC_MSG_FAILED, "failed"));
+ isc_msgcat_get(isc_msgcat,
+ ISC_MSGSET_GENERAL,
+ ISC_MSG_FAILED,
+ "failed"));
+ }
cleanup_thread(manager->mctx, &manager->threads[i]);
}
/*
* Clean up.
*/
- isc_mem_put(manager->mctx, manager->threads, sizeof(isc__socketthread_t) * manager->nthreads);
+ isc_mem_put(manager->mctx, manager->threads,
+ sizeof(isc__socketthread_t) * manager->nthreads);
(void)isc_condition_destroy(&manager->shutdown_ok);
-
- if (manager->stats != NULL)
+ if (manager->stats != NULL) {
isc_stats_detach(&manager->stats);
+ }
DESTROYLOCK(&manager->lock);
manager->common.magic = 0;
manager->common.impmagic = 0;
LOCK(&sock->lock);
have_lock = true;
- if (ISC_LIST_EMPTY(sock->recv_list))
+ if (ISC_LIST_EMPTY(sock->recv_list)) {
io_state = doio_recv(sock, dev);
- else
+ } else {
io_state = DOIO_SOFT;
+ }
}
switch (io_state) {
"socket_recv: event %p -> task %p",
dev, ntask);
- if ((flags & ISC_SOCKFLAG_IMMEDIATE) != 0)
+ if ((flags & ISC_SOCKFLAG_IMMEDIATE) != 0) {
result = ISC_R_INPROGRESS;
+ }
break;
case DOIO_EOF:
case DOIO_HARD:
case DOIO_SUCCESS:
- if ((flags & ISC_SOCKFLAG_IMMEDIATE) == 0)
+ if ((flags & ISC_SOCKFLAG_IMMEDIATE) == 0) {
send_recvdone_event(sock, &dev);
+ }
break;
}
- if (have_lock)
+ if (have_lock) {
UNLOCK(&sock->lock);
+ }
return (result);
}
}
}
- if (sock->type == isc_sockettype_udp)
+ if (sock->type == isc_sockettype_udp) {
io_state = doio_send(sock, dev);
- else {
+ } else {
LOCK(&sock->lock);
have_lock = true;
- if (ISC_LIST_EMPTY(sock->send_list))
+ if (ISC_LIST_EMPTY(sock->send_list)) {
io_state = doio_send(sock, dev);
- else
+ } else {
io_state = DOIO_SOFT;
+ }
}
switch (io_state) {
"socket_send: event %p -> task %p",
dev, ntask);
- if ((flags & ISC_SOCKFLAG_IMMEDIATE) != 0)
+ if ((flags & ISC_SOCKFLAG_IMMEDIATE) != 0) {
result = ISC_R_INPROGRESS;
+ }
break;
}
case DOIO_HARD:
case DOIO_SUCCESS:
- if ((flags & ISC_SOCKFLAG_IMMEDIATE) == 0)
+ if ((flags & ISC_SOCKFLAG_IMMEDIATE) == 0) {
send_senddone_event(sock, &dev);
+ }
break;
}
- if (have_lock)
+ if (have_lock) {
UNLOCK(&sock->lock);
+ }
return (result);
}
* Only set SO_REUSEADDR when we want a specific port.
*/
#ifdef AF_UNIX
- if (sock->pf == AF_UNIX)
+ if (sock->pf == AF_UNIX) {
goto bind_socket;
+ }
#endif
if ((options & ISC_SOCKET_REUSEADDRESS) != 0 &&
- isc_sockaddr_getport(sockaddr) != (in_port_t)0) {
- if (setsockopt(sock->fd, SOL_SOCKET, SO_REUSEADDR, (void *)&on,
- sizeof(on)) < 0) {
+ isc_sockaddr_getport(sockaddr) != (in_port_t) 0)
+ {
+ if (setsockopt(sock->fd, SOL_SOCKET, SO_REUSEADDR,
+ (void *)&on, sizeof(on)) < 0)
+ {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"setsockopt(%d) %s", sock->fd,
isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
ISC_MSG_FAILED, "failed"));
}
- if (setsockopt(sock->fd, SOL_SOCKET, SO_REUSEPORT, (void *)&on,
- sizeof(on)) < 0) {
+ if (setsockopt(sock->fd, SOL_SOCKET, SO_REUSEPORT,
+ (void *)&on, sizeof(on)) < 0)
+ {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"setsockopt(%d) %s", sock->fd,
isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
if (sock < 0) {
close(sock);
return;
- } else if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void *)&yes,
- sizeof(yes)) < 0) {
+ } else if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+ (void *)&yes, sizeof(yes)) < 0)
+ {
close(sock);
return;
- } else if (setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, (void *)&yes,
- sizeof(yes)) < 0) {
+ } else if (setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
+ (void *)&yes, sizeof(yes)) < 0)
+ {
close(sock);
return;
}
static const char *
_socktype(isc_sockettype_t type)
{
- if (type == isc_sockettype_udp)
+ switch (type) {
+ case isc_sockettype_udp:
return ("udp");
- else if (type == isc_sockettype_tcp)
+ case isc_sockettype_tcp:
return ("tcp");
- else if (type == isc_sockettype_unix)
+ case isc_sockettype_unix:
return ("unix");
- else
+ default:
return ("not-initialized");
+ }
}
#endif
TRY0(xmlTextWriterStartElement(writer,
ISC_XMLCHAR "references"));
TRY0(xmlTextWriterWriteFormatString(writer, "%d",
- (int)isc_refcount_current(&sock->references)));
+ (int)isc_refcount_current(&sock->references)));
TRY0(xmlTextWriterEndElement(writer));
TRY0(xmlTextWriterWriteElement(writer, ISC_XMLCHAR "type",
json_object_object_add(entry, "name", obj);
}
- obj = json_object_new_int((int)isc_refcount_current(&sock->references));
+ obj = json_object_new_int(
+ (int)isc_refcount_current(&sock->references));
CHECKMEM(obj);
json_object_object_add(entry, "references", obj);