From: Roy Marples Date: Thu, 30 Oct 2025 14:07:54 +0000 (+0000) Subject: eloop: Import latest from dhcpsd X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=c4d6af24d34dbb77188ec199c10e31b2c3bc410c;p=thirdparty%2Fdhcpcd.git eloop: Import latest from dhcpsd Finally have kqueue and epoll working for a multi-process setup. The secret sauce was that after forking the fd for polling is invalid and as such should not be closed blindly. --- diff --git a/src/eloop.c b/src/eloop.c index 49337837..a65e1679 100644 --- a/src/eloop.c +++ b/src/eloop.c @@ -26,136 +26,49 @@ * SUCH DAMAGE. */ -/* NOTES: - * Basically for a small number of fd's (total, not max fd) - * of say a few hundred, ppoll(2) performs just fine, if not faster than others. - * It also has the smallest memory and binary size footprint. - * ppoll(2) is available on all modern OS my software runs on and should be - * an up and coming POSIX standard interface. - * If ppoll is not available, then pselect(2) can be used instead which has - * even smaller memory and binary size footprint. - * However, this difference is quite tiny and the ppoll API is superior. - * pselect cannot return error conditions such as EOF for example. - * - * Both epoll(7) and kqueue(2) require an extra fd per process to manage - * their respective list of interest AND syscalls to manage it. - * So for a small number of fd's, these are more resource intensive, - * especially when used with more than one process. - * - * epoll avoids the resource limit RLIMIT_NOFILE Linux poll stupidly applies. - * kqueue avoids the same limit on OpenBSD. - * ppoll can still be secured in both by using SEECOMP or pledge. - * - * kqueue can avoid the signal trick we use here so that we function calls - * other than those listed in sigaction(2) in our signal handlers which is - * probably more robust than ours at surviving a signal storm. - * signalfd(2) is available for Linux which probably works in a similar way - * but it's yet another fd to use. - * - * Taking this all into account, ppoll(2) is the default mechanism used here. - */ - #if (defined(__unix__) || defined(unix)) && !defined(USG) #include #endif #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* config.h should define HAVE_PPOLL, etc. 
*/ -#if defined(HAVE_CONFIG_H) && !defined(NO_CONFIG_H) -#include "config.h" -#endif - -/* Prioritise which mechanism we want to use.*/ -#if defined(HAVE_PPOLL) -#undef HAVE_EPOLL -#undef HAVE_KQUEUE -#undef HAVE_PSELECT -#elif defined(HAVE_POLLTS) -#define HAVE_PPOLL -#define ppoll pollts -#undef HAVE_EPOLL -#undef HAVE_KQUEUE -#undef HAVE_PSELECT -#elif defined(HAVE_KQUEUE) -#undef HAVE_EPOLL -#undef HAVE_PSELECT -#elif defined(HAVE_EPOLL) -#undef HAVE_KQUEUE -#undef HAVE_PSELECT -#elif !defined(HAVE_PSELECT) -#define HAVE_PPOLL -#endif - -#if defined(HAVE_KQUEUE) +/* + * On BSD use kqueue(2) + * On Linux use epoll(7) + * Everywhere else use ppoll(2) + */ +#ifdef BSD #include +#define USE_KQUEUE +#define NFD 2 +#if defined(__NetBSD__) || defined(__OpenBSD__) +#define HAVE_KQUEUE1 +#endif #if defined(__DragonFly__) || defined(__FreeBSD__) -#define _kevent(kq, cl, ncl, el, nel, t) \ +#define _kevent(kq, cl, ncl, el, nel, t) \ kevent((kq), (cl), (int)(ncl), (el), (int)(nel), (t)) #else -#define _kevent kevent +#define _kevent kevent #endif -#define NFD 2 -#elif defined(HAVE_EPOLL) +#elif defined(__linux__) #include -#define NFD 1 -#elif defined(HAVE_PPOLL) +#define USE_EPOLL +#else #include -#define NFD 1 -#elif defined(HAVE_PSELECT) -#include +#define USE_PPOLL #endif -#include "eloop.h" - -#ifndef UNUSED -#define UNUSED(a) (void)((a)) -#endif -#ifndef __unused -#ifdef __GNUC__ -#define __unused __attribute__((__unused__)) -#else -#define __unused -#endif -#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include -/* Our structures require TAILQ macros, which really every libc should - * ship as they are useful beyond belief. - * Sadly some libc's don't have sys/queue.h and some that do don't have - * the TAILQ_FOREACH macro. For those that don't, the application using - * this implementation will need to ship a working queue.h somewhere. - * If we don't have sys/queue.h found in config.h, then - * allow QUEUE_H to override loading queue.h in the current directory. */ -#ifndef TAILQ_FOREACH -#ifdef HAVE_SYS_QUEUE_H -#include -#elif defined(QUEUE_H) -#define __QUEUE_HEADER(x) #x -#define _QUEUE_HEADER(x) __QUEUE_HEADER(x) -#include _QUEUE_HEADER(QUEUE_H) -#else +#include "eloop.h" #include "queue.h" -#endif -#endif - -#ifdef ELOOP_DEBUG -#include -#endif - -#ifndef __arraycount -# define __arraycount(__x) (sizeof(__x) / sizeof(__x[0])) -#endif /* * Allow a backlog of signals. @@ -163,7 +76,7 @@ * use the same signal handler or have the signal handler unset. * Otherwise the signal might not behave as expected. */ -#define ELOOP_NSIGNALS 5 +#define ELOOP_NSIGNALS 5 /* * time_t is a signed integer of an unspecified size. @@ -171,11 +84,19 @@ * value and use that as a maximum. */ #ifndef TIME_MAX -#define TIME_MAX ((1ULL << (sizeof(time_t) * NBBY - 1)) - 1) +#define TIME_MAX ((1ULL << (sizeof(time_t) * NBBY - 1)) - 1) #endif /* The unsigned maximum is then simple - multiply by two and add one. 
*/ #ifndef UTIME_MAX -#define UTIME_MAX (TIME_MAX * 2) + 1 +#define UTIME_MAX (TIME_MAX * 2) + 1 +#endif + +#ifndef NFD +#define NFD 1 +#endif + +#ifndef UNUSED +#define UNUSED(a) (void)(a) #endif struct eloop_event { @@ -184,7 +105,7 @@ struct eloop_event { void (*cb)(void *, unsigned short); void *cb_arg; unsigned short events; -#ifdef HAVE_PPOLL +#ifdef USE_PPOLL struct pollfd *pollfd; #endif }; @@ -199,41 +120,40 @@ struct eloop_timeout { }; struct eloop { - TAILQ_HEAD (event_head, eloop_event) events; + TAILQ_HEAD(event_head, eloop_event) events; size_t nevents; struct event_head free_events; struct timespec now; - TAILQ_HEAD (timeout_head, eloop_timeout) timeouts; + TAILQ_HEAD(timeout_head, eloop_timeout) timeouts; struct timeout_head free_timeouts; const int *signals; size_t nsignals; + sigset_t sigset; void (*signal_cb)(int, void *); void *signal_cb_ctx; -#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL) +#if defined(USE_KQUEUE) || defined(USE_EPOLL) int fd; #endif -#if defined(HAVE_KQUEUE) +#if defined(USE_KQUEUE) struct kevent *fds; -#elif defined(HAVE_EPOLL) +#elif defined(USE_EPOLL) struct epoll_event *fds; -#elif defined(HAVE_PPOLL) +#elif defined(USE_PPOLL) struct pollfd *fds; #endif -#if !defined(HAVE_PSELECT) size_t nfds; -#endif int exitcode; bool exitnow; bool events_need_setup; - bool cleared; + bool events_invalid; }; #ifdef HAVE_REALLOCARRAY -#define eloop_realloca reallocarray +#define eloop_realloca reallocarray #else /* Handy routing to check for potential overflow. * reallocarray(3) and reallocarr(3) are not portable. */ @@ -241,7 +161,6 @@ struct eloop { static void * eloop_realloca(void *ptr, size_t n, size_t size) { - if ((n | size) >= SQRT_SIZE_MAX && n > SIZE_MAX / size) { errno = EOVERFLOW; return NULL; @@ -250,24 +169,22 @@ eloop_realloca(void *ptr, size_t n, size_t size) } #endif - static int eloop_event_setup_fds(struct eloop *eloop) { struct eloop_event *e, *ne; -#if defined(HAVE_KQUEUE) +#if defined(USE_KQUEUE) struct kevent *pfd; size_t nfds = eloop->nsignals; -#elif defined(HAVE_EPOLL) +#elif defined(USE_EPOLL) struct epoll_event *pfd; size_t nfds = 0; -#elif defined(HAVE_PPOLL) +#elif defined(USE_PPOLL) struct pollfd *pfd; size_t nfds = 0; #endif -#ifndef HAVE_PSELECT - nfds += eloop->nevents * NFD; + nfds += eloop->nevents; if (eloop->nfds < nfds) { pfd = eloop_realloca(eloop->fds, nfds, sizeof(*pfd)); if (pfd == NULL) @@ -275,9 +192,8 @@ eloop_event_setup_fds(struct eloop *eloop) eloop->fds = pfd; eloop->nfds = nfds; } -#endif -#ifdef HAVE_PPOLL +#ifdef USE_PPOLL pfd = eloop->fds; #endif TAILQ_FOREACH_SAFE(e, &eloop->events, next, ne) { @@ -286,7 +202,7 @@ eloop_event_setup_fds(struct eloop *eloop) TAILQ_INSERT_TAIL(&eloop->free_events, e, next); continue; } -#ifdef HAVE_PPOLL +#ifdef USE_PPOLL e->pollfd = pfd; pfd->fd = e->fd; pfd->events = 0; @@ -306,7 +222,6 @@ eloop_event_setup_fds(struct eloop *eloop) size_t eloop_event_count(const struct eloop *eloop) { - return eloop->nevents; } @@ -316,16 +231,14 @@ eloop_event_add(struct eloop *eloop, int fd, unsigned short events, { struct eloop_event *e; bool added; -#if defined(HAVE_KQUEUE) +#if defined(USE_KQUEUE) struct kevent ke[2], *kep = &ke[0]; size_t n; -#elif defined(HAVE_EPOLL) +#elif defined(USE_EPOLL) struct epoll_event epe; int op; #endif - assert(eloop != NULL); - assert(cb != NULL && cb_arg != NULL); if (fd == -1 || !(events & (ELE_READ | ELE_WRITE | ELE_HANGUP))) { errno = EINVAL; return -1; @@ -357,7 +270,7 @@ eloop_event_add(struct eloop *eloop, int fd, unsigned short events, e->cb = cb; 
e->cb_arg = cb_arg; -#if defined(HAVE_KQUEUE) +#if defined(USE_KQUEUE) n = 2; if (events & ELE_READ && !(e->events & ELE_READ)) EV_SET(kep++, (uintptr_t)fd, EVFILT_READ, EV_ADD, 0, 0, e); @@ -373,8 +286,8 @@ eloop_event_add(struct eloop *eloop, int fd, unsigned short events, n--; #ifdef EVFILT_PROCDESC if (events & ELE_HANGUP) - EV_SET(kep++, (uintptr_t)fd, EVFILT_PROCDESC, EV_ADD, - NOTE_EXIT, 0, e); + EV_SET(kep++, (uintptr_t)fd, EVFILT_PROCDESC, EV_ADD, NOTE_EXIT, + 0, e); else n--; #endif @@ -385,7 +298,7 @@ eloop_event_add(struct eloop *eloop, int fd, unsigned short events, } return -1; } -#elif defined(HAVE_EPOLL) +#elif defined(USE_EPOLL) memset(&epe, 0, sizeof(epe)); epe.data.ptr = e; if (events & ELE_READ) @@ -400,11 +313,9 @@ eloop_event_add(struct eloop *eloop, int fd, unsigned short events, } return -1; } -#elif defined(HAVE_PPOLL) +#elif defined(USE_PPOLL) e->pollfd = NULL; UNUSED(added); -#else - UNUSED(added); #endif e->events = events; eloop->events_need_setup = true; @@ -415,12 +326,11 @@ int eloop_event_delete(struct eloop *eloop, int fd) { struct eloop_event *e; -#if defined(HAVE_KQUEUE) +#if defined(USE_KQUEUE) struct kevent ke[2], *kep = &ke[0]; size_t n; #endif - assert(eloop != NULL); if (fd == -1) { errno = EINVAL; return -1; @@ -435,7 +345,7 @@ eloop_event_delete(struct eloop *eloop, int fd) return -1; } -#if defined(HAVE_KQUEUE) +#if defined(USE_KQUEUE) n = 0; if (e->events & ELE_READ) { EV_SET(kep++, (uintptr_t)fd, EVFILT_READ, EV_DELETE, 0, 0, e); @@ -447,10 +357,11 @@ eloop_event_delete(struct eloop *eloop, int fd) } if (n != 0 && _kevent(eloop->fd, ke, n, NULL, 0, NULL) == -1) return -1; -#elif defined(HAVE_EPOLL) +#elif defined(USE_EPOLL) if (epoll_ctl(eloop->fd, EPOLL_CTL_DEL, fd, NULL) == -1) return -1; #endif + e->fd = -1; eloop->nevents--; eloop->events_need_setup = true; @@ -492,7 +403,7 @@ eloop_timespec_diff(const struct timespec *tsp, const struct timespec *usp, return secs; } -static void +static int eloop_reduce_timers(struct eloop *eloop) { struct timespec now; @@ -500,7 +411,8 @@ eloop_reduce_timers(struct eloop *eloop) unsigned int nsecs; struct eloop_timeout *t; - clock_gettime(CLOCK_MONOTONIC, &now); + if (clock_gettime(CLOCK_MONOTONIC, &now) == -1) + return -1; secs = eloop_timespec_diff(&now, &eloop->now, &nsecs); TAILQ_FOREACH(t, &eloop->timeouts, next) { @@ -514,8 +426,8 @@ eloop_reduce_timers(struct eloop *eloop) t->nseconds = 0; else { t->seconds--; - t->nseconds = NSEC_PER_SEC - - (nsecs - t->nseconds); + t->nseconds = NSEC_PER_SEC - + (nsecs - t->nseconds); } } else t->nseconds -= nsecs; @@ -523,6 +435,7 @@ eloop_reduce_timers(struct eloop *eloop) } eloop->now = now; + return 0; } /* @@ -532,16 +445,11 @@ eloop_reduce_timers(struct eloop *eloop) * unsigned int should match or be greater than any on wire specified timeout. */ static int -eloop_q_timeout_add(struct eloop *eloop, int queue, - unsigned int seconds, unsigned int nseconds, - void (*callback)(void *), void *arg) +eloop_q_timeout_add(struct eloop *eloop, int queue, unsigned int seconds, + unsigned int nseconds, void (*callback)(void *), void *arg) { struct eloop_timeout *t, *tt = NULL; - assert(eloop != NULL); - assert(callback != NULL); - assert(nseconds <= NSEC_PER_SEC); - /* Remove existing timeout if present. 
*/ TAILQ_FOREACH(t, &eloop->timeouts, next) { if (t->callback == callback && t->arg == arg) { @@ -550,6 +458,12 @@ eloop_q_timeout_add(struct eloop *eloop, int queue, } } + if (eloop_reduce_timers(eloop) == -1) { + if (t != NULL) + free(t); + return -1; + } + if (t == NULL) { /* No existing, so allocate or grab one from the free pool. */ if ((t = TAILQ_FIRST(&eloop->free_timeouts))) { @@ -560,8 +474,6 @@ eloop_q_timeout_add(struct eloop *eloop, int queue, } } - eloop_reduce_timers(eloop); - t->seconds = seconds; t->nseconds = nseconds; t->callback = callback; @@ -572,8 +484,7 @@ eloop_q_timeout_add(struct eloop *eloop, int queue, * soonest first. */ TAILQ_FOREACH(tt, &eloop->timeouts, next) { if (t->seconds < tt->seconds || - (t->seconds == tt->seconds && t->nseconds < tt->nseconds)) - { + (t->seconds == tt->seconds && t->nseconds < tt->nseconds)) { TAILQ_INSERT_BEFORE(tt, t, next); return 0; } @@ -586,7 +497,6 @@ int eloop_q_timeout_add_tv(struct eloop *eloop, int queue, const struct timespec *when, void (*callback)(void *), void *arg) { - if (when->tv_sec < 0 || (unsigned long)when->tv_sec > UINT_MAX) { errno = EINVAL; return -1; @@ -596,16 +506,14 @@ eloop_q_timeout_add_tv(struct eloop *eloop, int queue, return -1; } - return eloop_q_timeout_add(eloop, queue, - (unsigned int)when->tv_sec, (unsigned int)when->tv_sec, - callback, arg); + return eloop_q_timeout_add(eloop, queue, (unsigned int)when->tv_sec, + (unsigned int)when->tv_sec, callback, arg); } int eloop_q_timeout_add_sec(struct eloop *eloop, int queue, unsigned int seconds, void (*callback)(void *), void *arg) { - return eloop_q_timeout_add(eloop, queue, seconds, 0, callback, arg); } @@ -622,25 +530,21 @@ eloop_q_timeout_add_msec(struct eloop *eloop, int queue, unsigned long when, } nseconds = (when % MSEC_PER_SEC) * NSEC_PER_MSEC; - return eloop_q_timeout_add(eloop, queue, - (unsigned int)seconds, (unsigned int)nseconds, callback, arg); + return eloop_q_timeout_add(eloop, queue, (unsigned int)seconds, + (unsigned int)nseconds, callback, arg); } int -eloop_q_timeout_delete(struct eloop *eloop, int queue, - void (*callback)(void *), void *arg) +eloop_q_timeout_delete(struct eloop *eloop, int queue, void (*callback)(void *), + void *arg) { struct eloop_timeout *t, *tt; int n; - assert(eloop != NULL); - n = 0; TAILQ_FOREACH_SAFE(t, &eloop->timeouts, next, tt) { - if ((queue == 0 || t->queue == queue) && - t->arg == arg && - (!callback || t->callback == callback)) - { + if ((queue == 0 || t->queue == queue) && t->arg == arg && + (!callback || t->callback == callback)) { TAILQ_REMOVE(&eloop->timeouts, t, next); TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next); n++; @@ -652,45 +556,107 @@ eloop_q_timeout_delete(struct eloop *eloop, int queue, void eloop_exit(struct eloop *eloop, int code) { - - assert(eloop != NULL); - eloop->exitcode = code; eloop->exitnow = true; } -void -eloop_enter(struct eloop *eloop) +#if defined(USE_KQUEUE) || defined(USE_EPOLL) +static int +eloop_open(struct eloop *eloop) { + int fd; - assert(eloop != NULL); +#if defined(HAVE_KQUEUE1) + fd = kqueue1(O_CLOEXEC); +#elif defined(USE_KQUEUE) + int flags; - eloop->exitnow = false; + fd = kqueue(); + flags = fcntl(fd, F_GETFD, 0); + if (!(flags != -1 && !(flags & FD_CLOEXEC) && + fcntl(fd, F_SETFD, flags | FD_CLOEXEC) == 0)) { + close(fd); + return -1; + } +#elif defined(USE_EPOLL) + fd = epoll_create1(EPOLL_CLOEXEC); +#endif + + eloop->fd = fd; + return fd; +} +#endif + +static void +eloop_clear(struct eloop *eloop, unsigned short flags) +{ + if (eloop == NULL) + 
return; + + if (!(flags & ELF_KEEP_SIGNALS)) { + eloop->signals = NULL; + eloop->nsignals = 0; + eloop->signal_cb = NULL; + eloop->signal_cb_ctx = NULL; + } + + if (!(flags & ELF_KEEP_EVENTS)) { + struct eloop_event *e, *en; + + TAILQ_FOREACH_SAFE(e, &eloop->events, next, en) + free(e); + TAILQ_INIT(&eloop->events); + + TAILQ_FOREACH_SAFE(e, &eloop->free_events, next, en) + free(e); + TAILQ_INIT(&eloop->free_events); + + eloop->nevents = 0; + eloop->events_invalid = true; + } + + if (!(flags & ELF_KEEP_TIMEOUTS)) { + struct eloop_timeout *t, *tn; + + TAILQ_FOREACH_SAFE(t, &eloop->timeouts, next, tn) + free(t); + TAILQ_INIT(&eloop->timeouts); + + TAILQ_FOREACH_SAFE(t, &eloop->free_timeouts, next, tn) + free(t); + TAILQ_INIT(&eloop->free_timeouts); + } } /* Must be called after fork(2) */ int -eloop_forked(struct eloop *eloop) +eloop_forked(struct eloop *eloop, unsigned short flags) { -#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL) +#if defined(USE_KQUEUE) || defined(USE_EPOLL) struct eloop_event *e; -#if defined(HAVE_KQUEUE) +#if defined(USE_KQUEUE) struct kevent *pfds, *pfd; size_t i; -#elif defined(HAVE_EPOLL) + int err; +#elif defined(USE_EPOLL) struct epoll_event epe = { .events = 0 }; #endif - assert(eloop != NULL); -#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL) - if (eloop->fd != -1) - close(eloop->fd); - if (eloop_open(eloop) == -1) +#if defined(USE_KQUEUE) || defined(USE_EPOLL) + /* The fd is invalid after a fork, no need to close it. */ + eloop->fd = -1; + if (flags && eloop_open(eloop) == -1) return -1; #endif + eloop_clear(eloop, flags); + if (!flags) + return 0; -#ifdef HAVE_KQUEUE - pfds = malloc((eloop->nsignals + (eloop->nevents * NFD)) * sizeof(*pfds)); +#ifdef USE_KQUEUE + pfds = malloc( + (eloop->nsignals + (eloop->nevents * NFD)) * sizeof(*pfds)); + if (pfds == NULL) + return -1; pfd = pfds; if (eloop->signal_cb != NULL) { @@ -705,18 +671,18 @@ eloop_forked(struct eloop *eloop) TAILQ_FOREACH(e, &eloop->events, next) { if (e->fd == -1) continue; -#if defined(HAVE_KQUEUE) +#if defined(USE_KQUEUE) if (e->events & ELE_READ) { - EV_SET(pfd++, (uintptr_t)e->fd, - EVFILT_READ, EV_ADD, 0, 0, e); + EV_SET(pfd++, (uintptr_t)e->fd, EVFILT_READ, EV_ADD, 0, + 0, e); i++; } if (e->events & ELE_WRITE) { - EV_SET(pfd++, (uintptr_t)e->fd, - EVFILT_WRITE, EV_ADD, 0, 0, e); + EV_SET(pfd++, (uintptr_t)e->fd, EVFILT_WRITE, EV_ADD, 0, + 0, e); i++; } -#elif defined(HAVE_EPOLL) +#elif defined(USE_EPOLL) memset(&epe, 0, sizeof(epe)); epe.data.ptr = e; if (e->events & ELE_READ) @@ -728,71 +694,39 @@ eloop_forked(struct eloop *eloop) #endif } -#if defined(HAVE_KQUEUE) +#if defined(USE_KQUEUE) if (i == 0) - return 0; - return _kevent(eloop->fd, pfds, i, NULL, 0, NULL); -#else - return 0; -#endif + err = 0; + else + err = _kevent(eloop->fd, pfds, i, NULL, 0, NULL); + free(pfds); + return err; #else - UNUSED(eloop); return 0; #endif -} - -int -eloop_open(struct eloop *eloop) -{ -#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL) - int fd; - - assert(eloop != NULL); -#if defined(HAVE_KQUEUE1) - fd = kqueue1(O_CLOEXEC); -#elif defined(HAVE_KQUEUE) - int flags; - - fd = kqueue(); - flags = fcntl(fd, F_GETFD, 0); - if (!(flags != -1 && !(flags & FD_CLOEXEC) && - fcntl(fd, F_SETFD, flags | FD_CLOEXEC) == 0)) - { - close(fd); - return -1; - } -#elif defined(HAVE_EPOLL) - fd = epoll_create1(EPOLL_CLOEXEC); -#endif - - eloop->fd = fd; - return fd; #else - UNUSED(eloop); + eloop_clear(eloop, flags); return 0; #endif } int -eloop_signal_set_cb(struct eloop *eloop, - const int *signals, size_t nsignals, 
+eloop_signal_set_cb(struct eloop *eloop, const int *signals, size_t nsignals, void (*signal_cb)(int, void *), void *signal_cb_ctx) { -#ifdef HAVE_KQUEUE +#ifdef USE_KQUEUE size_t i; struct kevent *ke, *kes; #endif int error = 0; - assert(eloop != NULL); - -#ifdef HAVE_KQUEUE +#ifdef USE_KQUEUE ke = kes = malloc(MAX(eloop->nsignals, nsignals) * sizeof(*kes)); if (kes == NULL) return -1; for (i = 0; i < eloop->nsignals; i++) { - EV_SET(ke++, (uintptr_t)eloop->signals[i], - EVFILT_SIGNAL, EV_DELETE, 0, 0, NULL); + EV_SET(ke++, (uintptr_t)eloop->signals[i], EVFILT_SIGNAL, + EV_DELETE, 0, 0, NULL); } if (i != 0 && _kevent(eloop->fd, kes, i, NULL, 0, NULL) == -1) { error = -1; @@ -805,13 +739,13 @@ eloop_signal_set_cb(struct eloop *eloop, eloop->signal_cb = signal_cb; eloop->signal_cb_ctx = signal_cb_ctx; -#ifdef HAVE_KQUEUE +#ifdef USE_KQUEUE if (signal_cb == NULL) goto out; ke = kes; for (i = 0; i < eloop->nsignals; i++) { - EV_SET(ke++, (uintptr_t)eloop->signals[i], - EVFILT_SIGNAL, EV_ADD, 0, 0, NULL); + EV_SET(ke++, (uintptr_t)eloop->signals[i], EVFILT_SIGNAL, + EV_ADD, 0, 0, NULL); } if (i != 0 && _kevent(eloop->fd, kes, i, NULL, 0, NULL) == -1) error = -1; @@ -822,47 +756,46 @@ out: return error; } -#ifndef HAVE_KQUEUE -static volatile int _eloop_sig[ELOOP_NSIGNALS]; -static volatile size_t _eloop_nsig; +#ifndef USE_KQUEUE +static volatile int eloop_sig[ELOOP_NSIGNALS]; +static volatile size_t eloop_nsig; static void -eloop_signal3(int sig, __unused siginfo_t *siginfo, __unused void *arg) +eloop_signal3(int sig, siginfo_t *siginfo, void *arg) { + (void)(siginfo); + (void)(arg); - if (_eloop_nsig == __arraycount(_eloop_sig)) { + if (eloop_nsig == sizeof(eloop_sig) / sizeof(eloop_sig[0])) { #ifdef ELOOP_DEBUG fprintf(stderr, "%s: signal storm, discarding signal %d\n", __func__, sig); #endif return; } - - _eloop_sig[_eloop_nsig++] = sig; + eloop_sig[eloop_nsig++] = sig; } #endif int -eloop_signal_mask(struct eloop *eloop, sigset_t *oldset) +eloop_signal_mask(struct eloop *eloop) { sigset_t newset; size_t i; -#ifndef HAVE_KQUEUE +#ifndef USE_KQUEUE struct sigaction sa = { - .sa_sigaction = eloop_signal3, - .sa_flags = SA_SIGINFO, + .sa_sigaction = eloop_signal3, + .sa_flags = SA_SIGINFO, }; #endif - assert(eloop != NULL); - sigemptyset(&newset); for (i = 0; i < eloop->nsignals; i++) sigaddset(&newset, eloop->signals[i]); - if (sigprocmask(SIG_SETMASK, &newset, oldset) == -1) + if (sigprocmask(SIG_SETMASK, &newset, &eloop->sigset) == -1) return -1; -#ifndef HAVE_KQUEUE +#ifndef USE_KQUEUE sigemptyset(&sa.sa_mask); for (i = 0; i < eloop->nsignals; i++) { @@ -895,7 +828,7 @@ eloop_new(void) TAILQ_INIT(&eloop->free_timeouts); eloop->exitcode = EXIT_FAILURE; -#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL) +#if defined(USE_KQUEUE) || defined(USE_EPOLL) if (eloop_open(eloop) == -1) { eloop_free(eloop); return NULL; @@ -905,73 +838,42 @@ eloop_new(void) return eloop; } -void -eloop_clear(struct eloop *eloop, ...) 
+struct eloop * +eloop_new_with_signals(struct eloop *eloop) { - va_list va1, va2; - int except_fd; - struct eloop_event *e, *ne; - struct eloop_timeout *t; + struct eloop *e; + int err; - if (eloop == NULL) - return; + e = eloop_new(); + if (e == NULL) + return NULL; - va_start(va1, eloop); - TAILQ_FOREACH_SAFE(e, &eloop->events, next, ne) { - va_copy(va2, va1); - do - except_fd = va_arg(va2, int); - while (except_fd != -1 && except_fd != e->fd); - va_end(va2); - if (e->fd == except_fd && e->fd != -1) - continue; - TAILQ_REMOVE(&eloop->events, e, next); - if (e->fd != -1) { - close(e->fd); - eloop->nevents--; - } - free(e); + err = eloop_signal_set_cb(e, eloop->signals, eloop->nsignals, + eloop->signal_cb, eloop->signal_cb_ctx); + if (err == -1) { + eloop_free(e); + return NULL; } - va_end(va1); - -#if !defined(HAVE_PSELECT) - /* Free the pollfd buffer and ensure it's re-created before - * the next run. This allows us to shrink it incase we use a lot less - * signals and fds to respond to after forking. */ - free(eloop->fds); - eloop->fds = NULL; - eloop->nfds = 0; - eloop->events_need_setup = true; -#endif + memcpy(&e->sigset, &eloop->sigset, sizeof(e->sigset)); - while ((e = TAILQ_FIRST(&eloop->free_events))) { - TAILQ_REMOVE(&eloop->free_events, e, next); - free(e); - } - while ((t = TAILQ_FIRST(&eloop->timeouts))) { - TAILQ_REMOVE(&eloop->timeouts, t, next); - free(t); - } - while ((t = TAILQ_FIRST(&eloop->free_timeouts))) { - TAILQ_REMOVE(&eloop->free_timeouts, t, next); - free(t); - } - eloop->cleared = true; + return e; } void eloop_free(struct eloop *eloop) { + if (eloop == NULL) + return; - eloop_clear(eloop, -1); -#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL) - if (eloop != NULL && eloop->fd != -1) + eloop_clear(eloop, 0); +#if defined(USE_KQUEUE) || defined(USE_EPOLL) + if (eloop->fd != -1) close(eloop->fd); #endif free(eloop); } -#if defined(HAVE_KQUEUE) +#if defined(USE_KQUEUE) static int eloop_run_kqueue(struct eloop *eloop, const struct timespec *ts) { @@ -985,12 +887,11 @@ eloop_run_kqueue(struct eloop *eloop, const struct timespec *ts) return -1; for (nn = n, ke = eloop->fds; nn != 0; nn--, ke++) { - if (eloop->cleared || eloop->exitnow) + if (eloop->exitnow || eloop->events_invalid) break; e = (struct eloop_event *)ke->udata; if (ke->filter == EVFILT_SIGNAL) { - eloop->signal_cb((int)ke->ident, - eloop->signal_cb_ctx); + eloop->signal_cb((int)ke->ident, eloop->signal_cb_ctx); continue; } if (ke->filter == EVFILT_READ) @@ -1000,9 +901,8 @@ eloop_run_kqueue(struct eloop *eloop, const struct timespec *ts) #ifdef EVFILT_PROCDESC else if (ke->filter == EVFILT_PROCDESC && ke->fflags & NOTE_EXIT) - /* exit status is in ke->data. - * As we default to using ppoll anyway - * we don't have to do anything with it right now. */ + /* exit status is in ke->data + * should we do anything with it? 
*/ events = ELE_HANGUP; #endif else @@ -1016,11 +916,10 @@ eloop_run_kqueue(struct eloop *eloop, const struct timespec *ts) return n; } -#elif defined(HAVE_EPOLL) +#elif defined(USE_EPOLL) static int -eloop_run_epoll(struct eloop *eloop, - const struct timespec *ts, const sigset_t *signals) +eloop_run_epoll(struct eloop *eloop, const struct timespec *ts) { int timeout, n, nn; struct epoll_event *epe; @@ -1030,7 +929,7 @@ eloop_run_epoll(struct eloop *eloop, if (ts != NULL) { if (ts->tv_sec > INT_MAX / 1000 || (ts->tv_sec == INT_MAX / 1000 && - ((ts->tv_nsec + 999999) / 1000000 > INT_MAX % 1000000))) + ((ts->tv_nsec + 999999) / 1000000 > INT_MAX % 1000000))) timeout = INT_MAX; else timeout = (int)(ts->tv_sec * 1000 + @@ -1038,17 +937,13 @@ eloop_run_epoll(struct eloop *eloop, } else timeout = -1; - if (signals != NULL) - n = epoll_pwait(eloop->fd, eloop->fds, - (int)eloop->nevents, timeout, signals); - else - n = epoll_wait(eloop->fd, eloop->fds, - (int)eloop->nevents, timeout); + n = epoll_pwait(eloop->fd, eloop->fds, (int)eloop->nevents, timeout, + &eloop->sigset); if (n == -1) return -1; for (nn = n, epe = eloop->fds; nn != 0; nn--, epe++) { - if (eloop->cleared || eloop->exitnow) + if (eloop->exitnow || eloop->events_invalid) break; e = (struct eloop_event *)epe->data.ptr; if (e->fd == -1) @@ -1067,24 +962,23 @@ eloop_run_epoll(struct eloop *eloop, return n; } -#elif defined(HAVE_PPOLL) +#elif defined(USE_PPOLL) static int -eloop_run_ppoll(struct eloop *eloop, - const struct timespec *ts, const sigset_t *signals) +eloop_run_ppoll(struct eloop *eloop, const struct timespec *ts) { int n, nn; struct eloop_event *e; struct pollfd *pfd; unsigned short events; - n = ppoll(eloop->fds, (nfds_t)eloop->nevents, ts, signals); + n = ppoll(eloop->fds, (nfds_t)eloop->nevents, ts, &eloop->sigset); if (n == -1 || n == 0) return n; nn = n; TAILQ_FOREACH(e, &eloop->events, next) { - if (eloop->cleared || eloop->exitnow) + if (eloop->exitnow || eloop->events_invalid) break; /* Skip freshly added events */ if ((pfd = e->pollfd) == NULL) @@ -1111,77 +1005,28 @@ eloop_run_ppoll(struct eloop *eloop, return n; } -#elif defined(HAVE_PSELECT) - -static int -eloop_run_pselect(struct eloop *eloop, - const struct timespec *ts, const sigset_t *sigmask) -{ - fd_set read_fds, write_fds; - int maxfd, n; - struct eloop_event *e; - unsigned short events; - - FD_ZERO(&read_fds); - FD_ZERO(&write_fds); - maxfd = 0; - TAILQ_FOREACH(e, &eloop->events, next) { - if (e->fd == -1) - continue; - if (e->events & ELE_READ) { - FD_SET(e->fd, &read_fds); - if (e->fd > maxfd) - maxfd = e->fd; - } - if (e->events & ELE_WRITE) { - FD_SET(e->fd, &write_fds); - if (e->fd > maxfd) - maxfd = e->fd; - } - } - - /* except_fd's is for STREAMS devices which we don't use. 
*/ - n = pselect(maxfd + 1, &read_fds, &write_fds, NULL, ts, sigmask); - if (n == -1 || n == 0) - return n; - - TAILQ_FOREACH(e, &eloop->events, next) { - if (eloop->cleared || eloop->exitnow) - break; - if (e->fd == -1) - continue; - events = 0; - if (FD_ISSET(e->fd, &read_fds)) - events |= ELE_READ; - if (FD_ISSET(e->fd, &write_fds)) - events |= ELE_WRITE; - if (events) - e->cb(e->cb_arg, events); - } - - return n; -} #endif int -eloop_start(struct eloop *eloop, sigset_t *signals) +eloop_start(struct eloop *eloop) { int error; struct eloop_timeout *t; struct timespec ts, *tsp; - assert(eloop != NULL); -#ifdef HAVE_KQUEUE - UNUSED(signals); -#endif + eloop->exitnow = false; for (;;) { if (eloop->exitnow) break; + if (eloop->events_invalid) { + eloop->events_invalid = false; + eloop->events_need_setup = true; + } -#ifndef HAVE_KQUEUE - if (_eloop_nsig != 0) { - int n = _eloop_sig[--_eloop_nsig]; +#ifndef USE_KQUEUE + if (eloop_nsig != 0) { + int n = eloop_sig[--eloop_nsig]; if (eloop->signal_cb != NULL) eloop->signal_cb(n, eloop->signal_cb_ctx); @@ -1215,21 +1060,15 @@ eloop_start(struct eloop *eloop, sigset_t *signals) } else tsp = NULL; - eloop->cleared = false; if (eloop->events_need_setup) eloop_event_setup_fds(eloop); -#if defined(HAVE_KQUEUE) - UNUSED(signals); +#if defined(USE_KQUEUE) error = eloop_run_kqueue(eloop, tsp); -#elif defined(HAVE_EPOLL) - error = eloop_run_epoll(eloop, tsp, signals); -#elif defined(HAVE_PPOLL) - error = eloop_run_ppoll(eloop, tsp, signals); -#elif defined(HAVE_PSELECT) - error = eloop_run_pselect(eloop, tsp, signals); -#else -#error no polling mechanism to run! +#elif defined(USE_EPOLL) + error = eloop_run_epoll(eloop, tsp); +#elif defined(USE_PPOLL) + error = eloop_run_ppoll(eloop, tsp); #endif if (error == -1) { if (errno == EINTR) diff --git a/src/eloop.h b/src/eloop.h index 181e6e75..cbd88ec9 100644 --- a/src/eloop.h +++ b/src/eloop.h @@ -32,33 +32,41 @@ #include /* Handy macros to create subsecond timeouts */ -#define CSEC_PER_SEC 100 -#define MSEC_PER_SEC 1000 -#define NSEC_PER_CSEC 10000000 -#define NSEC_PER_MSEC 1000000 -#define NSEC_PER_SEC 1000000000 +#define CSEC_PER_SEC 100 +#define MSEC_PER_SEC 1000 +#define NSEC_PER_CSEC 10000000 +#define NSEC_PER_MSEC 1000000 +#define NSEC_PER_SEC 1000000000 /* eloop queues are really only for deleting timeouts registered * for a function or object. * The idea being that one interface has different timeouts for * say DHCP and DHCPv6. */ #ifndef ELOOP_QUEUE - #define ELOOP_QUEUE 1 +#define ELOOP_QUEUE 1 #endif /* Used for deleting a timeout for all queues. 
*/ -#define ELOOP_QUEUE_ALL 0 +#define ELOOP_QUEUE_ALL 0 /* Forward declare eloop - the content should be invisible to the outside */ struct eloop; -#define ELE_READ 0x0001 -#define ELE_WRITE 0x0002 -#define ELE_ERROR 0x0100 -#define ELE_HANGUP 0x0200 -#define ELE_NVAL 0x0400 +/* eloop fd events */ +#define ELE_READ 0x0001 +#define ELE_WRITE 0x0002 +#define ELE_ERROR 0x0100 +#define ELE_HANGUP 0x0200 +#define ELE_NVAL 0x0400 -size_t eloop_event_count(const struct eloop *); +/* What to keep when forking */ +#define ELF_KEEP_SIGNALS 0x0001 +#define ELF_KEEP_EVENTS 0x0002 +#define ELF_KEEP_TIMEOUTS 0x0004 + +#define ELF_KEEP_ALL (ELF_KEEP_SIGNALS | ELF_KEEP_EVENTS | ELF_KEEP_TIMEOUTS) + +size_t eloop_event_count(const struct eloop *); int eloop_event_add(struct eloop *, int, unsigned short, void (*)(void *, unsigned short), void *); int eloop_event_delete(struct eloop *, int); @@ -66,32 +74,30 @@ int eloop_event_delete(struct eloop *, int); unsigned long long eloop_timespec_diff(const struct timespec *tsp, const struct timespec *usp, unsigned int *nsp); #define eloop_timeout_add_tv(eloop, tv, cb, ctx) \ - eloop_q_timeout_add_tv((eloop), ELOOP_QUEUE, (tv), (cb), (ctx)) + eloop_q_timeout_add_tv((eloop), ELOOP_QUEUE, (tv), (cb), (ctx)) #define eloop_timeout_add_sec(eloop, tv, cb, ctx) \ - eloop_q_timeout_add_sec((eloop), ELOOP_QUEUE, (tv), (cb), (ctx)) + eloop_q_timeout_add_sec((eloop), ELOOP_QUEUE, (tv), (cb), (ctx)) #define eloop_timeout_add_msec(eloop, ms, cb, ctx) \ - eloop_q_timeout_add_msec((eloop), ELOOP_QUEUE, (ms), (cb), (ctx)) + eloop_q_timeout_add_msec((eloop), ELOOP_QUEUE, (ms), (cb), (ctx)) #define eloop_timeout_delete(eloop, cb, ctx) \ - eloop_q_timeout_delete((eloop), ELOOP_QUEUE, (cb), (ctx)) -int eloop_q_timeout_add_tv(struct eloop *, int, - const struct timespec *, void (*)(void *), void *); -int eloop_q_timeout_add_sec(struct eloop *, int, - unsigned int, void (*)(void *), void *); -int eloop_q_timeout_add_msec(struct eloop *, int, - unsigned long, void (*)(void *), void *); + eloop_q_timeout_delete((eloop), ELOOP_QUEUE, (cb), (ctx)) +int eloop_q_timeout_add_tv(struct eloop *, int, const struct timespec *, + void (*)(void *), void *); +int eloop_q_timeout_add_sec(struct eloop *, int, unsigned int, void (*)(void *), + void *); +int eloop_q_timeout_add_msec(struct eloop *, int, unsigned long, + void (*)(void *), void *); int eloop_q_timeout_delete(struct eloop *, int, void (*)(void *), void *); int eloop_signal_set_cb(struct eloop *, const int *, size_t, void (*)(int, void *), void *); -int eloop_signal_mask(struct eloop *, sigset_t *oldset); +int eloop_signal_mask(struct eloop *); -struct eloop * eloop_new(void); -void eloop_clear(struct eloop *, ...); +struct eloop *eloop_new(void); +struct eloop *eloop_new_with_signals(struct eloop *); void eloop_free(struct eloop *); void eloop_exit(struct eloop *, int); -void eloop_enter(struct eloop *); -int eloop_forked(struct eloop *); -int eloop_open(struct eloop *); -int eloop_start(struct eloop *, sigset_t *); +int eloop_forked(struct eloop *, unsigned short); +int eloop_start(struct eloop *); #endif
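
For anyone updating callers against the reworked API above, here is a minimal usage sketch. It is not part of the patch and is untested against this tree; the pipe, signal list, callbacks and timeout values are illustrative only, and error handling is trimmed. It only uses entry points declared in the new eloop.h: eloop_new(), eloop_signal_set_cb(), eloop_signal_mask() (which now keeps the old sigset inside the eloop), eloop_event_add(), eloop_timeout_add_sec(), eloop_forked() with the new ELF_KEEP_* flags, eloop_start() (no sigset argument any more, and eloop_enter() is gone), eloop_exit() and eloop_free().

	/*
	 * Usage sketch only - not part of this commit.
	 * Assumes it is compiled and linked inside the dhcpcd tree
	 * so that "eloop.h" and eloop.c are available.
	 */
	#include <sys/types.h>

	#include <signal.h>
	#include <stdlib.h>
	#include <unistd.h>

	#include "eloop.h"

	static const int sigs[] = { SIGTERM, SIGINT };

	static void
	sig_cb(int sig, void *arg)
	{
		/* Stop the loop; SIGTERM is treated as a clean exit. */
		eloop_exit(arg, sig == SIGTERM ? EXIT_SUCCESS : EXIT_FAILURE);
	}

	static void
	read_cb(void *arg, unsigned short events)
	{
		char buf[64];
		int fd = *(int *)arg;

		if (events & (ELE_HANGUP | ELE_ERROR))
			return;		/* peer went away or errored */
		if (events & ELE_READ)
			(void)read(fd, buf, sizeof(buf));
	}

	static void
	timeout_cb(void *arg)
	{
		/* Give up after the timeout fires. */
		eloop_exit(arg, EXIT_SUCCESS);
	}

	int
	main(void)
	{
		struct eloop *eloop;
		int pfd[2], error;
		pid_t pid;

		if ((eloop = eloop_new()) == NULL || pipe(pfd) == -1)
			return EXIT_FAILURE;

		eloop_signal_set_cb(eloop, sigs,
		    sizeof(sigs) / sizeof(sigs[0]), sig_cb, eloop);
		eloop_signal_mask(eloop);	/* sigset now lives in eloop */

		eloop_event_add(eloop, pfd[0], ELE_READ, read_cb, &pfd[0]);
		eloop_timeout_add_sec(eloop, 10, timeout_cb, eloop);

		if ((pid = fork()) == -1)
			return EXIT_FAILURE;
		if (pid == 0) {
			/*
			 * Child: per this commit the kqueue/epoll fd
			 * inherited from the parent is invalid here, so
			 * eloop_forked() forgets it and opens a fresh one
			 * instead of closing it blindly, then re-registers
			 * whatever the flags ask to keep.
			 * Pass 0 instead of ELF_KEEP_ALL to drop all
			 * signals, events and timeouts in the child.
			 */
			if (eloop_forked(eloop, ELF_KEEP_ALL) == -1)
				return EXIT_FAILURE;
		}

		error = eloop_start(eloop);
		eloop_free(eloop);
		return error;
	}

Where a child wants a brand new loop rather than the parent's, the new eloop_new_with_signals() shown in the diff appears intended for that: it allocates a fresh eloop but copies the parent's signal callback, signal list and saved sigset, so the signal plumbing does not have to be set up twice.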