Split WaitEventSet functions to separate source file
author    Heikki Linnakangas <heikki.linnakangas@iki.fi>
          Wed, 5 Mar 2025 23:26:16 +0000 (01:26 +0200)
committer Heikki Linnakangas <heikki.linnakangas@iki.fi>
          Wed, 5 Mar 2025 23:26:16 +0000 (01:26 +0200)
latch.c now only contains the Latch related functions, which build on
the WaitEventSet abstraction. Most of the platform-dependent stuff is
now in waiteventset.c.

Reviewed-by: Andres Freund <andres@anarazel.de>
Discussion: https://www.postgresql.org/message-id/8a507fb6-df28-49d3-81a5-ede180d7f0fb@iki.fi

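For orientation, the sketch below shows how callers use the WaitEventSet functions that this commit moves into waiteventset.c. It is not part of the patch: the function signatures come from the code removed from latch.c further down, while the helper name, the exact header layout, and the PG_WAIT_EXTENSION wait_event_info value are illustrative assumptions.

/*
 * Illustrative sketch, not part of this commit: wait for either the process
 * latch or a readable socket, with a 5 second timeout.
 */
#include "postgres.h"

#include "miscadmin.h"
#include "pgstat.h"
#include "storage/latch.h"
#include "storage/waiteventset.h"	/* new header introduced by this commit */
#include "utils/resowner.h"

static void
wait_for_socket_or_latch(pgsocket sock)
{
	WaitEventSet *set;
	WaitEvent	occurred;
	int			nevents;

	set = CreateWaitEventSet(CurrentResourceOwner, 3);
	AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
	AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, NULL, NULL);
	AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);

	/* timeout is in milliseconds; a return value of 0 means it expired */
	nevents = WaitEventSetWait(set, 5000L, &occurred, 1, PG_WAIT_EXTENSION);

	if (nevents > 0 && (occurred.events & WL_LATCH_SET))
		ResetLatch(MyLatch);

	FreeWaitEventSet(set);
}
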
src/backend/libpq/pqsignal.c
src/backend/postmaster/postmaster.c
src/backend/storage/ipc/Makefile
src/backend/storage/ipc/latch.c
src/backend/storage/ipc/meson.build
src/backend/storage/ipc/waiteventset.c [new file with mode: 0644]
src/backend/utils/init/miscinit.c
src/include/storage/latch.h
src/include/storage/waiteventset.h [new file with mode: 0644]

diff --git a/src/backend/libpq/pqsignal.c b/src/backend/libpq/pqsignal.c
index 1742e90ea9e9a0df668ddbbd5f228ba7dbbe3fdb..d866307a4dc2e61eb7bdbc2178e925723d5ab887 100644
--- a/src/backend/libpq/pqsignal.c
+++ b/src/backend/libpq/pqsignal.c
@@ -42,7 +42,7 @@ pqinitmask(void)
 {
        sigemptyset(&UnBlockSig);
 
-       /* Note: InitializeLatchSupport() modifies UnBlockSig. */
+       /* Note: InitializeWaitEventSupport() modifies UnBlockSig. */
 
        /* First set all signals, then clear some. */
        sigfillset(&BlockSig);
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 5dd3b6a4fd4965b57d7cf79658e6cedd04016265..d2a7a7add6fa0294f30b8edb053c0d80fca784a3 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -548,7 +548,7 @@ PostmasterMain(int argc, char *argv[])
        pqsignal(SIGCHLD, handle_pm_child_exit_signal);
 
        /* This may configure SIGURG, depending on platform. */
-       InitializeLatchSupport();
+       InitializeWaitEventSupport();
        InitProcessLocalLatch();
 
        /*
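The hunk above renames InitializeLatchSupport() to InitializeWaitEventSupport() without changing the contract stated in the latch.c comment removed further down: it must be called once during startup of any process that waits on latches, before any InitLatch() or OwnLatch() call. A minimal sketch of that ordering (hypothetical helper, not part of the patch):

/*
 * Hypothetical helper, sketch only: the support function must run before any
 * InitLatch()/OwnLatch() call in the process.
 */
static void
setup_latch_support(void)
{
	InitializeWaitEventSupport();	/* may configure SIGURG, depending on platform */
	InitProcessLocalLatch();		/* makes MyLatch usable in this process */
}
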
diff --git a/src/backend/storage/ipc/Makefile b/src/backend/storage/ipc/Makefile
index d8a1653eb6a6206d2c96dc194f0cd2ccd91046f6..9a07f6e1d92ab4a664959ff1ca68c19235d8de5f 100644
--- a/src/backend/storage/ipc/Makefile
+++ b/src/backend/storage/ipc/Makefile
@@ -25,6 +25,7 @@ OBJS = \
        signalfuncs.o \
        sinval.o \
        sinvaladt.o \
-       standby.o
+       standby.o \
+       waiteventset.o
 
 include $(top_srcdir)/src/backend/common.mk
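The rewritten header comment in the latch.c diff below describes the latch interface as a replacement for the pg_usleep()/select()-plus-flag pattern. The following is a minimal sketch of that usage pattern as documented in latch.h; the worker function name, timeout, and wait_event_info value are illustrative and not taken from this commit.

#include "postgres.h"

#include "miscadmin.h"
#include "pgstat.h"
#include "storage/latch.h"

static void
my_worker_loop(void)
{
	for (;;)
	{
		ResetLatch(MyLatch);

		/* ... perform whatever work has accumulated ... */

		/* Sleep until the latch is set, 10 seconds pass, or the postmaster dies. */
		(void) WaitLatch(MyLatch,
						 WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
						 10 * 1000L,
						 PG_WAIT_EXTENSION);
		CHECK_FOR_INTERRUPTS();
	}
}
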
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
index 997bcb58ff75091ae67019481631ce7b1265894d..c6aefd2f688dd3cb37051b8401423322ffc7d630 100644
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -3,25 +3,10 @@
  * latch.c
  *       Routines for inter-process latches
  *
- * The poll() implementation uses the so-called self-pipe trick to overcome the
- * race condition involved with poll() and setting a global flag in the signal
- * handler. When a latch is set and the current process is waiting for it, the
- * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
- * A signal by itself doesn't interrupt poll() on all platforms, and even on
- * platforms where it does, a signal that arrives just before the poll() call
- * does not prevent poll() from entering sleep. An incoming byte on a pipe
- * however reliably interrupts the sleep, and causes poll() to return
- * immediately even if the signal arrives before poll() begins.
- *
- * The epoll() implementation overcomes the race with a different technique: it
- * keeps SIGURG blocked and consumes from a signalfd() descriptor instead.  We
- * don't need to register a signal handler or create our own self-pipe.  We
- * assume that any system that has Linux epoll() also has Linux signalfd().
- *
- * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
- *
- * The Windows implementation uses Windows events that are inherited by all
- * postmaster child processes. There's no need for the self-pipe trick there.
+ * The latch interface is a reliable replacement for the common pattern of
+ * using pg_usleep() or select() to wait until a signal arrives, where the
+ * signal handler sets a flag variable.  See latch.h for more information
+ * on how to use them.
  *
  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  */
 #include "postgres.h"
 
-#include <fcntl.h>
-#include <limits.h>
-#include <signal.h>
-#include <unistd.h>
-#ifdef HAVE_SYS_EPOLL_H
-#include <sys/epoll.h>
-#endif
-#ifdef HAVE_SYS_EVENT_H
-#include <sys/event.h>
-#endif
-#ifdef HAVE_SYS_SIGNALFD_H
-#include <sys/signalfd.h>
-#endif
-#ifdef HAVE_POLL_H
-#include <poll.h>
-#endif
-
-#include "libpq/pqsignal.h"
 #include "miscadmin.h"
-#include "pgstat.h"
 #include "port/atomics.h"
-#include "portability/instr_time.h"
-#include "postmaster/postmaster.h"
-#include "storage/fd.h"
-#include "storage/ipc.h"
 #include "storage/latch.h"
-#include "storage/pmsignal.h"
-#include "utils/memutils.h"
+#include "storage/waiteventset.h"
 #include "utils/resowner.h"
 
-/*
- * Select the fd readiness primitive to use. Normally the "most modern"
- * primitive supported by the OS will be used, but for testing it can be
- * useful to manually specify the used primitive.  If desired, just add a
- * define somewhere before this block.
- */
-#if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
-       defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
-/* don't overwrite manual choice */
-#elif defined(HAVE_SYS_EPOLL_H)
-#define WAIT_USE_EPOLL
-#elif defined(HAVE_KQUEUE)
-#define WAIT_USE_KQUEUE
-#elif defined(HAVE_POLL)
-#define WAIT_USE_POLL
-#elif WIN32
-#define WAIT_USE_WIN32
-#else
-#error "no wait set implementation available"
-#endif
-
-/*
- * By default, we use a self-pipe with poll() and a signalfd with epoll(), if
- * available.  For testing the choice can also be manually specified.
- */
-#if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
-#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
-/* don't overwrite manual choice */
-#elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H)
-#define WAIT_USE_SIGNALFD
-#else
-#define WAIT_USE_SELF_PIPE
-#endif
-#endif
-
-/* typedef in latch.h */
-struct WaitEventSet
-{
-       ResourceOwner owner;
-
-       int                     nevents;                /* number of registered events */
-       int                     nevents_space;  /* maximum number of events in this set */
-
-       /*
-        * Array, of nevents_space length, storing the definition of events this
-        * set is waiting for.
-        */
-       WaitEvent  *events;
-
-       /*
-        * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
-        * said latch, and latch_pos the offset in the ->events array. This is
-        * useful because we check the state of the latch before performing doing
-        * syscalls related to waiting.
-        */
-       Latch      *latch;
-       int                     latch_pos;
-
-       /*
-        * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
-        * is set so that we'll exit immediately if postmaster death is detected,
-        * instead of returning.
-        */
-       bool            exit_on_postmaster_death;
-
-#if defined(WAIT_USE_EPOLL)
-       int                     epoll_fd;
-       /* epoll_wait returns events in a user provided arrays, allocate once */
-       struct epoll_event *epoll_ret_events;
-#elif defined(WAIT_USE_KQUEUE)
-       int                     kqueue_fd;
-       /* kevent returns events in a user provided arrays, allocate once */
-       struct kevent *kqueue_ret_events;
-       bool            report_postmaster_not_running;
-#elif defined(WAIT_USE_POLL)
-       /* poll expects events to be waited on every poll() call, prepare once */
-       struct pollfd *pollfds;
-#elif defined(WAIT_USE_WIN32)
-
-       /*
-        * Array of windows events. The first element always contains
-        * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
-        * event->pos + 1).
-        */
-       HANDLE     *handles;
-#endif
-};
-
 /* A common WaitEventSet used to implement WaitLatch() */
 static WaitEventSet *LatchWaitSet;
 
@@ -158,191 +31,6 @@ static WaitEventSet *LatchWaitSet;
 #define LatchWaitSetLatchPos 0
 #define LatchWaitSetPostmasterDeathPos 1
 
-#ifndef WIN32
-/* Are we currently in WaitLatch? The signal handler would like to know. */
-static volatile sig_atomic_t waiting = false;
-#endif
-
-#ifdef WAIT_USE_SIGNALFD
-/* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
-static int     signal_fd = -1;
-#endif
-
-#ifdef WAIT_USE_SELF_PIPE
-/* Read and write ends of the self-pipe */
-static int     selfpipe_readfd = -1;
-static int     selfpipe_writefd = -1;
-
-/* Process owning the self-pipe --- needed for checking purposes */
-static int     selfpipe_owner_pid = 0;
-
-/* Private function prototypes */
-static void latch_sigurg_handler(SIGNAL_ARGS);
-static void sendSelfPipeByte(void);
-#endif
-
-#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
-static void drain(void);
-#endif
-
-#if defined(WAIT_USE_EPOLL)
-static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
-#elif defined(WAIT_USE_KQUEUE)
-static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
-#elif defined(WAIT_USE_POLL)
-static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
-#elif defined(WAIT_USE_WIN32)
-static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
-#endif
-
-static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
-                                                                               WaitEvent *occurred_events, int nevents);
-
-/* ResourceOwner support to hold WaitEventSets */
-static void ResOwnerReleaseWaitEventSet(Datum res);
-
-static const ResourceOwnerDesc wait_event_set_resowner_desc =
-{
-       .name = "WaitEventSet",
-       .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
-       .release_priority = RELEASE_PRIO_WAITEVENTSETS,
-       .ReleaseResource = ResOwnerReleaseWaitEventSet,
-       .DebugPrint = NULL
-};
-
-/* Convenience wrappers over ResourceOwnerRemember/Forget */
-static inline void
-ResourceOwnerRememberWaitEventSet(ResourceOwner owner, WaitEventSet *set)
-{
-       ResourceOwnerRemember(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
-}
-static inline void
-ResourceOwnerForgetWaitEventSet(ResourceOwner owner, WaitEventSet *set)
-{
-       ResourceOwnerForget(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
-}
-
-
-/*
- * Initialize the process-local latch infrastructure.
- *
- * This must be called once during startup of any process that can wait on
- * latches, before it issues any InitLatch() or OwnLatch() calls.
- */
-void
-InitializeLatchSupport(void)
-{
-#if defined(WAIT_USE_SELF_PIPE)
-       int                     pipefd[2];
-
-       if (IsUnderPostmaster)
-       {
-               /*
-                * We might have inherited connections to a self-pipe created by the
-                * postmaster.  It's critical that child processes create their own
-                * self-pipes, of course, and we really want them to close the
-                * inherited FDs for safety's sake.
-                */
-               if (selfpipe_owner_pid != 0)
-               {
-                       /* Assert we go through here but once in a child process */
-                       Assert(selfpipe_owner_pid != MyProcPid);
-                       /* Release postmaster's pipe FDs; ignore any error */
-                       (void) close(selfpipe_readfd);
-                       (void) close(selfpipe_writefd);
-                       /* Clean up, just for safety's sake; we'll set these below */
-                       selfpipe_readfd = selfpipe_writefd = -1;
-                       selfpipe_owner_pid = 0;
-                       /* Keep fd.c's accounting straight */
-                       ReleaseExternalFD();
-                       ReleaseExternalFD();
-               }
-               else
-               {
-                       /*
-                        * Postmaster didn't create a self-pipe ... or else we're in an
-                        * EXEC_BACKEND build, in which case it doesn't matter since the
-                        * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
-                        * fd.c won't have state to clean up, either.
-                        */
-                       Assert(selfpipe_readfd == -1);
-               }
-       }
-       else
-       {
-               /* In postmaster or standalone backend, assert we do this but once */
-               Assert(selfpipe_readfd == -1);
-               Assert(selfpipe_owner_pid == 0);
-       }
-
-       /*
-        * Set up the self-pipe that allows a signal handler to wake up the
-        * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
-        * that SetLatch won't block if the event has already been set many times
-        * filling the kernel buffer. Make the read-end non-blocking too, so that
-        * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
-        * Also, make both FDs close-on-exec, since we surely do not want any
-        * child processes messing with them.
-        */
-       if (pipe(pipefd) < 0)
-               elog(FATAL, "pipe() failed: %m");
-       if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
-               elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
-       if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
-               elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
-       if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
-               elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
-       if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
-               elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");
-
-       selfpipe_readfd = pipefd[0];
-       selfpipe_writefd = pipefd[1];
-       selfpipe_owner_pid = MyProcPid;
-
-       /* Tell fd.c about these two long-lived FDs */
-       ReserveExternalFD();
-       ReserveExternalFD();
-
-       pqsignal(SIGURG, latch_sigurg_handler);
-#endif
-
-#ifdef WAIT_USE_SIGNALFD
-       sigset_t        signalfd_mask;
-
-       if (IsUnderPostmaster)
-       {
-               /*
-                * It would probably be safe to re-use the inherited signalfd since
-                * signalfds only see the current process's pending signals, but it
-                * seems less surprising to close it and create our own.
-                */
-               if (signal_fd != -1)
-               {
-                       /* Release postmaster's signal FD; ignore any error */
-                       (void) close(signal_fd);
-                       signal_fd = -1;
-                       ReleaseExternalFD();
-               }
-       }
-
-       /* Block SIGURG, because we'll receive it through a signalfd. */
-       sigaddset(&UnBlockSig, SIGURG);
-
-       /* Set up the signalfd to receive SIGURG notifications. */
-       sigemptyset(&signalfd_mask);
-       sigaddset(&signalfd_mask, SIGURG);
-       signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
-       if (signal_fd < 0)
-               elog(FATAL, "signalfd() failed");
-       ReserveExternalFD();
-#endif
-
-#ifdef WAIT_USE_KQUEUE
-       /* Ignore SIGURG, because we'll receive it via kqueue. */
-       pqsignal(SIGURG, SIG_IGN);
-#endif
-}
-
 void
 InitializeLatchWaitSet(void)
 {
@@ -379,13 +67,7 @@ InitLatch(Latch *latch)
        latch->owner_pid = MyProcPid;
        latch->is_shared = false;
 
-#if defined(WAIT_USE_SELF_PIPE)
-       /* Assert InitializeLatchSupport has been called in this process */
-       Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
-#elif defined(WAIT_USE_SIGNALFD)
-       /* Assert InitializeLatchSupport has been called in this process */
-       Assert(signal_fd >= 0);
-#elif defined(WAIT_USE_WIN32)
+#ifdef WIN32
        latch->event = CreateEvent(NULL, TRUE, FALSE, NULL);
        if (latch->event == NULL)
                elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
@@ -448,14 +130,6 @@ OwnLatch(Latch *latch)
        /* Sanity checks */
        Assert(latch->is_shared);
 
-#if defined(WAIT_USE_SELF_PIPE)
-       /* Assert InitializeLatchSupport has been called in this process */
-       Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
-#elif defined(WAIT_USE_SIGNALFD)
-       /* Assert InitializeLatchSupport has been called in this process */
-       Assert(signal_fd >= 0);
-#endif
-
        owner_pid = latch->owner_pid;
        if (owner_pid != 0)
                elog(PANIC, "latch already owned by PID %d", owner_pid);
@@ -664,17 +338,9 @@ SetLatch(Latch *latch)
        if (owner_pid == 0)
                return;
        else if (owner_pid == MyProcPid)
-       {
-#if defined(WAIT_USE_SELF_PIPE)
-               if (waiting)
-                       sendSelfPipeByte();
-#else
-               if (waiting)
-                       kill(MyProcPid, SIGURG);
-#endif
-       }
+               WakeupMyProc();
        else
-               kill(owner_pid, SIGURG);
+               WakeupOtherProc(owner_pid);
 
 #else
 
@@ -719,1655 +385,3 @@ ResetLatch(Latch *latch)
         */
        pg_memory_barrier();
 }
-
-/*
- * Create a WaitEventSet with space for nevents different events to wait for.
- *
- * These events can then be efficiently waited upon together, using
- * WaitEventSetWait().
- *
- * The WaitEventSet is tracked by the given 'resowner'.  Use NULL for session
- * lifetime.
- */
-WaitEventSet *
-CreateWaitEventSet(ResourceOwner resowner, int nevents)
-{
-       WaitEventSet *set;
-       char       *data;
-       Size            sz = 0;
-
-       /*
-        * Use MAXALIGN size/alignment to guarantee that later uses of memory are
-        * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
-        * platforms, but earlier allocations like WaitEventSet and WaitEvent
-        * might not be sized to guarantee that when purely using sizeof().
-        */
-       sz += MAXALIGN(sizeof(WaitEventSet));
-       sz += MAXALIGN(sizeof(WaitEvent) * nevents);
-
-#if defined(WAIT_USE_EPOLL)
-       sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
-#elif defined(WAIT_USE_KQUEUE)
-       sz += MAXALIGN(sizeof(struct kevent) * nevents);
-#elif defined(WAIT_USE_POLL)
-       sz += MAXALIGN(sizeof(struct pollfd) * nevents);
-#elif defined(WAIT_USE_WIN32)
-       /* need space for the pgwin32_signal_event */
-       sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
-#endif
-
-       if (resowner != NULL)
-               ResourceOwnerEnlarge(resowner);
-
-       data = (char *) MemoryContextAllocZero(TopMemoryContext, sz);
-
-       set = (WaitEventSet *) data;
-       data += MAXALIGN(sizeof(WaitEventSet));
-
-       set->events = (WaitEvent *) data;
-       data += MAXALIGN(sizeof(WaitEvent) * nevents);
-
-#if defined(WAIT_USE_EPOLL)
-       set->epoll_ret_events = (struct epoll_event *) data;
-       data += MAXALIGN(sizeof(struct epoll_event) * nevents);
-#elif defined(WAIT_USE_KQUEUE)
-       set->kqueue_ret_events = (struct kevent *) data;
-       data += MAXALIGN(sizeof(struct kevent) * nevents);
-#elif defined(WAIT_USE_POLL)
-       set->pollfds = (struct pollfd *) data;
-       data += MAXALIGN(sizeof(struct pollfd) * nevents);
-#elif defined(WAIT_USE_WIN32)
-       set->handles = (HANDLE) data;
-       data += MAXALIGN(sizeof(HANDLE) * nevents);
-#endif
-
-       set->latch = NULL;
-       set->nevents_space = nevents;
-       set->exit_on_postmaster_death = false;
-
-       if (resowner != NULL)
-       {
-               ResourceOwnerRememberWaitEventSet(resowner, set);
-               set->owner = resowner;
-       }
-
-#if defined(WAIT_USE_EPOLL)
-       if (!AcquireExternalFD())
-               elog(ERROR, "AcquireExternalFD, for epoll_create1, failed: %m");
-       set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
-       if (set->epoll_fd < 0)
-       {
-               ReleaseExternalFD();
-               elog(ERROR, "epoll_create1 failed: %m");
-       }
-#elif defined(WAIT_USE_KQUEUE)
-       if (!AcquireExternalFD())
-               elog(ERROR, "AcquireExternalFD, for kqueue, failed: %m");
-       set->kqueue_fd = kqueue();
-       if (set->kqueue_fd < 0)
-       {
-               ReleaseExternalFD();
-               elog(ERROR, "kqueue failed: %m");
-       }
-       if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
-       {
-               int                     save_errno = errno;
-
-               close(set->kqueue_fd);
-               ReleaseExternalFD();
-               errno = save_errno;
-               elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
-       }
-       set->report_postmaster_not_running = false;
-#elif defined(WAIT_USE_WIN32)
-
-       /*
-        * To handle signals while waiting, we need to add a win32 specific event.
-        * We accounted for the additional event at the top of this routine. See
-        * port/win32/signal.c for more details.
-        *
-        * Note: pgwin32_signal_event should be first to ensure that it will be
-        * reported when multiple events are set.  We want to guarantee that
-        * pending signals are serviced.
-        */
-       set->handles[0] = pgwin32_signal_event;
-       StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
-#endif
-
-       return set;
-}
-
-/*
- * Free a previously created WaitEventSet.
- *
- * Note: preferably, this shouldn't have to free any resources that could be
- * inherited across an exec().  If it did, we'd likely leak those resources in
- * many scenarios.  For the epoll case, we ensure that by setting EPOLL_CLOEXEC
- * when the FD is created.  For the Windows case, we assume that the handles
- * involved are non-inheritable.
- */
-void
-FreeWaitEventSet(WaitEventSet *set)
-{
-       if (set->owner)
-       {
-               ResourceOwnerForgetWaitEventSet(set->owner, set);
-               set->owner = NULL;
-       }
-
-#if defined(WAIT_USE_EPOLL)
-       close(set->epoll_fd);
-       ReleaseExternalFD();
-#elif defined(WAIT_USE_KQUEUE)
-       close(set->kqueue_fd);
-       ReleaseExternalFD();
-#elif defined(WAIT_USE_WIN32)
-       for (WaitEvent *cur_event = set->events;
-                cur_event < (set->events + set->nevents);
-                cur_event++)
-       {
-               if (cur_event->events & WL_LATCH_SET)
-               {
-                       /* uses the latch's HANDLE */
-               }
-               else if (cur_event->events & WL_POSTMASTER_DEATH)
-               {
-                       /* uses PostmasterHandle */
-               }
-               else
-               {
-                       /* Clean up the event object we created for the socket */
-                       WSAEventSelect(cur_event->fd, NULL, 0);
-                       WSACloseEvent(set->handles[cur_event->pos + 1]);
-               }
-       }
-#endif
-
-       pfree(set);
-}
-
-/*
- * Free a previously created WaitEventSet in a child process after a fork().
- */
-void
-FreeWaitEventSetAfterFork(WaitEventSet *set)
-{
-#if defined(WAIT_USE_EPOLL)
-       close(set->epoll_fd);
-       ReleaseExternalFD();
-#elif defined(WAIT_USE_KQUEUE)
-       /* kqueues are not normally inherited by child processes */
-       ReleaseExternalFD();
-#endif
-
-       pfree(set);
-}
-
-/* ---
- * Add an event to the set. Possible events are:
- * - WL_LATCH_SET: Wait for the latch to be set
- * - WL_POSTMASTER_DEATH: Wait for postmaster to die
- * - WL_SOCKET_READABLE: Wait for socket to become readable,
- *      can be combined in one event with other WL_SOCKET_* events
- * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
- *      can be combined with other WL_SOCKET_* events
- * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
- *      can be combined with other WL_SOCKET_* events (on non-Windows
- *      platforms, this is the same as WL_SOCKET_WRITEABLE)
- * - WL_SOCKET_ACCEPT: Wait for new connection to a server socket,
- *      can be combined with other WL_SOCKET_* events (on non-Windows
- *      platforms, this is the same as WL_SOCKET_READABLE)
- * - WL_SOCKET_CLOSED: Wait for socket to be closed by remote peer.
- * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
- *
- * Returns the offset in WaitEventSet->events (starting from 0), which can be
- * used to modify previously added wait events using ModifyWaitEvent().
- *
- * In the WL_LATCH_SET case the latch must be owned by the current process,
- * i.e. it must be a process-local latch initialized with InitLatch, or a
- * shared latch associated with the current process by calling OwnLatch.
- *
- * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED/ACCEPT cases, EOF and error
- * conditions cause the socket to be reported as readable/writable/connected,
- * so that the caller can deal with the condition.
- *
- * The user_data pointer specified here will be set for the events returned
- * by WaitEventSetWait(), allowing to easily associate additional data with
- * events.
- */
-int
-AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
-                                 void *user_data)
-{
-       WaitEvent  *event;
-
-       /* not enough space */
-       Assert(set->nevents < set->nevents_space);
-
-       if (events == WL_EXIT_ON_PM_DEATH)
-       {
-               events = WL_POSTMASTER_DEATH;
-               set->exit_on_postmaster_death = true;
-       }
-
-       if (latch)
-       {
-               if (latch->owner_pid != MyProcPid)
-                       elog(ERROR, "cannot wait on a latch owned by another process");
-               if (set->latch)
-                       elog(ERROR, "cannot wait on more than one latch");
-               if ((events & WL_LATCH_SET) != WL_LATCH_SET)
-                       elog(ERROR, "latch events only support being set");
-       }
-       else
-       {
-               if (events & WL_LATCH_SET)
-                       elog(ERROR, "cannot wait on latch without a specified latch");
-       }
-
-       /* waiting for socket readiness without a socket indicates a bug */
-       if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
-               elog(ERROR, "cannot wait on socket event without a socket");
-
-       event = &set->events[set->nevents];
-       event->pos = set->nevents++;
-       event->fd = fd;
-       event->events = events;
-       event->user_data = user_data;
-#ifdef WIN32
-       event->reset = false;
-#endif
-
-       if (events == WL_LATCH_SET)
-       {
-               set->latch = latch;
-               set->latch_pos = event->pos;
-#if defined(WAIT_USE_SELF_PIPE)
-               event->fd = selfpipe_readfd;
-#elif defined(WAIT_USE_SIGNALFD)
-               event->fd = signal_fd;
-#else
-               event->fd = PGINVALID_SOCKET;
-#ifdef WAIT_USE_EPOLL
-               return event->pos;
-#endif
-#endif
-       }
-       else if (events == WL_POSTMASTER_DEATH)
-       {
-#ifndef WIN32
-               event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
-#endif
-       }
-
-       /* perform wait primitive specific initialization, if needed */
-#if defined(WAIT_USE_EPOLL)
-       WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
-#elif defined(WAIT_USE_KQUEUE)
-       WaitEventAdjustKqueue(set, event, 0);
-#elif defined(WAIT_USE_POLL)
-       WaitEventAdjustPoll(set, event);
-#elif defined(WAIT_USE_WIN32)
-       WaitEventAdjustWin32(set, event);
-#endif
-
-       return event->pos;
-}
-
-/*
- * Change the event mask and, in the WL_LATCH_SET case, the latch associated
- * with the WaitEvent.  The latch may be changed to NULL to disable the latch
- * temporarily, and then set back to a latch later.
- *
- * 'pos' is the id returned by AddWaitEventToSet.
- */
-void
-ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
-{
-       WaitEvent  *event;
-#if defined(WAIT_USE_KQUEUE)
-       int                     old_events;
-#endif
-
-       Assert(pos < set->nevents);
-
-       event = &set->events[pos];
-#if defined(WAIT_USE_KQUEUE)
-       old_events = event->events;
-#endif
-
-       /*
-        * Allow switching between WL_POSTMASTER_DEATH and WL_EXIT_ON_PM_DEATH.
-        *
-        * Note that because WL_EXIT_ON_PM_DEATH is mapped to WL_POSTMASTER_DEATH
-        * in AddWaitEventToSet(), this needs to be checked before the fast-path
-        * below that checks if 'events' has changed.
-        */
-       if (event->events == WL_POSTMASTER_DEATH)
-       {
-               if (events != WL_POSTMASTER_DEATH && events != WL_EXIT_ON_PM_DEATH)
-                       elog(ERROR, "cannot remove postmaster death event");
-               set->exit_on_postmaster_death = ((events & WL_EXIT_ON_PM_DEATH) != 0);
-               return;
-       }
-
-       /*
-        * If neither the event mask nor the associated latch changes, return
-        * early. That's an important optimization for some sockets, where
-        * ModifyWaitEvent is frequently used to switch from waiting for reads to
-        * waiting on writes.
-        */
-       if (events == event->events &&
-               (!(event->events & WL_LATCH_SET) || set->latch == latch))
-               return;
-
-       if (event->events & WL_LATCH_SET && events != event->events)
-               elog(ERROR, "cannot modify latch event");
-
-       /* FIXME: validate event mask */
-       event->events = events;
-
-       if (events == WL_LATCH_SET)
-       {
-               if (latch && latch->owner_pid != MyProcPid)
-                       elog(ERROR, "cannot wait on a latch owned by another process");
-               set->latch = latch;
-
-               /*
-                * On Unix, we don't need to modify the kernel object because the
-                * underlying pipe (if there is one) is the same for all latches so we
-                * can return immediately.  On Windows, we need to update our array of
-                * handles, but we leave the old one in place and tolerate spurious
-                * wakeups if the latch is disabled.
-                */
-#if defined(WAIT_USE_WIN32)
-               if (!latch)
-                       return;
-#else
-               return;
-#endif
-       }
-
-#if defined(WAIT_USE_EPOLL)
-       WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
-#elif defined(WAIT_USE_KQUEUE)
-       WaitEventAdjustKqueue(set, event, old_events);
-#elif defined(WAIT_USE_POLL)
-       WaitEventAdjustPoll(set, event);
-#elif defined(WAIT_USE_WIN32)
-       WaitEventAdjustWin32(set, event);
-#endif
-}
-
-#if defined(WAIT_USE_EPOLL)
-/*
- * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
- */
-static void
-WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
-{
-       struct epoll_event epoll_ev;
-       int                     rc;
-
-       /* pointer to our event, returned by epoll_wait */
-       epoll_ev.data.ptr = event;
-       /* always wait for errors */
-       epoll_ev.events = EPOLLERR | EPOLLHUP;
-
-       /* prepare pollfd entry once */
-       if (event->events == WL_LATCH_SET)
-       {
-               Assert(set->latch != NULL);
-               epoll_ev.events |= EPOLLIN;
-       }
-       else if (event->events == WL_POSTMASTER_DEATH)
-       {
-               epoll_ev.events |= EPOLLIN;
-       }
-       else
-       {
-               Assert(event->fd != PGINVALID_SOCKET);
-               Assert(event->events & (WL_SOCKET_READABLE |
-                                                               WL_SOCKET_WRITEABLE |
-                                                               WL_SOCKET_CLOSED));
-
-               if (event->events & WL_SOCKET_READABLE)
-                       epoll_ev.events |= EPOLLIN;
-               if (event->events & WL_SOCKET_WRITEABLE)
-                       epoll_ev.events |= EPOLLOUT;
-               if (event->events & WL_SOCKET_CLOSED)
-                       epoll_ev.events |= EPOLLRDHUP;
-       }
-
-       /*
-        * Even though unused, we also pass epoll_ev as the data argument if
-        * EPOLL_CTL_DEL is passed as action.  There used to be an epoll bug
-        * requiring that, and actually it makes the code simpler...
-        */
-       rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);
-
-       if (rc < 0)
-               ereport(ERROR,
-                               (errcode_for_socket_access(),
-                                errmsg("%s() failed: %m",
-                                               "epoll_ctl")));
-}
-#endif
-
-#if defined(WAIT_USE_POLL)
-static void
-WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
-{
-       struct pollfd *pollfd = &set->pollfds[event->pos];
-
-       pollfd->revents = 0;
-       pollfd->fd = event->fd;
-
-       /* prepare pollfd entry once */
-       if (event->events == WL_LATCH_SET)
-       {
-               Assert(set->latch != NULL);
-               pollfd->events = POLLIN;
-       }
-       else if (event->events == WL_POSTMASTER_DEATH)
-       {
-               pollfd->events = POLLIN;
-       }
-       else
-       {
-               Assert(event->events & (WL_SOCKET_READABLE |
-                                                               WL_SOCKET_WRITEABLE |
-                                                               WL_SOCKET_CLOSED));
-               pollfd->events = 0;
-               if (event->events & WL_SOCKET_READABLE)
-                       pollfd->events |= POLLIN;
-               if (event->events & WL_SOCKET_WRITEABLE)
-                       pollfd->events |= POLLOUT;
-#ifdef POLLRDHUP
-               if (event->events & WL_SOCKET_CLOSED)
-                       pollfd->events |= POLLRDHUP;
-#endif
-       }
-
-       Assert(event->fd != PGINVALID_SOCKET);
-}
-#endif
-
-#if defined(WAIT_USE_KQUEUE)
-
-/*
- * On most BSD family systems, the udata member of struct kevent is of type
- * void *, so we could directly convert to/from WaitEvent *.  Unfortunately,
- * NetBSD has it as intptr_t, so here we wallpaper over that difference with
- * an lvalue cast.
- */
-#define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))
-
-static inline void
-WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
-                                                WaitEvent *event)
-{
-       k_ev->ident = event->fd;
-       k_ev->filter = filter;
-       k_ev->flags = action;
-       k_ev->fflags = 0;
-       k_ev->data = 0;
-       AccessWaitEvent(k_ev) = event;
-}
-
-static inline void
-WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
-{
-       /* For now postmaster death can only be added, not removed. */
-       k_ev->ident = PostmasterPid;
-       k_ev->filter = EVFILT_PROC;
-       k_ev->flags = EV_ADD;
-       k_ev->fflags = NOTE_EXIT;
-       k_ev->data = 0;
-       AccessWaitEvent(k_ev) = event;
-}
-
-static inline void
-WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
-{
-       /* For now latch can only be added, not removed. */
-       k_ev->ident = SIGURG;
-       k_ev->filter = EVFILT_SIGNAL;
-       k_ev->flags = EV_ADD;
-       k_ev->fflags = 0;
-       k_ev->data = 0;
-       AccessWaitEvent(k_ev) = event;
-}
-
-/*
- * old_events is the previous event mask, used to compute what has changed.
- */
-static void
-WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
-{
-       int                     rc;
-       struct kevent k_ev[2];
-       int                     count = 0;
-       bool            new_filt_read = false;
-       bool            old_filt_read = false;
-       bool            new_filt_write = false;
-       bool            old_filt_write = false;
-
-       if (old_events == event->events)
-               return;
-
-       Assert(event->events != WL_LATCH_SET || set->latch != NULL);
-       Assert(event->events == WL_LATCH_SET ||
-                  event->events == WL_POSTMASTER_DEATH ||
-                  (event->events & (WL_SOCKET_READABLE |
-                                                        WL_SOCKET_WRITEABLE |
-                                                        WL_SOCKET_CLOSED)));
-
-       if (event->events == WL_POSTMASTER_DEATH)
-       {
-               /*
-                * Unlike all the other implementations, we detect postmaster death
-                * using process notification instead of waiting on the postmaster
-                * alive pipe.
-                */
-               WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
-       }
-       else if (event->events == WL_LATCH_SET)
-       {
-               /* We detect latch wakeup using a signal event. */
-               WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
-       }
-       else
-       {
-               /*
-                * We need to compute the adds and deletes required to get from the
-                * old event mask to the new event mask, since kevent treats readable
-                * and writable as separate events.
-                */
-               if (old_events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
-                       old_filt_read = true;
-               if (event->events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
-                       new_filt_read = true;
-               if (old_events & WL_SOCKET_WRITEABLE)
-                       old_filt_write = true;
-               if (event->events & WL_SOCKET_WRITEABLE)
-                       new_filt_write = true;
-               if (old_filt_read && !new_filt_read)
-                       WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
-                                                                        event);
-               else if (!old_filt_read && new_filt_read)
-                       WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
-                                                                        event);
-               if (old_filt_write && !new_filt_write)
-                       WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
-                                                                        event);
-               else if (!old_filt_write && new_filt_write)
-                       WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
-                                                                        event);
-       }
-
-       /* For WL_SOCKET_READ -> WL_SOCKET_CLOSED, no change needed. */
-       if (count == 0)
-               return;
-
-       Assert(count <= 2);
-
-       rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);
-
-       /*
-        * When adding the postmaster's pid, we have to consider that it might
-        * already have exited and perhaps even been replaced by another process
-        * with the same pid.  If so, we have to defer reporting this as an event
-        * until the next call to WaitEventSetWaitBlock().
-        */
-
-       if (rc < 0)
-       {
-               if (event->events == WL_POSTMASTER_DEATH &&
-                       (errno == ESRCH || errno == EACCES))
-                       set->report_postmaster_not_running = true;
-               else
-                       ereport(ERROR,
-                                       (errcode_for_socket_access(),
-                                        errmsg("%s() failed: %m",
-                                                       "kevent")));
-       }
-       else if (event->events == WL_POSTMASTER_DEATH &&
-                        PostmasterPid != getppid() &&
-                        !PostmasterIsAlive())
-       {
-               /*
-                * The extra PostmasterIsAliveInternal() check prevents false alarms
-                * on systems that give a different value for getppid() while being
-                * traced by a debugger.
-                */
-               set->report_postmaster_not_running = true;
-       }
-}
-
-#endif
-
-#if defined(WAIT_USE_WIN32)
-static void
-WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
-{
-       HANDLE     *handle = &set->handles[event->pos + 1];
-
-       if (event->events == WL_LATCH_SET)
-       {
-               Assert(set->latch != NULL);
-               *handle = set->latch->event;
-       }
-       else if (event->events == WL_POSTMASTER_DEATH)
-       {
-               *handle = PostmasterHandle;
-       }
-       else
-       {
-               int                     flags = FD_CLOSE;       /* always check for errors/EOF */
-
-               if (event->events & WL_SOCKET_READABLE)
-                       flags |= FD_READ;
-               if (event->events & WL_SOCKET_WRITEABLE)
-                       flags |= FD_WRITE;
-               if (event->events & WL_SOCKET_CONNECTED)
-                       flags |= FD_CONNECT;
-               if (event->events & WL_SOCKET_ACCEPT)
-                       flags |= FD_ACCEPT;
-
-               if (*handle == WSA_INVALID_EVENT)
-               {
-                       *handle = WSACreateEvent();
-                       if (*handle == WSA_INVALID_EVENT)
-                               elog(ERROR, "failed to create event for socket: error code %d",
-                                        WSAGetLastError());
-               }
-               if (WSAEventSelect(event->fd, *handle, flags) != 0)
-                       elog(ERROR, "failed to set up event for socket: error code %d",
-                                WSAGetLastError());
-
-               Assert(event->fd != PGINVALID_SOCKET);
-       }
-}
-#endif
-
-/*
- * Wait for events added to the set to happen, or until the timeout is
- * reached.  At most nevents occurred events are returned.
- *
- * If timeout = -1, block until an event occurs; if 0, check sockets for
- * readiness, but don't block; if > 0, block for at most timeout milliseconds.
- *
- * Returns the number of events occurred, or 0 if the timeout was reached.
- *
- * Returned events will have the fd, pos, user_data fields set to the
- * values associated with the registered event.
- */
-int
-WaitEventSetWait(WaitEventSet *set, long timeout,
-                                WaitEvent *occurred_events, int nevents,
-                                uint32 wait_event_info)
-{
-       int                     returned_events = 0;
-       instr_time      start_time;
-       instr_time      cur_time;
-       long            cur_timeout = -1;
-
-       Assert(nevents > 0);
-
-       /*
-        * Initialize timeout if requested.  We must record the current time so
-        * that we can determine the remaining timeout if interrupted.
-        */
-       if (timeout >= 0)
-       {
-               INSTR_TIME_SET_CURRENT(start_time);
-               Assert(timeout >= 0 && timeout <= INT_MAX);
-               cur_timeout = timeout;
-       }
-       else
-               INSTR_TIME_SET_ZERO(start_time);
-
-       pgstat_report_wait_start(wait_event_info);
-
-#ifndef WIN32
-       waiting = true;
-#else
-       /* Ensure that signals are serviced even if latch is already set */
-       pgwin32_dispatch_queued_signals();
-#endif
-       while (returned_events == 0)
-       {
-               int                     rc;
-
-               /*
-                * Check if the latch is set already first.  If so, we either exit
-                * immediately or ask the kernel for further events available right
-                * now without waiting, depending on how many events the caller wants.
-                *
-                * If someone sets the latch between this and the
-                * WaitEventSetWaitBlock() below, the setter will write a byte to the
-                * pipe (or signal us and the signal handler will do that), and the
-                * readiness routine will return immediately.
-                *
-                * On unix, If there's a pending byte in the self pipe, we'll notice
-                * whenever blocking. Only clearing the pipe in that case avoids
-                * having to drain it every time WaitLatchOrSocket() is used. Should
-                * the pipe-buffer fill up we're still ok, because the pipe is in
-                * nonblocking mode. It's unlikely for that to happen, because the
-                * self pipe isn't filled unless we're blocking (waiting = true), or
-                * from inside a signal handler in latch_sigurg_handler().
-                *
-                * On windows, we'll also notice if there's a pending event for the
-                * latch when blocking, but there's no danger of anything filling up,
-                * as "Setting an event that is already set has no effect.".
-                *
-                * Note: we assume that the kernel calls involved in latch management
-                * will provide adequate synchronization on machines with weak memory
-                * ordering, so that we cannot miss seeing is_set if a notification
-                * has already been queued.
-                */
-               if (set->latch && !set->latch->is_set)
-               {
-                       /* about to sleep on a latch */
-                       set->latch->maybe_sleeping = true;
-                       pg_memory_barrier();
-                       /* and recheck */
-               }
-
-               if (set->latch && set->latch->is_set)
-               {
-                       occurred_events->fd = PGINVALID_SOCKET;
-                       occurred_events->pos = set->latch_pos;
-                       occurred_events->user_data =
-                               set->events[set->latch_pos].user_data;
-                       occurred_events->events = WL_LATCH_SET;
-                       occurred_events++;
-                       returned_events++;
-
-                       /* could have been set above */
-                       set->latch->maybe_sleeping = false;
-
-                       if (returned_events == nevents)
-                               break;                  /* output buffer full already */
-
-                       /*
-                        * Even though we already have an event, we'll poll just once with
-                        * zero timeout to see what non-latch events we can fit into the
-                        * output buffer at the same time.
-                        */
-                       cur_timeout = 0;
-                       timeout = 0;
-               }
-
-               /*
-                * Wait for events using the readiness primitive chosen at the top of
-                * this file. If -1 is returned, a timeout has occurred, if 0 we have
-                * to retry, everything >= 1 is the number of returned events.
-                */
-               rc = WaitEventSetWaitBlock(set, cur_timeout,
-                                                                  occurred_events, nevents - returned_events);
-
-               if (set->latch &&
-                       set->latch->maybe_sleeping)
-                       set->latch->maybe_sleeping = false;
-
-               if (rc == -1)
-                       break;                          /* timeout occurred */
-               else
-                       returned_events += rc;
-
-               /* If we're not done, update cur_timeout for next iteration */
-               if (returned_events == 0 && timeout >= 0)
-               {
-                       INSTR_TIME_SET_CURRENT(cur_time);
-                       INSTR_TIME_SUBTRACT(cur_time, start_time);
-                       cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
-                       if (cur_timeout <= 0)
-                               break;
-               }
-       }
-#ifndef WIN32
-       waiting = false;
-#endif
-
-       pgstat_report_wait_end();
-
-       return returned_events;
-}
-
-
-#if defined(WAIT_USE_EPOLL)
-
-/*
- * Wait using linux's epoll_wait(2).
- *
- * This is the preferable wait method, as several readiness notifications are
- * delivered, without having to iterate through all of set->events. The return
- * epoll_event struct contain a pointer to our events, making association
- * easy.
- */
-static inline int
-WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
-                                         WaitEvent *occurred_events, int nevents)
-{
-       int                     returned_events = 0;
-       int                     rc;
-       WaitEvent  *cur_event;
-       struct epoll_event *cur_epoll_event;
-
-       /* Sleep */
-       rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
-                                       Min(nevents, set->nevents_space), cur_timeout);
-
-       /* Check return code */
-       if (rc < 0)
-       {
-               /* EINTR is okay, otherwise complain */
-               if (errno != EINTR)
-               {
-                       waiting = false;
-                       ereport(ERROR,
-                                       (errcode_for_socket_access(),
-                                        errmsg("%s() failed: %m",
-                                                       "epoll_wait")));
-               }
-               return 0;
-       }
-       else if (rc == 0)
-       {
-               /* timeout exceeded */
-               return -1;
-       }
-
-       /*
-        * At least one event occurred, iterate over the returned epoll events
-        * until they're either all processed, or we've returned all the events
-        * the caller desired.
-        */
-       for (cur_epoll_event = set->epoll_ret_events;
-                cur_epoll_event < (set->epoll_ret_events + rc) &&
-                returned_events < nevents;
-                cur_epoll_event++)
-       {
-               /* epoll's data pointer is set to the associated WaitEvent */
-               cur_event = (WaitEvent *) cur_epoll_event->data.ptr;
-
-               occurred_events->pos = cur_event->pos;
-               occurred_events->user_data = cur_event->user_data;
-               occurred_events->events = 0;
-
-               if (cur_event->events == WL_LATCH_SET &&
-                       cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
-               {
-                       /* Drain the signalfd. */
-                       drain();
-
-                       if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
-                       {
-                               occurred_events->fd = PGINVALID_SOCKET;
-                               occurred_events->events = WL_LATCH_SET;
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-               else if (cur_event->events == WL_POSTMASTER_DEATH &&
-                                cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
-               {
-                       /*
-                        * We expect an EPOLLHUP when the remote end is closed, but
-                        * because we don't expect the pipe to become readable or to have
-                        * any errors either, treat those cases as postmaster death, too.
-                        *
-                        * Be paranoid about a spurious event signaling the postmaster as
-                        * being dead.  There have been reports about that happening with
-                        * older primitives (select(2) to be specific), and a spurious
-                        * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
-                        * cost much.
-                        */
-                       if (!PostmasterIsAliveInternal())
-                       {
-                               if (set->exit_on_postmaster_death)
-                                       proc_exit(1);
-                               occurred_events->fd = PGINVALID_SOCKET;
-                               occurred_events->events = WL_POSTMASTER_DEATH;
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-               else if (cur_event->events & (WL_SOCKET_READABLE |
-                                                                         WL_SOCKET_WRITEABLE |
-                                                                         WL_SOCKET_CLOSED))
-               {
-                       Assert(cur_event->fd != PGINVALID_SOCKET);
-
-                       if ((cur_event->events & WL_SOCKET_READABLE) &&
-                               (cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
-                       {
-                               /* data available in socket, or EOF */
-                               occurred_events->events |= WL_SOCKET_READABLE;
-                       }
-
-                       if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
-                               (cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
-                       {
-                               /* writable, or EOF */
-                               occurred_events->events |= WL_SOCKET_WRITEABLE;
-                       }
-
-                       if ((cur_event->events & WL_SOCKET_CLOSED) &&
-                               (cur_epoll_event->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)))
-                       {
-                               /* remote peer shut down, or error */
-                               occurred_events->events |= WL_SOCKET_CLOSED;
-                       }
-
-                       if (occurred_events->events != 0)
-                       {
-                               occurred_events->fd = cur_event->fd;
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-       }
-
-       return returned_events;
-}
-
-#elif defined(WAIT_USE_KQUEUE)
-
-/*
- * Wait using kevent(2) on BSD-family systems and macOS.
- *
- * For now this mirrors the epoll code, but in future it could modify the fd
- * set in the same call to kevent as it uses for waiting instead of doing that
- * with separate system calls.
- */
-static int
-WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
-                                         WaitEvent *occurred_events, int nevents)
-{
-       int                     returned_events = 0;
-       int                     rc;
-       WaitEvent  *cur_event;
-       struct kevent *cur_kqueue_event;
-       struct timespec timeout;
-       struct timespec *timeout_p;
-
-       if (cur_timeout < 0)
-               timeout_p = NULL;
-       else
-       {
-               timeout.tv_sec = cur_timeout / 1000;
-               timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
-               timeout_p = &timeout;
-       }
-
-       /*
-        * Report postmaster events discovered by WaitEventAdjustKqueue() or an
-        * earlier call to WaitEventSetWait().
-        */
-       if (unlikely(set->report_postmaster_not_running))
-       {
-               if (set->exit_on_postmaster_death)
-                       proc_exit(1);
-               occurred_events->fd = PGINVALID_SOCKET;
-               occurred_events->events = WL_POSTMASTER_DEATH;
-               return 1;
-       }
-
-       /* Sleep */
-       rc = kevent(set->kqueue_fd, NULL, 0,
-                               set->kqueue_ret_events,
-                               Min(nevents, set->nevents_space),
-                               timeout_p);
-
-       /* Check return code */
-       if (rc < 0)
-       {
-               /* EINTR is okay, otherwise complain */
-               if (errno != EINTR)
-               {
-                       waiting = false;
-                       ereport(ERROR,
-                                       (errcode_for_socket_access(),
-                                        errmsg("%s() failed: %m",
-                                                       "kevent")));
-               }
-               return 0;
-       }
-       else if (rc == 0)
-       {
-               /* timeout exceeded */
-               return -1;
-       }
-
-       /*
-        * At least one event occurred, iterate over the returned kqueue events
-        * until they're either all processed, or we've returned all the events
-        * the caller desired.
-        */
-       for (cur_kqueue_event = set->kqueue_ret_events;
-                cur_kqueue_event < (set->kqueue_ret_events + rc) &&
-                returned_events < nevents;
-                cur_kqueue_event++)
-       {
-               /* kevent's udata points to the associated WaitEvent */
-               cur_event = AccessWaitEvent(cur_kqueue_event);
-
-               occurred_events->pos = cur_event->pos;
-               occurred_events->user_data = cur_event->user_data;
-               occurred_events->events = 0;
-
-               if (cur_event->events == WL_LATCH_SET &&
-                       cur_kqueue_event->filter == EVFILT_SIGNAL)
-               {
-                       if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
-                       {
-                               occurred_events->fd = PGINVALID_SOCKET;
-                               occurred_events->events = WL_LATCH_SET;
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-               else if (cur_event->events == WL_POSTMASTER_DEATH &&
-                                cur_kqueue_event->filter == EVFILT_PROC &&
-                                (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
-               {
-                       /*
-                        * The kernel will tell this kqueue object only once about the
-                        * exit of the postmaster, so let's remember that for next time so
-                        * that we provide level-triggered semantics.
-                        */
-                       set->report_postmaster_not_running = true;
-
-                       if (set->exit_on_postmaster_death)
-                               proc_exit(1);
-                       occurred_events->fd = PGINVALID_SOCKET;
-                       occurred_events->events = WL_POSTMASTER_DEATH;
-                       occurred_events++;
-                       returned_events++;
-               }
-               else if (cur_event->events & (WL_SOCKET_READABLE |
-                                                                         WL_SOCKET_WRITEABLE |
-                                                                         WL_SOCKET_CLOSED))
-               {
-                       Assert(cur_event->fd >= 0);
-
-                       if ((cur_event->events & WL_SOCKET_READABLE) &&
-                               (cur_kqueue_event->filter == EVFILT_READ))
-                       {
-                               /* readable, or EOF */
-                               occurred_events->events |= WL_SOCKET_READABLE;
-                       }
-
-                       if ((cur_event->events & WL_SOCKET_CLOSED) &&
-                               (cur_kqueue_event->filter == EVFILT_READ) &&
-                               (cur_kqueue_event->flags & EV_EOF))
-                       {
-                               /* the remote peer has shut down */
-                               occurred_events->events |= WL_SOCKET_CLOSED;
-                       }
-
-                       if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
-                               (cur_kqueue_event->filter == EVFILT_WRITE))
-                       {
-                               /* writable, or EOF */
-                               occurred_events->events |= WL_SOCKET_WRITEABLE;
-                       }
-
-                       if (occurred_events->events != 0)
-                       {
-                               occurred_events->fd = cur_event->fd;
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-       }
-
-       return returned_events;
-}
-
-#elif defined(WAIT_USE_POLL)
-
-/*
- * Wait using poll(2).
- *
- * This allows receiving readiness notifications for several events at once,
- * but requires iterating through all of set->pollfds.
- */
-static inline int
-WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
-                                         WaitEvent *occurred_events, int nevents)
-{
-       int                     returned_events = 0;
-       int                     rc;
-       WaitEvent  *cur_event;
-       struct pollfd *cur_pollfd;
-
-       /* Sleep */
-       rc = poll(set->pollfds, set->nevents, (int) cur_timeout);
-
-       /* Check return code */
-       if (rc < 0)
-       {
-               /* EINTR is okay, otherwise complain */
-               if (errno != EINTR)
-               {
-                       waiting = false;
-                       ereport(ERROR,
-                                       (errcode_for_socket_access(),
-                                        errmsg("%s() failed: %m",
-                                                       "poll")));
-               }
-               return 0;
-       }
-       else if (rc == 0)
-       {
-               /* timeout exceeded */
-               return -1;
-       }
-
-       for (cur_event = set->events, cur_pollfd = set->pollfds;
-                cur_event < (set->events + set->nevents) &&
-                returned_events < nevents;
-                cur_event++, cur_pollfd++)
-       {
-               /* no activity on this FD, skip */
-               if (cur_pollfd->revents == 0)
-                       continue;
-
-               occurred_events->pos = cur_event->pos;
-               occurred_events->user_data = cur_event->user_data;
-               occurred_events->events = 0;
-
-               if (cur_event->events == WL_LATCH_SET &&
-                       (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
-               {
-                       /* There's data in the self-pipe, clear it. */
-                       drain();
-
-                       if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
-                       {
-                               occurred_events->fd = PGINVALID_SOCKET;
-                               occurred_events->events = WL_LATCH_SET;
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-               else if (cur_event->events == WL_POSTMASTER_DEATH &&
-                                (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
-               {
-                       /*
-                        * We expect a POLLHUP when the remote end is closed, but because
-                        * we don't expect the pipe to become readable or to have any
-                        * errors either, treat those cases as postmaster death, too.
-                        *
-                        * Be paranoid about a spurious event signaling the postmaster as
-                        * being dead.  There have been reports about that happening with
-                        * older primitives (select(2) to be specific), and a spurious
-                        * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
-                        * cost much.
-                        */
-                       if (!PostmasterIsAliveInternal())
-                       {
-                               if (set->exit_on_postmaster_death)
-                                       proc_exit(1);
-                               occurred_events->fd = PGINVALID_SOCKET;
-                               occurred_events->events = WL_POSTMASTER_DEATH;
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-               else if (cur_event->events & (WL_SOCKET_READABLE |
-                                                                         WL_SOCKET_WRITEABLE |
-                                                                         WL_SOCKET_CLOSED))
-               {
-                       int                     errflags = POLLHUP | POLLERR | POLLNVAL;
-
-                       Assert(cur_event->fd >= PGINVALID_SOCKET);
-
-                       if ((cur_event->events & WL_SOCKET_READABLE) &&
-                               (cur_pollfd->revents & (POLLIN | errflags)))
-                       {
-                               /* data available in socket, or EOF */
-                               occurred_events->events |= WL_SOCKET_READABLE;
-                       }
-
-                       if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
-                               (cur_pollfd->revents & (POLLOUT | errflags)))
-                       {
-                               /* writeable, or EOF */
-                               occurred_events->events |= WL_SOCKET_WRITEABLE;
-                       }
-
-#ifdef POLLRDHUP
-                       if ((cur_event->events & WL_SOCKET_CLOSED) &&
-                               (cur_pollfd->revents & (POLLRDHUP | errflags)))
-                       {
-                               /* remote peer closed, or error */
-                               occurred_events->events |= WL_SOCKET_CLOSED;
-                       }
-#endif
-
-                       if (occurred_events->events != 0)
-                       {
-                               occurred_events->fd = cur_event->fd;
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-       }
-       return returned_events;
-}
-
-#elif defined(WAIT_USE_WIN32)
-
-/*
- * Wait using Windows' WaitForMultipleObjects().  Each call only "consumes" one
- * event, so we keep calling until we've filled up our output buffer to match
- * the behavior of the other implementations.
- *
- * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273
- */
-static inline int
-WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
-                                         WaitEvent *occurred_events, int nevents)
-{
-       int                     returned_events = 0;
-       DWORD           rc;
-       WaitEvent  *cur_event;
-
-       /* Reset any wait events that need it */
-       for (cur_event = set->events;
-                cur_event < (set->events + set->nevents);
-                cur_event++)
-       {
-               if (cur_event->reset)
-               {
-                       WaitEventAdjustWin32(set, cur_event);
-                       cur_event->reset = false;
-               }
-
-               /*
-                * We associate the socket with a new event handle for each
-                * WaitEventSet.  FD_CLOSE is only generated once if the other end
-                * closes gracefully.  Therefore we might miss the FD_CLOSE
-                * notification, if it was delivered to another event after we stopped
-                * waiting for it.  Close that race by peeking for EOF after setting
-                * up this handle to receive notifications, and before entering the
-                * sleep.
-                *
-                * XXX If we had one event handle for the lifetime of a socket, we
-                * wouldn't need this.
-                */
-               if (cur_event->events & WL_SOCKET_READABLE)
-               {
-                       char            c;
-                       WSABUF          buf;
-                       DWORD           received;
-                       DWORD           flags;
-
-                       buf.buf = &c;
-                       buf.len = 1;
-                       flags = MSG_PEEK;
-                       if (WSARecv(cur_event->fd, &buf, 1, &received, &flags, NULL, NULL) == 0)
-                       {
-                               occurred_events->pos = cur_event->pos;
-                               occurred_events->user_data = cur_event->user_data;
-                               occurred_events->events = WL_SOCKET_READABLE;
-                               occurred_events->fd = cur_event->fd;
-                               return 1;
-                       }
-               }
-
-               /*
-                * Windows does not guarantee to log an FD_WRITE network event
-                * indicating that more data can be sent unless the previous send()
-                * failed with WSAEWOULDBLOCK.  While our caller might well have made
-                * such a call, we cannot assume that here.  Therefore, if waiting for
-                * write-ready, force the issue by doing a dummy send().  If the dummy
-                * send() succeeds, assume that the socket is in fact write-ready, and
-                * return immediately.  Also, if it fails with something other than
-                * WSAEWOULDBLOCK, return a write-ready indication to let our caller
-                * deal with the error condition.
-                */
-               if (cur_event->events & WL_SOCKET_WRITEABLE)
-               {
-                       char            c;
-                       WSABUF          buf;
-                       DWORD           sent;
-                       int                     r;
-
-                       buf.buf = &c;
-                       buf.len = 0;
-
-                       r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
-                       if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
-                       {
-                               occurred_events->pos = cur_event->pos;
-                               occurred_events->user_data = cur_event->user_data;
-                               occurred_events->events = WL_SOCKET_WRITEABLE;
-                               occurred_events->fd = cur_event->fd;
-                               return 1;
-                       }
-               }
-       }
-
-       /*
-        * Sleep.
-        *
-        * Need to wait for ->nevents + 1, because the signal handle is in [0].
-        */
-       rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
-                                                               cur_timeout);
-
-       /* Check return code */
-       if (rc == WAIT_FAILED)
-               elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
-                        GetLastError());
-       else if (rc == WAIT_TIMEOUT)
-       {
-               /* timeout exceeded */
-               return -1;
-       }
-
-       if (rc == WAIT_OBJECT_0)
-       {
-               /* Service newly-arrived signals */
-               pgwin32_dispatch_queued_signals();
-               return 0;                               /* retry */
-       }
-
-       /*
-        * With an offset of one, due to the always present pgwin32_signal_event,
-        * the handle offset directly corresponds to a wait event.
-        */
-       cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
-
-       for (;;)
-       {
-               int                     next_pos;
-               int                     count;
-
-               occurred_events->pos = cur_event->pos;
-               occurred_events->user_data = cur_event->user_data;
-               occurred_events->events = 0;
-
-               if (cur_event->events == WL_LATCH_SET)
-               {
-                       /*
-                        * We cannot use set->latch->event to reset the fired event if we
-                        * aren't waiting on this latch now.
-                        */
-                       if (!ResetEvent(set->handles[cur_event->pos + 1]))
-                               elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
-
-                       if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
-                       {
-                               occurred_events->fd = PGINVALID_SOCKET;
-                               occurred_events->events = WL_LATCH_SET;
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-               else if (cur_event->events == WL_POSTMASTER_DEATH)
-               {
-                       /*
-                        * Postmaster apparently died.  Since the consequences of falsely
-                        * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
-                        * take the trouble to positively verify this with
-                        * PostmasterIsAlive(), even though there is no known reason to
-                        * think that the event could be falsely set on Windows.
-                        */
-                       if (!PostmasterIsAliveInternal())
-                       {
-                               if (set->exit_on_postmaster_death)
-                                       proc_exit(1);
-                               occurred_events->fd = PGINVALID_SOCKET;
-                               occurred_events->events = WL_POSTMASTER_DEATH;
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-               else if (cur_event->events & WL_SOCKET_MASK)
-               {
-                       WSANETWORKEVENTS resEvents;
-                       HANDLE          handle = set->handles[cur_event->pos + 1];
-
-                       Assert(cur_event->fd);
-
-                       occurred_events->fd = cur_event->fd;
-
-                       ZeroMemory(&resEvents, sizeof(resEvents));
-                       if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
-                               elog(ERROR, "failed to enumerate network events: error code %d",
-                                        WSAGetLastError());
-                       if ((cur_event->events & WL_SOCKET_READABLE) &&
-                               (resEvents.lNetworkEvents & FD_READ))
-                       {
-                               /* data available in socket */
-                               occurred_events->events |= WL_SOCKET_READABLE;
-
-                               /*------
-                                * WaitForMultipleObjects doesn't guarantee that a read event
-                                * will be returned if the latch is set at the same time.  Even
-                                * if it did, the caller might drop that event expecting it to
-                                * reoccur on next call.  So, we must force the event to be
-                                * reset if this WaitEventSet is used again in order to avoid
-                                * an indefinite hang.
-                                *
-                                * Refer
-                                * https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
-                                * for the behavior of socket events.
-                                *------
-                                */
-                               cur_event->reset = true;
-                       }
-                       if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
-                               (resEvents.lNetworkEvents & FD_WRITE))
-                       {
-                               /* writeable */
-                               occurred_events->events |= WL_SOCKET_WRITEABLE;
-                       }
-                       if ((cur_event->events & WL_SOCKET_CONNECTED) &&
-                               (resEvents.lNetworkEvents & FD_CONNECT))
-                       {
-                               /* connected */
-                               occurred_events->events |= WL_SOCKET_CONNECTED;
-                       }
-                       if ((cur_event->events & WL_SOCKET_ACCEPT) &&
-                               (resEvents.lNetworkEvents & FD_ACCEPT))
-                       {
-                               /* incoming connection could be accepted */
-                               occurred_events->events |= WL_SOCKET_ACCEPT;
-                       }
-                       if (resEvents.lNetworkEvents & FD_CLOSE)
-                       {
-                               /* EOF/error, so signal all caller-requested socket flags */
-                               occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
-                       }
-
-                       if (occurred_events->events != 0)
-                       {
-                               occurred_events++;
-                               returned_events++;
-                       }
-               }
-
-               /* Is the output buffer full? */
-               if (returned_events == nevents)
-                       break;
-
-               /* Have we run out of possible events? */
-               next_pos = cur_event->pos + 1;
-               if (next_pos == set->nevents)
-                       break;
-
-               /*
-                * Poll the rest of the event handles in the array starting at
-                * next_pos, being careful to skip over the initial signal handle too.
-                * This time we use a zero timeout.
-                */
-               count = set->nevents - next_pos;
-               rc = WaitForMultipleObjects(count,
-                                                                       set->handles + 1 + next_pos,
-                                                                       false,
-                                                                       0);
-
-               /*
-                * We don't distinguish between errors and WAIT_TIMEOUT here because
-                * we already have events to report.
-                */
-               if (rc < WAIT_OBJECT_0 || rc >= WAIT_OBJECT_0 + count)
-                       break;
-
-               /* We have another event to decode. */
-               cur_event = &set->events[next_pos + (rc - WAIT_OBJECT_0)];
-       }
-
-       return returned_events;
-}
-#endif
-
-/*
- * Return whether the current build options can report WL_SOCKET_CLOSED.
- */
-bool
-WaitEventSetCanReportClosed(void)
-{
-#if (defined(WAIT_USE_POLL) && defined(POLLRDHUP)) || \
-       defined(WAIT_USE_EPOLL) || \
-       defined(WAIT_USE_KQUEUE)
-       return true;
-#else
-       return false;
-#endif
-}
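
A caller-side sketch of the capability check above (the wait event set and client_sock below are assumptions used only for illustration): probe once, and request WL_SOCKET_CLOSED only where the build can actually report it.

	/* Sketch only: register peer-shutdown detection where supported. */
	uint32		mask = WL_SOCKET_READABLE;

	if (WaitEventSetCanReportClosed())
		mask |= WL_SOCKET_CLOSED;

	AddWaitEventToSet(set, mask, client_sock, NULL, NULL);
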
-
-/*
- * Get the number of wait events registered in a given WaitEventSet.
- */
-int
-GetNumRegisteredWaitEvents(WaitEventSet *set)
-{
-       return set->nevents;
-}
-
-#if defined(WAIT_USE_SELF_PIPE)
-
-/*
- * SetLatch uses SIGURG to wake up the process waiting on the latch.
- *
- * Wake up WaitLatch, if we're waiting.
- */
-static void
-latch_sigurg_handler(SIGNAL_ARGS)
-{
-       if (waiting)
-               sendSelfPipeByte();
-}
-
-/* Send one byte to the self-pipe, to wake up WaitLatch */
-static void
-sendSelfPipeByte(void)
-{
-       int                     rc;
-       char            dummy = 0;
-
-retry:
-       rc = write(selfpipe_writefd, &dummy, 1);
-       if (rc < 0)
-       {
-               /* If interrupted by signal, just retry */
-               if (errno == EINTR)
-                       goto retry;
-
-               /*
-                * If the pipe is full, we don't need to retry; the data that's there
-                * already is enough to wake up WaitLatch.
-                */
-               if (errno == EAGAIN || errno == EWOULDBLOCK)
-                       return;
-
-               /*
-                * Oops, the write() failed for some other reason. We might be in a
-                * signal handler, so it's not safe to elog(). We have no choice but
-                * to silently ignore the error.
-                */
-               return;
-       }
-}
-
-#endif
-
-#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
-
-/*
- * Read all available data from self-pipe or signalfd.
- *
- * Note: this is only called when waiting = true.  If it fails and doesn't
- * return, it must reset that flag first (though ideally, this will never
- * happen).
- */
-static void
-drain(void)
-{
-       char            buf[1024];
-       int                     rc;
-       int                     fd;
-
-#ifdef WAIT_USE_SELF_PIPE
-       fd = selfpipe_readfd;
-#else
-       fd = signal_fd;
-#endif
-
-       for (;;)
-       {
-               rc = read(fd, buf, sizeof(buf));
-               if (rc < 0)
-               {
-                       if (errno == EAGAIN || errno == EWOULDBLOCK)
-                               break;                  /* the descriptor is empty */
-                       else if (errno == EINTR)
-                               continue;               /* retry */
-                       else
-                       {
-                               waiting = false;
-#ifdef WAIT_USE_SELF_PIPE
-                               elog(ERROR, "read() on self-pipe failed: %m");
-#else
-                               elog(ERROR, "read() on signalfd failed: %m");
-#endif
-                       }
-               }
-               else if (rc == 0)
-               {
-                       waiting = false;
-#ifdef WAIT_USE_SELF_PIPE
-                       elog(ERROR, "unexpected EOF on self-pipe");
-#else
-                       elog(ERROR, "unexpected EOF on signalfd");
-#endif
-               }
-               else if (rc < sizeof(buf))
-               {
-                       /* we successfully drained the pipe; no need to read() again */
-                       break;
-               }
-               /* else buffer wasn't big enough, so read again */
-       }
-}
-
-#endif
-
-static void
-ResOwnerReleaseWaitEventSet(Datum res)
-{
-       WaitEventSet *set = (WaitEventSet *) DatumGetPointer(res);
-
-       Assert(set->owner != NULL);
-       set->owner = NULL;
-       FreeWaitEventSet(set);
-}
index 7473bd1dd73752276e679c78addd62bbe3898720..b1b73dac3bed6d37930a3f766e6c1db95d91c905 100644 (file)
@@ -18,5 +18,6 @@ backend_sources += files(
   'sinval.c',
   'sinvaladt.c',
   'standby.c',
+  'waiteventset.c',
 
 )
diff --git a/src/backend/storage/ipc/waiteventset.c b/src/backend/storage/ipc/waiteventset.c
new file mode 100644 (file)
index 0000000..7c0e669
--- /dev/null
@@ -0,0 +1,2036 @@
+/*-------------------------------------------------------------------------
+ *
+ * waiteventset.c
+ *       ppoll()/pselect() like abstraction
+ *
+ * WaitEvents are an abstraction for waiting for one or more events at a time.
+ * The waiting can be done in a race-free fashion, similar to ppoll() or
+ * pselect() (as opposed to plain poll()/select()).
+ *
+ * You can wait for:
+ * - a latch being set from another process or from a signal handler in the same
+ *   process (WL_LATCH_SET)
+ * - data to become readable or writeable on a socket (WL_SOCKET_*)
+ * - postmaster death (WL_POSTMASTER_DEATH or WL_EXIT_ON_PM_DEATH)
+ * - timeout (WL_TIMEOUT)
+ *
+ * Implementation
+ * --------------
+ *
+ * The poll() implementation uses the so-called self-pipe trick to overcome the
+ * race condition involved with poll() and setting a global flag in the signal
+ * handler. When a latch is set and the current process is waiting for it, the
+ * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
+ * A signal by itself doesn't interrupt poll() on all platforms, and even on
+ * platforms where it does, a signal that arrives just before the poll() call
+ * does not prevent poll() from entering sleep. An incoming byte on a pipe
+ * however reliably interrupts the sleep, and causes poll() to return
+ * immediately even if the signal arrives before poll() begins.
+ *
+ * The epoll() implementation overcomes the race with a different technique: it
+ * keeps SIGURG blocked and consumes from a signalfd() descriptor instead.  We
+ * don't need to register a signal handler or create our own self-pipe.  We
+ * assume that any system that has Linux epoll() also has Linux signalfd().
+ *
+ * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
+ *
+ * The Windows implementation uses Windows events that are inherited by all
+ * postmaster child processes. There's no need for the self-pipe trick there.
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *       src/backend/storage/ipc/waiteventset.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <fcntl.h>
+#include <limits.h>
+#include <signal.h>
+#include <unistd.h>
+#ifdef HAVE_SYS_EPOLL_H
+#include <sys/epoll.h>
+#endif
+#ifdef HAVE_SYS_EVENT_H
+#include <sys/event.h>
+#endif
+#ifdef HAVE_SYS_SIGNALFD_H
+#include <sys/signalfd.h>
+#endif
+#ifdef HAVE_POLL_H
+#include <poll.h>
+#endif
+
+#include "libpq/pqsignal.h"
+#include "miscadmin.h"
+#include "pgstat.h"
+#include "portability/instr_time.h"
+#include "postmaster/postmaster.h"
+#include "storage/fd.h"
+#include "storage/ipc.h"
+#include "storage/pmsignal.h"
+#include "storage/latch.h"
+#include "storage/waiteventset.h"
+#include "utils/memutils.h"
+#include "utils/resowner.h"
+
+/*
+ * Select the fd readiness primitive to use. Normally the "most modern"
+ * primitive supported by the OS will be used, but for testing it can be
+ * useful to manually specify the used primitive.  If desired, just add a
+ * define somewhere before this block.
+ */
+#if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
+       defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
+/* don't overwrite manual choice */
+#elif defined(HAVE_SYS_EPOLL_H)
+#define WAIT_USE_EPOLL
+#elif defined(HAVE_KQUEUE)
+#define WAIT_USE_KQUEUE
+#elif defined(HAVE_POLL)
+#define WAIT_USE_POLL
+#elif WIN32
+#define WAIT_USE_WIN32
+#else
+#error "no wait set implementation available"
+#endif
+
+/*
+ * By default, we use a self-pipe with poll() and a signalfd with epoll(), if
+ * available.  For testing the choice can also be manually specified.
+ */
+#if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
+#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
+/* don't overwrite manual choice */
+#elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H)
+#define WAIT_USE_SIGNALFD
+#else
+#define WAIT_USE_SELF_PIPE
+#endif
+#endif
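
As the two selection blocks above note, both choices can be pinned manually for testing by defining the macros before this point; a minimal sketch (placing the defines in a private header or passing them via CPPFLAGS is an assumption, not something this file prescribes):

	/* Sketch only: force the poll(2) + self-pipe combination everywhere. */
	#define WAIT_USE_POLL
	#define WAIT_USE_SELF_PIPE
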
+
+/* typedef in waiteventset.h */
+struct WaitEventSet
+{
+       ResourceOwner owner;
+
+       int                     nevents;                /* number of registered events */
+       int                     nevents_space;  /* maximum number of events in this set */
+
+       /*
+        * Array, of nevents_space length, storing the definition of events this
+        * set is waiting for.
+        */
+       WaitEvent  *events;
+
+       /*
+        * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
+        * said latch, and latch_pos the offset in the ->events array. This is
+        * useful because we check the state of the latch before performing doing
+        * useful because we check the state of the latch before performing
+        */
+       Latch      *latch;
+       int                     latch_pos;
+
+       /*
+        * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
+        * is set so that we'll exit immediately if postmaster death is detected,
+        * instead of returning.
+        */
+       bool            exit_on_postmaster_death;
+
+#if defined(WAIT_USE_EPOLL)
+       int                     epoll_fd;
+       /* epoll_wait returns events in a user-provided array, allocate once */
+       struct epoll_event *epoll_ret_events;
+#elif defined(WAIT_USE_KQUEUE)
+       int                     kqueue_fd;
+       /* kevent returns events in a user-provided array, allocate once */
+       struct kevent *kqueue_ret_events;
+       bool            report_postmaster_not_running;
+#elif defined(WAIT_USE_POLL)
+       /* poll expects events to be waited on every poll() call, prepare once */
+       struct pollfd *pollfds;
+#elif defined(WAIT_USE_WIN32)
+
+       /*
+        * Array of Windows events. The first element always contains
+        * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
+        * event->pos + 1).
+        */
+       HANDLE     *handles;
+#endif
+};
+
+#ifndef WIN32
+/* Are we currently in WaitLatch? The signal handler would like to know. */
+static volatile sig_atomic_t waiting = false;
+#endif
+
+#ifdef WAIT_USE_SIGNALFD
+/* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
+static int     signal_fd = -1;
+#endif
+
+#ifdef WAIT_USE_SELF_PIPE
+/* Read and write ends of the self-pipe */
+static int     selfpipe_readfd = -1;
+static int     selfpipe_writefd = -1;
+
+/* Process owning the self-pipe --- needed for checking purposes */
+static int     selfpipe_owner_pid = 0;
+
+/* Private function prototypes */
+static void latch_sigurg_handler(SIGNAL_ARGS);
+static void sendSelfPipeByte(void);
+#endif
+
+#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
+static void drain(void);
+#endif
+
+#if defined(WAIT_USE_EPOLL)
+static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
+#elif defined(WAIT_USE_KQUEUE)
+static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
+#elif defined(WAIT_USE_POLL)
+static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
+#elif defined(WAIT_USE_WIN32)
+static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
+#endif
+
+static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
+                                                                               WaitEvent *occurred_events, int nevents);
+
+/* ResourceOwner support to hold WaitEventSets */
+static void ResOwnerReleaseWaitEventSet(Datum res);
+
+static const ResourceOwnerDesc wait_event_set_resowner_desc =
+{
+       .name = "WaitEventSet",
+       .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
+       .release_priority = RELEASE_PRIO_WAITEVENTSETS,
+       .ReleaseResource = ResOwnerReleaseWaitEventSet,
+       .DebugPrint = NULL
+};
+
+/* Convenience wrappers over ResourceOwnerRemember/Forget */
+static inline void
+ResourceOwnerRememberWaitEventSet(ResourceOwner owner, WaitEventSet *set)
+{
+       ResourceOwnerRemember(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
+}
+static inline void
+ResourceOwnerForgetWaitEventSet(ResourceOwner owner, WaitEventSet *set)
+{
+       ResourceOwnerForget(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
+}
+
+
+/*
+ * Initialize the process-local wait event infrastructure.
+ *
+ * This must be called once during startup of any process that can wait on
+ * latches, before it issues any InitLatch() or OwnLatch() calls.
+ */
+void
+InitializeWaitEventSupport(void)
+{
+#if defined(WAIT_USE_SELF_PIPE)
+       int                     pipefd[2];
+
+       if (IsUnderPostmaster)
+       {
+               /*
+                * We might have inherited connections to a self-pipe created by the
+                * postmaster.  It's critical that child processes create their own
+                * self-pipes, of course, and we really want them to close the
+                * inherited FDs for safety's sake.
+                */
+               if (selfpipe_owner_pid != 0)
+               {
+                       /* Assert we go through here but once in a child process */
+                       Assert(selfpipe_owner_pid != MyProcPid);
+                       /* Release postmaster's pipe FDs; ignore any error */
+                       (void) close(selfpipe_readfd);
+                       (void) close(selfpipe_writefd);
+                       /* Clean up, just for safety's sake; we'll set these below */
+                       selfpipe_readfd = selfpipe_writefd = -1;
+                       selfpipe_owner_pid = 0;
+                       /* Keep fd.c's accounting straight */
+                       ReleaseExternalFD();
+                       ReleaseExternalFD();
+               }
+               else
+               {
+                       /*
+                        * Postmaster didn't create a self-pipe ... or else we're in an
+                        * EXEC_BACKEND build, in which case it doesn't matter since the
+                        * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
+                        * fd.c won't have state to clean up, either.
+                        */
+                       Assert(selfpipe_readfd == -1);
+               }
+       }
+       else
+       {
+               /* In postmaster or standalone backend, assert we do this but once */
+               Assert(selfpipe_readfd == -1);
+               Assert(selfpipe_owner_pid == 0);
+       }
+
+       /*
+        * Set up the self-pipe that allows a signal handler to wake up the
+        * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
+        * that SetLatch won't block if the event has already been set many times
+        * filling the kernel buffer. Make the read-end non-blocking too, so that
+        * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
+        * Also, make both FDs close-on-exec, since we surely do not want any
+        * child processes messing with them.
+        */
+       if (pipe(pipefd) < 0)
+               elog(FATAL, "pipe() failed: %m");
+       if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
+               elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
+       if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
+               elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
+       if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
+               elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
+       if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
+               elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");
+
+       selfpipe_readfd = pipefd[0];
+       selfpipe_writefd = pipefd[1];
+       selfpipe_owner_pid = MyProcPid;
+
+       /* Tell fd.c about these two long-lived FDs */
+       ReserveExternalFD();
+       ReserveExternalFD();
+
+       pqsignal(SIGURG, latch_sigurg_handler);
+#endif
+
+#ifdef WAIT_USE_SIGNALFD
+       sigset_t        signalfd_mask;
+
+       if (IsUnderPostmaster)
+       {
+               /*
+                * It would probably be safe to re-use the inherited signalfd since
+                * signalfds only see the current process's pending signals, but it
+                * seems less surprising to close it and create our own.
+                */
+               if (signal_fd != -1)
+               {
+                       /* Release postmaster's signal FD; ignore any error */
+                       (void) close(signal_fd);
+                       signal_fd = -1;
+                       ReleaseExternalFD();
+               }
+       }
+
+       /* Block SIGURG, because we'll receive it through a signalfd. */
+       sigaddset(&UnBlockSig, SIGURG);
+
+       /* Set up the signalfd to receive SIGURG notifications. */
+       sigemptyset(&signalfd_mask);
+       sigaddset(&signalfd_mask, SIGURG);
+       signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
+       if (signal_fd < 0)
+               elog(FATAL, "signalfd() failed");
+       ReserveExternalFD();
+#endif
+
+#ifdef WAIT_USE_KQUEUE
+       /* Ignore SIGURG, because we'll receive it via kqueue. */
+       pqsignal(SIGURG, SIG_IGN);
+#endif
+}
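
A minimal sketch of the ordering required by the comment at the top of this function; the child_main() wrapper and the process-local latch are hypothetical, while InitializeWaitEventSupport() and InitLatch() are the real entry points:

	static Latch local_latch;

	static void
	child_main(void)
	{
		/* Create the wakeup machinery (self-pipe, signalfd, or kqueue) first. */
		InitializeWaitEventSupport();

		/* Only after that may this process initialize or own a latch. */
		InitLatch(&local_latch);
	}
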
+
+/*
+ * Create a WaitEventSet with space for nevents different events to wait for.
+ *
+ * These events can then be efficiently waited upon together, using
+ * WaitEventSetWait().
+ *
+ * The WaitEventSet is tracked by the given 'resowner'.  Use NULL for session
+ * lifetime.
+ */
+WaitEventSet *
+CreateWaitEventSet(ResourceOwner resowner, int nevents)
+{
+       WaitEventSet *set;
+       char       *data;
+       Size            sz = 0;
+
+       /*
+        * Use MAXALIGN size/alignment to guarantee that later uses of memory are
+        * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
+        * platforms, but earlier allocations like WaitEventSet and WaitEvent
+        * might not be sized to guarantee that when purely using sizeof().
+        */
+       sz += MAXALIGN(sizeof(WaitEventSet));
+       sz += MAXALIGN(sizeof(WaitEvent) * nevents);
+
+#if defined(WAIT_USE_EPOLL)
+       sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
+#elif defined(WAIT_USE_KQUEUE)
+       sz += MAXALIGN(sizeof(struct kevent) * nevents);
+#elif defined(WAIT_USE_POLL)
+       sz += MAXALIGN(sizeof(struct pollfd) * nevents);
+#elif defined(WAIT_USE_WIN32)
+       /* need space for the pgwin32_signal_event */
+       sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
+#endif
+
+       if (resowner != NULL)
+               ResourceOwnerEnlarge(resowner);
+
+       data = (char *) MemoryContextAllocZero(TopMemoryContext, sz);
+
+       set = (WaitEventSet *) data;
+       data += MAXALIGN(sizeof(WaitEventSet));
+
+       set->events = (WaitEvent *) data;
+       data += MAXALIGN(sizeof(WaitEvent) * nevents);
+
+#if defined(WAIT_USE_EPOLL)
+       set->epoll_ret_events = (struct epoll_event *) data;
+       data += MAXALIGN(sizeof(struct epoll_event) * nevents);
+#elif defined(WAIT_USE_KQUEUE)
+       set->kqueue_ret_events = (struct kevent *) data;
+       data += MAXALIGN(sizeof(struct kevent) * nevents);
+#elif defined(WAIT_USE_POLL)
+       set->pollfds = (struct pollfd *) data;
+       data += MAXALIGN(sizeof(struct pollfd) * nevents);
+#elif defined(WAIT_USE_WIN32)
+       set->handles = (HANDLE) data;
+       data += MAXALIGN(sizeof(HANDLE) * nevents);
+#endif
+
+       set->latch = NULL;
+       set->nevents_space = nevents;
+       set->exit_on_postmaster_death = false;
+
+       if (resowner != NULL)
+       {
+               ResourceOwnerRememberWaitEventSet(resowner, set);
+               set->owner = resowner;
+       }
+
+#if defined(WAIT_USE_EPOLL)
+       if (!AcquireExternalFD())
+               elog(ERROR, "AcquireExternalFD, for epoll_create1, failed: %m");
+       set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
+       if (set->epoll_fd < 0)
+       {
+               ReleaseExternalFD();
+               elog(ERROR, "epoll_create1 failed: %m");
+       }
+#elif defined(WAIT_USE_KQUEUE)
+       if (!AcquireExternalFD())
+               elog(ERROR, "AcquireExternalFD, for kqueue, failed: %m");
+       set->kqueue_fd = kqueue();
+       if (set->kqueue_fd < 0)
+       {
+               ReleaseExternalFD();
+               elog(ERROR, "kqueue failed: %m");
+       }
+       if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
+       {
+               int                     save_errno = errno;
+
+               close(set->kqueue_fd);
+               ReleaseExternalFD();
+               errno = save_errno;
+               elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
+       }
+       set->report_postmaster_not_running = false;
+#elif defined(WAIT_USE_WIN32)
+
+       /*
+        * To handle signals while waiting, we need to add a win32 specific event.
+        * We accounted for the additional event at the top of this routine. See
+        * port/win32/signal.c for more details.
+        *
+        * Note: pgwin32_signal_event should be first to ensure that it will be
+        * reported when multiple events are set.  We want to guarantee that
+        * pending signals are serviced.
+        */
+       set->handles[0] = pgwin32_signal_event;
+       StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
+#endif
+
+       return set;
+}
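
To illustrate the two lifetime modes mentioned above (the wrapper function is hypothetical; CurrentResourceOwner is the usual owner in backend code):

	/* Sketch only: resource-owner-tracked versus session-lifetime sets. */
	static void
	lifetime_example(void)
	{
		/* Released automatically when CurrentResourceOwner is released. */
		WaitEventSet *tmp_set = CreateWaitEventSet(CurrentResourceOwner, 2);

		/* NULL owner: session lifetime, the caller must free it explicitly. */
		WaitEventSet *session_set = CreateWaitEventSet(NULL, 2);

		FreeWaitEventSet(tmp_set);		/* explicit free is also allowed */
		FreeWaitEventSet(session_set);
	}
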
+
+/*
+ * Free a previously created WaitEventSet.
+ *
+ * Note: preferably, this shouldn't have to free any resources that could be
+ * inherited across an exec().  If it did, we'd likely leak those resources in
+ * many scenarios.  For the epoll case, we ensure that by setting EPOLL_CLOEXEC
+ * when the FD is created.  For the Windows case, we assume that the handles
+ * involved are non-inheritable.
+ */
+void
+FreeWaitEventSet(WaitEventSet *set)
+{
+       if (set->owner)
+       {
+               ResourceOwnerForgetWaitEventSet(set->owner, set);
+               set->owner = NULL;
+       }
+
+#if defined(WAIT_USE_EPOLL)
+       close(set->epoll_fd);
+       ReleaseExternalFD();
+#elif defined(WAIT_USE_KQUEUE)
+       close(set->kqueue_fd);
+       ReleaseExternalFD();
+#elif defined(WAIT_USE_WIN32)
+       for (WaitEvent *cur_event = set->events;
+                cur_event < (set->events + set->nevents);
+                cur_event++)
+       {
+               if (cur_event->events & WL_LATCH_SET)
+               {
+                       /* uses the latch's HANDLE */
+               }
+               else if (cur_event->events & WL_POSTMASTER_DEATH)
+               {
+                       /* uses PostmasterHandle */
+               }
+               else
+               {
+                       /* Clean up the event object we created for the socket */
+                       WSAEventSelect(cur_event->fd, NULL, 0);
+                       WSACloseEvent(set->handles[cur_event->pos + 1]);
+               }
+       }
+#endif
+
+       pfree(set);
+}
+
+/*
+ * Free a previously created WaitEventSet in a child process after a fork().
+ */
+void
+FreeWaitEventSetAfterFork(WaitEventSet *set)
+{
+#if defined(WAIT_USE_EPOLL)
+       close(set->epoll_fd);
+       ReleaseExternalFD();
+#elif defined(WAIT_USE_KQUEUE)
+       /* kqueues are not normally inherited by child processes */
+       ReleaseExternalFD();
+#endif
+
+       pfree(set);
+}
+
+/* ---
+ * Add an event to the set. Possible events are:
+ * - WL_LATCH_SET: Wait for the latch to be set
+ * - WL_POSTMASTER_DEATH: Wait for postmaster to die
+ * - WL_SOCKET_READABLE: Wait for socket to become readable,
+ *      can be combined in one event with other WL_SOCKET_* events
+ * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
+ *      can be combined with other WL_SOCKET_* events
+ * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
+ *      can be combined with other WL_SOCKET_* events (on non-Windows
+ *      platforms, this is the same as WL_SOCKET_WRITEABLE)
+ * - WL_SOCKET_ACCEPT: Wait for new connection to a server socket,
+ *      can be combined with other WL_SOCKET_* events (on non-Windows
+ *      platforms, this is the same as WL_SOCKET_READABLE)
+ * - WL_SOCKET_CLOSED: Wait for socket to be closed by remote peer.
+ * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
+ *
+ * Returns the offset in WaitEventSet->events (starting from 0), which can be
+ * used to modify previously added wait events using ModifyWaitEvent().
+ *
+ * In the WL_LATCH_SET case the latch must be owned by the current process,
+ * i.e. it must be a process-local latch initialized with InitLatch, or a
+ * shared latch associated with the current process by calling OwnLatch.
+ *
+ * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED/ACCEPT cases, EOF and error
+ * conditions cause the socket to be reported as readable/writable/connected,
+ * so that the caller can deal with the condition.
+ *
+ * The user_data pointer specified here will be set for the events returned
+ * by WaitEventSetWait(), making it easy to associate additional data with
+ * events.
+ */
+int
+AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
+                                 void *user_data)
+{
+       WaitEvent  *event;
+
+       /* not enough space */
+       Assert(set->nevents < set->nevents_space);
+
+       if (events == WL_EXIT_ON_PM_DEATH)
+       {
+               events = WL_POSTMASTER_DEATH;
+               set->exit_on_postmaster_death = true;
+       }
+
+       if (latch)
+       {
+               if (latch->owner_pid != MyProcPid)
+                       elog(ERROR, "cannot wait on a latch owned by another process");
+               if (set->latch)
+                       elog(ERROR, "cannot wait on more than one latch");
+               if ((events & WL_LATCH_SET) != WL_LATCH_SET)
+                       elog(ERROR, "latch events only support being set");
+       }
+       else
+       {
+               if (events & WL_LATCH_SET)
+                       elog(ERROR, "cannot wait on latch without a specified latch");
+       }
+
+       /* waiting for socket readiness without a socket indicates a bug */
+       if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
+               elog(ERROR, "cannot wait on socket event without a socket");
+
+       event = &set->events[set->nevents];
+       event->pos = set->nevents++;
+       event->fd = fd;
+       event->events = events;
+       event->user_data = user_data;
+#ifdef WIN32
+       event->reset = false;
+#endif
+
+       if (events == WL_LATCH_SET)
+       {
+               set->latch = latch;
+               set->latch_pos = event->pos;
+#if defined(WAIT_USE_SELF_PIPE)
+               event->fd = selfpipe_readfd;
+#elif defined(WAIT_USE_SIGNALFD)
+               event->fd = signal_fd;
+#else
+               event->fd = PGINVALID_SOCKET;
+#ifdef WAIT_USE_EPOLL
+               return event->pos;
+#endif
+#endif
+       }
+       else if (events == WL_POSTMASTER_DEATH)
+       {
+#ifndef WIN32
+               event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
+#endif
+       }
+
+       /* perform wait primitive specific initialization, if needed */
+#if defined(WAIT_USE_EPOLL)
+       WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
+#elif defined(WAIT_USE_KQUEUE)
+       WaitEventAdjustKqueue(set, event, 0);
+#elif defined(WAIT_USE_POLL)
+       WaitEventAdjustPoll(set, event);
+#elif defined(WAIT_USE_WIN32)
+       WaitEventAdjustWin32(set, event);
+#endif
+
+       return event->pos;
+}
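
A caller-side sketch tying the event types above together; MyLatch, ResetLatch() and WaitEventSetWait() are existing interfaces, while the socket argument and the PG_WAIT_EXTENSION wait_event_info value are assumptions chosen only for illustration:

	/* Sketch only: wait for our latch, socket readability, or PM death. */
	static void
	wait_for_latch_or_socket(pgsocket sock)
	{
		WaitEventSet *set = CreateWaitEventSet(CurrentResourceOwner, 3);
		WaitEvent	event;

		AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
		AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);
		AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, NULL, NULL);

		/* Wait up to one second; ask for at most one event back. */
		if (WaitEventSetWait(set, 1000L, &event, 1, PG_WAIT_EXTENSION) > 0)
		{
			if (event.events & WL_LATCH_SET)
				ResetLatch(MyLatch);
			else if (event.events & WL_SOCKET_READABLE)
			{
				/* data (or EOF) is available on sock */
			}
		}

		FreeWaitEventSet(set);
	}
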
+
+/*
+ * Change the event mask and, in the WL_LATCH_SET case, the latch associated
+ * with the WaitEvent.  The latch may be changed to NULL to disable the latch
+ * temporarily, and then set back to a latch later.
+ *
+ * 'pos' is the id returned by AddWaitEventToSet.
+ */
+void
+ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
+{
+       WaitEvent  *event;
+#if defined(WAIT_USE_KQUEUE)
+       int                     old_events;
+#endif
+
+       Assert(pos < set->nevents);
+
+       event = &set->events[pos];
+#if defined(WAIT_USE_KQUEUE)
+       old_events = event->events;
+#endif
+
+       /*
+        * Allow switching between WL_POSTMASTER_DEATH and WL_EXIT_ON_PM_DEATH.
+        *
+        * Note that because WL_EXIT_ON_PM_DEATH is mapped to WL_POSTMASTER_DEATH
+        * in AddWaitEventToSet(), this needs to be checked before the fast-path
+        * below that checks if 'events' has changed.
+        */
+       if (event->events == WL_POSTMASTER_DEATH)
+       {
+               if (events != WL_POSTMASTER_DEATH && events != WL_EXIT_ON_PM_DEATH)
+                       elog(ERROR, "cannot remove postmaster death event");
+               set->exit_on_postmaster_death = ((events & WL_EXIT_ON_PM_DEATH) != 0);
+               return;
+       }
+
+       /*
+        * If neither the event mask nor the associated latch changes, return
+        * early. That's an important optimization for some sockets, where
+        * ModifyWaitEvent is frequently used to switch from waiting for reads to
+        * waiting on writes.
+        */
+       if (events == event->events &&
+               (!(event->events & WL_LATCH_SET) || set->latch == latch))
+               return;
+
+       if (event->events & WL_LATCH_SET && events != event->events)
+               elog(ERROR, "cannot modify latch event");
+
+       /* FIXME: validate event mask */
+       event->events = events;
+
+       if (events == WL_LATCH_SET)
+       {
+               if (latch && latch->owner_pid != MyProcPid)
+                       elog(ERROR, "cannot wait on a latch owned by another process");
+               set->latch = latch;
+
+               /*
+                * On Unix, we don't need to modify the kernel object because the
+                * underlying pipe (if there is one) is the same for all latches so we
+                * can return immediately.  On Windows, we need to update our array of
+                * handles, but we leave the old one in place and tolerate spurious
+                * wakeups if the latch is disabled.
+                */
+#if defined(WAIT_USE_WIN32)
+               if (!latch)
+                       return;
+#else
+               return;
+#endif
+       }
+
+#if defined(WAIT_USE_EPOLL)
+       WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
+#elif defined(WAIT_USE_KQUEUE)
+       WaitEventAdjustKqueue(set, event, old_events);
+#elif defined(WAIT_USE_POLL)
+       WaitEventAdjustPoll(set, event);
+#elif defined(WAIT_USE_WIN32)
+       WaitEventAdjustWin32(set, event);
+#endif
+}
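
A sketch of the read/write switching pattern the fast-path comment above is aimed at; set and sock_pos (the value AddWaitEventToSet() returned for the socket) are assumptions:

	/* Sketch only: flip an existing socket event between read and write. */
	static void
	set_wait_direction(WaitEventSet *set, int sock_pos, bool want_write)
	{
		/* If the requested mask equals the current one, this returns early. */
		ModifyWaitEvent(set, sock_pos,
						want_write ? WL_SOCKET_WRITEABLE : WL_SOCKET_READABLE,
						NULL);
	}
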
+
+#if defined(WAIT_USE_EPOLL)
+/*
+ * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
+ */
+static void
+WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
+{
+       struct epoll_event epoll_ev;
+       int                     rc;
+
+       /* pointer to our event, returned by epoll_wait */
+       epoll_ev.data.ptr = event;
+       /* always wait for errors */
+       epoll_ev.events = EPOLLERR | EPOLLHUP;
+
+       /* prepare pollfd entry once */
+       if (event->events == WL_LATCH_SET)
+       {
+               Assert(set->latch != NULL);
+               epoll_ev.events |= EPOLLIN;
+       }
+       else if (event->events == WL_POSTMASTER_DEATH)
+       {
+               epoll_ev.events |= EPOLLIN;
+       }
+       else
+       {
+               Assert(event->fd != PGINVALID_SOCKET);
+               Assert(event->events & (WL_SOCKET_READABLE |
+                                                               WL_SOCKET_WRITEABLE |
+                                                               WL_SOCKET_CLOSED));
+
+               if (event->events & WL_SOCKET_READABLE)
+                       epoll_ev.events |= EPOLLIN;
+               if (event->events & WL_SOCKET_WRITEABLE)
+                       epoll_ev.events |= EPOLLOUT;
+               if (event->events & WL_SOCKET_CLOSED)
+                       epoll_ev.events |= EPOLLRDHUP;
+       }
+
+       /*
+        * Even though unused, we also pass epoll_ev as the data argument if
+        * EPOLL_CTL_DEL is passed as action.  There used to be an epoll bug
+        * requiring that, and actually it makes the code simpler...
+        */
+       rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);
+
+       if (rc < 0)
+               ereport(ERROR,
+                               (errcode_for_socket_access(),
+                                errmsg("%s() failed: %m",
+                                               "epoll_ctl")));
+}
+#endif
+
+#if defined(WAIT_USE_POLL)
+static void
+WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
+{
+       struct pollfd *pollfd = &set->pollfds[event->pos];
+
+       pollfd->revents = 0;
+       pollfd->fd = event->fd;
+
+       /* prepare pollfd entry once */
+       if (event->events == WL_LATCH_SET)
+       {
+               Assert(set->latch != NULL);
+               pollfd->events = POLLIN;
+       }
+       else if (event->events == WL_POSTMASTER_DEATH)
+       {
+               pollfd->events = POLLIN;
+       }
+       else
+       {
+               Assert(event->events & (WL_SOCKET_READABLE |
+                                                               WL_SOCKET_WRITEABLE |
+                                                               WL_SOCKET_CLOSED));
+               pollfd->events = 0;
+               if (event->events & WL_SOCKET_READABLE)
+                       pollfd->events |= POLLIN;
+               if (event->events & WL_SOCKET_WRITEABLE)
+                       pollfd->events |= POLLOUT;
+#ifdef POLLRDHUP
+               if (event->events & WL_SOCKET_CLOSED)
+                       pollfd->events |= POLLRDHUP;
+#endif
+       }
+
+       Assert(event->fd != PGINVALID_SOCKET);
+}
+#endif
+
+#if defined(WAIT_USE_KQUEUE)
+
+/*
+ * On most BSD family systems, the udata member of struct kevent is of type
+ * void *, so we could directly convert to/from WaitEvent *.  Unfortunately,
+ * NetBSD has it as intptr_t, so here we wallpaper over that difference with
+ * an lvalue cast.
+ */
+#define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))
+
+static inline void
+WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
+                                                WaitEvent *event)
+{
+       k_ev->ident = event->fd;
+       k_ev->filter = filter;
+       k_ev->flags = action;
+       k_ev->fflags = 0;
+       k_ev->data = 0;
+       AccessWaitEvent(k_ev) = event;
+}
+
+static inline void
+WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
+{
+       /* For now postmaster death can only be added, not removed. */
+       k_ev->ident = PostmasterPid;
+       k_ev->filter = EVFILT_PROC;
+       k_ev->flags = EV_ADD;
+       k_ev->fflags = NOTE_EXIT;
+       k_ev->data = 0;
+       AccessWaitEvent(k_ev) = event;
+}
+
+static inline void
+WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
+{
+       /* For now latch can only be added, not removed. */
+       k_ev->ident = SIGURG;
+       k_ev->filter = EVFILT_SIGNAL;
+       k_ev->flags = EV_ADD;
+       k_ev->fflags = 0;
+       k_ev->data = 0;
+       AccessWaitEvent(k_ev) = event;
+}
+
+/*
+ * old_events is the previous event mask, used to compute what has changed.
+ */
+static void
+WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
+{
+       int                     rc;
+       struct kevent k_ev[2];
+       int                     count = 0;
+       bool            new_filt_read = false;
+       bool            old_filt_read = false;
+       bool            new_filt_write = false;
+       bool            old_filt_write = false;
+
+       if (old_events == event->events)
+               return;
+
+       Assert(event->events != WL_LATCH_SET || set->latch != NULL);
+       Assert(event->events == WL_LATCH_SET ||
+                  event->events == WL_POSTMASTER_DEATH ||
+                  (event->events & (WL_SOCKET_READABLE |
+                                                        WL_SOCKET_WRITEABLE |
+                                                        WL_SOCKET_CLOSED)));
+
+       if (event->events == WL_POSTMASTER_DEATH)
+       {
+               /*
+                * Unlike all the other implementations, we detect postmaster death
+                * using process notification instead of waiting on the postmaster
+                * alive pipe.
+                */
+               WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
+       }
+       else if (event->events == WL_LATCH_SET)
+       {
+               /* We detect latch wakeup using a signal event. */
+               WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
+       }
+       else
+       {
+               /*
+                * We need to compute the adds and deletes required to get from the
+                * old event mask to the new event mask, since kevent treats readable
+                * and writable as separate events.
+                */
+               if (old_events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
+                       old_filt_read = true;
+               if (event->events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
+                       new_filt_read = true;
+               if (old_events & WL_SOCKET_WRITEABLE)
+                       old_filt_write = true;
+               if (event->events & WL_SOCKET_WRITEABLE)
+                       new_filt_write = true;
+               if (old_filt_read && !new_filt_read)
+                       WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
+                                                                        event);
+               else if (!old_filt_read && new_filt_read)
+                       WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
+                                                                        event);
+               if (old_filt_write && !new_filt_write)
+                       WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
+                                                                        event);
+               else if (!old_filt_write && new_filt_write)
+                       WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
+                                                                        event);
+       }
+
+       /* For WL_SOCKET_READABLE -> WL_SOCKET_CLOSED, no change needed. */
+       if (count == 0)
+               return;
+
+       Assert(count <= 2);
+
+       rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);
+
+       /*
+        * When adding the postmaster's pid, we have to consider that it might
+        * already have exited and perhaps even been replaced by another process
+        * with the same pid.  If so, we have to defer reporting this as an event
+        * until the next call to WaitEventSetWaitBlock().
+        */
+
+       if (rc < 0)
+       {
+               if (event->events == WL_POSTMASTER_DEATH &&
+                       (errno == ESRCH || errno == EACCES))
+                       set->report_postmaster_not_running = true;
+               else
+                       ereport(ERROR,
+                                       (errcode_for_socket_access(),
+                                        errmsg("%s() failed: %m",
+                                                       "kevent")));
+       }
+       else if (event->events == WL_POSTMASTER_DEATH &&
+                        PostmasterPid != getppid() &&
+                        !PostmasterIsAlive())
+       {
+               /*
+                * The extra PostmasterIsAliveInternal() check prevents false alarms
+                * on systems that give a different value for getppid() while being
+                * traced by a debugger.
+                */
+               set->report_postmaster_not_running = true;
+       }
+}
+
+#endif
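
The EVFILT_PROC/NOTE_EXIT mechanism used above for postmaster death can be tried in isolation. A standalone BSD/macOS sketch (outside the PostgreSQL tree; names are illustrative) that waits for an arbitrary pid to exit, treating ESRCH as "already gone" just like the code above, could look like this:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Returns when the given pid exits (or immediately if it is already gone). */
    static void
    wait_for_pid_exit(pid_t pid)
    {
        int         kq = kqueue();
        struct kevent kev;

        if (kq < 0)
        {
            perror("kqueue");
            exit(1);
        }

        EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
        {
            /* ESRCH: the process exited before we could register interest. */
            if (errno == ESRCH)
                return;
            perror("kevent(register)");
            exit(1);
        }

        /* Block until the NOTE_EXIT notification arrives. */
        if (kevent(kq, NULL, 0, &kev, 1, NULL) < 0)
        {
            perror("kevent(wait)");
            exit(1);
        }
    }
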
+
+#if defined(WAIT_USE_WIN32)
+static void
+WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
+{
+       HANDLE     *handle = &set->handles[event->pos + 1];
+
+       if (event->events == WL_LATCH_SET)
+       {
+               Assert(set->latch != NULL);
+               *handle = set->latch->event;
+       }
+       else if (event->events == WL_POSTMASTER_DEATH)
+       {
+               *handle = PostmasterHandle;
+       }
+       else
+       {
+               int                     flags = FD_CLOSE;       /* always check for errors/EOF */
+
+               if (event->events & WL_SOCKET_READABLE)
+                       flags |= FD_READ;
+               if (event->events & WL_SOCKET_WRITEABLE)
+                       flags |= FD_WRITE;
+               if (event->events & WL_SOCKET_CONNECTED)
+                       flags |= FD_CONNECT;
+               if (event->events & WL_SOCKET_ACCEPT)
+                       flags |= FD_ACCEPT;
+
+               if (*handle == WSA_INVALID_EVENT)
+               {
+                       *handle = WSACreateEvent();
+                       if (*handle == WSA_INVALID_EVENT)
+                               elog(ERROR, "failed to create event for socket: error code %d",
+                                        WSAGetLastError());
+               }
+               if (WSAEventSelect(event->fd, *handle, flags) != 0)
+                       elog(ERROR, "failed to set up event for socket: error code %d",
+                                WSAGetLastError());
+
+               Assert(event->fd != PGINVALID_SOCKET);
+       }
+}
+#endif
+
+/*
+ * Wait for events added to the set to happen, or until the timeout is
+ * reached.  At most nevents of the events that occurred are returned.
+ *
+ * If timeout = -1, block until an event occurs; if 0, check sockets for
+ * readiness, but don't block; if > 0, block for at most timeout milliseconds.
+ *
+ * Returns the number of events that occurred, or 0 if the timeout was reached.
+ *
+ * Returned events will have the fd, pos, user_data fields set to the
+ * values associated with the registered event.
+ */
+int
+WaitEventSetWait(WaitEventSet *set, long timeout,
+                                WaitEvent *occurred_events, int nevents,
+                                uint32 wait_event_info)
+{
+       int                     returned_events = 0;
+       instr_time      start_time;
+       instr_time      cur_time;
+       long            cur_timeout = -1;
+
+       Assert(nevents > 0);
+
+       /*
+        * Initialize timeout if requested.  We must record the current time so
+        * that we can determine the remaining timeout if interrupted.
+        */
+       if (timeout >= 0)
+       {
+               INSTR_TIME_SET_CURRENT(start_time);
+               Assert(timeout >= 0 && timeout <= INT_MAX);
+               cur_timeout = timeout;
+       }
+       else
+               INSTR_TIME_SET_ZERO(start_time);
+
+       pgstat_report_wait_start(wait_event_info);
+
+#ifndef WIN32
+       waiting = true;
+#else
+       /* Ensure that signals are serviced even if latch is already set */
+       pgwin32_dispatch_queued_signals();
+#endif
+       while (returned_events == 0)
+       {
+               int                     rc;
+
+               /*
+                * Check if the latch is set already first.  If so, we either exit
+                * immediately or ask the kernel for further events available right
+                * now without waiting, depending on how many events the caller wants.
+                *
+                * If someone sets the latch between this and the
+                * WaitEventSetWaitBlock() below, the setter will write a byte to the
+                * pipe (or signal us and the signal handler will do that), and the
+                * readiness routine will return immediately.
+                *
+                * On unix, if there's a pending byte in the self pipe, we'll notice
+                * whenever blocking. Only clearing the pipe in that case avoids
+                * having to drain it every time WaitLatchOrSocket() is used. Should
+                * the pipe-buffer fill up we're still ok, because the pipe is in
+                * nonblocking mode. It's unlikely for that to happen, because the
+                * self pipe isn't filled unless we're blocking (waiting = true), or
+                * from inside a signal handler in latch_sigurg_handler().
+                *
+                * On windows, we'll also notice if there's a pending event for the
+                * latch when blocking, but there's no danger of anything filling up,
+                * as "Setting an event that is already set has no effect.".
+                *
+                * Note: we assume that the kernel calls involved in latch management
+                * will provide adequate synchronization on machines with weak memory
+                * ordering, so that we cannot miss seeing is_set if a notification
+                * has already been queued.
+                */
+               if (set->latch && !set->latch->is_set)
+               {
+                       /* about to sleep on a latch */
+                       set->latch->maybe_sleeping = true;
+                       pg_memory_barrier();
+                       /* and recheck */
+               }
+
+               if (set->latch && set->latch->is_set)
+               {
+                       occurred_events->fd = PGINVALID_SOCKET;
+                       occurred_events->pos = set->latch_pos;
+                       occurred_events->user_data =
+                               set->events[set->latch_pos].user_data;
+                       occurred_events->events = WL_LATCH_SET;
+                       occurred_events++;
+                       returned_events++;
+
+                       /* could have been set above */
+                       set->latch->maybe_sleeping = false;
+
+                       if (returned_events == nevents)
+                               break;                  /* output buffer full already */
+
+                       /*
+                        * Even though we already have an event, we'll poll just once with
+                        * zero timeout to see what non-latch events we can fit into the
+                        * output buffer at the same time.
+                        */
+                       cur_timeout = 0;
+                       timeout = 0;
+               }
+
+               /*
+                * Wait for events using the readiness primitive chosen at the top of
+                * this file.  If -1 is returned, a timeout has occurred; if 0, we have
+                * to retry; anything >= 1 is the number of returned events.
+                */
+               rc = WaitEventSetWaitBlock(set, cur_timeout,
+                                                                  occurred_events, nevents - returned_events);
+
+               if (set->latch &&
+                       set->latch->maybe_sleeping)
+                       set->latch->maybe_sleeping = false;
+
+               if (rc == -1)
+                       break;                          /* timeout occurred */
+               else
+                       returned_events += rc;
+
+               /* If we're not done, update cur_timeout for next iteration */
+               if (returned_events == 0 && timeout >= 0)
+               {
+                       INSTR_TIME_SET_CURRENT(cur_time);
+                       INSTR_TIME_SUBTRACT(cur_time, start_time);
+                       cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
+                       if (cur_timeout <= 0)
+                               break;
+               }
+       }
+#ifndef WIN32
+       waiting = false;
+#endif
+
+       pgstat_report_wait_end();
+
+       return returned_events;
+}
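
To make the loop above concrete, here is a hedged usage sketch of how a caller typically drives this API, using only functions declared in the new waiteventset.h. It assumes a normal backend context (MyLatch and CurrentResourceOwner available), uses 0 as a placeholder wait_event_info, and the function name is illustrative:

    /* A minimal sketch of a long-lived WaitEventSet, assuming backend context. */
    static void
    wait_for_latch_or_socket(pgsocket sock)
    {
        WaitEventSet *set;
        WaitEvent   event;

        set = CreateWaitEventSet(CurrentResourceOwner, 3);
        AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
        AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, NULL, NULL);
        AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);

        for (;;)
        {
            /* block indefinitely; 0 is a placeholder wait_event_info */
            (void) WaitEventSetWait(set, -1, &event, 1, 0);

            if (event.events & WL_LATCH_SET)
            {
                ResetLatch(MyLatch);
                CHECK_FOR_INTERRUPTS();
            }
            if (event.events & WL_SOCKET_READABLE)
                break;          /* caller can now read from sock */
        }

        FreeWaitEventSet(set);
    }
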
+
+
+#if defined(WAIT_USE_EPOLL)
+
+/*
+ * Wait using linux's epoll_wait(2).
+ *
+ * This is the preferable wait method, as several readiness notifications are
+ * delivered at once, without having to iterate through all of set->events.
+ * The returned epoll_event structs contain a pointer to our events, making
+ * association easy.
+ */
+static inline int
+WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
+                                         WaitEvent *occurred_events, int nevents)
+{
+       int                     returned_events = 0;
+       int                     rc;
+       WaitEvent  *cur_event;
+       struct epoll_event *cur_epoll_event;
+
+       /* Sleep */
+       rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
+                                       Min(nevents, set->nevents_space), cur_timeout);
+
+       /* Check return code */
+       if (rc < 0)
+       {
+               /* EINTR is okay, otherwise complain */
+               if (errno != EINTR)
+               {
+                       waiting = false;
+                       ereport(ERROR,
+                                       (errcode_for_socket_access(),
+                                        errmsg("%s() failed: %m",
+                                                       "epoll_wait")));
+               }
+               return 0;
+       }
+       else if (rc == 0)
+       {
+               /* timeout exceeded */
+               return -1;
+       }
+
+       /*
+        * At least one event occurred, iterate over the returned epoll events
+        * until they're either all processed, or we've returned all the events
+        * the caller desired.
+        */
+       for (cur_epoll_event = set->epoll_ret_events;
+                cur_epoll_event < (set->epoll_ret_events + rc) &&
+                returned_events < nevents;
+                cur_epoll_event++)
+       {
+               /* epoll's data pointer is set to the associated WaitEvent */
+               cur_event = (WaitEvent *) cur_epoll_event->data.ptr;
+
+               occurred_events->pos = cur_event->pos;
+               occurred_events->user_data = cur_event->user_data;
+               occurred_events->events = 0;
+
+               if (cur_event->events == WL_LATCH_SET &&
+                       cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
+               {
+                       /* Drain the signalfd. */
+                       drain();
+
+                       if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
+                       {
+                               occurred_events->fd = PGINVALID_SOCKET;
+                               occurred_events->events = WL_LATCH_SET;
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+               else if (cur_event->events == WL_POSTMASTER_DEATH &&
+                                cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
+               {
+                       /*
+                        * We expect an EPOLLHUP when the remote end is closed, but
+                        * because we don't expect the pipe to become readable or to have
+                        * any errors either, treat those cases as postmaster death, too.
+                        *
+                        * Be paranoid about a spurious event signaling the postmaster as
+                        * being dead.  There have been reports about that happening with
+                        * older primitives (select(2) to be specific), and a spurious
+                        * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
+                        * cost much.
+                        */
+                       if (!PostmasterIsAliveInternal())
+                       {
+                               if (set->exit_on_postmaster_death)
+                                       proc_exit(1);
+                               occurred_events->fd = PGINVALID_SOCKET;
+                               occurred_events->events = WL_POSTMASTER_DEATH;
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+               else if (cur_event->events & (WL_SOCKET_READABLE |
+                                                                         WL_SOCKET_WRITEABLE |
+                                                                         WL_SOCKET_CLOSED))
+               {
+                       Assert(cur_event->fd != PGINVALID_SOCKET);
+
+                       if ((cur_event->events & WL_SOCKET_READABLE) &&
+                               (cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
+                       {
+                               /* data available in socket, or EOF */
+                               occurred_events->events |= WL_SOCKET_READABLE;
+                       }
+
+                       if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
+                               (cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
+                       {
+                               /* writable, or EOF */
+                               occurred_events->events |= WL_SOCKET_WRITEABLE;
+                       }
+
+                       if ((cur_event->events & WL_SOCKET_CLOSED) &&
+                               (cur_epoll_event->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)))
+                       {
+                               /* remote peer shut down, or error */
+                               occurred_events->events |= WL_SOCKET_CLOSED;
+                       }
+
+                       if (occurred_events->events != 0)
+                       {
+                               occurred_events->fd = cur_event->fd;
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+       }
+
+       return returned_events;
+}
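
The data.ptr association that the loop above relies on is easy to see in a standalone sketch (plain Linux C, outside the PostgreSQL tree; struct and function names are illustrative). Note how EPOLLERR and EPOLLHUP are folded into "readable"/"writable", leaving the subsequent read or write to report the actual error:

    #include <stdio.h>
    #include <sys/epoll.h>

    struct my_conn
    {
        int         fd;
        const char *name;
    };

    /* Wait once and report which registered connection(s) became ready. */
    static void
    wait_once(int epfd)
    {
        struct epoll_event revents[8];
        int         nready = epoll_wait(epfd, revents, 8, -1);

        if (nready < 0)
        {
            perror("epoll_wait");
            return;
        }

        for (int i = 0; i < nready; i++)
        {
            /* data.ptr was set to the owning struct at EPOLL_CTL_ADD time */
            struct my_conn *conn = (struct my_conn *) revents[i].data.ptr;

            if (revents[i].events & (EPOLLIN | EPOLLERR | EPOLLHUP))
                printf("%s: readable (or error/EOF)\n", conn->name);
            if (revents[i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP))
                printf("%s: writable (or error/EOF)\n", conn->name);
        }
    }
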
+
+#elif defined(WAIT_USE_KQUEUE)
+
+/*
+ * Wait using kevent(2) on BSD-family systems and macOS.
+ *
+ * For now this mirrors the epoll code, but in future it could modify the fd
+ * set in the same call to kevent as it uses for waiting instead of doing that
+ * with separate system calls.
+ */
+static int
+WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
+                                         WaitEvent *occurred_events, int nevents)
+{
+       int                     returned_events = 0;
+       int                     rc;
+       WaitEvent  *cur_event;
+       struct kevent *cur_kqueue_event;
+       struct timespec timeout;
+       struct timespec *timeout_p;
+
+       if (cur_timeout < 0)
+               timeout_p = NULL;
+       else
+       {
+               timeout.tv_sec = cur_timeout / 1000;
+               timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
+               timeout_p = &timeout;
+       }
+
+       /*
+        * Report postmaster events discovered by WaitEventAdjustKqueue() or an
+        * earlier call to WaitEventSetWait().
+        */
+       if (unlikely(set->report_postmaster_not_running))
+       {
+               if (set->exit_on_postmaster_death)
+                       proc_exit(1);
+               occurred_events->fd = PGINVALID_SOCKET;
+               occurred_events->events = WL_POSTMASTER_DEATH;
+               return 1;
+       }
+
+       /* Sleep */
+       rc = kevent(set->kqueue_fd, NULL, 0,
+                               set->kqueue_ret_events,
+                               Min(nevents, set->nevents_space),
+                               timeout_p);
+
+       /* Check return code */
+       if (rc < 0)
+       {
+               /* EINTR is okay, otherwise complain */
+               if (errno != EINTR)
+               {
+                       waiting = false;
+                       ereport(ERROR,
+                                       (errcode_for_socket_access(),
+                                        errmsg("%s() failed: %m",
+                                                       "kevent")));
+               }
+               return 0;
+       }
+       else if (rc == 0)
+       {
+               /* timeout exceeded */
+               return -1;
+       }
+
+       /*
+        * At least one event occurred, iterate over the returned kqueue events
+        * until they're either all processed, or we've returned all the events
+        * the caller desired.
+        */
+       for (cur_kqueue_event = set->kqueue_ret_events;
+                cur_kqueue_event < (set->kqueue_ret_events + rc) &&
+                returned_events < nevents;
+                cur_kqueue_event++)
+       {
+               /* kevent's udata points to the associated WaitEvent */
+               cur_event = AccessWaitEvent(cur_kqueue_event);
+
+               occurred_events->pos = cur_event->pos;
+               occurred_events->user_data = cur_event->user_data;
+               occurred_events->events = 0;
+
+               if (cur_event->events == WL_LATCH_SET &&
+                       cur_kqueue_event->filter == EVFILT_SIGNAL)
+               {
+                       if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
+                       {
+                               occurred_events->fd = PGINVALID_SOCKET;
+                               occurred_events->events = WL_LATCH_SET;
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+               else if (cur_event->events == WL_POSTMASTER_DEATH &&
+                                cur_kqueue_event->filter == EVFILT_PROC &&
+                                (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
+               {
+                       /*
+                        * The kernel will tell this kqueue object only once about the
+                        * exit of the postmaster, so let's remember that for next time so
+                        * that we provide level-triggered semantics.
+                        */
+                       set->report_postmaster_not_running = true;
+
+                       if (set->exit_on_postmaster_death)
+                               proc_exit(1);
+                       occurred_events->fd = PGINVALID_SOCKET;
+                       occurred_events->events = WL_POSTMASTER_DEATH;
+                       occurred_events++;
+                       returned_events++;
+               }
+               else if (cur_event->events & (WL_SOCKET_READABLE |
+                                                                         WL_SOCKET_WRITEABLE |
+                                                                         WL_SOCKET_CLOSED))
+               {
+                       Assert(cur_event->fd >= 0);
+
+                       if ((cur_event->events & WL_SOCKET_READABLE) &&
+                               (cur_kqueue_event->filter == EVFILT_READ))
+                       {
+                               /* readable, or EOF */
+                               occurred_events->events |= WL_SOCKET_READABLE;
+                       }
+
+                       if ((cur_event->events & WL_SOCKET_CLOSED) &&
+                               (cur_kqueue_event->filter == EVFILT_READ) &&
+                               (cur_kqueue_event->flags & EV_EOF))
+                       {
+                               /* the remote peer has shut down */
+                               occurred_events->events |= WL_SOCKET_CLOSED;
+                       }
+
+                       if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
+                               (cur_kqueue_event->filter == EVFILT_WRITE))
+                       {
+                               /* writable, or EOF */
+                               occurred_events->events |= WL_SOCKET_WRITEABLE;
+                       }
+
+                       if (occurred_events->events != 0)
+                       {
+                               occurred_events->fd = cur_event->fd;
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+       }
+
+       return returned_events;
+}
+
+#elif defined(WAIT_USE_POLL)
+
+/*
+ * Wait using poll(2).
+ *
+ * This allows receiving readiness notifications for several events at once,
+ * but requires iterating through all of set->pollfds.
+ */
+static inline int
+WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
+                                         WaitEvent *occurred_events, int nevents)
+{
+       int                     returned_events = 0;
+       int                     rc;
+       WaitEvent  *cur_event;
+       struct pollfd *cur_pollfd;
+
+       /* Sleep */
+       rc = poll(set->pollfds, set->nevents, (int) cur_timeout);
+
+       /* Check return code */
+       if (rc < 0)
+       {
+               /* EINTR is okay, otherwise complain */
+               if (errno != EINTR)
+               {
+                       waiting = false;
+                       ereport(ERROR,
+                                       (errcode_for_socket_access(),
+                                        errmsg("%s() failed: %m",
+                                                       "poll")));
+               }
+               return 0;
+       }
+       else if (rc == 0)
+       {
+               /* timeout exceeded */
+               return -1;
+       }
+
+       for (cur_event = set->events, cur_pollfd = set->pollfds;
+                cur_event < (set->events + set->nevents) &&
+                returned_events < nevents;
+                cur_event++, cur_pollfd++)
+       {
+               /* no activity on this FD, skip */
+               if (cur_pollfd->revents == 0)
+                       continue;
+
+               occurred_events->pos = cur_event->pos;
+               occurred_events->user_data = cur_event->user_data;
+               occurred_events->events = 0;
+
+               if (cur_event->events == WL_LATCH_SET &&
+                       (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
+               {
+                       /* There's data in the self-pipe, clear it. */
+                       drain();
+
+                       if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
+                       {
+                               occurred_events->fd = PGINVALID_SOCKET;
+                               occurred_events->events = WL_LATCH_SET;
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+               else if (cur_event->events == WL_POSTMASTER_DEATH &&
+                                (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
+               {
+                       /*
+                        * We expect a POLLHUP when the remote end is closed, but because
+                        * we don't expect the pipe to become readable or to have any
+                        * errors either, treat those cases as postmaster death, too.
+                        *
+                        * Be paranoid about a spurious event signaling the postmaster as
+                        * being dead.  There have been reports about that happening with
+                        * older primitives (select(2) to be specific), and a spurious
+                        * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
+                        * cost much.
+                        */
+                       if (!PostmasterIsAliveInternal())
+                       {
+                               if (set->exit_on_postmaster_death)
+                                       proc_exit(1);
+                               occurred_events->fd = PGINVALID_SOCKET;
+                               occurred_events->events = WL_POSTMASTER_DEATH;
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+               else if (cur_event->events & (WL_SOCKET_READABLE |
+                                                                         WL_SOCKET_WRITEABLE |
+                                                                         WL_SOCKET_CLOSED))
+               {
+                       int                     errflags = POLLHUP | POLLERR | POLLNVAL;
+
+                       Assert(cur_event->fd >= PGINVALID_SOCKET);
+
+                       if ((cur_event->events & WL_SOCKET_READABLE) &&
+                               (cur_pollfd->revents & (POLLIN | errflags)))
+                       {
+                               /* data available in socket, or EOF */
+                               occurred_events->events |= WL_SOCKET_READABLE;
+                       }
+
+                       if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
+                               (cur_pollfd->revents & (POLLOUT | errflags)))
+                       {
+                               /* writeable, or EOF */
+                               occurred_events->events |= WL_SOCKET_WRITEABLE;
+                       }
+
+#ifdef POLLRDHUP
+                       if ((cur_event->events & WL_SOCKET_CLOSED) &&
+                               (cur_pollfd->revents & (POLLRDHUP | errflags)))
+                       {
+                               /* remote peer closed, or error */
+                               occurred_events->events |= WL_SOCKET_CLOSED;
+                       }
+#endif
+
+                       if (occurred_events->events != 0)
+                       {
+                               occurred_events->fd = cur_event->fd;
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+       }
+       return returned_events;
+}
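
The POLLRDHUP handling above is the only part of the poll() path that is not plain POSIX. A standalone sketch (outside the PostgreSQL tree; the function name is illustrative) of detecting "peer closed" with poll(), using the same #ifdef guard for platforms that lack POLLRDHUP, could look like this:

    #define _GNU_SOURCE             /* for POLLRDHUP on Linux */
    #include <poll.h>
    #include <stdbool.h>

    /*
     * Check whether the peer has shut down its end of the socket, without
     * consuming any data.  Returns false if the platform cannot tell.
     */
    static bool
    peer_has_closed(int sock)
    {
    #ifdef POLLRDHUP
        struct pollfd pfd;

        pfd.fd = sock;
        pfd.events = POLLRDHUP;
        pfd.revents = 0;

        if (poll(&pfd, 1, 0) > 0 &&
            (pfd.revents & (POLLRDHUP | POLLHUP | POLLERR | POLLNVAL)))
            return true;
    #endif
        return false;
    }
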
+
+#elif defined(WAIT_USE_WIN32)
+
+/*
+ * Wait using Windows' WaitForMultipleObjects().  Each call only "consumes" one
+ * event, so we keep calling until we've filled up our output buffer to match
+ * the behavior of the other implementations.
+ *
+ * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273
+ */
+static inline int
+WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
+                                         WaitEvent *occurred_events, int nevents)
+{
+       int                     returned_events = 0;
+       DWORD           rc;
+       WaitEvent  *cur_event;
+
+       /* Reset any wait events that need it */
+       for (cur_event = set->events;
+                cur_event < (set->events + set->nevents);
+                cur_event++)
+       {
+               if (cur_event->reset)
+               {
+                       WaitEventAdjustWin32(set, cur_event);
+                       cur_event->reset = false;
+               }
+
+               /*
+                * We associate the socket with a new event handle for each
+                * WaitEventSet.  FD_CLOSE is only generated once if the other end
+                * closes gracefully.  Therefore we might miss the FD_CLOSE
+                * notification, if it was delivered to another event after we stopped
+                * waiting for it.  Close that race by peeking for EOF after setting
+                * up this handle to receive notifications, and before entering the
+                * sleep.
+                *
+                * XXX If we had one event handle for the lifetime of a socket, we
+                * wouldn't need this.
+                */
+               if (cur_event->events & WL_SOCKET_READABLE)
+               {
+                       char            c;
+                       WSABUF          buf;
+                       DWORD           received;
+                       DWORD           flags;
+
+                       buf.buf = &c;
+                       buf.len = 1;
+                       flags = MSG_PEEK;
+                       if (WSARecv(cur_event->fd, &buf, 1, &received, &flags, NULL, NULL) == 0)
+                       {
+                               occurred_events->pos = cur_event->pos;
+                               occurred_events->user_data = cur_event->user_data;
+                               occurred_events->events = WL_SOCKET_READABLE;
+                               occurred_events->fd = cur_event->fd;
+                               return 1;
+                       }
+               }
+
+               /*
+                * Windows does not guarantee to log an FD_WRITE network event
+                * indicating that more data can be sent unless the previous send()
+                * failed with WSAEWOULDBLOCK.  While our caller might well have made
+                * such a call, we cannot assume that here.  Therefore, if waiting for
+                * write-ready, force the issue by doing a dummy send().  If the dummy
+                * send() succeeds, assume that the socket is in fact write-ready, and
+                * return immediately.  Also, if it fails with something other than
+                * WSAEWOULDBLOCK, return a write-ready indication to let our caller
+                * deal with the error condition.
+                */
+               if (cur_event->events & WL_SOCKET_WRITEABLE)
+               {
+                       char            c;
+                       WSABUF          buf;
+                       DWORD           sent;
+                       int                     r;
+
+                       buf.buf = &c;
+                       buf.len = 0;
+
+                       r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
+                       if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
+                       {
+                               occurred_events->pos = cur_event->pos;
+                               occurred_events->user_data = cur_event->user_data;
+                               occurred_events->events = WL_SOCKET_WRITEABLE;
+                               occurred_events->fd = cur_event->fd;
+                               return 1;
+                       }
+               }
+       }
+
+       /*
+        * Sleep.
+        *
+        * Need to wait for ->nevents + 1, because signal handle is in [0].
+        */
+       rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
+                                                               cur_timeout);
+
+       /* Check return code */
+       if (rc == WAIT_FAILED)
+               elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
+                        GetLastError());
+       else if (rc == WAIT_TIMEOUT)
+       {
+               /* timeout exceeded */
+               return -1;
+       }
+
+       if (rc == WAIT_OBJECT_0)
+       {
+               /* Service newly-arrived signals */
+               pgwin32_dispatch_queued_signals();
+               return 0;                               /* retry */
+       }
+
+       /*
+        * With an offset of one, due to the always present pgwin32_signal_event,
+        * the handle offset directly corresponds to a wait event.
+        */
+       cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
+
+       for (;;)
+       {
+               int                     next_pos;
+               int                     count;
+
+               occurred_events->pos = cur_event->pos;
+               occurred_events->user_data = cur_event->user_data;
+               occurred_events->events = 0;
+
+               if (cur_event->events == WL_LATCH_SET)
+               {
+                       /*
+                        * We cannot use set->latch->event to reset the fired event if we
+                        * aren't waiting on this latch now.
+                        */
+                       if (!ResetEvent(set->handles[cur_event->pos + 1]))
+                               elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
+
+                       if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
+                       {
+                               occurred_events->fd = PGINVALID_SOCKET;
+                               occurred_events->events = WL_LATCH_SET;
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+               else if (cur_event->events == WL_POSTMASTER_DEATH)
+               {
+                       /*
+                        * Postmaster apparently died.  Since the consequences of falsely
+                        * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
+                        * take the trouble to positively verify this with
+                        * PostmasterIsAlive(), even though there is no known reason to
+                        * think that the event could be falsely set on Windows.
+                        */
+                       if (!PostmasterIsAliveInternal())
+                       {
+                               if (set->exit_on_postmaster_death)
+                                       proc_exit(1);
+                               occurred_events->fd = PGINVALID_SOCKET;
+                               occurred_events->events = WL_POSTMASTER_DEATH;
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+               else if (cur_event->events & WL_SOCKET_MASK)
+               {
+                       WSANETWORKEVENTS resEvents;
+                       HANDLE          handle = set->handles[cur_event->pos + 1];
+
+                       Assert(cur_event->fd);
+
+                       occurred_events->fd = cur_event->fd;
+
+                       ZeroMemory(&resEvents, sizeof(resEvents));
+                       if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
+                               elog(ERROR, "failed to enumerate network events: error code %d",
+                                        WSAGetLastError());
+                       if ((cur_event->events & WL_SOCKET_READABLE) &&
+                               (resEvents.lNetworkEvents & FD_READ))
+                       {
+                               /* data available in socket */
+                               occurred_events->events |= WL_SOCKET_READABLE;
+
+                               /*------
+                                * WaitForMultipleObjects doesn't guarantee that a read event
+                                * will be returned if the latch is set at the same time.  Even
+                                * if it did, the caller might drop that event expecting it to
+                                * reoccur on next call.  So, we must force the event to be
+                                * reset if this WaitEventSet is used again in order to avoid
+                                * an indefinite hang.
+                                *
+                                * Refer
+                                * https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
+                                * for the behavior of socket events.
+                                *------
+                                */
+                               cur_event->reset = true;
+                       }
+                       if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
+                               (resEvents.lNetworkEvents & FD_WRITE))
+                       {
+                               /* writeable */
+                               occurred_events->events |= WL_SOCKET_WRITEABLE;
+                       }
+                       if ((cur_event->events & WL_SOCKET_CONNECTED) &&
+                               (resEvents.lNetworkEvents & FD_CONNECT))
+                       {
+                               /* connected */
+                               occurred_events->events |= WL_SOCKET_CONNECTED;
+                       }
+                       if ((cur_event->events & WL_SOCKET_ACCEPT) &&
+                               (resEvents.lNetworkEvents & FD_ACCEPT))
+                       {
+                               /* incoming connection could be accepted */
+                               occurred_events->events |= WL_SOCKET_ACCEPT;
+                       }
+                       if (resEvents.lNetworkEvents & FD_CLOSE)
+                       {
+                               /* EOF/error, so signal all caller-requested socket flags */
+                               occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
+                       }
+
+                       if (occurred_events->events != 0)
+                       {
+                               occurred_events++;
+                               returned_events++;
+                       }
+               }
+
+               /* Is the output buffer full? */
+               if (returned_events == nevents)
+                       break;
+
+               /* Have we run out of possible events? */
+               next_pos = cur_event->pos + 1;
+               if (next_pos == set->nevents)
+                       break;
+
+               /*
+                * Poll the rest of the event handles in the array starting at
+                * next_pos being careful to skip over the initial signal handle too.
+                * This time we use a zero timeout.
+                */
+               count = set->nevents - next_pos;
+               rc = WaitForMultipleObjects(count,
+                                                                       set->handles + 1 + next_pos,
+                                                                       false,
+                                                                       0);
+
+               /*
+                * We don't distinguish between errors and WAIT_TIMEOUT here because
+                * we already have events to report.
+                */
+               if (rc < WAIT_OBJECT_0 || rc >= WAIT_OBJECT_0 + count)
+                       break;
+
+               /* We have another event to decode. */
+               cur_event = &set->events[next_pos + (rc - WAIT_OBJECT_0)];
+       }
+
+       return returned_events;
+}
+#endif
+
+/*
+ * Return whether the current build options can report WL_SOCKET_CLOSED.
+ */
+bool
+WaitEventSetCanReportClosed(void)
+{
+#if (defined(WAIT_USE_POLL) && defined(POLLRDHUP)) || \
+       defined(WAIT_USE_EPOLL) || \
+       defined(WAIT_USE_KQUEUE)
+       return true;
+#else
+       return false;
+#endif
+}
+
+/*
+ * Get the number of wait events registered in a given WaitEventSet.
+ */
+int
+GetNumRegisteredWaitEvents(WaitEventSet *set)
+{
+       return set->nevents;
+}
+
+#if defined(WAIT_USE_SELF_PIPE)
+
+/*
+ * SetLatch uses SIGURG to wake up the process waiting on the latch.
+ *
+ * Wake up WaitLatch, if we're waiting.
+ */
+static void
+latch_sigurg_handler(SIGNAL_ARGS)
+{
+       if (waiting)
+               sendSelfPipeByte();
+}
+
+/* Send one byte to the self-pipe, to wake up WaitLatch */
+static void
+sendSelfPipeByte(void)
+{
+       int                     rc;
+       char            dummy = 0;
+
+retry:
+       rc = write(selfpipe_writefd, &dummy, 1);
+       if (rc < 0)
+       {
+               /* If interrupted by signal, just retry */
+               if (errno == EINTR)
+                       goto retry;
+
+               /*
+                * If the pipe is full, we don't need to retry, the data that's there
+                * already is enough to wake up WaitLatch.
+                */
+               if (errno == EAGAIN || errno == EWOULDBLOCK)
+                       return;
+
+               /*
+                * Oops, the write() failed for some other reason. We might be in a
+                * signal handler, so it's not safe to elog(). We have no choice but
+                * silently ignore the error.
+                */
+               return;
+       }
+}
+
+#endif
+
+#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
+
+/*
+ * Read all available data from self-pipe or signalfd.
+ *
+ * Note: this is only called when waiting = true.  If it fails and doesn't
+ * return, it must reset that flag first (though ideally, this will never
+ * happen).
+ */
+static void
+drain(void)
+{
+       char            buf[1024];
+       int                     rc;
+       int                     fd;
+
+#ifdef WAIT_USE_SELF_PIPE
+       fd = selfpipe_readfd;
+#else
+       fd = signal_fd;
+#endif
+
+       for (;;)
+       {
+               rc = read(fd, buf, sizeof(buf));
+               if (rc < 0)
+               {
+                       if (errno == EAGAIN || errno == EWOULDBLOCK)
+                               break;                  /* the descriptor is empty */
+                       else if (errno == EINTR)
+                               continue;               /* retry */
+                       else
+                       {
+                               waiting = false;
+#ifdef WAIT_USE_SELF_PIPE
+                               elog(ERROR, "read() on self-pipe failed: %m");
+#else
+                               elog(ERROR, "read() on signalfd failed: %m");
+#endif
+                       }
+               }
+               else if (rc == 0)
+               {
+                       waiting = false;
+#ifdef WAIT_USE_SELF_PIPE
+                       elog(ERROR, "unexpected EOF on self-pipe");
+#else
+                       elog(ERROR, "unexpected EOF on signalfd");
+#endif
+               }
+               else if (rc < sizeof(buf))
+               {
+                       /* we successfully drained the pipe; no need to read() again */
+                       break;
+               }
+               /* else buffer wasn't big enough, so read again */
+       }
+}
+
+#endif
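
The pair of routines above is the heart of the self-pipe trick. The whole pattern fits in a short standalone sketch (plain POSIX C, outside the PostgreSQL tree; SIGUSR1 and the handler name are illustrative): a nonblocking pipe whose write end is poked from a signal handler, so that a pending signal reliably wakes a later poll() on the read end:

    #include <errno.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static int  pipefd[2];          /* [0] = read end, [1] = write end */

    static void
    wakeup_handler(int signo)
    {
        int         save_errno = errno;
        char        dummy = 0;

        /* Best effort: if the pipe is already full, a wakeup byte is pending. */
        (void) write(pipefd[1], &dummy, 1);
        errno = save_errno;
    }

    int
    main(void)
    {
        char        buf[64];
        struct pollfd pfd;

        if (pipe(pipefd) < 0)
            return 1;
        fcntl(pipefd[0], F_SETFL, O_NONBLOCK);
        fcntl(pipefd[1], F_SETFL, O_NONBLOCK);
        signal(SIGUSR1, wakeup_handler);

        /* Sleep until SIGUSR1 arrives (kill -USR1 <pid>), then drain the pipe. */
        pfd.fd = pipefd[0];
        pfd.events = POLLIN;
        (void) poll(&pfd, 1, -1);
        while (read(pipefd[0], buf, sizeof(buf)) > 0)
            ;
        printf("woken up\n");
        return 0;
    }
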
+
+static void
+ResOwnerReleaseWaitEventSet(Datum res)
+{
+       WaitEventSet *set = (WaitEventSet *) DatumGetPointer(res);
+
+       Assert(set->owner != NULL);
+       set->owner = NULL;
+       FreeWaitEventSet(set);
+}
+
+#ifndef WIN32
+/*
+ * Wake up my process if it's currently sleeping in WaitEventSetWaitBlock()
+ *
+ * NB: be sure to save and restore errno around it.  (That's standard practice
+ * in most signal handlers, of course, but we used to omit it in handlers that
+ * only set a flag.) XXX
+  *
+ * NB: this function is called from critical sections and signal handlers so
+ * throwing an error is not a good idea.
+ *
+ * On Windows, Latch uses SetEvent directly and this is not used.
+ */
+void
+WakeupMyProc(void)
+{
+#if defined(WAIT_USE_SELF_PIPE)
+       if (waiting)
+               sendSelfPipeByte();
+#else
+       if (waiting)
+               kill(MyProcPid, SIGURG);
+#endif
+}
+
+/* Similar to WakeupMyProc, but wake up another process */
+void
+WakeupOtherProc(int pid)
+{
+       kill(pid, SIGURG);
+}
+#endif
index 0347fc11092a1520b21865a81ef36e18b72b7beb..dc3521457c7674fd652dd131b85571e204040026 100644 (file)
@@ -127,7 +127,7 @@ InitPostmasterChild(void)
 #endif
 
        /* Initialize process-local latch support */
-       InitializeLatchSupport();
+       InitializeWaitEventSupport();
        InitProcessLocalLatch();
        InitializeLatchWaitSet();
 
@@ -188,7 +188,7 @@ InitStandaloneProcess(const char *argv0)
        InitProcessGlobals();
 
        /* Initialize process-local latch support */
-       InitializeLatchSupport();
+       InitializeWaitEventSupport();
        InitProcessLocalLatch();
        InitializeLatchWaitSet();
 
index 66e7a5b7c08cddcd60c4ea4463662324f071a6a4..e41dc70785afe78ea5effd7509d848e5106d5041 100644 (file)
  * use of any generic handler.
  *
  *
- * WaitEventSets allow to wait for latches being set and additional events -
- * postmaster dying and socket readiness of several sockets currently - at the
- * same time.  On many platforms using a long lived event set is more
- * efficient than using WaitLatch or WaitLatchOrSocket.
+ * See also WaitEventSets in waiteventset.h.  They allow waiting for latches
+ * being set and additional events - currently postmaster death and socket
+ * readiness on several sockets - at the same time.  On many platforms, using
+ * a long-lived event set is more efficient than using WaitLatch or
+ * WaitLatchOrSocket.
  *
  *
  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 
 #include <signal.h>
 
-#include "utils/resowner.h"
+#include "storage/waiteventset.h"      /* for WL_* arguments to WaitLatch */
 
 /*
  * Latch structure should be treated as opaque and only accessed through
@@ -120,53 +121,9 @@ typedef struct Latch
 #endif
 } Latch;
 
-/*
- * Bitmasks for events that may wake-up WaitLatch(), WaitLatchOrSocket(), or
- * WaitEventSetWait().
- */
-#define WL_LATCH_SET            (1 << 0)
-#define WL_SOCKET_READABLE      (1 << 1)
-#define WL_SOCKET_WRITEABLE  (1 << 2)
-#define WL_TIMEOUT                      (1 << 3)       /* not for WaitEventSetWait() */
-#define WL_POSTMASTER_DEATH  (1 << 4)
-#define WL_EXIT_ON_PM_DEATH     (1 << 5)
-#ifdef WIN32
-#define WL_SOCKET_CONNECTED  (1 << 6)
-#else
-/* avoid having to deal with case on platforms not requiring it */
-#define WL_SOCKET_CONNECTED  WL_SOCKET_WRITEABLE
-#endif
-#define WL_SOCKET_CLOSED        (1 << 7)
-#ifdef WIN32
-#define WL_SOCKET_ACCEPT        (1 << 8)
-#else
-/* avoid having to deal with case on platforms not requiring it */
-#define WL_SOCKET_ACCEPT       WL_SOCKET_READABLE
-#endif
-#define WL_SOCKET_MASK         (WL_SOCKET_READABLE | \
-                                                        WL_SOCKET_WRITEABLE | \
-                                                        WL_SOCKET_CONNECTED | \
-                                                        WL_SOCKET_ACCEPT | \
-                                                        WL_SOCKET_CLOSED)
-
-typedef struct WaitEvent
-{
-       int                     pos;                    /* position in the event data structure */
-       uint32          events;                 /* triggered events */
-       pgsocket        fd;                             /* socket fd associated with event */
-       void       *user_data;          /* pointer provided in AddWaitEventToSet */
-#ifdef WIN32
-       bool            reset;                  /* Is reset of the event required? */
-#endif
-} WaitEvent;
-
-/* forward declaration to avoid exposing latch.c implementation details */
-typedef struct WaitEventSet WaitEventSet;
-
 /*
  * prototypes for functions in latch.c
  */
-extern void InitializeLatchSupport(void);
 extern void InitLatch(Latch *latch);
 extern void InitSharedLatch(Latch *latch);
 extern void OwnLatch(Latch *latch);
@@ -174,22 +131,10 @@ extern void DisownLatch(Latch *latch);
 extern void SetLatch(Latch *latch);
 extern void ResetLatch(Latch *latch);
 
-extern WaitEventSet *CreateWaitEventSet(ResourceOwner resowner, int nevents);
-extern void FreeWaitEventSet(WaitEventSet *set);
-extern void FreeWaitEventSetAfterFork(WaitEventSet *set);
-extern int     AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd,
-                                                         Latch *latch, void *user_data);
-extern void ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch);
-
-extern int     WaitEventSetWait(WaitEventSet *set, long timeout,
-                                                        WaitEvent *occurred_events, int nevents,
-                                                        uint32 wait_event_info);
 extern int     WaitLatch(Latch *latch, int wakeEvents, long timeout,
                                          uint32 wait_event_info);
 extern int     WaitLatchOrSocket(Latch *latch, int wakeEvents,
                                                          pgsocket sock, long timeout, uint32 wait_event_info);
 extern void InitializeLatchWaitSet(void);
-extern int     GetNumRegisteredWaitEvents(WaitEventSet *set);
-extern bool WaitEventSetCanReportClosed(void);
 
 #endif                                                 /* LATCH_H */
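
Since WaitLatch() and WaitLatchOrSocket() remain in latch.h, the classic sleep loop built on them is unchanged. A hedged fragment (assuming a backend context where MyLatch is available; 0 is a placeholder wait_event_info):

    /* Sleep for up to 10 seconds at a time, or until the latch is set. */
    for (;;)
    {
        int         rc;

        rc = WaitLatch(MyLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
                       10000L,
                       0 /* placeholder wait_event_info */ );

        if (rc & WL_LATCH_SET)
        {
            ResetLatch(MyLatch);
            CHECK_FOR_INTERRUPTS();
            /* ... handle whatever the latch was set for ... */
        }
        if (rc & WL_TIMEOUT)
            break;              /* give up after the timeout */
    }
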
diff --git a/src/include/storage/waiteventset.h b/src/include/storage/waiteventset.h
new file mode 100644 (file)
index 0000000..9947491
--- /dev/null
@@ -0,0 +1,97 @@
+/*-------------------------------------------------------------------------
+ *
+ * waiteventset.h
+ *             ppoll() / pselect() like interface for waiting for events
+ *
+ * WaitEventSets allow waiting for latches being set and additional events -
+ * currently postmaster death and socket readiness on several sockets - at the
+ * same time.  On many platforms, using a long-lived event set is more
+ * efficient than using WaitLatch or WaitLatchOrSocket.
+ *
+ * WaitEventSetWait includes a provision for timeouts (which should be avoided
+ * when possible, as they incur extra overhead) and a provision for postmaster
+ * child processes to wake up immediately on postmaster death.  See
+ * storage/ipc/waiteventset.c for detailed specifications for the exported
+ * functions.
+ *
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/waiteventset.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef WAITEVENTSET_H
+#define WAITEVENTSET_H
+
+#include "utils/resowner.h"
+
+/*
+ * Bitmasks for events that may wake up WaitLatch(), WaitLatchOrSocket(), or
+ * WaitEventSetWait().
+ */
+#define WL_LATCH_SET            (1 << 0)
+#define WL_SOCKET_READABLE      (1 << 1)
+#define WL_SOCKET_WRITEABLE  (1 << 2)
+#define WL_TIMEOUT                      (1 << 3)       /* not for WaitEventSetWait() */
+#define WL_POSTMASTER_DEATH  (1 << 4)
+#define WL_EXIT_ON_PM_DEATH     (1 << 5)
+#ifdef WIN32
+#define WL_SOCKET_CONNECTED  (1 << 6)
+#else
+/* avoid having to deal with case on platforms not requiring it */
+#define WL_SOCKET_CONNECTED  WL_SOCKET_WRITEABLE
+#endif
+#define WL_SOCKET_CLOSED        (1 << 7)
+#ifdef WIN32
+#define WL_SOCKET_ACCEPT        (1 << 8)
+#else
+/* avoid having to deal with case on platforms not requiring it */
+#define WL_SOCKET_ACCEPT       WL_SOCKET_READABLE
+#endif
+#define WL_SOCKET_MASK         (WL_SOCKET_READABLE | \
+                                                        WL_SOCKET_WRITEABLE | \
+                                                        WL_SOCKET_CONNECTED | \
+                                                        WL_SOCKET_ACCEPT | \
+                                                        WL_SOCKET_CLOSED)
+
+typedef struct WaitEvent
+{
+       int                     pos;                    /* position in the event data structure */
+       uint32          events;                 /* triggered events */
+       pgsocket        fd;                             /* socket fd associated with event */
+       void       *user_data;          /* pointer provided in AddWaitEventToSet */
+#ifdef WIN32
+       bool            reset;                  /* Is reset of the event required? */
+#endif
+} WaitEvent;
+
+/* forward declarations to avoid exposing waiteventset.c implementation details */
+typedef struct WaitEventSet WaitEventSet;
+
+typedef struct Latch Latch;
+
+/*
+ * prototypes for functions in waiteventset.c
+ */
+extern void InitializeWaitEventSupport(void);
+
+extern WaitEventSet *CreateWaitEventSet(ResourceOwner resowner, int nevents);
+extern void FreeWaitEventSet(WaitEventSet *set);
+extern void FreeWaitEventSetAfterFork(WaitEventSet *set);
+extern int     AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd,
+                                                         Latch *latch, void *user_data);
+extern void ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch);
+extern int     WaitEventSetWait(WaitEventSet *set, long timeout,
+                                                        WaitEvent *occurred_events, int nevents,
+                                                        uint32 wait_event_info);
+extern int     GetNumRegisteredWaitEvents(WaitEventSet *set);
+extern bool WaitEventSetCanReportClosed(void);
+
+#ifndef WIN32
+extern void WakeupMyProc(void);
+extern void WakeupOtherProc(int pid);
+#endif
+
+#endif                                                 /* WAITEVENTSET_H */