git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MEDIUM: polling: centralize polled events processing
author: Willy Tarreau <w@1wt.eu>
Sat, 25 Jan 2014 08:58:06 +0000 (09:58 +0100)
committer: Willy Tarreau <w@1wt.eu>
Sat, 25 Jan 2014 23:42:32 +0000 (00:42 +0100)
Currently, each poll loop handles the polled events the same way,
resulting in a lot of duplicated, complex code. Additionally, epoll
was the only one to handle newly created FDs immediately.

So instead, let's move that code to fd.c in a new function dedicated
to this task : fd_process_polled_events(). All pollers now use this
function.

include/proto/fd.h
src/ev_epoll.c
src/ev_kqueue.c
src/ev_poll.c
src/ev_select.c
src/fd.c

index 4f75bd6a33e3b678d335b741dea3e9a625e03290..f74fbcbb526ae9f0e58bb5240947cdd17d2b0e72 100644 (file)
@@ -81,6 +81,14 @@ void run_poller();
  */
 void fd_process_cached_events();
 
+/* Check the events attached to a file descriptor, update its cache
+ * accordingly, and call the associated I/O callback. If new updates are
+ * detected, the function tries to process them as well in order to save
+ * wakeups after accept().
+ */
+void fd_process_polled_events(int fd);
+
+
 /* Mark fd <fd> as updated and allocate an entry in the update list for this if
  * it was not already there. This can be done at any time.
  */
index ae415fc620c95addba584bc608c2b365dc8e0b49..b90d9c153e2a92c2b1a0dfefcf1e3595c04ffee4 100644 (file)
@@ -174,63 +174,8 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
                if (e & EPOLLRDHUP)
                        n |= FD_POLL_HUP;
 
-               if (!n)
-                       continue;
-
                fdtab[fd].ev |= n;
-
-               if (fdtab[fd].iocb) {
-                       int new_updt, old_updt;
-
-                       /* Mark the events as ready before processing */
-                       if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
-                               fd_may_recv(fd);
-
-                       if (fdtab[fd].ev & (FD_POLL_OUT | FD_POLL_ERR))
-                               fd_may_send(fd);
-
-                       if (fdtab[fd].cache)
-                               continue;
-
-                       /* Save number of updates to detect creation of new FDs. */
-                       old_updt = fd_nbupdt;
-                       fdtab[fd].iocb(fd);
-
-                       /* One or more fd might have been created during the iocb().
-                        * This mainly happens with new incoming connections that have
-                        * just been accepted, so we'd like to process them immediately
-                        * for better efficiency. Second benefit, if at the end the fds
-                        * are disabled again, we can safely destroy their update entry
-                        * to reduce the scope of later scans. This is the reason we
-                        * scan the new entries backwards.
-                        */
-
-                       for (new_updt = fd_nbupdt; new_updt > old_updt; new_updt--) {
-                               fd = fd_updt[new_updt - 1];
-                               if (!fdtab[fd].new)
-                                       continue;
-
-                               fdtab[fd].new = 0;
-                               fdtab[fd].ev &= FD_POLL_STICKY;
-
-                               if ((fdtab[fd].state & FD_EV_STATUS_R) == (FD_EV_READY_R | FD_EV_ACTIVE_R))
-                                       fdtab[fd].ev |= FD_POLL_IN;
-
-                               if ((fdtab[fd].state & FD_EV_STATUS_W) == (FD_EV_READY_W | FD_EV_ACTIVE_W))
-                                       fdtab[fd].ev |= FD_POLL_OUT;
-
-                               if (fdtab[fd].ev && fdtab[fd].iocb && fdtab[fd].owner)
-                                       fdtab[fd].iocb(fd);
-
-                               /* we can remove this update entry if it's the last one and is
-                                * unused, otherwise we don't touch anything.
-                                */
-                               if (new_updt == fd_nbupdt && !fd_recv_active(fd) && !fd_send_active(fd)) {
-                                       fdtab[fd].updated = 0;
-                                       fd_nbupdt--;
-                               }
-                       }
-               }
+               fd_process_polled_events(fd);
        }
        /* the caller will take care of cached events */
 }
index dab6f5bdc023f6c360d6a66ba7962f3d42f9bfed..0473adc349a8a56a0edb561bbd281cb53ff5570f 100644 (file)
@@ -152,18 +152,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
                                fdtab[fd].ev |= FD_POLL_OUT;
                }
 
-               if (fdtab[fd].iocb && fdtab[fd].ev) {
-                       if (fdtab[fd].ev & FD_POLL_IN)
-                               fd_may_recv(fd);
-
-                       if (fdtab[fd].ev & FD_POLL_OUT)
-                               fd_may_send(fd);
-
-                       if (fdtab[fd].cache)
-                               continue;
-
-                       fdtab[fd].iocb(fd);
-               }
+               fd_process_polled_events(fd);
        }
 }
 
index da927dc5f9b5f84284de07fca17d8b6ac46294e1..84ba486b8914bff9a3d8acc783fa403080349eb0 100644 (file)
@@ -177,18 +177,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
                                ((e & POLLHUP) ? FD_POLL_HUP : 0);
                }
 
-               if (fdtab[fd].iocb && fdtab[fd].ev) {
-                       if (fdtab[fd].ev & FD_POLL_IN)
-                               fd_may_recv(fd);
-
-                       if (fdtab[fd].ev & FD_POLL_OUT)
-                               fd_may_send(fd);
-
-                       if (fdtab[fd].cache)
-                               continue;
-
-                       fdtab[fd].iocb(fd);
-               }
+               fd_process_polled_events(fd);
        }
 
 }
index a87834028517d983a8f8be4f006ae3a140314a3f..87ca348a680b41bf6bcc2d3cfe42d4ce30937090 100644 (file)
@@ -161,18 +161,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
                        if (FD_ISSET(fd, tmp_evts[DIR_WR]))
                                fdtab[fd].ev |= FD_POLL_OUT;
 
-                       if (fdtab[fd].iocb && fdtab[fd].ev) {
-                               if (fdtab[fd].ev & FD_POLL_IN)
-                                       fd_may_recv(fd);
-
-                               if (fdtab[fd].ev & FD_POLL_OUT)
-                                       fd_may_send(fd);
-
-                               if (fdtab[fd].cache)
-                                       continue;
-
-                               fdtab[fd].iocb(fd);
-                       }
+                       fd_process_polled_events(fd);
                }
        }
 }
index 9e3706824038007679e895cc112551349c0e542f..66f1e8bd65c16eb454246b1ca3380bc192deb10c 100644 (file)
--- a/src/fd.c
+++ b/src/fd.c
@@ -237,6 +237,75 @@ void fd_process_cached_events()
        }
 }
 
+/* Check the events attached to a file descriptor, update its cache
+ * accordingly, and call the associated I/O callback. If new updates are
+ * detected, the function tries to process them as well in order to save
+ * wakeups after accept().
+ */
+void fd_process_polled_events(int fd)
+{
+       int new_updt, old_updt;
+
+       /* First thing to do is to mark the reported events as ready, in order
+        * for them to later be continued from the cache without polling if
+        * they have to be interrupted (eg: recv fills a buffer).
+        */
+       if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
+               fd_may_recv(fd);
+
+       if (fdtab[fd].ev & (FD_POLL_OUT | FD_POLL_ERR))
+               fd_may_send(fd);
+
+       if (fdtab[fd].cache) {
+               /* This fd is already cached, no need to process it now. */
+               return;
+       }
+
+       if (unlikely(!fdtab[fd].iocb || !fdtab[fd].ev)) {
+               /* nothing to do */
+               return;
+       }
+
+       /* Save number of updates to detect creation of new FDs. */
+       old_updt = fd_nbupdt;
+       fdtab[fd].iocb(fd);
+
+       /* One or more fd might have been created during the iocb().
+        * This mainly happens with new incoming connections that have
+        * just been accepted, so we'd like to process them immediately
+        * for better efficiency, as it saves one useless task wakeup.
+        * Second benefit, if at the end the fds are disabled again, we can
+        * safely destroy their update entry to reduce the scope of later
+        * scans. This is the reason we scan the new entries backwards.
+        */
+       for (new_updt = fd_nbupdt; new_updt > old_updt; new_updt--) {
+               fd = fd_updt[new_updt - 1];
+               if (!fdtab[fd].new)
+                       continue;
+
+               fdtab[fd].new = 0;
+               fdtab[fd].ev &= FD_POLL_STICKY;
+
+               if ((fdtab[fd].state & FD_EV_STATUS_R) == (FD_EV_READY_R | FD_EV_ACTIVE_R))
+                       fdtab[fd].ev |= FD_POLL_IN;
+
+               if ((fdtab[fd].state & FD_EV_STATUS_W) == (FD_EV_READY_W | FD_EV_ACTIVE_W))
+                       fdtab[fd].ev |= FD_POLL_OUT;
+
+               if (fdtab[fd].ev && fdtab[fd].iocb && fdtab[fd].owner)
+                       fdtab[fd].iocb(fd);
+
+               /* we can remove this update entry if it's the last one and is
+                * unused, otherwise we don't touch anything, especially given
+                * that the FD might have been closed already.
+                */
+               if (new_updt == fd_nbupdt && !fd_recv_active(fd) && !fd_send_active(fd)) {
+                       fdtab[fd].updated = 0;
+                       fd_nbupdt--;
+               }
+       }
+}
+
 /* disable the specified poller */
 void disable_poller(const char *poller_name)
 {