git.ipfire.org Git - thirdparty/squid.git/commitdiff
SourceLayout: Refactor comm_incoming mechanism (#1572)
authorAmos Jeffries <yadij@users.noreply.github.com>
Wed, 29 Nov 2023 01:19:13 +0000 (01:19 +0000)
committerSquid Anubis <squid-anubis@squid-cache.org>
Wed, 29 Nov 2023 01:19:21 +0000 (01:19 +0000)
doc/release-notes/release-7.sgml.in
src/SquidConfig.h
src/StatCounters.h
src/comm/Incoming.cc [new file with mode: 0644]
src/comm/Incoming.h [new file with mode: 0644]
src/comm/Makefile.am
src/comm/ModPoll.cc
src/comm/ModSelect.cc
src/stat.cc

index 8ed6306c9c0ed6bf7159d6b725379718ac4a7a20..30b24de2072afa673634708d05a2ff6f1a40bfd8 100644 (file)
@@ -151,6 +151,12 @@ This section gives an account of those changes in three categories:
        <tag>--without-psapi</tag>
        <p>Disable auto-detection of Windows PSAPI library.
 
+       <tag>CPPFLAGS=-DINCOMING_FACTOR=</tag>
+       <p>Control the listening sockets responsiveness with poll(2) and select(2).
+          The higher the INCOMING_FACTOR, the slower the algorithm will
+          respond to load spikes/increases/decreases in demand. A value
+          between 3 and 8 is recommended. Default is 5.
+
 </descrip>
 
 <sect1>Changes to existing options<label id="modifiedoptions">
index 5f1bd5fa79007a8a0eaccc2df1c994f6e9c39203..a065d1cd4497d9d263f05bd0b2356bfba46b56b9 100644 (file)
@@ -447,8 +447,8 @@ public:
     MessageDelayConfig MessageDelay;
 #endif
 
-    struct {
-        struct {
+    struct CommIncoming {
+        struct Measure {
             int average;
             int min_poll;
         } dns, udp, tcp;
index 4d9c5c66f9f6ff2fe2e9da4eb929ac998f3698b0..aadd9ff3b5b6869307d62d3927e225a1dac2f9be 100644 (file)
@@ -10,6 +10,7 @@
 #define STATCOUNTERS_H_
 
 #include "base/ByteCounter.h"
+#include "comm/Incoming.h"
 #include "StatHist.h"
 
 #if USE_CACHE_DIGESTS
@@ -121,9 +122,11 @@ public:
     double cputime = 0.0;
 
     struct timeval timestamp;
-    StatHist comm_udp_incoming;
-    StatHist comm_dns_incoming;
-    StatHist comm_tcp_incoming;
+#if USE_POLL || USE_SELECT
+    Comm::Incoming comm_dns;
+    Comm::Incoming comm_tcp;
+    Comm::Incoming comm_udp;
+#endif
     StatHist select_fds_hist;
 
     struct {
diff --git a/src/comm/Incoming.cc b/src/comm/Incoming.cc
new file mode 100644 (file)
index 0000000..74a8d4c
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#include "squid.h"
+
+#if USE_POLL || USE_SELECT
+#include "comm/Incoming.h"
+
+void
+Comm::Incoming::finishPolling(int n, SquidConfig::CommIncoming::Measure &cfg)
+{
+    if (n < 0)
+        return;
+
+    interval += cfg.average - n;
+
+    if (interval < cfg.min_poll)
+        interval = cfg.min_poll;
+
+    if (interval > MaxInterval)
+        interval = MaxInterval;
+
+    if (n > nMaximum)
+        n = nMaximum;
+
+    history.count(n);
+}
+
+#endif /* USE_POLL || USE_SELECT */
diff --git a/src/comm/Incoming.h b/src/comm/Incoming.h
new file mode 100644 (file)
index 0000000..ef25a92
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#ifndef SQUID__SRC_COMM_INCOMING_H
+#define SQUID__SRC_COMM_INCOMING_H
+
+#if USE_POLL || USE_SELECT
+
+#include "SquidConfig.h"
+#include "StatHist.h"
+
+namespace Comm
+{
+
+/**
+ * Automatic tuning for incoming requests.
+ *
+ * INCOMING sockets are the listening ports for transport protocols.
+ * We need to check these fairly regularly, but how often?  When the
+ * load increases, we want to check the incoming sockets more often.
+ * If we have a lot of one protocol incoming, then we need to check
+ * those sockets more than others.
+ *
+ * \copydoc Comm::Incoming::check()
+ *
+ * \copydoc Comm::Incoming::finishPolling()
+ *
+ * Caveats:
+ *
+ *   \copydoc Comm::Incoming::Factor
+ *
+ *   \copydoc Comm::Incoming::MaxInterval
+ */
+class Incoming
+{
+public:
+#if !defined(INCOMING_FACTOR)
+#define INCOMING_FACTOR 5
+#endif
+    /**
+     * The higher the INCOMING_FACTOR, the slower the algorithm will
+     * respond to load spikes/increases/decreases in demand. A value
+     * between 3 and 8 is recommended.
+     */
+    static const int Factor = INCOMING_FACTOR;
+
+    /**
+     * Magic upper limit on interval.
+     * At the largest value the cache will effectively be idling.
+     */
+    static const int MaxInterval = (256 << Factor);
+
+    // TODO replace with constructor initialization
+    void init(int n) { nMaximum = n; history.enumInit(n); }
+
+    /**
+     * Preparation for polling incoming sockets.
+     *
+     * \param n  the number of relevant listening FDs currently open.
+     *
+     * \return whether it is possible to check with poll(2)/select(2).
+     */
+    bool startPolling(int n) { ioEvents = 0; return (n > 0); }
+
+    /**
+     * Finalize and update records when incoming sockets polled.
+     *
+     * The new interval is calculated as the current interval,
+     * plus what we would like to see as an average number of events,
+     * minus the number of events just processed.
+     */
+    void finishPolling(int, SquidConfig::CommIncoming::Measure &);
+
+    /**
+     * Every time we check incoming sockets, we count how many new
+     * messages or connections were processed.  This is used to adjust
+     * the interval for the next iteration.
+     *
+     * \return whether it is time to check incoming sockets.
+     */
+    bool check() { return (++ioEvents > (interval >> Factor)); }
+
+    /*
+     * How many normal I/O events to process before checking
+     * incoming sockets again.
+     *
+     * \note We store the interval multiplied by a factor of
+     *       (2^Factor) to have some pseudo-floating
+     *       point precision.
+     */
+    int interval = (16 << Factor);
+
+    /** History of I/O events timing on listening ports.
+     *
+     * You can see the current values of the interval's,
+     * as well as histograms of 'incoming_events' in the cache
+     * manager 'comm_*_incoming' reports.
+     */
+    StatHist history;
+
+private:
+    /**
+     * Count of normal I/O events processed since last call to
+     * startPolling().  When ioEvents > interval, it is time to
+     * check incoming sockets again.
+     */
+    int ioEvents = 0;
+
+    /**
+     * Maximum value to record for number of I/O events within
+     * an interval. Set using init(N).
+     */
+    int nMaximum = 0;
+};
+
+} // namespace Comm
+
+#endif /* USE_POLL || USE_SELECT */
+#endif /* SQUID__SRC_COMM_INCOMING_H */
index 630a238476463cdae66234c90e5a5861bf8050d2..7ea6cf20b48389414bcc6aa52cbde87f44534f51 100644 (file)
@@ -20,6 +20,8 @@ libcomm_la_SOURCES = \
        Connection.cc \
        Connection.h \
        Flag.h \
+       Incoming.cc \
+       Incoming.h \
        IoCallback.cc \
        IoCallback.h \
        Loops.h \
index 8928378070500c7e358e82afee419452e69fcd15..dcebaeb67dbdfcd38b7796ced2078da242260b27 100644 (file)
@@ -57,67 +57,6 @@ static OBJH commIncomingStats;
 static int comm_check_incoming_poll_handlers(int nfds, int *fds);
 static void comm_poll_dns_incoming(void);
 
-/*
- * Automatic tuning for incoming requests:
- *
- * INCOMING sockets are the ICP and HTTP ports.  We need to check these
- * fairly regularly, but how often?  When the load increases, we
- * want to check the incoming sockets more often.  If we have a lot
- * of incoming ICP, then we need to check these sockets more than
- * if we just have HTTP.
- *
- * The variables 'incoming_icp_interval' and 'incoming_http_interval'
- * determine how many normal I/O events to process before checking
- * incoming sockets again.  Note we store the incoming_interval
- * multiplied by a factor of (2^INCOMING_FACTOR) to have some
- * pseudo-floating point precision.
- *
- * The variable 'udp_io_events' and 'tcp_io_events' counts how many normal
- * I/O events have been processed since the last check on the incoming
- * sockets.  When io_events > incoming_interval, its time to check incoming
- * sockets.
- *
- * Every time we check incoming sockets, we count how many new messages
- * or connections were processed.  This is used to adjust the
- * incoming_interval for the next iteration.  The new incoming_interval
- * is calculated as the current incoming_interval plus what we would
- * like to see as an average number of events minus the number of
- * events just processed.
- *
- *  incoming_interval = incoming_interval + target_average - number_of_events_processed
- *
- * There are separate incoming_interval counters for TCP-based, UDP-based, and DNS events
- *
- * You can see the current values of the incoming_interval's, as well as
- * a histogram of 'incoming_events' by asking the cache manager
- * for 'comm_incoming', e.g.:
- *
- *      % ./client mgr:comm_poll_incoming
- *
- * Caveats:
- *
- *      - We have MAX_INCOMING_INTEGER as a magic upper limit on
- *        incoming_interval for both types of sockets.  At the
- *        largest value the cache will effectively be idling.
- *
- *      - The higher the INCOMING_FACTOR, the slower the algorithm will
- *        respond to load spikes/increases/decreases in demand. A value
- *        between 3 and 8 is recommended.
- */
-
-#define MAX_INCOMING_INTEGER 256
-#define INCOMING_FACTOR 5
-#define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
-static int udp_io_events = 0; ///< I/O events passed since last UDP receiver socket poll
-static int dns_io_events = 0; ///< I/O events passed since last DNS socket poll
-static int tcp_io_events = 0; ///< I/O events passed since last TCP listening socket poll
-static int incoming_udp_interval = 16 << INCOMING_FACTOR;
-static int incoming_dns_interval = 16 << INCOMING_FACTOR;
-static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
-#define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval>> INCOMING_FACTOR))
-#define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval>> INCOMING_FACTOR))
-#define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval>> INCOMING_FACTOR))
-
 void
 Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
 {
@@ -247,8 +186,6 @@ comm_poll_udp_incoming(void)
 {
     int nfds = 0;
     int fds[2];
-    int nevents;
-    udp_io_events = 0;
 
     if (Comm::IsConnOpen(icpIncomingConn)) {
         fds[nfds] = icpIncomingConn->fd;
@@ -260,23 +197,10 @@ comm_poll_udp_incoming(void)
         ++nfds;
     }
 
-    if (nfds == 0)
-        return;
-
-    nevents = comm_check_incoming_poll_handlers(nfds, fds);
-
-    incoming_udp_interval += Config.comm_incoming.udp.average - nevents;
-
-    if (incoming_udp_interval < Config.comm_incoming.udp.min_poll)
-        incoming_udp_interval = Config.comm_incoming.udp.min_poll;
-
-    if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
-        incoming_udp_interval = MAX_INCOMING_INTERVAL;
-
-    if (nevents > INCOMING_UDP_MAX)
-        nevents = INCOMING_UDP_MAX;
-
-    statCounter.comm_udp_incoming.count(nevents);
+    if (statCounter.comm_udp.startPolling(nfds)) {
+        auto n = comm_check_incoming_poll_handlers(nfds, fds);
+        statCounter.comm_udp.finishPolling(n, Config.comm_incoming.udp);
+    }
 }
 
 static void
@@ -284,34 +208,20 @@ comm_poll_tcp_incoming(void)
 {
     int nfds = 0;
     int fds[MAXTCPLISTENPORTS];
-    int j;
-    int nevents;
-    tcp_io_events = 0;
 
     // XXX: only poll sockets that won't be deferred. But how do we identify them?
 
-    for (j = 0; j < NHttpSockets; ++j) {
-        if (HttpSockets[j] < 0)
-            continue;
-
-        fds[nfds] = HttpSockets[j];
-        ++nfds;
+    for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
+        if (Comm::IsConnOpen(s->listenConn)) {
+            fds[nfds] = s->listenConn->fd;
+            ++nfds;
+        }
     }
 
-    nevents = comm_check_incoming_poll_handlers(nfds, fds);
-    incoming_tcp_interval = incoming_tcp_interval
-                            + Config.comm_incoming.tcp.average - nevents;
-
-    if (incoming_tcp_interval < Config.comm_incoming.tcp.min_poll)
-        incoming_tcp_interval = Config.comm_incoming.tcp.min_poll;
-
-    if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
-        incoming_tcp_interval = MAX_INCOMING_INTERVAL;
-
-    if (nevents > INCOMING_TCP_MAX)
-        nevents = INCOMING_TCP_MAX;
-
-    statCounter.comm_tcp_incoming.count(nevents);
+    if (statCounter.comm_tcp.startPolling(nfds)) {
+        auto n = comm_check_incoming_poll_handlers(nfds, fds);
+        statCounter.comm_tcp.finishPolling(n, Config.comm_incoming.tcp);
+    }
 }
 
 /* poll all sockets; call handlers for those that are ready. */
@@ -334,13 +244,13 @@ Comm::DoSelect(int msec)
         getCurrentTime();
         start = current_dtime;
 
-        if (commCheckUdpIncoming)
+        if (statCounter.comm_udp.check())
             comm_poll_udp_incoming();
 
-        if (commCheckDnsIncoming)
+        if (statCounter.comm_dns.check())
             comm_poll_dns_incoming();
 
-        if (commCheckTcpIncoming)
+        if (statCounter.comm_tcp.check())
             comm_poll_tcp_incoming();
 
         calldns = calludp = calltcp = 0;
@@ -463,13 +373,13 @@ Comm::DoSelect(int msec)
                     hdl(fd, F->read_data);
                     ++ statCounter.select_fds;
 
-                    if (commCheckUdpIncoming)
+                    if (statCounter.comm_udp.check())
                         comm_poll_udp_incoming();
 
-                    if (commCheckDnsIncoming)
+                    if (statCounter.comm_dns.check())
                         comm_poll_dns_incoming();
 
-                    if (commCheckTcpIncoming)
+                    if (statCounter.comm_tcp.check())
                         comm_poll_tcp_incoming();
                 }
             }
@@ -482,13 +392,13 @@ Comm::DoSelect(int msec)
                     hdl(fd, F->write_data);
                     ++ statCounter.select_fds;
 
-                    if (commCheckUdpIncoming)
+                    if (statCounter.comm_udp.check())
                         comm_poll_udp_incoming();
 
-                    if (commCheckDnsIncoming)
+                    if (statCounter.comm_dns.check())
                         comm_poll_dns_incoming();
 
-                    if (commCheckTcpIncoming)
+                    if (statCounter.comm_tcp.check())
                         comm_poll_tcp_incoming();
                 }
             }
@@ -547,11 +457,6 @@ comm_poll_dns_incoming(void)
 {
     int nfds = 0;
     int fds[2];
-    int nevents;
-    dns_io_events = 0;
-
-    if (DnsSocketA < 0 && DnsSocketB < 0)
-        return;
 
     if (DnsSocketA >= 0) {
         fds[nfds] = DnsSocketA;
@@ -563,23 +468,10 @@ comm_poll_dns_incoming(void)
         ++nfds;
     }
 
-    nevents = comm_check_incoming_poll_handlers(nfds, fds);
-
-    if (nevents < 0)
-        return;
-
-    incoming_dns_interval += Config.comm_incoming.dns.average - nevents;
-
-    if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
-        incoming_dns_interval = Config.comm_incoming.dns.min_poll;
-
-    if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
-        incoming_dns_interval = MAX_INCOMING_INTERVAL;
-
-    if (nevents > INCOMING_DNS_MAX)
-        nevents = INCOMING_DNS_MAX;
-
-    statCounter.comm_dns_incoming.count(nevents);
+    if (statCounter.comm_dns.startPolling(nfds)) {
+        auto n = comm_check_incoming_poll_handlers(nfds, fds);
+        statCounter.comm_dns.finishPolling(n, Config.comm_incoming.dns);
+    }
 }
 
 static void
@@ -600,19 +492,19 @@ static void
 commIncomingStats(StoreEntry * sentry)
 {
     storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
-                      incoming_udp_interval >> INCOMING_FACTOR);
+                      statCounter.comm_udp.interval >> Comm::Incoming::Factor);
     storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
-                      incoming_dns_interval >> INCOMING_FACTOR);
+                      statCounter.comm_dns.interval >> Comm::Incoming::Factor);
     storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
-                      incoming_tcp_interval >> INCOMING_FACTOR);
+                      statCounter.comm_tcp.interval >> Comm::Incoming::Factor);
     storeAppendPrintf(sentry, "\n");
     storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
     storeAppendPrintf(sentry, "ICP Messages handled per comm_poll_udp_incoming() call:\n");
-    statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
+    statCounter.comm_udp.history.dump(sentry, statHistIntDumper);
     storeAppendPrintf(sentry, "DNS Messages handled per comm_poll_dns_incoming() call:\n");
-    statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
+    statCounter.comm_dns.history.dump(sentry, statHistIntDumper);
     storeAppendPrintf(sentry, "HTTP Messages handled per comm_poll_tcp_incoming() call:\n");
-    statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
+    statCounter.comm_tcp.history.dump(sentry, statHistIntDumper);
 }
 
 /* Called by async-io or diskd to speed up the polling */
index 0b36610802a3184d4bc973ff5c444fe35d99b8a2..9d0e1ebf9af6f050b7a6eb9a22eef87126f3c777 100644 (file)
@@ -57,67 +57,6 @@ static fd_set global_writefds;
 static int nreadfds;
 static int nwritefds;
 
-/*
- * Automatic tuning for incoming requests:
- *
- * INCOMING sockets are the ICP and HTTP ports.  We need to check these
- * fairly regularly, but how often?  When the load increases, we
- * want to check the incoming sockets more often.  If we have a lot
- * of incoming ICP, then we need to check these sockets more than
- * if we just have HTTP.
- *
- * The variables 'incoming_udp_interval' and 'incoming_tcp_interval'
- * determine how many normal I/O events to process before checking
- * incoming sockets again.  Note we store the incoming_interval
- * multiplied by a factor of (2^INCOMING_FACTOR) to have some
- * pseudo-floating point precision.
- *
- * The variable 'udp_io_events' and 'tcp_io_events' counts how many normal
- * I/O events have been processed since the last check on the incoming
- * sockets.  When io_events > incoming_interval, its time to check incoming
- * sockets.
- *
- * Every time we check incoming sockets, we count how many new messages
- * or connections were processed.  This is used to adjust the
- * incoming_interval for the next iteration.  The new incoming_interval
- * is calculated as the current incoming_interval plus what we would
- * like to see as an average number of events minus the number of
- * events just processed.
- *
- *  incoming_interval = incoming_interval + target_average - number_of_events_processed
- *
- * There are separate incoming_interval counters for DNS, UDP and TCP events
- *
- * You can see the current values of the incoming_interval's, as well as
- * a histogram of 'incoming_events' by asking the cache manager
- * for 'comm_incoming', e.g.:
- *
- *      % ./client mgr:comm_incoming
- *
- * Caveats:
- *
- *      - We have MAX_INCOMING_INTEGER as a magic upper limit on
- *        incoming_interval for both types of sockets.  At the
- *        largest value the cache will effectively be idling.
- *
- *      - The higher the INCOMING_FACTOR, the slower the algorithm will
- *        respond to load spikes/increases/decreases in demand. A value
- *        between 3 and 8 is recommended.
- */
-
-#define MAX_INCOMING_INTEGER 256
-#define INCOMING_FACTOR 5
-#define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
-static int udp_io_events = 0;
-static int dns_io_events = 0;
-static int tcp_io_events = 0;
-static int incoming_udp_interval = 16 << INCOMING_FACTOR;
-static int incoming_dns_interval = 16 << INCOMING_FACTOR;
-static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
-#define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval>> INCOMING_FACTOR))
-#define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval>> INCOMING_FACTOR))
-#define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval>> INCOMING_FACTOR))
-
 void
 Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
 {
@@ -252,8 +191,6 @@ comm_select_udp_incoming(void)
 {
     int nfds = 0;
     int fds[2];
-    int nevents;
-    udp_io_events = 0;
 
     if (Comm::IsConnOpen(icpIncomingConn)) {
         fds[nfds] = icpIncomingConn->fd;
@@ -265,23 +202,10 @@ comm_select_udp_incoming(void)
         ++nfds;
     }
 
-    if (nfds == 0)
-        return;
-
-    nevents = comm_check_incoming_select_handlers(nfds, fds);
-
-    incoming_udp_interval += Config.comm_incoming.udp.average - nevents;
-
-    if (incoming_udp_interval < 0)
-        incoming_udp_interval = 0;
-
-    if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
-        incoming_udp_interval = MAX_INCOMING_INTERVAL;
-
-    if (nevents > INCOMING_UDP_MAX)
-        nevents = INCOMING_UDP_MAX;
-
-    statCounter.comm_udp_incoming.count(nevents);
+    if (statCounter.comm_udp.startPolling(nfds)) {
+        auto n = comm_check_incoming_select_handlers(nfds, fds);
+        statCounter.comm_udp.finishPolling(n, Config.comm_incoming.udp);
+    }
 }
 
 static void
@@ -289,8 +213,6 @@ comm_select_tcp_incoming(void)
 {
     int nfds = 0;
     int fds[MAXTCPLISTENPORTS];
-    int nevents;
-    tcp_io_events = 0;
 
     // XXX: only poll sockets that won't be deferred. But how do we identify them?
 
@@ -301,19 +223,10 @@ comm_select_tcp_incoming(void)
         }
     }
 
-    nevents = comm_check_incoming_select_handlers(nfds, fds);
-    incoming_tcp_interval += Config.comm_incoming.tcp.average - nevents;
-
-    if (incoming_tcp_interval < 0)
-        incoming_tcp_interval = 0;
-
-    if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
-        incoming_tcp_interval = MAX_INCOMING_INTERVAL;
-
-    if (nevents > INCOMING_TCP_MAX)
-        nevents = INCOMING_TCP_MAX;
-
-    statCounter.comm_tcp_incoming.count(nevents);
+    if (statCounter.comm_tcp.startPolling(nfds)) {
+        auto n = comm_check_incoming_select_handlers(nfds, fds);
+        statCounter.comm_tcp.finishPolling(n, Config.comm_incoming.tcp);
+    }
 }
 
 /* Select on all sockets; call handlers for those that are ready. */
@@ -346,13 +259,13 @@ Comm::DoSelect(int msec)
         getCurrentTime();
         start = current_dtime;
 
-        if (commCheckUdpIncoming)
+        if (statCounter.comm_udp.check())
             comm_select_udp_incoming();
 
-        if (commCheckDnsIncoming)
+        if (statCounter.comm_dns.check())
             comm_select_dns_incoming();
 
-        if (commCheckTcpIncoming)
+        if (statCounter.comm_tcp.check())
             comm_select_tcp_incoming();
 
         calldns = calludp = calltcp = 0;
@@ -487,13 +400,13 @@ Comm::DoSelect(int msec)
                     hdl(fd, F->read_data);
                     ++ statCounter.select_fds;
 
-                    if (commCheckUdpIncoming)
+                    if (statCounter.comm_udp.check())
                         comm_select_udp_incoming();
 
-                    if (commCheckDnsIncoming)
+                    if (statCounter.comm_dns.check())
                         comm_select_dns_incoming();
 
-                    if (commCheckTcpIncoming)
+                    if (statCounter.comm_tcp.check())
                         comm_select_tcp_incoming();
                 }
             }
@@ -541,13 +454,13 @@ Comm::DoSelect(int msec)
                     hdl(fd, F->write_data);
                     ++ statCounter.select_fds;
 
-                    if (commCheckUdpIncoming)
+                    if (statCounter.comm_udp.check())
                         comm_select_udp_incoming();
 
-                    if (commCheckDnsIncoming)
+                    if (statCounter.comm_dns.check())
                         comm_select_dns_incoming();
 
-                    if (commCheckTcpIncoming)
+                    if (statCounter.comm_tcp.check())
                         comm_select_tcp_incoming();
                 }
             }
@@ -578,11 +491,6 @@ comm_select_dns_incoming(void)
 {
     int nfds = 0;
     int fds[3];
-    int nevents;
-    dns_io_events = 0;
-
-    if (DnsSocketA < 0 && DnsSocketB < 0)
-        return;
 
     if (DnsSocketA >= 0) {
         fds[nfds] = DnsSocketA;
@@ -594,23 +502,10 @@ comm_select_dns_incoming(void)
         ++nfds;
     }
 
-    nevents = comm_check_incoming_select_handlers(nfds, fds);
-
-    if (nevents < 0)
-        return;
-
-    incoming_dns_interval += Config.comm_incoming.dns.average - nevents;
-
-    if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
-        incoming_dns_interval = Config.comm_incoming.dns.min_poll;
-
-    if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
-        incoming_dns_interval = MAX_INCOMING_INTERVAL;
-
-    if (nevents > INCOMING_DNS_MAX)
-        nevents = INCOMING_DNS_MAX;
-
-    statCounter.comm_dns_incoming.count(nevents);
+    if (statCounter.comm_dns.startPolling(nfds)) {
+        auto n = comm_check_incoming_select_handlers(nfds, fds);
+        statCounter.comm_dns.finishPolling(n, Config.comm_incoming.dns);
+    }
 }
 
 void
@@ -703,19 +598,19 @@ static void
 commIncomingStats(StoreEntry * sentry)
 {
     storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
-                      incoming_udp_interval >> INCOMING_FACTOR);
+                      statCounter.comm_udp.interval >> Comm::Incoming::Factor);
     storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
-                      incoming_dns_interval >> INCOMING_FACTOR);
+                      statCounter.comm_dns.interval >> Comm::Incoming::Factor);
     storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
-                      incoming_tcp_interval >> INCOMING_FACTOR);
+                      statCounter.comm_tcp.interval >> Comm::Incoming::Factor);
     storeAppendPrintf(sentry, "\n");
     storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
     storeAppendPrintf(sentry, "ICP Messages handled per comm_select_udp_incoming() call:\n");
-    statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
+    statCounter.comm_udp.history.dump(sentry, statHistIntDumper);
     storeAppendPrintf(sentry, "DNS Messages handled per comm_select_dns_incoming() call:\n");
-    statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
+    statCounter.comm_dns.history.dump(sentry, statHistIntDumper);
     storeAppendPrintf(sentry, "HTTP Messages handled per comm_select_tcp_incoming() call:\n");
-    statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
+    statCounter.comm_tcp.history.dump(sentry, statHistIntDumper);
 }
 
 void
index eb78565cddd3134647032ac303be76c5644835f6..e26360930db89a52a578168b326ea8a8fafa0297 100644 (file)
@@ -1230,9 +1230,11 @@ statCountersInitSpecial(StatCounters * C)
      * Cache Digest Stuff
      */
     C->cd.on_xition_count.enumInit(CacheDigestHashFuncCount);
-    C->comm_udp_incoming.enumInit(INCOMING_UDP_MAX);
-    C->comm_dns_incoming.enumInit(INCOMING_DNS_MAX);
-    C->comm_tcp_incoming.enumInit(INCOMING_TCP_MAX);
+#if USE_POLL || USE_SELECT
+    C->comm_udp.init(INCOMING_UDP_MAX);
+    C->comm_dns.init(INCOMING_DNS_MAX);
+    C->comm_tcp.init(INCOMING_TCP_MAX);
+#endif
     C->select_fds_hist.enumInit(256);   /* was SQUID_MAXFD, but it is way too much. It is OK to crop this statistics */
 }