<tag>--without-psapi</tag>
<p>Disable auto-detection of Windows PSAPI library.
+ <tag>CPPFLAGS=-DINCOMING_FACTOR=</tag>
+	<p>Control the responsiveness of the listening sockets when Squid
+	is built to use poll(2) or select(2).
+	The higher the INCOMING_FACTOR, the more slowly the algorithm
+	responds to load spikes, increases, or decreases in demand. A value
+	between 3 and 8 is recommended. The default is 5.
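+	For example, a build that should adapt more slowly to load changes
+	might be configured as follows (the value 7 is illustrative only):
+	<verb>
+	./configure CPPFLAGS=-DINCOMING_FACTOR=7
+	</verb>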
+
</descrip>
<sect1>Changes to existing options<label id="modifiedoptions">
MessageDelayConfig MessageDelay;
#endif
- struct {
- struct {
+ struct CommIncoming {
+ struct Measure {
int average;
int min_poll;
} dns, udp, tcp;
#define STATCOUNTERS_H_
#include "base/ByteCounter.h"
+#include "comm/Incoming.h"
#include "StatHist.h"
#if USE_CACHE_DIGESTS
double cputime = 0.0;
struct timeval timestamp;
- StatHist comm_udp_incoming;
- StatHist comm_dns_incoming;
- StatHist comm_tcp_incoming;
+#if USE_POLL || USE_SELECT
+ Comm::Incoming comm_dns;
+ Comm::Incoming comm_tcp;
+ Comm::Incoming comm_udp;
+#endif
StatHist select_fds_hist;
struct {
--- /dev/null
+/*
+ * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#include "squid.h"
+
+#if USE_POLL || USE_SELECT
+#include "comm/Incoming.h"
+
+void
+Comm::Incoming::finishPolling(int n, SquidConfig::CommIncoming::Measure &cfg)
+{
+ if (n < 0)
+ return;
+
+ interval += cfg.average - n;
+
+ if (interval < cfg.min_poll)
+ interval = cfg.min_poll;
+
+ if (interval > MaxInterval)
+ interval = MaxInterval;
+
+ if (n > nMaximum)
+ n = nMaximum;
+
+ history.count(n);
+}
+
+#endif /* USE_POLL || USE_SELECT */
--- /dev/null
+/*
+ * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#ifndef SQUID__SRC_COMM_INCOMING_H
+#define SQUID__SRC_COMM_INCOMING_H
+
+#if USE_POLL || USE_SELECT
+
+#include "SquidConfig.h"
+#include "StatHist.h"
+
+namespace Comm
+{
+
+/**
+ * Automatic tuning for incoming requests.
+ *
+ * INCOMING sockets are the listening ports for transport protocols.
+ * We need to check these fairly regularly, but how often? When the
+ * load increases, we want to check the incoming sockets more often.
+ * If one protocol has a lot of incoming traffic, then its sockets
+ * need to be checked more often than the others.
+ *
+ * \copydoc Comm::Incoming::check()
+ *
+ * \copydoc Comm::Incoming::finishPolling()
+ *
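+ * A minimal usage sketch (the real callers are in comm_poll.cc and
+ * comm_select.cc; CollectListeningFds() is a hypothetical helper that
+ * fills fds[] with the open listening FDs and returns their count):
+ *
+ * \code
+ * if (statCounter.comm_udp.check()) {
+ *     int fds[2];
+ *     const int nfds = CollectListeningFds(fds);
+ *     if (statCounter.comm_udp.startPolling(nfds)) {
+ *         const auto n = comm_check_incoming_poll_handlers(nfds, fds);
+ *         statCounter.comm_udp.finishPolling(n, Config.comm_incoming.udp);
+ *     }
+ * }
+ * \endcode
+ *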
+ * Caveats:
+ *
+ * \copydoc Comm::Incoming::Factor
+ *
+ * \copydoc Comm::Incoming::MaxInterval
+ */
+class Incoming
+{
+public:
+#if !defined(INCOMING_FACTOR)
+#define INCOMING_FACTOR 5
+#endif
+ /**
+ * The higher the INCOMING_FACTOR, the slower the algorithm will
+ * respond to load spikes/increases/decreases in demand. A value
+ * between 3 and 8 is recommended.
+ */
+ static const int Factor = INCOMING_FACTOR;
+
+ /**
+ * Magic upper limit on interval.
+ * At the largest value the cache will effectively be idling.
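+     * With the default Factor of 5 this is (256 << 5) = 8192, i.e. an
+     * effective ceiling of 256 I/O events between checks of the
+     * incoming sockets.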
+ */
+ static const int MaxInterval = (256 << Factor);
+
+ // TODO replace with constructor initialization
+ void init(int n) { nMaximum = n; history.enumInit(n); }
+
+ /**
+ * Preparation for polling incoming sockets.
+ *
+ * \param n the number of relevant listening FDs currently open.
+ *
+ * \return whether it is possible to check with poll(2)/select(2).
+ */
+ bool startPolling(int n) { ioEvents = 0; return (n > 0); }
+
+ /**
+     * Finalize and update records after the incoming sockets have been polled.
+ *
+ * The new interval is calculated as the current interval,
+ * plus what we would like to see as an average number of events,
+ * minus the number of events just processed.
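+     *
+     * In code terms (using the parameter names from Incoming.cc) this is
+     * roughly: interval += cfg.average - n, with the result then clamped
+     * to the [cfg.min_poll, MaxInterval] range.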
+ */
+ void finishPolling(int, SquidConfig::CommIncoming::Measure &);
+
+ /**
+ * Every time we check incoming sockets, we count how many new
+ * messages or connections were processed. This is used to adjust
+ * the interval for the next iteration.
+ *
+ * \return whether it is time to check incoming sockets.
+ */
+ bool check() { return (++ioEvents > (interval >> Factor)); }
+
+    /**
+ * How many normal I/O events to process before checking
+ * incoming sockets again.
+ *
+ * \note We store the interval multiplied by a factor of
+ * (2^Factor) to have some pseudo-floating
+ * point precision.
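+     *       For example, with the default Factor of 5 the initial value
+     *       (16 << 5) = 512 represents an effective interval of 16 I/O
+     *       events, and check() compares ioEvents against (interval >> 5).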
+ */
+ int interval = (16 << Factor);
+
+    /** History of the number of I/O events handled per poll of the
+     * listening ports.
+     *
+     * The current values of the intervals, as well as histograms of
+     * 'incoming_events', are shown in the cache manager
+     * 'comm_*_incoming' reports.
+ */
+ StatHist history;
+
+private:
+ /**
+ * Count of normal I/O events processed since last call to
+ * startPolling(). When ioEvents > interval, it is time to
+ * check incoming sockets again.
+ */
+ int ioEvents = 0;
+
+ /**
+ * Maximum value to record for number of I/O events within
+ * an interval. Set using init(N).
+ */
+ int nMaximum = 0;
+};
+
+} // namespace Comm
+
+#endif /* USE_POLL || USE_SELECT */
+#endif /* SQUID__SRC_COMM_INCOMING_H */
Connection.cc \
Connection.h \
Flag.h \
+ Incoming.cc \
+ Incoming.h \
IoCallback.cc \
IoCallback.h \
Loops.h \
static int comm_check_incoming_poll_handlers(int nfds, int *fds);
static void comm_poll_dns_incoming(void);
-/*
- * Automatic tuning for incoming requests:
- *
- * INCOMING sockets are the ICP and HTTP ports. We need to check these
- * fairly regularly, but how often? When the load increases, we
- * want to check the incoming sockets more often. If we have a lot
- * of incoming ICP, then we need to check these sockets more than
- * if we just have HTTP.
- *
- * The variables 'incoming_icp_interval' and 'incoming_http_interval'
- * determine how many normal I/O events to process before checking
- * incoming sockets again. Note we store the incoming_interval
- * multiplied by a factor of (2^INCOMING_FACTOR) to have some
- * pseudo-floating point precision.
- *
- * The variable 'udp_io_events' and 'tcp_io_events' counts how many normal
- * I/O events have been processed since the last check on the incoming
- * sockets. When io_events > incoming_interval, its time to check incoming
- * sockets.
- *
- * Every time we check incoming sockets, we count how many new messages
- * or connections were processed. This is used to adjust the
- * incoming_interval for the next iteration. The new incoming_interval
- * is calculated as the current incoming_interval plus what we would
- * like to see as an average number of events minus the number of
- * events just processed.
- *
- * incoming_interval = incoming_interval + target_average - number_of_events_processed
- *
- * There are separate incoming_interval counters for TCP-based, UDP-based, and DNS events
- *
- * You can see the current values of the incoming_interval's, as well as
- * a histogram of 'incoming_events' by asking the cache manager
- * for 'comm_incoming', e.g.:
- *
- * % ./client mgr:comm_poll_incoming
- *
- * Caveats:
- *
- * - We have MAX_INCOMING_INTEGER as a magic upper limit on
- * incoming_interval for both types of sockets. At the
- * largest value the cache will effectively be idling.
- *
- * - The higher the INCOMING_FACTOR, the slower the algorithm will
- * respond to load spikes/increases/decreases in demand. A value
- * between 3 and 8 is recommended.
- */
-
-#define MAX_INCOMING_INTEGER 256
-#define INCOMING_FACTOR 5
-#define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
-static int udp_io_events = 0; ///< I/O events passed since last UDP receiver socket poll
-static int dns_io_events = 0; ///< I/O events passed since last DNS socket poll
-static int tcp_io_events = 0; ///< I/O events passed since last TCP listening socket poll
-static int incoming_udp_interval = 16 << INCOMING_FACTOR;
-static int incoming_dns_interval = 16 << INCOMING_FACTOR;
-static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
-#define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval>> INCOMING_FACTOR))
-#define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval>> INCOMING_FACTOR))
-#define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval>> INCOMING_FACTOR))
-
void
Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
{
{
int nfds = 0;
int fds[2];
- int nevents;
- udp_io_events = 0;
if (Comm::IsConnOpen(icpIncomingConn)) {
fds[nfds] = icpIncomingConn->fd;
++nfds;
}
- if (nfds == 0)
- return;
-
- nevents = comm_check_incoming_poll_handlers(nfds, fds);
-
- incoming_udp_interval += Config.comm_incoming.udp.average - nevents;
-
- if (incoming_udp_interval < Config.comm_incoming.udp.min_poll)
- incoming_udp_interval = Config.comm_incoming.udp.min_poll;
-
- if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
- incoming_udp_interval = MAX_INCOMING_INTERVAL;
-
- if (nevents > INCOMING_UDP_MAX)
- nevents = INCOMING_UDP_MAX;
-
- statCounter.comm_udp_incoming.count(nevents);
+ if (statCounter.comm_udp.startPolling(nfds)) {
+ auto n = comm_check_incoming_poll_handlers(nfds, fds);
+ statCounter.comm_udp.finishPolling(n, Config.comm_incoming.udp);
+ }
}
static void
{
int nfds = 0;
int fds[MAXTCPLISTENPORTS];
- int j;
- int nevents;
- tcp_io_events = 0;
// XXX: only poll sockets that won't be deferred. But how do we identify them?
- for (j = 0; j < NHttpSockets; ++j) {
- if (HttpSockets[j] < 0)
- continue;
-
- fds[nfds] = HttpSockets[j];
- ++nfds;
+ for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
+ if (Comm::IsConnOpen(s->listenConn)) {
+ fds[nfds] = s->listenConn->fd;
+ ++nfds;
+ }
}
- nevents = comm_check_incoming_poll_handlers(nfds, fds);
- incoming_tcp_interval = incoming_tcp_interval
- + Config.comm_incoming.tcp.average - nevents;
-
- if (incoming_tcp_interval < Config.comm_incoming.tcp.min_poll)
- incoming_tcp_interval = Config.comm_incoming.tcp.min_poll;
-
- if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
- incoming_tcp_interval = MAX_INCOMING_INTERVAL;
-
- if (nevents > INCOMING_TCP_MAX)
- nevents = INCOMING_TCP_MAX;
-
- statCounter.comm_tcp_incoming.count(nevents);
+ if (statCounter.comm_tcp.startPolling(nfds)) {
+ auto n = comm_check_incoming_poll_handlers(nfds, fds);
+ statCounter.comm_tcp.finishPolling(n, Config.comm_incoming.tcp);
+ }
}
/* poll all sockets; call handlers for those that are ready. */
getCurrentTime();
start = current_dtime;
- if (commCheckUdpIncoming)
+ if (statCounter.comm_udp.check())
comm_poll_udp_incoming();
- if (commCheckDnsIncoming)
+ if (statCounter.comm_dns.check())
comm_poll_dns_incoming();
- if (commCheckTcpIncoming)
+ if (statCounter.comm_tcp.check())
comm_poll_tcp_incoming();
calldns = calludp = calltcp = 0;
hdl(fd, F->read_data);
++ statCounter.select_fds;
- if (commCheckUdpIncoming)
+ if (statCounter.comm_udp.check())
comm_poll_udp_incoming();
- if (commCheckDnsIncoming)
+ if (statCounter.comm_dns.check())
comm_poll_dns_incoming();
- if (commCheckTcpIncoming)
+ if (statCounter.comm_tcp.check())
comm_poll_tcp_incoming();
}
}
hdl(fd, F->write_data);
++ statCounter.select_fds;
- if (commCheckUdpIncoming)
+ if (statCounter.comm_udp.check())
comm_poll_udp_incoming();
- if (commCheckDnsIncoming)
+ if (statCounter.comm_dns.check())
comm_poll_dns_incoming();
- if (commCheckTcpIncoming)
+ if (statCounter.comm_tcp.check())
comm_poll_tcp_incoming();
}
}
{
int nfds = 0;
int fds[2];
- int nevents;
- dns_io_events = 0;
-
- if (DnsSocketA < 0 && DnsSocketB < 0)
- return;
if (DnsSocketA >= 0) {
fds[nfds] = DnsSocketA;
++nfds;
}
- nevents = comm_check_incoming_poll_handlers(nfds, fds);
-
- if (nevents < 0)
- return;
-
- incoming_dns_interval += Config.comm_incoming.dns.average - nevents;
-
- if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
- incoming_dns_interval = Config.comm_incoming.dns.min_poll;
-
- if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
- incoming_dns_interval = MAX_INCOMING_INTERVAL;
-
- if (nevents > INCOMING_DNS_MAX)
- nevents = INCOMING_DNS_MAX;
-
- statCounter.comm_dns_incoming.count(nevents);
+ if (statCounter.comm_dns.startPolling(nfds)) {
+ auto n = comm_check_incoming_poll_handlers(nfds, fds);
+ statCounter.comm_dns.finishPolling(n, Config.comm_incoming.dns);
+ }
}
static void
commIncomingStats(StoreEntry * sentry)
{
storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
- incoming_udp_interval >> INCOMING_FACTOR);
+ statCounter.comm_udp.interval >> Comm::Incoming::Factor);
storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
- incoming_dns_interval >> INCOMING_FACTOR);
+ statCounter.comm_dns.interval >> Comm::Incoming::Factor);
storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
- incoming_tcp_interval >> INCOMING_FACTOR);
+ statCounter.comm_tcp.interval >> Comm::Incoming::Factor);
storeAppendPrintf(sentry, "\n");
storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
storeAppendPrintf(sentry, "ICP Messages handled per comm_poll_udp_incoming() call:\n");
- statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
+ statCounter.comm_udp.history.dump(sentry, statHistIntDumper);
storeAppendPrintf(sentry, "DNS Messages handled per comm_poll_dns_incoming() call:\n");
- statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
+ statCounter.comm_dns.history.dump(sentry, statHistIntDumper);
storeAppendPrintf(sentry, "HTTP Messages handled per comm_poll_tcp_incoming() call:\n");
- statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
+ statCounter.comm_tcp.history.dump(sentry, statHistIntDumper);
}
/* Called by async-io or diskd to speed up the polling */
static int nreadfds;
static int nwritefds;
-/*
- * Automatic tuning for incoming requests:
- *
- * INCOMING sockets are the ICP and HTTP ports. We need to check these
- * fairly regularly, but how often? When the load increases, we
- * want to check the incoming sockets more often. If we have a lot
- * of incoming ICP, then we need to check these sockets more than
- * if we just have HTTP.
- *
- * The variables 'incoming_udp_interval' and 'incoming_tcp_interval'
- * determine how many normal I/O events to process before checking
- * incoming sockets again. Note we store the incoming_interval
- * multiplied by a factor of (2^INCOMING_FACTOR) to have some
- * pseudo-floating point precision.
- *
- * The variable 'udp_io_events' and 'tcp_io_events' counts how many normal
- * I/O events have been processed since the last check on the incoming
- * sockets. When io_events > incoming_interval, its time to check incoming
- * sockets.
- *
- * Every time we check incoming sockets, we count how many new messages
- * or connections were processed. This is used to adjust the
- * incoming_interval for the next iteration. The new incoming_interval
- * is calculated as the current incoming_interval plus what we would
- * like to see as an average number of events minus the number of
- * events just processed.
- *
- * incoming_interval = incoming_interval + target_average - number_of_events_processed
- *
- * There are separate incoming_interval counters for DNS, UDP and TCP events
- *
- * You can see the current values of the incoming_interval's, as well as
- * a histogram of 'incoming_events' by asking the cache manager
- * for 'comm_incoming', e.g.:
- *
- * % ./client mgr:comm_incoming
- *
- * Caveats:
- *
- * - We have MAX_INCOMING_INTEGER as a magic upper limit on
- * incoming_interval for both types of sockets. At the
- * largest value the cache will effectively be idling.
- *
- * - The higher the INCOMING_FACTOR, the slower the algorithm will
- * respond to load spikes/increases/decreases in demand. A value
- * between 3 and 8 is recommended.
- */
-
-#define MAX_INCOMING_INTEGER 256
-#define INCOMING_FACTOR 5
-#define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
-static int udp_io_events = 0;
-static int dns_io_events = 0;
-static int tcp_io_events = 0;
-static int incoming_udp_interval = 16 << INCOMING_FACTOR;
-static int incoming_dns_interval = 16 << INCOMING_FACTOR;
-static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
-#define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval>> INCOMING_FACTOR))
-#define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval>> INCOMING_FACTOR))
-#define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval>> INCOMING_FACTOR))
-
void
Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
{
{
int nfds = 0;
int fds[2];
- int nevents;
- udp_io_events = 0;
if (Comm::IsConnOpen(icpIncomingConn)) {
fds[nfds] = icpIncomingConn->fd;
++nfds;
}
- if (nfds == 0)
- return;
-
- nevents = comm_check_incoming_select_handlers(nfds, fds);
-
- incoming_udp_interval += Config.comm_incoming.udp.average - nevents;
-
- if (incoming_udp_interval < 0)
- incoming_udp_interval = 0;
-
- if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
- incoming_udp_interval = MAX_INCOMING_INTERVAL;
-
- if (nevents > INCOMING_UDP_MAX)
- nevents = INCOMING_UDP_MAX;
-
- statCounter.comm_udp_incoming.count(nevents);
+ if (statCounter.comm_udp.startPolling(nfds)) {
+ auto n = comm_check_incoming_select_handlers(nfds, fds);
+ statCounter.comm_udp.finishPolling(n, Config.comm_incoming.udp);
+ }
}
static void
{
int nfds = 0;
int fds[MAXTCPLISTENPORTS];
- int nevents;
- tcp_io_events = 0;
// XXX: only poll sockets that won't be deferred. But how do we identify them?
}
}
- nevents = comm_check_incoming_select_handlers(nfds, fds);
- incoming_tcp_interval += Config.comm_incoming.tcp.average - nevents;
-
- if (incoming_tcp_interval < 0)
- incoming_tcp_interval = 0;
-
- if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
- incoming_tcp_interval = MAX_INCOMING_INTERVAL;
-
- if (nevents > INCOMING_TCP_MAX)
- nevents = INCOMING_TCP_MAX;
-
- statCounter.comm_tcp_incoming.count(nevents);
+ if (statCounter.comm_tcp.startPolling(nfds)) {
+ auto n = comm_check_incoming_select_handlers(nfds, fds);
+ statCounter.comm_tcp.finishPolling(n, Config.comm_incoming.tcp);
+ }
}
/* Select on all sockets; call handlers for those that are ready. */
getCurrentTime();
start = current_dtime;
- if (commCheckUdpIncoming)
+ if (statCounter.comm_udp.check())
comm_select_udp_incoming();
- if (commCheckDnsIncoming)
+ if (statCounter.comm_dns.check())
comm_select_dns_incoming();
- if (commCheckTcpIncoming)
+ if (statCounter.comm_tcp.check())
comm_select_tcp_incoming();
calldns = calludp = calltcp = 0;
hdl(fd, F->read_data);
++ statCounter.select_fds;
- if (commCheckUdpIncoming)
+ if (statCounter.comm_udp.check())
comm_select_udp_incoming();
- if (commCheckDnsIncoming)
+ if (statCounter.comm_dns.check())
comm_select_dns_incoming();
- if (commCheckTcpIncoming)
+ if (statCounter.comm_tcp.check())
comm_select_tcp_incoming();
}
}
hdl(fd, F->write_data);
++ statCounter.select_fds;
- if (commCheckUdpIncoming)
+ if (statCounter.comm_udp.check())
comm_select_udp_incoming();
- if (commCheckDnsIncoming)
+ if (statCounter.comm_dns.check())
comm_select_dns_incoming();
- if (commCheckTcpIncoming)
+ if (statCounter.comm_tcp.check())
comm_select_tcp_incoming();
}
}
{
int nfds = 0;
int fds[3];
- int nevents;
- dns_io_events = 0;
-
- if (DnsSocketA < 0 && DnsSocketB < 0)
- return;
if (DnsSocketA >= 0) {
fds[nfds] = DnsSocketA;
++nfds;
}
- nevents = comm_check_incoming_select_handlers(nfds, fds);
-
- if (nevents < 0)
- return;
-
- incoming_dns_interval += Config.comm_incoming.dns.average - nevents;
-
- if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
- incoming_dns_interval = Config.comm_incoming.dns.min_poll;
-
- if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
- incoming_dns_interval = MAX_INCOMING_INTERVAL;
-
- if (nevents > INCOMING_DNS_MAX)
- nevents = INCOMING_DNS_MAX;
-
- statCounter.comm_dns_incoming.count(nevents);
+ if (statCounter.comm_dns.startPolling(nfds)) {
+ auto n = comm_check_incoming_select_handlers(nfds, fds);
+ statCounter.comm_dns.finishPolling(n, Config.comm_incoming.dns);
+ }
}
void
commIncomingStats(StoreEntry * sentry)
{
storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
- incoming_udp_interval >> INCOMING_FACTOR);
+ statCounter.comm_udp.interval >> Comm::Incoming::Factor);
storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
- incoming_dns_interval >> INCOMING_FACTOR);
+ statCounter.comm_dns.interval >> Comm::Incoming::Factor);
storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
- incoming_tcp_interval >> INCOMING_FACTOR);
+ statCounter.comm_tcp.interval >> Comm::Incoming::Factor);
storeAppendPrintf(sentry, "\n");
storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
storeAppendPrintf(sentry, "ICP Messages handled per comm_select_udp_incoming() call:\n");
- statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
+ statCounter.comm_udp.history.dump(sentry, statHistIntDumper);
storeAppendPrintf(sentry, "DNS Messages handled per comm_select_dns_incoming() call:\n");
- statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
+ statCounter.comm_dns.history.dump(sentry, statHistIntDumper);
storeAppendPrintf(sentry, "HTTP Messages handled per comm_select_tcp_incoming() call:\n");
- statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
+ statCounter.comm_tcp.history.dump(sentry, statHistIntDumper);
}
void
* Cache Digest Stuff
*/
C->cd.on_xition_count.enumInit(CacheDigestHashFuncCount);
- C->comm_udp_incoming.enumInit(INCOMING_UDP_MAX);
- C->comm_dns_incoming.enumInit(INCOMING_DNS_MAX);
- C->comm_tcp_incoming.enumInit(INCOMING_TCP_MAX);
+#if USE_POLL || USE_SELECT
+ C->comm_udp.init(INCOMING_UDP_MAX);
+ C->comm_dns.init(INCOMING_DNS_MAX);
+ C->comm_tcp.init(INCOMING_TCP_MAX);
+#endif
C->select_fds_hist.enumInit(256); /* was SQUID_MAXFD, but it is way too much. It is OK to crop this statistics */
}