]> git.ipfire.org Git - thirdparty/squid.git/blame - src/comm/ModPoll.cc
Source Format Enforcement (#763)
[thirdparty/squid.git] / src / comm / ModPoll.cc
CommitLineData
1b3db6d9 1/*
f70aedc4 2 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
1b3db6d9 3 *
bbc27441
AJ
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
1b3db6d9 7 */
bbc27441
AJ
8
9/* DEBUG: section 05 Socket Functions */
10
f7f3304a 11#include "squid.h"
d841c88d
AJ
12
13#if USE_POLL
65d448bc 14#include "anyp/PortCfg.h"
1b76e6c1 15#include "comm/Connection.h"
d841c88d 16#include "comm/Loops.h"
c4ad1349 17#include "fd.h"
d841c88d 18#include "fde.h"
582c2af2 19#include "globals.h"
1b76e6c1 20#include "ICP.h"
8822ebee 21#include "mgr/Registration.h"
582c2af2 22#include "profiler/Profiler.h"
4d5904f7 23#include "SquidConfig.h"
985c86bc 24#include "SquidTime.h"
e4f1fdae 25#include "StatCounters.h"
e6ccf245 26#include "Store.h"
1b3db6d9 27
1a30fdf5 28#include <cerrno>
dc47f531
AJ
29#if HAVE_POLL_H
30#include <poll.h>
31#endif
32
33/* Needed for poll() on Linux at least */
34#if USE_POLL
35#ifndef POLLRDNORM
36#define POLLRDNORM POLLIN
37#endif
38#ifndef POLLWRNORM
39#define POLLWRNORM POLLOUT
40#endif
41#endif
42
f53969cc 43static int MAX_POLL_TIME = 1000; /* see also Comm::QuickPollRequired() */
1b3db6d9 44
45#ifndef howmany
46#define howmany(x, y) (((x)+((y)-1))/(y))
47#endif
48#ifndef NBBY
49#define NBBY 8
50#endif
51#define FD_MASK_BYTES sizeof(fd_mask)
52#define FD_MASK_BITS (FD_MASK_BYTES*NBBY)
53
54/* STATIC */
65d448bc
AJ
55static int fdIsTcpListen(int fd);
56static int fdIsUdpListen(int fd);
1b3db6d9 57static int fdIsDns(int fd);
58static OBJH commIncomingStats;
59static int comm_check_incoming_poll_handlers(int nfds, int *fds);
60static void comm_poll_dns_incoming(void);
1b3db6d9 61
62/*
63 * Automatic tuning for incoming requests:
64 *
65 * INCOMING sockets are the ICP and HTTP ports. We need to check these
66 * fairly regularly, but how often? When the load increases, we
67 * want to check the incoming sockets more often. If we have a lot
68 * of incoming ICP, then we need to check these sockets more than
69 * if we just have HTTP.
70 *
26ac0430 71 * The variables 'incoming_icp_interval' and 'incoming_http_interval'
1b3db6d9 72 * determine how many normal I/O events to process before checking
73 * incoming sockets again. Note we store the incoming_interval
2f8abb64 74 * multiplied by a factor of (2^INCOMING_FACTOR) to have some
1b3db6d9 75 * pseudo-floating point precision.
76 *
65d448bc 77 * The variable 'udp_io_events' and 'tcp_io_events' counts how many normal
1b3db6d9 78 * I/O events have been processed since the last check on the incoming
79 * sockets. When io_events > incoming_interval, its time to check incoming
80 * sockets.
81 *
82 * Every time we check incoming sockets, we count how many new messages
83 * or connections were processed. This is used to adjust the
84 * incoming_interval for the next iteration. The new incoming_interval
85 * is calculated as the current incoming_interval plus what we would
86 * like to see as an average number of events minus the number of
87 * events just processed.
88 *
89 * incoming_interval = incoming_interval + target_average - number_of_events_processed
90 *
65d448bc 91 * There are separate incoming_interval counters for TCP-based, UDP-based, and DNS events
26ac0430 92 *
1b3db6d9 93 * You can see the current values of the incoming_interval's, as well as
94 * a histogram of 'incoming_events' by asking the cache manager
95 * for 'comm_incoming', e.g.:
96 *
62ee09ca 97 * % ./client mgr:comm_poll_incoming
1b3db6d9 98 *
99 * Caveats:
100 *
101 * - We have MAX_INCOMING_INTEGER as a magic upper limit on
102 * incoming_interval for both types of sockets. At the
103 * largest value the cache will effectively be idling.
104 *
105 * - The higher the INCOMING_FACTOR, the slower the algorithm will
106 * respond to load spikes/increases/decreases in demand. A value
107 * between 3 and 8 is recommended.
108 */
109
110#define MAX_INCOMING_INTEGER 256
111#define INCOMING_FACTOR 5
112#define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
65d448bc
AJ
113static int udp_io_events = 0; ///< I/O events passed since last UDP receiver socket poll
114static int dns_io_events = 0; ///< I/O events passed since last DNS socket poll
115static int tcp_io_events = 0; ///< I/O events passed since last TCP listening socket poll
116static int incoming_udp_interval = 16 << INCOMING_FACTOR;
1b3db6d9 117static int incoming_dns_interval = 16 << INCOMING_FACTOR;
65d448bc
AJ
118static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
119#define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval>> INCOMING_FACTOR))
120#define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval>> INCOMING_FACTOR))
121#define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval>> INCOMING_FACTOR))
1b3db6d9 122
1b3db6d9 123void
d841c88d 124Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
1b3db6d9 125{
126 fde *F = &fd_table[fd];
127 assert(fd >= 0);
508e3438 128 assert(F->flags.open || (!handler && !client_data && !timeout));
48e7baac
AJ
129 debugs(5, 5, HERE << "FD " << fd << ", type=" << type <<
130 ", handler=" << handler << ", client_data=" << client_data <<
131 ", timeout=" << timeout);
62e76326 132
1b3db6d9 133 if (type & COMM_SELECT_READ) {
62e76326 134 F->read_handler = handler;
135 F->read_data = client_data;
1b3db6d9 136 }
62e76326 137
1b3db6d9 138 if (type & COMM_SELECT_WRITE) {
62e76326 139 F->write_handler = handler;
140 F->write_data = client_data;
1b3db6d9 141 }
62e76326 142
1b3db6d9 143 if (timeout)
62e76326 144 F->timeout = squid_curtime + timeout;
1b3db6d9 145}
146
147static int
65d448bc 148fdIsUdpListen(int fd)
1b3db6d9 149{
1b76e6c1 150 if (icpIncomingConn != NULL && icpIncomingConn->fd == fd)
62e76326 151 return 1;
152
1b76e6c1 153 if (icpOutgoingConn != NULL && icpOutgoingConn->fd == fd)
62e76326 154 return 1;
155
1b3db6d9 156 return 0;
157}
158
159static int
160fdIsDns(int fd)
161{
4d6c8504
AJ
162 if (fd == DnsSocketA)
163 return 1;
164
165 if (fd == DnsSocketB)
62e76326 166 return 1;
167
1b3db6d9 168 return 0;
169}
170
171static int
65d448bc 172fdIsTcpListen(int fd)
1b3db6d9 173{
d00790b2 174 for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
65d448bc 175 if (s->listenConn != NULL && s->listenConn->fd == fd)
62e76326 176 return 1;
1b3db6d9 177 }
62e76326 178
1b3db6d9 179 return 0;
180}
181
/// Zero-timeout poll of the given "incoming" sockets and dispatch of any
/// pending read/write handlers.
/// \param nfds  number of descriptors in fds
/// \param fds   descriptors to check
/// \return -1 when nfds is zero; otherwise the number of connections accepted
///         by the dispatched handlers (via the incoming_sockets_accepted global).
static int
comm_check_incoming_poll_handlers(int nfds, int *fds)
{
    int i;
    int fd;
    PF *hdl = NULL;
    int npfds;

    // worst case: ICP in + ICP out + DNS + every TCP listener
    struct pollfd pfds[3 + MAXTCPLISTENPORTS];
    PROF_start(comm_check_incoming);
    incoming_sockets_accepted = 0;

    // Build the pollfd array from only those fds that have a handler
    // registered; npfds may end up smaller than nfds.
    for (i = npfds = 0; i < nfds; ++i) {
        int events;
        fd = fds[i];
        events = 0;

        if (fd_table[fd].read_handler)
            events |= POLLRDNORM;

        if (fd_table[fd].write_handler)
            events |= POLLWRNORM;

        if (events) {
            pfds[npfds].fd = fd;
            pfds[npfds].events = events;
            pfds[npfds].revents = 0;
            ++npfds;
        }
    }

    // NOTE(review): this checks nfds (the caller's count), not npfds; with
    // nfds > 0 but no registered handlers we still call poll() on zero
    // entries, which harmlessly returns 0 below.
    if (!nfds) {
        PROF_stop(comm_check_incoming);
        return -1;
    }

    getCurrentTime();
    ++ statCounter.syscalls.selects;

    // Zero timeout: this is a non-blocking check squeezed between normal I/O.
    if (poll(pfds, npfds, 0) < 1) {
        PROF_stop(comm_check_incoming);
        return incoming_sockets_accepted;
    }

    for (i = 0; i < npfds; ++i) {
        int revents;

        if (((revents = pfds[i].revents) == 0) || ((fd = pfds[i].fd) == -1))
            continue;

        // HUP/ERR are treated as readable/writable so handlers get a chance
        // to observe and clean up the error condition.
        if (revents & (POLLRDNORM | POLLIN | POLLHUP | POLLERR)) {
            if ((hdl = fd_table[fd].read_handler)) {
                // Clear before calling: handlers are one-shot and may re-register.
                fd_table[fd].read_handler = NULL;
                hdl(fd, fd_table[fd].read_data);
            } else if (pfds[i].events & POLLRDNORM)
                debugs(5, DBG_IMPORTANT, "comm_poll_incoming: FD " << fd << " NULL read handler");
        }

        if (revents & (POLLWRNORM | POLLOUT | POLLHUP | POLLERR)) {
            if ((hdl = fd_table[fd].write_handler)) {
                fd_table[fd].write_handler = NULL;
                hdl(fd, fd_table[fd].write_data);
            } else if (pfds[i].events & POLLWRNORM)
                debugs(5, DBG_IMPORTANT, "comm_poll_incoming: FD " << fd << " NULL write_handler");
        }
    }

    PROF_stop(comm_check_incoming);
    return incoming_sockets_accepted;
}
252
253static void
65d448bc 254comm_poll_udp_incoming(void)
1b3db6d9 255{
256 int nfds = 0;
257 int fds[2];
258 int nevents;
65d448bc 259 udp_io_events = 0;
62e76326 260
098346fd
FC
261 if (Comm::IsConnOpen(icpIncomingConn)) {
262 fds[nfds] = icpIncomingConn->fd;
263 ++nfds;
264 }
62e76326 265
098346fd
FC
266 if (icpIncomingConn != icpOutgoingConn && Comm::IsConnOpen(icpOutgoingConn)) {
267 fds[nfds] = icpOutgoingConn->fd;
268 ++nfds;
269 }
62e76326 270
1b3db6d9 271 if (nfds == 0)
62e76326 272 return;
273
1b3db6d9 274 nevents = comm_check_incoming_poll_handlers(nfds, fds);
62e76326 275
65d448bc 276 incoming_udp_interval += Config.comm_incoming.udp.average - nevents;
62e76326 277
65d448bc
AJ
278 if (incoming_udp_interval < Config.comm_incoming.udp.min_poll)
279 incoming_udp_interval = Config.comm_incoming.udp.min_poll;
62e76326 280
65d448bc
AJ
281 if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
282 incoming_udp_interval = MAX_INCOMING_INTERVAL;
62e76326 283
65d448bc
AJ
284 if (nevents > INCOMING_UDP_MAX)
285 nevents = INCOMING_UDP_MAX;
62e76326 286
65d448bc 287 statCounter.comm_udp_incoming.count(nevents);
1b3db6d9 288}
289
290static void
65d448bc 291comm_poll_tcp_incoming(void)
1b3db6d9 292{
293 int nfds = 0;
65d448bc 294 int fds[MAXTCPLISTENPORTS];
1b3db6d9 295 int j;
296 int nevents;
65d448bc 297 tcp_io_events = 0;
62e76326 298
65d448bc 299 // XXX: only poll sockets that won't be deferred. But how do we identify them?
a46d2c0e 300
098346fd 301 for (j = 0; j < NHttpSockets; ++j) {
62e76326 302 if (HttpSockets[j] < 0)
303 continue;
304
098346fd
FC
305 fds[nfds] = HttpSockets[j];
306 ++nfds;
1b3db6d9 307 }
62e76326 308
1b3db6d9 309 nevents = comm_check_incoming_poll_handlers(nfds, fds);
65d448bc 310 incoming_tcp_interval = incoming_tcp_interval
04401ab0 311 + Config.comm_incoming.tcp.average - nevents;
62e76326 312
65d448bc
AJ
313 if (incoming_tcp_interval < Config.comm_incoming.tcp.min_poll)
314 incoming_tcp_interval = Config.comm_incoming.tcp.min_poll;
62e76326 315
65d448bc
AJ
316 if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
317 incoming_tcp_interval = MAX_INCOMING_INTERVAL;
62e76326 318
65d448bc
AJ
319 if (nevents > INCOMING_TCP_MAX)
320 nevents = INCOMING_TCP_MAX;
62e76326 321
65d448bc 322 statCounter.comm_tcp_incoming.count(nevents);
1b3db6d9 323}
324
/* poll all sockets; call handlers for those that are ready. */
/// Main poll(2)-based event loop body.
/// \param msec  maximum time to wait, in milliseconds
/// \return Comm::OK after one successful dispatch round, Comm::TIMEOUT when
///         msec elapsed with nothing ready, Comm::SHUTDOWN/IDLE when there is
///         nothing to poll, or Comm::COMM_ERROR on an unrecoverable poll error.
Comm::Flag
Comm::DoSelect(int msec)
{
    struct pollfd pfds[SQUID_MAXFD];

    PF *hdl = NULL;
    int fd;
    int maxfd;
    unsigned long nfds;
    unsigned long npending;
    int num;
    int calldns = 0, calludp = 0, calltcp = 0;
    double timeout = current_dtime + (msec / 1000.0);

    do {
        double start;
        getCurrentTime();
        start = current_dtime;

        // Give the incoming (listen/ICP/DNS) sockets a chance first if their
        // adaptive counters say they are due.
        if (commCheckUdpIncoming)
            comm_poll_udp_incoming();

        if (commCheckDnsIncoming)
            comm_poll_dns_incoming();

        if (commCheckTcpIncoming)
            comm_poll_tcp_incoming();

        PROF_start(comm_poll_prep_pfds);

        calldns = calludp = calltcp = 0;

        nfds = 0;

        npending = 0;

        maxfd = Biggest_FD + 1;

        // Build the pollfd array from every descriptor with a handler.
        for (int i = 0; i < maxfd; ++i) {
            int events;
            events = 0;
            /* Check each open socket for a handler. */

            if (fd_table[i].read_handler)
                events |= POLLRDNORM;

            if (fd_table[i].write_handler)
                events |= POLLWRNORM;

            if (events) {
                pfds[nfds].fd = i;
                pfds[nfds].events = events;
                pfds[nfds].revents = 0;
                ++nfds;

                // read_pending data is already buffered; count it so we can
                // use a zero poll timeout below instead of blocking.
                if ((events & POLLRDNORM) && fd_table[i].flags.read_pending)
                    ++npending;
            }
        }

        PROF_stop(comm_poll_prep_pfds);

        // Buffered data must be served now; do not block in poll().
        if (npending)
            msec = 0;

        if (msec > MAX_POLL_TIME)
            msec = MAX_POLL_TIME;

        /* nothing to do
         *
         * Note that this will only ever trigger when there are no log files
         * and stdout/err/in are all closed too.
         */
        if (nfds == 0 && npending == 0) {
            if (shutting_down)
                return Comm::SHUTDOWN;
            else
                return Comm::IDLE;
        }

        // Retry poll() until it succeeds or fails with a non-ignorable error.
        for (;;) {
            PROF_start(comm_poll_normal);
            ++ statCounter.syscalls.selects;
            num = poll(pfds, nfds, msec);
            // capture errno before any other call can overwrite it
            int xerrno = errno;
            ++ statCounter.select_loops;
            PROF_stop(comm_poll_normal);

            if (num >= 0 || npending > 0)
                break;

            if (ignoreErrno(xerrno))
                continue;

            debugs(5, DBG_CRITICAL, MYNAME << "poll failure: " << xstrerr(xerrno));

            // EINVAL means our pollfd array itself is broken — a program bug.
            assert(xerrno != EINVAL);

            return Comm::COMM_ERROR;

            /* NOTREACHED */
        }

        getCurrentTime();

        debugs(5, num ? 5 : 8, "comm_poll: " << num << "+" << npending << " FDs ready");
        statCounter.select_fds_hist.count(num);

        if (num == 0 && npending == 0)
            continue;

        /* scan each socket but the accept socket. Poll this
         * more frequently to minimize losses due to the 5 connect
         * limit in SunOS */
        PROF_start(comm_handle_ready_fd);

        for (size_t loopIndex = 0; loopIndex < nfds; ++loopIndex) {
            fde *F;
            int revents = pfds[loopIndex].revents;
            fd = pfds[loopIndex].fd;

            if (fd == -1)
                continue;

            // Treat buffered (already-read) data as a readiness event.
            if (fd_table[fd].flags.read_pending)
                revents |= POLLIN;

            if (revents == 0)
                continue;

            // Incoming sockets are not dispatched here; they are flagged and
            // handled by their dedicated comm_poll_*_incoming() calls below.
            if (fdIsUdpListen(fd)) {
                calludp = 1;
                continue;
            }

            if (fdIsDns(fd)) {
                calldns = 1;
                continue;
            }

            if (fdIsTcpListen(fd)) {
                calltcp = 1;
                continue;
            }

            F = &fd_table[fd];

            if (revents & (POLLRDNORM | POLLIN | POLLHUP | POLLERR)) {
                debugs(5, 6, "comm_poll: FD " << fd << " ready for reading");

                if ((hdl = F->read_handler)) {
                    PROF_start(comm_read_handler);
                    // handlers are one-shot: clear before calling
                    F->read_handler = NULL;
                    hdl(fd, F->read_data);
                    PROF_stop(comm_read_handler);
                    ++ statCounter.select_fds;

                    // Re-check incoming sockets between handlers so a long
                    // dispatch round cannot starve them.
                    if (commCheckUdpIncoming)
                        comm_poll_udp_incoming();

                    if (commCheckDnsIncoming)
                        comm_poll_dns_incoming();

                    if (commCheckTcpIncoming)
                        comm_poll_tcp_incoming();
                }
            }

            if (revents & (POLLWRNORM | POLLOUT | POLLHUP | POLLERR)) {
                debugs(5, 6, "comm_poll: FD " << fd << " ready for writing");

                if ((hdl = F->write_handler)) {
                    PROF_start(comm_write_handler);
                    F->write_handler = NULL;
                    hdl(fd, F->write_data);
                    PROF_stop(comm_write_handler);
                    ++ statCounter.select_fds;

                    if (commCheckUdpIncoming)
                        comm_poll_udp_incoming();

                    if (commCheckDnsIncoming)
                        comm_poll_dns_incoming();

                    if (commCheckTcpIncoming)
                        comm_poll_tcp_incoming();
                }
            }

            // POLLNVAL: the descriptor is not open, yet our table still has
            // handlers for it. Report, run close/timeout callbacks, and purge.
            if (revents & POLLNVAL) {
                AsyncCall::Pointer ch;
                debugs(5, DBG_CRITICAL, "WARNING: FD " << fd << " has handlers, but it's invalid.");
                debugs(5, DBG_CRITICAL, "FD " << fd << " is a " << fdTypeStr[F->type]);
                debugs(5, DBG_CRITICAL, "--> " << F->desc);
                debugs(5, DBG_CRITICAL, "tmout:" << F->timeoutHandler << "read:" <<
                       F->read_handler << " write:" << F->write_handler);

                for (ch = F->closeHandler; ch != NULL; ch = ch->Next())
                    debugs(5, DBG_CRITICAL, " close handler: " << ch);

                if (F->closeHandler != NULL) {
                    commCallCloseHandlers(fd);
                } else if (F->timeoutHandler != NULL) {
                    debugs(5, DBG_CRITICAL, "comm_poll: Calling Timeout Handler");
                    ScheduleCallHere(F->timeoutHandler);
                }

                F->closeHandler = NULL;
                F->timeoutHandler = NULL;
                F->read_handler = NULL;
                F->write_handler = NULL;

                if (F->flags.open)
                    fd_close(fd);
            }
        }

        PROF_stop(comm_handle_ready_fd);

        // Service any incoming sockets flagged during the dispatch scan.
        if (calludp)
            comm_poll_udp_incoming();

        if (calldns)
            comm_poll_dns_incoming();

        if (calltcp)
            comm_poll_tcp_incoming();

        getCurrentTime();

        statCounter.select_time += (current_dtime - start);

        return Comm::OK;
    } while (timeout > current_dtime);

    debugs(5, 8, "comm_poll: time out: " << squid_curtime << ".");

    return Comm::TIMEOUT;
}
565
1b3db6d9 566static void
567comm_poll_dns_incoming(void)
568{
569 int nfds = 0;
570 int fds[2];
571 int nevents;
572 dns_io_events = 0;
62e76326 573
055421ee 574 if (DnsSocketA < 0 && DnsSocketB < 0)
62e76326 575 return;
576
098346fd
FC
577 if (DnsSocketA >= 0) {
578 fds[nfds] = DnsSocketA;
579 ++nfds;
580 }
4d6c8504 581
098346fd
FC
582 if (DnsSocketB >= 0) {
583 fds[nfds] = DnsSocketB;
584 ++nfds;
585 }
62e76326 586
1b3db6d9 587 nevents = comm_check_incoming_poll_handlers(nfds, fds);
62e76326 588
1b3db6d9 589 if (nevents < 0)
62e76326 590 return;
591
65d448bc 592 incoming_dns_interval += Config.comm_incoming.dns.average - nevents;
62e76326 593
65d448bc
AJ
594 if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
595 incoming_dns_interval = Config.comm_incoming.dns.min_poll;
62e76326 596
1b3db6d9 597 if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
62e76326 598 incoming_dns_interval = MAX_INCOMING_INTERVAL;
599
1b3db6d9 600 if (nevents > INCOMING_DNS_MAX)
62e76326 601 nevents = INCOMING_DNS_MAX;
602
f30f7998 603 statCounter.comm_dns_incoming.count(nevents);
1b3db6d9 604}
605
/// Register the "comm_poll_incoming" cache-manager action so administrators
/// can inspect the adaptive incoming-socket polling statistics
/// (see commIncomingStats).
static void
commPollRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("comm_poll_incoming",
                        "comm_incoming() stats",
                        commIncomingStats, 0, 1);
}
613
/// One-time initialization for the poll()-based select loop: currently just
/// registers the cache-manager reporting hook.
void
Comm::SelectLoopInit(void)
{
    commPollRegisterWithCacheManager();
}
1b3db6d9 619
/// Cache-manager report callback: dumps the current adaptive polling
/// intervals (descaled by INCOMING_FACTOR) and per-socket-type event
/// histograms into sentry.
static void
commIncomingStats(StoreEntry * sentry)
{
    // Intervals are stored pre-multiplied by 2^INCOMING_FACTOR; shift back
    // for human-readable output.
    storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
                      incoming_udp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
                      incoming_dns_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
                      incoming_tcp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "\n");
    storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
    storeAppendPrintf(sentry, "ICP Messages handled per comm_poll_udp_incoming() call:\n");
    statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "DNS Messages handled per comm_poll_dns_incoming() call:\n");
    statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "HTTP Messages handled per comm_poll_tcp_incoming() call:\n");
    statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
}
638
/* Called by async-io or diskd to speed up the polling */
/// Reduce the maximum poll() wait from 1000ms to 10ms so external helpers
/// (async-io, diskd) get serviced with low latency.
void
Comm::QuickPollRequired(void)
{
    MAX_POLL_TIME = 10;
}
645
646#endif /* USE_POLL */
f53969cc 647