]> git.ipfire.org Git - thirdparty/squid.git/blame - src/comm/ModPoll.cc
SourceFormat Enforcement
[thirdparty/squid.git] / src / comm / ModPoll.cc
CommitLineData
1b3db6d9 1/*
ef57eb7b 2 * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
1b3db6d9 3 *
bbc27441
AJ
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
1b3db6d9 7 */
bbc27441
AJ
8
9/* DEBUG: section 05 Socket Functions */
10
f7f3304a 11#include "squid.h"
d841c88d
AJ
12
13#if USE_POLL
65d448bc 14#include "anyp/PortCfg.h"
1b76e6c1 15#include "comm/Connection.h"
d841c88d 16#include "comm/Loops.h"
c4ad1349 17#include "fd.h"
d841c88d 18#include "fde.h"
582c2af2 19#include "globals.h"
1b76e6c1 20#include "ICP.h"
8822ebee 21#include "mgr/Registration.h"
582c2af2 22#include "profiler/Profiler.h"
4d5904f7 23#include "SquidConfig.h"
985c86bc 24#include "SquidTime.h"
e4f1fdae 25#include "StatCounters.h"
e6ccf245 26#include "Store.h"
1b3db6d9 27
1a30fdf5 28#include <cerrno>
dc47f531
AJ
29#if HAVE_POLL_H
30#include <poll.h>
31#endif
32
33/* Needed for poll() on Linux at least */
34#if USE_POLL
35#ifndef POLLRDNORM
36#define POLLRDNORM POLLIN
37#endif
38#ifndef POLLWRNORM
39#define POLLWRNORM POLLOUT
40#endif
41#endif
42
f53969cc 43static int MAX_POLL_TIME = 1000; /* see also Comm::QuickPollRequired() */
1b3db6d9 44
45#ifndef howmany
46#define howmany(x, y) (((x)+((y)-1))/(y))
47#endif
48#ifndef NBBY
49#define NBBY 8
50#endif
51#define FD_MASK_BYTES sizeof(fd_mask)
52#define FD_MASK_BITS (FD_MASK_BYTES*NBBY)
53
54/* STATIC */
65d448bc
AJ
55static int fdIsTcpListen(int fd);
56static int fdIsUdpListen(int fd);
1b3db6d9 57static int fdIsDns(int fd);
58static OBJH commIncomingStats;
59static int comm_check_incoming_poll_handlers(int nfds, int *fds);
60static void comm_poll_dns_incoming(void);
1b3db6d9 61
62/*
63 * Automatic tuning for incoming requests:
64 *
65 * INCOMING sockets are the ICP and HTTP ports. We need to check these
66 * fairly regularly, but how often? When the load increases, we
67 * want to check the incoming sockets more often. If we have a lot
68 * of incoming ICP, then we need to check these sockets more than
69 * if we just have HTTP.
70 *
26ac0430 71 * The variables 'incoming_icp_interval' and 'incoming_http_interval'
1b3db6d9 72 * determine how many normal I/O events to process before checking
73 * incoming sockets again. Note we store the incoming_interval
 74 * multiplied by a factor of (2^INCOMING_FACTOR) to have some
75 * pseudo-floating point precision.
76 *
65d448bc 77 * The variables 'udp_io_events' and 'tcp_io_events' count how many normal
1b3db6d9 78 * I/O events have been processed since the last check on the incoming
79 * sockets. When io_events > incoming_interval, its time to check incoming
80 * sockets.
81 *
82 * Every time we check incoming sockets, we count how many new messages
83 * or connections were processed. This is used to adjust the
84 * incoming_interval for the next iteration. The new incoming_interval
85 * is calculated as the current incoming_interval plus what we would
86 * like to see as an average number of events minus the number of
87 * events just processed.
88 *
89 * incoming_interval = incoming_interval + target_average - number_of_events_processed
90 *
65d448bc 91 * There are separate incoming_interval counters for TCP-based, UDP-based, and DNS events
26ac0430 92 *
1b3db6d9 93 * You can see the current values of the incoming_interval's, as well as
94 * a histogram of 'incoming_events' by asking the cache manager
95 * for 'comm_incoming', e.g.:
96 *
62ee09ca 97 * % ./client mgr:comm_poll_incoming
1b3db6d9 98 *
99 * Caveats:
100 *
101 * - We have MAX_INCOMING_INTEGER as a magic upper limit on
102 * incoming_interval for both types of sockets. At the
103 * largest value the cache will effectively be idling.
104 *
105 * - The higher the INCOMING_FACTOR, the slower the algorithm will
106 * respond to load spikes/increases/decreases in demand. A value
107 * between 3 and 8 is recommended.
108 */
109
110#define MAX_INCOMING_INTEGER 256
111#define INCOMING_FACTOR 5
112#define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
65d448bc
AJ
113static int udp_io_events = 0; ///< I/O events passed since last UDP receiver socket poll
114static int dns_io_events = 0; ///< I/O events passed since last DNS socket poll
115static int tcp_io_events = 0; ///< I/O events passed since last TCP listening socket poll
116static int incoming_udp_interval = 16 << INCOMING_FACTOR;
1b3db6d9 117static int incoming_dns_interval = 16 << INCOMING_FACTOR;
65d448bc
AJ
118static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
119#define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval>> INCOMING_FACTOR))
120#define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval>> INCOMING_FACTOR))
121#define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval>> INCOMING_FACTOR))
1b3db6d9 122
1b3db6d9 123void
d841c88d 124Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
1b3db6d9 125{
126 fde *F = &fd_table[fd];
127 assert(fd >= 0);
128 assert(F->flags.open);
48e7baac
AJ
129 debugs(5, 5, HERE << "FD " << fd << ", type=" << type <<
130 ", handler=" << handler << ", client_data=" << client_data <<
131 ", timeout=" << timeout);
62e76326 132
1b3db6d9 133 if (type & COMM_SELECT_READ) {
62e76326 134 F->read_handler = handler;
135 F->read_data = client_data;
1b3db6d9 136 }
62e76326 137
1b3db6d9 138 if (type & COMM_SELECT_WRITE) {
62e76326 139 F->write_handler = handler;
140 F->write_data = client_data;
1b3db6d9 141 }
62e76326 142
1b3db6d9 143 if (timeout)
62e76326 144 F->timeout = squid_curtime + timeout;
1b3db6d9 145}
146
/// Forget any module-private state kept for this descriptor.
/// The poll(2) engine rebuilds its pollfd array from fd_table on every
/// DoSelect() pass and keeps no per-fd state of its own, so this is a no-op.
void
Comm::ResetSelect(int fd)
{
}
151
1b3db6d9 152static int
65d448bc 153fdIsUdpListen(int fd)
1b3db6d9 154{
1b76e6c1 155 if (icpIncomingConn != NULL && icpIncomingConn->fd == fd)
62e76326 156 return 1;
157
1b76e6c1 158 if (icpOutgoingConn != NULL && icpOutgoingConn->fd == fd)
62e76326 159 return 1;
160
1b3db6d9 161 return 0;
162}
163
164static int
165fdIsDns(int fd)
166{
4d6c8504
AJ
167 if (fd == DnsSocketA)
168 return 1;
169
170 if (fd == DnsSocketB)
62e76326 171 return 1;
172
1b3db6d9 173 return 0;
174}
175
176static int
65d448bc 177fdIsTcpListen(int fd)
1b3db6d9 178{
d00790b2 179 for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
65d448bc 180 if (s->listenConn != NULL && s->listenConn->fd == fd)
62e76326 181 return 1;
1b3db6d9 182 }
62e76326 183
1b3db6d9 184 return 0;
185}
186
/// Poll the given "incoming" descriptors (ICP/DNS/HTTP listeners) once,
/// with zero timeout, and run any ready read/write handlers.
/// \param nfds  number of entries in fds[]
/// \param fds   descriptors to check
/// \return -1 when nfds is 0; otherwise the number of connections accepted
///         by the handlers (via the global incoming_sockets_accepted).
static int
comm_check_incoming_poll_handlers(int nfds, int *fds)
{
    int i;
    int fd;
    PF *hdl = NULL;
    int npfds;

    // enough room for the ICP pair, DNS, and all TCP listeners
    struct pollfd pfds[3 + MAXTCPLISTENPORTS];
    PROF_start(comm_check_incoming);
    incoming_sockets_accepted = 0;  // handlers increment this as they accept

    // build pollfd entries only for fds that currently have a handler
    for (i = npfds = 0; i < nfds; ++i) {
        int events;
        fd = fds[i];
        events = 0;

        if (fd_table[fd].read_handler)
            events |= POLLRDNORM;

        if (fd_table[fd].write_handler)
            events |= POLLWRNORM;

        if (events) {
            pfds[npfds].fd = fd;
            pfds[npfds].events = events;
            pfds[npfds].revents = 0;
            ++npfds;
        }
    }

    // note: this checks the caller-supplied count, not npfds; with an
    // empty input list there is nothing to poll and -1 signals "no data"
    if (!nfds) {
        PROF_stop(comm_check_incoming);
        return -1;
    }

    getCurrentTime();
    ++ statCounter.syscalls.selects;

    // zero timeout: a pure readiness probe, never blocks
    if (poll(pfds, npfds, 0) < 1) {
        PROF_stop(comm_check_incoming);
        return incoming_sockets_accepted;
    }

    for (i = 0; i < npfds; ++i) {
        int revents;

        if (((revents = pfds[i].revents) == 0) || ((fd = pfds[i].fd) == -1))
            continue;

        // HUP/ERR are delivered to the read handler so it can observe EOF/error
        if (revents & (POLLRDNORM | POLLIN | POLLHUP | POLLERR)) {
            if ((hdl = fd_table[fd].read_handler)) {
                // clear before calling: handlers are one-shot and may re-register
                fd_table[fd].read_handler = NULL;
                hdl(fd, fd_table[fd].read_data);
            } else if (pfds[i].events & POLLRDNORM)
                debugs(5, DBG_IMPORTANT, "comm_poll_incoming: FD " << fd << " NULL read handler");
        }

        if (revents & (POLLWRNORM | POLLOUT | POLLHUP | POLLERR)) {
            if ((hdl = fd_table[fd].write_handler)) {
                fd_table[fd].write_handler = NULL;
                hdl(fd, fd_table[fd].write_data);
            } else if (pfds[i].events & POLLWRNORM)
                debugs(5, DBG_IMPORTANT, "comm_poll_incoming: FD " << fd << " NULL write_handler");
        }
    }

    PROF_stop(comm_check_incoming);
    return incoming_sockets_accepted;
}
257
/// Poll the UDP (ICP) sockets now and adapt incoming_udp_interval:
/// the interval grows when fewer events than Config's target average were
/// seen, and shrinks when more were seen (see the algorithm comment above).
static void
comm_poll_udp_incoming(void)
{
    int nfds = 0;
    int fds[2];  // at most the incoming and outgoing ICP sockets
    int nevents;
    udp_io_events = 0;  // restart the "I/O events since last UDP poll" counter

    if (Comm::IsConnOpen(icpIncomingConn)) {
        fds[nfds] = icpIncomingConn->fd;
        ++nfds;
    }

    // avoid polling the same socket twice when in==out
    if (icpIncomingConn != icpOutgoingConn && Comm::IsConnOpen(icpOutgoingConn)) {
        fds[nfds] = icpOutgoingConn->fd;
        ++nfds;
    }

    if (nfds == 0)
        return;

    nevents = comm_check_incoming_poll_handlers(nfds, fds);

    // feedback step: interval += target_average - events_just_processed
    incoming_udp_interval += Config.comm_incoming.udp.average - nevents;

    if (incoming_udp_interval < Config.comm_incoming.udp.min_poll)
        incoming_udp_interval = Config.comm_incoming.udp.min_poll;

    if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
        incoming_udp_interval = MAX_INCOMING_INTERVAL;

    // clamp before recording in the histogram
    if (nevents > INCOMING_UDP_MAX)
        nevents = INCOMING_UDP_MAX;

    statCounter.comm_udp_incoming.count(nevents);
}
294
/// Poll the TCP (HTTP) listening sockets now and adapt incoming_tcp_interval
/// using the same feedback scheme as the UDP variant.
/// NOTE(review): unlike comm_poll_dns_incoming(), a -1 "no sockets" result
/// from comm_check_incoming_poll_handlers() is not filtered out here before
/// the interval arithmetic — presumably harmless since the clamps below
/// bound the interval; confirm before changing.
static void
comm_poll_tcp_incoming(void)
{
    int nfds = 0;
    int fds[MAXTCPLISTENPORTS];
    int j;
    int nevents;
    tcp_io_events = 0;  // restart the "I/O events since last TCP poll" counter

    // XXX: only poll sockets that won't be deferred. But how do we identify them?

    for (j = 0; j < NHttpSockets; ++j) {
        if (HttpSockets[j] < 0)
            continue;  // slot not (or no longer) open

        fds[nfds] = HttpSockets[j];
        ++nfds;
    }

    nevents = comm_check_incoming_poll_handlers(nfds, fds);
    // feedback step: interval += target_average - events_just_processed
    incoming_tcp_interval = incoming_tcp_interval
                            + Config.comm_incoming.tcp.average - nevents;

    if (incoming_tcp_interval < Config.comm_incoming.tcp.min_poll)
        incoming_tcp_interval = Config.comm_incoming.tcp.min_poll;

    if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
        incoming_tcp_interval = MAX_INCOMING_INTERVAL;

    // clamp before recording in the histogram
    if (nevents > INCOMING_TCP_MAX)
        nevents = INCOMING_TCP_MAX;

    statCounter.comm_tcp_incoming.count(nevents);
}
329
/* poll all sockets; call handlers for those that are ready. */
/// One pass of the poll(2)-based event loop.
/// \param msec  maximum time to wait in poll(), in milliseconds
/// \return Comm::OK after servicing ready descriptors,
///         Comm::TIMEOUT when the deadline passed with nothing serviced,
///         Comm::IDLE / Comm::SHUTDOWN when there is nothing to watch,
///         Comm::COMM_ERROR on an unrecoverable poll() failure.
Comm::Flag
Comm::DoSelect(int msec)
{
    struct pollfd pfds[SQUID_MAXFD];

    PF *hdl = NULL;
    int fd;
    int maxfd;
    unsigned long nfds;
    unsigned long npending;
    int num;
    // set while scanning results; the corresponding incoming-socket pollers
    // run once after the scan instead of inline, per ready listener class
    int calldns = 0, calludp = 0, calltcp = 0;
    double timeout = current_dtime + (msec / 1000.0);

    do {
        double start;
        getCurrentTime();
        start = current_dtime;

        // service incoming sockets first if enough I/O events have passed
        // since their last check (see the commCheck*Incoming macros)
        if (commCheckUdpIncoming)
            comm_poll_udp_incoming();

        if (commCheckDnsIncoming)
            comm_poll_dns_incoming();

        if (commCheckTcpIncoming)
            comm_poll_tcp_incoming();

        PROF_start(comm_poll_prep_pfds);

        calldns = calludp = calltcp = 0;

        nfds = 0;

        npending = 0;

        maxfd = Biggest_FD + 1;

        // build the pollfd array from every descriptor with a handler
        for (int i = 0; i < maxfd; ++i) {
            int events;
            events = 0;
            /* Check each open socket for a handler. */

            if (fd_table[i].read_handler)
                events |= POLLRDNORM;

            if (fd_table[i].write_handler)
                events |= POLLWRNORM;

            if (events) {
                pfds[nfds].fd = i;
                pfds[nfds].events = events;
                pfds[nfds].revents = 0;
                ++nfds;

                // data already buffered (e.g. by SSL) — must not block in poll()
                if ((events & POLLRDNORM) && fd_table[i].flags.read_pending)
                    ++npending;
            }
        }

        PROF_stop(comm_poll_prep_pfds);

        if (npending)
            msec = 0;  // buffered data waiting: poll must return immediately

        if (msec > MAX_POLL_TIME)
            msec = MAX_POLL_TIME;

        /* nothing to do
         *
         * Note that this will only ever trigger when there are no log files
         * and stdout/err/in are all closed too.
         */
        if (nfds == 0 && npending == 0) {
            if (shutting_down)
                return Comm::SHUTDOWN;
            else
                return Comm::IDLE;
        }

        // retry poll() until it succeeds or fails with a non-ignorable errno
        for (;;) {
            PROF_start(comm_poll_normal);
            ++ statCounter.syscalls.selects;
            num = poll(pfds, nfds, msec);
            ++ statCounter.select_loops;
            PROF_stop(comm_poll_normal);

            // pending reads are serviceable even when poll() itself failed
            if (num >= 0 || npending > 0)
                break;

            if (ignoreErrno(errno))
                continue;  // e.g. interrupted by a signal — retry

            debugs(5, DBG_CRITICAL, "comm_poll: poll failure: " << xstrerror());

            assert(errno != EINVAL);

            return Comm::COMM_ERROR;

            /* NOTREACHED */
        }

        getCurrentTime();

        debugs(5, num ? 5 : 8, "comm_poll: " << num << "+" << npending << " FDs ready");
        statCounter.select_fds_hist.count(num);

        if (num == 0 && npending == 0)
            continue;  // nothing ready; loop until the deadline

        /* scan each socket but the accept socket. Poll this
         * more frequently to minimize losses due to the 5 connect
         * limit in SunOS */
        PROF_start(comm_handle_ready_fd);

        for (size_t loopIndex = 0; loopIndex < nfds; ++loopIndex) {
            fde *F;
            int revents = pfds[loopIndex].revents;
            fd = pfds[loopIndex].fd;

            if (fd == -1)
                continue;

            // treat buffered data as readable even if the kernel said nothing
            if (fd_table[fd].flags.read_pending)
                revents |= POLLIN;

            if (revents == 0)
                continue;

            // listener-class fds are deferred: flag them and service at
            // the end of the scan via comm_poll_*_incoming()
            if (fdIsUdpListen(fd)) {
                calludp = 1;
                continue;
            }

            if (fdIsDns(fd)) {
                calldns = 1;
                continue;
            }

            if (fdIsTcpListen(fd)) {
                calltcp = 1;
                continue;
            }

            F = &fd_table[fd];

            if (revents & (POLLRDNORM | POLLIN | POLLHUP | POLLERR)) {
                debugs(5, 6, "comm_poll: FD " << fd << " ready for reading");

                if ((hdl = F->read_handler)) {
                    PROF_start(comm_read_handler);
                    // one-shot: clear before calling so the handler may re-register
                    F->read_handler = NULL;
                    F->flags.read_pending = false;
                    hdl(fd, F->read_data);
                    PROF_stop(comm_read_handler);
                    ++ statCounter.select_fds;

                    // handlers may have consumed many I/O events; re-check
                    // the incoming sockets mid-scan if their quota expired
                    if (commCheckUdpIncoming)
                        comm_poll_udp_incoming();

                    if (commCheckDnsIncoming)
                        comm_poll_dns_incoming();

                    if (commCheckTcpIncoming)
                        comm_poll_tcp_incoming();
                }
            }

            if (revents & (POLLWRNORM | POLLOUT | POLLHUP | POLLERR)) {
                debugs(5, 6, "comm_poll: FD " << fd << " ready for writing");

                if ((hdl = F->write_handler)) {
                    PROF_start(comm_write_handler);
                    F->write_handler = NULL;
                    hdl(fd, F->write_data);
                    PROF_stop(comm_write_handler);
                    ++ statCounter.select_fds;

                    if (commCheckUdpIncoming)
                        comm_poll_udp_incoming();

                    if (commCheckDnsIncoming)
                        comm_poll_dns_incoming();

                    if (commCheckTcpIncoming)
                        comm_poll_tcp_incoming();
                }
            }

            // POLLNVAL: the kernel says this fd is not open, yet we still
            // track handlers for it — report loudly, run close/timeout
            // callbacks, then drop all state and close our side
            if (revents & POLLNVAL) {
                AsyncCall::Pointer ch;
                debugs(5, DBG_CRITICAL, "WARNING: FD " << fd << " has handlers, but it's invalid.");
                debugs(5, DBG_CRITICAL, "FD " << fd << " is a " << fdTypeStr[F->type]);
                debugs(5, DBG_CRITICAL, "--> " << F->desc);
                debugs(5, DBG_CRITICAL, "tmout:" << F->timeoutHandler << "read:" <<
                       F->read_handler << " write:" << F->write_handler);

                for (ch = F->closeHandler; ch != NULL; ch = ch->Next())
                    debugs(5, DBG_CRITICAL, " close handler: " << ch);

                if (F->closeHandler != NULL) {
                    commCallCloseHandlers(fd);
                } else if (F->timeoutHandler != NULL) {
                    debugs(5, DBG_CRITICAL, "comm_poll: Calling Timeout Handler");
                    ScheduleCallHere(F->timeoutHandler);
                }

                F->closeHandler = NULL;
                F->timeoutHandler = NULL;
                F->read_handler = NULL;
                F->write_handler = NULL;

                if (F->flags.open)
                    fd_close(fd);
            }
        }

        PROF_stop(comm_handle_ready_fd);

        // now service the listener classes that were flagged during the scan
        if (calludp)
            comm_poll_udp_incoming();

        if (calldns)
            comm_poll_dns_incoming();

        if (calltcp)
            comm_poll_tcp_incoming();

        getCurrentTime();

        statCounter.select_time += (current_dtime - start);

        // a productive pass returns immediately; the loop condition is only
        // reached via the "nothing ready" continue above
        return Comm::OK;
    } while (timeout > current_dtime);

    debugs(5, 8, "comm_poll: time out: " << squid_curtime << ".");

    return Comm::TIMEOUT;
}
570
/// Poll the DNS sockets now and adapt incoming_dns_interval with the same
/// feedback scheme as the UDP/TCP variants.
static void
comm_poll_dns_incoming(void)
{
    int nfds = 0;
    int fds[2];  // at most DnsSocketA (IPv4) and DnsSocketB (IPv6)
    int nevents;
    dns_io_events = 0;  // restart the "I/O events since last DNS poll" counter

    if (DnsSocketA < 0 && DnsSocketB < 0)
        return;  // DNS sockets not open

    if (DnsSocketA >= 0) {
        fds[nfds] = DnsSocketA;
        ++nfds;
    }

    if (DnsSocketB >= 0) {
        fds[nfds] = DnsSocketB;
        ++nfds;
    }

    nevents = comm_check_incoming_poll_handlers(nfds, fds);

    // -1 means nothing was polled; skip the interval/statistics update
    if (nevents < 0)
        return;

    // feedback step: interval += target_average - events_just_processed
    incoming_dns_interval += Config.comm_incoming.dns.average - nevents;

    if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
        incoming_dns_interval = Config.comm_incoming.dns.min_poll;

    if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
        incoming_dns_interval = MAX_INCOMING_INTERVAL;

    // clamp before recording in the histogram
    if (nevents > INCOMING_DNS_MAX)
        nevents = INCOMING_DNS_MAX;

    statCounter.comm_dns_incoming.count(nevents);
}
610
/// Register the "comm_poll_incoming" cache manager action so the
/// incoming-socket statistics (commIncomingStats) can be queried,
/// e.g. via `client mgr:comm_poll_incoming`.
static void
commPollRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("comm_poll_incoming",
                        "comm_incoming() stats",
                        commIncomingStats, 0, 1);
}
618
/// One-time initialization for the poll(2) event-loop module:
/// currently just registers the cache manager statistics action.
void
Comm::SelectLoopInit(void)
{
    commPollRegisterWithCacheManager();
}
1b3db6d9 624
/// Cache manager handler for "comm_poll_incoming": dumps the current
/// adaptive polling intervals and the per-call event histograms for the
/// UDP (ICP), DNS, and TCP (HTTP) incoming sockets.
static void
commIncomingStats(StoreEntry * sentry)
{
    // intervals are stored left-shifted by INCOMING_FACTOR for
    // pseudo-floating-point precision; shift back for display
    storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
                      incoming_udp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
                      incoming_dns_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
                      incoming_tcp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "\n");
    storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
    storeAppendPrintf(sentry, "ICP Messages handled per comm_poll_udp_incoming() call:\n");
    statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "DNS Messages handled per comm_poll_dns_incoming() call:\n");
    statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "HTTP Messages handled per comm_poll_tcp_incoming() call:\n");
    statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
}
643
/* Called by async-io or diskd to speed up the polling */
/// Permanently lower the poll() timeout cap from 1000ms to 10ms so that
/// completions from helper subsystems are noticed promptly.
void
Comm::QuickPollRequired(void)
{
    MAX_POLL_TIME = 10;
}
650
651#endif /* USE_POLL */
f53969cc 652