/*
 * DEBUG: section 05    Socket Functions
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 *  Squid is the result of efforts by numerous individuals from
 *  the Internet community; see the CONTRIBUTORS file for full
 *  details. Many organizations have provided support for Squid's
 *  development; see the SPONSORS file for full details. Squid is
 *  Copyrighted (C) 2001 by the Regents of the University of
 *  California; see the COPYRIGHT file for full details. Squid
 *  incorporates software developed and/or copyrighted by other
 *  sources; see the CREDITS file for full details.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */
#include "squid.h"

#if USE_POLL
#include "anyp/PortCfg.h"
#include "comm/Connection.h"
#include "comm/Loops.h"
#include "fd.h"
#include "fde.h"
#include "globals.h"
#include "ICP.h"
#include "mgr/Registration.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "SquidTime.h"
#include "StatCounters.h"
#include "Store.h"

#if HAVE_POLL_H
#include <poll.h>
#endif
#if HAVE_ERRNO_H
#include <errno.h>
#endif

/* Needed for poll() on Linux at least */
#if USE_POLL
#ifndef POLLRDNORM
#define POLLRDNORM POLLIN
#endif
#ifndef POLLWRNORM
#define POLLWRNORM POLLOUT
#endif
#endif

static int MAX_POLL_TIME = 1000; /* see also Comm::QuickPollRequired() */

#ifndef howmany
#define howmany(x, y) (((x)+((y)-1))/(y))
#endif
#ifndef NBBY
#define NBBY 8
#endif
#define FD_MASK_BYTES sizeof(fd_mask)
#define FD_MASK_BITS (FD_MASK_BYTES*NBBY)

/* STATIC */
static int fdIsTcpListen(int fd);
static int fdIsUdpListen(int fd);
static int fdIsDns(int fd);
static OBJH commIncomingStats;
static int comm_check_incoming_poll_handlers(int nfds, int *fds);
static void comm_poll_dns_incoming(void);

/*
 * Automatic tuning for incoming requests:
 *
 * INCOMING sockets are the ICP and HTTP ports. We need to check these
 * fairly regularly, but how often? When the load increases, we
 * want to check the incoming sockets more often. If we have a lot
 * of incoming ICP, then we need to check these sockets more than
 * if we just have HTTP.
 *
 * The variables 'incoming_udp_interval', 'incoming_tcp_interval', and
 * 'incoming_dns_interval' determine how many normal I/O events to
 * process before checking incoming sockets again. Note we store the
 * incoming_interval multiplied by a factor of (2^INCOMING_FACTOR) to
 * have some pseudo-floating point precision.
 *
 * The variables 'udp_io_events' and 'tcp_io_events' count how many normal
 * I/O events have been processed since the last check on the incoming
 * sockets. When io_events > incoming_interval, it is time to check the
 * incoming sockets.
 *
 * Every time we check incoming sockets, we count how many new messages
 * or connections were processed. This is used to adjust the
 * incoming_interval for the next iteration. The new incoming_interval
 * is calculated as the current incoming_interval plus what we would
 * like to see as an average number of events, minus the number of
 * events just processed:
 *
 *  incoming_interval = incoming_interval + target_average - number_of_events_processed
 *
 * There are separate incoming_interval counters for TCP-based, UDP-based,
 * and DNS events.
 *
 * You can see the current values of the incoming_interval counters, as
 * well as a histogram of 'incoming_events', by asking the cache manager
 * for 'comm_poll_incoming', e.g.:
 *
 *  % ./client mgr:comm_poll_incoming
 *
 * Caveats:
 *
 *  - We have MAX_INCOMING_INTEGER as a magic upper limit on
 *    incoming_interval for all socket types. At the largest value
 *    the cache will effectively be idling.
 *
 *  - The higher the INCOMING_FACTOR, the slower the algorithm will
 *    respond to load spikes/increases/decreases in demand. A value
 *    between 3 and 8 is recommended.
 */
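
/*
 * Worked example (illustrative only, using the defaults defined just below:
 * INCOMING_FACTOR == 5 and an initial interval of 16 << 5 == 512):
 *
 *   - The effective check interval is incoming_udp_interval >> INCOMING_FACTOR,
 *     i.e. 512 >> 5 == 16, so the UDP sockets are polled after every 16 normal
 *     I/O events (see the commCheckUdpIncoming macro below).
 *
 *   - Suppose (hypothetically) Config.comm_incoming.udp.average is 4 and one
 *     check handles nevents == 10 messages.  Then
 *
 *         incoming_udp_interval = 512 + 4 - 10 = 506
 *
 *     and the effective interval drops to 506 >> 5 == 15, so a busy socket is
 *     checked slightly more often.  A quiet check (nevents == 0) nudges the
 *     interval back up by 'average' instead, subject to the min_poll and
 *     MAX_INCOMING_INTERVAL clamps applied in comm_poll_udp_incoming().
 */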

#define MAX_INCOMING_INTEGER 256
#define INCOMING_FACTOR 5
#define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
static int udp_io_events = 0; ///< I/O events passed since last UDP receiver socket poll
static int dns_io_events = 0; ///< I/O events passed since last DNS socket poll
static int tcp_io_events = 0; ///< I/O events passed since last TCP listening socket poll
static int incoming_udp_interval = 16 << INCOMING_FACTOR;
static int incoming_dns_interval = 16 << INCOMING_FACTOR;
static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
#define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval >> INCOMING_FACTOR))
#define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval >> INCOMING_FACTOR))
#define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval >> INCOMING_FACTOR))

void
Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
{
    fde *F = &fd_table[fd];
    assert(fd >= 0);
    assert(F->flags.open);
    debugs(5, 5, HERE << "FD " << fd << ", type=" << type <<
           ", handler=" << handler << ", client_data=" << client_data <<
           ", timeout=" << timeout);

    if (type & COMM_SELECT_READ) {
        F->read_handler = handler;
        F->read_data = client_data;
    }

    if (type & COMM_SELECT_WRITE) {
        F->write_handler = handler;
        F->write_data = client_data;
    }

    if (timeout)
        F->timeout = squid_curtime + timeout;
}
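
/*
 * Usage sketch (illustrative only; the handler and data names are hypothetical,
 * not taken from this file).  A caller that wants to be notified when 'fd'
 * becomes readable registers a PF-style callback:
 *
 *     Comm::SetSelect(fd, COMM_SELECT_READ, someReadHandler, someCallbackData, 0);
 *
 * Because SetSelect() stores the handler unconditionally, registering a NULL
 * handler for the same type clears the interest again:
 *
 *     Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
 */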

void
Comm::ResetSelect(int fd)
{
}

static int
fdIsUdpListen(int fd)
{
    if (icpIncomingConn != NULL && icpIncomingConn->fd == fd)
        return 1;

    if (icpOutgoingConn != NULL && icpOutgoingConn->fd == fd)
        return 1;

    return 0;
}

static int
fdIsDns(int fd)
{
    if (fd == DnsSocketA)
        return 1;

    if (fd == DnsSocketB)
        return 1;

    return 0;
}

static int
fdIsTcpListen(int fd)
{
    for (const AnyP::PortCfg *s = Config.Sockaddr.http; s; s = s->next) {
        if (s->listenConn != NULL && s->listenConn->fd == fd)
            return 1;
    }

    return 0;
}

static int
comm_check_incoming_poll_handlers(int nfds, int *fds)
{
    int i;
    int fd;
    PF *hdl = NULL;
    int npfds;

    struct pollfd pfds[3 + MAXTCPLISTENPORTS];
    PROF_start(comm_check_incoming);
    incoming_sockets_accepted = 0;

    for (i = npfds = 0; i < nfds; ++i) {
        int events;
        fd = fds[i];
        events = 0;

        if (fd_table[fd].read_handler)
            events |= POLLRDNORM;

        if (fd_table[fd].write_handler)
            events |= POLLWRNORM;

        if (events) {
            pfds[npfds].fd = fd;
            pfds[npfds].events = events;
            pfds[npfds].revents = 0;
            ++npfds;
        }
    }

    if (!nfds) {
        PROF_stop(comm_check_incoming);
        return -1;
    }

    getCurrentTime();
    ++ statCounter.syscalls.selects;

    if (poll(pfds, npfds, 0) < 1) {
        PROF_stop(comm_check_incoming);
        return incoming_sockets_accepted;
    }

    for (i = 0; i < npfds; ++i) {
        int revents;

        if (((revents = pfds[i].revents) == 0) || ((fd = pfds[i].fd) == -1))
            continue;

        if (revents & (POLLRDNORM | POLLIN | POLLHUP | POLLERR)) {
            if ((hdl = fd_table[fd].read_handler)) {
                fd_table[fd].read_handler = NULL;
                hdl(fd, fd_table[fd].read_data);
            } else if (pfds[i].events & POLLRDNORM)
                debugs(5, DBG_IMPORTANT, "comm_poll_incoming: FD " << fd << " NULL read handler");
        }

        if (revents & (POLLWRNORM | POLLOUT | POLLHUP | POLLERR)) {
            if ((hdl = fd_table[fd].write_handler)) {
                fd_table[fd].write_handler = NULL;
                hdl(fd, fd_table[fd].write_data);
            } else if (pfds[i].events & POLLWRNORM)
                debugs(5, DBG_IMPORTANT, "comm_poll_incoming: FD " << fd << " NULL write_handler");
        }
    }

    PROF_stop(comm_check_incoming);
    return incoming_sockets_accepted;
}

static void
comm_poll_udp_incoming(void)
{
    int nfds = 0;
    int fds[2];
    int nevents;
    udp_io_events = 0;

    if (Comm::IsConnOpen(icpIncomingConn)) {
        fds[nfds] = icpIncomingConn->fd;
        ++nfds;
    }

    if (icpIncomingConn != icpOutgoingConn && Comm::IsConnOpen(icpOutgoingConn)) {
        fds[nfds] = icpOutgoingConn->fd;
        ++nfds;
    }

    if (nfds == 0)
        return;

    nevents = comm_check_incoming_poll_handlers(nfds, fds);

    incoming_udp_interval += Config.comm_incoming.udp.average - nevents;

    if (incoming_udp_interval < Config.comm_incoming.udp.min_poll)
        incoming_udp_interval = Config.comm_incoming.udp.min_poll;

    if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
        incoming_udp_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_UDP_MAX)
        nevents = INCOMING_UDP_MAX;

    statCounter.comm_udp_incoming.count(nevents);
}

static void
comm_poll_tcp_incoming(void)
{
    int nfds = 0;
    int fds[MAXTCPLISTENPORTS];
    int j;
    int nevents;
    tcp_io_events = 0;

    // XXX: only poll sockets that won't be deferred. But how do we identify them?

    for (j = 0; j < NHttpSockets; ++j) {
        if (HttpSockets[j] < 0)
            continue;

        fds[nfds] = HttpSockets[j];
        ++nfds;
    }

    nevents = comm_check_incoming_poll_handlers(nfds, fds);
    incoming_tcp_interval = incoming_tcp_interval
                            + Config.comm_incoming.tcp.average - nevents;

    if (incoming_tcp_interval < Config.comm_incoming.tcp.min_poll)
        incoming_tcp_interval = Config.comm_incoming.tcp.min_poll;

    if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
        incoming_tcp_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_TCP_MAX)
        nevents = INCOMING_TCP_MAX;

    statCounter.comm_tcp_incoming.count(nevents);
}

/* poll all sockets; call handlers for those that are ready. */
comm_err_t
Comm::DoSelect(int msec)
{
    struct pollfd pfds[SQUID_MAXFD];

    PF *hdl = NULL;
    int fd;
    int maxfd;
    unsigned long nfds;
    unsigned long npending;
    int num;
    int calldns = 0, calludp = 0, calltcp = 0;
    double timeout = current_dtime + (msec / 1000.0);

    do {
        double start;
        getCurrentTime();
        start = current_dtime;

        if (commCheckUdpIncoming)
            comm_poll_udp_incoming();

        if (commCheckDnsIncoming)
            comm_poll_dns_incoming();

        if (commCheckTcpIncoming)
            comm_poll_tcp_incoming();

        PROF_start(comm_poll_prep_pfds);

        calldns = calludp = calltcp = 0;

        nfds = 0;

        npending = 0;

        maxfd = Biggest_FD + 1;

        for (int i = 0; i < maxfd; ++i) {
            int events;
            events = 0;
            /* Check each open socket for a handler. */

            if (fd_table[i].read_handler)
                events |= POLLRDNORM;

            if (fd_table[i].write_handler)
                events |= POLLWRNORM;

            if (events) {
                pfds[nfds].fd = i;
                pfds[nfds].events = events;
                pfds[nfds].revents = 0;
                ++nfds;

                if ((events & POLLRDNORM) && fd_table[i].flags.read_pending)
                    ++npending;
            }
        }

        PROF_stop(comm_poll_prep_pfds);

        if (npending)
            msec = 0;

        if (msec > MAX_POLL_TIME)
            msec = MAX_POLL_TIME;

        /* nothing to do
         *
         * Note that this will only ever trigger when there are no log files
         * and stdout/err/in are all closed too.
         */
        if (nfds == 0 && npending == 0) {
            if (shutting_down)
                return COMM_SHUTDOWN;
            else
                return COMM_IDLE;
        }

        for (;;) {
            PROF_start(comm_poll_normal);
            ++ statCounter.syscalls.selects;
            num = poll(pfds, nfds, msec);
            ++ statCounter.select_loops;
            PROF_stop(comm_poll_normal);

            if (num >= 0 || npending > 0)
                break;

            if (ignoreErrno(errno))
                continue;

            debugs(5, DBG_CRITICAL, "comm_poll: poll failure: " << xstrerror());

            assert(errno != EINVAL);

            return COMM_ERROR;

            /* NOTREACHED */
        }

        getCurrentTime();

        debugs(5, num ? 5 : 8, "comm_poll: " << num << "+" << npending << " FDs ready");
        statCounter.select_fds_hist.count(num);

        if (num == 0 && npending == 0)
            continue;

        /* scan each socket but the accept socket. Poll this
         * more frequently to minimize losses due to the 5 connect
         * limit in SunOS */
        PROF_start(comm_handle_ready_fd);

        for (size_t loopIndex = 0; loopIndex < nfds; ++loopIndex) {
            fde *F;
            int revents = pfds[loopIndex].revents;
            fd = pfds[loopIndex].fd;

            if (fd == -1)
                continue;

            if (fd_table[fd].flags.read_pending)
                revents |= POLLIN;

            if (revents == 0)
                continue;

            if (fdIsUdpListen(fd)) {
                calludp = 1;
                continue;
            }

            if (fdIsDns(fd)) {
                calldns = 1;
                continue;
            }

            if (fdIsTcpListen(fd)) {
                calltcp = 1;
                continue;
            }

            F = &fd_table[fd];

            if (revents & (POLLRDNORM | POLLIN | POLLHUP | POLLERR)) {
                debugs(5, 6, "comm_poll: FD " << fd << " ready for reading");

                if ((hdl = F->read_handler)) {
                    PROF_start(comm_read_handler);
                    F->read_handler = NULL;
                    F->flags.read_pending = false;
                    hdl(fd, F->read_data);
                    PROF_stop(comm_read_handler);
                    ++ statCounter.select_fds;

                    if (commCheckUdpIncoming)
                        comm_poll_udp_incoming();

                    if (commCheckDnsIncoming)
                        comm_poll_dns_incoming();

                    if (commCheckTcpIncoming)
                        comm_poll_tcp_incoming();
                }
            }

            if (revents & (POLLWRNORM | POLLOUT | POLLHUP | POLLERR)) {
                debugs(5, 6, "comm_poll: FD " << fd << " ready for writing");

                if ((hdl = F->write_handler)) {
                    PROF_start(comm_write_handler);
                    F->write_handler = NULL;
                    hdl(fd, F->write_data);
                    PROF_stop(comm_write_handler);
                    ++ statCounter.select_fds;

                    if (commCheckUdpIncoming)
                        comm_poll_udp_incoming();

                    if (commCheckDnsIncoming)
                        comm_poll_dns_incoming();

                    if (commCheckTcpIncoming)
                        comm_poll_tcp_incoming();
                }
            }

            if (revents & POLLNVAL) {
                AsyncCall::Pointer ch;
                debugs(5, DBG_CRITICAL, "WARNING: FD " << fd << " has handlers, but it's invalid.");
                debugs(5, DBG_CRITICAL, "FD " << fd << " is a " << fdTypeStr[F->type]);
                debugs(5, DBG_CRITICAL, "--> " << F->desc);
                debugs(5, DBG_CRITICAL, "tmout:" << F->timeoutHandler << "read:" <<
                       F->read_handler << " write:" << F->write_handler);

                for (ch = F->closeHandler; ch != NULL; ch = ch->Next())
                    debugs(5, DBG_CRITICAL, " close handler: " << ch);

                if (F->closeHandler != NULL) {
                    commCallCloseHandlers(fd);
                } else if (F->timeoutHandler != NULL) {
                    debugs(5, DBG_CRITICAL, "comm_poll: Calling Timeout Handler");
                    ScheduleCallHere(F->timeoutHandler);
                }

                F->closeHandler = NULL;
                F->timeoutHandler = NULL;
                F->read_handler = NULL;
                F->write_handler = NULL;

                if (F->flags.open)
                    fd_close(fd);
            }
        }
569
570 PROF_stop(comm_handle_ready_fd);
571
65d448bc
AJ
572 if (calludp)
573 comm_poll_udp_incoming();
62e76326 574
575 if (calldns)
576 comm_poll_dns_incoming();
577
65d448bc
AJ
578 if (calltcp)
579 comm_poll_tcp_incoming();
62e76326 580
62e76326 581 getCurrentTime();
582
583 statCounter.select_time += (current_dtime - start);
584
585 return COMM_OK;
586 } while (timeout > current_dtime);
587
4a7a3d56 588 debugs(5, 8, "comm_poll: time out: " << squid_curtime << ".");
62e76326 589
1b3db6d9 590 return COMM_TIMEOUT;
591}
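
/*
 * Illustrative only (not part of the original file): a minimal driver loop
 * sketch based on the return codes used above.  The real Squid main loop
 * wraps Comm::DoSelect() in additional engines and event scheduling.
 *
 *     for (;;) {
 *         const comm_err_t rc = Comm::DoSelect(1000);   // wait up to 1000 ms
 *
 *         if (rc == COMM_SHUTDOWN)
 *             break;      // nothing left to poll and we are shutting down
 *
 *         // COMM_OK, COMM_TIMEOUT and COMM_IDLE simply mean "poll again";
 *         // COMM_ERROR indicates an unexpected poll() failure.
 *     }
 */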

static void
comm_poll_dns_incoming(void)
{
    int nfds = 0;
    int fds[2];
    int nevents;
    dns_io_events = 0;

    if (DnsSocketA < 0 && DnsSocketB < 0)
        return;

    if (DnsSocketA >= 0) {
        fds[nfds] = DnsSocketA;
        ++nfds;
    }

    if (DnsSocketB >= 0) {
        fds[nfds] = DnsSocketB;
        ++nfds;
    }

    nevents = comm_check_incoming_poll_handlers(nfds, fds);

    if (nevents < 0)
        return;

    incoming_dns_interval += Config.comm_incoming.dns.average - nevents;

    if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
        incoming_dns_interval = Config.comm_incoming.dns.min_poll;

    if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
        incoming_dns_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_DNS_MAX)
        nevents = INCOMING_DNS_MAX;

    statCounter.comm_dns_incoming.count(nevents);
}

static void
commPollRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("comm_poll_incoming",
                        "comm_incoming() stats",
                        commIncomingStats, 0, 1);
}

void
Comm::SelectLoopInit(void)
{
    commPollRegisterWithCacheManager();
}

static void
commIncomingStats(StoreEntry * sentry)
{
    storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
                      incoming_udp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
                      incoming_dns_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
                      incoming_tcp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "\n");
    storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
    storeAppendPrintf(sentry, "ICP Messages handled per comm_poll_udp_incoming() call:\n");
    statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "DNS Messages handled per comm_poll_dns_incoming() call:\n");
    statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "HTTP Messages handled per comm_poll_tcp_incoming() call:\n");
    statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
}

/* Called by async-io or diskd to speed up the polling */
void
Comm::QuickPollRequired(void)
{
    MAX_POLL_TIME = 10;
}

#endif /* USE_POLL */