/*
 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 05    Socket Functions */

#include "squid.h"

#if USE_SELECT_WIN32
#include "anyp/PortCfg.h"
#include "comm/Connection.h"
#include "comm/Loops.h"
#include "fde.h"
#include "ICP.h"
#include "mgr/Registration.h"
#include "SquidTime.h"
#include "StatCounters.h"
#include "StatHist.h"
#include "Store.h"

#include <cerrno>

static int MAX_POLL_TIME = 1000;        /* see also Comm::QuickPollRequired() */

#ifndef howmany
#define howmany(x, y)   (((x)+((y)-1))/(y))
#endif
#ifndef NBBY
#define NBBY 8
#endif
#define FD_MASK_BYTES sizeof(fd_mask)
#define FD_MASK_BITS (FD_MASK_BYTES*NBBY)

/* STATIC */
static int examine_select(fd_set *, fd_set *);
static int fdIsTcpListener(int fd);
static int fdIsUdpListener(int fd);
static int fdIsDns(int fd);
static OBJH commIncomingStats;
static int comm_check_incoming_select_handlers(int nfds, int *fds);
static void comm_select_dns_incoming(void);
static void commUpdateReadBits(int fd, PF * handler);
static void commUpdateWriteBits(int fd, PF * handler);

static struct timeval zero_tv;
static fd_set global_readfds;
static fd_set global_writefds;
static int nreadfds;
static int nwritefds;

/*
 * Automatic tuning for incoming requests:
 *
 * INCOMING sockets are the ICP and HTTP ports. We need to check these
 * fairly regularly, but how often? When the load increases, we
 * want to check the incoming sockets more often. If we have a lot
 * of incoming ICP, then we need to check these sockets more than
 * if we just have HTTP.
 *
 * The variables 'incoming_udp_interval' and 'incoming_tcp_interval'
 * determine how many normal I/O events to process before checking
 * incoming sockets again. Note we store the incoming_interval
 * multiplied by a factor of (2^INCOMING_FACTOR) to have some
 * pseudo-floating point precision.
 *
 * The variables 'udp_io_events' and 'tcp_io_events' count how many normal
 * I/O events have been processed since the last check on the incoming
 * sockets. When io_events > incoming_interval, it is time to check the
 * incoming sockets.
 *
 * Every time we check incoming sockets, we count how many new messages
 * or connections were processed. This is used to adjust the
 * incoming_interval for the next iteration. The new incoming_interval
 * is calculated as the current incoming_interval plus what we would
 * like to see as an average number of events minus the number of
 * events just processed.
 *
 *  incoming_interval = incoming_interval + target_average - number_of_events_processed
 *
 * There are separate incoming_interval counters for DNS, UDP and TCP events.
 *
 * You can see the current values of the incoming_interval counters, as well
 * as a histogram of 'incoming_events', by asking the cache manager
 * for 'comm_incoming', e.g.:
 *
 *      % ./client mgr:comm_incoming
 *
 * Caveats:
 *
 *      - We have MAX_INCOMING_INTEGER as a magic upper limit on
 *        incoming_interval for both types of sockets. At the
 *        largest value the cache will effectively be idling.
 *
 *      - The higher the INCOMING_FACTOR, the slower the algorithm will
 *        respond to load spikes/increases/decreases in demand. A value
 *        between 3 and 8 is recommended.
 */

#define MAX_INCOMING_INTEGER 256
#define INCOMING_FACTOR 5
#define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
static int udp_io_events = 0;
static int dns_io_events = 0;
static int tcp_io_events = 0;
static int incoming_udp_interval = 16 << INCOMING_FACTOR;
static int incoming_dns_interval = 16 << INCOMING_FACTOR;
static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
#define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval >> INCOMING_FACTOR))
#define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval >> INCOMING_FACTOR))
#define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval >> INCOMING_FACTOR))

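/*
 * Worked example of the tuning arithmetic above (illustrative numbers only,
 * not actual squid.conf defaults): with INCOMING_FACTOR = 5, the initial
 * incoming_udp_interval of 16 << 5 == 512 means "check the UDP sockets after
 * about 16 normal I/O events" (512 >> 5 == 16). If the configured average
 * were 4 events per check and a check just handled 10 ICP messages, the
 * stored interval would become 512 + 4 - 10 == 506, i.e. an effective
 * interval of 506 >> 5 == 15, so the next check happens slightly sooner
 * under the heavier load.
 */
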
void
Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
{
    fde *F = &fd_table[fd];
    assert(fd >= 0);
    assert(F->flags.open);
    debugs(5, 5, HERE << "FD " << fd << ", type=" << type <<
           ", handler=" << handler << ", client_data=" << client_data <<
           ", timeout=" << timeout);

    if (type & COMM_SELECT_READ) {
        F->read_handler = handler;
        F->read_data = client_data;
        commUpdateReadBits(fd, handler);
    }

    if (type & COMM_SELECT_WRITE) {
        F->write_handler = handler;
        F->write_data = client_data;
        commUpdateWriteBits(fd, handler);
    }

    if (timeout)
        F->timeout = squid_curtime + timeout;
}

void
Comm::ResetSelect(int fd)
{
}

static int
fdIsUdpListener(int fd)
{
    if (icpIncomingConn != NULL && fd == icpIncomingConn->fd)
        return 1;

    if (icpOutgoingConn != NULL && fd == icpOutgoingConn->fd)
        return 1;

    return 0;
}

static int
fdIsDns(int fd)
{
    if (fd == DnsSocketA)
        return 1;

    if (fd == DnsSocketB)
        return 1;

    return 0;
}

static int
fdIsTcpListener(int fd)
{
    for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
        if (s->listenConn != NULL && s->listenConn->fd == fd)
            return 1;
    }

    return 0;
}

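/*
 * Run a non-blocking select() over the given list of listener/DNS FDs and
 * invoke any read/write handlers that are ready. Returns the value of
 * incoming_sockets_accepted (messages/connections accepted by those
 * handlers), or -1 when none of the FDs has a handler registered.
 */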
static int
comm_check_incoming_select_handlers(int nfds, int *fds)
{
    int i;
    int fd;
    int maxfd = 0;
    PF *hdl = NULL;
    fd_set read_mask;
    fd_set write_mask;
    fd_set errfds;
    FD_ZERO(&errfds);
    FD_ZERO(&read_mask);
    FD_ZERO(&write_mask);
    incoming_sockets_accepted = 0;

    for (i = 0; i < nfds; ++i) {
        fd = fds[i];

        if (fd_table[fd].read_handler) {
            FD_SET(fd, &read_mask);

            if (fd > maxfd)
                maxfd = fd;
        }

        if (fd_table[fd].write_handler) {
            FD_SET(fd, &write_mask);

            if (fd > maxfd)
                maxfd = fd;
        }
    }

    if (maxfd++ == 0)
        return -1;

    getCurrentTime();

    ++ statCounter.syscalls.selects;

    if (select(maxfd, &read_mask, &write_mask, &errfds, &zero_tv) < 1)
        return incoming_sockets_accepted;

    for (i = 0; i < nfds; ++i) {
        fd = fds[i];

        if (FD_ISSET(fd, &read_mask)) {
            if ((hdl = fd_table[fd].read_handler) != NULL) {
                fd_table[fd].read_handler = NULL;
                commUpdateReadBits(fd, NULL);
                hdl(fd, fd_table[fd].read_data);
            } else {
                debugs(5, DBG_IMPORTANT, "comm_select_incoming: FD " << fd << " NULL read handler");
            }
        }

        if (FD_ISSET(fd, &write_mask)) {
            if ((hdl = fd_table[fd].write_handler) != NULL) {
                fd_table[fd].write_handler = NULL;
                commUpdateWriteBits(fd, NULL);
                hdl(fd, fd_table[fd].write_data);
            } else {
                debugs(5, DBG_IMPORTANT, "comm_select_incoming: FD " << fd << " NULL write handler");
            }
        }
    }

    return incoming_sockets_accepted;
}

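/*
 * Poll the ICP incoming/outgoing sockets, then re-tune incoming_udp_interval
 * based on how many events were handled compared with the configured average.
 */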
static void
comm_select_udp_incoming(void)
{
    int nfds = 0;
    int fds[2];
    int nevents;
    udp_io_events = 0;

    if (Comm::IsConnOpen(icpIncomingConn)) {
        fds[nfds] = icpIncomingConn->fd;
        ++nfds;
    }

    if (Comm::IsConnOpen(icpOutgoingConn) && icpIncomingConn != icpOutgoingConn) {
        fds[nfds] = icpOutgoingConn->fd;
        ++nfds;
    }

    if (nfds == 0)
        return;

    nevents = comm_check_incoming_select_handlers(nfds, fds);

    incoming_udp_interval += Config.comm_incoming.udp_average - nevents;

    if (incoming_udp_interval < 0)
        incoming_udp_interval = 0;

    if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
        incoming_udp_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_UDP_MAX)
        nevents = INCOMING_UDP_MAX;

    statCounter.comm_udp_incoming.count(nevents);
}

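/*
 * Poll the HTTP listener sockets, then re-tune incoming_tcp_interval in the
 * same way as the UDP variant above.
 */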
static void
comm_select_tcp_incoming(void)
{
    int nfds = 0;
    int fds[MAXTCPLISTENPORTS];
    int nevents;
    tcp_io_events = 0;

    // XXX: only poll sockets that won't be deferred. But how do we identify them?

    for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
        if (Comm::IsConnOpen(s->listenConn)) {
            fds[nfds] = s->listenConn->fd;
            ++nfds;
        }
    }

    nevents = comm_check_incoming_select_handlers(nfds, fds);
    incoming_tcp_interval += Config.comm_incoming.tcp_average - nevents;

    if (incoming_tcp_interval < 0)
        incoming_tcp_interval = 0;

    if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
        incoming_tcp_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_TCP_MAX)
        nevents = INCOMING_TCP_MAX;

    statCounter.comm_tcp_incoming.count(nevents);
}

#define DEBUG_FDBITS 0
/* Select on all sockets; call handlers for those that are ready. */
Comm::Flag
Comm::DoSelect(int msec)
{
    fd_set readfds;
    fd_set pendingfds;
    fd_set writefds;

    PF *hdl = NULL;
    int fd;
    int maxfd;
    int num;
    int pending;
    int calludp = 0, calldns = 0, calltcp = 0;
    int j;
#if DEBUG_FDBITS

    int i;
#endif
    struct timeval poll_time;
    double timeout = current_dtime + (msec / 1000.0);
    fde *F;

    int no_bits;
    fd_set errfds;
    FD_ZERO(&errfds);

    do {
        double start;
        getCurrentTime();
        start = current_dtime;

        if (commCheckUdpIncoming)
            comm_select_udp_incoming();

        if (commCheckDnsIncoming)
            comm_select_dns_incoming();

        if (commCheckTcpIncoming)
            comm_select_tcp_incoming();

        calludp = calldns = calltcp = 0;

        maxfd = Biggest_FD + 1;

        memcpy(&readfds, &global_readfds, sizeof(global_readfds));

        memcpy(&writefds, &global_writefds, sizeof(global_writefds));

        memcpy(&errfds, &global_writefds, sizeof(global_writefds));

        /* remove stalled FDs, and deal with pending descriptors */
        pending = 0;

        FD_ZERO(&pendingfds);

        for (j = 0; j < (int) readfds.fd_count; ++j) {
            register int readfds_handle = readfds.fd_array[j];
            no_bits = 1;

            for ( fd = Biggest_FD; fd; --fd ) {
                if ( fd_table[fd].win32.handle == readfds_handle ) {
                    if (fd_table[fd].flags.open) {
                        no_bits = 0;
                        break;
                    }
                }
            }

            if (no_bits)
                continue;

            if (FD_ISSET(fd, &readfds) && fd_table[fd].flags.read_pending) {
                FD_SET(fd, &pendingfds);
                ++pending;
            }
        }

#if DEBUG_FDBITS
        for (i = 0; i < maxfd; ++i) {
            /* Check each open socket for a handler. */

            if (fd_table[i].read_handler) {
                assert(FD_ISSET(i, &readfds));
            }

            if (fd_table[i].write_handler) {
                assert(FD_ISSET(i, &writefds));
            }
        }

#endif
        if (nreadfds + nwritefds == 0) {
            assert(shutting_down);
            return Comm::SHUTDOWN;
        }

        if (msec > MAX_POLL_TIME)
            msec = MAX_POLL_TIME;

        if (pending)
            msec = 0;

        for (;;) {
            poll_time.tv_sec = msec / 1000;
            poll_time.tv_usec = (msec % 1000) * 1000;
            ++statCounter.syscalls.selects;
            num = select(maxfd, &readfds, &writefds, &errfds, &poll_time);
            ++statCounter.select_loops;

            if (num >= 0 || pending > 0)
                break;

            if (ignoreErrno(errno))
                break;

            debugs(5, DBG_CRITICAL, "comm_select: select failure: " << xstrerror());

            examine_select(&readfds, &writefds);

            return Comm::COMM_ERROR;

            /* NOTREACHED */
        }

        if (num < 0 && !pending)
            continue;

        getCurrentTime();

        debugs(5, num ? 5 : 8, "comm_select: " << num << "+" << pending << " FDs ready");

        statCounter.select_fds_hist.count(num);

        if (num == 0 && pending == 0)
            continue;

        /* Scan return fd masks for ready descriptors */
        assert(readfds.fd_count <= (unsigned int) Biggest_FD);
        assert(pendingfds.fd_count <= (unsigned int) Biggest_FD);

        for (j = 0; j < (int) readfds.fd_count; ++j) {
            register int readfds_handle = readfds.fd_array[j];
            register int pendingfds_handle = pendingfds.fd_array[j];
            register int osfhandle;
            no_bits = 1;

            for ( fd = Biggest_FD; fd; --fd ) {
                osfhandle = fd_table[fd].win32.handle;

                if (( osfhandle == readfds_handle ) ||
                        ( osfhandle == pendingfds_handle )) {
                    if (fd_table[fd].flags.open) {
                        no_bits = 0;
                        break;
                    }
                }
            }

            if (no_bits)
                continue;

#if DEBUG_FDBITS

            debugs(5, 9, "FD " << fd << " bit set for reading");

            assert(FD_ISSET(fd, &readfds));

#endif

            if (fdIsUdpListener(fd)) {
                calludp = 1;
                continue;
            }

            if (fdIsDns(fd)) {
                calldns = 1;
                continue;
            }

            if (fdIsTcpListener(fd)) {
                calltcp = 1;
                continue;
            }

            F = &fd_table[fd];
            debugs(5, 6, "comm_select: FD " << fd << " ready for reading");

            if ((hdl = F->read_handler)) {
                F->read_handler = NULL;
                F->flags.read_pending = 0;
                commUpdateReadBits(fd, NULL);
                hdl(fd, F->read_data);
                ++ statCounter.select_fds;

                if (commCheckUdpIncoming)
                    comm_select_udp_incoming();

                if (commCheckDnsIncoming)
                    comm_select_dns_incoming();

                if (commCheckTcpIncoming)
                    comm_select_tcp_incoming();
            }
        }

        assert(errfds.fd_count <= (unsigned int) Biggest_FD);

        for (j = 0; j < (int) errfds.fd_count; ++j) {
            register int errfds_handle = errfds.fd_array[j];

            for ( fd = Biggest_FD; fd; --fd ) {
                if ( fd_table[fd].win32.handle == errfds_handle )
                    break;
            }

            if (fd_table[fd].flags.open) {
                F = &fd_table[fd];

                if ((hdl = F->write_handler)) {
                    F->write_handler = NULL;
                    commUpdateWriteBits(fd, NULL);
                    hdl(fd, F->write_data);
                    ++ statCounter.select_fds;
                }
            }
        }

        assert(writefds.fd_count <= (unsigned int) Biggest_FD);

        for (j = 0; j < (int) writefds.fd_count; ++j) {
            register int writefds_handle = writefds.fd_array[j];
            no_bits = 1;

            for ( fd = Biggest_FD; fd; --fd ) {
                if ( fd_table[fd].win32.handle == writefds_handle ) {
                    if (fd_table[fd].flags.open) {
                        no_bits = 0;
                        break;
                    }
                }
            }

            if (no_bits)
                continue;

#if DEBUG_FDBITS

            debugs(5, 9, "FD " << fd << " bit set for writing");

            assert(FD_ISSET(fd, &writefds));

#endif

            if (fdIsUdpListener(fd)) {
                calludp = 1;
                continue;
            }

            if (fdIsDns(fd)) {
                calldns = 1;
                continue;
            }

            if (fdIsTcpListener(fd)) {
                calltcp = 1;
                continue;
            }

            F = &fd_table[fd];
            debugs(5, 6, "comm_select: FD " << fd << " ready for writing");

            if ((hdl = F->write_handler)) {
                F->write_handler = NULL;
                commUpdateWriteBits(fd, NULL);
                hdl(fd, F->write_data);
                ++ statCounter.select_fds;

                if (commCheckUdpIncoming)
                    comm_select_udp_incoming();

                if (commCheckDnsIncoming)
                    comm_select_dns_incoming();

                if (commCheckTcpIncoming)
                    comm_select_tcp_incoming();
            }
        }

        if (calludp)
            comm_select_udp_incoming();

        if (calldns)
            comm_select_dns_incoming();

        if (calltcp)
            comm_select_tcp_incoming();

        getCurrentTime();

        statCounter.select_time += (current_dtime - start);

        return Comm::OK;
    } while (timeout > current_dtime);

    debugs(5, 8, "comm_select: time out: " << squid_curtime);

    return Comm::TIMEOUT;
}

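/*
 * Poll the DNS sockets, then re-tune incoming_dns_interval based on how many
 * events were handled compared with the configured average, clamped to the
 * configured minimum poll interval.
 */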
static void
comm_select_dns_incoming(void)
{
    int nfds = 0;
    int fds[3];
    int nevents;
    dns_io_events = 0;

    if (DnsSocketA < 0 && DnsSocketB < 0)
        return;

    if (DnsSocketA >= 0) {
        fds[nfds] = DnsSocketA;
        ++nfds;
    }

    if (DnsSocketB >= 0) {
        fds[nfds] = DnsSocketB;
        ++nfds;
    }

    nevents = comm_check_incoming_select_handlers(nfds, fds);

    if (nevents < 0)
        return;

    incoming_dns_interval += Config.comm_incoming.dns.average - nevents;

    if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
        incoming_dns_interval = Config.comm_incoming.dns.min_poll;

    if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
        incoming_dns_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_DNS_MAX)
        nevents = INCOMING_DNS_MAX;

    statCounter.comm_dns_incoming.count(nevents);
}

void
Comm::SelectLoopInit(void)
{
    zero_tv.tv_sec = 0;
    zero_tv.tv_usec = 0;
    FD_ZERO(&global_readfds);
    FD_ZERO(&global_writefds);
    nreadfds = nwritefds = 0;

    Mgr::RegisterAction("comm_select_incoming",
                        "comm_incoming() stats",
                        commIncomingStats, 0, 1);
}

/*
 * examine_select - debug routine.
 *
 * I spent the day chasing this core dump that occurs when both the client
 * and the server side of a cache fetch simultaneously abort the
 * connection. While I haven't really studied the code to figure out how
 * it happens, the snippet below may prevent the cache from exiting:
 *
 * Call this from where the select loop fails.
 */
static int
examine_select(fd_set * readfds, fd_set * writefds)
{
    int fd = 0;
    fd_set read_x;
    fd_set write_x;

    struct timeval tv;
    AsyncCall::Pointer ch = NULL;
    fde *F = NULL;

    struct stat sb;
    debugs(5, DBG_CRITICAL, "examine_select: Examining open file descriptors...");

    for (fd = 0; fd < Squid_MaxFD; ++fd) {
        FD_ZERO(&read_x);
        FD_ZERO(&write_x);
        tv.tv_sec = tv.tv_usec = 0;

        if (FD_ISSET(fd, readfds))
            FD_SET(fd, &read_x);
        else if (FD_ISSET(fd, writefds))
            FD_SET(fd, &write_x);
        else
            continue;

        ++ statCounter.syscalls.selects;
        errno = 0;

        if (!fstat(fd, &sb)) {
            debugs(5, 5, "FD " << fd << " is valid.");
            continue;
        }

        F = &fd_table[fd];
        debugs(5, DBG_CRITICAL, "FD " << fd << ": " << xstrerror());
        debugs(5, DBG_CRITICAL, "WARNING: FD " << fd << " has handlers, but it's invalid.");
        debugs(5, DBG_CRITICAL, "FD " << fd << " is a " << fdTypeStr[F->type] << " called '" << F->desc << "'");
        debugs(5, DBG_CRITICAL, "tmout:" << F->timeoutHandler << " read:" << F->read_handler << " write:" << F->write_handler);

        for (ch = F->closeHandler; ch != NULL; ch = ch->Next())
            debugs(5, DBG_CRITICAL, " close handler: " << ch);

        if (F->closeHandler != NULL) {
            commCallCloseHandlers(fd);
        } else if (F->timeoutHandler != NULL) {
            debugs(5, DBG_CRITICAL, "examine_select: Calling Timeout Handler");
            ScheduleCallHere(F->timeoutHandler);
        }

        F->closeHandler = NULL;
        F->timeoutHandler = NULL;
        F->read_handler = NULL;
        F->write_handler = NULL;
        FD_CLR(fd, readfds);
        FD_CLR(fd, writefds);
    }

    return 0;
}

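/*
 * Cache manager handler: dumps the current incoming_*_interval values and a
 * histogram of events handled per incoming-socket check.
 */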
static void
commIncomingStats(StoreEntry * sentry)
{
    storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
                      incoming_udp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
                      incoming_dns_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
                      incoming_tcp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "\n");
    storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
    storeAppendPrintf(sentry, "ICP Messages handled per comm_select_udp_incoming() call:\n");
    statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "DNS Messages handled per comm_select_dns_incoming() call:\n");
    statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "HTTP Messages handled per comm_select_tcp_incoming() call:\n");
    statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
}

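/* Keep global_readfds and nreadfds in sync with whether the FD has a read handler. */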
void
commUpdateReadBits(int fd, PF * handler)
{
    if (handler && !FD_ISSET(fd, &global_readfds)) {
        FD_SET(fd, &global_readfds);
        ++nreadfds;
    } else if (!handler && FD_ISSET(fd, &global_readfds)) {
        FD_CLR(fd, &global_readfds);
        --nreadfds;
    }
}

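/* Keep global_writefds and nwritefds in sync with whether the FD has a write handler. */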
void
commUpdateWriteBits(int fd, PF * handler)
{
    if (handler && !FD_ISSET(fd, &global_writefds)) {
        FD_SET(fd, &global_writefds);
        ++nwritefds;
    } else if (!handler && FD_ISSET(fd, &global_writefds)) {
        FD_CLR(fd, &global_writefds);
        --nwritefds;
    }
}

/* Called by async-io or diskd to speed up the polling */
void
Comm::QuickPollRequired(void)
{
    MAX_POLL_TIME = 10;
}

#endif /* USE_SELECT_WIN32 */