]> git.ipfire.org Git - thirdparty/squid.git/blob - src/comm/ModSelect.cc
SourceFormat Enforcement
[thirdparty/squid.git] / src / comm / ModSelect.cc
1 /*
2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 05 Socket Functions */
10
11 #include "squid.h"
12
13 #if USE_SELECT
14
15 #include "anyp/PortCfg.h"
16 #include "comm/Connection.h"
17 #include "comm/Loops.h"
18 #include "fde.h"
19 #include "globals.h"
20 #include "ICP.h"
21 #include "mgr/Registration.h"
22 #include "SquidConfig.h"
23 #include "SquidTime.h"
24 #include "StatCounters.h"
25 #include "StatHist.h"
26 #include "Store.h"
27
28 #include <cerrno>
29 #if HAVE_SYS_STAT_H
30 #include <sys/stat.h>
31 #endif
32
33 static int MAX_POLL_TIME = 1000; /* see also Comm::QuickPollRequired() */
34
35 #ifndef howmany
36 #define howmany(x, y) (((x)+((y)-1))/(y))
37 #endif
38 #ifndef NBBY
39 #define NBBY 8
40 #endif
41 #define FD_MASK_BYTES sizeof(fd_mask)
42 #define FD_MASK_BITS (FD_MASK_BYTES*NBBY)
43
44 /* STATIC */
45 static int examine_select(fd_set *, fd_set *);
46 static int fdIsTcpListener(int fd);
47 static int fdIsUdpListener(int fd);
48 static int fdIsDns(int fd);
49 static OBJH commIncomingStats;
50 static int comm_check_incoming_select_handlers(int nfds, int *fds);
51 static void comm_select_dns_incoming(void);
52 static void commUpdateReadBits(int fd, PF * handler);
53 static void commUpdateWriteBits(int fd, PF * handler);
54
55 static struct timeval zero_tv;
56 static fd_set global_readfds;
57 static fd_set global_writefds;
58 static int nreadfds;
59 static int nwritefds;
60
61 /*
62 * Automatic tuning for incoming requests:
63 *
64 * INCOMING sockets are the ICP and HTTP ports. We need to check these
65 * fairly regularly, but how often? When the load increases, we
66 * want to check the incoming sockets more often. If we have a lot
67 * of incoming ICP, then we need to check these sockets more than
68 * if we just have HTTP.
69 *
70 * The variables 'incoming_udp_interval' and 'incoming_tcp_interval'
71 * determine how many normal I/O events to process before checking
72 * incoming sockets again. Note we store the incoming_interval
73  * multiplied by a factor of (2^INCOMING_FACTOR) to have some
74 * pseudo-floating point precision.
75 *
76 * The variable 'udp_io_events' and 'tcp_io_events' counts how many normal
77 * I/O events have been processed since the last check on the incoming
78  * sockets. When io_events > incoming_interval, it's time to check incoming
79 * sockets.
80 *
81 * Every time we check incoming sockets, we count how many new messages
82 * or connections were processed. This is used to adjust the
83 * incoming_interval for the next iteration. The new incoming_interval
84 * is calculated as the current incoming_interval plus what we would
85 * like to see as an average number of events minus the number of
86 * events just processed.
87 *
88 * incoming_interval = incoming_interval + target_average - number_of_events_processed
89 *
90 * There are separate incoming_interval counters for DNS, UDP and TCP events
91 *
92 * You can see the current values of the incoming_interval's, as well as
93 * a histogram of 'incoming_events' by asking the cache manager
94 * for 'comm_incoming', e.g.:
95 *
96 * % ./client mgr:comm_incoming
97 *
98 * Caveats:
99 *
100 * - We have MAX_INCOMING_INTEGER as a magic upper limit on
101 * incoming_interval for both types of sockets. At the
102 * largest value the cache will effectively be idling.
103 *
104 * - The higher the INCOMING_FACTOR, the slower the algorithm will
105 * respond to load spikes/increases/decreases in demand. A value
106 * between 3 and 8 is recommended.
107 */
108
109 #define MAX_INCOMING_INTEGER 256
110 #define INCOMING_FACTOR 5
111 #define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
112 static int udp_io_events = 0;
113 static int dns_io_events = 0;
114 static int tcp_io_events = 0;
115 static int incoming_udp_interval = 16 << INCOMING_FACTOR;
116 static int incoming_dns_interval = 16 << INCOMING_FACTOR;
117 static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
118 #define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval>> INCOMING_FACTOR))
119 #define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval>> INCOMING_FACTOR))
120 #define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval>> INCOMING_FACTOR))
121
122 void
123 Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
124 {
125 fde *F = &fd_table[fd];
126 assert(fd >= 0);
127 assert(F->flags.open);
128 debugs(5, 5, HERE << "FD " << fd << ", type=" << type <<
129 ", handler=" << handler << ", client_data=" << client_data <<
130 ", timeout=" << timeout);
131
132 if (type & COMM_SELECT_READ) {
133 F->read_handler = handler;
134 F->read_data = client_data;
135 commUpdateReadBits(fd, handler);
136 }
137
138 if (type & COMM_SELECT_WRITE) {
139 F->write_handler = handler;
140 F->write_data = client_data;
141 commUpdateWriteBits(fd, handler);
142 }
143
144 if (timeout)
145 F->timeout = squid_curtime + timeout;
146 }
147
/* Intentionally empty for the select(2) module: SetSelect() maintains the
 * global fd_sets directly via commUpdateReadBits()/commUpdateWriteBits(),
 * so there is no extra per-FD engine state to reset here. */
void
Comm::ResetSelect(int fd)
{
}
152
153 static int
154 fdIsUdpListener(int fd)
155 {
156 if (icpIncomingConn != NULL && fd == icpIncomingConn->fd)
157 return 1;
158
159 if (icpOutgoingConn != NULL && fd == icpOutgoingConn->fd)
160 return 1;
161
162 return 0;
163 }
164
165 static int
166 fdIsDns(int fd)
167 {
168 if (fd == DnsSocketA)
169 return 1;
170
171 if (fd == DnsSocketB)
172 return 1;
173
174 return 0;
175 }
176
177 static int
178 fdIsTcpListener(int fd)
179 {
180 for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
181 if (s->listenConn != NULL && s->listenConn->fd == fd)
182 return 1;
183 }
184
185 return 0;
186 }
187
188 static int
189 comm_check_incoming_select_handlers(int nfds, int *fds)
190 {
191 int i;
192 int fd;
193 int maxfd = 0;
194 PF *hdl = NULL;
195 fd_set read_mask;
196 fd_set write_mask;
197 FD_ZERO(&read_mask);
198 FD_ZERO(&write_mask);
199 incoming_sockets_accepted = 0;
200
201 for (i = 0; i < nfds; ++i) {
202 fd = fds[i];
203
204 if (fd_table[fd].read_handler) {
205 FD_SET(fd, &read_mask);
206
207 if (fd > maxfd)
208 maxfd = fd;
209 }
210
211 if (fd_table[fd].write_handler) {
212 FD_SET(fd, &write_mask);
213
214 if (fd > maxfd)
215 maxfd = fd;
216 }
217 }
218
219 if (maxfd++ == 0)
220 return -1;
221
222 getCurrentTime();
223
224 ++ statCounter.syscalls.selects;
225
226 if (select(maxfd, &read_mask, &write_mask, NULL, &zero_tv) < 1)
227 return incoming_sockets_accepted;
228
229 for (i = 0; i < nfds; ++i) {
230 fd = fds[i];
231
232 if (FD_ISSET(fd, &read_mask)) {
233 if ((hdl = fd_table[fd].read_handler) != NULL) {
234 fd_table[fd].read_handler = NULL;
235 commUpdateReadBits(fd, NULL);
236 hdl(fd, fd_table[fd].read_data);
237 } else {
238 debugs(5, DBG_IMPORTANT, "comm_select_incoming: FD " << fd << " NULL read handler");
239 }
240 }
241
242 if (FD_ISSET(fd, &write_mask)) {
243 if ((hdl = fd_table[fd].write_handler) != NULL) {
244 fd_table[fd].write_handler = NULL;
245 commUpdateWriteBits(fd, NULL);
246 hdl(fd, fd_table[fd].write_data);
247 } else {
248 debugs(5, DBG_IMPORTANT, "comm_select_incoming: FD " << fd << " NULL write handler");
249 }
250 }
251 }
252
253 return incoming_sockets_accepted;
254 }
255
256 static void
257 comm_select_udp_incoming(void)
258 {
259 int nfds = 0;
260 int fds[2];
261 int nevents;
262 udp_io_events = 0;
263
264 if (Comm::IsConnOpen(icpIncomingConn)) {
265 fds[nfds] = icpIncomingConn->fd;
266 ++nfds;
267 }
268
269 if (Comm::IsConnOpen(icpOutgoingConn) && icpIncomingConn != icpOutgoingConn) {
270 fds[nfds] = icpOutgoingConn->fd;
271 ++nfds;
272 }
273
274 if (nfds == 0)
275 return;
276
277 nevents = comm_check_incoming_select_handlers(nfds, fds);
278
279 incoming_udp_interval += Config.comm_incoming.udp.average - nevents;
280
281 if (incoming_udp_interval < 0)
282 incoming_udp_interval = 0;
283
284 if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
285 incoming_udp_interval = MAX_INCOMING_INTERVAL;
286
287 if (nevents > INCOMING_UDP_MAX)
288 nevents = INCOMING_UDP_MAX;
289
290 statCounter.comm_udp_incoming.count(nevents);
291 }
292
293 static void
294 comm_select_tcp_incoming(void)
295 {
296 int nfds = 0;
297 int fds[MAXTCPLISTENPORTS];
298 int nevents;
299 tcp_io_events = 0;
300
301 // XXX: only poll sockets that won't be deferred. But how do we identify them?
302
303 for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
304 if (Comm::IsConnOpen(s->listenConn)) {
305 fds[nfds] = s->listenConn->fd;
306 ++nfds;
307 }
308 }
309
310 nevents = comm_check_incoming_select_handlers(nfds, fds);
311 incoming_tcp_interval += Config.comm_incoming.tcp.average - nevents;
312
313 if (incoming_tcp_interval < 0)
314 incoming_tcp_interval = 0;
315
316 if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
317 incoming_tcp_interval = MAX_INCOMING_INTERVAL;
318
319 if (nevents > INCOMING_TCP_MAX)
320 nevents = INCOMING_TCP_MAX;
321
322 statCounter.comm_tcp_incoming.count(nevents);
323 }
324
#define DEBUG_FDBITS 0
/* Select on all sockets; call handlers for those that are ready.
 *
 * Runs one pass of the main I/O loop: copies the global interest sets,
 * select()s with at most msec (capped at MAX_POLL_TIME) of blocking,
 * then walks the result masks word-by-word dispatching read handlers,
 * then write handlers. Incoming (ICP/DNS/HTTP-listen) sockets found
 * ready are not dispatched inline; they are batched via the
 * comm_select_*_incoming() helpers. Returns Comm::OK after dispatching,
 * Comm::SHUTDOWN when no FDs remain registered, Comm::COMM_ERROR on an
 * unrecoverable select() failure, or Comm::TIMEOUT when msec elapses
 * with nothing ready. */
Comm::Flag
Comm::DoSelect(int msec)
{
    fd_set readfds;
    fd_set pendingfds;  // FDs with buffered data that must be serviced regardless of select()
    fd_set writefds;

    PF *hdl = NULL;
    int fd;
    int maxfd;
    int num;        // select() result: number of ready FDs
    int pending;    // count of read_pending FDs collected below
    int calldns = 0, calludp = 0, calltcp = 0;  // incoming-socket kinds seen ready
    int maxindex;   // number of fd_mask words to scan
    unsigned int k;
    int j;
#if DEBUG_FDBITS

    int i;
#endif

    fd_mask *fdsp;
    fd_mask *pfdsp;
    fd_mask tmask;

    struct timeval poll_time;
    double timeout = current_dtime + (msec / 1000.0);   // overall deadline for this call
    fde *F;

    do {
        double start;
        getCurrentTime();
        start = current_dtime;

        /* service the incoming sockets when their adaptive counters say so */
        if (commCheckUdpIncoming)
            comm_select_udp_incoming();

        if (commCheckDnsIncoming)
            comm_select_dns_incoming();

        if (commCheckTcpIncoming)
            comm_select_tcp_incoming();

        calldns = calludp = calltcp = 0;

        maxfd = Biggest_FD + 1;

        /* copy only the fd_mask words that can hold bits for open FDs */
        memcpy(&readfds, &global_readfds,
               howmany(maxfd, FD_MASK_BITS) * FD_MASK_BYTES);

        memcpy(&writefds, &global_writefds,
               howmany(maxfd, FD_MASK_BITS) * FD_MASK_BYTES);

        /* remove stalled FDs, and deal with pending descriptors */
        pending = 0;

        FD_ZERO(&pendingfds);

        maxindex = howmany(maxfd, FD_MASK_BITS);

        fdsp = (fd_mask *) & readfds;

        for (j = 0; j < maxindex; ++j) {
            if ((tmask = fdsp[j]) == 0)
                continue;	/* no bits here */

            for (k = 0; k < FD_MASK_BITS; ++k) {
                if (!EBIT_TEST(tmask, k))
                    continue;

                /* Found a set bit */
                fd = (j * FD_MASK_BITS) + k;

                /* FDs flagged read_pending have data already buffered
                 * and must be serviced even if select() does not report
                 * them readable */
                if (FD_ISSET(fd, &readfds) && fd_table[fd].flags.read_pending) {
                    FD_SET(fd, &pendingfds);
                    ++pending;
                }
            }
        }

#if DEBUG_FDBITS
        for (i = 0; i < maxfd; ++i) {
            /* Check each open socket for a handler. */

            if (fd_table[i].read_handler) {
                assert(FD_ISSET(i, &readfds));
            }

            if (fd_table[i].write_handler) {
                assert(FD_ISSET(i, &writefds));
            }
        }

#endif
        /* nothing registered at all: only legitimate while shutting down */
        if (nreadfds + nwritefds == 0) {
            assert(shutting_down);
            return Comm::SHUTDOWN;
        }

        if (msec > MAX_POLL_TIME)
            msec = MAX_POLL_TIME;

        /* with pending FDs, poll instead of blocking so they are
         * serviced without delay */
        if (pending)
            msec = 0;

        for (;;) {
            poll_time.tv_sec = msec / 1000;
            poll_time.tv_usec = (msec % 1000) * 1000;
            ++ statCounter.syscalls.selects;
            num = select(maxfd, &readfds, &writefds, NULL, &poll_time);
            ++ statCounter.select_loops;

            if (num >= 0 || pending > 0)
                break;

            /* ignorable errno (e.g. EINTR): abandon this pass; the
             * outer loop will retry below */
            if (ignoreErrno(errno))
                break;

            debugs(5, DBG_CRITICAL, "comm_select: select failure: " << xstrerror());

            /* try to identify and clean up any broken descriptors */
            examine_select(&readfds, &writefds);

            return Comm::COMM_ERROR;

            /* NOTREACHED */
        }

        if (num < 0 && !pending)
            continue;

        getCurrentTime();

        debugs(5, num ? 5 : 8, "comm_select: " << num << "+" << pending << " FDs ready");

        statCounter.select_fds_hist.count(num);

        if (num == 0 && pending == 0)
            continue;

        /* Scan return fd masks for ready descriptors */
        fdsp = (fd_mask *) & readfds;

        pfdsp = (fd_mask *) & pendingfds;

        maxindex = howmany(maxfd, FD_MASK_BITS);

        /* read dispatch: select()-ready bits OR'd with pending bits */
        for (j = 0; j < maxindex; ++j) {
            if ((tmask = (fdsp[j] | pfdsp[j])) == 0)
                continue;	/* no bits here */

            for (k = 0; k < FD_MASK_BITS; ++k) {
                if (tmask == 0)
                    break;	/* no more bits left */

                if (!EBIT_TEST(tmask, k))
                    continue;

                /* Found a set bit */
                fd = (j * FD_MASK_BITS) + k;

                EBIT_CLR(tmask, k);	/* this will be done */

#if DEBUG_FDBITS

                debugs(5, 9, "FD " << fd << " bit set for reading");

                assert(FD_ISSET(fd, &readfds));

#endif

                /* incoming sockets are batched after the scans; just
                 * note which kinds became ready */
                if (fdIsUdpListener(fd)) {
                    calludp = 1;
                    continue;
                }

                if (fdIsDns(fd)) {
                    calldns = 1;
                    continue;
                }

                if (fdIsTcpListener(fd)) {
                    calltcp = 1;
                    continue;
                }

                F = &fd_table[fd];
                debugs(5, 6, "comm_select: FD " << fd << " ready for reading");

                if (NULL == (hdl = F->read_handler))
                    (void) 0;
                else {
                    /* handlers are one-shot: clear before calling */
                    F->read_handler = NULL;
                    F->flags.read_pending = 0;
                    commUpdateReadBits(fd, NULL);
                    hdl(fd, F->read_data);
                    ++ statCounter.select_fds;

                    /* re-check incoming sockets between handlers so a
                     * long dispatch run cannot starve them */
                    if (commCheckUdpIncoming)
                        comm_select_udp_incoming();

                    if (commCheckDnsIncoming)
                        comm_select_dns_incoming();

                    if (commCheckTcpIncoming)
                        comm_select_tcp_incoming();
                }
            }
        }

        /* write dispatch: same scan over the write result mask */
        fdsp = (fd_mask *) & writefds;

        for (j = 0; j < maxindex; ++j) {
            if ((tmask = fdsp[j]) == 0)
                continue;	/* no bits here */

            for (k = 0; k < FD_MASK_BITS; ++k) {
                if (tmask == 0)
                    break;	/* no more bits left */

                if (!EBIT_TEST(tmask, k))
                    continue;

                /* Found a set bit */
                fd = (j * FD_MASK_BITS) + k;

                EBIT_CLR(tmask, k);	/* this will be done */

#if DEBUG_FDBITS

                debugs(5, 9, "FD " << fd << " bit set for writing");

                assert(FD_ISSET(fd, &writefds));

#endif

                if (fdIsUdpListener(fd)) {
                    calludp = 1;
                    continue;
                }

                if (fdIsDns(fd)) {
                    calldns = 1;
                    continue;
                }

                if (fdIsTcpListener(fd)) {
                    calltcp = 1;
                    continue;
                }

                F = &fd_table[fd];
                debugs(5, 6, "comm_select: FD " << fd << " ready for writing");

                if ((hdl = F->write_handler)) {
                    F->write_handler = NULL;
                    commUpdateWriteBits(fd, NULL);
                    hdl(fd, F->write_data);
                    ++ statCounter.select_fds;

                    if (commCheckUdpIncoming)
                        comm_select_udp_incoming();

                    if (commCheckDnsIncoming)
                        comm_select_dns_incoming();

                    if (commCheckTcpIncoming)
                        comm_select_tcp_incoming();
                }
            }
        }

        /* now service the incoming-socket kinds noted during the scans */
        if (calludp)
            comm_select_udp_incoming();

        if (calldns)
            comm_select_dns_incoming();

        if (calltcp)
            comm_select_tcp_incoming();

        getCurrentTime();

        statCounter.select_time += (current_dtime - start);

        return Comm::OK;
    } while (timeout > current_dtime);
    debugs(5, 8, "comm_select: time out: " << squid_curtime);

    return Comm::TIMEOUT;
}
617
618 static void
619 comm_select_dns_incoming(void)
620 {
621 int nfds = 0;
622 int fds[3];
623 int nevents;
624 dns_io_events = 0;
625
626 if (DnsSocketA < 0 && DnsSocketB < 0)
627 return;
628
629 if (DnsSocketA >= 0) {
630 fds[nfds] = DnsSocketA;
631 ++nfds;
632 }
633
634 if (DnsSocketB >= 0) {
635 fds[nfds] = DnsSocketB;
636 ++nfds;
637 }
638
639 nevents = comm_check_incoming_select_handlers(nfds, fds);
640
641 if (nevents < 0)
642 return;
643
644 incoming_dns_interval += Config.comm_incoming.dns.average - nevents;
645
646 if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
647 incoming_dns_interval = Config.comm_incoming.dns.min_poll;
648
649 if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
650 incoming_dns_interval = MAX_INCOMING_INTERVAL;
651
652 if (nevents > INCOMING_DNS_MAX)
653 nevents = INCOMING_DNS_MAX;
654
655 statCounter.comm_dns_incoming.count(nevents);
656 }
657
658 void
659 Comm::SelectLoopInit(void)
660 {
661 zero_tv.tv_sec = 0;
662 zero_tv.tv_usec = 0;
663 FD_ZERO(&global_readfds);
664 FD_ZERO(&global_writefds);
665 nreadfds = nwritefds = 0;
666
667 Mgr::RegisterAction("comm_select_incoming",
668 "comm_incoming() stats",
669 commIncomingStats, 0, 1);
670 }
671
672 /*
673 * examine_select - debug routine.
674 *
675  * I spent the day chasing this core dump that occurs when both the client
676  * and the server side of a cache fetch simultaneously abort the
677  * connection. While I haven't really studied the code to figure out how
678  * it happens, the snippet below may prevent the cache from exiting:
679 *
680 * Call this from where the select loop fails.
681 */
/* Probe every FD named in readfds/writefds with fstat(). Each FD that
 * fails the probe is cleaned up: its close handlers (or, failing that,
 * its timeout handler) are dispatched, all of its handlers are cleared,
 * and it is removed from both sets. Always returns 0. */
static int
examine_select(fd_set * readfds, fd_set * writefds)
{
    int fd = 0;
    fd_set read_x;
    fd_set write_x;

    struct timeval tv;
    AsyncCall::Pointer ch = NULL;
    fde *F = NULL;

    struct stat sb;
    debugs(5, DBG_CRITICAL, "examine_select: Examining open file descriptors...");

    for (fd = 0; fd < Squid_MaxFD; ++fd) {
        FD_ZERO(&read_x);
        FD_ZERO(&write_x);
        tv.tv_sec = tv.tv_usec = 0;

        /* only probe FDs that are actually in one of the given sets */
        if (FD_ISSET(fd, readfds))
            FD_SET(fd, &read_x);
        else if (FD_ISSET(fd, writefds))
            FD_SET(fd, &write_x);
        else
            continue;

        ++ statCounter.syscalls.selects;
        errno = 0;

        /* fstat() as a cheap validity probe: it fails for closed or
         * otherwise broken descriptors */
        if (!fstat(fd, &sb)) {
            debugs(5, 5, "FD " << fd << " is valid.");
            continue;
        }

        F = &fd_table[fd];
        debugs(5, DBG_CRITICAL, "FD " << fd << ": " << xstrerror());
        debugs(5, DBG_CRITICAL, "WARNING: FD " << fd << " has handlers, but it's invalid.");
        debugs(5, DBG_CRITICAL, "FD " << fd << " is a " << fdTypeStr[F->type] << " called '" << F->desc << "'");
        debugs(5, DBG_CRITICAL, "tmout:" << F->timeoutHandler << " read:" << F->read_handler << " write:" << F->write_handler);

        for (ch = F->closeHandler; ch != NULL; ch = ch->Next())
            debugs(5, DBG_CRITICAL, " close handler: " << ch);

        /* prefer the close handlers; fall back to the timeout handler */
        if (F->closeHandler != NULL) {
            commCallCloseHandlers(fd);
        } else if (F->timeoutHandler != NULL) {
            debugs(5, DBG_CRITICAL, "examine_select: Calling Timeout Handler");
            ScheduleCallHere(F->timeoutHandler);
        }

        /* forget everything about this FD so we never poll it again */
        F->closeHandler = NULL;
        F->timeoutHandler = NULL;
        F->read_handler = NULL;
        F->write_handler = NULL;
        FD_CLR(fd, readfds);
        FD_CLR(fd, writefds);
    }

    return 0;
}
742
743 static void
744 commIncomingStats(StoreEntry * sentry)
745 {
746 storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
747 incoming_udp_interval >> INCOMING_FACTOR);
748 storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
749 incoming_dns_interval >> INCOMING_FACTOR);
750 storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
751 incoming_tcp_interval >> INCOMING_FACTOR);
752 storeAppendPrintf(sentry, "\n");
753 storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
754 storeAppendPrintf(sentry, "ICP Messages handled per comm_select_udp_incoming() call:\n");
755 statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
756 storeAppendPrintf(sentry, "DNS Messages handled per comm_select_dns_incoming() call:\n");
757 statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
758 storeAppendPrintf(sentry, "HTTP Messages handled per comm_select_tcp_incoming() call:\n");
759 statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
760 }
761
762 void
763 commUpdateReadBits(int fd, PF * handler)
764 {
765 if (handler && !FD_ISSET(fd, &global_readfds)) {
766 FD_SET(fd, &global_readfds);
767 ++nreadfds;
768 } else if (!handler && FD_ISSET(fd, &global_readfds)) {
769 FD_CLR(fd, &global_readfds);
770 --nreadfds;
771 }
772 }
773
774 void
775 commUpdateWriteBits(int fd, PF * handler)
776 {
777 if (handler && !FD_ISSET(fd, &global_writefds)) {
778 FD_SET(fd, &global_writefds);
779 ++nwritefds;
780 } else if (!handler && FD_ISSET(fd, &global_writefds)) {
781 FD_CLR(fd, &global_writefds);
782 --nwritefds;
783 }
784 }
785
/* Called by async-io or diskd to speed up the polling */
void
Comm::QuickPollRequired(void)
{
    /* cap the maximum select() wait at 10ms instead of the default
     * 1000ms so helper I/O is noticed promptly */
    MAX_POLL_TIME = 10;
}
792
793 #endif /* USE_SELECT */
794