src/comm/ModSelect.cc
/*
 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 05 Socket Functions */

#include "squid.h"

#if USE_SELECT

#include "anyp/PortCfg.h"
#include "comm/Connection.h"
#include "comm/Loops.h"
#include "fde.h"
#include "globals.h"
#include "ICP.h"
#include "mgr/Registration.h"
#include "SquidConfig.h"
#include "StatCounters.h"
#include "StatHist.h"
#include "Store.h"

#include <cerrno>
#if HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif

static int MAX_POLL_TIME = 1000; /* see also Comm::QuickPollRequired() */

#ifndef howmany
#define howmany(x, y) (((x)+((y)-1))/(y))
#endif
#ifndef NBBY
#define NBBY 8
#endif
#define FD_MASK_BYTES sizeof(fd_mask)
#define FD_MASK_BITS (FD_MASK_BYTES*NBBY)
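
/*
 * howmany(maxfd, FD_MASK_BITS) is the number of fd_mask words needed to
 * cover descriptors 0..maxfd-1. Comm::DoSelect() below uses it both to copy
 * only the live portion of the global fd_set structures and to bound its
 * word-by-word scan of the select() results.
 */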

/* STATIC */
static int examine_select(fd_set *, fd_set *);
static int fdIsTcpListener(int fd);
static int fdIsUdpListener(int fd);
static int fdIsDns(int fd);
static OBJH commIncomingStats;
static int comm_check_incoming_select_handlers(int nfds, int *fds);
static void comm_select_dns_incoming(void);
static void commUpdateReadBits(int fd, PF * handler);
static void commUpdateWriteBits(int fd, PF * handler);

static struct timeval zero_tv;
static fd_set global_readfds;
static fd_set global_writefds;
static int nreadfds;
static int nwritefds;

/*
 * Automatic tuning for incoming requests:
 *
 * INCOMING sockets are the ICP and HTTP ports. We need to check these
 * fairly regularly, but how often? When the load increases, we
 * want to check the incoming sockets more often. If we have a lot
 * of incoming ICP, then we need to check these sockets more than
 * if we just have HTTP.
 *
 * The variables 'incoming_udp_interval' and 'incoming_tcp_interval'
 * determine how many normal I/O events to process before checking
 * incoming sockets again. Note we store the incoming_interval
 * multiplied by a factor of (2^INCOMING_FACTOR) to have some
 * pseudo-floating point precision.
 *
 * The variables 'udp_io_events' and 'tcp_io_events' count how many normal
 * I/O events have been processed since the last check on the incoming
 * sockets. When io_events > incoming_interval, it is time to check the
 * incoming sockets.
 *
 * Every time we check incoming sockets, we count how many new messages
 * or connections were processed. This is used to adjust the
 * incoming_interval for the next iteration. The new incoming_interval
 * is calculated as the current incoming_interval plus what we would
 * like to see as an average number of events minus the number of
 * events just processed.
 *
 * incoming_interval = incoming_interval + target_average - number_of_events_processed
 *
 * There are separate incoming_interval counters for DNS, UDP and TCP events
 *
 * You can see the current values of the incoming_interval counters, as well
 * as a histogram of 'incoming_events', by asking the cache manager
 * for 'comm_select_incoming', e.g.:
 *
 * % ./client mgr:comm_select_incoming
 *
 * Caveats:
 *
 *   - We have MAX_INCOMING_INTEGER as a magic upper limit on
 *     incoming_interval for both types of sockets. At the
 *     largest value the cache will effectively be idling.
 *
 *   - The higher the INCOMING_FACTOR, the slower the algorithm will
 *     respond to load spikes/increases/decreases in demand. A value
 *     between 3 and 8 is recommended.
 */

#define MAX_INCOMING_INTEGER 256
#define INCOMING_FACTOR 5
#define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
static int udp_io_events = 0;
static int dns_io_events = 0;
static int tcp_io_events = 0;
static int incoming_udp_interval = 16 << INCOMING_FACTOR;
static int incoming_dns_interval = 16 << INCOMING_FACTOR;
static int incoming_tcp_interval = 16 << INCOMING_FACTOR;
#define commCheckUdpIncoming (++udp_io_events > (incoming_udp_interval>> INCOMING_FACTOR))
#define commCheckDnsIncoming (++dns_io_events > (incoming_dns_interval>> INCOMING_FACTOR))
#define commCheckTcpIncoming (++tcp_io_events > (incoming_tcp_interval>> INCOMING_FACTOR))
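
/*
 * Worked example of the pseudo-floating point arithmetic (illustrative
 * numbers only): with INCOMING_FACTOR == 5, the initial stored interval
 * 16 << 5 == 512 stands for an effective interval of 512 >> 5 == 16, so the
 * commCheck*Incoming macros above trigger after roughly 16 ordinary I/O
 * events. Each adjustment below adds (configured average - events just
 * handled) to the stored value; a check that handles 2 events against an
 * average of 4 only moves the stored interval from 512 to 514, and about
 * sixteen such checks are needed before the effective interval grows from
 * 16 to 17.
 */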

void
Comm::SetSelect(int fd, unsigned int type, PF * handler, void *client_data, time_t timeout)
{
    fde *F = &fd_table[fd];
    assert(fd >= 0);
    assert(F->flags.open || (!handler && !client_data && !timeout));
    debugs(5, 5, "FD " << fd << ", type=" << type <<
           ", handler=" << handler << ", client_data=" << client_data <<
           ", timeout=" << timeout);

    if (type & COMM_SELECT_READ) {
        F->read_handler = handler;
        F->read_data = client_data;
        commUpdateReadBits(fd, handler);
    }

    if (type & COMM_SELECT_WRITE) {
        F->write_handler = handler;
        F->write_data = client_data;
        commUpdateWriteBits(fd, handler);
    }

    if (timeout)
        F->timeout = squid_curtime + timeout;
}
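
/*
 * Typical usage (an illustrative sketch; myReadHandler and myData are
 * hypothetical names, not identifiers used in this file): a caller
 * registers a read handler with
 *     Comm::SetSelect(fd, COMM_SELECT_READ, myReadHandler, myData, 30);
 * and later cancels it with
 *     Comm::SetSelect(fd, COMM_SELECT_READ, nullptr, nullptr, 0);
 */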

static int
fdIsUdpListener(int fd)
{
    if (icpIncomingConn != nullptr && fd == icpIncomingConn->fd)
        return 1;

    if (icpOutgoingConn != nullptr && fd == icpOutgoingConn->fd)
        return 1;

    return 0;
}

static int
fdIsDns(int fd)
{
    if (fd == DnsSocketA)
        return 1;

    if (fd == DnsSocketB)
        return 1;

    return 0;
}

static int
fdIsTcpListener(int fd)
{
    for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
        if (s->listenConn != nullptr && s->listenConn->fd == fd)
            return 1;
    }

    return 0;
}

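/*
 * Poll just the given descriptors with a zero timeout and run any read or
 * write handlers that are ready. Returns the number of incoming
 * connections/messages accepted during those handler calls (via the global
 * incoming_sockets_accepted), or -1 when none of the descriptors currently
 * has a handler registered.
 */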
static int
comm_check_incoming_select_handlers(int nfds, int *fds)
{
    int i;
    int fd;
    int maxfd = 0;
    PF *hdl = nullptr;
    fd_set read_mask;
    fd_set write_mask;
    FD_ZERO(&read_mask);
    FD_ZERO(&write_mask);
    incoming_sockets_accepted = 0;

    for (i = 0; i < nfds; ++i) {
        fd = fds[i];

        if (fd_table[fd].read_handler) {
            FD_SET(fd, &read_mask);

            if (fd > maxfd)
                maxfd = fd;
        }

        if (fd_table[fd].write_handler) {
            FD_SET(fd, &write_mask);

            if (fd > maxfd)
                maxfd = fd;
        }
    }

    if (maxfd++ == 0)
        return -1;

    getCurrentTime();

    ++ statCounter.syscalls.selects;

    if (select(maxfd, &read_mask, &write_mask, nullptr, &zero_tv) < 1)
        return incoming_sockets_accepted;

    for (i = 0; i < nfds; ++i) {
        fd = fds[i];

        if (FD_ISSET(fd, &read_mask)) {
            if ((hdl = fd_table[fd].read_handler) != nullptr) {
                fd_table[fd].read_handler = nullptr;
                commUpdateReadBits(fd, nullptr);
                hdl(fd, fd_table[fd].read_data);
            } else {
                debugs(5, DBG_IMPORTANT, "comm_select_incoming: FD " << fd << " NULL read handler");
            }
        }

        if (FD_ISSET(fd, &write_mask)) {
            if ((hdl = fd_table[fd].write_handler) != nullptr) {
                fd_table[fd].write_handler = nullptr;
                commUpdateWriteBits(fd, nullptr);
                hdl(fd, fd_table[fd].write_data);
            } else {
                debugs(5, DBG_IMPORTANT, "comm_select_incoming: FD " << fd << " NULL write handler");
            }
        }
    }

    return incoming_sockets_accepted;
}

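/*
 * Check the ICP (UDP) sockets for pending work and feed the result back
 * into incoming_udp_interval: handling fewer events than the configured
 * average lengthens the interval, handling more shortens it, clamped to
 * [0, MAX_INCOMING_INTERVAL]. The number of events handled per call is
 * recorded in the comm_udp_incoming histogram.
 */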
static void
comm_select_udp_incoming(void)
{
    int nfds = 0;
    int fds[2];
    int nevents;
    udp_io_events = 0;

    if (Comm::IsConnOpen(icpIncomingConn)) {
        fds[nfds] = icpIncomingConn->fd;
        ++nfds;
    }

    if (Comm::IsConnOpen(icpOutgoingConn) && icpIncomingConn != icpOutgoingConn) {
        fds[nfds] = icpOutgoingConn->fd;
        ++nfds;
    }

    if (nfds == 0)
        return;

    nevents = comm_check_incoming_select_handlers(nfds, fds);

    incoming_udp_interval += Config.comm_incoming.udp.average - nevents;

    if (incoming_udp_interval < 0)
        incoming_udp_interval = 0;

    if (incoming_udp_interval > MAX_INCOMING_INTERVAL)
        incoming_udp_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_UDP_MAX)
        nevents = INCOMING_UDP_MAX;

    statCounter.comm_udp_incoming.count(nevents);
}

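/*
 * Check the HTTP (TCP) listening sockets and adjust incoming_tcp_interval
 * with the same feedback rule used for the UDP sockets above; events
 * handled per call are recorded in the comm_tcp_incoming histogram.
 */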
static void
comm_select_tcp_incoming(void)
{
    int nfds = 0;
    int fds[MAXTCPLISTENPORTS];
    int nevents;
    tcp_io_events = 0;

    // XXX: only poll sockets that won't be deferred. But how do we identify them?

    for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
        if (Comm::IsConnOpen(s->listenConn)) {
            fds[nfds] = s->listenConn->fd;
            ++nfds;
        }
    }

    nevents = comm_check_incoming_select_handlers(nfds, fds);
    incoming_tcp_interval += Config.comm_incoming.tcp.average - nevents;

    if (incoming_tcp_interval < 0)
        incoming_tcp_interval = 0;

    if (incoming_tcp_interval > MAX_INCOMING_INTERVAL)
        incoming_tcp_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_TCP_MAX)
        nevents = INCOMING_TCP_MAX;

    statCounter.comm_tcp_incoming.count(nevents);
}

/* Select on all sockets; call handlers for those that are ready. */
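/*
 * The do-loop below repeats until select() reports activity, a pending read
 * is found, an error occurs, or the msec deadline passes. Each pass: service
 * the incoming sockets when their counters call for it, copy the registered
 * global fd sets, add descriptors flagged read_pending, call select(), then
 * run the ready read handlers followed by the ready write handlers, handing
 * incoming-socket descriptors to the comm_select_*_incoming() helpers
 * instead.
 */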
Comm::Flag
Comm::DoSelect(int msec)
{
    fd_set readfds;
    fd_set pendingfds;
    fd_set writefds;

    PF *hdl = nullptr;
    int fd;
    int maxfd;
    int num;
    int pending;
    int calldns = 0, calludp = 0, calltcp = 0;
    int maxindex;
    unsigned int k;
    int j;
    fd_mask *fdsp;
    fd_mask *pfdsp;
    fd_mask tmask;

    struct timeval poll_time;
    double timeout = current_dtime + (msec / 1000.0);
    fde *F;

    do {
        double start;
        getCurrentTime();
        start = current_dtime;

        if (commCheckUdpIncoming)
            comm_select_udp_incoming();

        if (commCheckDnsIncoming)
            comm_select_dns_incoming();

        if (commCheckTcpIncoming)
            comm_select_tcp_incoming();

        calldns = calludp = calltcp = 0;

        maxfd = Biggest_FD + 1;

        memcpy(&readfds, &global_readfds,
               howmany(maxfd, FD_MASK_BITS) * FD_MASK_BYTES);

        memcpy(&writefds, &global_writefds,
               howmany(maxfd, FD_MASK_BITS) * FD_MASK_BYTES);

        /* remove stalled FDs, and deal with pending descriptors */
        pending = 0;

        FD_ZERO(&pendingfds);

        maxindex = howmany(maxfd, FD_MASK_BITS);

        fdsp = (fd_mask *) & readfds;

        for (j = 0; j < maxindex; ++j) {
            if ((tmask = fdsp[j]) == 0)
                continue;    /* no bits here */

            for (k = 0; k < FD_MASK_BITS; ++k) {
                if (!EBIT_TEST(tmask, k))
                    continue;

                /* Found a set bit */
                fd = (j * FD_MASK_BITS) + k;

                if (FD_ISSET(fd, &readfds) && fd_table[fd].flags.read_pending) {
                    FD_SET(fd, &pendingfds);
                    ++pending;
                }
            }
        }

        if (nreadfds + nwritefds == 0) {
            assert(shutting_down);
            return Comm::SHUTDOWN;
        }

        if (msec > MAX_POLL_TIME)
            msec = MAX_POLL_TIME;

        if (pending)
            msec = 0;

        for (;;) {
            poll_time.tv_sec = msec / 1000;
            poll_time.tv_usec = (msec % 1000) * 1000;
            ++ statCounter.syscalls.selects;
            num = select(maxfd, &readfds, &writefds, nullptr, &poll_time);
            int xerrno = errno;
            ++ statCounter.select_loops;

            if (num >= 0 || pending > 0)
                break;

            if (ignoreErrno(xerrno))
                break;

            debugs(5, DBG_CRITICAL, MYNAME << "select failure: " << xstrerr(xerrno));

            examine_select(&readfds, &writefds);

            return Comm::COMM_ERROR;

            /* NOTREACHED */
        }

        if (num < 0 && !pending)
            continue;

        getCurrentTime();

        debugs(5, num ? 5 : 8, "comm_select: " << num << "+" << pending << " FDs ready");

        statCounter.select_fds_hist.count(num);

        if (num == 0 && pending == 0)
            continue;

        /* Scan return fd masks for ready descriptors */
        fdsp = (fd_mask *) & readfds;

        pfdsp = (fd_mask *) & pendingfds;

        maxindex = howmany(maxfd, FD_MASK_BITS);

        for (j = 0; j < maxindex; ++j) {
            if ((tmask = (fdsp[j] | pfdsp[j])) == 0)
                continue;    /* no bits here */

            for (k = 0; k < FD_MASK_BITS; ++k) {
                if (tmask == 0)
                    break;    /* no more bits left */

                if (!EBIT_TEST(tmask, k))
                    continue;

                /* Found a set bit */
                fd = (j * FD_MASK_BITS) + k;

                EBIT_CLR(tmask, k);    /* this will be done */

                if (fdIsUdpListener(fd)) {
                    calludp = 1;
                    continue;
                }

                if (fdIsDns(fd)) {
                    calldns = 1;
                    continue;
                }

                if (fdIsTcpListener(fd)) {
                    calltcp = 1;
                    continue;
                }

                F = &fd_table[fd];
                debugs(5, 6, "comm_select: FD " << fd << " ready for reading");

                if (nullptr == (hdl = F->read_handler))
                    (void) 0;
                else {
                    F->read_handler = nullptr;
                    commUpdateReadBits(fd, nullptr);
                    hdl(fd, F->read_data);
                    ++ statCounter.select_fds;

                    if (commCheckUdpIncoming)
                        comm_select_udp_incoming();

                    if (commCheckDnsIncoming)
                        comm_select_dns_incoming();

                    if (commCheckTcpIncoming)
                        comm_select_tcp_incoming();
                }
            }
        }

        fdsp = (fd_mask *) & writefds;

        for (j = 0; j < maxindex; ++j) {
            if ((tmask = fdsp[j]) == 0)
                continue;    /* no bits here */

            for (k = 0; k < FD_MASK_BITS; ++k) {
                if (tmask == 0)
                    break;    /* no more bits left */

                if (!EBIT_TEST(tmask, k))
                    continue;

                /* Found a set bit */
                fd = (j * FD_MASK_BITS) + k;

                EBIT_CLR(tmask, k);    /* this will be done */

                if (fdIsUdpListener(fd)) {
                    calludp = 1;
                    continue;
                }

                if (fdIsDns(fd)) {
                    calldns = 1;
                    continue;
                }

                if (fdIsTcpListener(fd)) {
                    calltcp = 1;
                    continue;
                }

                F = &fd_table[fd];
                debugs(5, 6, "comm_select: FD " << fd << " ready for writing");

                if ((hdl = F->write_handler)) {
                    F->write_handler = nullptr;
                    commUpdateWriteBits(fd, nullptr);
                    hdl(fd, F->write_data);
                    ++ statCounter.select_fds;

                    if (commCheckUdpIncoming)
                        comm_select_udp_incoming();

                    if (commCheckDnsIncoming)
                        comm_select_dns_incoming();

                    if (commCheckTcpIncoming)
                        comm_select_tcp_incoming();
                }
            }
        }

        if (calludp)
            comm_select_udp_incoming();

        if (calldns)
            comm_select_dns_incoming();

        if (calltcp)
            comm_select_tcp_incoming();

        getCurrentTime();

        statCounter.select_time += (current_dtime - start);

        return Comm::OK;
    } while (timeout > current_dtime);
    debugs(5, 8, "comm_select: time out: " << squid_curtime);

    return Comm::TIMEOUT;
}

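/*
 * Check the internal DNS sockets and adjust incoming_dns_interval with the
 * same feedback rule, but clamped below at Config.comm_incoming.dns.min_poll
 * rather than at zero; events handled per call are recorded in the
 * comm_dns_incoming histogram.
 */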
static void
comm_select_dns_incoming(void)
{
    int nfds = 0;
    int fds[3];
    int nevents;
    dns_io_events = 0;

    if (DnsSocketA < 0 && DnsSocketB < 0)
        return;

    if (DnsSocketA >= 0) {
        fds[nfds] = DnsSocketA;
        ++nfds;
    }

    if (DnsSocketB >= 0) {
        fds[nfds] = DnsSocketB;
        ++nfds;
    }

    nevents = comm_check_incoming_select_handlers(nfds, fds);

    if (nevents < 0)
        return;

    incoming_dns_interval += Config.comm_incoming.dns.average - nevents;

    if (incoming_dns_interval < Config.comm_incoming.dns.min_poll)
        incoming_dns_interval = Config.comm_incoming.dns.min_poll;

    if (incoming_dns_interval > MAX_INCOMING_INTERVAL)
        incoming_dns_interval = MAX_INCOMING_INTERVAL;

    if (nevents > INCOMING_DNS_MAX)
        nevents = INCOMING_DNS_MAX;

    statCounter.comm_dns_incoming.count(nevents);
}

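/* Reset the global fd sets and handler counters, and register the 'comm_select_incoming' cache manager report. */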
void
Comm::SelectLoopInit(void)
{
    zero_tv.tv_sec = 0;
    zero_tv.tv_usec = 0;
    FD_ZERO(&global_readfds);
    FD_ZERO(&global_writefds);
    nreadfds = nwritefds = 0;

    Mgr::RegisterAction("comm_select_incoming",
                        "comm_incoming() stats",
                        commIncomingStats, 0, 1);
}

/*
 * examine_select - debug routine.
 *
 * I spent the day chasing this core dump that occurs when both the client
 * and the server side of a cache fetch simultaneously abort the
 * connection. While I haven't really studied the code to figure out how
 * it happens, the snippet below may prevent the cache from exiting:
 *
 * Call this from where the select loop fails.
 */
static int
examine_select(fd_set * readfds, fd_set * writefds)
{
    int fd = 0;
    fd_set read_x;
    fd_set write_x;

    struct timeval tv;
    AsyncCall::Pointer ch = nullptr;
    fde *F = nullptr;

    struct stat sb;
    debugs(5, DBG_CRITICAL, "examine_select: Examining open file descriptors...");

    for (fd = 0; fd < Squid_MaxFD; ++fd) {
        FD_ZERO(&read_x);
        FD_ZERO(&write_x);
        tv.tv_sec = tv.tv_usec = 0;

        if (FD_ISSET(fd, readfds))
            FD_SET(fd, &read_x);
        else if (FD_ISSET(fd, writefds))
            FD_SET(fd, &write_x);
        else
            continue;

        ++ statCounter.syscalls.selects;
        errno = 0;

        if (!fstat(fd, &sb)) {
            debugs(5, 5, "FD " << fd << " is valid.");
            continue;
        }
        int xerrno = errno;

        F = &fd_table[fd];
        debugs(5, DBG_CRITICAL, "fstat(FD " << fd << "): " << xstrerr(xerrno));
        debugs(5, DBG_CRITICAL, "WARNING: FD " << fd << " has handlers, but it's invalid.");
        debugs(5, DBG_CRITICAL, "FD " << fd << " is a " << fdTypeStr[F->type] << " called '" << F->desc << "'");
        debugs(5, DBG_CRITICAL, "tmout:" << F->timeoutHandler << " read:" << F->read_handler << " write:" << F->write_handler);

        for (ch = F->closeHandler; ch != nullptr; ch = ch->Next())
            debugs(5, DBG_CRITICAL, " close handler: " << ch);

        if (F->closeHandler != nullptr) {
            commCallCloseHandlers(fd);
        } else if (F->timeoutHandler != nullptr) {
            debugs(5, DBG_CRITICAL, "examine_select: Calling Timeout Handler");
            ScheduleCallHere(F->timeoutHandler);
        }

        F->closeHandler = nullptr;
        F->timeoutHandler = nullptr;
        F->read_handler = nullptr;
        F->write_handler = nullptr;
        FD_CLR(fd, readfds);
        FD_CLR(fd, writefds);
    }

    return 0;
}

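/* cache manager report: current incoming_*_interval values and per-call event histograms */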
static void
commIncomingStats(StoreEntry * sentry)
{
    storeAppendPrintf(sentry, "Current incoming_udp_interval: %d\n",
                      incoming_udp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n",
                      incoming_dns_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "Current incoming_tcp_interval: %d\n",
                      incoming_tcp_interval >> INCOMING_FACTOR);
    storeAppendPrintf(sentry, "\n");
    storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
    storeAppendPrintf(sentry, "ICP Messages handled per comm_select_udp_incoming() call:\n");
    statCounter.comm_udp_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "DNS Messages handled per comm_select_dns_incoming() call:\n");
    statCounter.comm_dns_incoming.dump(sentry, statHistIntDumper);
    storeAppendPrintf(sentry, "HTTP Messages handled per comm_select_tcp_incoming() call:\n");
    statCounter.comm_tcp_incoming.dump(sentry, statHistIntDumper);
}

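/*
 * Keep global_readfds/global_writefds and the nreadfds/nwritefds counters
 * in sync with whether a read (or write) handler is currently registered
 * for fd.
 */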
void
commUpdateReadBits(int fd, PF * handler)
{
    if (handler && !FD_ISSET(fd, &global_readfds)) {
        FD_SET(fd, &global_readfds);
        ++nreadfds;
    } else if (!handler && FD_ISSET(fd, &global_readfds)) {
        FD_CLR(fd, &global_readfds);
        --nreadfds;
    }
}

void
commUpdateWriteBits(int fd, PF * handler)
{
    if (handler && !FD_ISSET(fd, &global_writefds)) {
        FD_SET(fd, &global_writefds);
        ++nwritefds;
    } else if (!handler && FD_ISSET(fd, &global_writefds)) {
        FD_CLR(fd, &global_writefds);
        --nwritefds;
    }
}

/* Called by async-io or diskd to speed up the polling */
void
Comm::QuickPollRequired(void)
{
    MAX_POLL_TIME = 10;
}

#endif /* USE_SELECT */