/* thirdparty/squid — src/comm_select.cc (gitweb navigation residue removed) */
1
2
3 /*
4 * $Id: comm_select.cc,v 1.5 1998/08/14 09:22:33 wessels Exp $
5 *
6 * DEBUG: section 5 Socket Functions
7 *
8 * SQUID Internet Object Cache http://squid.nlanr.net/Squid/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from the
12 * Internet community. Development is led by Duane Wessels of the
13 * National Laboratory for Applied Network Research and funded by the
14 * National Science Foundation. Squid is Copyrighted (C) 1998 by
15 * Duane Wessels and the University of California San Diego. Please
16 * see the COPYRIGHT file for full details. Squid incorporates
17 * software developed and/or copyrighted by other sources. Please see
18 * the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37
38 #if USE_ASYNC_IO
39 #define MAX_POLL_TIME 10
40 #else
41 #define MAX_POLL_TIME 1000
42 #endif
43
44 /* STATIC */
45 #if !HAVE_POLL
46 static int examine_select(fd_set *, fd_set *);
47 #endif
48 static int fdIsHttp(int fd);
49 static int fdIsIcp(int fd);
50 static int commDeferRead(int fd);
51 static void checkTimeouts(void);
52 static OBJH commIncomingStats;
53 #if HAVE_POLL
54 static int comm_check_incoming_poll_handlers(int nfds, int *fds);
55 #else
56 static int comm_check_incoming_select_handlers(int nfds, int *fds);
57 #endif
58
59 static struct timeval zero_tv;
60
61 /*
62 * Automatic tuning for incoming requests:
63 *
64 * INCOMING sockets are the ICP and HTTP ports. We need to check these
65 * fairly regularly, but how often? When the load increases, we
66 * want to check the incoming sockets more often. If we have a lot
67 * of incoming ICP, then we need to check these sockets more than
68 * if we just have HTTP.
69 *
70 * The variables 'incoming_icp_interval' and 'incoming_http_interval'
71 * determine how many normal I/O events to process before checking
72 * incoming sockets again. Note we store the incoming_interval
73 * multipled by a factor of (2^INCOMING_FACTOR) to have some
74 * pseudo-floating point precision.
75 *
76 * The variable 'icp_io_events' and 'http_io_events' counts how many normal
77 * I/O events have been processed since the last check on the incoming
78 * sockets. When io_events > incoming_interval, its time to check incoming
79 * sockets.
80 *
81 * Every time we check incoming sockets, we count how many new messages
82 * or connections were processed. This is used to adjust the
83 * incoming_interval for the next iteration. The new incoming_interval
84 * is calculated as the current incoming_interval plus what we would
85 * like to see as an average number of events minus the number of
86 * events just processed.
87 *
88 * incoming_interval = incoming_interval + 1 - number_of_events_processed
89 *
90 * There are separate incoming_interval counters for both HTTP and ICP events
91 *
92 * You can see the current values of the incoming_interval's, as well as
93 * a histogram of 'incoming_events' by asking the cache manager
94 * for 'comm_incoming', e.g.:
95 *
96 * % ./client mgr:comm_incoming
97 *
98 * Caveats:
99 *
100 * - We have MAX_INCOMING_INTEGER as a magic upper limit on
101 * incoming_interval for both types of sockets. At the
102 * largest value the cache will effectively be idling.
103 *
104 * - The higher the INCOMING_FACTOR, the slower the algorithm will
105 * respond to load spikes/increases/decreases in demand. A value
106 * between 3 and 8 is recommended.
107 */
108
109 #define MAX_INCOMING_INTEGER 256
110 #define INCOMING_FACTOR 5
111 #define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR)
112 static int icp_io_events = 0;
113 static int http_io_events = 0;
114 static int incoming_icp_interval = 16 << INCOMING_FACTOR;
115 static int incoming_http_interval = 16 << INCOMING_FACTOR;
116 #define commCheckICPIncoming (++icp_io_events > (incoming_icp_interval>> INCOMING_FACTOR))
117 #define commCheckHTTPIncoming (++http_io_events > (incoming_http_interval>> INCOMING_FACTOR))
118
119 static int
120 commDeferRead(int fd)
121 {
122 fde *F = &fd_table[fd];
123 if (F->defer_check == NULL)
124 return 0;
125 return F->defer_check(fd, F->defer_data);
126 }
127
128 static int
129 fdIsIcp(int fd)
130 {
131 if (fd == theInIcpConnection)
132 return 1;
133 if (fd == theOutIcpConnection)
134 return 1;
135 return 0;
136 }
137
138 static int
139 fdIsHttp(int fd)
140 {
141 int j;
142 for (j = 0; j < NHttpSockets; j++) {
143 if (fd == HttpSockets[j])
144 return 1;
145 }
146 return 0;
147 }
148
149 #if HAVE_POLL
150 static int
151 comm_check_incoming_poll_handlers(int nfds, int *fds)
152 {
153 int i;
154 int fd;
155 int incame = 0;
156 PF *hdl = NULL;
157 int npfds;
158 struct pollfd pfds[3 + MAXHTTPPORTS];
159 for (i = npfds = 0; i < nfds; i++) {
160 int events;
161 fd = fds[i];
162 events = 0;
163 if (fd_table[fd].read_handler)
164 events |= POLLRDNORM;
165 if (fd_table[fd].write_handler)
166 events |= POLLWRNORM;
167 if (events) {
168 pfds[npfds].fd = fd;
169 pfds[npfds].events = events;
170 pfds[npfds].revents = 0;
171 npfds++;
172 }
173 }
174 if (!nfds)
175 return incame;
176 #if !ALARM_UPDATES_TIME
177 getCurrentTime();
178 #endif
179 if (poll(pfds, npfds, 0) < 1)
180 return incame;
181 for (i = 0; i < npfds; i++) {
182 int revents;
183 if (((revents = pfds[i].revents) == 0) || ((fd = pfds[i].fd) == -1))
184 continue;
185 if (revents & (POLLRDNORM | POLLIN | POLLHUP | POLLERR)) {
186 if ((hdl = fd_table[fd].read_handler)) {
187 fd_table[fd].read_handler = NULL;
188 hdl(fd, &incame);
189 } else
190 debug(5, 1) ("comm_poll_incoming: NULL read handler\n");
191 }
192 if (revents & (POLLWRNORM | POLLOUT | POLLHUP | POLLERR)) {
193 if ((hdl = fd_table[fd].write_handler)) {
194 fd_table[fd].write_handler = NULL;
195 hdl(fd, &incame);
196 } else
197 debug(5, 1) ("comm_poll_incoming: NULL write handler\n");
198 }
199 }
200 return incame;
201 }
202
203 static void
204 comm_poll_icp_incoming(void)
205 {
206 int nfds = 0;
207 int fds[2];
208 int nevents;
209 icp_io_events = 0;
210 if (theInIcpConnection >= 0)
211 fds[nfds++] = theInIcpConnection;
212 if (theInIcpConnection != theOutIcpConnection)
213 if (theOutIcpConnection >= 0)
214 fds[nfds++] = theOutIcpConnection;
215 if (nfds == 0)
216 return;
217 nevents = comm_check_incoming_poll_handlers(nfds, fds);
218 incoming_icp_interval = incoming_icp_interval + 1 - nevents;
219 if (incoming_icp_interval < 0)
220 incoming_icp_interval = 0;
221 if (incoming_icp_interval > MAX_INCOMING_INTERVAL)
222 incoming_icp_interval = MAX_INCOMING_INTERVAL;
223 if (nevents > INCOMING_ICP_MAX)
224 nevents = INCOMING_ICP_MAX;
225 statHistCount(&Counter.comm_icp_incoming, nevents);
226 }
227
228 static void
229 comm_poll_http_incoming(void)
230 {
231 int nfds = 0;
232 int fds[MAXHTTPPORTS];
233 int j;
234 int nevents;
235 http_io_events = 0;
236 for (j = 0; j < NHttpSockets; j++) {
237 if (HttpSockets[j] < 0)
238 continue;
239 if (commDeferRead(HttpSockets[j]))
240 continue;
241 fds[nfds++] = HttpSockets[j];
242 }
243 nevents = comm_check_incoming_poll_handlers(nfds, fds);
244 incoming_http_interval = incoming_http_interval + 1 - nevents;
245 if (incoming_http_interval < 0)
246 incoming_http_interval = 0;
247 if (incoming_http_interval > MAX_INCOMING_INTERVAL)
248 incoming_http_interval = MAX_INCOMING_INTERVAL;
249 if (nevents > INCOMING_HTTP_MAX)
250 nevents = INCOMING_HTTP_MAX;
251 statHistCount(&Counter.comm_http_incoming, nevents);
252 }
253
254 /* poll all sockets; call handlers for those that are ready. */
255 int
256 comm_poll(int msec)
257 {
258 struct pollfd pfds[SQUID_MAXFD];
259 PF *hdl = NULL;
260 int fd;
261 int i;
262 int maxfd;
263 unsigned long nfds;
264 int num;
265 int callicp = 0, callhttp = 0;
266 static time_t last_timeout = 0;
267 double timeout = current_dtime + (msec / 1000.0);
268 do {
269 #if !ALARM_UPDATES_TIME
270 getCurrentTime();
271 #endif
272 #if USE_ASYNC_IO
273 aioCheckCallbacks();
274 #endif
275 if (commCheckICPIncoming)
276 comm_poll_icp_incoming();
277 if (commCheckHTTPIncoming)
278 comm_poll_http_incoming();
279 #if DELAY_POOLS
280 if (squid_curtime > delay_pools_last_update) {
281 delayPoolsUpdate(squid_curtime - delay_pools_last_update);
282 delay_pools_last_update = squid_curtime;
283 }
284 #endif
285 callicp = callhttp = 0;
286 nfds = 0;
287 maxfd = Biggest_FD + 1;
288 for (i = 0; i < maxfd; i++) {
289 int events;
290 events = 0;
291 /* Check each open socket for a handler. */
292 if (fd_table[i].read_handler && !commDeferRead(i))
293 events |= POLLRDNORM;
294 if (fd_table[i].write_handler)
295 events |= POLLWRNORM;
296 if (events) {
297 pfds[nfds].fd = i;
298 pfds[nfds].events = events;
299 pfds[nfds].revents = 0;
300 nfds++;
301 }
302 }
303 if (nfds == 0) {
304 assert(shutting_down);
305 return COMM_SHUTDOWN;
306 }
307 if (msec > MAX_POLL_TIME)
308 msec = MAX_POLL_TIME;
309 for (;;) {
310 num = poll(pfds, nfds, msec);
311 Counter.select_loops++;
312 if (num >= 0)
313 break;
314 if (ignoreErrno(errno))
315 continue;
316 debug(5, 0) ("comm_poll: poll failure: %s\n", xstrerror());
317 assert(errno != EINVAL);
318 return COMM_ERROR;
319 /* NOTREACHED */
320 }
321 debug(5, num ? 5 : 8) ("comm_poll: %d sockets ready\n", num);
322 /* Check timeout handlers ONCE each second. */
323 if (squid_curtime > last_timeout) {
324 last_timeout = squid_curtime;
325 checkTimeouts();
326 }
327 if (num == 0)
328 continue;
329 /* scan each socket but the accept socket. Poll this
330 * more frequently to minimize losses due to the 5 connect
331 * limit in SunOS */
332 for (i = 0; i < nfds; i++) {
333 int revents;
334 if (((revents = pfds[i].revents) == 0) || ((fd = pfds[i].fd) == -1))
335 continue;
336 if (fdIsIcp(fd)) {
337 callicp = 1;
338 continue;
339 }
340 if (fdIsHttp(fd)) {
341 callhttp = 1;
342 continue;
343 }
344 if (revents & (POLLRDNORM | POLLIN | POLLHUP | POLLERR)) {
345 debug(5, 6) ("comm_poll: FD %d ready for reading\n", fd);
346 if ((hdl = fd_table[fd].read_handler)) {
347 fd_table[fd].read_handler = NULL;
348 hdl(fd, fd_table[fd].read_data);
349 }
350 if (commCheckICPIncoming)
351 comm_poll_icp_incoming();
352 if (commCheckHTTPIncoming)
353 comm_poll_http_incoming();
354 }
355 if (revents & (POLLWRNORM | POLLOUT | POLLHUP | POLLERR)) {
356 debug(5, 5) ("comm_poll: FD %d ready for writing\n", fd);
357 if ((hdl = fd_table[fd].write_handler)) {
358 fd_table[fd].write_handler = NULL;
359 hdl(fd, fd_table[fd].write_data);
360 }
361 if (commCheckICPIncoming)
362 comm_poll_icp_incoming();
363 if (commCheckHTTPIncoming)
364 comm_poll_http_incoming();
365 }
366 if (revents & POLLNVAL) {
367 close_handler *ch;
368 fde *F = &fd_table[fd];
369 debug(5, 0) ("WARNING: FD %d has handlers, but it's invalid.\n", fd);
370 debug(5, 0) ("FD %d is a %s\n", fd, fdTypeStr[fd_table[fd].type]);
371 debug(5, 0) ("--> %s\n", fd_table[fd].desc);
372 debug(5, 0) ("tmout:%p read:%p write:%p\n",
373 F->timeout_handler,
374 F->read_handler,
375 F->write_handler);
376 for (ch = F->close_handler; ch; ch = ch->next)
377 debug(5, 0) (" close handler: %p\n", ch->handler);
378 if (F->close_handler) {
379 commCallCloseHandlers(fd);
380 } else if (F->timeout_handler) {
381 debug(5, 0) ("comm_poll: Calling Timeout Handler\n");
382 F->timeout_handler(fd, F->timeout_data);
383 }
384 F->close_handler = NULL;
385 F->timeout_handler = NULL;
386 F->read_handler = NULL;
387 F->write_handler = NULL;
388 if (F->open != 0)
389 fd_close(fd);
390 }
391 }
392 if (callicp)
393 comm_poll_icp_incoming();
394 if (callhttp)
395 comm_poll_http_incoming();
396 return COMM_OK;
397 } while (timeout > current_dtime);
398 debug(5, 8) ("comm_poll: time out: %d.\n", squid_curtime);
399 return COMM_TIMEOUT;
400 }
401
402 #else
403
/*
 * comm_check_incoming_select_handlers: select() (with zero timeout)
 * on the given incoming sockets and invoke any registered read/write
 * handlers.  Each handler receives a pointer to 'incame' as its data
 * argument and is expected to bump it per message handled.
 *
 * Returns the number of incoming events processed.
 */
static int
comm_check_incoming_select_handlers(int nfds, int *fds)
{
    int i;
    int fd;
    int incame = 0;
    int maxfd = 0;
    PF *hdl = NULL;
    fd_set read_mask;
    fd_set write_mask;
    FD_ZERO(&read_mask);
    FD_ZERO(&write_mask);
    /* build the fd_sets and track the highest descriptor seen */
    for (i = 0; i < nfds; i++) {
        fd = fds[i];
        if (fd_table[fd].read_handler) {
            FD_SET(fd, &read_mask);
            if (fd > maxfd)
                maxfd = fd;
        }
        if (fd_table[fd].write_handler) {
            FD_SET(fd, &write_mask);
            if (fd > maxfd)
                maxfd = fd;
        }
    }
    /*
     * maxfd is tested then post-incremented to become select()'s
     * nfds argument (highest fd + 1).
     * NOTE(review): if the ONLY descriptor with a handler were FD 0,
     * maxfd would still be 0 here and we would bail early.  FD 0 is
     * presumably never a squid socket — verify before relying on this.
     */
    if (maxfd++ == 0)
        return incame;
#if !ALARM_UPDATES_TIME
    getCurrentTime();
#endif
    /* zero_tv: non-blocking peek at the incoming sockets */
    if (select(maxfd, &read_mask, &write_mask, NULL, &zero_tv) < 1)
        return incame;
    for (i = 0; i < nfds; i++) {
        fd = fds[i];
        if (FD_ISSET(fd, &read_mask)) {
            if ((hdl = fd_table[fd].read_handler) != NULL) {
                /* one-shot handler: clear before calling */
                fd_table[fd].read_handler = NULL;
                hdl(fd, &incame);
            } else {
                debug(5, 1) ("comm_select_incoming: NULL read handler\n");
            }
        }
        if (FD_ISSET(fd, &write_mask)) {
            if ((hdl = fd_table[fd].write_handler) != NULL) {
                fd_table[fd].write_handler = NULL;
                hdl(fd, &incame);
            } else {
                debug(5, 1) ("comm_select_incoming: NULL write handler\n");
            }
        }
    }
    return incame;
}
457
458 static void
459 comm_select_icp_incoming(void)
460 {
461 int nfds = 0;
462 int fds[2];
463 int nevents;
464 icp_io_events = 0;
465 if (theInIcpConnection >= 0)
466 fds[nfds++] = theInIcpConnection;
467 if (theInIcpConnection != theOutIcpConnection)
468 if (theOutIcpConnection >= 0)
469 fds[nfds++] = theOutIcpConnection;
470 if (nfds == 0)
471 return;
472 nevents = comm_check_incoming_select_handlers(nfds, fds);
473 incoming_icp_interval = incoming_icp_interval + 1 - nevents;
474 if (incoming_icp_interval < 0)
475 incoming_icp_interval = 0;
476 if (incoming_icp_interval > MAX_INCOMING_INTERVAL)
477 incoming_icp_interval = MAX_INCOMING_INTERVAL;
478 if (nevents > INCOMING_ICP_MAX)
479 nevents = INCOMING_ICP_MAX;
480 statHistCount(&Counter.comm_icp_incoming, nevents);
481 }
482
483 static void
484 comm_select_http_incoming(void)
485 {
486 int nfds = 0;
487 int fds[MAXHTTPPORTS];
488 int j;
489 int nevents;
490 http_io_events = 0;
491 for (j = 0; j < NHttpSockets; j++) {
492 if (HttpSockets[j] < 0)
493 continue;
494 if (commDeferRead(HttpSockets[j]))
495 continue;
496 fds[nfds++] = HttpSockets[j];
497 }
498 nevents = comm_check_incoming_select_handlers(nfds, fds);
499 incoming_http_interval = incoming_http_interval + 1 - nevents;
500 if (incoming_http_interval < 0)
501 incoming_http_interval = 0;
502 if (incoming_http_interval > MAX_INCOMING_INTERVAL)
503 incoming_http_interval = MAX_INCOMING_INTERVAL;
504 if (nevents > INCOMING_HTTP_MAX)
505 nevents = INCOMING_HTTP_MAX;
506 statHistCount(&Counter.comm_http_incoming, nevents);
507 }
508
/*
 * comm_select: select() on all sockets and dispatch read/write
 * handlers for the descriptors that are ready.  The incoming
 * (ICP/HTTP) sockets are additionally checked mid-scan via the
 * adaptive commCheck*Incoming macros so accept losses are minimized
 * under load.
 *
 * Returns COMM_OK after one successful dispatch pass, COMM_SHUTDOWN
 * when no descriptors have handlers registered, COMM_ERROR on a
 * select() failure, or COMM_TIMEOUT if 'msec' elapses without any
 * descriptor becoming ready.
 */
int
comm_select(int msec)
{
    fd_set readfds;
    fd_set writefds;
    PF *hdl = NULL;
    int fd;
    int i;
    int maxfd;
    int nfds;
    int num;
    int callicp = 0, callhttp = 0;
    static time_t last_timeout = 0;
    struct timeval poll_time;
    double timeout = current_dtime + (msec / 1000.0);
    do {
#if !ALARM_UPDATES_TIME
        getCurrentTime();
#endif
#if USE_ASYNC_IO
        aioCheckCallbacks();
#endif
        FD_ZERO(&readfds);
        FD_ZERO(&writefds);
        if (commCheckICPIncoming)
            comm_select_icp_incoming();
        if (commCheckHTTPIncoming)
            comm_select_http_incoming();
#if DELAY_POOLS
        if (squid_curtime > delay_pools_last_update) {
            delayPoolsUpdate(squid_curtime - delay_pools_last_update);
            delay_pools_last_update = squid_curtime;
        }
#endif
        callicp = callhttp = 0;
        nfds = 0;
        maxfd = Biggest_FD + 1;
        for (i = 0; i < maxfd; i++) {
            /* Check each open socket for a handler. */
            if (fd_table[i].read_handler && !commDeferRead(i)) {
                nfds++;
                FD_SET(i, &readfds);
            }
            if (fd_table[i].write_handler) {
                nfds++;
                FD_SET(i, &writefds);
            }
        }
        if (nfds == 0) {
            /* nothing registered at all: only legal while shutting down */
            assert(shutting_down);
            return COMM_SHUTDOWN;
        }
        if (msec > MAX_POLL_TIME)
            msec = MAX_POLL_TIME;
        /* retry select() across ignorable errors (e.g. EINTR) */
        for (;;) {
            /* poll_time must be re-set each pass: select() may modify it */
            poll_time.tv_sec = msec / 1000;
            poll_time.tv_usec = (msec % 1000) * 1000;
            num = select(maxfd, &readfds, &writefds, NULL, &poll_time);
            Counter.select_loops++;
            if (num >= 0)
                break;
            if (ignoreErrno(errno))
                break;
            debug(50, 0) ("comm_select: select failure: %s\n",
                xstrerror());
            examine_select(&readfds, &writefds);
            return COMM_ERROR;
            /* NOTREACHED */
        }
        /* an ignorable error left num < 0: restart the outer loop */
        if (num < 0)
            continue;
        debug(5, num ? 5 : 8) ("comm_select: %d sockets ready at %d\n",
            num, (int) squid_curtime);
        /* Check lifetime and timeout handlers ONCE each second.
         * Replaces brain-dead check every time through the loop! */
        if (squid_curtime > last_timeout) {
            last_timeout = squid_curtime;
            checkTimeouts();
        }
        if (num == 0)
            continue;
        /* scan each socket but the accept socket. Poll this
         * more frequently to minimize losses due to the 5 connect
         * limit in SunOS */
        for (fd = 0; fd < maxfd; fd++) {
            if (!FD_ISSET(fd, &readfds) && !FD_ISSET(fd, &writefds))
                continue;
            /* incoming sockets are batched and serviced after the scan */
            if (fdIsIcp(fd)) {
                callicp = 1;
                continue;
            }
            if (fdIsHttp(fd)) {
                callhttp = 1;
                continue;
            }
            if (FD_ISSET(fd, &readfds)) {
                debug(5, 6) ("comm_select: FD %d ready for reading\n", fd);
                if (fd_table[fd].read_handler) {
                    /* one-shot handler: clear before calling */
                    hdl = fd_table[fd].read_handler;
                    fd_table[fd].read_handler = NULL;
                    hdl(fd, fd_table[fd].read_data);
                }
                if (commCheckICPIncoming)
                    comm_select_icp_incoming();
                if (commCheckHTTPIncoming)
                    comm_select_http_incoming();
            }
            if (FD_ISSET(fd, &writefds)) {
                debug(5, 5) ("comm_select: FD %d ready for writing\n", fd);
                if (fd_table[fd].write_handler) {
                    hdl = fd_table[fd].write_handler;
                    fd_table[fd].write_handler = NULL;
                    hdl(fd, fd_table[fd].write_data);
                }
                if (commCheckICPIncoming)
                    comm_select_icp_incoming();
                if (commCheckHTTPIncoming)
                    comm_select_http_incoming();
            }
        }
        if (callicp)
            comm_select_icp_incoming();
        if (callhttp)
            comm_select_http_incoming();
        return COMM_OK;
    } while (timeout > current_dtime);
    debug(5, 8) ("comm_select: time out: %d\n", (int) squid_curtime);
    return COMM_TIMEOUT;
}
639 #endif
640
641 void
642 comm_select_init(void)
643 {
644 zero_tv.tv_sec = 0;
645 zero_tv.tv_usec = 0;
646 cachemgrRegister("comm_incoming",
647 "comm_incoming() stats",
648 commIncomingStats, 0, 1);
649 }
650
651 #if !HAVE_POLL
/*
 * examine_select - debug routine.
 *
 * Called after a select() failure: probes each descriptor in the
 * failed fd_sets individually to find the invalid one(s), runs their
 * close/timeout handlers, scrubs their handler slots and removes them
 * from the caller's fd_sets so the main loop can continue.
 *
 * I spend the day chasing this core dump that occurs when both the client
 * and the server side of a cache fetch simultaneoulsy abort the
 * connection. While I haven't really studied the code to figure out how
 * it happens, the snippet below may prevent the cache from exitting:
 *
 * Call this from where the select loop fails.
 */
static int
examine_select(fd_set * readfds, fd_set * writefds)
{
    int fd = 0;
    fd_set read_x;
    fd_set write_x;
    int num;
    struct timeval tv;
    close_handler *ch = NULL;
    fde *F = NULL;
    debug(5, 0) ("examine_select: Examining open file descriptors...\n");
    for (fd = 0; fd < Squid_MaxFD; fd++) {
        /* probe this one descriptor with a zero-timeout select() */
        FD_ZERO(&read_x);
        FD_ZERO(&write_x);
        tv.tv_sec = tv.tv_usec = 0;
        if (FD_ISSET(fd, readfds))
            FD_SET(fd, &read_x);
        else if (FD_ISSET(fd, writefds))
            FD_SET(fd, &write_x);
        else
            continue;
        num = select(Squid_MaxFD, &read_x, &write_x, NULL, &tv);
        if (num > -1) {
            debug(5, 5) ("FD %d is valid.\n", fd);
            continue;
        }
        /* select() failed on this lone descriptor: it is the bad one */
        F = &fd_table[fd];
        debug(5, 0) ("FD %d: %s\n", fd, xstrerror());
        debug(5, 0) ("WARNING: FD %d has handlers, but it's invalid.\n", fd);
        debug(5, 0) ("FD %d is a %s called '%s'\n",
            fd,
            fdTypeStr[fd_table[fd].type],
            F->desc);
        debug(5, 0) ("tmout:%p read:%p write:%p\n",
            F->timeout_handler,
            F->read_handler,
            F->write_handler);
        for (ch = F->close_handler; ch; ch = ch->next)
            debug(5, 0) (" close handler: %p\n", ch->handler);
        if (F->close_handler) {
            commCallCloseHandlers(fd);
        } else if (F->timeout_handler) {
            debug(5, 0) ("examine_select: Calling Timeout Handler\n");
            F->timeout_handler(fd, F->timeout_data);
        }
        /* scrub all handlers and drop the fd from the caller's sets */
        F->close_handler = NULL;
        F->timeout_handler = NULL;
        F->read_handler = NULL;
        F->write_handler = NULL;
        FD_CLR(fd, readfds);
        FD_CLR(fd, writefds);
    }
    return 0;
}
716 #endif
717
718 static void
719 checkTimeouts(void)
720 {
721 int fd;
722 fde *F = NULL;
723 PF *callback;
724 for (fd = 0; fd <= Biggest_FD; fd++) {
725 F = &fd_table[fd];
726 if (F->open != FD_OPEN)
727 continue;
728 if (F->timeout == 0)
729 continue;
730 if (F->timeout > squid_curtime)
731 continue;
732 debug(5, 5) ("checkTimeouts: FD %d Expired\n", fd);
733 if (F->timeout_handler) {
734 debug(5, 5) ("checkTimeouts: FD %d: Call timeout handler\n", fd);
735 callback = F->timeout_handler;
736 F->timeout_handler = NULL;
737 callback(fd, F->timeout_data);
738 } else {
739 debug(5, 5) ("checkTimeouts: FD %d: Forcing comm_close()\n", fd);
740 comm_close(fd);
741 }
742 }
743 }
744
745 static void
746 commIncomingStats(StoreEntry * sentry)
747 {
748 StatCounters *f = &Counter;
749 storeAppendPrintf(sentry, "Current incoming_icp_interval: %d\n",
750 incoming_icp_interval >> INCOMING_FACTOR);
751 storeAppendPrintf(sentry, "Current incoming_http_interval: %d\n",
752 incoming_http_interval >> INCOMING_FACTOR);
753 storeAppendPrintf(sentry, "\n");
754 storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n");
755 #ifdef HAVE_POLL
756 storeAppendPrintf(sentry, "ICP Messages handled per comm_poll_icp_incoming() call:\n");
757 #else
758 storeAppendPrintf(sentry, "ICP Messages handled per comm_select_icp_incoming() call:\n");
759 #endif
760 statHistDump(&f->comm_icp_incoming, sentry, statHistIntDumper);
761 #ifdef HAVE_POLL
762 storeAppendPrintf(sentry, "HTTP Messages handled per comm_poll_http_incoming() call:\n");
763 #else
764 storeAppendPrintf(sentry, "HTTP Messages handled per comm_select_http_incoming() call:\n");
765 #endif
766 statHistDump(&f->comm_http_incoming, sentry, statHistIntDumper);
767 }