/*
 * comm_poll.cc -- poll(2)-based socket event loop.
 * (Stray blame-view/table header removed; it was not valid source text.)
 */
1b3db6d9 | 1 | |
2 | /* | |
a48a31d5 | 3 | * $Id: comm_poll.cc,v 1.8 2002/10/28 01:12:28 adrian Exp $ |
1b3db6d9 | 4 | * |
5 | * DEBUG: section 5 Socket Functions | |
6 | * | |
7 | * SQUID Web Proxy Cache http://www.squid-cache.org/ | |
8 | * ---------------------------------------------------------- | |
9 | * | |
10 | * Squid is the result of efforts by numerous individuals from | |
11 | * the Internet community; see the CONTRIBUTORS file for full | |
12 | * details. Many organizations have provided support for Squid's | |
13 | * development; see the SPONSORS file for full details. Squid is | |
14 | * Copyrighted (C) 2001 by the Regents of the University of | |
15 | * California; see the COPYRIGHT file for full details. Squid | |
16 | * incorporates software developed and/or copyrighted by other | |
17 | * sources; see the CREDITS file for full details. | |
18 | * | |
19 | * This program is free software; you can redistribute it and/or modify | |
20 | * it under the terms of the GNU General Public License as published by | |
21 | * the Free Software Foundation; either version 2 of the License, or | |
22 | * (at your option) any later version. | |
23 | * | |
24 | * This program is distributed in the hope that it will be useful, | |
25 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
26 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
27 | * GNU General Public License for more details. | |
28 | * | |
29 | * You should have received a copy of the GNU General Public License | |
30 | * along with this program; if not, write to the Free Software | |
31 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA. | |
32 | * | |
33 | */ | |
34 | ||
35 | #include "squid.h" | |
e6ccf245 | 36 | #include "Store.h" |
1b3db6d9 | 37 | |
38 | #ifdef USE_POLL | |
39 | ||
40 | static int MAX_POLL_TIME = 1000; /* see also comm_quick_poll_required() */ | |
41 | ||
42 | #ifndef howmany | |
43 | #define howmany(x, y) (((x)+((y)-1))/(y)) | |
44 | #endif | |
45 | #ifndef NBBY | |
46 | #define NBBY 8 | |
47 | #endif | |
48 | #define FD_MASK_BYTES sizeof(fd_mask) | |
49 | #define FD_MASK_BITS (FD_MASK_BYTES*NBBY) | |
50 | ||
51 | /* STATIC */ | |
52 | static int fdIsHttp(int fd); | |
53 | static int fdIsIcp(int fd); | |
54 | static int fdIsDns(int fd); | |
55 | static OBJH commIncomingStats; | |
56 | static int comm_check_incoming_poll_handlers(int nfds, int *fds); | |
57 | static void comm_poll_dns_incoming(void); | |
58 | static void commUpdateReadBits(int fd, PF * handler); | |
59 | static void commUpdateWriteBits(int fd, PF * handler); | |
60 | ||
61 | static fd_set global_readfds; | |
62 | static fd_set global_writefds; | |
63 | static int nreadfds; | |
64 | static int nwritefds; | |
65 | ||
66 | /* | |
67 | * Automatic tuning for incoming requests: | |
68 | * | |
69 | * INCOMING sockets are the ICP and HTTP ports. We need to check these | |
70 | * fairly regularly, but how often? When the load increases, we | |
71 | * want to check the incoming sockets more often. If we have a lot | |
72 | * of incoming ICP, then we need to check these sockets more than | |
73 | * if we just have HTTP. | |
74 | * | |
75 | * The variables 'incoming_icp_interval' and 'incoming_http_interval' | |
76 | * determine how many normal I/O events to process before checking | |
77 | * incoming sockets again. Note we store the incoming_interval | |
78 | * multipled by a factor of (2^INCOMING_FACTOR) to have some | |
79 | * pseudo-floating point precision. | |
80 | * | |
81 | * The variable 'icp_io_events' and 'http_io_events' counts how many normal | |
82 | * I/O events have been processed since the last check on the incoming | |
83 | * sockets. When io_events > incoming_interval, its time to check incoming | |
84 | * sockets. | |
85 | * | |
86 | * Every time we check incoming sockets, we count how many new messages | |
87 | * or connections were processed. This is used to adjust the | |
88 | * incoming_interval for the next iteration. The new incoming_interval | |
89 | * is calculated as the current incoming_interval plus what we would | |
90 | * like to see as an average number of events minus the number of | |
91 | * events just processed. | |
92 | * | |
93 | * incoming_interval = incoming_interval + target_average - number_of_events_processed | |
94 | * | |
95 | * There are separate incoming_interval counters for both HTTP and ICP events | |
96 | * | |
97 | * You can see the current values of the incoming_interval's, as well as | |
98 | * a histogram of 'incoming_events' by asking the cache manager | |
99 | * for 'comm_incoming', e.g.: | |
100 | * | |
101 | * % ./client mgr:comm_incoming | |
102 | * | |
103 | * Caveats: | |
104 | * | |
105 | * - We have MAX_INCOMING_INTEGER as a magic upper limit on | |
106 | * incoming_interval for both types of sockets. At the | |
107 | * largest value the cache will effectively be idling. | |
108 | * | |
109 | * - The higher the INCOMING_FACTOR, the slower the algorithm will | |
110 | * respond to load spikes/increases/decreases in demand. A value | |
111 | * between 3 and 8 is recommended. | |
112 | */ | |
113 | ||
114 | #define MAX_INCOMING_INTEGER 256 | |
115 | #define INCOMING_FACTOR 5 | |
116 | #define MAX_INCOMING_INTERVAL (MAX_INCOMING_INTEGER << INCOMING_FACTOR) | |
117 | static int icp_io_events = 0; | |
118 | static int dns_io_events = 0; | |
119 | static int http_io_events = 0; | |
120 | static int incoming_icp_interval = 16 << INCOMING_FACTOR; | |
121 | static int incoming_dns_interval = 16 << INCOMING_FACTOR; | |
122 | static int incoming_http_interval = 16 << INCOMING_FACTOR; | |
123 | #define commCheckICPIncoming (++icp_io_events > (incoming_icp_interval>> INCOMING_FACTOR)) | |
124 | #define commCheckDNSIncoming (++dns_io_events > (incoming_dns_interval>> INCOMING_FACTOR)) | |
125 | #define commCheckHTTPIncoming (++http_io_events > (incoming_http_interval>> INCOMING_FACTOR)) | |
126 | ||
127 | ||
128 | void | |
129 | commSetSelect(int fd, unsigned int type, PF * handler, void *client_data, | |
fa80a8ef | 130 | time_t timeout) |
1b3db6d9 | 131 | { |
132 | fde *F = &fd_table[fd]; | |
133 | assert(fd >= 0); | |
134 | assert(F->flags.open); | |
135 | debug(5, 5) ("commSetSelect: FD %d type %d\n", fd, type); | |
136 | if (type & COMM_SELECT_READ) { | |
fa80a8ef | 137 | F->read_handler = handler; |
138 | F->read_data = client_data; | |
139 | commUpdateReadBits(fd, handler); | |
1b3db6d9 | 140 | } |
141 | if (type & COMM_SELECT_WRITE) { | |
fa80a8ef | 142 | F->write_handler = handler; |
143 | F->write_data = client_data; | |
144 | commUpdateWriteBits(fd, handler); | |
1b3db6d9 | 145 | } |
146 | if (timeout) | |
fa80a8ef | 147 | F->timeout = squid_curtime + timeout; |
1b3db6d9 | 148 | } |
149 | ||
150 | static int | |
151 | fdIsIcp(int fd) | |
152 | { | |
153 | if (fd == theInIcpConnection) | |
154 | return 1; | |
155 | if (fd == theOutIcpConnection) | |
156 | return 1; | |
157 | return 0; | |
158 | } | |
159 | ||
160 | static int | |
161 | fdIsDns(int fd) | |
162 | { | |
163 | if (fd == DnsSocket) | |
164 | return 1; | |
165 | return 0; | |
166 | } | |
167 | ||
168 | static int | |
169 | fdIsHttp(int fd) | |
170 | { | |
171 | int j; | |
172 | for (j = 0; j < NHttpSockets; j++) { | |
173 | if (fd == HttpSockets[j]) | |
174 | return 1; | |
175 | } | |
176 | return 0; | |
177 | } | |
178 | ||
#if DELAY_POOLS
/*
 * Descriptors whose reads are rate-limited by delay pools are parked
 * here during a poll pass and serviced afterwards, one at a time, in
 * random order so no single slow descriptor is favoured.
 */
static int slowfdcnt = 0;
static int slowfdarr[SQUID_MAXFD];

/* Remember 'fd' as delay-pool limited for this poll pass. */
static void
commAddSlowFd(int fd)
{
    assert(slowfdcnt < SQUID_MAXFD);
    slowfdarr[slowfdcnt++] = fd;
}

/*
 * Pop a random parked descriptor, or -1 when none remain.  The last
 * entry is swapped into the vacated slot so removal is O(1).
 */
static int
commGetSlowFd(void)
{
    int pick;
    int chosen;

    if (slowfdcnt == 0)
        return -1;
    pick = squid_random() % slowfdcnt;
    chosen = slowfdarr[pick];
    slowfdarr[pick] = slowfdarr[--slowfdcnt];
    return chosen;
}
#endif
203 | ||
/*
 * Poll the given "incoming" sockets (ICP/DNS/HTTP listeners) once,
 * with a zero timeout, and invoke any ready read/write handlers.
 *
 * nfds/fds: count and array of descriptors to check.  Only those with
 * a registered handler are actually handed to poll(2).
 *
 * Returns the number of connections/messages accepted during this
 * check (via the global incoming_sockets_accepted, which handlers
 * increment), or -1 when nfds is zero.  Callers use this value to
 * retune their polling interval.
 */
static int
comm_check_incoming_poll_handlers(int nfds, int *fds)
{
    int i;
    int fd;
    PF *hdl = NULL;
    int npfds;
    /* worst case: ICP in + ICP out + DNS + every HTTP port */
    struct pollfd pfds[3 + MAXHTTPPORTS];
    PROF_start(comm_check_incoming);
    incoming_sockets_accepted = 0;
    /* Build the pollfd array from descriptors that have handlers. */
    for (i = npfds = 0; i < nfds; i++) {
        int events;
        fd = fds[i];
        events = 0;
        if (fd_table[fd].read_handler)
            events |= POLLRDNORM;
        if (fd_table[fd].write_handler)
            events |= POLLWRNORM;
        if (events) {
            pfds[npfds].fd = fd;
            pfds[npfds].events = events;
            pfds[npfds].revents = 0;
            npfds++;
        }
    }
    if (!nfds) {
        PROF_stop(comm_check_incoming);
        return -1;
    }
    getCurrentTime();
    statCounter.syscalls.polls++;
    /* Zero timeout: this is an opportunistic check, never a wait. */
    if (poll(pfds, npfds, 0) < 1) {
        PROF_stop(comm_check_incoming);
        return incoming_sockets_accepted;
    }
    /* Dispatch handlers for every descriptor poll() flagged ready. */
    for (i = 0; i < npfds; i++) {
        int revents;
        if (((revents = pfds[i].revents) == 0) || ((fd = pfds[i].fd) == -1))
            continue;
        /* HUP/ERR are treated as readable so the handler sees EOF/error. */
        if (revents & (POLLRDNORM | POLLIN | POLLHUP | POLLERR)) {
            if ((hdl = fd_table[fd].read_handler)) {
                /* one-shot: clear before calling; handler may re-register */
                fd_table[fd].read_handler = NULL;
                hdl(fd, fd_table[fd].read_data);
            } else if (pfds[i].events & POLLRDNORM)
                debug(5, 1) ("comm_poll_incoming: FD %d NULL read handler\n",
                    fd);
        }
        if (revents & (POLLWRNORM | POLLOUT | POLLHUP | POLLERR)) {
            if ((hdl = fd_table[fd].write_handler)) {
                fd_table[fd].write_handler = NULL;
                hdl(fd, fd_table[fd].write_data);
            } else if (pfds[i].events & POLLWRNORM)
                debug(5, 1) ("comm_poll_incoming: FD %d NULL write_handler\n",
                    fd);
        }
    }
    PROF_stop(comm_check_incoming);
    return incoming_sockets_accepted;
}
263 | ||
264 | static void | |
265 | comm_poll_icp_incoming(void) | |
266 | { | |
267 | int nfds = 0; | |
268 | int fds[2]; | |
269 | int nevents; | |
270 | icp_io_events = 0; | |
271 | if (theInIcpConnection >= 0) | |
272 | fds[nfds++] = theInIcpConnection; | |
273 | if (theInIcpConnection != theOutIcpConnection) | |
274 | if (theOutIcpConnection >= 0) | |
275 | fds[nfds++] = theOutIcpConnection; | |
276 | if (nfds == 0) | |
277 | return; | |
278 | nevents = comm_check_incoming_poll_handlers(nfds, fds); | |
279 | incoming_icp_interval += Config.comm_incoming.icp_average - nevents; | |
280 | if (incoming_icp_interval < Config.comm_incoming.icp_min_poll) | |
281 | incoming_icp_interval = Config.comm_incoming.icp_min_poll; | |
282 | if (incoming_icp_interval > MAX_INCOMING_INTERVAL) | |
283 | incoming_icp_interval = MAX_INCOMING_INTERVAL; | |
284 | if (nevents > INCOMING_ICP_MAX) | |
285 | nevents = INCOMING_ICP_MAX; | |
286 | statHistCount(&statCounter.comm_icp_incoming, nevents); | |
287 | } | |
288 | ||
289 | static void | |
290 | comm_poll_http_incoming(void) | |
291 | { | |
292 | int nfds = 0; | |
293 | int fds[MAXHTTPPORTS]; | |
294 | int j; | |
295 | int nevents; | |
296 | http_io_events = 0; | |
297 | for (j = 0; j < NHttpSockets; j++) { | |
298 | if (HttpSockets[j] < 0) | |
299 | continue; | |
300 | if (commDeferRead(HttpSockets[j])) | |
301 | continue; | |
302 | fds[nfds++] = HttpSockets[j]; | |
303 | } | |
304 | nevents = comm_check_incoming_poll_handlers(nfds, fds); | |
305 | incoming_http_interval = incoming_http_interval | |
306 | + Config.comm_incoming.http_average - nevents; | |
307 | if (incoming_http_interval < Config.comm_incoming.http_min_poll) | |
308 | incoming_http_interval = Config.comm_incoming.http_min_poll; | |
309 | if (incoming_http_interval > MAX_INCOMING_INTERVAL) | |
310 | incoming_http_interval = MAX_INCOMING_INTERVAL; | |
311 | if (nevents > INCOMING_HTTP_MAX) | |
312 | nevents = INCOMING_HTTP_MAX; | |
313 | statHistCount(&statCounter.comm_http_incoming, nevents); | |
314 | } | |
315 | ||
316 | /* poll all sockets; call handlers for those that are ready. */ | |
3d7e9d7c | 317 | comm_err_t |
1b3db6d9 | 318 | comm_select(int msec) |
319 | { | |
320 | struct pollfd pfds[SQUID_MAXFD]; | |
321 | #if DELAY_POOLS | |
322 | fd_set slowfds; | |
323 | #endif | |
324 | PF *hdl = NULL; | |
325 | int fd; | |
1b3db6d9 | 326 | int maxfd; |
327 | unsigned long nfds; | |
328 | unsigned long npending; | |
329 | int num; | |
330 | int callicp = 0, callhttp = 0; | |
331 | int calldns = 0; | |
332 | static time_t last_timeout = 0; | |
333 | double timeout = current_dtime + (msec / 1000.0); | |
334 | do { | |
1b3db6d9 | 335 | double start; |
336 | getCurrentTime(); | |
337 | start = current_dtime; | |
1b3db6d9 | 338 | #if DELAY_POOLS |
339 | FD_ZERO(&slowfds); | |
340 | #endif | |
341 | if (commCheckICPIncoming) | |
342 | comm_poll_icp_incoming(); | |
343 | if (commCheckDNSIncoming) | |
344 | comm_poll_dns_incoming(); | |
345 | if (commCheckHTTPIncoming) | |
346 | comm_poll_http_incoming(); | |
88bfe092 | 347 | PROF_start(comm_poll_prep_pfds); |
1b3db6d9 | 348 | callicp = calldns = callhttp = 0; |
349 | nfds = 0; | |
350 | npending = 0; | |
351 | maxfd = Biggest_FD + 1; | |
e6ccf245 | 352 | for (int i = 0; i < maxfd; i++) { |
1b3db6d9 | 353 | int events; |
354 | events = 0; | |
355 | /* Check each open socket for a handler. */ | |
356 | if (fd_table[i].read_handler) { | |
357 | switch (commDeferRead(i)) { | |
358 | case 0: | |
359 | events |= POLLRDNORM; | |
360 | break; | |
361 | case 1: | |
362 | break; | |
363 | #if DELAY_POOLS | |
364 | case -1: | |
365 | events |= POLLRDNORM; | |
366 | FD_SET(i, &slowfds); | |
367 | break; | |
368 | #endif | |
369 | default: | |
370 | fatalf("bad return value from commDeferRead(FD %d)\n", i); | |
371 | } | |
372 | } | |
373 | if (fd_table[i].write_handler) | |
374 | events |= POLLWRNORM; | |
375 | if (events) { | |
376 | pfds[nfds].fd = i; | |
377 | pfds[nfds].events = events; | |
378 | pfds[nfds].revents = 0; | |
379 | nfds++; | |
380 | if ((events & POLLRDNORM) && fd_table[i].flags.read_pending) | |
381 | npending++; | |
382 | } | |
383 | } | |
88bfe092 | 384 | PROF_stop(comm_poll_prep_pfds); |
1b3db6d9 | 385 | if (nfds == 0) { |
386 | assert(shutting_down); | |
387 | return COMM_SHUTDOWN; | |
388 | } | |
389 | if (npending) | |
390 | msec = 0; | |
391 | if (msec > MAX_POLL_TIME) | |
392 | msec = MAX_POLL_TIME; | |
393 | for (;;) { | |
88bfe092 | 394 | PROF_start(comm_poll_normal); |
1b3db6d9 | 395 | statCounter.syscalls.polls++; |
396 | num = poll(pfds, nfds, msec); | |
397 | statCounter.select_loops++; | |
88bfe092 | 398 | PROF_stop(comm_poll_normal); |
1b3db6d9 | 399 | if (num >= 0 || npending >= 0) |
400 | break; | |
401 | if (ignoreErrno(errno)) | |
402 | continue; | |
403 | debug(5, 0) ("comm_poll: poll failure: %s\n", xstrerror()); | |
404 | assert(errno != EINVAL); | |
405 | return COMM_ERROR; | |
406 | /* NOTREACHED */ | |
407 | } | |
408 | debug(5, num ? 5 : 8) ("comm_poll: %d+%ld FDs ready\n", num, npending); | |
409 | statHistCount(&statCounter.select_fds_hist, num); | |
410 | /* Check timeout handlers ONCE each second. */ | |
411 | if (squid_curtime > last_timeout) { | |
412 | last_timeout = squid_curtime; | |
413 | checkTimeouts(); | |
414 | } | |
415 | if (num == 0 && npending == 0) | |
416 | continue; | |
417 | /* scan each socket but the accept socket. Poll this | |
418 | * more frequently to minimize losses due to the 5 connect | |
419 | * limit in SunOS */ | |
88bfe092 | 420 | PROF_start(comm_handle_ready_fd); |
e6ccf245 | 421 | for (size_t loopIndex = 0; loopIndex < nfds; loopIndex++) { |
1b3db6d9 | 422 | fde *F; |
e6ccf245 | 423 | int revents = pfds[loopIndex].revents; |
424 | fd = pfds[loopIndex].fd; | |
1b3db6d9 | 425 | if (fd == -1) |
426 | continue; | |
427 | if (fd_table[fd].flags.read_pending) | |
428 | revents |= POLLIN; | |
429 | if (revents == 0) | |
430 | continue; | |
431 | if (fdIsIcp(fd)) { | |
432 | callicp = 1; | |
433 | continue; | |
434 | } | |
435 | if (fdIsDns(fd)) { | |
436 | calldns = 1; | |
437 | continue; | |
438 | } | |
439 | if (fdIsHttp(fd)) { | |
440 | callhttp = 1; | |
441 | continue; | |
442 | } | |
443 | F = &fd_table[fd]; | |
444 | if (revents & (POLLRDNORM | POLLIN | POLLHUP | POLLERR)) { | |
445 | debug(5, 6) ("comm_poll: FD %d ready for reading\n", fd); | |
446 | if (NULL == (hdl = F->read_handler)) | |
447 | (void) 0; | |
448 | #if DELAY_POOLS | |
449 | else if (FD_ISSET(fd, &slowfds)) | |
450 | commAddSlowFd(fd); | |
451 | #endif | |
452 | else { | |
88bfe092 | 453 | PROF_start(comm_read_handler); |
1b3db6d9 | 454 | F->read_handler = NULL; |
455 | hdl(fd, F->read_data); | |
88bfe092 | 456 | PROF_stop(comm_read_handler); |
1b3db6d9 | 457 | statCounter.select_fds++; |
458 | if (commCheckICPIncoming) | |
459 | comm_poll_icp_incoming(); | |
460 | if (commCheckDNSIncoming) | |
461 | comm_poll_dns_incoming(); | |
462 | if (commCheckHTTPIncoming) | |
463 | comm_poll_http_incoming(); | |
464 | } | |
465 | } | |
466 | if (revents & (POLLWRNORM | POLLOUT | POLLHUP | POLLERR)) { | |
467 | debug(5, 5) ("comm_poll: FD %d ready for writing\n", fd); | |
468 | if ((hdl = F->write_handler)) { | |
88bfe092 | 469 | PROF_start(comm_write_handler); |
1b3db6d9 | 470 | F->write_handler = NULL; |
471 | hdl(fd, F->write_data); | |
88bfe092 | 472 | PROF_stop(comm_write_handler); |
1b3db6d9 | 473 | statCounter.select_fds++; |
474 | if (commCheckICPIncoming) | |
475 | comm_poll_icp_incoming(); | |
476 | if (commCheckDNSIncoming) | |
477 | comm_poll_dns_incoming(); | |
478 | if (commCheckHTTPIncoming) | |
479 | comm_poll_http_incoming(); | |
480 | } | |
481 | } | |
482 | if (revents & POLLNVAL) { | |
483 | close_handler *ch; | |
484 | debug(5, 0) ("WARNING: FD %d has handlers, but it's invalid.\n", fd); | |
485 | debug(5, 0) ("FD %d is a %s\n", fd, fdTypeStr[F->type]); | |
486 | debug(5, 0) ("--> %s\n", F->desc); | |
487 | debug(5, 0) ("tmout:%p read:%p write:%p\n", | |
488 | F->timeout_handler, | |
489 | F->read_handler, | |
490 | F->write_handler); | |
29b8d8d6 | 491 | for (ch = F->closeHandler; ch; ch = ch->next) |
1b3db6d9 | 492 | debug(5, 0) (" close handler: %p\n", ch->handler); |
29b8d8d6 | 493 | if (F->closeHandler) { |
1b3db6d9 | 494 | commCallCloseHandlers(fd); |
495 | } else if (F->timeout_handler) { | |
496 | debug(5, 0) ("comm_poll: Calling Timeout Handler\n"); | |
497 | F->timeout_handler(fd, F->timeout_data); | |
498 | } | |
29b8d8d6 | 499 | F->closeHandler = NULL; |
1b3db6d9 | 500 | F->timeout_handler = NULL; |
501 | F->read_handler = NULL; | |
502 | F->write_handler = NULL; | |
503 | if (F->flags.open) | |
504 | fd_close(fd); | |
505 | } | |
506 | } | |
88bfe092 | 507 | PROF_stop(comm_handle_ready_fd); |
1b3db6d9 | 508 | if (callicp) |
509 | comm_poll_icp_incoming(); | |
510 | if (calldns) | |
511 | comm_poll_dns_incoming(); | |
512 | if (callhttp) | |
513 | comm_poll_http_incoming(); | |
514 | #if DELAY_POOLS | |
515 | while ((fd = commGetSlowFd()) != -1) { | |
516 | fde *F = &fd_table[fd]; | |
517 | debug(5, 6) ("comm_select: slow FD %d selected for reading\n", fd); | |
518 | if ((hdl = F->read_handler)) { | |
519 | F->read_handler = NULL; | |
520 | hdl(fd, F->read_data); | |
521 | statCounter.select_fds++; | |
522 | if (commCheckICPIncoming) | |
523 | comm_poll_icp_incoming(); | |
524 | if (commCheckDNSIncoming) | |
525 | comm_poll_dns_incoming(); | |
526 | if (commCheckHTTPIncoming) | |
527 | comm_poll_http_incoming(); | |
528 | } | |
529 | } | |
530 | #endif | |
1b3db6d9 | 531 | getCurrentTime(); |
532 | statCounter.select_time += (current_dtime - start); | |
1b3db6d9 | 533 | return COMM_OK; |
534 | } | |
535 | while (timeout > current_dtime); | |
536 | debug(5, 8) ("comm_poll: time out: %ld.\n", (long int) squid_curtime); | |
537 | return COMM_TIMEOUT; | |
538 | } | |
539 | ||
540 | ||
541 | static void | |
542 | comm_poll_dns_incoming(void) | |
543 | { | |
544 | int nfds = 0; | |
545 | int fds[2]; | |
546 | int nevents; | |
547 | dns_io_events = 0; | |
548 | if (DnsSocket < 0) | |
549 | return; | |
550 | fds[nfds++] = DnsSocket; | |
551 | nevents = comm_check_incoming_poll_handlers(nfds, fds); | |
552 | if (nevents < 0) | |
553 | return; | |
554 | incoming_dns_interval += Config.comm_incoming.dns_average - nevents; | |
555 | if (incoming_dns_interval < Config.comm_incoming.dns_min_poll) | |
556 | incoming_dns_interval = Config.comm_incoming.dns_min_poll; | |
557 | if (incoming_dns_interval > MAX_INCOMING_INTERVAL) | |
558 | incoming_dns_interval = MAX_INCOMING_INTERVAL; | |
559 | if (nevents > INCOMING_DNS_MAX) | |
560 | nevents = INCOMING_DNS_MAX; | |
561 | statHistCount(&statCounter.comm_dns_incoming, nevents); | |
562 | } | |
563 | ||
564 | void | |
565 | comm_select_init(void) | |
566 | { | |
567 | cachemgrRegister("comm_incoming", | |
568 | "comm_incoming() stats", | |
569 | commIncomingStats, 0, 1); | |
570 | FD_ZERO(&global_readfds); | |
571 | FD_ZERO(&global_writefds); | |
572 | nreadfds = nwritefds = 0; | |
573 | } | |
574 | ||
575 | ||
576 | static void | |
577 | commIncomingStats(StoreEntry * sentry) | |
578 | { | |
579 | StatCounters *f = &statCounter; | |
580 | storeAppendPrintf(sentry, "Current incoming_icp_interval: %d\n", | |
581 | incoming_icp_interval >> INCOMING_FACTOR); | |
582 | storeAppendPrintf(sentry, "Current incoming_dns_interval: %d\n", | |
583 | incoming_dns_interval >> INCOMING_FACTOR); | |
584 | storeAppendPrintf(sentry, "Current incoming_http_interval: %d\n", | |
585 | incoming_http_interval >> INCOMING_FACTOR); | |
586 | storeAppendPrintf(sentry, "\n"); | |
587 | storeAppendPrintf(sentry, "Histogram of events per incoming socket type\n"); | |
588 | storeAppendPrintf(sentry, "ICP Messages handled per comm_poll_icp_incoming() call:\n"); | |
589 | statHistDump(&f->comm_icp_incoming, sentry, statHistIntDumper); | |
590 | storeAppendPrintf(sentry, "DNS Messages handled per comm_poll_dns_incoming() call:\n"); | |
591 | statHistDump(&f->comm_dns_incoming, sentry, statHistIntDumper); | |
592 | storeAppendPrintf(sentry, "HTTP Messages handled per comm_poll_http_incoming() call:\n"); | |
593 | statHistDump(&f->comm_http_incoming, sentry, statHistIntDumper); | |
594 | } | |
595 | ||
596 | void | |
597 | commUpdateReadBits(int fd, PF * handler) | |
598 | { | |
599 | if (handler && !FD_ISSET(fd, &global_readfds)) { | |
600 | FD_SET(fd, &global_readfds); | |
601 | nreadfds++; | |
602 | } else if (!handler && FD_ISSET(fd, &global_readfds)) { | |
603 | FD_CLR(fd, &global_readfds); | |
604 | nreadfds--; | |
605 | } | |
606 | } | |
607 | ||
608 | void | |
609 | commUpdateWriteBits(int fd, PF * handler) | |
610 | { | |
611 | if (handler && !FD_ISSET(fd, &global_writefds)) { | |
612 | FD_SET(fd, &global_writefds); | |
613 | nwritefds++; | |
614 | } else if (!handler && FD_ISSET(fd, &global_writefds)) { | |
615 | FD_CLR(fd, &global_writefds); | |
616 | nwritefds--; | |
617 | } | |
618 | } | |
619 | ||
/* Called by async-io or diskd to speed up the polling */
void
comm_quick_poll_required(void)
{
    /* drop the poll(2) timeout cap from 1000ms to 10ms so queued
     * async-io / diskd completions are noticed promptly */
    MAX_POLL_TIME = 10;
}
626 | ||
627 | #endif /* USE_POLL */ |