]> git.ipfire.org Git - thirdparty/squid.git/blob - src/comm.cc
Renamed squid.h to squid-old.h and config.h to squid.h
[thirdparty/squid.git] / src / comm.cc
1 /*
2 * DEBUG: section 05 Socket Functions
3 * AUTHOR: Harvest Derived
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 *
32 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
33 */
34
35 #include "squid-old.h"
36 #include "base/AsyncCall.h"
37 #include "StoreIOBuffer.h"
38 #include "comm.h"
39 #include "event.h"
40 #include "fde.h"
41 #include "comm/AcceptLimiter.h"
42 #include "comm/comm_internal.h"
43 #include "comm/Connection.h"
44 #include "comm/IoCallback.h"
45 #include "comm/Loops.h"
46 #include "comm/Write.h"
47 #include "comm/TcpAcceptor.h"
48 #include "CommIO.h"
49 #include "CommRead.h"
50 #include "MemBuf.h"
51 #include "pconn.h"
52 #include "SquidTime.h"
53 #include "CommCalls.h"
54 #include "DescriptorSet.h"
55 #include "icmp/net_db.h"
56 #include "ip/Address.h"
57 #include "ip/Intercept.h"
58 #include "ip/QosConfig.h"
59 #include "ip/tools.h"
60 #include "ClientInfo.h"
61 #include "StatCounters.h"
62 #if USE_SSL
63 #include "ssl/support.h"
64 #endif
65
66 #include "cbdata.h"
67 #if _SQUID_CYGWIN_
68 #include <sys/ioctl.h>
69 #endif
70 #ifdef HAVE_NETINET_TCP_H
71 #include <netinet/tcp.h>
72 #endif
73
74 /*
75 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
76 */
77
78 static void commStopHalfClosedMonitor(int fd);
79 static IOCB commHalfClosedReader;
80 static void comm_init_opened(const Comm::ConnectionPointer &conn, tos_t tos, nfmark_t nfmark, const char *note, struct addrinfo *AI);
81 static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
82
83 #if USE_DELAY_POOLS
84 CBDATA_CLASS_INIT(CommQuotaQueue);
85
86 static void commHandleWriteHelper(void * data);
87 #endif
88
89 /* STATIC */
90
91 static DescriptorSet *TheHalfClosed = NULL; /// the set of half-closed FDs
92 static bool WillCheckHalfClosed = false; /// true if check is scheduled
93 static EVH commHalfClosedCheck;
94 static void commPlanHalfClosedCheck();
95
96 static comm_err_t commBind(int s, struct addrinfo &);
97 static void commSetReuseAddr(int);
98 static void commSetNoLinger(int);
99 #ifdef TCP_NODELAY
100 static void commSetTcpNoDelay(int);
101 #endif
102 static void commSetTcpRcvbuf(int, int);
103
104 static MemAllocator *conn_close_pool = NULL;
105 fd_debug_t *fdd_table = NULL;
106
107 bool
108 isOpen(const int fd)
109 {
110 return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
111 }
112
113 /**
114 * Attempt a read
115 *
116 * If the read attempt succeeds or fails, call the callback.
117 * Else, wait for another IO notification.
118 */
119 void
120 commHandleRead(int fd, void *data)
121 {
122 Comm::IoCallback *ccb = (Comm::IoCallback *) data;
123
124 assert(data == COMMIO_FD_READCB(fd));
125 assert(ccb->active());
126 /* Attempt a read */
127 statCounter.syscalls.sock.reads++;
128 errno = 0;
129 int retval;
130 retval = FD_READ_METHOD(fd, ccb->buf, ccb->size);
131 debugs(5, 3, "comm_read_try: FD " << fd << ", size " << ccb->size << ", retval " << retval << ", errno " << errno);
132
133 if (retval < 0 && !ignoreErrno(errno)) {
134 debugs(5, 3, "comm_read_try: scheduling COMM_ERROR");
135 ccb->offset = 0;
136 ccb->finish(COMM_ERROR, errno);
137 return;
138 };
139
140 /* See if we read anything */
141 /* Note - read 0 == socket EOF, which is a valid read */
142 if (retval >= 0) {
143 fd_bytes(fd, retval, FD_READ);
144 ccb->offset = retval;
145 ccb->finish(COMM_OK, errno);
146 return;
147 }
148
149 /* Nope, register for some more IO */
150 Comm::SetSelect(fd, COMM_SELECT_READ, commHandleRead, data, 0);
151 }
152
153 /**
154 * Queue a read. handler/handler_data are called when the read
155 * completes, on error, or on file descriptor close.
156 */
157 void
158 comm_read(const Comm::ConnectionPointer &conn, char *buf, int size, AsyncCall::Pointer &callback)
159 {
160 debugs(5, 5, "comm_read, queueing read for " << conn << "; asynCall " << callback);
161
162 /* Make sure we are open and not closing */
163 assert(Comm::IsConnOpen(conn));
164 assert(!fd_table[conn->fd].closing());
165 Comm::IoCallback *ccb = COMMIO_FD_READCB(conn->fd);
166
167 // Make sure we are either not reading or just passively monitoring.
168 // Active/passive conflicts are OK and simply cancel passive monitoring.
169 if (ccb->active()) {
170 // if the assertion below fails, we have an active comm_read conflict
171 assert(fd_table[conn->fd].halfClosedReader != NULL);
172 commStopHalfClosedMonitor(conn->fd);
173 assert(!ccb->active());
174 }
175 ccb->conn = conn;
176
177 /* Queue the read */
178 ccb->setCallback(Comm::IOCB_READ, callback, (char *)buf, NULL, size);
179 Comm::SetSelect(conn->fd, COMM_SELECT_READ, commHandleRead, ccb, 0);
180 }
181
182 /**
183 * Empty the read buffers
184 *
185 * This is a magical routine that empties the read buffers.
186 * Under some platforms (Linux) if a buffer has data in it before
187 * you call close(), the socket will hang and take quite a while
188 * to timeout.
189 */
190 static void
191 comm_empty_os_read_buffers(int fd)
192 {
193 #if _SQUID_LINUX_
194 /* prevent those nasty RST packets */
195 char buf[SQUID_TCP_SO_RCVBUF];
196
197 if (fd_table[fd].flags.nonblocking == 1) {
198 while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
199 }
200 #endif
201 }
202
203
204 /**
205 * Return whether the FD has a pending completed callback.
206 * NP: does not work.
207 */
208 int
209 comm_has_pending_read_callback(int fd)
210 {
211 assert(isOpen(fd));
212 // XXX: We do not know whether there is a read callback scheduled.
213 // This is used for pconn management that should probably be more
214 // tightly integrated into comm to minimize the chance that a
215 // closing pconn socket will be used for a new transaction.
216 return false;
217 }
218
219 // Does comm check this fd for read readiness?
220 // Note that when comm is not monitoring, there can be a pending callback
221 // call, which may resume comm monitoring once fired.
222 bool
223 comm_monitors_read(int fd)
224 {
225 assert(isOpen(fd));
226 // Being active is usually the same as monitoring because we always
227 // start monitoring the FD when we configure Comm::IoCallback for I/O
228 // and we usually configure Comm::IoCallback for I/O when we starting
229 // monitoring a FD for reading.
230 return COMMIO_FD_READCB(fd)->active();
231 }
232
233 /**
234 * Cancel a pending read. Assert that we have the right parameters,
235 * and that there are no pending read events!
236 *
237 * XXX: We do not assert that there are no pending read events and
238 * with async calls it becomes even more difficult.
239 * The whole interface should be reworked to do callback->cancel()
240 * instead of searching for places where the callback may be stored and
241 * updating the state of those places.
242 *
243 * AHC Don't call the comm handlers?
244 */
245 void
246 comm_read_cancel(int fd, IOCB *callback, void *data)
247 {
248 if (!isOpen(fd)) {
249 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " closed");
250 return;
251 }
252
253 Comm::IoCallback *cb = COMMIO_FD_READCB(fd);
254 // TODO: is "active" == "monitors FD"?
255 if (!cb->active()) {
256 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " inactive");
257 return;
258 }
259
260 typedef CommCbFunPtrCallT<CommIoCbPtrFun> Call;
261 Call *call = dynamic_cast<Call*>(cb->callback.getRaw());
262 if (!call) {
263 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " lacks callback");
264 return;
265 }
266
267 call->cancel("old comm_read_cancel");
268
269 typedef CommIoCbParams Params;
270 const Params &params = GetCommParams<Params>(cb->callback);
271
272 /* Ok, we can be reasonably sure we won't lose any data here! */
273 assert(call->dialer.handler == callback);
274 assert(params.data == data);
275
276 /* Delete the callback */
277 cb->cancel("old comm_read_cancel");
278
279 /* And the IO event */
280 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
281 }
282
283 void
284 comm_read_cancel(int fd, AsyncCall::Pointer &callback)
285 {
286 callback->cancel("comm_read_cancel");
287
288 if (!isOpen(fd)) {
289 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " closed");
290 return;
291 }
292
293 Comm::IoCallback *cb = COMMIO_FD_READCB(fd);
294
295 if (!cb->active()) {
296 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " inactive");
297 return;
298 }
299
300 AsyncCall::Pointer call = cb->callback;
301 assert(call != NULL); // XXX: should never fail (active() checks for callback==NULL)
302
303 /* Ok, we can be reasonably sure we won't lose any data here! */
304 assert(call == callback);
305
306 /* Delete the callback */
307 cb->cancel("comm_read_cancel");
308
309 /* And the IO event */
310 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
311 }
312
313
314 /**
315 * synchronous wrapper around udp socket functions
316 */
317 int
318 comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
319 {
320 statCounter.syscalls.sock.recvfroms++;
321 int x = 0;
322 struct addrinfo *AI = NULL;
323
324 debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
325
326 assert( NULL == AI );
327
328 from.InitAddrInfo(AI);
329
330 x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
331
332 from = *AI;
333
334 from.FreeAddrInfo(AI);
335
336 return x;
337 }
338
339 int
340 comm_udp_recv(int fd, void *buf, size_t len, int flags)
341 {
342 Ip::Address nul;
343 return comm_udp_recvfrom(fd, buf, len, flags, nul);
344 }
345
346 ssize_t
347 comm_udp_send(int s, const void *buf, size_t len, int flags)
348 {
349 return send(s, buf, len, flags);
350 }
351
352
353 bool
354 comm_has_incomplete_write(int fd)
355 {
356 assert(isOpen(fd));
357 return COMMIO_FD_WRITECB(fd)->active();
358 }
359
360 /**
361 * Queue a write. handler/handler_data are called when the write fully
362 * completes, on error, or on file descriptor close.
363 */
364
365 /* Return the local port associated with fd. */
366 unsigned short
367 comm_local_port(int fd)
368 {
369 Ip::Address temp;
370 struct addrinfo *addr = NULL;
371 fde *F = &fd_table[fd];
372
373 /* If the fd is closed already, just return */
374
375 if (!F->flags.open) {
376 debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
377 return 0;
378 }
379
380 if (F->local_addr.GetPort())
381 return F->local_addr.GetPort();
382
383 if (F->sock_family == AF_INET)
384 temp.SetIPv4();
385
386 temp.InitAddrInfo(addr);
387
388 if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
389 debugs(50, 1, "comm_local_port: Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerror());
390 temp.FreeAddrInfo(addr);
391 return 0;
392 }
393 temp = *addr;
394
395 temp.FreeAddrInfo(addr);
396
397 if (F->local_addr.IsAnyAddr()) {
398 /* save the whole local address, not just the port. */
399 F->local_addr = temp;
400 } else {
401 F->local_addr.SetPort(temp.GetPort());
402 }
403
404 debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.GetPort() << "(family=" << F->sock_family << ")");
405 return F->local_addr.GetPort();
406 }
407
408 static comm_err_t
409 commBind(int s, struct addrinfo &inaddr)
410 {
411 statCounter.syscalls.sock.binds++;
412
413 if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
414 debugs(50, 6, "commBind: bind socket FD " << s << " to " << fd_table[s].local_addr);
415 return COMM_OK;
416 }
417
418 debugs(50, 0, "commBind: Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerror());
419
420 return COMM_ERROR;
421 }
422
423 /**
424 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
425 * is OR of flags specified in comm.h. Defaults TOS
426 */
427 int
428 comm_open(int sock_type,
429 int proto,
430 Ip::Address &addr,
431 int flags,
432 const char *note)
433 {
434 return comm_openex(sock_type, proto, addr, flags, 0, 0, note);
435 }
436
437 void
438 comm_open_listener(int sock_type,
439 int proto,
440 Comm::ConnectionPointer &conn,
441 const char *note)
442 {
443 /* all listener sockets require bind() */
444 conn->flags |= COMM_DOBIND;
445
446 /* attempt native enabled port. */
447 conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, 0, 0, note);
448 }
449
450 int
451 comm_open_listener(int sock_type,
452 int proto,
453 Ip::Address &addr,
454 int flags,
455 const char *note)
456 {
457 int sock = -1;
458
459 /* all listener sockets require bind() */
460 flags |= COMM_DOBIND;
461
462 /* attempt native enabled port. */
463 sock = comm_openex(sock_type, proto, addr, flags, 0, 0, note);
464
465 return sock;
466 }
467
468 static bool
469 limitError(int const anErrno)
470 {
471 return anErrno == ENFILE || anErrno == EMFILE;
472 }
473
474 void
475 comm_set_v6only(int fd, int tos)
476 {
477 #ifdef IPV6_V6ONLY
478 if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
479 debugs(50, 1, "comm_open: setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerror());
480 }
481 #else
482 debugs(50, 0, "WARNING: comm_open: setsockopt(IPV6_V6ONLY) not supported on this platform");
483 #endif /* sockopt */
484 }
485
486 /**
487 * Set the socket IP_TRANSPARENT option for Linux TPROXY v4 support.
488 */
489 void
490 comm_set_transparent(int fd)
491 {
492 #if defined(IP_TRANSPARENT)
493 int tos = 1;
494 if (setsockopt(fd, SOL_IP, IP_TRANSPARENT, (char *) &tos, sizeof(int)) < 0) {
495 debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(IP_TRANSPARENT) on FD " << fd << ": " << xstrerror());
496 } else {
497 /* mark the socket as having transparent options */
498 fd_table[fd].flags.transparent = 1;
499 }
500 #else
501 debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(IP_TRANSPARENT) not supported on this platform");
502 #endif /* sockopt */
503 }
504
505 /**
506 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
507 * is OR of flags specified in defines.h:COMM_*
508 */
509 int
510 comm_openex(int sock_type,
511 int proto,
512 Ip::Address &addr,
513 int flags,
514 tos_t tos,
515 nfmark_t nfmark,
516 const char *note)
517 {
518 int new_socket;
519 struct addrinfo *AI = NULL;
520
521 PROF_start(comm_open);
522 /* Create socket for accepting new connections. */
523 statCounter.syscalls.sock.sockets++;
524
525 /* Setup the socket addrinfo details for use */
526 addr.GetAddrInfo(AI);
527 AI->ai_socktype = sock_type;
528 AI->ai_protocol = proto;
529
530 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );
531
532 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
533
534 /* under IPv6 there is the possibility IPv6 is present but disabled. */
535 /* try again as IPv4-native if possible */
536 if ( new_socket < 0 && Ip::EnableIpv6 && addr.IsIPv6() && addr.SetIPv4() ) {
537 /* attempt to open this IPv4-only. */
538 addr.FreeAddrInfo(AI);
539 /* Setup the socket addrinfo details for use */
540 addr.GetAddrInfo(AI);
541 AI->ai_socktype = sock_type;
542 AI->ai_protocol = proto;
543 debugs(50, 3, "comm_openex: Attempt fallback open socket for: " << addr );
544 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
545 debugs(50, 2, HERE << "attempt open " << note << " socket on: " << addr);
546 }
547
548 if (new_socket < 0) {
549 /* Increase the number of reserved fd's if calls to socket()
550 * are failing because the open file table is full. This
551 * limits the number of simultaneous clients */
552
553 if (limitError(errno)) {
554 debugs(50, DBG_IMPORTANT, "comm_open: socket failure: " << xstrerror());
555 fdAdjustReserved();
556 } else {
557 debugs(50, DBG_CRITICAL, "comm_open: socket failure: " << xstrerror());
558 }
559
560 addr.FreeAddrInfo(AI);
561
562 PROF_stop(comm_open);
563 return -1;
564 }
565
566 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
567 Comm::ConnectionPointer conn = new Comm::Connection;
568 conn->local = addr;
569 conn->fd = new_socket;
570
571 debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );
572
573 /* set TOS if needed */
574 if (tos)
575 Ip::Qos::setSockTos(conn, tos);
576
577 /* set netfilter mark if needed */
578 if (nfmark)
579 Ip::Qos::setSockNfmark(conn, nfmark);
580
581 if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.IsIPv6() )
582 comm_set_v6only(conn->fd, 1);
583
584 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
585 /* Other OS may have this administratively disabled for general use. Same deal. */
586 if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.IsIPv6() )
587 comm_set_v6only(conn->fd, 0);
588
589 comm_init_opened(conn, tos, nfmark, note, AI);
590 new_socket = comm_apply_flags(conn->fd, addr, flags, AI);
591
592 addr.FreeAddrInfo(AI);
593
594 PROF_stop(comm_open);
595
596 // XXX transition only. prevent conn from closing the new FD on function exit.
597 conn->fd = -1;
598 return new_socket;
599 }
600
601 /// update FD tables after a local or remote (IPC) comm_openex();
602 void
603 comm_init_opened(const Comm::ConnectionPointer &conn,
604 tos_t tos,
605 nfmark_t nfmark,
606 const char *note,
607 struct addrinfo *AI)
608 {
609 assert(Comm::IsConnOpen(conn));
610 assert(AI);
611
612 /* update fdstat */
613 debugs(5, 5, HERE << conn << " is a new socket");
614
615 assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
616 fd_open(conn->fd, FD_SOCKET, note);
617
618 fdd_table[conn->fd].close_file = NULL;
619 fdd_table[conn->fd].close_line = 0;
620
621 fde *F = &fd_table[conn->fd];
622 F->local_addr = conn->local;
623 F->tosToServer = tos;
624
625 F->nfmarkToServer = nfmark;
626
627 F->sock_family = AI->ai_family;
628 }
629
630 /// apply flags after a local comm_open*() call;
631 /// returns new_socket or -1 on error
632 static int
633 comm_apply_flags(int new_socket,
634 Ip::Address &addr,
635 int flags,
636 struct addrinfo *AI)
637 {
638 assert(new_socket >= 0);
639 assert(AI);
640 const int sock_type = AI->ai_socktype;
641
642 if (!(flags & COMM_NOCLOEXEC))
643 commSetCloseOnExec(new_socket);
644
645 if ((flags & COMM_REUSEADDR))
646 commSetReuseAddr(new_socket);
647
648 if (addr.GetPort() > (unsigned short) 0) {
649 #if _SQUID_MSWIN_
650 if (sock_type != SOCK_DGRAM)
651 #endif
652 commSetNoLinger(new_socket);
653
654 if (opt_reuseaddr)
655 commSetReuseAddr(new_socket);
656 }
657
658 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
659 if ((flags & COMM_TRANSPARENT)) {
660 comm_set_transparent(new_socket);
661 }
662
663 if ( (flags & COMM_DOBIND) || addr.GetPort() > 0 || !addr.IsAnyAddr() ) {
664 if ( !(flags & COMM_DOBIND) && addr.IsAnyAddr() )
665 debugs(5,1,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
666 if ( addr.IsNoAddr() )
667 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");
668
669 if (commBind(new_socket, *AI) != COMM_OK) {
670 comm_close(new_socket);
671 return -1;
672 }
673 }
674
675 if (flags & COMM_NONBLOCKING)
676 if (commSetNonBlocking(new_socket) == COMM_ERROR) {
677 comm_close(new_socket);
678 return -1;
679 }
680
681 #ifdef TCP_NODELAY
682 if (sock_type == SOCK_STREAM)
683 commSetTcpNoDelay(new_socket);
684
685 #endif
686
687 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
688 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
689
690 return new_socket;
691 }
692
693 void
694 comm_import_opened(const Comm::ConnectionPointer &conn,
695 const char *note,
696 struct addrinfo *AI)
697 {
698 debugs(5, 2, HERE << conn);
699 assert(Comm::IsConnOpen(conn));
700 assert(AI);
701
702 comm_init_opened(conn, 0, 0, note, AI);
703
704 if (!(conn->flags & COMM_NOCLOEXEC))
705 fd_table[conn->fd].flags.close_on_exec = 1;
706
707 if (conn->local.GetPort() > (unsigned short) 0) {
708 #if _SQUID_MSWIN_
709 if (AI->ai_socktype != SOCK_DGRAM)
710 #endif
711 fd_table[conn->fd].flags.nolinger = 1;
712 }
713
714 if ((conn->flags & COMM_TRANSPARENT))
715 fd_table[conn->fd].flags.transparent = 1;
716
717 if (conn->flags & COMM_NONBLOCKING)
718 fd_table[conn->fd].flags.nonblocking = 1;
719
720 #ifdef TCP_NODELAY
721 if (AI->ai_socktype == SOCK_STREAM)
722 fd_table[conn->fd].flags.nodelay = 1;
723 #endif
724
725 /* no fd_table[fd].flags. updates needed for these conditions:
726 * if ((flags & COMM_REUSEADDR)) ...
727 * if ((flags & COMM_DOBIND) ...) ...
728 */
729 }
730
731 // XXX: now that raw-FD timeouts are only unset for pipes and files this SHOULD be a no-op.
732 // With handler already unset. Leaving this present until that can be verified for all code paths.
733 void
734 commUnsetFdTimeout(int fd)
735 {
736 debugs(5, 3, HERE << "Remove timeout for FD " << fd);
737 assert(fd >= 0);
738 assert(fd < Squid_MaxFD);
739 fde *F = &fd_table[fd];
740 assert(F->flags.open);
741
742 F->timeoutHandler = NULL;
743 F->timeout = 0;
744 }
745
746 int
747 commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
748 {
749 debugs(5, 3, HERE << conn << " timeout " << timeout);
750 assert(Comm::IsConnOpen(conn));
751 assert(conn->fd < Squid_MaxFD);
752 fde *F = &fd_table[conn->fd];
753 assert(F->flags.open);
754
755 if (timeout < 0) {
756 F->timeoutHandler = NULL;
757 F->timeout = 0;
758 } else {
759 if (callback != NULL) {
760 typedef CommTimeoutCbParams Params;
761 Params &params = GetCommParams<Params>(callback);
762 params.conn = conn;
763 F->timeoutHandler = callback;
764 }
765
766 F->timeout = squid_curtime + (time_t) timeout;
767 }
768
769 return F->timeout;
770 }
771
772 int
773 commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
774 {
775 debugs(5, 3, HERE << "Remove timeout for " << conn);
776 AsyncCall::Pointer nil;
777 return commSetConnTimeout(conn, -1, nil);
778 }
779
780 int
781 comm_connect_addr(int sock, const Ip::Address &address)
782 {
783 comm_err_t status = COMM_OK;
784 fde *F = &fd_table[sock];
785 int x = 0;
786 int err = 0;
787 socklen_t errlen;
788 struct addrinfo *AI = NULL;
789 PROF_start(comm_connect_addr);
790
791 assert(address.GetPort() != 0);
792
793 debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");
794
795 /* Handle IPv6 over IPv4-only socket case.
796 * this case must presently be handled here since the GetAddrInfo asserts on bad mappings.
797 * NP: because commResetFD is private to ConnStateData we have to return an error and
798 * trust its handled properly.
799 */
800 if (F->sock_family == AF_INET && !address.IsIPv4()) {
801 errno = ENETUNREACH;
802 return COMM_ERR_PROTOCOL;
803 }
804
805 /* Handle IPv4 over IPv6-only socket case.
806 * This case is presently handled here as it's both a known case and it's
807 * uncertain what error will be returned by the IPv6 stack in such case. It's
808 * possible this will also be handled by the errno checks below after connect()
809 * but needs carefull cross-platform verification, and verifying the address
810 * condition here is simple.
811 */
812 if (!F->local_addr.IsIPv4() && address.IsIPv4()) {
813 errno = ENETUNREACH;
814 return COMM_ERR_PROTOCOL;
815 }
816
817 address.GetAddrInfo(AI, F->sock_family);
818
819 /* Establish connection. */
820 errno = 0;
821
822 if (!F->flags.called_connect) {
823 F->flags.called_connect = 1;
824 statCounter.syscalls.sock.connects++;
825
826 x = connect(sock, AI->ai_addr, AI->ai_addrlen);
827
828 // XXX: ICAP code refuses callbacks during a pending comm_ call
829 // Async calls development will fix this.
830 if (x == 0) {
831 x = -1;
832 errno = EINPROGRESS;
833 }
834
835 if (x < 0) {
836 debugs(5,5, "comm_connect_addr: sock=" << sock << ", addrinfo( " <<
837 " flags=" << AI->ai_flags <<
838 ", family=" << AI->ai_family <<
839 ", socktype=" << AI->ai_socktype <<
840 ", protocol=" << AI->ai_protocol <<
841 ", &addr=" << AI->ai_addr <<
842 ", addrlen=" << AI->ai_addrlen <<
843 " )" );
844 debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerror());
845 debugs(14,9, "connecting to: " << address );
846 }
847 } else {
848 #if _SQUID_NEWSOS6_
849 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
850
851 connect(sock, AI->ai_addr, AI->ai_addrlen);
852
853 if (errno == EINVAL) {
854 errlen = sizeof(err);
855 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
856
857 if (x >= 0)
858 errno = x;
859 }
860
861 #else
862 errlen = sizeof(err);
863
864 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
865
866 if (x == 0)
867 errno = err;
868
869 #if _SQUID_SOLARIS_
870 /*
871 * Solaris 2.4's socket emulation doesn't allow you
872 * to determine the error from a failed non-blocking
873 * connect and just returns EPIPE. Create a fake
874 * error message for connect. -- fenner@parc.xerox.com
875 */
876 if (x < 0 && errno == EPIPE)
877 errno = ENOTCONN;
878
879 #endif
880 #endif
881
882 }
883
884 /* Squid seems to be working fine without this code. With this code,
885 * we leak memory on many connect requests because of EINPROGRESS.
886 * If you find that this code is needed, please file a bug report. */
887 #if 0
888 #if _SQUID_LINUX_
889 /* 2007-11-27:
890 * Linux Debian replaces our allocated AI pointer with garbage when
891 * connect() fails. This leads to segmentation faults deallocating
892 * the system-allocated memory when we go to clean up our pointer.
893 * HACK: is to leak the memory returned since we can't deallocate.
894 */
895 if (errno != 0) {
896 AI = NULL;
897 }
898 #endif
899 #endif
900
901 address.FreeAddrInfo(AI);
902
903 PROF_stop(comm_connect_addr);
904
905 if (errno == 0 || errno == EISCONN)
906 status = COMM_OK;
907 else if (ignoreErrno(errno))
908 status = COMM_INPROGRESS;
909 else if (errno == EAFNOSUPPORT || errno == EINVAL)
910 return COMM_ERR_PROTOCOL;
911 else
912 return COMM_ERROR;
913
914 address.NtoA(F->ipaddr, MAX_IPSTRLEN);
915
916 F->remote_port = address.GetPort(); /* remote_port is HS */
917
918 if (status == COMM_OK) {
919 debugs(5, 10, "comm_connect_addr: FD " << sock << " connected to " << address);
920 } else if (status == COMM_INPROGRESS) {
921 debugs(5, 10, "comm_connect_addr: FD " << sock << " connection pending");
922 }
923
924 return status;
925 }
926
927 void
928 commCallCloseHandlers(int fd)
929 {
930 fde *F = &fd_table[fd];
931 debugs(5, 5, "commCallCloseHandlers: FD " << fd);
932
933 while (F->closeHandler != NULL) {
934 AsyncCall::Pointer call = F->closeHandler;
935 F->closeHandler = call->Next();
936 call->setNext(NULL);
937 // If call is not canceled schedule it for execution else ignore it
938 if (!call->canceled()) {
939 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
940 ScheduleCallHere(call);
941 }
942 }
943 }
944
945 #if LINGERING_CLOSE
946 static void
947 commLingerClose(int fd, void *unused)
948 {
949 LOCAL_ARRAY(char, buf, 1024);
950 int n;
951 n = FD_READ_METHOD(fd, buf, 1024);
952
953 if (n < 0)
954 debugs(5, 3, "commLingerClose: FD " << fd << " read: " << xstrerror());
955
956 comm_close(fd);
957 }
958
959 static void
960 commLingerTimeout(const FdeCbParams &params)
961 {
962 debugs(5, 3, "commLingerTimeout: FD " << params.fd);
963 comm_close(params.fd);
964 }
965
966 /*
967 * Inspired by apache
968 */
969 void
970 comm_lingering_close(int fd)
971 {
972 #if USE_SSL
973 if (fd_table[fd].ssl)
974 ssl_shutdown_method(fd_table[fd].ssl);
975 #endif
976
977 if (shutdown(fd, 1) < 0) {
978 comm_close(fd);
979 return;
980 }
981
982 fd_note(fd, "lingering close");
983 AsyncCall::Pointer call = commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout, NULL));
984
985 debugs(5, 3, HERE << "FD " << fd << " timeout " << timeout);
986 assert(fd_table[fd].flags.open);
987 if (callback != NULL) {
988 typedef FdeCbParams Params;
989 Params &params = GetCommParams<Params>(callback);
990 params.fd = fd;
991 fd_table[fd].timeoutHandler = callback;
992 fd_table[fd].timeout = squid_curtime + static_cast<time_t>(10);
993 }
994
995 Comm::SetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
996 }
997
998 #endif
999
1000 /**
1001 * enable linger with time of 0 so that when the socket is
1002 * closed, TCP generates a RESET
1003 */
1004 void
1005 comm_reset_close(const Comm::ConnectionPointer &conn)
1006 {
1007 struct linger L;
1008 L.l_onoff = 1;
1009 L.l_linger = 0;
1010
1011 if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1012 debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerror());
1013
1014 conn->close();
1015 }
1016
1017 // Legacy close function.
1018 void
1019 old_comm_reset_close(int fd)
1020 {
1021 struct linger L;
1022 L.l_onoff = 1;
1023 L.l_linger = 0;
1024
1025 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1026 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerror());
1027
1028 comm_close(fd);
1029 }
1030
1031 #if USE_SSL
1032 void
1033 commStartSslClose(const FdeCbParams &params)
1034 {
1035 assert(&fd_table[params.fd].ssl);
1036 ssl_shutdown_method(fd_table[params.fd].ssl);
1037 }
1038 #endif
1039
1040 void
1041 comm_close_complete(const FdeCbParams &params)
1042 {
1043 #if USE_SSL
1044 fde *F = &fd_table[params.fd];
1045
1046 if (F->ssl) {
1047 SSL_free(F->ssl);
1048 F->ssl = NULL;
1049 }
1050
1051 if (F->dynamicSslContext) {
1052 SSL_CTX_free(F->dynamicSslContext);
1053 F->dynamicSslContext = NULL;
1054 }
1055 #endif
1056 fd_close(params.fd); /* update fdstat */
1057 close(params.fd);
1058
1059 statCounter.syscalls.sock.closes++;
1060
1061 /* When one connection closes, give accept() a chance, if need be */
1062 Comm::AcceptLimiter::Instance().kick();
1063 }
1064
1065 /*
1066 * Close the socket fd.
1067 *
1068 * + call write handlers with ERR_CLOSING
1069 * + call read handlers with ERR_CLOSING
1070 * + call closing handlers
1071 *
1072 * NOTE: COMM_ERR_CLOSING will NOT be called for CommReads' sitting in a
1073 * DeferredReadManager.
1074 */
1075 void
1076 _comm_close(int fd, char const *file, int line)
1077 {
1078 debugs(5, 3, "comm_close: start closing FD " << fd);
1079 assert(fd >= 0);
1080 assert(fd < Squid_MaxFD);
1081
1082 fde *F = &fd_table[fd];
1083 fdd_table[fd].close_file = file;
1084 fdd_table[fd].close_line = line;
1085
1086 if (F->closing())
1087 return;
1088
1089 /* XXX: is this obsolete behind F->closing() ? */
1090 if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
1091 return;
1092
1093 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
1094 assert(isOpen(fd));
1095
1096 assert(F->type != FD_FILE);
1097
1098 PROF_start(comm_close);
1099
1100 F->flags.close_request = 1;
1101
1102 #if USE_SSL
1103 if (F->ssl) {
1104 AsyncCall::Pointer startCall=commCbCall(5,4, "commStartSslClose",
1105 FdeCbPtrFun(commStartSslClose, NULL));
1106 FdeCbParams &startParams = GetCommParams<FdeCbParams>(startCall);
1107 startParams.fd = fd;
1108 ScheduleCallHere(startCall);
1109 }
1110 #endif
1111
1112 // a half-closed fd may lack a reader, so we stop monitoring explicitly
1113 if (commHasHalfClosedMonitor(fd))
1114 commStopHalfClosedMonitor(fd);
1115 commUnsetFdTimeout(fd);
1116
1117 // notify read/write handlers after canceling select reservations, if any
1118 if (COMMIO_FD_WRITECB(fd)->active()) {
1119 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1120 COMMIO_FD_WRITECB(fd)->finish(COMM_ERR_CLOSING, errno);
1121 }
1122 if (COMMIO_FD_READCB(fd)->active()) {
1123 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
1124 COMMIO_FD_READCB(fd)->finish(COMM_ERR_CLOSING, errno);
1125 }
1126
1127 #if USE_DELAY_POOLS
1128 if (ClientInfo *clientInfo = F->clientInfo) {
1129 if (clientInfo->selectWaiting) {
1130 clientInfo->selectWaiting = false;
1131 // kick queue or it will get stuck as commWriteHandle is not called
1132 clientInfo->kickQuotaQueue();
1133 }
1134 }
1135 #endif
1136
1137 commCallCloseHandlers(fd);
1138
1139 if (F->pconn.uses && F->pconn.pool)
1140 F->pconn.pool->noteUses(F->pconn.uses);
1141
1142 comm_empty_os_read_buffers(fd);
1143
1144
1145 AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
1146 FdeCbPtrFun(comm_close_complete, NULL));
1147 FdeCbParams &completeParams = GetCommParams<FdeCbParams>(completeCall);
1148 completeParams.fd = fd;
1149 // must use async call to wait for all callbacks
1150 // scheduled before comm_close() to finish
1151 ScheduleCallHere(completeCall);
1152
1153 PROF_stop(comm_close);
1154 }
1155
1156 /* Send a udp datagram to specified TO_ADDR. */
1157 int
1158 comm_udp_sendto(int fd,
1159 const Ip::Address &to_addr,
1160 const void *buf,
1161 int len)
1162 {
1163 int x = 0;
1164 struct addrinfo *AI = NULL;
1165
1166 PROF_start(comm_udp_sendto);
1167 statCounter.syscalls.sock.sendtos++;
1168
1169 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
1170 " using FD " << fd << " using Port " << comm_local_port(fd) );
1171
1172 /* BUG: something in the above macro appears to occasionally be setting AI to garbage. */
1173 /* AYJ: 2007-08-27 : or was it because I wasn't then setting 'fd_table[fd].sock_family' to fill properly. */
1174 assert( NULL == AI );
1175
1176 to_addr.GetAddrInfo(AI, fd_table[fd].sock_family);
1177
1178 x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
1179
1180 to_addr.FreeAddrInfo(AI);
1181
1182 PROF_stop(comm_udp_sendto);
1183
1184 if (x >= 0)
1185 return x;
1186
1187 #if _SQUID_LINUX_
1188
1189 if (ECONNREFUSED != errno)
1190 #endif
1191
1192 debugs(50, 1, "comm_udp_sendto: FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerror());
1193
1194 return COMM_ERROR;
1195 }
1196
1197 void
1198 comm_add_close_handler(int fd, CLCB * handler, void *data)
1199 {
1200 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
1201 handler << ", data=" << data);
1202
1203 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
1204 CommCloseCbPtrFun(handler, data));
1205 comm_add_close_handler(fd, call);
1206 }
1207
1208 void
1209 comm_add_close_handler(int fd, AsyncCall::Pointer &call)
1210 {
1211 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
1212
1213 /*TODO:Check for a similar scheduled AsyncCall*/
1214 // for (c = fd_table[fd].closeHandler; c; c = c->next)
1215 // assert(c->handler != handler || c->data != data);
1216
1217 call->setNext(fd_table[fd].closeHandler);
1218
1219 fd_table[fd].closeHandler = call;
1220 }
1221
1222
1223 // remove function-based close handler
1224 void
1225 comm_remove_close_handler(int fd, CLCB * handler, void *data)
1226 {
1227 assert (isOpen(fd));
1228 /* Find handler in list */
1229 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
1230 handler << ", data=" << data);
1231
1232 AsyncCall::Pointer p, prev = NULL;
1233 for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
1234 typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
1235 const Call *call = dynamic_cast<const Call*>(p.getRaw());
1236 if (!call) // method callbacks have their own comm_remove_close_handler
1237 continue;
1238
1239 typedef CommCloseCbParams Params;
1240 const Params &params = GetCommParams<Params>(p);
1241 if (call->dialer.handler == handler && params.data == data)
1242 break; /* This is our handler */
1243 }
1244
1245 // comm_close removes all close handlers so our handler may be gone
1246 if (p != NULL) {
1247 p->dequeue(fd_table[fd].closeHandler, prev);
1248 p->cancel("comm_remove_close_handler");
1249 }
1250 }
1251
1252 // remove method-based close handler
1253 void
1254 comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1255 {
1256 assert (isOpen(fd));
1257 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1258
1259 // comm_close removes all close handlers so our handler may be gone
1260 AsyncCall::Pointer p, prev = NULL;
1261 for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());
1262
1263 if (p != NULL)
1264 p->dequeue(fd_table[fd].closeHandler, prev);
1265 call->cancel("comm_remove_close_handler");
1266 }
1267
1268 static void
1269 commSetNoLinger(int fd)
1270 {
1271
1272 struct linger L;
1273 L.l_onoff = 0; /* off */
1274 L.l_linger = 0;
1275
1276 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1277 debugs(50, 0, "commSetNoLinger: FD " << fd << ": " << xstrerror());
1278
1279 fd_table[fd].flags.nolinger = 1;
1280 }
1281
1282 static void
1283 commSetReuseAddr(int fd)
1284 {
1285 int on = 1;
1286
1287 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0)
1288 debugs(50, 1, "commSetReuseAddr: FD " << fd << ": " << xstrerror());
1289 }
1290
1291 static void
1292 commSetTcpRcvbuf(int fd, int size)
1293 {
1294 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0)
1295 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1296 if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0)
1297 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1298 #ifdef TCP_WINDOW_CLAMP
1299 if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0)
1300 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1301 #endif
1302 }
1303
1304 int
1305 commSetNonBlocking(int fd)
1306 {
1307 #if !_SQUID_MSWIN_
1308 int flags;
1309 int dummy = 0;
1310 #endif
1311 #if _SQUID_WINDOWS_
1312 int nonblocking = TRUE;
1313
1314 #if _SQUID_CYGWIN_
1315 if (fd_table[fd].type != FD_PIPE) {
1316 #endif
1317
1318 if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
1319 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror() << " " << fd_table[fd].type);
1320 return COMM_ERROR;
1321 }
1322
1323 #if _SQUID_CYGWIN_
1324 } else {
1325 #endif
1326 #endif
1327 #if !_SQUID_MSWIN_
1328
1329 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1330 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1331 return COMM_ERROR;
1332 }
1333
1334 if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
1335 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror());
1336 return COMM_ERROR;
1337 }
1338
1339 #endif
1340 #if _SQUID_CYGWIN_
1341 }
1342 #endif
1343 fd_table[fd].flags.nonblocking = 1;
1344
1345 return 0;
1346 }
1347
1348 int
1349 commUnsetNonBlocking(int fd)
1350 {
1351 #if _SQUID_MSWIN_
1352 int nonblocking = FALSE;
1353
1354 if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
1355 #else
1356 int flags;
1357 int dummy = 0;
1358
1359 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1360 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1361 return COMM_ERROR;
1362 }
1363
1364 if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
1365 #endif
1366 debugs(50, 0, "commUnsetNonBlocking: FD " << fd << ": " << xstrerror());
1367 return COMM_ERROR;
1368 }
1369
1370 fd_table[fd].flags.nonblocking = 0;
1371 return 0;
1372 }
1373
1374 void
1375 commSetCloseOnExec(int fd)
1376 {
1377 #ifdef FD_CLOEXEC
1378 int flags;
1379 int dummy = 0;
1380
1381 if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1382 debugs(50, 0, "FD " << fd << ": fcntl F_GETFD: " << xstrerror());
1383 return;
1384 }
1385
1386 if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
1387 debugs(50, 0, "FD " << fd << ": set close-on-exec failed: " << xstrerror());
1388
1389 fd_table[fd].flags.close_on_exec = 1;
1390
1391 #endif
1392 }
1393
1394 #ifdef TCP_NODELAY
1395 static void
1396 commSetTcpNoDelay(int fd)
1397 {
1398 int on = 1;
1399
1400 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0)
1401 debugs(50, 1, "commSetTcpNoDelay: FD " << fd << ": " << xstrerror());
1402
1403 fd_table[fd].flags.nodelay = 1;
1404 }
1405
1406 #endif
1407
1408 void
1409 commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1410 {
1411 int on = 1;
1412 #ifdef TCP_KEEPCNT
1413 if (timeout && interval) {
1414 int count = (timeout + interval - 1) / interval;
1415 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(on)) < 0)
1416 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1417 }
1418 #endif
1419 #ifdef TCP_KEEPIDLE
1420 if (idle) {
1421 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(on)) < 0)
1422 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1423 }
1424 #endif
1425 #ifdef TCP_KEEPINTVL
1426 if (interval) {
1427 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(on)) < 0)
1428 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1429 }
1430 #endif
1431 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0)
1432 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1433 }
1434
1435 void
1436 comm_init(void)
1437 {
1438 fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
1439 fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));
1440
1441 /* make sure the accept() socket FIFO delay queue exists */
1442 Comm::AcceptLimiter::Instance();
1443
1444 // make sure the IO pending callback table exists
1445 Comm::CallbackTableInit();
1446
1447 /* XXX account fd_table */
1448 /* Keep a few file descriptors free so that we don't run out of FD's
1449 * after accepting a client but before it opens a socket or a file.
1450 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1451 RESERVED_FD = min(100, Squid_MaxFD / 4);
1452
1453 conn_close_pool = memPoolCreate("close_handler", sizeof(close_handler));
1454
1455 TheHalfClosed = new DescriptorSet;
1456
1457 /* setup the select loop module */
1458 Comm::SelectLoopInit();
1459 }
1460
1461 void
1462 comm_exit(void)
1463 {
1464 delete TheHalfClosed;
1465 TheHalfClosed = NULL;
1466
1467 safe_free(fd_table);
1468 safe_free(fdd_table);
1469 Comm::CallbackTableDestruct();
1470 }
1471
1472 #if USE_DELAY_POOLS
1473 // called when the queue is done waiting for the client bucket to fill
1474 void
1475 commHandleWriteHelper(void * data)
1476 {
1477 CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
1478 assert(queue);
1479
1480 ClientInfo *clientInfo = queue->clientInfo;
1481 // ClientInfo invalidates queue if freed, so if we got here through,
1482 // evenAdd cbdata protections, everything should be valid and consistent
1483 assert(clientInfo);
1484 assert(clientInfo->hasQueue());
1485 assert(clientInfo->hasQueue(queue));
1486 assert(!clientInfo->selectWaiting);
1487 assert(clientInfo->eventWaiting);
1488 clientInfo->eventWaiting = false;
1489
1490 do {
1491 // check that the head descriptor is still relevant
1492 const int head = clientInfo->quotaPeekFd();
1493 Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);
1494
1495 if (fd_table[head].clientInfo == clientInfo &&
1496 clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
1497 !fd_table[head].closing()) {
1498
1499 // wait for the head descriptor to become ready for writing
1500 Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
1501 clientInfo->selectWaiting = true;
1502 return;
1503 }
1504
1505 clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
1506 // and continue looking for a relevant one
1507 } while (clientInfo->hasQueue());
1508
1509 debugs(77,3, HERE << "emptied queue");
1510 }
1511
1512 bool
1513 ClientInfo::hasQueue() const
1514 {
1515 assert(quotaQueue);
1516 return !quotaQueue->empty();
1517 }
1518
1519 bool
1520 ClientInfo::hasQueue(const CommQuotaQueue *q) const
1521 {
1522 assert(quotaQueue);
1523 return quotaQueue == q;
1524 }
1525
1526 /// returns the first descriptor to be dequeued
1527 int
1528 ClientInfo::quotaPeekFd() const
1529 {
1530 assert(quotaQueue);
1531 return quotaQueue->front();
1532 }
1533
1534 /// returns the reservation ID of the first descriptor to be dequeued
1535 unsigned int
1536 ClientInfo::quotaPeekReserv() const
1537 {
1538 assert(quotaQueue);
1539 return quotaQueue->outs + 1;
1540 }
1541
1542 /// queues a given fd, creating the queue if necessary; returns reservation ID
1543 unsigned int
1544 ClientInfo::quotaEnqueue(int fd)
1545 {
1546 assert(quotaQueue);
1547 return quotaQueue->enqueue(fd);
1548 }
1549
1550 /// removes queue head
1551 void
1552 ClientInfo::quotaDequeue()
1553 {
1554 assert(quotaQueue);
1555 quotaQueue->dequeue();
1556 }
1557
1558 void
1559 ClientInfo::kickQuotaQueue()
1560 {
1561 if (!eventWaiting && !selectWaiting && hasQueue()) {
1562 // wait at least a second if the bucket is empty
1563 const double delay = (bucketSize < 1.0) ? 1.0 : 0.0;
1564 eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1565 quotaQueue, delay, 0, true);
1566 eventWaiting = true;
1567 }
1568 }
1569
1570 /// calculates how much to write for a single dequeued client
1571 int
1572 ClientInfo::quotaForDequed()
1573 {
1574 /* If we have multiple clients and give full bucketSize to each client then
1575 * clt1 may often get a lot more because clt1->clt2 time distance in the
1576 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1577 * We divide quota evenly to be more fair. */
1578
1579 if (!rationedCount) {
1580 rationedCount = quotaQueue->size() + 1;
1581
1582 // The delay in ration recalculation _temporary_ deprives clients from
1583 // bytes that should have trickled in while rationedCount was positive.
1584 refillBucket();
1585
1586 // Rounding errors do not accumulate here, but we round down to avoid
1587 // negative bucket sizes after write with rationedCount=1.
1588 rationedQuota = static_cast<int>(floor(bucketSize/rationedCount));
1589 debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
1590 '*' << rationedCount);
1591 }
1592
1593 --rationedCount;
1594 debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
1595 " rations remaining: " << rationedCount);
1596
1597 // update 'last seen' time to prevent clientdb GC from dropping us
1598 last_seen = squid_curtime;
1599 return rationedQuota;
1600 }
1601
1602 ///< adds bytes to the quota bucket based on the rate and passed time
1603 void
1604 ClientInfo::refillBucket()
1605 {
1606 // all these times are in seconds, with double precision
1607 const double currTime = current_dtime;
1608 const double timePassed = currTime - prevTime;
1609
1610 // Calculate allowance for the time passed. Use double to avoid
1611 // accumulating rounding errors for small intervals. For example, always
1612 // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
1613 const double gain = timePassed * writeSpeedLimit;
1614
1615 debugs(77,5, HERE << currTime << " clt" << (const char*)hash.key << ": " <<
1616 bucketSize << " + (" << timePassed << " * " << writeSpeedLimit <<
1617 " = " << gain << ')');
1618
1619 // to further combat error accumulation during micro updates,
1620 // quit before updating time if we cannot add at least one byte
1621 if (gain < 1.0)
1622 return;
1623
1624 prevTime = currTime;
1625
1626 // for "first" connections, drain initial fat before refilling but keep
1627 // updating prevTime to avoid bursts after the fat is gone
1628 if (bucketSize > bucketSizeLimit) {
1629 debugs(77,4, HERE << "not refilling while draining initial fat");
1630 return;
1631 }
1632
1633 bucketSize += gain;
1634
1635 // obey quota limits
1636 if (bucketSize > bucketSizeLimit)
1637 bucketSize = bucketSizeLimit;
1638 }
1639
1640 void
1641 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
1642 {
1643 debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
1644 " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
1645 " highwatermark=" << aHighWatermark);
1646
1647 // set or possibly update traffic shaping parameters
1648 writeLimitingActive = true;
1649 writeSpeedLimit = aWriteSpeedLimit;
1650 bucketSizeLimit = aHighWatermark;
1651
1652 // but some members should only be set once for a newly activated bucket
1653 if (firstTimeConnection) {
1654 firstTimeConnection = false;
1655
1656 assert(!selectWaiting);
1657 assert(!quotaQueue);
1658 quotaQueue = new CommQuotaQueue(this);
1659
1660 bucketSize = anInitialBurst;
1661 prevTime = current_dtime;
1662 }
1663 }
1664
1665 CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1666 ins(0), outs(0)
1667 {
1668 assert(clientInfo);
1669 }
1670
1671 CommQuotaQueue::~CommQuotaQueue()
1672 {
1673 assert(!clientInfo); // ClientInfo should clear this before destroying us
1674 }
1675
1676 /// places the given fd at the end of the queue; returns reservation ID
1677 unsigned int
1678 CommQuotaQueue::enqueue(int fd)
1679 {
1680 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1681 ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1682 fds.push_back(fd);
1683 return ++ins;
1684 }
1685
1686 /// removes queue head
1687 void
1688 CommQuotaQueue::dequeue()
1689 {
1690 assert(!fds.empty());
1691 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1692 ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1693 fds.size());
1694 fds.pop_front();
1695 ++outs;
1696 }
1697 #endif
1698
1699 /*
1700 * hm, this might be too general-purpose for all the places we'd
1701 * like to use it.
1702 */
1703 int
1704 ignoreErrno(int ierrno)
1705 {
1706 switch (ierrno) {
1707
1708 case EINPROGRESS:
1709
1710 case EWOULDBLOCK:
1711 #if EAGAIN != EWOULDBLOCK
1712
1713 case EAGAIN:
1714 #endif
1715
1716 case EALREADY:
1717
1718 case EINTR:
1719 #ifdef ERESTART
1720
1721 case ERESTART:
1722 #endif
1723
1724 return 1;
1725
1726 default:
1727 return 0;
1728 }
1729
1730 /* NOTREACHED */
1731 }
1732
1733 void
1734 commCloseAllSockets(void)
1735 {
1736 int fd;
1737 fde *F = NULL;
1738
1739 for (fd = 0; fd <= Biggest_FD; fd++) {
1740 F = &fd_table[fd];
1741
1742 if (!F->flags.open)
1743 continue;
1744
1745 if (F->type != FD_SOCKET)
1746 continue;
1747
1748 if (F->flags.ipc) /* don't close inter-process sockets */
1749 continue;
1750
1751 if (F->timeoutHandler != NULL) {
1752 AsyncCall::Pointer callback = F->timeoutHandler;
1753 F->timeoutHandler = NULL;
1754 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1755 ScheduleCallHere(callback);
1756 } else {
1757 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1758 old_comm_reset_close(fd);
1759 }
1760 }
1761 }
1762
1763 static bool
1764 AlreadyTimedOut(fde *F)
1765 {
1766 if (!F->flags.open)
1767 return true;
1768
1769 if (F->timeout == 0)
1770 return true;
1771
1772 if (F->timeout > squid_curtime)
1773 return true;
1774
1775 return false;
1776 }
1777
1778 static bool
1779 writeTimedOut(int fd)
1780 {
1781 if (!COMMIO_FD_WRITECB(fd)->active())
1782 return false;
1783
1784 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1785 return false;
1786
1787 return true;
1788 }
1789
1790 void
1791 checkTimeouts(void)
1792 {
1793 int fd;
1794 fde *F = NULL;
1795 AsyncCall::Pointer callback;
1796
1797 for (fd = 0; fd <= Biggest_FD; fd++) {
1798 F = &fd_table[fd];
1799
1800 if (writeTimedOut(fd)) {
1801 // We have an active write callback and we are timed out
1802 debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
1803 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1804 COMMIO_FD_WRITECB(fd)->finish(COMM_ERROR, ETIMEDOUT);
1805 } else if (AlreadyTimedOut(F))
1806 continue;
1807
1808 debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");
1809
1810 if (F->timeoutHandler != NULL) {
1811 debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
1812 callback = F->timeoutHandler;
1813 F->timeoutHandler = NULL;
1814 ScheduleCallHere(callback);
1815 } else {
1816 debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
1817 comm_close(fd);
1818 }
1819 }
1820 }
1821
1822 void CommIO::Initialise()
1823 {
1824 /* Initialize done pipe signal */
1825 int DonePipe[2];
1826 if (pipe(DonePipe)) {}
1827 DoneFD = DonePipe[1];
1828 DoneReadFD = DonePipe[0];
1829 fd_open(DoneReadFD, FD_PIPE, "async-io completetion event: main");
1830 fd_open(DoneFD, FD_PIPE, "async-io completetion event: threads");
1831 commSetNonBlocking(DoneReadFD);
1832 commSetNonBlocking(DoneFD);
1833 Comm::SetSelect(DoneReadFD, COMM_SELECT_READ, NULLFDHandler, NULL, 0);
1834 Initialised = true;
1835 }
1836
1837 void CommIO::NotifyIOClose()
1838 {
1839 /* Close done pipe signal */
1840 FlushPipe();
1841 close(DoneFD);
1842 close(DoneReadFD);
1843 fd_close(DoneFD);
1844 fd_close(DoneReadFD);
1845 Initialised = false;
1846 }
1847
1848 bool CommIO::Initialised = false;
1849 bool CommIO::DoneSignalled = false;
1850 int CommIO::DoneFD = -1;
1851 int CommIO::DoneReadFD = -1;
1852
1853 void
1854 CommIO::FlushPipe()
1855 {
1856 char buf[256];
1857 FD_READ_METHOD(DoneReadFD, buf, sizeof(buf));
1858 }
1859
1860 void
1861 CommIO::NULLFDHandler(int fd, void *data)
1862 {
1863 FlushPipe();
1864 Comm::SetSelect(fd, COMM_SELECT_READ, NULLFDHandler, NULL, 0);
1865 }
1866
1867 void
1868 CommIO::ResetNotifications()
1869 {
1870 if (DoneSignalled) {
1871 FlushPipe();
1872 DoneSignalled = false;
1873 }
1874 }
1875
1876 /// Start waiting for a possibly half-closed connection to close
1877 // by scheduling a read callback to a monitoring handler that
1878 // will close the connection on read errors.
1879 void
1880 commStartHalfClosedMonitor(int fd)
1881 {
1882 debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
1883 assert(isOpen(fd));
1884 assert(!commHasHalfClosedMonitor(fd));
1885 (void)TheHalfClosed->add(fd); // could also assert the result
1886 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1887 }
1888
1889 static
1890 void
1891 commPlanHalfClosedCheck()
1892 {
1893 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1894 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1895 WillCheckHalfClosed = true;
1896 }
1897 }
1898
1899 /// iterates over all descriptors that may need half-closed tests and
1900 /// calls comm_read for those that do; re-schedules the check if needed
1901 static
1902 void
1903 commHalfClosedCheck(void *)
1904 {
1905 debugs(5, 5, HERE << "checking " << *TheHalfClosed);
1906
1907 typedef DescriptorSet::const_iterator DSCI;
1908 const DSCI end = TheHalfClosed->end();
1909 for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
1910 Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
1911 c->fd = *i;
1912 if (!fd_table[c->fd].halfClosedReader) { // not reading already
1913 AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
1914 CommIoCbPtrFun(&commHalfClosedReader, NULL));
1915 comm_read(c, NULL, 0, call);
1916 fd_table[c->fd].halfClosedReader = call;
1917 } else
1918 c->fd = -1; // XXX: temporary. prevent c replacement erase closing listed FD
1919 }
1920
1921 WillCheckHalfClosed = false; // as far as we know
1922 commPlanHalfClosedCheck(); // may need to check again
1923 }
1924
1925 /// checks whether we are waiting for possibly half-closed connection to close
1926 // We are monitoring if the read handler for the fd is the monitoring handler.
1927 bool
1928 commHasHalfClosedMonitor(int fd)
1929 {
1930 return TheHalfClosed->has(fd);
1931 }
1932
1933 /// stop waiting for possibly half-closed connection to close
1934 static void
1935 commStopHalfClosedMonitor(int const fd)
1936 {
1937 debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
1938
1939 // cancel the read if one was scheduled
1940 AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1941 if (reader != NULL)
1942 comm_read_cancel(fd, reader);
1943 fd_table[fd].halfClosedReader = NULL;
1944
1945 TheHalfClosed->del(fd);
1946 }
1947
1948 /// I/O handler for the possibly half-closed connection monitoring code
1949 static void
1950 commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, comm_err_t flag, int, void *)
1951 {
1952 // there cannot be more data coming in on half-closed connections
1953 assert(size == 0);
1954 assert(conn != NULL);
1955 assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read
1956
1957 fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now
1958
1959 // nothing to do if fd is being closed
1960 if (flag == COMM_ERR_CLOSING)
1961 return;
1962
1963 // if read failed, close the connection
1964 if (flag != COMM_OK) {
1965 debugs(5, 3, HERE << "closing " << conn);
1966 conn->close();
1967 return;
1968 }
1969
1970 // continue waiting for close or error
1971 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1972 }
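/*
 * Caller-side sketch for the half-closed monitor above (illustrative only;
 * 'clientFd' is a hypothetical open client socket descriptor):
 *
 *   if (!commHasHalfClosedMonitor(clientFd))
 *       commStartHalfClosedMonitor(clientFd); // zero-length reads until error/close
 *
 * The monitor cleans up after itself: commHalfClosedReader() closes the
 * connection on read errors, and closing the descriptor through comm_close()
 * is expected to remove it from TheHalfClosed via commStopHalfClosedMonitor().
 */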
1973
1974
1975 CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}
1976
1977 CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
1978 : conn(c), buf(buf_), len(len_), callback(callback_) {}
1979
1980 DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}
1981
1982 DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
1983
1984 DeferredReadManager::~DeferredReadManager()
1985 {
1986 flushReads();
1987 assert (deferredReads.empty());
1988 }
1989
1990 /* explicit instantiation required for some systems */
1991
1992 /// \cond AUTODOCS-IGNORE
1993 template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
1994 /// \endcond
1995
1996 void
1997 DeferredReadManager::delayRead(DeferredRead const &aRead)
1998 {
1999 debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
2000 CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);
2001
2002 // We have to use a global function as a closer and point to temp
2003 // instead of "this" because DeferredReadManager is not a job and
2004 // is not even cbdata protected
2005 // XXX: and yet we use cbdata protection functions on it??
2006 AsyncCall::Pointer closer = commCbCall(5,4,
2007 "DeferredReadManager::CloseHandler",
2008 CommCloseCbPtrFun(&CloseHandler, temp));
2009 comm_add_close_handler(aRead.theRead.conn->fd, closer);
2010 temp->element.closer = closer; // remember so that we can cancel
2011 }
2012
2013 void
2014 DeferredReadManager::CloseHandler(const CommCloseCbParams &params)
2015 {
2016 if (!cbdataReferenceValid(params.data))
2017 return;
2018
2019 CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)params.data;
2020
2021 temp->element.closer = NULL;
2022 temp->element.markCancelled();
2023 }
2024
2025 DeferredRead
2026 DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
2027 {
2028 assert (!deferredReads.empty());
2029
2030 DeferredRead &read = deferredReads.head->element;
2031
2032 // NOTE: at this point the connection has been paused/stalled for an unknown
2033 // amount of time. We must re-validate that it is active and usable.
2034
2035 // If the connection has already been closed, cancel this read.
2036 if (!Comm::IsConnOpen(read.theRead.conn)) {
2037 if (read.closer != NULL) {
2038 read.closer->cancel("Connection closed before.");
2039 read.closer = NULL;
2040 }
2041 read.markCancelled();
2042 }
2043
2044 if (!read.cancelled) {
2045 comm_remove_close_handler(read.theRead.conn->fd, read.closer);
2046 read.closer = NULL;
2047 }
2048
2049 DeferredRead result = deferredReads.pop_front();
2050
2051 return result;
2052 }
2053
2054 void
2055 DeferredReadManager::kickReads(int const count)
2056 {
2057 /* if we had CbDataList::size() we could consolidate this and flushReads */
2058
2059 if (count < 1) {
2060 flushReads();
2061 return;
2062 }
2063
2064 size_t remaining = count;
2065
2066 while (!deferredReads.empty() && remaining) {
2067 DeferredRead aRead = popHead(deferredReads);
2068 kickARead(aRead);
2069
2070 if (!aRead.cancelled)
2071 --remaining;
2072 }
2073 }
2074
2075 void
2076 DeferredReadManager::flushReads()
2077 {
2078 CbDataListContainer<DeferredRead> reads;
2079 reads = deferredReads;
2080 deferredReads = CbDataListContainer<DeferredRead>();
2081
2082 // XXX: For fairness this SHOULD randomize the order
2083 while (!reads.empty()) {
2084 DeferredRead aRead = popHead(reads);
2085 kickARead(aRead);
2086 }
2087 }
2088
2089 void
2090 DeferredReadManager::kickARead(DeferredRead const &aRead)
2091 {
2092 if (aRead.cancelled)
2093 return;
2094
2095 if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
2096 return;
2097
2098 debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);
2099
2100 aRead.theReader(aRead.theContext, aRead.theRead);
2101 }
2102
2103 void
2104 DeferredRead::markCancelled()
2105 {
2106 cancelled = true;
2107 }
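/*
 * Usage sketch for DeferredReadManager (illustrative only; 'MyReader', 'ctx',
 * 'conn', 'buf', 'len' and 'callback' are hypothetical caller-side names,
 * not part of this file):
 *
 *   static void MyReader(void *ctx, CommRead const &rd); // a DeferrableRead
 *
 *   DeferredReadManager deferredReads;
 *
 *   // remember a read that cannot be serviced right now
 *   CommRead rd(conn, buf, len, callback);
 *   deferredReads.delayRead(DeferredRead(&MyReader, ctx, rd));
 *
 *   // later, when resources free up again
 *   deferredReads.kickReads(10); // resume up to 10 deferred reads
 *   deferredReads.flushReads();  // or resume all of them
 */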
2108
2109 int
2110 CommSelectEngine::checkEvents(int timeout)
2111 {
2112 static time_t last_timeout = 0;
2113
2114 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
2115 if (squid_curtime > last_timeout) {
2116 last_timeout = squid_curtime;
2117 checkTimeouts();
2118 }
2119
2120 switch (Comm::DoSelect(timeout)) {
2121
2122 case COMM_OK:
2123
2124 case COMM_TIMEOUT:
2125 return 0;
2126
2127 case COMM_IDLE:
2128
2129 case COMM_SHUTDOWN:
2130 return EVENT_IDLE;
2131
2132 case COMM_ERROR:
2133 return EVENT_ERROR;
2134
2135 default:
2136 fatal_dump("comm.cc: Internal error -- this should never happen.");
2137 return EVENT_ERROR;
2138 }
2139 }
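/*
 * Wiring sketch for CommSelectEngine (illustrative only; the actual
 * registration happens in the main-loop setup code, not in this file):
 *
 *   EventLoop mainLoop;
 *   CommSelectEngine comm;
 *   mainLoop.registerEngine(&comm); // checkEvents() is then polled each cycle,
 *                                   // returning 0, EVENT_IDLE, or EVENT_ERROR
 */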
2140
2141 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
2142 int
2143 comm_open_uds(int sock_type,
2144 int proto,
2145 struct sockaddr_un* addr,
2146 int flags)
2147 {
2148 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
2149
2150 int new_socket;
2151
2152 PROF_start(comm_open);
2153 /* Create socket for accepting new connections. */
2154 statCounter.syscalls.sock.sockets++;
2155
2156 /* Setup the socket addrinfo details for use */
2157 struct addrinfo AI;
2158 AI.ai_flags = 0;
2159 AI.ai_family = PF_UNIX;
2160 AI.ai_socktype = sock_type;
2161 AI.ai_protocol = proto;
2162 AI.ai_addrlen = SUN_LEN(addr);
2163 AI.ai_addr = (sockaddr*)addr;
2164 AI.ai_canonname = NULL;
2165 AI.ai_next = NULL;
2166
2167 debugs(50, 3, HERE << "Attempting to open socket for: " << addr->sun_path);
2168
2169 if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
2170 /* Increase the number of reserved fd's if calls to socket()
2171 * are failing because the open file table is full. This
2172 * limits the number of simultaneous clients */
2173
2174 if (limitError(errno)) {
2175 debugs(50, DBG_IMPORTANT, HERE << "socket failure: " << xstrerror());
2176 fdAdjustReserved();
2177 } else {
2178 debugs(50, DBG_CRITICAL, HERE << "socket failure: " << xstrerror());
2179 }
2180
2181 PROF_stop(comm_open);
2182 return -1;
2183 }
2184
2185 debugs(50, 3, HERE << "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);
2186
2187 /* update fdstat */
2188 debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");
2189
2190 assert(!isOpen(new_socket));
2191 fd_open(new_socket, FD_MSGHDR, NULL);
2192
2193 fdd_table[new_socket].close_file = NULL;
2194
2195 fdd_table[new_socket].close_line = 0;
2196
2197 fd_table[new_socket].sock_family = AI.ai_family;
2198
2199 if (!(flags & COMM_NOCLOEXEC))
2200 commSetCloseOnExec(new_socket);
2201
2202 if (flags & COMM_REUSEADDR)
2203 commSetReuseAddr(new_socket);
2204
2205 if (flags & COMM_NONBLOCKING) {
2206 if (commSetNonBlocking(new_socket) != COMM_OK) {
2207 comm_close(new_socket);
2208 PROF_stop(comm_open);
2209 return -1;
2210 }
2211 }
2212
2213 if (flags & COMM_DOBIND) {
2214 if (commBind(new_socket, AI) != COMM_OK) {
2215 comm_close(new_socket);
2216 PROF_stop(comm_open);
2217 return -1;
2218 }
2219 }
2220
2221 #ifdef TCP_NODELAY
2222 if (sock_type == SOCK_STREAM)
2223 commSetTcpNoDelay(new_socket);
2224
2225 #endif
2226
2227 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
2228 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
2229
2230 PROF_stop(comm_open);
2231
2232 return new_socket;
2233 }
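/*
 * Usage sketch for comm_open_uds() (illustrative only; the socket path is a
 * made-up example):
 *
 *   struct sockaddr_un addr;
 *   memset(&addr, 0, sizeof(addr));
 *   addr.sun_family = AF_UNIX;
 *   xstrncpy(addr.sun_path, "/var/run/squid/example.sock", sizeof(addr.sun_path));
 *
 *   const int fd = comm_open_uds(SOCK_DGRAM, 0, &addr, COMM_NONBLOCKING | COMM_DOBIND);
 *   if (fd < 0)
 *       debugs(50, DBG_IMPORTANT, "could not open example UDS socket");
 */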