2 * DEBUG: section 05 Socket Functions
3 * AUTHOR: Harvest Derived
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
32 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
36 #include "base/AsyncCall.h"
37 #include "StoreIOBuffer.h"
41 #include "comm/AcceptLimiter.h"
42 #include "comm/comm_internal.h"
43 #include "comm/Connection.h"
44 #include "comm/IoCallback.h"
45 #include "comm/Write.h"
50 #include "SquidTime.h"
51 #include "CommCalls.h"
52 #include "DescriptorSet.h"
53 #include "icmp/net_db.h"
54 #include "ip/Address.h"
55 #include "ip/Intercept.h"
56 #include "ip/QosConfig.h"
58 #include "ClientInfo.h"
61 #if defined(_SQUID_CYGWIN_)
62 #include <sys/ioctl.h>
64 #ifdef HAVE_NETINET_TCP_H
65 #include <netinet/tcp.h>
69 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
72 static void commStopHalfClosedMonitor(int fd
);
73 static IOCB commHalfClosedReader
;
74 static void comm_init_opened(const Comm::ConnectionPointer
&conn
, tos_t tos
, nfmark_t nfmark
, const char *note
, struct addrinfo
*AI
);
75 static int comm_apply_flags(int new_socket
, Ip::Address
&addr
, int flags
, struct addrinfo
*AI
);
78 CBDATA_CLASS_INIT(CommQuotaQueue
);
80 static void commHandleWriteHelper(void * data
);
85 static DescriptorSet
*TheHalfClosed
= NULL
; /// the set of half-closed FDs
86 static bool WillCheckHalfClosed
= false; /// true if check is scheduled
87 static EVH commHalfClosedCheck
;
88 static void commPlanHalfClosedCheck();
90 static comm_err_t
commBind(int s
, struct addrinfo
&);
91 static void commSetReuseAddr(int);
92 static void commSetNoLinger(int);
94 static void commSetTcpNoDelay(int);
96 static void commSetTcpRcvbuf(int, int);
105 static MemAllocator
*conn_close_pool
= NULL
;
106 fd_debug_t
*fdd_table
= NULL
;
111 return fd_table
[fd
].flags
.open
!= 0;
117 * If the read attempt succeeds or fails, call the callback.
118 * Else, wait for another IO notification.
121 commHandleRead(int fd
, void *data
)
123 Comm::IoCallback
*ccb
= (Comm::IoCallback
*) data
;
125 assert(data
== COMMIO_FD_READCB(fd
));
126 assert(ccb
->active());
128 statCounter
.syscalls
.sock
.reads
++;
131 retval
= FD_READ_METHOD(fd
, ccb
->buf
, ccb
->size
);
132 debugs(5, 3, "comm_read_try: FD " << fd
<< ", size " << ccb
->size
<< ", retval " << retval
<< ", errno " << errno
);
134 if (retval
< 0 && !ignoreErrno(errno
)) {
135 debugs(5, 3, "comm_read_try: scheduling COMM_ERROR");
137 ccb
->finish(COMM_ERROR
, errno
);
141 /* See if we read anything */
142 /* Note - read 0 == socket EOF, which is a valid read */
144 fd_bytes(fd
, retval
, FD_READ
);
145 ccb
->offset
= retval
;
146 ccb
->finish(COMM_OK
, errno
);
150 /* Nope, register for some more IO */
151 commSetSelect(fd
, COMM_SELECT_READ
, commHandleRead
, data
, 0);
154 #if 0 // obsolete wrapper.
156 comm_read(const Comm::ConnectionPointer
&conn
, char *buf
, int size
, IOCB
*handler
, void *handler_data
)
158 AsyncCall::Pointer call
= commCbCall(5,4, "SomeCommReadHandler",
159 CommIoCbPtrFun(handler
, handler_data
));
160 comm_read(conn
, buf
, size
, call
);
165 * Queue a read. handler/handler_data are called when the read
166 * completes, on error, or on file descriptor close.
169 comm_read(const Comm::ConnectionPointer
&conn
, char *buf
, int size
, AsyncCall::Pointer
&callback
)
171 debugs(5, 5, "comm_read, queueing read for " << conn
<< "; asynCall " << callback
);
173 /* Make sure we are open and not closing */
174 assert(Comm::IsConnOpen(conn
));
175 assert(!fd_table
[conn
->fd
].closing());
176 Comm::IoCallback
*ccb
= COMMIO_FD_READCB(conn
->fd
);
178 // Make sure we are either not reading or just passively monitoring.
179 // Active/passive conflicts are OK and simply cancel passive monitoring.
181 // if the assertion below fails, we have an active comm_read conflict
182 assert(fd_table
[conn
->fd
].halfClosedReader
!= NULL
);
183 commStopHalfClosedMonitor(conn
->fd
);
184 assert(!ccb
->active());
189 ccb
->setCallback(Comm::IOCB_READ
, callback
, (char *)buf
, NULL
, size
);
190 commSetSelect(conn
->fd
, COMM_SELECT_READ
, commHandleRead
, ccb
, 0);
194 * Empty the read buffers
196 * This is a magical routine that empties the read buffers.
197 * Under some platforms (Linux) if a buffer has data in it before
198 * you call close(), the socket will hang and take quite a while
202 comm_empty_os_read_buffers(int fd
)
/* NOTE(review): drains any unread kernel-buffered data so that close()
 * does not provoke an RST / stall on some platforms (see header comment).
 * Only attempted on non-blocking FDs, otherwise the drain loop below
 * could block forever waiting for more data. */
205 /* prevent those nasty RST packets */
206 char buf
[SQUID_TCP_SO_RCVBUF
];
/* drain loop: read and discard until the OS buffer is empty (read <= 0) */
208 if (fd_table
[fd
].flags
.nonblocking
== 1) {
209 while (FD_READ_METHOD(fd
, buf
, SQUID_TCP_SO_RCVBUF
) > 0) {};
216 * Return whether the FD has a pending completed callback.
220 comm_has_pending_read_callback(int fd
)
223 // XXX: We do not know whether there is a read callback scheduled.
224 // This is used for pconn management that should probably be more
225 // tightly integrated into comm to minimize the chance that a
226 // closing pconn socket will be used for a new transaction.
230 // Does comm check this fd for read readiness?
231 // Note that when comm is not monitoring, there can be a pending callback
232 // call, which may resume comm monitoring once fired.
234 comm_monitors_read(int fd
)
237 // Being active is usually the same as monitoring because we always
238 // start monitoring the FD when we configure Comm::IoCallback for I/O
239 // and we usually configure Comm::IoCallback for I/O when we starting
240 // monitoring a FD for reading.
241 return COMMIO_FD_READCB(fd
)->active();
245 * Cancel a pending read. Assert that we have the right parameters,
246 * and that there are no pending read events!
248 * XXX: We do not assert that there are no pending read events and
249 * with async calls it becomes even more difficult.
250 * The whole interface should be reworked to do callback->cancel()
251 * instead of searching for places where the callback may be stored and
252 * updating the state of those places.
254 * AHC Don't call the comm handlers?
257 comm_read_cancel(int fd
, IOCB
*callback
, void *data
)
260 debugs(5, 4, "comm_read_cancel fails: FD " << fd
<< " closed");
264 Comm::IoCallback
*cb
= COMMIO_FD_READCB(fd
);
265 // TODO: is "active" == "monitors FD"?
267 debugs(5, 4, "comm_read_cancel fails: FD " << fd
<< " inactive");
271 typedef CommCbFunPtrCallT
<CommIoCbPtrFun
> Call
;
272 Call
*call
= dynamic_cast<Call
*>(cb
->callback
.getRaw());
274 debugs(5, 4, "comm_read_cancel fails: FD " << fd
<< " lacks callback");
278 call
->cancel("old comm_read_cancel");
280 typedef CommIoCbParams Params
;
281 const Params
¶ms
= GetCommParams
<Params
>(cb
->callback
);
283 /* Ok, we can be reasonably sure we won't lose any data here! */
284 assert(call
->dialer
.handler
== callback
);
285 assert(params
.data
== data
);
287 /* Delete the callback */
288 cb
->cancel("old comm_read_cancel");
290 /* And the IO event */
291 commSetSelect(fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
295 comm_read_cancel(int fd
, AsyncCall::Pointer
&callback
)
297 callback
->cancel("comm_read_cancel");
300 debugs(5, 4, "comm_read_cancel fails: FD " << fd
<< " closed");
304 Comm::IoCallback
*cb
= COMMIO_FD_READCB(fd
);
307 debugs(5, 4, "comm_read_cancel fails: FD " << fd
<< " inactive");
311 AsyncCall::Pointer call
= cb
->callback
;
312 assert(call
!= NULL
); // XXX: should never fail (active() checks for callback==NULL)
314 /* Ok, we can be reasonably sure we won't lose any data here! */
315 assert(call
== callback
);
317 /* Delete the callback */
318 cb
->cancel("comm_read_cancel");
320 /* And the IO event */
321 commSetSelect(fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
326 * synchronous wrapper around udp socket functions
329 comm_udp_recvfrom(int fd
, void *buf
, size_t len
, int flags
, Ip::Address
&from
)
331 statCounter
.syscalls
.sock
.recvfroms
++;
333 struct addrinfo
*AI
= NULL
;
335 debugs(5,8, "comm_udp_recvfrom: FD " << fd
<< " from " << from
);
337 assert( NULL
== AI
);
339 from
.InitAddrInfo(AI
);
341 x
= recvfrom(fd
, buf
, len
, flags
, AI
->ai_addr
, &AI
->ai_addrlen
);
345 from
.FreeAddrInfo(AI
);
351 comm_udp_recv(int fd
, void *buf
, size_t len
, int flags
)
354 return comm_udp_recvfrom(fd
, buf
, len
, flags
, nul
);
/**
 * Thin synchronous wrapper around send(2) for connected UDP sockets.
 * \param s     socket descriptor to send on
 * \param buf   payload to transmit
 * \param len   payload length in bytes
 * \param flags flags passed straight through to send(2)
 * \return number of bytes sent, or -1 with errno set on failure
 */
int
comm_udp_send(int s, const void *buf, size_t len, int flags)
{
    return send(s, buf, len, flags);
}
365 comm_has_incomplete_write(int fd
)
368 return COMMIO_FD_WRITECB(fd
)->active();
372 * Queue a write. handler/handler_data are called when the write fully
373 * completes, on error, or on file descriptor close.
376 /* Return the local port associated with fd. */
378 comm_local_port(int fd
)
381 struct addrinfo
*addr
= NULL
;
382 fde
*F
= &fd_table
[fd
];
384 /* If the fd is closed already, just return */
386 if (!F
->flags
.open
) {
387 debugs(5, 0, "comm_local_port: FD " << fd
<< " has been closed.");
391 if (F
->local_addr
.GetPort())
392 return F
->local_addr
.GetPort();
394 if (F
->sock_family
== AF_INET
)
397 temp
.InitAddrInfo(addr
);
399 if (getsockname(fd
, addr
->ai_addr
, &(addr
->ai_addrlen
)) ) {
400 debugs(50, 1, "comm_local_port: Failed to retrieve TCP/UDP port number for socket: FD " << fd
<< ": " << xstrerror());
401 temp
.FreeAddrInfo(addr
);
406 temp
.FreeAddrInfo(addr
);
408 if (F
->local_addr
.IsAnyAddr()) {
409 /* save the whole local address, not just the port. */
410 F
->local_addr
= temp
;
412 F
->local_addr
.SetPort(temp
.GetPort());
415 debugs(5, 6, "comm_local_port: FD " << fd
<< ": port " << F
->local_addr
.GetPort() << "(family=" << F
->sock_family
<< ")");
416 return F
->local_addr
.GetPort();
420 commBind(int s
, struct addrinfo
&inaddr
)
422 statCounter
.syscalls
.sock
.binds
++;
424 if (bind(s
, inaddr
.ai_addr
, inaddr
.ai_addrlen
) == 0) {
425 debugs(50, 6, "commBind: bind socket FD " << s
<< " to " << fd_table
[s
].local_addr
);
429 debugs(50, 0, "commBind: Cannot bind socket FD " << s
<< " to " << fd_table
[s
].local_addr
<< ": " << xstrerror());
435 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
436 * is OR of flags specified in comm.h. Defaults TOS
439 comm_open(int sock_type
,
445 return comm_openex(sock_type
, proto
, addr
, flags
, 0, 0, note
);
449 comm_open_listener(int sock_type
,
451 Comm::ConnectionPointer
&conn
,
454 /* all listener sockets require bind() */
455 conn
->flags
|= COMM_DOBIND
;
457 /* attempt native enabled port. */
458 conn
->fd
= comm_openex(sock_type
, proto
, conn
->local
, conn
->flags
, 0, 0, note
);
462 comm_open_listener(int sock_type
,
470 /* all listener sockets require bind() */
471 flags
|= COMM_DOBIND
;
473 /* attempt native enabled port. */
474 sock
= comm_openex(sock_type
, proto
, addr
, flags
, 0, 0, note
);
/// Whether anErrno indicates that socket() failed because the per-process
/// (EMFILE) or system-wide (ENFILE) file-descriptor table is full.
static bool
limitError(int const anErrno)
{
    return anErrno == ENFILE || anErrno == EMFILE;
}
486 comm_set_v6only(int fd
, int tos
)
489 if (setsockopt(fd
, IPPROTO_IPV6
, IPV6_V6ONLY
, (char *) &tos
, sizeof(int)) < 0) {
490 debugs(50, 1, "comm_open: setsockopt(IPV6_V6ONLY) " << (tos
?"ON":"OFF") << " for FD " << fd
<< ": " << xstrerror());
493 debugs(50, 0, "WARNING: comm_open: setsockopt(IPV6_V6ONLY) not supported on this platform");
498 * Set the socket IP_TRANSPARENT option for Linux TPROXY v4 support.
501 comm_set_transparent(int fd
)
503 #if defined(IP_TRANSPARENT)
505 if (setsockopt(fd
, SOL_IP
, IP_TRANSPARENT
, (char *) &tos
, sizeof(int)) < 0) {
506 debugs(50, DBG_IMPORTANT
, "comm_open: setsockopt(IP_TRANSPARENT) on FD " << fd
<< ": " << xstrerror());
508 /* mark the socket as having transparent options */
509 fd_table
[fd
].flags
.transparent
= 1;
512 debugs(50, DBG_CRITICAL
, "WARNING: comm_open: setsockopt(IP_TRANSPARENT) not supported on this platform");
517 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
518 * is OR of flags specified in defines.h:COMM_*
521 comm_openex(int sock_type
,
530 struct addrinfo
*AI
= NULL
;
532 PROF_start(comm_open
);
533 /* Create socket for accepting new connections. */
534 statCounter
.syscalls
.sock
.sockets
++;
536 /* Setup the socket addrinfo details for use */
537 addr
.GetAddrInfo(AI
);
538 AI
->ai_socktype
= sock_type
;
539 AI
->ai_protocol
= proto
;
541 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr
);
542 new_socket
= socket(AI
->ai_family
, AI
->ai_socktype
, AI
->ai_protocol
);
544 /* under IPv6 there is the possibility IPv6 is present but disabled. */
545 /* try again as IPv4-native if possible */
546 if ( new_socket
< 0 && Ip::EnableIpv6
&& addr
.IsIPv6() && addr
.SetIPv4() ) {
547 /* attempt to open this IPv4-only. */
548 addr
.FreeAddrInfo(AI
);
549 /* Setup the socket addrinfo details for use */
550 addr
.GetAddrInfo(AI
);
551 AI
->ai_socktype
= sock_type
;
552 AI
->ai_protocol
= proto
;
553 debugs(50, 3, "comm_openex: Attempt fallback open socket for: " << addr
);
554 new_socket
= socket(AI
->ai_family
, AI
->ai_socktype
, AI
->ai_protocol
);
555 debugs(50, 2, HERE
<< "attempt open " << note
<< " socket on: " << addr
);
558 if (new_socket
< 0) {
559 /* Increase the number of reserved fd's if calls to socket()
560 * are failing because the open file table is full. This
561 * limits the number of simultaneous clients */
563 if (limitError(errno
)) {
564 debugs(50, DBG_IMPORTANT
, "comm_open: socket failure: " << xstrerror());
567 debugs(50, DBG_CRITICAL
, "comm_open: socket failure: " << xstrerror());
570 addr
.FreeAddrInfo(AI
);
572 PROF_stop(comm_open
);
576 // temporary for the transition. comm_openex will eventually have a conn to play with.
577 Comm::ConnectionPointer conn
= new Comm::Connection
;
579 conn
->fd
= new_socket
;
581 debugs(50, 3, "comm_openex: Opened socket " << conn
<< " : family=" << AI
->ai_family
<< ", type=" << AI
->ai_socktype
<< ", protocol=" << AI
->ai_protocol
);
583 /* set TOS if needed */
585 Ip::Qos::setSockTos(conn
, tos
);
587 /* set netfilter mark if needed */
589 Ip::Qos::setSockNfmark(conn
, nfmark
);
591 if ( Ip::EnableIpv6
&IPV6_SPECIAL_SPLITSTACK
&& addr
.IsIPv6() )
592 comm_set_v6only(conn
->fd
, 1);
594 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
595 /* Other OS may have this administratively disabled for general use. Same deal. */
596 if ( Ip::EnableIpv6
&IPV6_SPECIAL_V4MAPPING
&& addr
.IsIPv6() )
597 comm_set_v6only(conn
->fd
, 0);
599 comm_init_opened(conn
, tos
, nfmark
, note
, AI
);
600 new_socket
= comm_apply_flags(conn
->fd
, addr
, flags
, AI
);
602 addr
.FreeAddrInfo(AI
);
604 PROF_stop(comm_open
);
606 // XXX transition only. prevent conn from closing the new FD on function exit.
611 /// update FD tables after a local or remote (IPC) comm_openex();
613 comm_init_opened(const Comm::ConnectionPointer
&conn
,
619 assert(Comm::IsConnOpen(conn
));
623 debugs(5, 5, HERE
<< conn
<< " is a new socket");
625 assert(!isOpen(conn
->fd
)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
626 fd_open(conn
->fd
, FD_SOCKET
, note
);
628 fdd_table
[conn
->fd
].close_file
= NULL
;
629 fdd_table
[conn
->fd
].close_line
= 0;
631 fde
*F
= &fd_table
[conn
->fd
];
632 F
->local_addr
= conn
->local
;
633 F
->tosToServer
= tos
;
634 F
->nfmarkToServer
= nfmark
;
635 F
->sock_family
= AI
->ai_family
;
638 /// apply flags after a local comm_open*() call;
639 /// returns new_socket or -1 on error
641 comm_apply_flags(int new_socket
,
646 assert(new_socket
>= 0);
648 const int sock_type
= AI
->ai_socktype
;
650 if (!(flags
& COMM_NOCLOEXEC
))
651 commSetCloseOnExec(new_socket
);
653 if ((flags
& COMM_REUSEADDR
))
654 commSetReuseAddr(new_socket
);
656 if (addr
.GetPort() > (u_short
) 0) {
658 if (sock_type
!= SOCK_DGRAM
)
660 commSetNoLinger(new_socket
);
663 commSetReuseAddr(new_socket
);
666 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
667 if ((flags
& COMM_TRANSPARENT
)) {
668 comm_set_transparent(new_socket
);
671 if ( (flags
& COMM_DOBIND
) || addr
.GetPort() > 0 || !addr
.IsAnyAddr() ) {
672 if ( !(flags
& COMM_DOBIND
) && addr
.IsAnyAddr() )
673 debugs(5,1,"WARNING: Squid is attempting to bind() port " << addr
<< " without being a listener.");
674 if ( addr
.IsNoAddr() )
675 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr
<< "!!");
677 if (commBind(new_socket
, *AI
) != COMM_OK
) {
678 comm_close(new_socket
);
683 if (flags
& COMM_NONBLOCKING
)
684 if (commSetNonBlocking(new_socket
) == COMM_ERROR
) {
685 comm_close(new_socket
);
690 if (sock_type
== SOCK_STREAM
)
691 commSetTcpNoDelay(new_socket
);
695 if (Config
.tcpRcvBufsz
> 0 && sock_type
== SOCK_STREAM
)
696 commSetTcpRcvbuf(new_socket
, Config
.tcpRcvBufsz
);
702 comm_import_opened(const Comm::ConnectionPointer
&conn
,
706 debugs(5, 2, HERE
<< conn
);
707 assert(Comm::IsConnOpen(conn
));
710 comm_init_opened(conn
, 0, 0, note
, AI
);
712 if (!(conn
->flags
& COMM_NOCLOEXEC
))
713 fd_table
[conn
->fd
].flags
.close_on_exec
= 1;
715 if (conn
->local
.GetPort() > (u_short
) 0) {
717 if (AI
->ai_socktype
!= SOCK_DGRAM
)
719 fd_table
[conn
->fd
].flags
.nolinger
= 1;
722 if ((conn
->flags
& COMM_TRANSPARENT
))
723 fd_table
[conn
->fd
].flags
.transparent
= 1;
725 if (conn
->flags
& COMM_NONBLOCKING
)
726 fd_table
[conn
->fd
].flags
.nonblocking
= 1;
729 if (AI
->ai_socktype
== SOCK_STREAM
)
730 fd_table
[conn
->fd
].flags
.nodelay
= 1;
733 /* no fd_table[fd].flags. updates needed for these conditions:
734 * if ((flags & COMM_REUSEADDR)) ...
735 * if ((flags & COMM_DOBIND) ...) ...
741 commSetTimeout_old(int fd
, int timeout
, PF
* handler
, void *data
)
743 debugs(5, 3, HERE
<< "FD " << fd
<< " timeout " << timeout
);
745 assert(fd
< Squid_MaxFD
);
746 fde
*F
= &fd_table
[fd
];
747 assert(F
->flags
.open
);
750 cbdataReferenceDone(F
->timeout_data
);
751 F
->timeout_handler
= NULL
;
755 cbdataReferenceDone(F
->timeout_data
);
756 F
->timeout_handler
= handler
;
757 F
->timeout_data
= cbdataReference(data
);
760 F
->timeout
= squid_curtime
+ (time_t) timeout
;
767 // Legacy pre-AsyncCalls API for FD timeouts.
769 commSetTimeout(int fd
, int timeout
, PF
* handler
, void *data
)
771 AsyncCall::Pointer call
;
772 debugs(5, 3, HERE
<< "FD " << fd
<< " timeout " << timeout
);
774 call
=commCbCall(5,4, "SomeTimeoutHandler", CommTimeoutCbPtrFun(handler
, data
));
777 return commSetTimeout(fd
, timeout
, call
);
780 // Legacy pre-Comm::Connection API for FD timeouts
781 // still used by non-socket FD code dealing with pipes and IPC sockets.
783 commSetTimeout(int fd
, int timeout
, AsyncCall::Pointer
&callback
)
785 debugs(5, 3, HERE
<< "FD " << fd
<< " timeout " << timeout
);
787 assert(fd
< Squid_MaxFD
);
788 fde
*F
= &fd_table
[fd
];
789 assert(F
->flags
.open
);
792 F
->timeoutHandler
= NULL
;
795 if (callback
!= NULL
) {
796 typedef CommTimeoutCbParams Params
;
797 Params
¶ms
= GetCommParams
<Params
>(callback
);
799 F
->timeoutHandler
= callback
;
802 F
->timeout
= squid_curtime
+ (time_t) timeout
;
809 commSetConnTimeout(const Comm::ConnectionPointer
&conn
, int timeout
, AsyncCall::Pointer
&callback
)
811 debugs(5, 3, HERE
<< conn
<< " timeout " << timeout
);
812 assert(Comm::IsConnOpen(conn
));
813 assert(conn
->fd
< Squid_MaxFD
);
814 fde
*F
= &fd_table
[conn
->fd
];
815 assert(F
->flags
.open
);
818 F
->timeoutHandler
= NULL
;
821 if (callback
!= NULL
) {
822 typedef CommTimeoutCbParams Params
;
823 Params
¶ms
= GetCommParams
<Params
>(callback
);
825 F
->timeoutHandler
= callback
;
828 F
->timeout
= squid_curtime
+ (time_t) timeout
;
835 comm_connect_addr(int sock
, const Ip::Address
&address
)
837 comm_err_t status
= COMM_OK
;
838 fde
*F
= &fd_table
[sock
];
842 struct addrinfo
*AI
= NULL
;
843 PROF_start(comm_connect_addr
);
845 assert(address
.GetPort() != 0);
847 debugs(5, 9, HERE
<< "connecting socket FD " << sock
<< " to " << address
<< " (want family: " << F
->sock_family
<< ")");
849 /* Handle IPv6 over IPv4-only socket case.
850 * this case must presently be handled here since the GetAddrInfo asserts on bad mappings.
851 * NP: because commResetFD is private to ConnStateData we have to return an error and
852 * trust its handled properly.
854 if (F
->sock_family
== AF_INET
&& !address
.IsIPv4()) {
856 return COMM_ERR_PROTOCOL
;
859 /* Handle IPv4 over IPv6-only socket case.
860 * This case is presently handled here as it's both a known case and it's
861 * uncertain what error will be returned by the IPv6 stack in such case. It's
862 * possible this will also be handled by the errno checks below after connect()
863 * but needs careful cross-platform verification, and verifying the address
864 * condition here is simple.
866 if (!F
->local_addr
.IsIPv4() && address
.IsIPv4()) {
868 return COMM_ERR_PROTOCOL
;
871 address
.GetAddrInfo(AI
, F
->sock_family
);
873 /* Establish connection. */
876 if (!F
->flags
.called_connect
) {
877 F
->flags
.called_connect
= 1;
878 statCounter
.syscalls
.sock
.connects
++;
880 x
= connect(sock
, AI
->ai_addr
, AI
->ai_addrlen
);
882 // XXX: ICAP code refuses callbacks during a pending comm_ call
883 // Async calls development will fix this.
890 debugs(5,5, "comm_connect_addr: sock=" << sock
<< ", addrinfo( " <<
891 " flags=" << AI
->ai_flags
<<
892 ", family=" << AI
->ai_family
<<
893 ", socktype=" << AI
->ai_socktype
<<
894 ", protocol=" << AI
->ai_protocol
<<
895 ", &addr=" << AI
->ai_addr
<<
896 ", addrlen=" << AI
->ai_addrlen
<<
898 debugs(5, 9, "connect FD " << sock
<< ": (" << x
<< ") " << xstrerror());
899 debugs(14,9, "connecting to: " << address
);
902 #if defined(_SQUID_NEWSOS6_)
903 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
905 connect(sock
, AI
->ai_addr
, AI
->ai_addrlen
);
907 if (errno
== EINVAL
) {
908 errlen
= sizeof(err
);
909 x
= getsockopt(sock
, SOL_SOCKET
, SO_ERROR
, &err
, &errlen
);
916 errlen
= sizeof(err
);
918 x
= getsockopt(sock
, SOL_SOCKET
, SO_ERROR
, &err
, &errlen
);
923 #if defined(_SQUID_SOLARIS_)
925 * Solaris 2.4's socket emulation doesn't allow you
926 * to determine the error from a failed non-blocking
927 * connect and just returns EPIPE. Create a fake
928 * error message for connect. -- fenner@parc.xerox.com
930 if (x
< 0 && errno
== EPIPE
)
938 /* Squid seems to be working fine without this code. With this code,
939 * we leak memory on many connect requests because of EINPROGRESS.
940 * If you find that this code is needed, please file a bug report. */
944 * Linux Debian replaces our allocated AI pointer with garbage when
945 * connect() fails. This leads to segmentation faults deallocating
946 * the system-allocated memory when we go to clean up our pointer.
947 * HACK: is to leak the memory returned since we can't deallocate.
955 address
.FreeAddrInfo(AI
);
957 PROF_stop(comm_connect_addr
);
959 if (errno
== 0 || errno
== EISCONN
)
961 else if (ignoreErrno(errno
))
962 status
= COMM_INPROGRESS
;
963 else if (errno
== EAFNOSUPPORT
|| errno
== EINVAL
)
964 return COMM_ERR_PROTOCOL
;
968 address
.NtoA(F
->ipaddr
, MAX_IPSTRLEN
);
970 F
->remote_port
= address
.GetPort(); /* remote_port is HS */
972 if (status
== COMM_OK
) {
973 debugs(5, 10, "comm_connect_addr: FD " << sock
<< " connected to " << address
);
974 } else if (status
== COMM_INPROGRESS
) {
975 debugs(5, 10, "comm_connect_addr: FD " << sock
<< " connection pending");
982 commCallCloseHandlers(int fd
)
984 fde
*F
= &fd_table
[fd
];
985 debugs(5, 5, "commCallCloseHandlers: FD " << fd
);
987 while (F
->closeHandler
!= NULL
) {
988 AsyncCall::Pointer call
= F
->closeHandler
;
989 F
->closeHandler
= call
->Next();
991 // If call is not canceled schedule it for execution else ignore it
992 if (!call
->canceled()) {
993 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call
);
994 typedef CommCloseCbParams Params
;
995 Params
¶ms
= GetCommParams
<Params
>(call
);
997 ScheduleCallHere(call
);
1004 commLingerClose(int fd
, void *unused
)
1006 LOCAL_ARRAY(char, buf
, 1024);
1008 n
= FD_READ_METHOD(fd
, buf
, 1024);
1011 debugs(5, 3, "commLingerClose: FD " << fd
<< " read: " << xstrerror());
1017 commLingerTimeout(int fd
, void *unused
)
1019 debugs(5, 3, "commLingerTimeout: FD " << fd
);
1024 * Inspired by apache
1027 comm_lingering_close(int fd
)
1031 if (fd_table
[fd
].ssl
)
1032 ssl_shutdown_method(fd
);
1036 if (shutdown(fd
, 1) < 0) {
1041 fd_note(fd
, "lingering close");
1042 commSetTimeout(fd
, 10, commLingerTimeout
, NULL
);
1043 commSetSelect(fd
, COMM_SELECT_READ
, commLingerClose
, NULL
, 0);
1049 * enable linger with time of 0 so that when the socket is
1050 * closed, TCP generates a RESET
1053 comm_reset_close(Comm::ConnectionPointer
&conn
)
1059 if (setsockopt(conn
->fd
, SOL_SOCKET
, SO_LINGER
, (char *) &L
, sizeof(L
)) < 0)
1060 debugs(50, DBG_CRITICAL
, "ERROR: Closing FD " << conn
->fd
<< " with TCP RST: " << xstrerror());
1065 // Legacy close function.
1067 old_comm_reset_close(int fd
)
1073 if (setsockopt(fd
, SOL_SOCKET
, SO_LINGER
, (char *) &L
, sizeof(L
)) < 0)
1074 debugs(50, DBG_CRITICAL
, "ERROR: Closing FD " << fd
<< " with TCP RST: " << xstrerror());
1080 comm_close_start(int fd
, void *data
)
1083 fde
*F
= &fd_table
[fd
];
1085 ssl_shutdown_method(fd
);
1092 comm_close_complete(int fd
, void *data
)
1095 fde
*F
= &fd_table
[fd
];
1102 if (F
->dynamicSslContext
) {
1103 SSL_CTX_free(F
->dynamicSslContext
);
1104 F
->dynamicSslContext
= NULL
;
1107 fd_close(fd
); /* update fdstat */
1111 statCounter
.syscalls
.sock
.closes
++;
1113 /* When an fd closes, give accept() a chance, if need be */
1114 Comm::AcceptLimiter::Instance().kick();
1118 * Close the socket fd.
1120 * + call write handlers with ERR_CLOSING
1121 * + call read handlers with ERR_CLOSING
1122 * + call closing handlers
1124 * NOTE: COMM_ERR_CLOSING will NOT be called for CommReads' sitting in a
1125 * DeferredReadManager.
1128 _comm_close(int fd
, char const *file
, int line
)
1130 debugs(5, 3, "comm_close: start closing FD " << fd
);
1132 assert(fd
< Squid_MaxFD
);
1134 fde
*F
= &fd_table
[fd
];
1135 fdd_table
[fd
].close_file
= file
;
1136 fdd_table
[fd
].close_line
= line
;
1141 /* XXX: is this obsolete behind F->closing() ? */
1142 if ( (shutting_down
|| reconfiguring
) && (!F
->flags
.open
|| F
->type
== FD_FILE
))
1145 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
1148 assert(F
->type
!= FD_FILE
);
1150 PROF_start(comm_close
);
1152 F
->flags
.close_request
= 1;
1154 AsyncCall::Pointer startCall
=commCbCall(5,4, "comm_close_start",
1155 CommCloseCbPtrFun(comm_close_start
, NULL
));
1156 typedef CommCloseCbParams Params
;
1157 Params
&startParams
= GetCommParams
<Params
>(startCall
);
1158 startParams
.fd
= fd
;
1159 ScheduleCallHere(startCall
);
1161 // a half-closed fd may lack a reader, so we stop monitoring explicitly
1162 if (commHasHalfClosedMonitor(fd
))
1163 commStopHalfClosedMonitor(fd
);
1164 commSetTimeout(fd
, -1, NULL
, NULL
);
1166 // notify read/write handlers after canceling select reservations, if any
1167 if (COMMIO_FD_WRITECB(fd
)->active()) {
1168 commSetSelect(fd
, COMM_SELECT_WRITE
, NULL
, NULL
, 0);
1169 COMMIO_FD_WRITECB(fd
)->finish(COMM_ERR_CLOSING
, errno
);
1171 if (COMMIO_FD_READCB(fd
)->active()) {
1172 commSetSelect(fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
1173 COMMIO_FD_READCB(fd
)->finish(COMM_ERR_CLOSING
, errno
);
1177 if (ClientInfo
*clientInfo
= F
->clientInfo
) {
1178 if (clientInfo
->selectWaiting
) {
1179 clientInfo
->selectWaiting
= false;
1180 // kick queue or it will get stuck as commWriteHandle is not called
1181 clientInfo
->kickQuotaQueue();
1186 commCallCloseHandlers(fd
);
1189 F
->pconn
.pool
->count(F
->pconn
.uses
);
1191 comm_empty_os_read_buffers(fd
);
1194 AsyncCall::Pointer completeCall
=commCbCall(5,4, "comm_close_complete",
1195 CommCloseCbPtrFun(comm_close_complete
, NULL
));
1196 Params
&completeParams
= GetCommParams
<Params
>(completeCall
);
1197 completeParams
.fd
= fd
;
1198 // must use async call to wait for all callbacks
1199 // scheduled before comm_close() to finish
1200 ScheduleCallHere(completeCall
);
1202 PROF_stop(comm_close
);
1205 /* Send a udp datagram to specified TO_ADDR. */
1207 comm_udp_sendto(int fd
,
1208 const Ip::Address
&to_addr
,
1213 struct addrinfo
*AI
= NULL
;
1215 PROF_start(comm_udp_sendto
);
1216 statCounter
.syscalls
.sock
.sendtos
++;
1218 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr
<<
1219 " using FD " << fd
<< " using Port " << comm_local_port(fd
) );
1221 /* BUG: something in the above macro appears to occasionally be setting AI to garbage. */
1222 /* AYJ: 2007-08-27 : or was it because I wasn't then setting 'fd_table[fd].sock_family' to fill properly. */
1223 assert( NULL
== AI
);
1225 to_addr
.GetAddrInfo(AI
, fd_table
[fd
].sock_family
);
1227 x
= sendto(fd
, buf
, len
, 0, AI
->ai_addr
, AI
->ai_addrlen
);
1229 to_addr
.FreeAddrInfo(AI
);
1231 PROF_stop(comm_udp_sendto
);
1236 #ifdef _SQUID_LINUX_
1238 if (ECONNREFUSED
!= errno
)
1241 debugs(50, 1, "comm_udp_sendto: FD " << fd
<< ", (family=" << fd_table
[fd
].sock_family
<< ") " << to_addr
<< ": " << xstrerror());
1247 comm_add_close_handler(int fd
, PF
* handler
, void *data
)
1249 debugs(5, 5, "comm_add_close_handler: FD " << fd
<< ", handler=" <<
1250 handler
<< ", data=" << data
);
1252 AsyncCall::Pointer call
=commCbCall(5,4, "SomeCloseHandler",
1253 CommCloseCbPtrFun(handler
, data
));
1254 comm_add_close_handler(fd
, call
);
1258 comm_add_close_handler(int fd
, AsyncCall::Pointer
&call
)
1260 debugs(5, 5, "comm_add_close_handler: FD " << fd
<< ", AsyncCall=" << call
);
1262 /*TODO:Check for a similar scheduled AsyncCall*/
1263 // for (c = fd_table[fd].closeHandler; c; c = c->next)
1264 // assert(c->handler != handler || c->data != data);
1266 call
->setNext(fd_table
[fd
].closeHandler
);
1268 fd_table
[fd
].closeHandler
= call
;
1272 // remove function-based close handler
1274 comm_remove_close_handler(int fd
, PF
* handler
, void *data
)
1276 assert (isOpen(fd
));
1277 /* Find handler in list */
1278 debugs(5, 5, "comm_remove_close_handler: FD " << fd
<< ", handler=" <<
1279 handler
<< ", data=" << data
);
1281 AsyncCall::Pointer p
, prev
= NULL
;
1282 for (p
= fd_table
[fd
].closeHandler
; p
!= NULL
; prev
= p
, p
= p
->Next()) {
1283 typedef CommCbFunPtrCallT
<CommCloseCbPtrFun
> Call
;
1284 const Call
*call
= dynamic_cast<const Call
*>(p
.getRaw());
1285 if (!call
) // method callbacks have their own comm_remove_close_handler
1288 typedef CommCloseCbParams Params
;
1289 const Params
¶ms
= GetCommParams
<Params
>(p
);
1290 if (call
->dialer
.handler
== handler
&& params
.data
== data
)
1291 break; /* This is our handler */
1294 // comm_close removes all close handlers so our handler may be gone
1296 p
->dequeue(fd_table
[fd
].closeHandler
, prev
);
1297 p
->cancel("comm_remove_close_handler");
1301 // remove method-based close handler
1303 comm_remove_close_handler(int fd
, AsyncCall::Pointer
&call
)
1305 assert (isOpen(fd
));
1306 debugs(5, 5, "comm_remove_close_handler: FD " << fd
<< ", AsyncCall=" << call
);
1308 // comm_close removes all close handlers so our handler may be gone
1309 AsyncCall::Pointer p
, prev
= NULL
;
1310 for (p
= fd_table
[fd
].closeHandler
; p
!= NULL
&& p
!= call
; prev
= p
, p
= p
->Next());
1313 p
->dequeue(fd_table
[fd
].closeHandler
, prev
);
1314 call
->cancel("comm_remove_close_handler");
1318 commSetNoLinger(int fd
)
1322 L
.l_onoff
= 0; /* off */
1325 if (setsockopt(fd
, SOL_SOCKET
, SO_LINGER
, (char *) &L
, sizeof(L
)) < 0)
1326 debugs(50, 0, "commSetNoLinger: FD " << fd
<< ": " << xstrerror());
1328 fd_table
[fd
].flags
.nolinger
= 1;
1332 commSetReuseAddr(int fd
)
1336 if (setsockopt(fd
, SOL_SOCKET
, SO_REUSEADDR
, (char *) &on
, sizeof(on
)) < 0)
1337 debugs(50, 1, "commSetReuseAddr: FD " << fd
<< ": " << xstrerror());
1341 commSetTcpRcvbuf(int fd
, int size
)
1343 if (setsockopt(fd
, SOL_SOCKET
, SO_RCVBUF
, (char *) &size
, sizeof(size
)) < 0)
1344 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd
<< ", SIZE " << size
<< ": " << xstrerror());
1345 if (setsockopt(fd
, SOL_SOCKET
, SO_SNDBUF
, (char *) &size
, sizeof(size
)) < 0)
1346 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd
<< ", SIZE " << size
<< ": " << xstrerror());
1347 #ifdef TCP_WINDOW_CLAMP
1348 if (setsockopt(fd
, SOL_TCP
, TCP_WINDOW_CLAMP
, (char *) &size
, sizeof(size
)) < 0)
1349 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd
<< ", SIZE " << size
<< ": " << xstrerror());
1354 commSetNonBlocking(int fd
)
1356 #ifndef _SQUID_MSWIN_
1360 #ifdef _SQUID_WIN32_
1362 int nonblocking
= TRUE
;
1364 #ifdef _SQUID_CYGWIN_
1366 if (fd_table
[fd
].type
!= FD_PIPE
) {
1369 if (ioctl(fd
, FIONBIO
, &nonblocking
) < 0) {
1370 debugs(50, 0, "commSetNonBlocking: FD " << fd
<< ": " << xstrerror() << " " << fd_table
[fd
].type
);
1374 #ifdef _SQUID_CYGWIN_
1379 #ifndef _SQUID_MSWIN_
1381 if ((flags
= fcntl(fd
, F_GETFL
, dummy
)) < 0) {
1382 debugs(50, 0, "FD " << fd
<< ": fcntl F_GETFL: " << xstrerror());
1386 if (fcntl(fd
, F_SETFL
, flags
| SQUID_NONBLOCK
) < 0) {
1387 debugs(50, 0, "commSetNonBlocking: FD " << fd
<< ": " << xstrerror());
1392 #ifdef _SQUID_CYGWIN_
1397 fd_table
[fd
].flags
.nonblocking
= 1;
1403 commUnsetNonBlocking(int fd
)
1405 #ifdef _SQUID_MSWIN_
1406 int nonblocking
= FALSE
;
1408 if (ioctlsocket(fd
, FIONBIO
, (unsigned long *) &nonblocking
) < 0) {
1413 if ((flags
= fcntl(fd
, F_GETFL
, dummy
)) < 0) {
1414 debugs(50, 0, "FD " << fd
<< ": fcntl F_GETFL: " << xstrerror());
1418 if (fcntl(fd
, F_SETFL
, flags
& (~SQUID_NONBLOCK
)) < 0) {
1420 debugs(50, 0, "commUnsetNonBlocking: FD " << fd
<< ": " << xstrerror());
1424 fd_table
[fd
].flags
.nonblocking
= 0;
1429 commSetCloseOnExec(int fd
)
1435 if ((flags
= fcntl(fd
, F_GETFD
, dummy
)) < 0) {
1436 debugs(50, 0, "FD " << fd
<< ": fcntl F_GETFD: " << xstrerror());
1440 if (fcntl(fd
, F_SETFD
, flags
| FD_CLOEXEC
) < 0)
1441 debugs(50, 0, "FD " << fd
<< ": set close-on-exec failed: " << xstrerror());
1443 fd_table
[fd
].flags
.close_on_exec
= 1;
1450 commSetTcpNoDelay(int fd
)
1454 if (setsockopt(fd
, IPPROTO_TCP
, TCP_NODELAY
, (char *) &on
, sizeof(on
)) < 0)
1455 debugs(50, 1, "commSetTcpNoDelay: FD " << fd
<< ": " << xstrerror());
1457 fd_table
[fd
].flags
.nodelay
= 1;
1463 commSetTcpKeepalive(int fd
, int idle
, int interval
, int timeout
)
1467 if (timeout
&& interval
) {
1468 int count
= (timeout
+ interval
- 1) / interval
;
1469 if (setsockopt(fd
, IPPROTO_TCP
, TCP_KEEPCNT
, &count
, sizeof(on
)) < 0)
1470 debugs(5, 1, "commSetKeepalive: FD " << fd
<< ": " << xstrerror());
1475 if (setsockopt(fd
, IPPROTO_TCP
, TCP_KEEPIDLE
, &idle
, sizeof(on
)) < 0)
1476 debugs(5, 1, "commSetKeepalive: FD " << fd
<< ": " << xstrerror());
1479 #ifdef TCP_KEEPINTVL
1481 if (setsockopt(fd
, IPPROTO_TCP
, TCP_KEEPINTVL
, &interval
, sizeof(on
)) < 0)
1482 debugs(5, 1, "commSetKeepalive: FD " << fd
<< ": " << xstrerror());
1485 if (setsockopt(fd
, SOL_SOCKET
, SO_KEEPALIVE
, (char *) &on
, sizeof(on
)) < 0)
1486 debugs(5, 1, "commSetKeepalive: FD " << fd
<< ": " << xstrerror());
1492 fd_table
=(fde
*) xcalloc(Squid_MaxFD
, sizeof(fde
));
1493 fdd_table
= (fd_debug_t
*)xcalloc(Squid_MaxFD
, sizeof(fd_debug_t
));
1495 /* make sure the accept() socket FIFO delay queue exists */
1496 Comm::AcceptLimiter::Instance();
1498 // make sure the IO pending callback table exists
1499 Comm::CallbackTableInit();
1501 /* XXX account fd_table */
1502 /* Keep a few file descriptors free so that we don't run out of FD's
1503 * after accepting a client but before it opens a socket or a file.
1504 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1505 RESERVED_FD
= min(100, Squid_MaxFD
/ 4);
1507 conn_close_pool
= memPoolCreate("close_handler", sizeof(close_handler
));
1509 TheHalfClosed
= new DescriptorSet
;
1515 delete TheHalfClosed
;
1516 TheHalfClosed
= NULL
;
1518 safe_free(fd_table
);
1519 safe_free(fdd_table
);
1520 Comm::CallbackTableDestruct();
1524 // called when the queue is done waiting for the client bucket to fill
1526 commHandleWriteHelper(void * data
)
1528 CommQuotaQueue
*queue
= static_cast<CommQuotaQueue
*>(data
);
1531 ClientInfo
*clientInfo
= queue
->clientInfo
;
1532 // ClientInfo invalidates queue if freed, so if we got here through,
1533 // evenAdd cbdata protections, everything should be valid and consistent
1535 assert(clientInfo
->hasQueue());
1536 assert(clientInfo
->hasQueue(queue
));
1537 assert(!clientInfo
->selectWaiting
);
1538 assert(clientInfo
->eventWaiting
);
1539 clientInfo
->eventWaiting
= false;
1542 // check that the head descriptor is still relevant
1543 const int head
= clientInfo
->quotaPeekFd();
1544 Comm::IoCallback
*ccb
= COMMIO_FD_WRITECB(head
);
1546 if (fd_table
[head
].clientInfo
== clientInfo
&&
1547 clientInfo
->quotaPeekReserv() == ccb
->quotaQueueReserv
&&
1548 !fd_table
[head
].closing()) {
1550 // wait for the head descriptor to become ready for writing
1551 commSetSelect(head
, COMM_SELECT_WRITE
, Comm::HandleWrite
, ccb
, 0);
1552 clientInfo
->selectWaiting
= true;
1556 clientInfo
->quotaDequeue(); // remove the no longer relevant descriptor
1557 // and continue looking for a relevant one
1558 } while (clientInfo
->hasQueue());
1560 debugs(77,3, HERE
<< "emptied queue");
1564 ClientInfo::hasQueue() const
1567 return !quotaQueue
->empty();
1571 ClientInfo::hasQueue(const CommQuotaQueue
*q
) const
1574 return quotaQueue
== q
;
1577 /// returns the first descriptor to be dequeued
1579 ClientInfo::quotaPeekFd() const
1582 return quotaQueue
->front();
1585 /// returns the reservation ID of the first descriptor to be dequeued
1587 ClientInfo::quotaPeekReserv() const
1590 return quotaQueue
->outs
+ 1;
1593 /// queues a given fd, creating the queue if necessary; returns reservation ID
1595 ClientInfo::quotaEnqueue(int fd
)
1598 return quotaQueue
->enqueue(fd
);
1601 /// removes queue head
1603 ClientInfo::quotaDequeue()
1606 quotaQueue
->dequeue();
1610 ClientInfo::kickQuotaQueue()
1612 if (!eventWaiting
&& !selectWaiting
&& hasQueue()) {
1613 // wait at least a second if the bucket is empty
1614 const double delay
= (bucketSize
< 1.0) ? 1.0 : 0.0;
1615 eventAdd("commHandleWriteHelper", &commHandleWriteHelper
,
1616 quotaQueue
, delay
, 0, true);
1617 eventWaiting
= true;
1621 /// calculates how much to write for a single dequeued client
1623 ClientInfo::quotaForDequed()
1625 /* If we have multiple clients and give full bucketSize to each client then
1626 * clt1 may often get a lot more because clt1->clt2 time distance in the
1627 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1628 * We divide quota evenly to be more fair. */
1630 if (!rationedCount
) {
1631 rationedCount
= quotaQueue
->size() + 1;
1633 // The delay in ration recalculation _temporary_ deprives clients from
1634 // bytes that should have trickled in while rationedCount was positive.
1637 // Rounding errors do not accumulate here, but we round down to avoid
1638 // negative bucket sizes after write with rationedCount=1.
1639 rationedQuota
= static_cast<int>(floor(bucketSize
/rationedCount
));
1640 debugs(77,5, HERE
<< "new rationedQuota: " << rationedQuota
<<
1641 '*' << rationedCount
);
1645 debugs(77,7, HERE
<< "rationedQuota: " << rationedQuota
<<
1646 " rations remaining: " << rationedCount
);
1648 // update 'last seen' time to prevent clientdb GC from dropping us
1649 last_seen
= squid_curtime
;
1650 return rationedQuota
;
1653 ///< adds bytes to the quota bucket based on the rate and passed time
1655 ClientInfo::refillBucket()
1657 // all these times are in seconds, with double precision
1658 const double currTime
= current_dtime
;
1659 const double timePassed
= currTime
- prevTime
;
1661 // Calculate allowance for the time passed. Use double to avoid
1662 // accumulating rounding errors for small intervals. For example, always
1663 // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
1664 const double gain
= timePassed
* writeSpeedLimit
;
1666 debugs(77,5, HERE
<< currTime
<< " clt" << (const char*)hash
.key
<< ": " <<
1667 bucketSize
<< " + (" << timePassed
<< " * " << writeSpeedLimit
<<
1668 " = " << gain
<< ')');
1670 // to further combat error accumulation during micro updates,
1671 // quit before updating time if we cannot add at least one byte
1675 prevTime
= currTime
;
1677 // for "first" connections, drain initial fat before refilling but keep
1678 // updating prevTime to avoid bursts after the fat is gone
1679 if (bucketSize
> bucketSizeLimit
) {
1680 debugs(77,4, HERE
<< "not refilling while draining initial fat");
1686 // obey quota limits
1687 if (bucketSize
> bucketSizeLimit
)
1688 bucketSize
= bucketSizeLimit
;
1692 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit
, const double anInitialBurst
, const double aHighWatermark
)
1694 debugs(77,5, HERE
<< "Write limits for " << (const char*)hash
.key
<<
1695 " speed=" << aWriteSpeedLimit
<< " burst=" << anInitialBurst
<<
1696 " highwatermark=" << aHighWatermark
);
1698 // set or possibly update traffic shaping parameters
1699 writeLimitingActive
= true;
1700 writeSpeedLimit
= aWriteSpeedLimit
;
1701 bucketSizeLimit
= aHighWatermark
;
1703 // but some members should only be set once for a newly activated bucket
1704 if (firstTimeConnection
) {
1705 firstTimeConnection
= false;
1707 assert(!selectWaiting
);
1708 assert(!quotaQueue
);
1709 quotaQueue
= new CommQuotaQueue(this);
1711 bucketSize
= anInitialBurst
;
1712 prevTime
= current_dtime
;
1716 CommQuotaQueue::CommQuotaQueue(ClientInfo
*info
): clientInfo(info
),
1722 CommQuotaQueue::~CommQuotaQueue()
1724 assert(!clientInfo
); // ClientInfo should clear this before destroying us
1727 /// places the given fd at the end of the queue; returns reservation ID
1729 CommQuotaQueue::enqueue(int fd
)
1731 debugs(77,5, HERE
<< "clt" << (const char*)clientInfo
->hash
.key
<<
1732 ": FD " << fd
<< " with qqid" << (ins
+1) << ' ' << fds
.size());
1737 /// removes queue head
1739 CommQuotaQueue::dequeue()
1741 assert(!fds
.empty());
1742 debugs(77,5, HERE
<< "clt" << (const char*)clientInfo
->hash
.key
<<
1743 ": FD " << fds
.front() << " with qqid" << (outs
+1) << ' ' <<
/*
 * hm, this might be too general-purpose for all the places we'd
 * like to use it.
 */
/* Returns 1 for errno values that indicate a transient/retryable
 * condition (operation in progress, would-block, interrupted), 0
 * for real errors. */
int
ignoreErrno(int ierrno)
{
    switch (ierrno) {

    case EINPROGRESS:

    case EWOULDBLOCK:
#if EAGAIN != EWOULDBLOCK

    case EAGAIN:
#endif

    case EALREADY:

    case EINTR:
#ifdef ERESTART

    case ERESTART:
#endif

        return 1;

    default:
        return 0;
    }

    /* NOTREACHED */
}
1785 commCloseAllSockets(void)
1790 for (fd
= 0; fd
<= Biggest_FD
; fd
++) {
1796 if (F
->type
!= FD_SOCKET
)
1799 if (F
->flags
.ipc
) /* don't close inter-process sockets */
1802 if (F
->timeoutHandler
!= NULL
) {
1803 AsyncCall::Pointer callback
= F
->timeoutHandler
;
1804 F
->timeoutHandler
= NULL
;
1805 debugs(5, 5, "commCloseAllSockets: FD " << fd
<< ": Calling timeout handler");
1806 ScheduleCallHere(callback
);
1808 debugs(5, 5, "commCloseAllSockets: FD " << fd
<< ": calling comm_reset_close()");
1809 old_comm_reset_close(fd
);
1815 AlreadyTimedOut(fde
*F
)
1820 if (F
->timeout
== 0)
1823 if (F
->timeout
> squid_curtime
)
1830 writeTimedOut(int fd
)
1832 if (!COMMIO_FD_WRITECB(fd
)->active())
1835 if ((squid_curtime
- fd_table
[fd
].writeStart
) < Config
.Timeout
.write
)
1846 AsyncCall::Pointer callback
;
1848 for (fd
= 0; fd
<= Biggest_FD
; fd
++) {
1851 if (writeTimedOut(fd
)) {
1852 // We have an active write callback and we are timed out
1853 debugs(5, 5, "checkTimeouts: FD " << fd
<< " auto write timeout");
1854 commSetSelect(fd
, COMM_SELECT_WRITE
, NULL
, NULL
, 0);
1855 COMMIO_FD_WRITECB(fd
)->finish(COMM_ERROR
, ETIMEDOUT
);
1856 } else if (AlreadyTimedOut(F
))
1859 debugs(5, 5, "checkTimeouts: FD " << fd
<< " Expired");
1861 if (F
->timeoutHandler
!= NULL
) {
1862 debugs(5, 5, "checkTimeouts: FD " << fd
<< ": Call timeout handler");
1863 callback
= F
->timeoutHandler
;
1864 F
->timeoutHandler
= NULL
;
1865 ScheduleCallHere(callback
);
1867 debugs(5, 5, "checkTimeouts: FD " << fd
<< ": Forcing comm_close()");
1873 void CommIO::Initialise()
1875 /* Initialize done pipe signal */
1877 if (pipe(DonePipe
)) {}
1878 DoneFD
= DonePipe
[1];
1879 DoneReadFD
= DonePipe
[0];
1880 fd_open(DoneReadFD
, FD_PIPE
, "async-io completetion event: main");
1881 fd_open(DoneFD
, FD_PIPE
, "async-io completetion event: threads");
1882 commSetNonBlocking(DoneReadFD
);
1883 commSetNonBlocking(DoneFD
);
1884 commSetSelect(DoneReadFD
, COMM_SELECT_READ
, NULLFDHandler
, NULL
, 0);
1888 void CommIO::NotifyIOClose()
1890 /* Close done pipe signal */
1895 fd_close(DoneReadFD
);
1896 Initialised
= false;
// CommIO shared state; the pipe descriptors are set up by Initialise().
bool CommIO::Initialised = false;
bool CommIO::DoneSignalled = false;
int CommIO::DoneFD = -1;
int CommIO::DoneReadFD = -1;
1908 FD_READ_METHOD(DoneReadFD
, buf
, sizeof(buf
));
1912 CommIO::NULLFDHandler(int fd
, void *data
)
1915 commSetSelect(fd
, COMM_SELECT_READ
, NULLFDHandler
, NULL
, 0);
1919 CommIO::ResetNotifications()
1921 if (DoneSignalled
) {
1923 DoneSignalled
= false;
1927 /// Start waiting for a possibly half-closed connection to close
1928 // by scheduling a read callback to a monitoring handler that
1929 // will close the connection on read errors.
1931 commStartHalfClosedMonitor(int fd
)
1933 debugs(5, 5, HERE
<< "adding FD " << fd
<< " to " << *TheHalfClosed
);
1935 assert(!commHasHalfClosedMonitor(fd
));
1936 (void)TheHalfClosed
->add(fd
); // could also assert the result
1937 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1942 commPlanHalfClosedCheck()
1944 if (!WillCheckHalfClosed
&& !TheHalfClosed
->empty()) {
1945 eventAdd("commHalfClosedCheck", &commHalfClosedCheck
, NULL
, 1.0, 1);
1946 WillCheckHalfClosed
= true;
1950 /// iterates over all descriptors that may need half-closed tests and
1951 /// calls comm_read for those that do; re-schedules the check if needed
1954 commHalfClosedCheck(void *)
1956 debugs(5, 5, HERE
<< "checking " << *TheHalfClosed
);
1958 typedef DescriptorSet::const_iterator DSCI
;
1959 const DSCI end
= TheHalfClosed
->end();
1960 for (DSCI i
= TheHalfClosed
->begin(); i
!= end
; ++i
) {
1961 Comm::ConnectionPointer c
= new Comm::Connection
; // XXX: temporary. make HalfClosed a list of these.
1963 if (!fd_table
[c
->fd
].halfClosedReader
) { // not reading already
1964 AsyncCall::Pointer call
= commCbCall(5,4, "commHalfClosedReader",
1965 CommIoCbPtrFun(&commHalfClosedReader
, NULL
));
1966 comm_read(c
, NULL
, 0, call
);
1967 fd_table
[c
->fd
].halfClosedReader
= call
;
1969 c
->fd
= -1; // XXX: temporary. prevent c replacement erase closing listed FD
1972 WillCheckHalfClosed
= false; // as far as we know
1973 commPlanHalfClosedCheck(); // may need to check again
1976 /// checks whether we are waiting for possibly half-closed connection to close
1977 // We are monitoring if the read handler for the fd is the monitoring handler.
1979 commHasHalfClosedMonitor(int fd
)
1981 return TheHalfClosed
->has(fd
);
1984 /// stop waiting for possibly half-closed connection to close
1986 commStopHalfClosedMonitor(int const fd
)
1988 debugs(5, 5, HERE
<< "removing FD " << fd
<< " from " << *TheHalfClosed
);
1990 // cancel the read if one was scheduled
1991 AsyncCall::Pointer reader
= fd_table
[fd
].halfClosedReader
;
1993 comm_read_cancel(fd
, reader
);
1994 fd_table
[fd
].halfClosedReader
= NULL
;
1996 TheHalfClosed
->del(fd
);
1999 /// I/O handler for the possibly half-closed connection monitoring code
2001 commHalfClosedReader(const Comm::ConnectionPointer
&conn
, char *, size_t size
, comm_err_t flag
, int, void *)
2003 // there cannot be more data coming in on half-closed connections
2005 assert(conn
!= NULL
);
2006 assert(commHasHalfClosedMonitor(conn
->fd
)); // or we would have canceled the read
2008 fd_table
[conn
->fd
].halfClosedReader
= NULL
; // done reading, for now
2010 // nothing to do if fd is being closed
2011 if (flag
== COMM_ERR_CLOSING
)
2014 // if read failed, close the connection
2015 if (flag
!= COMM_OK
) {
2016 debugs(5, 3, HERE
<< "closing " << conn
);
2021 // continue waiting for close or error
2022 commPlanHalfClosedCheck(); // make sure this fd will be checked again
2026 CommRead::CommRead() : conn(NULL
), buf(NULL
), len(0), callback(NULL
) {}
2028 CommRead::CommRead(const Comm::ConnectionPointer
&c
, char *buf_
, int len_
, AsyncCall::Pointer
&callback_
)
2029 : conn(c
), buf(buf_
), len(len_
), callback(callback_
) {}
2031 DeferredRead::DeferredRead () : theReader(NULL
), theContext(NULL
), theRead(), cancelled(false) {}
2033 DeferredRead::DeferredRead (DeferrableRead
*aReader
, void *data
, CommRead
const &aRead
) : theReader(aReader
), theContext (data
), theRead(aRead
), cancelled(false) {}
2035 DeferredReadManager::~DeferredReadManager()
2038 assert (deferredReads
.empty());
2041 /* explicit instantiation required for some systems */
2043 /// \cond AUTODOCS-IGNORE
2044 template cbdata_type CbDataList
<DeferredRead
>::CBDATA_CbDataList
;
2048 DeferredReadManager::delayRead(DeferredRead
const &aRead
)
2050 debugs(5, 3, "Adding deferred read on " << aRead
.theRead
.conn
);
2051 CbDataList
<DeferredRead
> *temp
= deferredReads
.push_back(aRead
);
2053 // We have to use a global function as a closer and point to temp
2054 // instead of "this" because DeferredReadManager is not a job and
2055 // is not even cbdata protected
2056 AsyncCall::Pointer closer
= commCbCall(5,4,
2057 "DeferredReadManager::CloseHandler",
2058 CommCloseCbPtrFun(&CloseHandler
, temp
));
2059 comm_add_close_handler(aRead
.theRead
.conn
->fd
, closer
);
2060 temp
->element
.closer
= closer
; // remeber so that we can cancel
2064 DeferredReadManager::CloseHandler(int fd
, void *thecbdata
)
2066 if (!cbdataReferenceValid (thecbdata
))
2069 CbDataList
<DeferredRead
> *temp
= (CbDataList
<DeferredRead
> *)thecbdata
;
2071 temp
->element
.closer
= NULL
;
2072 temp
->element
.markCancelled();
2076 DeferredReadManager::popHead(CbDataListContainer
<DeferredRead
> &deferredReads
)
2078 assert (!deferredReads
.empty());
2080 DeferredRead
&read
= deferredReads
.head
->element
;
2081 if (!read
.cancelled
) {
2082 comm_remove_close_handler(read
.theRead
.conn
->fd
, read
.closer
);
2086 DeferredRead result
= deferredReads
.pop_front();
2092 DeferredReadManager::kickReads(int const count
)
2094 /* if we had CbDataList::size() we could consolidate this and flushReads */
2101 size_t remaining
= count
;
2103 while (!deferredReads
.empty() && remaining
) {
2104 DeferredRead aRead
= popHead(deferredReads
);
2107 if (!aRead
.cancelled
)
2113 DeferredReadManager::flushReads()
2115 CbDataListContainer
<DeferredRead
> reads
;
2116 reads
= deferredReads
;
2117 deferredReads
= CbDataListContainer
<DeferredRead
>();
2119 // XXX: For fairness this SHOULD randomize the order
2120 while (!reads
.empty()) {
2121 DeferredRead aRead
= popHead(reads
);
2127 DeferredReadManager::kickARead(DeferredRead
const &aRead
)
2129 if (aRead
.cancelled
)
2132 if (Comm::IsConnOpen(aRead
.theRead
.conn
) && fd_table
[aRead
.theRead
.conn
->fd
].closing())
2135 debugs(5, 3, "Kicking deferred read on " << aRead
.theRead
.conn
);
2137 aRead
.theReader(aRead
.theContext
, aRead
.theRead
);
2141 DeferredRead::markCancelled()
2147 CommSelectEngine::checkEvents(int timeout
)
2149 static time_t last_timeout
= 0;
2151 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
2152 if (squid_curtime
> last_timeout
) {
2153 last_timeout
= squid_curtime
;
2157 switch (comm_select(timeout
)) {
2173 fatal_dump("comm.cc: Internal error -- this should never happen.");
2178 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
2180 comm_open_uds(int sock_type
,
2182 struct sockaddr_un
* addr
,
2185 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
2189 PROF_start(comm_open
);
2190 /* Create socket for accepting new connections. */
2191 statCounter
.syscalls
.sock
.sockets
++;
2193 /* Setup the socket addrinfo details for use */
2196 AI
.ai_family
= PF_UNIX
;
2197 AI
.ai_socktype
= sock_type
;
2198 AI
.ai_protocol
= proto
;
2199 AI
.ai_addrlen
= SUN_LEN(addr
);
2200 AI
.ai_addr
= (sockaddr
*)addr
;
2201 AI
.ai_canonname
= NULL
;
2204 debugs(50, 3, HERE
<< "Attempt open socket for: " << addr
->sun_path
);
2206 if ((new_socket
= socket(AI
.ai_family
, AI
.ai_socktype
, AI
.ai_protocol
)) < 0) {
2207 /* Increase the number of reserved fd's if calls to socket()
2208 * are failing because the open file table is full. This
2209 * limits the number of simultaneous clients */
2211 if (limitError(errno
)) {
2212 debugs(50, DBG_IMPORTANT
, HERE
<< "socket failure: " << xstrerror());
2215 debugs(50, DBG_CRITICAL
, HERE
<< "socket failure: " << xstrerror());
2218 PROF_stop(comm_open
);
2222 debugs(50, 3, HERE
"Opened UDS FD " << new_socket
<< " : family=" << AI
.ai_family
<< ", type=" << AI
.ai_socktype
<< ", protocol=" << AI
.ai_protocol
);
2225 debugs(50, 5, HERE
<< "FD " << new_socket
<< " is a new socket");
2227 assert(!isOpen(new_socket
));
2228 fd_open(new_socket
, FD_MSGHDR
, NULL
);
2230 fdd_table
[new_socket
].close_file
= NULL
;
2232 fdd_table
[new_socket
].close_line
= 0;
2234 fd_table
[new_socket
].sock_family
= AI
.ai_family
;
2236 if (!(flags
& COMM_NOCLOEXEC
))
2237 commSetCloseOnExec(new_socket
);
2239 if (flags
& COMM_REUSEADDR
)
2240 commSetReuseAddr(new_socket
);
2242 if (flags
& COMM_NONBLOCKING
) {
2243 if (commSetNonBlocking(new_socket
) != COMM_OK
) {
2244 comm_close(new_socket
);
2245 PROF_stop(comm_open
);
2250 if (flags
& COMM_DOBIND
) {
2251 if (commBind(new_socket
, AI
) != COMM_OK
) {
2252 comm_close(new_socket
);
2253 PROF_stop(comm_open
);
2259 if (sock_type
== SOCK_STREAM
)
2260 commSetTcpNoDelay(new_socket
);
2264 if (Config
.tcpRcvBufsz
> 0 && sock_type
== SOCK_STREAM
)
2265 commSetTcpRcvbuf(new_socket
, Config
.tcpRcvBufsz
);
2267 PROF_stop(comm_open
);