2 * DEBUG: section 05 Socket Functions
3 * AUTHOR: Harvest Derived
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
32 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
36 #include "ClientInfo.h"
37 #include "comm/AcceptLimiter.h"
38 #include "comm/comm_internal.h"
39 #include "comm/Connection.h"
40 #include "comm/IoCallback.h"
41 #include "comm/Loops.h"
42 #include "comm/TcpAcceptor.h"
43 #include "comm/Write.h"
45 #include "compat/cmsg.h"
46 #include "DescriptorSet.h"
51 #include "icmp/net_db.h"
52 #include "ip/Intercept.h"
53 #include "ip/QosConfig.h"
56 #include "profiler/Profiler.h"
58 #include "SquidConfig.h"
59 #include "StatCounters.h"
60 #include "StoreIOBuffer.h"
64 #include "ssl/support.h"
70 #include <sys/ioctl.h>
72 #ifdef HAVE_NETINET_TCP_H
73 #include <netinet/tcp.h>
80 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
83 static void commStopHalfClosedMonitor(int fd
);
84 static IOCB commHalfClosedReader
;
85 static void comm_init_opened(const Comm::ConnectionPointer
&conn
, tos_t tos
, nfmark_t nfmark
, const char *note
, struct addrinfo
*AI
);
86 static int comm_apply_flags(int new_socket
, Ip::Address
&addr
, int flags
, struct addrinfo
*AI
);
89 CBDATA_CLASS_INIT(CommQuotaQueue
);
91 static void commHandleWriteHelper(void * data
);
96 static DescriptorSet
*TheHalfClosed
= NULL
; /// the set of half-closed FDs
97 static bool WillCheckHalfClosed
= false; /// true if check is scheduled
98 static EVH commHalfClosedCheck
;
99 static void commPlanHalfClosedCheck();
101 static comm_err_t
commBind(int s
, struct addrinfo
&);
102 static void commSetReuseAddr(int);
103 static void commSetNoLinger(int);
105 static void commSetTcpNoDelay(int);
107 static void commSetTcpRcvbuf(int, int);
109 fd_debug_t
*fdd_table
= NULL
;
114 return fd
>= 0 && fd_table
&& fd_table
[fd
].flags
.open
!= 0;
120 * If the read attempt succeeds or fails, call the callback.
121 * Else, wait for another IO notification.
124 commHandleRead(int fd
, void *data
)
126 Comm::IoCallback
*ccb
= (Comm::IoCallback
*) data
;
128 assert(data
== COMMIO_FD_READCB(fd
));
129 assert(ccb
->active());
131 ++ statCounter
.syscalls
.sock
.reads
;
135 retval
= FD_READ_METHOD(fd
, ccb
->buf
, ccb
->size
);
136 debugs(5, 3, "char FD " << fd
<< ", size " << ccb
->size
<< ", retval " << retval
<< ", errno " << errno
);
138 assert(ccb
->buf2
!= NULL
);
139 SBuf::size_type sz
= ccb
->buf2
->spaceSize();
140 char *buf
= ccb
->buf2
->rawSpace(sz
);
141 retval
= FD_READ_METHOD(fd
, buf
, sz
-1); // blocking synchronous read(2)
143 ccb
->buf2
->append(buf
, retval
);
145 debugs(5, 3, "SBuf FD " << fd
<< ", size " << sz
<< ", retval " << retval
<< ", errno " << errno
);
148 if (retval
< 0 && !ignoreErrno(errno
)) {
149 debugs(5, 3, "comm_read_try: scheduling COMM_ERROR");
151 ccb
->finish(COMM_ERROR
, errno
);
155 /* See if we read anything */
156 /* Note - read 0 == socket EOF, which is a valid read */
158 fd_bytes(fd
, retval
, FD_READ
);
159 ccb
->offset
= retval
;
160 ccb
->finish(COMM_OK
, errno
);
164 /* Nope, register for some more IO */
165 Comm::SetSelect(fd
, COMM_SELECT_READ
, commHandleRead
, data
, 0);
169 * Queue a read. handler/handler_data are called when the read
170 * completes, on error, or on file descriptor close.
173 comm_read(const Comm::ConnectionPointer
&conn
, char *buf
, int size
, AsyncCall::Pointer
&callback
)
175 debugs(5, 5, "comm_read, queueing read for " << conn
<< "; asynCall " << callback
);
177 /* Make sure we are open and not closing */
178 assert(Comm::IsConnOpen(conn
));
179 assert(!fd_table
[conn
->fd
].closing());
180 Comm::IoCallback
*ccb
= COMMIO_FD_READCB(conn
->fd
);
182 // Make sure we are either not reading or just passively monitoring.
183 // Active/passive conflicts are OK and simply cancel passive monitoring.
185 // if the assertion below fails, we have an active comm_read conflict
186 assert(fd_table
[conn
->fd
].halfClosedReader
!= NULL
);
187 commStopHalfClosedMonitor(conn
->fd
);
188 assert(!ccb
->active());
193 ccb
->setCallback(Comm::IOCB_READ
, callback
, (char *)buf
, NULL
, size
);
194 Comm::SetSelect(conn
->fd
, COMM_SELECT_READ
, commHandleRead
, ccb
, 0);
198 * Queue a read. handler/handler_data are called when the read
199 * completes, on error, or on file descriptor close.
202 comm_read(const Comm::ConnectionPointer
&conn
, SBuf
&buf
, AsyncCall::Pointer
&callback
)
204 debugs(5, 5, "comm_read, queueing read for " << conn
<< "; asynCall " << callback
);
206 /* Make sure we are open and not closing */
207 assert(Comm::IsConnOpen(conn
));
208 assert(!fd_table
[conn
->fd
].closing());
209 Comm::IoCallback
*ccb
= COMMIO_FD_READCB(conn
->fd
);
211 // Make sure we are either not reading or just passively monitoring.
212 // Active/passive conflicts are OK and simply cancel passive monitoring.
214 // if the assertion below fails, we have an active comm_read conflict
215 assert(fd_table
[conn
->fd
].halfClosedReader
!= NULL
);
216 commStopHalfClosedMonitor(conn
->fd
);
217 assert(!ccb
->active());
223 ccb
->setCallback(Comm::IOCB_READ
, callback
, NULL
, NULL
, buf
.spaceSize());
224 Comm::SetSelect(conn
->fd
, COMM_SELECT_READ
, commHandleRead
, ccb
, 0);
228 * Empty the read buffers
230 * This is a magical routine that empties the read buffers.
231 * Under some platforms (Linux) if a buffer has data in it before
232 * you call close(), the socket will hang and take quite a while
236 comm_empty_os_read_buffers(int fd
)
239 /* prevent those nasty RST packets */
240 char buf
[SQUID_TCP_SO_RCVBUF
];
242 if (fd_table
[fd
].flags
.nonblocking
) {
243 while (FD_READ_METHOD(fd
, buf
, SQUID_TCP_SO_RCVBUF
) > 0) {};
249 * Return whether the FD has a pending completed callback.
253 comm_has_pending_read_callback(int fd
)
256 // XXX: We do not know whether there is a read callback scheduled.
257 // This is used for pconn management that should probably be more
258 // tightly integrated into comm to minimize the chance that a
259 // closing pconn socket will be used for a new transaction.
263 // Does comm check this fd for read readiness?
264 // Note that when comm is not monitoring, there can be a pending callback
265 // call, which may resume comm monitoring once fired.
267 comm_monitors_read(int fd
)
269 assert(isOpen(fd
) && COMMIO_FD_READCB(fd
));
270 // Being active is usually the same as monitoring because we always
271 // start monitoring the FD when we configure Comm::IoCallback for I/O
272 // and we usually configure Comm::IoCallback for I/O when we starting
273 // monitoring a FD for reading.
274 return COMMIO_FD_READCB(fd
)->active();
278 * Cancel a pending read. Assert that we have the right parameters,
279 * and that there are no pending read events!
281 * XXX: We do not assert that there are no pending read events and
282 * with async calls it becomes even more difficult.
283 * The whole interface should be reworked to do callback->cancel()
284 * instead of searching for places where the callback may be stored and
285 * updating the state of those places.
287 * AHC Don't call the comm handlers?
290 comm_read_cancel(int fd
, IOCB
*callback
, void *data
)
293 debugs(5, 4, "comm_read_cancel fails: FD " << fd
<< " closed");
297 Comm::IoCallback
*cb
= COMMIO_FD_READCB(fd
);
298 // TODO: is "active" == "monitors FD"?
300 debugs(5, 4, "comm_read_cancel fails: FD " << fd
<< " inactive");
304 typedef CommCbFunPtrCallT
<CommIoCbPtrFun
> Call
;
305 Call
*call
= dynamic_cast<Call
*>(cb
->callback
.getRaw());
307 debugs(5, 4, "comm_read_cancel fails: FD " << fd
<< " lacks callback");
311 call
->cancel("old comm_read_cancel");
313 typedef CommIoCbParams Params
;
314 const Params
¶ms
= GetCommParams
<Params
>(cb
->callback
);
316 /* Ok, we can be reasonably sure we won't lose any data here! */
317 assert(call
->dialer
.handler
== callback
);
318 assert(params
.data
== data
);
320 /* Delete the callback */
321 cb
->cancel("old comm_read_cancel");
323 /* And the IO event */
324 Comm::SetSelect(fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
328 comm_read_cancel(int fd
, AsyncCall::Pointer
&callback
)
330 callback
->cancel("comm_read_cancel");
333 debugs(5, 4, "comm_read_cancel fails: FD " << fd
<< " closed");
337 Comm::IoCallback
*cb
= COMMIO_FD_READCB(fd
);
340 debugs(5, 4, "comm_read_cancel fails: FD " << fd
<< " inactive");
344 AsyncCall::Pointer call
= cb
->callback
;
345 assert(call
!= NULL
); // XXX: should never fail (active() checks for callback==NULL)
347 /* Ok, we can be reasonably sure we won't lose any data here! */
348 assert(call
== callback
);
350 /* Delete the callback */
351 cb
->cancel("comm_read_cancel");
353 /* And the IO event */
354 Comm::SetSelect(fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
358 * synchronous wrapper around udp socket functions
361 comm_udp_recvfrom(int fd
, void *buf
, size_t len
, int flags
, Ip::Address
&from
)
363 ++ statCounter
.syscalls
.sock
.recvfroms
;
364 debugs(5,8, "comm_udp_recvfrom: FD " << fd
<< " from " << from
);
365 struct addrinfo
*AI
= NULL
;
366 Ip::Address::InitAddrInfo(AI
);
367 int x
= recvfrom(fd
, buf
, len
, flags
, AI
->ai_addr
, &AI
->ai_addrlen
);
369 Ip::Address::FreeAddrInfo(AI
);
374 comm_udp_recv(int fd
, void *buf
, size_t len
, int flags
)
377 return comm_udp_recvfrom(fd
, buf
, len
, flags
, nul
);
381 comm_udp_send(int s
, const void *buf
, size_t len
, int flags
)
383 return send(s
, buf
, len
, flags
);
387 comm_has_incomplete_write(int fd
)
389 assert(isOpen(fd
) && COMMIO_FD_WRITECB(fd
));
390 return COMMIO_FD_WRITECB(fd
)->active();
394 * Queue a write. handler/handler_data are called when the write fully
395 * completes, on error, or on file descriptor close.
398 /* Return the local port associated with fd. */
400 comm_local_port(int fd
)
403 struct addrinfo
*addr
= NULL
;
404 fde
*F
= &fd_table
[fd
];
406 /* If the fd is closed already, just return */
408 if (!F
->flags
.open
) {
409 debugs(5, 0, "comm_local_port: FD " << fd
<< " has been closed.");
413 if (F
->local_addr
.port())
414 return F
->local_addr
.port();
416 if (F
->sock_family
== AF_INET
)
419 Ip::Address::InitAddrInfo(addr
);
421 if (getsockname(fd
, addr
->ai_addr
, &(addr
->ai_addrlen
)) ) {
422 debugs(50, DBG_IMPORTANT
, "comm_local_port: Failed to retrieve TCP/UDP port number for socket: FD " << fd
<< ": " << xstrerror());
423 Ip::Address::FreeAddrInfo(addr
);
428 Ip::Address::FreeAddrInfo(addr
);
430 if (F
->local_addr
.isAnyAddr()) {
431 /* save the whole local address, not just the port. */
432 F
->local_addr
= temp
;
434 F
->local_addr
.port(temp
.port());
437 debugs(5, 6, "comm_local_port: FD " << fd
<< ": port " << F
->local_addr
.port() << "(family=" << F
->sock_family
<< ")");
438 return F
->local_addr
.port();
442 commBind(int s
, struct addrinfo
&inaddr
)
444 ++ statCounter
.syscalls
.sock
.binds
;
446 if (bind(s
, inaddr
.ai_addr
, inaddr
.ai_addrlen
) == 0) {
447 debugs(50, 6, "commBind: bind socket FD " << s
<< " to " << fd_table
[s
].local_addr
);
451 debugs(50, 0, "commBind: Cannot bind socket FD " << s
<< " to " << fd_table
[s
].local_addr
<< ": " << xstrerror());
457 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
458 * is OR of flags specified in comm.h. Defaults TOS
461 comm_open(int sock_type
,
467 return comm_openex(sock_type
, proto
, addr
, flags
, 0, 0, note
);
471 comm_open_listener(int sock_type
,
473 Comm::ConnectionPointer
&conn
,
476 /* all listener sockets require bind() */
477 conn
->flags
|= COMM_DOBIND
;
479 /* attempt native enabled port. */
480 conn
->fd
= comm_openex(sock_type
, proto
, conn
->local
, conn
->flags
, 0, 0, note
);
484 comm_open_listener(int sock_type
,
492 /* all listener sockets require bind() */
493 flags
|= COMM_DOBIND
;
495 /* attempt native enabled port. */
496 sock
= comm_openex(sock_type
, proto
, addr
, flags
, 0, 0, note
);
502 limitError(int const anErrno
)
504 return anErrno
== ENFILE
|| anErrno
== EMFILE
;
508 comm_set_v6only(int fd
, int tos
)
511 if (setsockopt(fd
, IPPROTO_IPV6
, IPV6_V6ONLY
, (char *) &tos
, sizeof(int)) < 0) {
512 debugs(50, DBG_IMPORTANT
, "comm_open: setsockopt(IPV6_V6ONLY) " << (tos
?"ON":"OFF") << " for FD " << fd
<< ": " << xstrerror());
515 debugs(50, 0, "WARNING: comm_open: setsockopt(IPV6_V6ONLY) not supported on this platform");
520 * Set the socket option required for TPROXY spoofing for:
521 * - Linux TPROXY v4 support,
522 * - OpenBSD divert-to support,
523 * - FreeBSD IPFW TPROXY v4 support.
526 comm_set_transparent(int fd
)
528 #if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
529 # define soLevel SOL_IP
530 # define soFlag IP_TRANSPARENT
531 bool doneSuid
= false;
533 #elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
534 # define soLevel SOL_SOCKET
535 # define soFlag SO_BINDANY
537 bool doneSuid
= true;
539 #elif defined(IP_BINDANY) // FreeBSD with IPFW
540 # define soLevel IPPROTO_IP
541 # define soFlag IP_BINDANY
543 bool doneSuid
= true;
546 debugs(50, DBG_CRITICAL
, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
549 #if defined(soLevel) && defined(soFlag)
551 if (setsockopt(fd
, soLevel
, soFlag
, (char *) &tos
, sizeof(int)) < 0) {
552 debugs(50, DBG_IMPORTANT
, "comm_open: setsockopt(TPROXY) on FD " << fd
<< ": " << xstrerror());
554 /* mark the socket as having transparent options */
555 fd_table
[fd
].flags
.transparent
= true;
563 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
564 * is OR of flags specified in defines.h:COMM_*
567 comm_openex(int sock_type
,
576 struct addrinfo
*AI
= NULL
;
578 PROF_start(comm_open
);
579 /* Create socket for accepting new connections. */
580 ++ statCounter
.syscalls
.sock
.sockets
;
582 /* Setup the socket addrinfo details for use */
583 addr
.getAddrInfo(AI
);
584 AI
->ai_socktype
= sock_type
;
585 AI
->ai_protocol
= proto
;
587 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr
);
589 new_socket
= socket(AI
->ai_family
, AI
->ai_socktype
, AI
->ai_protocol
);
591 /* under IPv6 there is the possibility IPv6 is present but disabled. */
592 /* try again as IPv4-native if possible */
593 if ( new_socket
< 0 && Ip::EnableIpv6
&& addr
.isIPv6() && addr
.setIPv4() ) {
594 /* attempt to open this IPv4-only. */
595 Ip::Address::FreeAddrInfo(AI
);
596 /* Setup the socket addrinfo details for use */
597 addr
.getAddrInfo(AI
);
598 AI
->ai_socktype
= sock_type
;
599 AI
->ai_protocol
= proto
;
600 debugs(50, 3, "comm_openex: Attempt fallback open socket for: " << addr
);
601 new_socket
= socket(AI
->ai_family
, AI
->ai_socktype
, AI
->ai_protocol
);
602 debugs(50, 2, HERE
<< "attempt open " << note
<< " socket on: " << addr
);
605 if (new_socket
< 0) {
606 /* Increase the number of reserved fd's if calls to socket()
607 * are failing because the open file table is full. This
608 * limits the number of simultaneous clients */
610 if (limitError(errno
)) {
611 debugs(50, DBG_IMPORTANT
, "comm_open: socket failure: " << xstrerror());
614 debugs(50, DBG_CRITICAL
, "comm_open: socket failure: " << xstrerror());
617 Ip::Address::FreeAddrInfo(AI
);
619 PROF_stop(comm_open
);
623 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
624 Comm::ConnectionPointer conn
= new Comm::Connection
;
626 conn
->fd
= new_socket
;
628 debugs(50, 3, "comm_openex: Opened socket " << conn
<< " : family=" << AI
->ai_family
<< ", type=" << AI
->ai_socktype
<< ", protocol=" << AI
->ai_protocol
);
630 /* set TOS if needed */
632 Ip::Qos::setSockTos(conn
, tos
);
634 /* set netfilter mark if needed */
636 Ip::Qos::setSockNfmark(conn
, nfmark
);
638 if ( Ip::EnableIpv6
&IPV6_SPECIAL_SPLITSTACK
&& addr
.isIPv6() )
639 comm_set_v6only(conn
->fd
, 1);
641 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
642 /* Other OS may have this administratively disabled for general use. Same deal. */
643 if ( Ip::EnableIpv6
&IPV6_SPECIAL_V4MAPPING
&& addr
.isIPv6() )
644 comm_set_v6only(conn
->fd
, 0);
646 comm_init_opened(conn
, tos
, nfmark
, note
, AI
);
647 new_socket
= comm_apply_flags(conn
->fd
, addr
, flags
, AI
);
649 Ip::Address::FreeAddrInfo(AI
);
651 PROF_stop(comm_open
);
653 // XXX transition only. prevent conn from closing the new FD on function exit.
658 /// update FD tables after a local or remote (IPC) comm_openex();
660 comm_init_opened(const Comm::ConnectionPointer
&conn
,
666 assert(Comm::IsConnOpen(conn
));
670 debugs(5, 5, HERE
<< conn
<< " is a new socket");
672 assert(!isOpen(conn
->fd
)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
673 fd_open(conn
->fd
, FD_SOCKET
, note
);
675 fdd_table
[conn
->fd
].close_file
= NULL
;
676 fdd_table
[conn
->fd
].close_line
= 0;
678 fde
*F
= &fd_table
[conn
->fd
];
679 F
->local_addr
= conn
->local
;
680 F
->tosToServer
= tos
;
682 F
->nfmarkToServer
= nfmark
;
684 F
->sock_family
= AI
->ai_family
;
687 /// apply flags after a local comm_open*() call;
688 /// returns new_socket or -1 on error
690 comm_apply_flags(int new_socket
,
695 assert(new_socket
>= 0);
697 const int sock_type
= AI
->ai_socktype
;
699 if (!(flags
& COMM_NOCLOEXEC
))
700 commSetCloseOnExec(new_socket
);
702 if ((flags
& COMM_REUSEADDR
))
703 commSetReuseAddr(new_socket
);
705 if (addr
.port() > (unsigned short) 0) {
707 if (sock_type
!= SOCK_DGRAM
)
709 commSetNoLinger(new_socket
);
712 commSetReuseAddr(new_socket
);
715 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
716 if ((flags
& COMM_TRANSPARENT
)) {
717 comm_set_transparent(new_socket
);
720 if ( (flags
& COMM_DOBIND
) || addr
.port() > 0 || !addr
.isAnyAddr() ) {
721 if ( !(flags
& COMM_DOBIND
) && addr
.isAnyAddr() )
722 debugs(5, DBG_IMPORTANT
,"WARNING: Squid is attempting to bind() port " << addr
<< " without being a listener.");
723 if ( addr
.isNoAddr() )
724 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr
<< "!!");
726 if (commBind(new_socket
, *AI
) != COMM_OK
) {
727 comm_close(new_socket
);
732 if (flags
& COMM_NONBLOCKING
)
733 if (commSetNonBlocking(new_socket
) == COMM_ERROR
) {
734 comm_close(new_socket
);
739 if (sock_type
== SOCK_STREAM
)
740 commSetTcpNoDelay(new_socket
);
744 if (Config
.tcpRcvBufsz
> 0 && sock_type
== SOCK_STREAM
)
745 commSetTcpRcvbuf(new_socket
, Config
.tcpRcvBufsz
);
751 comm_import_opened(const Comm::ConnectionPointer
&conn
,
755 debugs(5, 2, HERE
<< conn
);
756 assert(Comm::IsConnOpen(conn
));
759 comm_init_opened(conn
, 0, 0, note
, AI
);
761 if (!(conn
->flags
& COMM_NOCLOEXEC
))
762 fd_table
[conn
->fd
].flags
.close_on_exec
= true;
764 if (conn
->local
.port() > (unsigned short) 0) {
766 if (AI
->ai_socktype
!= SOCK_DGRAM
)
768 fd_table
[conn
->fd
].flags
.nolinger
= true;
771 if ((conn
->flags
& COMM_TRANSPARENT
))
772 fd_table
[conn
->fd
].flags
.transparent
= true;
774 if (conn
->flags
& COMM_NONBLOCKING
)
775 fd_table
[conn
->fd
].flags
.nonblocking
= true;
778 if (AI
->ai_socktype
== SOCK_STREAM
)
779 fd_table
[conn
->fd
].flags
.nodelay
= true;
782 /* no fd_table[fd].flags. updates needed for these conditions:
783 * if ((flags & COMM_REUSEADDR)) ...
784 * if ((flags & COMM_DOBIND) ...) ...
788 // XXX: now that raw-FD timeouts are only unset for pipes and files this SHOULD be a no-op.
789 // With handler already unset. Leaving this present until that can be verified for all code paths.
791 commUnsetFdTimeout(int fd
)
793 debugs(5, 3, HERE
<< "Remove timeout for FD " << fd
);
795 assert(fd
< Squid_MaxFD
);
796 fde
*F
= &fd_table
[fd
];
797 assert(F
->flags
.open
);
799 F
->timeoutHandler
= NULL
;
804 commSetConnTimeout(const Comm::ConnectionPointer
&conn
, int timeout
, AsyncCall::Pointer
&callback
)
806 debugs(5, 3, HERE
<< conn
<< " timeout " << timeout
);
807 assert(Comm::IsConnOpen(conn
));
808 assert(conn
->fd
< Squid_MaxFD
);
809 fde
*F
= &fd_table
[conn
->fd
];
810 assert(F
->flags
.open
);
813 F
->timeoutHandler
= NULL
;
816 if (callback
!= NULL
) {
817 typedef CommTimeoutCbParams Params
;
818 Params
¶ms
= GetCommParams
<Params
>(callback
);
820 F
->timeoutHandler
= callback
;
823 F
->timeout
= squid_curtime
+ (time_t) timeout
;
830 commUnsetConnTimeout(const Comm::ConnectionPointer
&conn
)
832 debugs(5, 3, HERE
<< "Remove timeout for " << conn
);
833 AsyncCall::Pointer nil
;
834 return commSetConnTimeout(conn
, -1, nil
);
838 comm_connect_addr(int sock
, const Ip::Address
&address
)
840 comm_err_t status
= COMM_OK
;
841 fde
*F
= &fd_table
[sock
];
845 struct addrinfo
*AI
= NULL
;
846 PROF_start(comm_connect_addr
);
848 assert(address
.port() != 0);
850 debugs(5, 9, HERE
<< "connecting socket FD " << sock
<< " to " << address
<< " (want family: " << F
->sock_family
<< ")");
852 /* Handle IPv6 over IPv4-only socket case.
853 * this case must presently be handled here since the getAddrInfo asserts on bad mappings.
854 * NP: because commResetFD is private to ConnStateData we have to return an error and
855 * trust its handled properly.
857 if (F
->sock_family
== AF_INET
&& !address
.isIPv4()) {
859 return COMM_ERR_PROTOCOL
;
862 /* Handle IPv4 over IPv6-only socket case.
863 * This case is presently handled here as it's both a known case and it's
864 * uncertain what error will be returned by the IPv6 stack in such case. It's
865 * possible this will also be handled by the errno checks below after connect()
866 * but needs carefull cross-platform verification, and verifying the address
867 * condition here is simple.
869 if (!F
->local_addr
.isIPv4() && address
.isIPv4()) {
871 return COMM_ERR_PROTOCOL
;
874 address
.getAddrInfo(AI
, F
->sock_family
);
876 /* Establish connection. */
879 if (!F
->flags
.called_connect
) {
880 F
->flags
.called_connect
= true;
881 ++ statCounter
.syscalls
.sock
.connects
;
883 x
= connect(sock
, AI
->ai_addr
, AI
->ai_addrlen
);
885 // XXX: ICAP code refuses callbacks during a pending comm_ call
886 // Async calls development will fix this.
893 debugs(5,5, "comm_connect_addr: sock=" << sock
<< ", addrinfo( " <<
894 " flags=" << AI
->ai_flags
<<
895 ", family=" << AI
->ai_family
<<
896 ", socktype=" << AI
->ai_socktype
<<
897 ", protocol=" << AI
->ai_protocol
<<
898 ", &addr=" << AI
->ai_addr
<<
899 ", addrlen=" << AI
->ai_addrlen
<<
901 debugs(5, 9, "connect FD " << sock
<< ": (" << x
<< ") " << xstrerror());
902 debugs(14,9, "connecting to: " << address
);
906 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
908 connect(sock
, AI
->ai_addr
, AI
->ai_addrlen
);
910 if (errno
== EINVAL
) {
911 errlen
= sizeof(err
);
912 x
= getsockopt(sock
, SOL_SOCKET
, SO_ERROR
, &err
, &errlen
);
919 errlen
= sizeof(err
);
921 x
= getsockopt(sock
, SOL_SOCKET
, SO_ERROR
, &err
, &errlen
);
928 * Solaris 2.4's socket emulation doesn't allow you
929 * to determine the error from a failed non-blocking
930 * connect and just returns EPIPE. Create a fake
931 * error message for connect. -- fenner@parc.xerox.com
933 if (x
< 0 && errno
== EPIPE
)
941 Ip::Address::FreeAddrInfo(AI
);
943 PROF_stop(comm_connect_addr
);
945 if (errno
== 0 || errno
== EISCONN
)
947 else if (ignoreErrno(errno
))
948 status
= COMM_INPROGRESS
;
949 else if (errno
== EAFNOSUPPORT
|| errno
== EINVAL
)
950 return COMM_ERR_PROTOCOL
;
954 address
.toStr(F
->ipaddr
, MAX_IPSTRLEN
);
956 F
->remote_port
= address
.port(); /* remote_port is HS */
958 if (status
== COMM_OK
) {
959 debugs(5, DBG_DATA
, "comm_connect_addr: FD " << sock
<< " connected to " << address
);
960 } else if (status
== COMM_INPROGRESS
) {
961 debugs(5, DBG_DATA
, "comm_connect_addr: FD " << sock
<< " connection pending");
968 commCallCloseHandlers(int fd
)
970 fde
*F
= &fd_table
[fd
];
971 debugs(5, 5, "commCallCloseHandlers: FD " << fd
);
973 while (F
->closeHandler
!= NULL
) {
974 AsyncCall::Pointer call
= F
->closeHandler
;
975 F
->closeHandler
= call
->Next();
977 // If call is not canceled schedule it for execution else ignore it
978 if (!call
->canceled()) {
979 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call
);
980 ScheduleCallHere(call
);
987 commLingerClose(int fd
, void *unused
)
989 LOCAL_ARRAY(char, buf
, 1024);
991 n
= FD_READ_METHOD(fd
, buf
, 1024);
994 debugs(5, 3, "commLingerClose: FD " << fd
<< " read: " << xstrerror());
1000 commLingerTimeout(const FdeCbParams
¶ms
)
1002 debugs(5, 3, "commLingerTimeout: FD " << params
.fd
);
1003 comm_close(params
.fd
);
1007 * Inspired by apache
1010 comm_lingering_close(int fd
)
1013 if (fd_table
[fd
].ssl
)
1014 ssl_shutdown_method(fd_table
[fd
].ssl
);
1017 if (shutdown(fd
, 1) < 0) {
1022 fd_note(fd
, "lingering close");
1023 AsyncCall::Pointer call
= commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout
, NULL
));
1025 debugs(5, 3, HERE
<< "FD " << fd
<< " timeout " << timeout
);
1026 assert(fd_table
[fd
].flags
.open
);
1027 if (callback
!= NULL
) {
1028 typedef FdeCbParams Params
;
1029 Params
¶ms
= GetCommParams
<Params
>(callback
);
1031 fd_table
[fd
].timeoutHandler
= callback
;
1032 fd_table
[fd
].timeout
= squid_curtime
+ static_cast<time_t>(10);
1035 Comm::SetSelect(fd
, COMM_SELECT_READ
, commLingerClose
, NULL
, 0);
1041 * enable linger with time of 0 so that when the socket is
1042 * closed, TCP generates a RESET
1045 comm_reset_close(const Comm::ConnectionPointer
&conn
)
1051 if (setsockopt(conn
->fd
, SOL_SOCKET
, SO_LINGER
, (char *) &L
, sizeof(L
)) < 0)
1052 debugs(50, DBG_CRITICAL
, "ERROR: Closing " << conn
<< " with TCP RST: " << xstrerror());
1057 // Legacy close function.
1059 old_comm_reset_close(int fd
)
1065 if (setsockopt(fd
, SOL_SOCKET
, SO_LINGER
, (char *) &L
, sizeof(L
)) < 0)
1066 debugs(50, DBG_CRITICAL
, "ERROR: Closing FD " << fd
<< " with TCP RST: " << xstrerror());
1073 commStartSslClose(const FdeCbParams
¶ms
)
1075 assert(&fd_table
[params
.fd
].ssl
);
1076 ssl_shutdown_method(fd_table
[params
.fd
].ssl
);
1081 comm_close_complete(const FdeCbParams
¶ms
)
1084 fde
*F
= &fd_table
[params
.fd
];
1091 if (F
->dynamicSslContext
) {
1092 SSL_CTX_free(F
->dynamicSslContext
);
1093 F
->dynamicSslContext
= NULL
;
1096 fd_close(params
.fd
); /* update fdstat */
1099 ++ statCounter
.syscalls
.sock
.closes
;
1101 /* When one connection closes, give accept() a chance, if need be */
1102 Comm::AcceptLimiter::Instance().kick();
1106 * Close the socket fd.
1108 * + call write handlers with ERR_CLOSING
1109 * + call read handlers with ERR_CLOSING
1110 * + call closing handlers
1112 * NOTE: COMM_ERR_CLOSING will NOT be called for CommReads' sitting in a
1113 * DeferredReadManager.
1116 _comm_close(int fd
, char const *file
, int line
)
1118 debugs(5, 3, "comm_close: start closing FD " << fd
);
1120 assert(fd
< Squid_MaxFD
);
1122 fde
*F
= &fd_table
[fd
];
1123 fdd_table
[fd
].close_file
= file
;
1124 fdd_table
[fd
].close_line
= line
;
1129 /* XXX: is this obsolete behind F->closing() ? */
1130 if ( (shutting_down
|| reconfiguring
) && (!F
->flags
.open
|| F
->type
== FD_FILE
))
1133 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
1135 debugs(50, DBG_IMPORTANT
, HERE
<< "BUG 3556: FD " << fd
<< " is not an open socket.");
1136 // XXX: do we need to run close(fd) or fd_close(fd) here?
1140 assert(F
->type
!= FD_FILE
);
1142 PROF_start(comm_close
);
1144 F
->flags
.close_request
= true;
1148 AsyncCall::Pointer startCall
=commCbCall(5,4, "commStartSslClose",
1149 FdeCbPtrFun(commStartSslClose
, NULL
));
1150 FdeCbParams
&startParams
= GetCommParams
<FdeCbParams
>(startCall
);
1151 startParams
.fd
= fd
;
1152 ScheduleCallHere(startCall
);
1156 // a half-closed fd may lack a reader, so we stop monitoring explicitly
1157 if (commHasHalfClosedMonitor(fd
))
1158 commStopHalfClosedMonitor(fd
);
1159 commUnsetFdTimeout(fd
);
1161 // notify read/write handlers after canceling select reservations, if any
1162 if (COMMIO_FD_WRITECB(fd
)->active()) {
1163 Comm::SetSelect(fd
, COMM_SELECT_WRITE
, NULL
, NULL
, 0);
1164 COMMIO_FD_WRITECB(fd
)->finish(COMM_ERR_CLOSING
, errno
);
1166 if (COMMIO_FD_READCB(fd
)->active()) {
1167 Comm::SetSelect(fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
1168 COMMIO_FD_READCB(fd
)->finish(COMM_ERR_CLOSING
, errno
);
1172 if (ClientInfo
*clientInfo
= F
->clientInfo
) {
1173 if (clientInfo
->selectWaiting
) {
1174 clientInfo
->selectWaiting
= false;
1175 // kick queue or it will get stuck as commWriteHandle is not called
1176 clientInfo
->kickQuotaQueue();
1181 commCallCloseHandlers(fd
);
1183 comm_empty_os_read_buffers(fd
);
1185 AsyncCall::Pointer completeCall
=commCbCall(5,4, "comm_close_complete",
1186 FdeCbPtrFun(comm_close_complete
, NULL
));
1187 FdeCbParams
&completeParams
= GetCommParams
<FdeCbParams
>(completeCall
);
1188 completeParams
.fd
= fd
;
1189 // must use async call to wait for all callbacks
1190 // scheduled before comm_close() to finish
1191 ScheduleCallHere(completeCall
);
1193 PROF_stop(comm_close
);
1196 /* Send a udp datagram to specified TO_ADDR. */
1198 comm_udp_sendto(int fd
,
1199 const Ip::Address
&to_addr
,
1203 PROF_start(comm_udp_sendto
);
1204 ++ statCounter
.syscalls
.sock
.sendtos
;
1206 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr
<<
1207 " using FD " << fd
<< " using Port " << comm_local_port(fd
) );
1209 struct addrinfo
*AI
= NULL
;
1210 to_addr
.getAddrInfo(AI
, fd_table
[fd
].sock_family
);
1211 int x
= sendto(fd
, buf
, len
, 0, AI
->ai_addr
, AI
->ai_addrlen
);
1212 Ip::Address::FreeAddrInfo(AI
);
1214 PROF_stop(comm_udp_sendto
);
1221 if (ECONNREFUSED
!= errno
)
1224 debugs(50, DBG_IMPORTANT
, "comm_udp_sendto: FD " << fd
<< ", (family=" << fd_table
[fd
].sock_family
<< ") " << to_addr
<< ": " << xstrerror());
1230 comm_add_close_handler(int fd
, CLCB
* handler
, void *data
)
1232 debugs(5, 5, "comm_add_close_handler: FD " << fd
<< ", handler=" <<
1233 handler
<< ", data=" << data
);
1235 AsyncCall::Pointer call
=commCbCall(5,4, "SomeCloseHandler",
1236 CommCloseCbPtrFun(handler
, data
));
1237 comm_add_close_handler(fd
, call
);
1241 comm_add_close_handler(int fd
, AsyncCall::Pointer
&call
)
1243 debugs(5, 5, "comm_add_close_handler: FD " << fd
<< ", AsyncCall=" << call
);
1245 /*TODO:Check for a similar scheduled AsyncCall*/
1246 // for (c = fd_table[fd].closeHandler; c; c = c->next)
1247 // assert(c->handler != handler || c->data != data);
1249 call
->setNext(fd_table
[fd
].closeHandler
);
1251 fd_table
[fd
].closeHandler
= call
;
1254 // remove function-based close handler
1256 comm_remove_close_handler(int fd
, CLCB
* handler
, void *data
)
1259 /* Find handler in list */
1260 debugs(5, 5, "comm_remove_close_handler: FD " << fd
<< ", handler=" <<
1261 handler
<< ", data=" << data
);
1263 AsyncCall::Pointer p
, prev
= NULL
;
1264 for (p
= fd_table
[fd
].closeHandler
; p
!= NULL
; prev
= p
, p
= p
->Next()) {
1265 typedef CommCbFunPtrCallT
<CommCloseCbPtrFun
> Call
;
1266 const Call
*call
= dynamic_cast<const Call
*>(p
.getRaw());
1267 if (!call
) // method callbacks have their own comm_remove_close_handler
1270 typedef CommCloseCbParams Params
;
1271 const Params
¶ms
= GetCommParams
<Params
>(p
);
1272 if (call
->dialer
.handler
== handler
&& params
.data
== data
)
1273 break; /* This is our handler */
1276 // comm_close removes all close handlers so our handler may be gone
1278 p
->dequeue(fd_table
[fd
].closeHandler
, prev
);
1279 p
->cancel("comm_remove_close_handler");
1283 // remove method-based close handler
1285 comm_remove_close_handler(int fd
, AsyncCall::Pointer
&call
)
1288 debugs(5, 5, "comm_remove_close_handler: FD " << fd
<< ", AsyncCall=" << call
);
1290 // comm_close removes all close handlers so our handler may be gone
1291 AsyncCall::Pointer p
, prev
= NULL
;
1292 for (p
= fd_table
[fd
].closeHandler
; p
!= NULL
&& p
!= call
; prev
= p
, p
= p
->Next());
1295 p
->dequeue(fd_table
[fd
].closeHandler
, prev
);
1296 call
->cancel("comm_remove_close_handler");
1300 commSetNoLinger(int fd
)
1304 L
.l_onoff
= 0; /* off */
1307 if (setsockopt(fd
, SOL_SOCKET
, SO_LINGER
, (char *) &L
, sizeof(L
)) < 0)
1308 debugs(50, 0, "commSetNoLinger: FD " << fd
<< ": " << xstrerror());
1310 fd_table
[fd
].flags
.nolinger
= true;
1314 commSetReuseAddr(int fd
)
1318 if (setsockopt(fd
, SOL_SOCKET
, SO_REUSEADDR
, (char *) &on
, sizeof(on
)) < 0)
1319 debugs(50, DBG_IMPORTANT
, "commSetReuseAddr: FD " << fd
<< ": " << xstrerror());
1323 commSetTcpRcvbuf(int fd
, int size
)
1325 if (setsockopt(fd
, SOL_SOCKET
, SO_RCVBUF
, (char *) &size
, sizeof(size
)) < 0)
1326 debugs(50, DBG_IMPORTANT
, "commSetTcpRcvbuf: FD " << fd
<< ", SIZE " << size
<< ": " << xstrerror());
1327 if (setsockopt(fd
, SOL_SOCKET
, SO_SNDBUF
, (char *) &size
, sizeof(size
)) < 0)
1328 debugs(50, DBG_IMPORTANT
, "commSetTcpRcvbuf: FD " << fd
<< ", SIZE " << size
<< ": " << xstrerror());
1329 #ifdef TCP_WINDOW_CLAMP
1330 if (setsockopt(fd
, SOL_TCP
, TCP_WINDOW_CLAMP
, (char *) &size
, sizeof(size
)) < 0)
1331 debugs(50, DBG_IMPORTANT
, "commSetTcpRcvbuf: FD " << fd
<< ", SIZE " << size
<< ": " << xstrerror());
// NOTE(review): garbled fragment of commSetNonBlocking(). Platform #if/#endif
// branches, local declarations, returns and closing braces were lost in
// extraction, so the text below is kept byte-identical; only comments added.
// Purpose (from visible calls): put descriptor fd into non-blocking mode and
// record that in fd_table[fd].flags.nonblocking.
1336 commSetNonBlocking(int fd
)
1338 #if !_SQUID_WINDOWS_
// `nonblocking = TRUE` with an ioctl(FIONBIO) below normally belongs to the
// Windows/Cygwin branch — the visible #if guard does not match; TODO confirm
// against the original file.
1343 int nonblocking
= TRUE
;
// pipes cannot take FIONBIO on some platforms, hence the FD_PIPE exclusion
1346 if (fd_table
[fd
].type
!= FD_PIPE
) {
1349 if (ioctl(fd
, FIONBIO
, &nonblocking
) < 0) {
1350 debugs(50, 0, "commSetNonBlocking: FD " << fd
<< ": " << xstrerror() << " " << fd_table
[fd
].type
);
// POSIX path: fetch current file-status flags, then OR in SQUID_NONBLOCK
1358 #if !_SQUID_WINDOWS_
1360 if ((flags
= fcntl(fd
, F_GETFL
, dummy
)) < 0) {
1361 debugs(50, 0, "FD " << fd
<< ": fcntl F_GETFL: " << xstrerror());
1365 if (fcntl(fd
, F_SETFL
, flags
| SQUID_NONBLOCK
) < 0) {
1366 debugs(50, 0, "commSetNonBlocking: FD " << fd
<< ": " << xstrerror());
// record the new mode so other comm code can consult it
1374 fd_table
[fd
].flags
.nonblocking
= true;
// NOTE(review): garbled fragment of commUnsetNonBlocking(), the inverse of
// commSetNonBlocking(). Platform guards and returns were lost in extraction;
// text kept byte-identical, comments only.
1380 commUnsetNonBlocking(int fd
)
// Windows branch: ioctlsocket(FIONBIO, FALSE) restores blocking mode
1383 int nonblocking
= FALSE
;
1385 if (ioctlsocket(fd
, FIONBIO
, (unsigned long *) &nonblocking
) < 0) {
// POSIX branch: clear SQUID_NONBLOCK from the file-status flags
1390 if ((flags
= fcntl(fd
, F_GETFL
, dummy
)) < 0) {
1391 debugs(50, 0, "FD " << fd
<< ": fcntl F_GETFL: " << xstrerror());
1395 if (fcntl(fd
, F_SETFL
, flags
& (~SQUID_NONBLOCK
)) < 0) {
1397 debugs(50, 0, "commUnsetNonBlocking: FD " << fd
<< ": " << xstrerror());
// record the restored blocking mode
1401 fd_table
[fd
].flags
.nonblocking
= false;
1406 commSetCloseOnExec(int fd
)
1412 if ((flags
= fcntl(fd
, F_GETFD
, dummy
)) < 0) {
1413 debugs(50, 0, "FD " << fd
<< ": fcntl F_GETFD: " << xstrerror());
1417 if (fcntl(fd
, F_SETFD
, flags
| FD_CLOEXEC
) < 0)
1418 debugs(50, 0, "FD " << fd
<< ": set close-on-exec failed: " << xstrerror());
1420 fd_table
[fd
].flags
.close_on_exec
= true;
1427 commSetTcpNoDelay(int fd
)
1431 if (setsockopt(fd
, IPPROTO_TCP
, TCP_NODELAY
, (char *) &on
, sizeof(on
)) < 0)
1432 debugs(50, DBG_IMPORTANT
, "commSetTcpNoDelay: FD " << fd
<< ": " << xstrerror());
1434 fd_table
[fd
].flags
.nodelay
= true;
1440 commSetTcpKeepalive(int fd
, int idle
, int interval
, int timeout
)
1444 if (timeout
&& interval
) {
1445 int count
= (timeout
+ interval
- 1) / interval
;
1446 if (setsockopt(fd
, IPPROTO_TCP
, TCP_KEEPCNT
, &count
, sizeof(on
)) < 0)
1447 debugs(5, DBG_IMPORTANT
, "commSetKeepalive: FD " << fd
<< ": " << xstrerror());
1452 if (setsockopt(fd
, IPPROTO_TCP
, TCP_KEEPIDLE
, &idle
, sizeof(on
)) < 0)
1453 debugs(5, DBG_IMPORTANT
, "commSetKeepalive: FD " << fd
<< ": " << xstrerror());
1456 #ifdef TCP_KEEPINTVL
1458 if (setsockopt(fd
, IPPROTO_TCP
, TCP_KEEPINTVL
, &interval
, sizeof(on
)) < 0)
1459 debugs(5, DBG_IMPORTANT
, "commSetKeepalive: FD " << fd
<< ": " << xstrerror());
1462 if (setsockopt(fd
, SOL_SOCKET
, SO_KEEPALIVE
, (char *) &on
, sizeof(on
)) < 0)
1463 debugs(5, DBG_IMPORTANT
, "commSetKeepalive: FD " << fd
<< ": " << xstrerror());
1469 fd_table
=(fde
*) xcalloc(Squid_MaxFD
, sizeof(fde
));
1470 fdd_table
= (fd_debug_t
*)xcalloc(Squid_MaxFD
, sizeof(fd_debug_t
));
1472 /* make sure the accept() socket FIFO delay queue exists */
1473 Comm::AcceptLimiter::Instance();
1475 // make sure the IO pending callback table exists
1476 Comm::CallbackTableInit();
1478 /* XXX account fd_table */
1479 /* Keep a few file descriptors free so that we don't run out of FD's
1480 * after accepting a client but before it opens a socket or a file.
1481 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1482 RESERVED_FD
= min(100, Squid_MaxFD
/ 4);
1484 TheHalfClosed
= new DescriptorSet
;
1486 /* setup the select loop module */
1487 Comm::SelectLoopInit();
1493 delete TheHalfClosed
;
1494 TheHalfClosed
= NULL
;
1496 safe_free(fd_table
);
1497 safe_free(fdd_table
);
1498 Comm::CallbackTableDestruct();
1502 // called when the queue is done waiting for the client bucket to fill
1504 commHandleWriteHelper(void * data
)
1506 CommQuotaQueue
*queue
= static_cast<CommQuotaQueue
*>(data
);
1509 ClientInfo
*clientInfo
= queue
->clientInfo
;
1510 // ClientInfo invalidates queue if freed, so if we got here through,
1511 // evenAdd cbdata protections, everything should be valid and consistent
1513 assert(clientInfo
->hasQueue());
1514 assert(clientInfo
->hasQueue(queue
));
1515 assert(!clientInfo
->selectWaiting
);
1516 assert(clientInfo
->eventWaiting
);
1517 clientInfo
->eventWaiting
= false;
1520 // check that the head descriptor is still relevant
1521 const int head
= clientInfo
->quotaPeekFd();
1522 Comm::IoCallback
*ccb
= COMMIO_FD_WRITECB(head
);
1524 if (fd_table
[head
].clientInfo
== clientInfo
&&
1525 clientInfo
->quotaPeekReserv() == ccb
->quotaQueueReserv
&&
1526 !fd_table
[head
].closing()) {
1528 // wait for the head descriptor to become ready for writing
1529 Comm::SetSelect(head
, COMM_SELECT_WRITE
, Comm::HandleWrite
, ccb
, 0);
1530 clientInfo
->selectWaiting
= true;
1534 clientInfo
->quotaDequeue(); // remove the no longer relevant descriptor
1535 // and continue looking for a relevant one
1536 } while (clientInfo
->hasQueue());
1538 debugs(77,3, HERE
<< "emptied queue");
1542 ClientInfo::hasQueue() const
1545 return !quotaQueue
->empty();
1549 ClientInfo::hasQueue(const CommQuotaQueue
*q
) const
1552 return quotaQueue
== q
;
1555 /// returns the first descriptor to be dequeued
1557 ClientInfo::quotaPeekFd() const
1560 return quotaQueue
->front();
1563 /// returns the reservation ID of the first descriptor to be dequeued
1565 ClientInfo::quotaPeekReserv() const
1568 return quotaQueue
->outs
+ 1;
1571 /// queues a given fd, creating the queue if necessary; returns reservation ID
1573 ClientInfo::quotaEnqueue(int fd
)
1576 return quotaQueue
->enqueue(fd
);
1579 /// removes queue head
1581 ClientInfo::quotaDequeue()
1584 quotaQueue
->dequeue();
1588 ClientInfo::kickQuotaQueue()
1590 if (!eventWaiting
&& !selectWaiting
&& hasQueue()) {
1591 // wait at least a second if the bucket is empty
1592 const double delay
= (bucketSize
< 1.0) ? 1.0 : 0.0;
1593 eventAdd("commHandleWriteHelper", &commHandleWriteHelper
,
1594 quotaQueue
, delay
, 0, true);
1595 eventWaiting
= true;
1599 /// calculates how much to write for a single dequeued client
1601 ClientInfo::quotaForDequed()
1603 /* If we have multiple clients and give full bucketSize to each client then
1604 * clt1 may often get a lot more because clt1->clt2 time distance in the
1605 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1606 * We divide quota evenly to be more fair. */
1608 if (!rationedCount
) {
1609 rationedCount
= quotaQueue
->size() + 1;
1611 // The delay in ration recalculation _temporary_ deprives clients from
1612 // bytes that should have trickled in while rationedCount was positive.
1615 // Rounding errors do not accumulate here, but we round down to avoid
1616 // negative bucket sizes after write with rationedCount=1.
1617 rationedQuota
= static_cast<int>(floor(bucketSize
/rationedCount
));
1618 debugs(77,5, HERE
<< "new rationedQuota: " << rationedQuota
<<
1619 '*' << rationedCount
);
1623 debugs(77,7, HERE
<< "rationedQuota: " << rationedQuota
<<
1624 " rations remaining: " << rationedCount
);
1626 // update 'last seen' time to prevent clientdb GC from dropping us
1627 last_seen
= squid_curtime
;
1628 return rationedQuota
;
1631 ///< adds bytes to the quota bucket based on the rate and passed time
1633 ClientInfo::refillBucket()
1635 // all these times are in seconds, with double precision
1636 const double currTime
= current_dtime
;
1637 const double timePassed
= currTime
- prevTime
;
1639 // Calculate allowance for the time passed. Use double to avoid
1640 // accumulating rounding errors for small intervals. For example, always
1641 // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
1642 const double gain
= timePassed
* writeSpeedLimit
;
1644 debugs(77,5, HERE
<< currTime
<< " clt" << (const char*)hash
.key
<< ": " <<
1645 bucketSize
<< " + (" << timePassed
<< " * " << writeSpeedLimit
<<
1646 " = " << gain
<< ')');
1648 // to further combat error accumulation during micro updates,
1649 // quit before updating time if we cannot add at least one byte
1653 prevTime
= currTime
;
1655 // for "first" connections, drain initial fat before refilling but keep
1656 // updating prevTime to avoid bursts after the fat is gone
1657 if (bucketSize
> bucketSizeLimit
) {
1658 debugs(77,4, HERE
<< "not refilling while draining initial fat");
1664 // obey quota limits
1665 if (bucketSize
> bucketSizeLimit
)
1666 bucketSize
= bucketSizeLimit
;
/// Activates or updates write-side traffic shaping for this client.
/// \param aWriteSpeedLimit bytes/second refill rate
/// \param anInitialBurst   initial bucket contents for a new bucket
/// \param aHighWatermark   maximum bucket size (quota ceiling)
void
ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
{
    debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
           " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
           " highwatermark=" << aHighWatermark);

    // set or possibly update traffic shaping parameters
    writeLimitingActive = true;
    writeSpeedLimit = aWriteSpeedLimit;
    bucketSizeLimit = aHighWatermark;

    // but some members should only be set once for a newly activated bucket
    if (firstTimeConnection) {
        firstTimeConnection = false;

        assert(!selectWaiting);
        assert(!quotaQueue);
        quotaQueue = new CommQuotaQueue(this);

        bucketSize = anInitialBurst;
        prevTime = current_dtime;
    }
}
1694 CommQuotaQueue::CommQuotaQueue(ClientInfo
*info
): clientInfo(info
),
1700 CommQuotaQueue::~CommQuotaQueue()
1702 assert(!clientInfo
); // ClientInfo should clear this before destroying us
1705 /// places the given fd at the end of the queue; returns reservation ID
1707 CommQuotaQueue::enqueue(int fd
)
1709 debugs(77,5, HERE
<< "clt" << (const char*)clientInfo
->hash
.key
<<
1710 ": FD " << fd
<< " with qqid" << (ins
+1) << ' ' << fds
.size());
1715 /// removes queue head
1717 CommQuotaQueue::dequeue()
1719 assert(!fds
.empty());
1720 debugs(77,5, HERE
<< "clt" << (const char*)clientInfo
->hash
.key
<<
1721 ": FD " << fds
.front() << " with qqid" << (outs
+1) << ' ' <<
/*
 * hm, this might be too general-purpose for all the places we'd
 * like to use it.
 */
/// Returns non-zero for errno values that indicate a transient condition
/// (operation would block / was interrupted) rather than a real error.
/// NOTE(review): the switch body was elided in the garbled source and is
/// reconstructed — confirm the case list against the original file.
int
ignoreErrno(int ierrno)
{
    switch (ierrno) {

    case EINPROGRESS:

    case EWOULDBLOCK:
#if EAGAIN != EWOULDBLOCK

    case EAGAIN:
#endif

    case EALREADY:

    case EINTR:
#ifdef ERESTART

    case ERESTART:
#endif

        return 1;

    default:
        return 0;
    }

    /* NOTREACHED */
}
1763 commCloseAllSockets(void)
1768 for (fd
= 0; fd
<= Biggest_FD
; ++fd
) {
1774 if (F
->type
!= FD_SOCKET
)
1777 if (F
->flags
.ipc
) /* don't close inter-process sockets */
1780 if (F
->timeoutHandler
!= NULL
) {
1781 AsyncCall::Pointer callback
= F
->timeoutHandler
;
1782 F
->timeoutHandler
= NULL
;
1783 debugs(5, 5, "commCloseAllSockets: FD " << fd
<< ": Calling timeout handler");
1784 ScheduleCallHere(callback
);
1786 debugs(5, 5, "commCloseAllSockets: FD " << fd
<< ": calling comm_reset_close()");
1787 old_comm_reset_close(fd
);
1793 AlreadyTimedOut(fde
*F
)
1798 if (F
->timeout
== 0)
1801 if (F
->timeout
> squid_curtime
)
1808 writeTimedOut(int fd
)
1810 if (!COMMIO_FD_WRITECB(fd
)->active())
1813 if ((squid_curtime
- fd_table
[fd
].writeStart
) < Config
.Timeout
.write
)
1824 AsyncCall::Pointer callback
;
1826 for (fd
= 0; fd
<= Biggest_FD
; ++fd
) {
1829 if (writeTimedOut(fd
)) {
1830 // We have an active write callback and we are timed out
1831 debugs(5, 5, "checkTimeouts: FD " << fd
<< " auto write timeout");
1832 Comm::SetSelect(fd
, COMM_SELECT_WRITE
, NULL
, NULL
, 0);
1833 COMMIO_FD_WRITECB(fd
)->finish(COMM_ERROR
, ETIMEDOUT
);
1834 } else if (AlreadyTimedOut(F
))
1837 debugs(5, 5, "checkTimeouts: FD " << fd
<< " Expired");
1839 if (F
->timeoutHandler
!= NULL
) {
1840 debugs(5, 5, "checkTimeouts: FD " << fd
<< ": Call timeout handler");
1841 callback
= F
->timeoutHandler
;
1842 F
->timeoutHandler
= NULL
;
1843 ScheduleCallHere(callback
);
1845 debugs(5, 5, "checkTimeouts: FD " << fd
<< ": Forcing comm_close()");
1851 /// Start waiting for a possibly half-closed connection to close
1852 // by scheduling a read callback to a monitoring handler that
1853 // will close the connection on read errors.
1855 commStartHalfClosedMonitor(int fd
)
1857 debugs(5, 5, HERE
<< "adding FD " << fd
<< " to " << *TheHalfClosed
);
1858 assert(isOpen(fd
) && !commHasHalfClosedMonitor(fd
));
1859 (void)TheHalfClosed
->add(fd
); // could also assert the result
1860 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1865 commPlanHalfClosedCheck()
1867 if (!WillCheckHalfClosed
&& !TheHalfClosed
->empty()) {
1868 eventAdd("commHalfClosedCheck", &commHalfClosedCheck
, NULL
, 1.0, 1);
1869 WillCheckHalfClosed
= true;
1873 /// iterates over all descriptors that may need half-closed tests and
1874 /// calls comm_read for those that do; re-schedules the check if needed
1877 commHalfClosedCheck(void *)
1879 debugs(5, 5, HERE
<< "checking " << *TheHalfClosed
);
1881 typedef DescriptorSet::const_iterator DSCI
;
1882 const DSCI end
= TheHalfClosed
->end();
1883 for (DSCI i
= TheHalfClosed
->begin(); i
!= end
; ++i
) {
1884 Comm::ConnectionPointer c
= new Comm::Connection
; // XXX: temporary. make HalfClosed a list of these.
1886 if (!fd_table
[c
->fd
].halfClosedReader
) { // not reading already
1887 AsyncCall::Pointer call
= commCbCall(5,4, "commHalfClosedReader",
1888 CommIoCbPtrFun(&commHalfClosedReader
, NULL
));
1889 comm_read(c
, NULL
, 0, call
);
1890 fd_table
[c
->fd
].halfClosedReader
= call
;
1892 c
->fd
= -1; // XXX: temporary. prevent c replacement erase closing listed FD
1895 WillCheckHalfClosed
= false; // as far as we know
1896 commPlanHalfClosedCheck(); // may need to check again
1899 /// checks whether we are waiting for possibly half-closed connection to close
1900 // We are monitoring if the read handler for the fd is the monitoring handler.
1902 commHasHalfClosedMonitor(int fd
)
1904 return TheHalfClosed
->has(fd
);
1907 /// stop waiting for possibly half-closed connection to close
1909 commStopHalfClosedMonitor(int const fd
)
1911 debugs(5, 5, HERE
<< "removing FD " << fd
<< " from " << *TheHalfClosed
);
1913 // cancel the read if one was scheduled
1914 AsyncCall::Pointer reader
= fd_table
[fd
].halfClosedReader
;
1916 comm_read_cancel(fd
, reader
);
1917 fd_table
[fd
].halfClosedReader
= NULL
;
1919 TheHalfClosed
->del(fd
);
1922 /// I/O handler for the possibly half-closed connection monitoring code
1924 commHalfClosedReader(const Comm::ConnectionPointer
&conn
, char *, size_t size
, comm_err_t flag
, int, void *)
1926 // there cannot be more data coming in on half-closed connections
1928 assert(conn
!= NULL
);
1929 assert(commHasHalfClosedMonitor(conn
->fd
)); // or we would have canceled the read
1931 fd_table
[conn
->fd
].halfClosedReader
= NULL
; // done reading, for now
1933 // nothing to do if fd is being closed
1934 if (flag
== COMM_ERR_CLOSING
)
1937 // if read failed, close the connection
1938 if (flag
!= COMM_OK
) {
1939 debugs(5, 3, HERE
<< "closing " << conn
);
1944 // continue waiting for close or error
1945 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1948 CommRead::CommRead() : conn(NULL
), buf(NULL
), len(0), callback(NULL
) {}
1950 CommRead::CommRead(const Comm::ConnectionPointer
&c
, char *buf_
, int len_
, AsyncCall::Pointer
&callback_
)
1951 : conn(c
), buf(buf_
), len(len_
), callback(callback_
) {}
1953 DeferredRead::DeferredRead () : theReader(NULL
), theContext(NULL
), theRead(), cancelled(false) {}
1955 DeferredRead::DeferredRead (DeferrableRead
*aReader
, void *data
, CommRead
const &aRead
) : theReader(aReader
), theContext (data
), theRead(aRead
), cancelled(false) {}
1957 DeferredReadManager::~DeferredReadManager()
1960 assert (deferredReads
.empty());
1963 /* explicit instantiation required for some systems */
1965 /// \cond AUTODOCS_IGNORE
1966 template cbdata_type CbDataList
<DeferredRead
>::CBDATA_CbDataList
;
1970 DeferredReadManager::delayRead(DeferredRead
const &aRead
)
1972 debugs(5, 3, "Adding deferred read on " << aRead
.theRead
.conn
);
1973 CbDataList
<DeferredRead
> *temp
= deferredReads
.push_back(aRead
);
1975 // We have to use a global function as a closer and point to temp
1976 // instead of "this" because DeferredReadManager is not a job and
1977 // is not even cbdata protected
1978 // XXX: and yet we use cbdata protection functions on it??
1979 AsyncCall::Pointer closer
= commCbCall(5,4,
1980 "DeferredReadManager::CloseHandler",
1981 CommCloseCbPtrFun(&CloseHandler
, temp
));
1982 comm_add_close_handler(aRead
.theRead
.conn
->fd
, closer
);
1983 temp
->element
.closer
= closer
; // remeber so that we can cancel
1987 DeferredReadManager::CloseHandler(const CommCloseCbParams
¶ms
)
1989 if (!cbdataReferenceValid(params
.data
))
1992 CbDataList
<DeferredRead
> *temp
= (CbDataList
<DeferredRead
> *)params
.data
;
1994 temp
->element
.closer
= NULL
;
1995 temp
->element
.markCancelled();
1999 DeferredReadManager::popHead(CbDataListContainer
<DeferredRead
> &deferredReads
)
2001 assert (!deferredReads
.empty());
2003 DeferredRead
&read
= deferredReads
.head
->element
;
2005 // NOTE: at this point the connection has been paused/stalled for an unknown
2006 // amount of time. We must re-validate that it is active and usable.
2008 // If the connection has been closed already. Cancel this read.
2009 if (!Comm::IsConnOpen(read
.theRead
.conn
)) {
2010 if (read
.closer
!= NULL
) {
2011 read
.closer
->cancel("Connection closed before.");
2014 read
.markCancelled();
2017 if (!read
.cancelled
) {
2018 comm_remove_close_handler(read
.theRead
.conn
->fd
, read
.closer
);
2022 DeferredRead result
= deferredReads
.pop_front();
2028 DeferredReadManager::kickReads(int const count
)
2030 /* if we had CbDataList::size() we could consolidate this and flushReads */
2037 size_t remaining
= count
;
2039 while (!deferredReads
.empty() && remaining
) {
2040 DeferredRead aRead
= popHead(deferredReads
);
2043 if (!aRead
.cancelled
)
2049 DeferredReadManager::flushReads()
2051 CbDataListContainer
<DeferredRead
> reads
;
2052 reads
= deferredReads
;
2053 deferredReads
= CbDataListContainer
<DeferredRead
>();
2055 // XXX: For fairness this SHOULD randomize the order
2056 while (!reads
.empty()) {
2057 DeferredRead aRead
= popHead(reads
);
2063 DeferredReadManager::kickARead(DeferredRead
const &aRead
)
2065 if (aRead
.cancelled
)
2068 if (Comm::IsConnOpen(aRead
.theRead
.conn
) && fd_table
[aRead
.theRead
.conn
->fd
].closing())
2071 debugs(5, 3, "Kicking deferred read on " << aRead
.theRead
.conn
);
2073 aRead
.theReader(aRead
.theContext
, aRead
.theRead
);
2077 DeferredRead::markCancelled()
2083 CommSelectEngine::checkEvents(int timeout
)
2085 static time_t last_timeout
= 0;
2087 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
2088 if (squid_curtime
> last_timeout
) {
2089 last_timeout
= squid_curtime
;
2093 switch (Comm::DoSelect(timeout
)) {
2109 fatal_dump("comm.cc: Internal error -- this should never happen.");
2114 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
2116 comm_open_uds(int sock_type
,
2118 struct sockaddr_un
* addr
,
2121 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
2125 PROF_start(comm_open
);
2126 /* Create socket for accepting new connections. */
2127 ++ statCounter
.syscalls
.sock
.sockets
;
2129 /* Setup the socket addrinfo details for use */
2132 AI
.ai_family
= PF_UNIX
;
2133 AI
.ai_socktype
= sock_type
;
2134 AI
.ai_protocol
= proto
;
2135 AI
.ai_addrlen
= SUN_LEN(addr
);
2136 AI
.ai_addr
= (sockaddr
*)addr
;
2137 AI
.ai_canonname
= NULL
;
2140 debugs(50, 3, HERE
<< "Attempt open socket for: " << addr
->sun_path
);
2142 if ((new_socket
= socket(AI
.ai_family
, AI
.ai_socktype
, AI
.ai_protocol
)) < 0) {
2143 /* Increase the number of reserved fd's if calls to socket()
2144 * are failing because the open file table is full. This
2145 * limits the number of simultaneous clients */
2147 if (limitError(errno
)) {
2148 debugs(50, DBG_IMPORTANT
, HERE
<< "socket failure: " << xstrerror());
2151 debugs(50, DBG_CRITICAL
, HERE
<< "socket failure: " << xstrerror());
2154 PROF_stop(comm_open
);
2158 debugs(50, 3, "Opened UDS FD " << new_socket
<< " : family=" << AI
.ai_family
<< ", type=" << AI
.ai_socktype
<< ", protocol=" << AI
.ai_protocol
);
2161 debugs(50, 5, HERE
<< "FD " << new_socket
<< " is a new socket");
2163 assert(!isOpen(new_socket
));
2164 fd_open(new_socket
, FD_MSGHDR
, NULL
);
2166 fdd_table
[new_socket
].close_file
= NULL
;
2168 fdd_table
[new_socket
].close_line
= 0;
2170 fd_table
[new_socket
].sock_family
= AI
.ai_family
;
2172 if (!(flags
& COMM_NOCLOEXEC
))
2173 commSetCloseOnExec(new_socket
);
2175 if (flags
& COMM_REUSEADDR
)
2176 commSetReuseAddr(new_socket
);
2178 if (flags
& COMM_NONBLOCKING
) {
2179 if (commSetNonBlocking(new_socket
) != COMM_OK
) {
2180 comm_close(new_socket
);
2181 PROF_stop(comm_open
);
2186 if (flags
& COMM_DOBIND
) {
2187 if (commBind(new_socket
, AI
) != COMM_OK
) {
2188 comm_close(new_socket
);
2189 PROF_stop(comm_open
);
2195 if (sock_type
== SOCK_STREAM
)
2196 commSetTcpNoDelay(new_socket
);
2200 if (Config
.tcpRcvBufsz
> 0 && sock_type
== SOCK_STREAM
)
2201 commSetTcpRcvbuf(new_socket
, Config
.tcpRcvBufsz
);
2203 PROF_stop(comm_open
);