2 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 05 Socket Functions */
12 #include "ClientInfo.h"
13 #include "comm/AcceptLimiter.h"
14 #include "comm/comm_internal.h"
15 #include "comm/Connection.h"
16 #include "comm/IoCallback.h"
17 #include "comm/Loops.h"
18 #include "comm/Read.h"
19 #include "comm/TcpAcceptor.h"
20 #include "comm/Write.h"
22 #include "compat/cmsg.h"
23 #include "DescriptorSet.h"
28 #include "icmp/net_db.h"
29 #include "ip/Intercept.h"
30 #include "ip/QosConfig.h"
33 #include "profiler/Profiler.h"
34 #include "sbuf/SBuf.h"
35 #include "SquidConfig.h"
36 #include "StatCounters.h"
37 #include "StoreIOBuffer.h"
41 #include "ssl/support.h"
47 #include <sys/ioctl.h>
49 #ifdef HAVE_NETINET_TCP_H
50 #include <netinet/tcp.h>
57 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
60 static IOCB commHalfClosedReader
;
61 static void comm_init_opened(const Comm::ConnectionPointer
&conn
, const char *note
, struct addrinfo
*AI
);
62 static int comm_apply_flags(int new_socket
, Ip::Address
&addr
, int flags
, struct addrinfo
*AI
);
65 CBDATA_CLASS_INIT(CommQuotaQueue
);
67 static void commHandleWriteHelper(void * data
);
72 static DescriptorSet
*TheHalfClosed
= NULL
; /// the set of half-closed FDs
73 static bool WillCheckHalfClosed
= false; /// true if check is scheduled
74 static EVH commHalfClosedCheck
;
75 static void commPlanHalfClosedCheck();
77 static Comm::Flag
commBind(int s
, struct addrinfo
&);
78 static void commSetReuseAddr(int);
79 static void commSetNoLinger(int);
81 static void commSetTcpNoDelay(int);
83 static void commSetTcpRcvbuf(int, int);
88 return fd
>= 0 && fd_table
&& fd_table
[fd
].flags
.open
!= 0;
92 * Empty the read buffers
94 * This is a magical routine that empties the read buffers.
95 * Under some platforms (Linux) if a buffer has data in it before
96 * you call close(), the socket will hang and take quite a while
100 comm_empty_os_read_buffers(int fd
)
104 // Bug 4146: SSL-Bump BIO does not release sockets on close.
105 if (fd_table
[fd
].ssl
)
109 /* prevent those nasty RST packets */
110 char buf
[SQUID_TCP_SO_RCVBUF
];
111 if (fd_table
[fd
].flags
.nonblocking
&& fd_table
[fd
].type
!= FD_MSGHDR
) {
112 while (FD_READ_METHOD(fd
, buf
, SQUID_TCP_SO_RCVBUF
) > 0) {};
118 * synchronous wrapper around udp socket functions
121 comm_udp_recvfrom(int fd
, void *buf
, size_t len
, int flags
, Ip::Address
&from
)
123 ++ statCounter
.syscalls
.sock
.recvfroms
;
124 debugs(5,8, "comm_udp_recvfrom: FD " << fd
<< " from " << from
);
125 struct addrinfo
*AI
= NULL
;
126 Ip::Address::InitAddr(AI
);
127 int x
= recvfrom(fd
, buf
, len
, flags
, AI
->ai_addr
, &AI
->ai_addrlen
);
129 Ip::Address::FreeAddr(AI
);
134 comm_udp_recv(int fd
, void *buf
, size_t len
, int flags
)
137 return comm_udp_recvfrom(fd
, buf
, len
, flags
, nul
);
/// Synchronous UDP send on an already-connected socket.
/// Thin wrapper over send(2); returns the byte count, or -1 with errno set.
int
comm_udp_send(int s, const void *buf, size_t len, int flags)
{
    return send(s, buf, len, flags);
}
147 comm_has_incomplete_write(int fd
)
149 assert(isOpen(fd
) && COMMIO_FD_WRITECB(fd
) != NULL
);
150 return COMMIO_FD_WRITECB(fd
)->active();
154 * Queue a write. handler/handler_data are called when the write fully
155 * completes, on error, or on file descriptor close.
158 /* Return the local port associated with fd. */
160 comm_local_port(int fd
)
163 struct addrinfo
*addr
= NULL
;
164 fde
*F
= &fd_table
[fd
];
166 /* If the fd is closed already, just return */
168 if (!F
->flags
.open
) {
169 debugs(5, 0, "comm_local_port: FD " << fd
<< " has been closed.");
173 if (F
->local_addr
.port())
174 return F
->local_addr
.port();
176 if (F
->sock_family
== AF_INET
)
179 Ip::Address::InitAddr(addr
);
181 if (getsockname(fd
, addr
->ai_addr
, &(addr
->ai_addrlen
)) ) {
183 debugs(50, DBG_IMPORTANT
, MYNAME
<< "Failed to retrieve TCP/UDP port number for socket: FD " << fd
<< ": " << xstrerr(xerrno
));
184 Ip::Address::FreeAddr(addr
);
189 Ip::Address::FreeAddr(addr
);
191 if (F
->local_addr
.isAnyAddr()) {
192 /* save the whole local address, not just the port. */
193 F
->local_addr
= temp
;
195 F
->local_addr
.port(temp
.port());
198 debugs(5, 6, "comm_local_port: FD " << fd
<< ": port " << F
->local_addr
.port() << "(family=" << F
->sock_family
<< ")");
199 return F
->local_addr
.port();
203 commBind(int s
, struct addrinfo
&inaddr
)
205 ++ statCounter
.syscalls
.sock
.binds
;
207 if (bind(s
, inaddr
.ai_addr
, inaddr
.ai_addrlen
) == 0) {
208 debugs(50, 6, "bind socket FD " << s
<< " to " << fd_table
[s
].local_addr
);
212 debugs(50, DBG_CRITICAL
, MYNAME
<< "Cannot bind socket FD " << s
<< " to " << fd_table
[s
].local_addr
<< ": " << xstrerr(xerrno
));
214 return Comm::COMM_ERROR
;
218 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
219 * is OR of flags specified in comm.h. Defaults TOS
222 comm_open(int sock_type
,
228 return comm_openex(sock_type
, proto
, addr
, flags
, note
);
232 comm_open_listener(int sock_type
,
234 Comm::ConnectionPointer
&conn
,
237 /* all listener sockets require bind() */
238 conn
->flags
|= COMM_DOBIND
;
240 /* attempt native enabled port. */
241 conn
->fd
= comm_openex(sock_type
, proto
, conn
->local
, conn
->flags
, note
);
245 comm_open_listener(int sock_type
,
253 /* all listener sockets require bind() */
254 flags
|= COMM_DOBIND
;
256 /* attempt native enabled port. */
257 sock
= comm_openex(sock_type
, proto
, addr
, flags
, note
);
/// Whether the given errno value means a file-descriptor limit was hit,
/// either per-process (EMFILE) or system-wide (ENFILE).
static bool
limitError(int const anErrno)
{
    switch (anErrno) {
    case ENFILE:
    case EMFILE:
        return true;
    default:
        return false;
    }
}
269 comm_set_v6only(int fd
, int tos
)
272 if (setsockopt(fd
, IPPROTO_IPV6
, IPV6_V6ONLY
, (char *) &tos
, sizeof(int)) < 0) {
274 debugs(50, DBG_IMPORTANT
, MYNAME
<< "setsockopt(IPV6_V6ONLY) " << (tos
?"ON":"OFF") << " for FD " << fd
<< ": " << xstrerr(xerrno
));
277 debugs(50, DBG_CRITICAL
, MYNAME
<< "WARNING: setsockopt(IPV6_V6ONLY) not supported on this platform");
282 * Set the socket option required for TPROXY spoofing for:
283 * - Linux TPROXY v4 support,
284 * - OpenBSD divert-to support,
285 * - FreeBSD IPFW TPROXY v4 support.
288 comm_set_transparent(int fd
)
290 #if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
291 # define soLevel SOL_IP
292 # define soFlag IP_TRANSPARENT
293 bool doneSuid
= false;
295 #elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
296 # define soLevel SOL_SOCKET
297 # define soFlag SO_BINDANY
299 bool doneSuid
= true;
301 #elif defined(IP_BINDANY) // FreeBSD with IPFW
302 # define soLevel IPPROTO_IP
303 # define soFlag IP_BINDANY
305 bool doneSuid
= true;
308 debugs(50, DBG_CRITICAL
, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
311 #if defined(soLevel) && defined(soFlag)
313 if (setsockopt(fd
, soLevel
, soFlag
, (char *) &tos
, sizeof(int)) < 0) {
315 debugs(50, DBG_IMPORTANT
, MYNAME
<< "setsockopt(TPROXY) on FD " << fd
<< ": " << xstrerr(xerrno
));
317 /* mark the socket as having transparent options */
318 fd_table
[fd
].flags
.transparent
= true;
326 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
327 * is OR of flags specified in defines.h:COMM_*
330 comm_openex(int sock_type
,
337 struct addrinfo
*AI
= NULL
;
339 PROF_start(comm_open
);
340 /* Create socket for accepting new connections. */
341 ++ statCounter
.syscalls
.sock
.sockets
;
343 /* Setup the socket addrinfo details for use */
344 addr
.getAddrInfo(AI
);
345 AI
->ai_socktype
= sock_type
;
346 AI
->ai_protocol
= proto
;
348 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr
);
350 new_socket
= socket(AI
->ai_family
, AI
->ai_socktype
, AI
->ai_protocol
);
353 /* under IPv6 there is the possibility IPv6 is present but disabled. */
354 /* try again as IPv4-native if possible */
355 if ( new_socket
< 0 && Ip::EnableIpv6
&& addr
.isIPv6() && addr
.setIPv4() ) {
356 /* attempt to open this IPv4-only. */
357 Ip::Address::FreeAddr(AI
);
358 /* Setup the socket addrinfo details for use */
359 addr
.getAddrInfo(AI
);
360 AI
->ai_socktype
= sock_type
;
361 AI
->ai_protocol
= proto
;
362 debugs(50, 3, "Attempt fallback open socket for: " << addr
);
363 new_socket
= socket(AI
->ai_family
, AI
->ai_socktype
, AI
->ai_protocol
);
364 debugs(50, 2, "attempt open " << note
<< " socket on: " << addr
);
367 if (new_socket
< 0) {
368 /* Increase the number of reserved fd's if calls to socket()
369 * are failing because the open file table is full. This
370 * limits the number of simultaneous clients */
372 if (limitError(errno
)) {
373 debugs(50, DBG_IMPORTANT
, MYNAME
<< "socket failure: " << xstrerr(xerrno
));
376 debugs(50, DBG_CRITICAL
, MYNAME
<< "socket failure: " << xstrerr(xerrno
));
379 Ip::Address::FreeAddr(AI
);
381 PROF_stop(comm_open
);
382 errno
= xerrno
; // restore for caller
386 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
387 Comm::ConnectionPointer conn
= new Comm::Connection
;
389 conn
->fd
= new_socket
;
391 debugs(50, 3, "comm_openex: Opened socket " << conn
<< " : family=" << AI
->ai_family
<< ", type=" << AI
->ai_socktype
<< ", protocol=" << AI
->ai_protocol
);
393 if ( Ip::EnableIpv6
&IPV6_SPECIAL_SPLITSTACK
&& addr
.isIPv6() )
394 comm_set_v6only(conn
->fd
, 1);
396 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
397 /* Other OS may have this administratively disabled for general use. Same deal. */
398 if ( Ip::EnableIpv6
&IPV6_SPECIAL_V4MAPPING
&& addr
.isIPv6() )
399 comm_set_v6only(conn
->fd
, 0);
401 comm_init_opened(conn
, note
, AI
);
402 new_socket
= comm_apply_flags(conn
->fd
, addr
, flags
, AI
);
404 Ip::Address::FreeAddr(AI
);
406 PROF_stop(comm_open
);
408 // XXX transition only. prevent conn from closing the new FD on function exit.
410 errno
= xerrno
; // restore for caller
414 /// update FD tables after a local or remote (IPC) comm_openex();
416 comm_init_opened(const Comm::ConnectionPointer
&conn
,
420 assert(Comm::IsConnOpen(conn
));
424 debugs(5, 5, HERE
<< conn
<< " is a new socket");
426 assert(!isOpen(conn
->fd
)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
427 fd_open(conn
->fd
, FD_SOCKET
, note
);
429 fde
*F
= &fd_table
[conn
->fd
];
430 F
->local_addr
= conn
->local
;
432 F
->sock_family
= AI
->ai_family
;
435 /// apply flags after a local comm_open*() call;
436 /// returns new_socket or -1 on error
438 comm_apply_flags(int new_socket
,
443 assert(new_socket
>= 0);
445 const int sock_type
= AI
->ai_socktype
;
447 if (!(flags
& COMM_NOCLOEXEC
))
448 commSetCloseOnExec(new_socket
);
450 if ((flags
& COMM_REUSEADDR
))
451 commSetReuseAddr(new_socket
);
453 if (addr
.port() > (unsigned short) 0) {
455 if (sock_type
!= SOCK_DGRAM
)
457 commSetNoLinger(new_socket
);
460 commSetReuseAddr(new_socket
);
463 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
464 if ((flags
& COMM_TRANSPARENT
)) {
465 comm_set_transparent(new_socket
);
468 if ( (flags
& COMM_DOBIND
) || addr
.port() > 0 || !addr
.isAnyAddr() ) {
469 if ( !(flags
& COMM_DOBIND
) && addr
.isAnyAddr() )
470 debugs(5, DBG_IMPORTANT
,"WARNING: Squid is attempting to bind() port " << addr
<< " without being a listener.");
471 if ( addr
.isNoAddr() )
472 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr
<< "!!");
474 if (commBind(new_socket
, *AI
) != Comm::OK
) {
475 comm_close(new_socket
);
480 if (flags
& COMM_NONBLOCKING
)
481 if (commSetNonBlocking(new_socket
) == Comm::COMM_ERROR
) {
482 comm_close(new_socket
);
487 if (sock_type
== SOCK_STREAM
)
488 commSetTcpNoDelay(new_socket
);
492 if (Config
.tcpRcvBufsz
> 0 && sock_type
== SOCK_STREAM
)
493 commSetTcpRcvbuf(new_socket
, Config
.tcpRcvBufsz
);
499 comm_import_opened(const Comm::ConnectionPointer
&conn
,
503 debugs(5, 2, HERE
<< conn
);
504 assert(Comm::IsConnOpen(conn
));
507 comm_init_opened(conn
, note
, AI
);
509 if (!(conn
->flags
& COMM_NOCLOEXEC
))
510 fd_table
[conn
->fd
].flags
.close_on_exec
= true;
512 if (conn
->local
.port() > (unsigned short) 0) {
514 if (AI
->ai_socktype
!= SOCK_DGRAM
)
516 fd_table
[conn
->fd
].flags
.nolinger
= true;
519 if ((conn
->flags
& COMM_TRANSPARENT
))
520 fd_table
[conn
->fd
].flags
.transparent
= true;
522 if (conn
->flags
& COMM_NONBLOCKING
)
523 fd_table
[conn
->fd
].flags
.nonblocking
= true;
526 if (AI
->ai_socktype
== SOCK_STREAM
)
527 fd_table
[conn
->fd
].flags
.nodelay
= true;
530 /* no fd_table[fd].flags. updates needed for these conditions:
531 * if ((flags & COMM_REUSEADDR)) ...
532 * if ((flags & COMM_DOBIND) ...) ...
536 // XXX: now that raw-FD timeouts are only unset for pipes and files this SHOULD be a no-op.
537 // With handler already unset. Leaving this present until that can be verified for all code paths.
539 commUnsetFdTimeout(int fd
)
541 debugs(5, 3, HERE
<< "Remove timeout for FD " << fd
);
543 assert(fd
< Squid_MaxFD
);
544 fde
*F
= &fd_table
[fd
];
545 assert(F
->flags
.open
);
547 F
->timeoutHandler
= NULL
;
552 commSetConnTimeout(const Comm::ConnectionPointer
&conn
, int timeout
, AsyncCall::Pointer
&callback
)
554 debugs(5, 3, HERE
<< conn
<< " timeout " << timeout
);
555 assert(Comm::IsConnOpen(conn
));
556 assert(conn
->fd
< Squid_MaxFD
);
557 fde
*F
= &fd_table
[conn
->fd
];
558 assert(F
->flags
.open
);
561 F
->timeoutHandler
= NULL
;
564 if (callback
!= NULL
) {
565 typedef CommTimeoutCbParams Params
;
566 Params
¶ms
= GetCommParams
<Params
>(callback
);
568 F
->timeoutHandler
= callback
;
571 F
->timeout
= squid_curtime
+ (time_t) timeout
;
578 commUnsetConnTimeout(const Comm::ConnectionPointer
&conn
)
580 debugs(5, 3, HERE
<< "Remove timeout for " << conn
);
581 AsyncCall::Pointer nil
;
582 return commSetConnTimeout(conn
, -1, nil
);
586 * Connect socket FD to given remote address.
587 * If return value is an error flag (COMM_ERROR, ERR_CONNECT, ERR_PROTOCOL, etc.),
588 * then error code will also be returned in errno.
591 comm_connect_addr(int sock
, const Ip::Address
&address
)
593 Comm::Flag status
= Comm::OK
;
594 fde
*F
= &fd_table
[sock
];
598 struct addrinfo
*AI
= NULL
;
599 PROF_start(comm_connect_addr
);
601 assert(address
.port() != 0);
603 debugs(5, 9, HERE
<< "connecting socket FD " << sock
<< " to " << address
<< " (want family: " << F
->sock_family
<< ")");
605 /* Handle IPv6 over IPv4-only socket case.
606 * this case must presently be handled here since the getAddrInfo asserts on bad mappings.
607 * NP: because commResetFD is private to ConnStateData we have to return an error and
608 * trust its handled properly.
610 if (F
->sock_family
== AF_INET
&& !address
.isIPv4()) {
612 return Comm::ERR_PROTOCOL
;
615 /* Handle IPv4 over IPv6-only socket case.
616 * This case is presently handled here as it's both a known case and it's
617 * uncertain what error will be returned by the IPv6 stack in such case. It's
618 * possible this will also be handled by the errno checks below after connect()
619 * but needs carefull cross-platform verification, and verifying the address
620 * condition here is simple.
622 if (!F
->local_addr
.isIPv4() && address
.isIPv4()) {
624 return Comm::ERR_PROTOCOL
;
627 address
.getAddrInfo(AI
, F
->sock_family
);
629 /* Establish connection. */
632 if (!F
->flags
.called_connect
) {
633 F
->flags
.called_connect
= true;
634 ++ statCounter
.syscalls
.sock
.connects
;
637 if ((x
= connect(sock
, AI
->ai_addr
, AI
->ai_addrlen
)) < 0) {
639 debugs(5,5, "sock=" << sock
<< ", addrinfo(" <<
640 " flags=" << AI
->ai_flags
<<
641 ", family=" << AI
->ai_family
<<
642 ", socktype=" << AI
->ai_socktype
<<
643 ", protocol=" << AI
->ai_protocol
<<
644 ", &addr=" << AI
->ai_addr
<<
645 ", addrlen=" << AI
->ai_addrlen
<< " )");
646 debugs(5, 9, "connect FD " << sock
<< ": (" << x
<< ") " << xstrerr(xerrno
));
647 debugs(14,9, "connecting to: " << address
);
650 // XXX: ICAP code refuses callbacks during a pending comm_ call
651 // Async calls development will fix this.
653 xerrno
= EINPROGRESS
;
659 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
660 if (connect(sock
, AI
->ai_addr
, AI
->ai_addrlen
) < 0)
663 if (xerrno
== EINVAL
) {
664 errlen
= sizeof(err
);
665 x
= getsockopt(sock
, SOL_SOCKET
, SO_ERROR
, &err
, &errlen
);
670 errlen
= sizeof(err
);
671 x
= getsockopt(sock
, SOL_SOCKET
, SO_ERROR
, &err
, &errlen
);
677 * Solaris 2.4's socket emulation doesn't allow you
678 * to determine the error from a failed non-blocking
679 * connect and just returns EPIPE. Create a fake
680 * error message for connect. -- fenner@parc.xerox.com
682 if (x
< 0 && xerrno
== EPIPE
)
690 Ip::Address::FreeAddr(AI
);
692 PROF_stop(comm_connect_addr
);
695 if (xerrno
== 0 || xerrno
== EISCONN
)
697 else if (ignoreErrno(xerrno
))
698 status
= Comm::INPROGRESS
;
699 else if (xerrno
== EAFNOSUPPORT
|| xerrno
== EINVAL
)
700 return Comm::ERR_PROTOCOL
;
702 return Comm::COMM_ERROR
;
704 address
.toStr(F
->ipaddr
, MAX_IPSTRLEN
);
706 F
->remote_port
= address
.port(); /* remote_port is HS */
708 if (status
== Comm::OK
) {
709 debugs(5, DBG_DATA
, "comm_connect_addr: FD " << sock
<< " connected to " << address
);
710 } else if (status
== Comm::INPROGRESS
) {
711 debugs(5, DBG_DATA
, "comm_connect_addr: FD " << sock
<< " connection pending");
719 commCallCloseHandlers(int fd
)
721 fde
*F
= &fd_table
[fd
];
722 debugs(5, 5, "commCallCloseHandlers: FD " << fd
);
724 while (F
->closeHandler
!= NULL
) {
725 AsyncCall::Pointer call
= F
->closeHandler
;
726 F
->closeHandler
= call
->Next();
728 // If call is not canceled schedule it for execution else ignore it
729 if (!call
->canceled()) {
730 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call
);
731 ScheduleCallHere(call
);
738 commLingerClose(int fd
, void *unused
)
740 LOCAL_ARRAY(char, buf
, 1024);
741 int n
= FD_READ_METHOD(fd
, buf
, 1024);
744 debugs(5, 3, "FD " << fd
<< " read: " << xstrerr(xerrno
));
750 commLingerTimeout(const FdeCbParams
¶ms
)
752 debugs(5, 3, "commLingerTimeout: FD " << params
.fd
);
753 comm_close(params
.fd
);
760 comm_lingering_close(int fd
)
762 Security::SessionSendGoodbye(fd_table
[fd
].ssl
);
764 if (shutdown(fd
, 1) < 0) {
769 fd_note(fd
, "lingering close");
770 AsyncCall::Pointer call
= commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout
, NULL
));
772 debugs(5, 3, HERE
<< "FD " << fd
<< " timeout " << timeout
);
773 assert(fd_table
[fd
].flags
.open
);
774 if (callback
!= NULL
) {
775 typedef FdeCbParams Params
;
776 Params
¶ms
= GetCommParams
<Params
>(callback
);
778 fd_table
[fd
].timeoutHandler
= callback
;
779 fd_table
[fd
].timeout
= squid_curtime
+ static_cast<time_t>(10);
782 Comm::SetSelect(fd
, COMM_SELECT_READ
, commLingerClose
, NULL
, 0);
788 * enable linger with time of 0 so that when the socket is
789 * closed, TCP generates a RESET
792 comm_reset_close(const Comm::ConnectionPointer
&conn
)
798 if (setsockopt(conn
->fd
, SOL_SOCKET
, SO_LINGER
, (char *) &L
, sizeof(L
)) < 0) {
800 debugs(50, DBG_CRITICAL
, "ERROR: Closing " << conn
<< " with TCP RST: " << xstrerr(xerrno
));
805 // Legacy close function.
807 old_comm_reset_close(int fd
)
813 if (setsockopt(fd
, SOL_SOCKET
, SO_LINGER
, (char *) &L
, sizeof(L
)) < 0) {
815 debugs(50, DBG_CRITICAL
, "ERROR: Closing FD " << fd
<< " with TCP RST: " << xstrerr(xerrno
));
821 commStartTlsClose(const FdeCbParams
¶ms
)
823 Security::SessionSendGoodbye(fd_table
[params
.fd
].ssl
);
827 comm_close_complete(const FdeCbParams
¶ms
)
829 fde
*F
= &fd_table
[params
.fd
];
831 F
->dynamicTlsContext
.reset();
832 fd_close(params
.fd
); /* update fdstat */
835 ++ statCounter
.syscalls
.sock
.closes
;
837 /* When one connection closes, give accept() a chance, if need be */
838 Comm::AcceptLimiter::Instance().kick();
842 * Close the socket fd.
844 * + call write handlers with ERR_CLOSING
845 * + call read handlers with ERR_CLOSING
846 * + call closing handlers
848 * NOTE: Comm::ERR_CLOSING will NOT be called for CommReads' sitting in a
849 * DeferredReadManager.
852 _comm_close(int fd
, char const *file
, int line
)
854 debugs(5, 3, "start closing FD " << fd
<< " by " << file
<< ":" << line
);
856 assert(fd
< Squid_MaxFD
);
858 fde
*F
= &fd_table
[fd
];
863 /* XXX: is this obsolete behind F->closing() ? */
864 if ( (shutting_down
|| reconfiguring
) && (!F
->flags
.open
|| F
->type
== FD_FILE
))
867 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
869 debugs(50, DBG_IMPORTANT
, HERE
<< "BUG 3556: FD " << fd
<< " is not an open socket.");
870 // XXX: do we need to run close(fd) or fd_close(fd) here?
874 assert(F
->type
!= FD_FILE
);
876 PROF_start(comm_close
);
878 F
->flags
.close_request
= true;
881 AsyncCall::Pointer startCall
=commCbCall(5,4, "commStartTlsClose",
882 FdeCbPtrFun(commStartTlsClose
, nullptr));
883 FdeCbParams
&startParams
= GetCommParams
<FdeCbParams
>(startCall
);
885 ScheduleCallHere(startCall
);
888 // a half-closed fd may lack a reader, so we stop monitoring explicitly
889 if (commHasHalfClosedMonitor(fd
))
890 commStopHalfClosedMonitor(fd
);
891 commUnsetFdTimeout(fd
);
893 // notify read/write handlers after canceling select reservations, if any
894 if (COMMIO_FD_WRITECB(fd
)->active()) {
895 Comm::SetSelect(fd
, COMM_SELECT_WRITE
, NULL
, NULL
, 0);
896 COMMIO_FD_WRITECB(fd
)->finish(Comm::ERR_CLOSING
, errno
);
898 if (COMMIO_FD_READCB(fd
)->active()) {
899 Comm::SetSelect(fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
900 COMMIO_FD_READCB(fd
)->finish(Comm::ERR_CLOSING
, errno
);
904 if (BandwidthBucket
*bucket
= BandwidthBucket::SelectBucket(F
)) {
905 if (bucket
->selectWaiting
)
906 bucket
->onFdClosed();
910 commCallCloseHandlers(fd
);
912 comm_empty_os_read_buffers(fd
);
914 AsyncCall::Pointer completeCall
=commCbCall(5,4, "comm_close_complete",
915 FdeCbPtrFun(comm_close_complete
, NULL
));
916 FdeCbParams
&completeParams
= GetCommParams
<FdeCbParams
>(completeCall
);
917 completeParams
.fd
= fd
;
918 // must use async call to wait for all callbacks
919 // scheduled before comm_close() to finish
920 ScheduleCallHere(completeCall
);
922 PROF_stop(comm_close
);
925 /* Send a udp datagram to specified TO_ADDR. */
927 comm_udp_sendto(int fd
,
928 const Ip::Address
&to_addr
,
932 PROF_start(comm_udp_sendto
);
933 ++ statCounter
.syscalls
.sock
.sendtos
;
935 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr
<<
936 " using FD " << fd
<< " using Port " << comm_local_port(fd
) );
938 struct addrinfo
*AI
= NULL
;
939 to_addr
.getAddrInfo(AI
, fd_table
[fd
].sock_family
);
940 int x
= sendto(fd
, buf
, len
, 0, AI
->ai_addr
, AI
->ai_addrlen
);
942 Ip::Address::FreeAddr(AI
);
944 PROF_stop(comm_udp_sendto
);
947 errno
= xerrno
; // restore for caller to use
952 if (ECONNREFUSED
!= xerrno
)
954 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ", (family=" << fd_table
[fd
].sock_family
<< ") " << to_addr
<< ": " << xstrerr(xerrno
));
956 errno
= xerrno
; // restore for caller to use
957 return Comm::COMM_ERROR
;
961 comm_add_close_handler(int fd
, CLCB
* handler
, void *data
)
963 debugs(5, 5, "comm_add_close_handler: FD " << fd
<< ", handler=" <<
964 handler
<< ", data=" << data
);
966 AsyncCall::Pointer call
=commCbCall(5,4, "SomeCloseHandler",
967 CommCloseCbPtrFun(handler
, data
));
968 comm_add_close_handler(fd
, call
);
973 comm_add_close_handler(int fd
, AsyncCall::Pointer
&call
)
975 debugs(5, 5, "comm_add_close_handler: FD " << fd
<< ", AsyncCall=" << call
);
977 /*TODO:Check for a similar scheduled AsyncCall*/
978 // for (c = fd_table[fd].closeHandler; c; c = c->next)
979 // assert(c->handler != handler || c->data != data);
981 call
->setNext(fd_table
[fd
].closeHandler
);
983 fd_table
[fd
].closeHandler
= call
;
986 // remove function-based close handler
988 comm_remove_close_handler(int fd
, CLCB
* handler
, void *data
)
991 /* Find handler in list */
992 debugs(5, 5, "comm_remove_close_handler: FD " << fd
<< ", handler=" <<
993 handler
<< ", data=" << data
);
995 AsyncCall::Pointer p
, prev
= NULL
;
996 for (p
= fd_table
[fd
].closeHandler
; p
!= NULL
; prev
= p
, p
= p
->Next()) {
997 typedef CommCbFunPtrCallT
<CommCloseCbPtrFun
> Call
;
998 const Call
*call
= dynamic_cast<const Call
*>(p
.getRaw());
999 if (!call
) // method callbacks have their own comm_remove_close_handler
1002 typedef CommCloseCbParams Params
;
1003 const Params
¶ms
= GetCommParams
<Params
>(p
);
1004 if (call
->dialer
.handler
== handler
&& params
.data
== data
)
1005 break; /* This is our handler */
1008 // comm_close removes all close handlers so our handler may be gone
1010 p
->dequeue(fd_table
[fd
].closeHandler
, prev
);
1011 p
->cancel("comm_remove_close_handler");
1015 // remove method-based close handler
1017 comm_remove_close_handler(int fd
, AsyncCall::Pointer
&call
)
1020 debugs(5, 5, "comm_remove_close_handler: FD " << fd
<< ", AsyncCall=" << call
);
1022 // comm_close removes all close handlers so our handler may be gone
1023 AsyncCall::Pointer p
, prev
= NULL
;
1024 for (p
= fd_table
[fd
].closeHandler
; p
!= NULL
&& p
!= call
; prev
= p
, p
= p
->Next());
1027 p
->dequeue(fd_table
[fd
].closeHandler
, prev
);
1028 call
->cancel("comm_remove_close_handler");
1032 commSetNoLinger(int fd
)
1036 L
.l_onoff
= 0; /* off */
1039 if (setsockopt(fd
, SOL_SOCKET
, SO_LINGER
, (char *) &L
, sizeof(L
)) < 0) {
1041 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1043 fd_table
[fd
].flags
.nolinger
= true;
1047 commSetReuseAddr(int fd
)
1050 if (setsockopt(fd
, SOL_SOCKET
, SO_REUSEADDR
, (char *) &on
, sizeof(on
)) < 0) {
1052 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1057 commSetTcpRcvbuf(int fd
, int size
)
1059 if (setsockopt(fd
, SOL_SOCKET
, SO_RCVBUF
, (char *) &size
, sizeof(size
)) < 0) {
1061 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ", SIZE " << size
<< ": " << xstrerr(xerrno
));
1063 if (setsockopt(fd
, SOL_SOCKET
, SO_SNDBUF
, (char *) &size
, sizeof(size
)) < 0) {
1065 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ", SIZE " << size
<< ": " << xstrerr(xerrno
));
1067 #ifdef TCP_WINDOW_CLAMP
1068 if (setsockopt(fd
, SOL_TCP
, TCP_WINDOW_CLAMP
, (char *) &size
, sizeof(size
)) < 0) {
1070 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ", SIZE " << size
<< ": " << xstrerr(xerrno
));
1076 commSetNonBlocking(int fd
)
1079 int nonblocking
= TRUE
;
1081 if (ioctl(fd
, FIONBIO
, &nonblocking
) < 0) {
1083 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
) << " " << fd_table
[fd
].type
);
1084 return Comm::COMM_ERROR
;
1091 if ((flags
= fcntl(fd
, F_GETFL
, dummy
)) < 0) {
1093 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": fcntl F_GETFL: " << xstrerr(xerrno
));
1094 return Comm::COMM_ERROR
;
1097 if (fcntl(fd
, F_SETFL
, flags
| SQUID_NONBLOCK
) < 0) {
1099 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1100 return Comm::COMM_ERROR
;
1104 fd_table
[fd
].flags
.nonblocking
= true;
1109 commUnsetNonBlocking(int fd
)
1112 int nonblocking
= FALSE
;
1114 if (ioctlsocket(fd
, FIONBIO
, (unsigned long *) &nonblocking
) < 0) {
1119 if ((flags
= fcntl(fd
, F_GETFL
, dummy
)) < 0) {
1121 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": fcntl F_GETFL: " << xstrerr(xerrno
));
1122 return Comm::COMM_ERROR
;
1125 if (fcntl(fd
, F_SETFL
, flags
& (~SQUID_NONBLOCK
)) < 0) {
1128 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1129 return Comm::COMM_ERROR
;
1132 fd_table
[fd
].flags
.nonblocking
= false;
1137 commSetCloseOnExec(int fd
)
1143 if ((flags
= fcntl(fd
, F_GETFD
, dummy
)) < 0) {
1145 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": fcntl F_GETFD: " << xstrerr(xerrno
));
1149 if (fcntl(fd
, F_SETFD
, flags
| FD_CLOEXEC
) < 0) {
1151 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": set close-on-exec failed: " << xstrerr(xerrno
));
1154 fd_table
[fd
].flags
.close_on_exec
= true;
1161 commSetTcpNoDelay(int fd
)
1165 if (setsockopt(fd
, IPPROTO_TCP
, TCP_NODELAY
, (char *) &on
, sizeof(on
)) < 0) {
1167 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1170 fd_table
[fd
].flags
.nodelay
= true;
1176 commSetTcpKeepalive(int fd
, int idle
, int interval
, int timeout
)
1180 if (timeout
&& interval
) {
1181 int count
= (timeout
+ interval
- 1) / interval
;
1182 if (setsockopt(fd
, IPPROTO_TCP
, TCP_KEEPCNT
, &count
, sizeof(on
)) < 0) {
1184 debugs(5, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1190 if (setsockopt(fd
, IPPROTO_TCP
, TCP_KEEPIDLE
, &idle
, sizeof(on
)) < 0) {
1192 debugs(5, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1196 #ifdef TCP_KEEPINTVL
1198 if (setsockopt(fd
, IPPROTO_TCP
, TCP_KEEPINTVL
, &interval
, sizeof(on
)) < 0) {
1200 debugs(5, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1204 if (setsockopt(fd
, SOL_SOCKET
, SO_KEEPALIVE
, (char *) &on
, sizeof(on
)) < 0) {
1206 debugs(5, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1213 fd_table
=(fde
*) xcalloc(Squid_MaxFD
, sizeof(fde
));
1215 /* make sure the accept() socket FIFO delay queue exists */
1216 Comm::AcceptLimiter::Instance();
1218 // make sure the IO pending callback table exists
1219 Comm::CallbackTableInit();
1221 /* XXX account fd_table */
1222 /* Keep a few file descriptors free so that we don't run out of FD's
1223 * after accepting a client but before it opens a socket or a file.
1224 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1225 RESERVED_FD
= min(100, Squid_MaxFD
/ 4);
1227 TheHalfClosed
= new DescriptorSet
;
1229 /* setup the select loop module */
1230 Comm::SelectLoopInit();
1236 delete TheHalfClosed
;
1237 TheHalfClosed
= NULL
;
1239 safe_free(fd_table
);
1240 Comm::CallbackTableDestruct();
1244 // called when the queue is done waiting for the client bucket to fill
1246 commHandleWriteHelper(void * data
)
1248 CommQuotaQueue
*queue
= static_cast<CommQuotaQueue
*>(data
);
1251 ClientInfo
*clientInfo
= queue
->clientInfo
;
1252 // ClientInfo invalidates queue if freed, so if we got here through,
1253 // evenAdd cbdata protections, everything should be valid and consistent
1255 assert(clientInfo
->hasQueue());
1256 assert(clientInfo
->hasQueue(queue
));
1257 assert(!clientInfo
->selectWaiting
);
1258 assert(clientInfo
->eventWaiting
);
1259 clientInfo
->eventWaiting
= false;
1262 // check that the head descriptor is still relevant
1263 const int head
= clientInfo
->quotaPeekFd();
1264 Comm::IoCallback
*ccb
= COMMIO_FD_WRITECB(head
);
1266 if (fd_table
[head
].clientInfo
== clientInfo
&&
1267 clientInfo
->quotaPeekReserv() == ccb
->quotaQueueReserv
&&
1268 !fd_table
[head
].closing()) {
1270 // wait for the head descriptor to become ready for writing
1271 Comm::SetSelect(head
, COMM_SELECT_WRITE
, Comm::HandleWrite
, ccb
, 0);
1272 clientInfo
->selectWaiting
= true;
1276 clientInfo
->quotaDequeue(); // remove the no longer relevant descriptor
1277 // and continue looking for a relevant one
1278 } while (clientInfo
->hasQueue());
1280 debugs(77,3, HERE
<< "emptied queue");
1284 ClientInfo::hasQueue() const
1287 return !quotaQueue
->empty();
1291 ClientInfo::hasQueue(const CommQuotaQueue
*q
) const
1294 return quotaQueue
== q
;
1297 /// returns the first descriptor to be dequeued
1299 ClientInfo::quotaPeekFd() const
1302 return quotaQueue
->front();
1305 /// returns the reservation ID of the first descriptor to be dequeued
1307 ClientInfo::quotaPeekReserv() const
1310 return quotaQueue
->outs
+ 1;
1313 /// queues a given fd, creating the queue if necessary; returns reservation ID
1315 ClientInfo::quotaEnqueue(int fd
)
1318 return quotaQueue
->enqueue(fd
);
1321 /// removes queue head
1323 ClientInfo::quotaDequeue()
1326 quotaQueue
->dequeue();
1330 ClientInfo::kickQuotaQueue()
1332 if (!eventWaiting
&& !selectWaiting
&& hasQueue()) {
1333 // wait at least a second if the bucket is empty
1334 const double delay
= (bucketLevel
< 1.0) ? 1.0 : 0.0;
1335 eventAdd("commHandleWriteHelper", &commHandleWriteHelper
,
1336 quotaQueue
, delay
, 0, true);
1337 eventWaiting
= true;
1341 /// calculates how much to write for a single dequeued client
1345 /* If we have multiple clients and give full bucketSize to each client then
1346 * clt1 may often get a lot more because clt1->clt2 time distance in the
1347 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1348 * We divide quota evenly to be more fair. */
1350 if (!rationedCount
) {
1351 rationedCount
= quotaQueue
->size() + 1;
1353 // The delay in ration recalculation _temporary_ deprives clients from
1354 // bytes that should have trickled in while rationedCount was positive.
1357 // Rounding errors do not accumulate here, but we round down to avoid
1358 // negative bucket sizes after write with rationedCount=1.
1359 rationedQuota
= static_cast<int>(floor(bucketLevel
/rationedCount
));
1360 debugs(77,5, HERE
<< "new rationedQuota: " << rationedQuota
<<
1361 '*' << rationedCount
);
1365 debugs(77,7, HERE
<< "rationedQuota: " << rationedQuota
<<
1366 " rations remaining: " << rationedCount
);
1368 // update 'last seen' time to prevent clientdb GC from dropping us
1369 last_seen
= squid_curtime
;
1370 return rationedQuota
;
1374 ClientInfo::applyQuota(int &nleft
, Comm::IoCallback
*state
)
1377 assert(quotaPeekFd() == state
->conn
->fd
);
1378 quotaDequeue(); // we will write or requeue below
1379 if (nleft
> 0 && !BandwidthBucket::applyQuota(nleft
, state
)) {
1380 state
->quotaQueueReserv
= quotaEnqueue(state
->conn
->fd
);
1388 ClientInfo::scheduleWrite(Comm::IoCallback
*state
)
1390 if (writeLimitingActive
) {
1391 state
->quotaQueueReserv
= quotaEnqueue(state
->conn
->fd
);
1397 ClientInfo::onFdClosed()
1399 BandwidthBucket::onFdClosed();
1400 // kick queue or it will get stuck as commWriteHandle is not called
1405 ClientInfo::reduceBucket(const int len
)
1408 BandwidthBucket::reduceBucket(len
);
1409 // even if we wrote nothing, we were served; give others a chance
1414 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit
, const double anInitialBurst
, const double aHighWatermark
)
1416 debugs(77,5, "Write limits for " << (const char*)key
<<
1417 " speed=" << aWriteSpeedLimit
<< " burst=" << anInitialBurst
<<
1418 " highwatermark=" << aHighWatermark
);
1420 // set or possibly update traffic shaping parameters
1421 writeLimitingActive
= true;
1422 writeSpeedLimit
= aWriteSpeedLimit
;
1423 bucketSizeLimit
= aHighWatermark
;
1425 // but some members should only be set once for a newly activated bucket
1426 if (firstTimeConnection
) {
1427 firstTimeConnection
= false;
1429 assert(!selectWaiting
);
1430 assert(!quotaQueue
);
1431 quotaQueue
= new CommQuotaQueue(this);
1433 bucketLevel
= anInitialBurst
;
1434 prevTime
= current_dtime
;
1438 CommQuotaQueue::CommQuotaQueue(ClientInfo
*info
): clientInfo(info
),
1444 CommQuotaQueue::~CommQuotaQueue()
1446 assert(!clientInfo
); // ClientInfo should clear this before destroying us
1449 /// places the given fd at the end of the queue; returns reservation ID
1451 CommQuotaQueue::enqueue(int fd
)
1453 debugs(77,5, "clt" << (const char*)clientInfo
->key
<<
1454 ": FD " << fd
<< " with qqid" << (ins
+1) << ' ' << fds
.size());
1459 /// removes queue head
1461 CommQuotaQueue::dequeue()
1463 assert(!fds
.empty());
1464 debugs(77,5, "clt" << (const char*)clientInfo
->key
<<
1465 ": FD " << fds
.front() << " with qqid" << (outs
+1) << ' ' <<
1470 #endif /* USE_DELAY_POOLS */
/*
 * hm, this might be too general-purpose for all the places we'd
 * like to use it.
 */
/// returns 1 for transient errno values that a retry may cure, else 0
int
ignoreErrno(int ierrno)
{
    switch (ierrno) {

    case EINPROGRESS:

    case EWOULDBLOCK:
#if EAGAIN != EWOULDBLOCK

    case EAGAIN:
#endif

    case EALREADY:

    case EINTR:
#ifdef ERESTART

    case ERESTART:
#endif

        return 1;

    default:
        return 0;
    }

    /* NOTREACHED */
}
1507 commCloseAllSockets(void)
1512 for (fd
= 0; fd
<= Biggest_FD
; ++fd
) {
1518 if (F
->type
!= FD_SOCKET
)
1521 if (F
->flags
.ipc
) /* don't close inter-process sockets */
1524 if (F
->timeoutHandler
!= NULL
) {
1525 AsyncCall::Pointer callback
= F
->timeoutHandler
;
1526 F
->timeoutHandler
= NULL
;
1527 debugs(5, 5, "commCloseAllSockets: FD " << fd
<< ": Calling timeout handler");
1528 ScheduleCallHere(callback
);
1530 debugs(5, 5, "commCloseAllSockets: FD " << fd
<< ": calling comm_reset_close()");
1531 old_comm_reset_close(fd
);
1537 AlreadyTimedOut(fde
*F
)
1542 if (F
->timeout
== 0)
1545 if (F
->timeout
> squid_curtime
)
1552 writeTimedOut(int fd
)
1554 if (!COMMIO_FD_WRITECB(fd
)->active())
1557 if ((squid_curtime
- fd_table
[fd
].writeStart
) < Config
.Timeout
.write
)
1568 AsyncCall::Pointer callback
;
1570 for (fd
= 0; fd
<= Biggest_FD
; ++fd
) {
1573 if (writeTimedOut(fd
)) {
1574 // We have an active write callback and we are timed out
1575 debugs(5, 5, "checkTimeouts: FD " << fd
<< " auto write timeout");
1576 Comm::SetSelect(fd
, COMM_SELECT_WRITE
, NULL
, NULL
, 0);
1577 COMMIO_FD_WRITECB(fd
)->finish(Comm::COMM_ERROR
, ETIMEDOUT
);
1579 } else if (F
->writeQuotaHandler
!= nullptr && COMMIO_FD_WRITECB(fd
)->conn
!= nullptr) {
1580 if (!F
->writeQuotaHandler
->selectWaiting
&& F
->writeQuotaHandler
->quota() && !F
->closing()) {
1581 F
->writeQuotaHandler
->selectWaiting
= true;
1582 Comm::SetSelect(fd
, COMM_SELECT_WRITE
, Comm::HandleWrite
, COMMIO_FD_WRITECB(fd
), 0);
1587 else if (AlreadyTimedOut(F
))
1590 debugs(5, 5, "checkTimeouts: FD " << fd
<< " Expired");
1592 if (F
->timeoutHandler
!= NULL
) {
1593 debugs(5, 5, "checkTimeouts: FD " << fd
<< ": Call timeout handler");
1594 callback
= F
->timeoutHandler
;
1595 F
->timeoutHandler
= NULL
;
1596 ScheduleCallHere(callback
);
1598 debugs(5, 5, "checkTimeouts: FD " << fd
<< ": Forcing comm_close()");
1604 /// Start waiting for a possibly half-closed connection to close
1605 // by scheduling a read callback to a monitoring handler that
1606 // will close the connection on read errors.
1608 commStartHalfClosedMonitor(int fd
)
1610 debugs(5, 5, HERE
<< "adding FD " << fd
<< " to " << *TheHalfClosed
);
1611 assert(isOpen(fd
) && !commHasHalfClosedMonitor(fd
));
1612 (void)TheHalfClosed
->add(fd
); // could also assert the result
1613 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1618 commPlanHalfClosedCheck()
1620 if (!WillCheckHalfClosed
&& !TheHalfClosed
->empty()) {
1621 eventAdd("commHalfClosedCheck", &commHalfClosedCheck
, NULL
, 1.0, 1);
1622 WillCheckHalfClosed
= true;
1626 /// iterates over all descriptors that may need half-closed tests and
1627 /// calls comm_read for those that do; re-schedules the check if needed
1630 commHalfClosedCheck(void *)
1632 debugs(5, 5, HERE
<< "checking " << *TheHalfClosed
);
1634 typedef DescriptorSet::const_iterator DSCI
;
1635 const DSCI end
= TheHalfClosed
->end();
1636 for (DSCI i
= TheHalfClosed
->begin(); i
!= end
; ++i
) {
1637 Comm::ConnectionPointer c
= new Comm::Connection
; // XXX: temporary. make HalfClosed a list of these.
1639 if (!fd_table
[c
->fd
].halfClosedReader
) { // not reading already
1640 AsyncCall::Pointer call
= commCbCall(5,4, "commHalfClosedReader",
1641 CommIoCbPtrFun(&commHalfClosedReader
, NULL
));
1642 Comm::Read(c
, call
);
1643 fd_table
[c
->fd
].halfClosedReader
= call
;
1645 c
->fd
= -1; // XXX: temporary. prevent c replacement erase closing listed FD
1648 WillCheckHalfClosed
= false; // as far as we know
1649 commPlanHalfClosedCheck(); // may need to check again
1652 /// checks whether we are waiting for possibly half-closed connection to close
1653 // We are monitoring if the read handler for the fd is the monitoring handler.
1655 commHasHalfClosedMonitor(int fd
)
1657 return TheHalfClosed
->has(fd
);
1660 /// stop waiting for possibly half-closed connection to close
1662 commStopHalfClosedMonitor(int const fd
)
1664 debugs(5, 5, HERE
<< "removing FD " << fd
<< " from " << *TheHalfClosed
);
1666 // cancel the read if one was scheduled
1667 AsyncCall::Pointer reader
= fd_table
[fd
].halfClosedReader
;
1669 Comm::ReadCancel(fd
, reader
);
1670 fd_table
[fd
].halfClosedReader
= NULL
;
1672 TheHalfClosed
->del(fd
);
1675 /// I/O handler for the possibly half-closed connection monitoring code
1677 commHalfClosedReader(const Comm::ConnectionPointer
&conn
, char *, size_t size
, Comm::Flag flag
, int, void *)
1679 // there cannot be more data coming in on half-closed connections
1681 assert(conn
!= NULL
);
1682 assert(commHasHalfClosedMonitor(conn
->fd
)); // or we would have canceled the read
1684 fd_table
[conn
->fd
].halfClosedReader
= NULL
; // done reading, for now
1686 // nothing to do if fd is being closed
1687 if (flag
== Comm::ERR_CLOSING
)
1690 // if read failed, close the connection
1691 if (flag
!= Comm::OK
) {
1692 debugs(5, 3, HERE
<< "closing " << conn
);
1697 // continue waiting for close or error
1698 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1701 CommRead::CommRead() : conn(NULL
), buf(NULL
), len(0), callback(NULL
) {}
1703 CommRead::CommRead(const Comm::ConnectionPointer
&c
, char *buf_
, int len_
, AsyncCall::Pointer
&callback_
)
1704 : conn(c
), buf(buf_
), len(len_
), callback(callback_
) {}
1706 DeferredRead::DeferredRead () : theReader(NULL
), theContext(NULL
), theRead(), cancelled(false) {}
1708 DeferredRead::DeferredRead (DeferrableRead
*aReader
, void *data
, CommRead
const &aRead
) : theReader(aReader
), theContext (data
), theRead(aRead
), cancelled(false) {}
1710 DeferredReadManager::~DeferredReadManager()
1713 assert (deferredReads
.empty());
1716 /* explicit instantiation required for some systems */
1718 /// \cond AUTODOCS_IGNORE
1719 template cbdata_type CbDataList
<DeferredRead
>::CBDATA_CbDataList
;
1723 DeferredReadManager::delayRead(DeferredRead
const &aRead
)
1725 debugs(5, 3, "Adding deferred read on " << aRead
.theRead
.conn
);
1726 CbDataList
<DeferredRead
> *temp
= deferredReads
.push_back(aRead
);
1728 // We have to use a global function as a closer and point to temp
1729 // instead of "this" because DeferredReadManager is not a job and
1730 // is not even cbdata protected
1731 // XXX: and yet we use cbdata protection functions on it??
1732 AsyncCall::Pointer closer
= commCbCall(5,4,
1733 "DeferredReadManager::CloseHandler",
1734 CommCloseCbPtrFun(&CloseHandler
, temp
));
1735 comm_add_close_handler(aRead
.theRead
.conn
->fd
, closer
);
1736 temp
->element
.closer
= closer
; // remeber so that we can cancel
1740 DeferredReadManager::CloseHandler(const CommCloseCbParams
¶ms
)
1742 if (!cbdataReferenceValid(params
.data
))
1745 CbDataList
<DeferredRead
> *temp
= (CbDataList
<DeferredRead
> *)params
.data
;
1747 temp
->element
.closer
= NULL
;
1748 temp
->element
.markCancelled();
1752 DeferredReadManager::popHead(CbDataListContainer
<DeferredRead
> &deferredReads
)
1754 assert (!deferredReads
.empty());
1756 DeferredRead
&read
= deferredReads
.head
->element
;
1758 // NOTE: at this point the connection has been paused/stalled for an unknown
1759 // amount of time. We must re-validate that it is active and usable.
1761 // If the connection has been closed already. Cancel this read.
1762 if (!fd_table
|| !Comm::IsConnOpen(read
.theRead
.conn
)) {
1763 if (read
.closer
!= NULL
) {
1764 read
.closer
->cancel("Connection closed before.");
1767 read
.markCancelled();
1770 if (!read
.cancelled
) {
1771 comm_remove_close_handler(read
.theRead
.conn
->fd
, read
.closer
);
1775 DeferredRead result
= deferredReads
.pop_front();
1781 DeferredReadManager::kickReads(int const count
)
1783 /* if we had CbDataList::size() we could consolidate this and flushReads */
1790 size_t remaining
= count
;
1792 while (!deferredReads
.empty() && remaining
) {
1793 DeferredRead aRead
= popHead(deferredReads
);
1796 if (!aRead
.cancelled
)
1802 DeferredReadManager::flushReads()
1804 CbDataListContainer
<DeferredRead
> reads
;
1805 reads
= deferredReads
;
1806 deferredReads
= CbDataListContainer
<DeferredRead
>();
1808 // XXX: For fairness this SHOULD randomize the order
1809 while (!reads
.empty()) {
1810 DeferredRead aRead
= popHead(reads
);
1816 DeferredReadManager::kickARead(DeferredRead
const &aRead
)
1818 if (aRead
.cancelled
)
1821 if (Comm::IsConnOpen(aRead
.theRead
.conn
) && fd_table
[aRead
.theRead
.conn
->fd
].closing())
1824 debugs(5, 3, "Kicking deferred read on " << aRead
.theRead
.conn
);
1826 aRead
.theReader(aRead
.theContext
, aRead
.theRead
);
1830 DeferredRead::markCancelled()
1836 CommSelectEngine::checkEvents(int timeout
)
1838 static time_t last_timeout
= 0;
1840 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
1841 if (squid_curtime
> last_timeout
) {
1842 last_timeout
= squid_curtime
;
1846 switch (Comm::DoSelect(timeout
)) {
1855 case Comm::SHUTDOWN
:
1858 case Comm::COMM_ERROR
:
1862 fatal_dump("comm.cc: Internal error -- this should never happen.");
1867 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
1869 comm_open_uds(int sock_type
,
1871 struct sockaddr_un
* addr
,
1874 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
1878 PROF_start(comm_open
);
1879 /* Create socket for accepting new connections. */
1880 ++ statCounter
.syscalls
.sock
.sockets
;
1882 /* Setup the socket addrinfo details for use */
1885 AI
.ai_family
= PF_UNIX
;
1886 AI
.ai_socktype
= sock_type
;
1887 AI
.ai_protocol
= proto
;
1888 AI
.ai_addrlen
= SUN_LEN(addr
);
1889 AI
.ai_addr
= (sockaddr
*)addr
;
1890 AI
.ai_canonname
= NULL
;
1893 debugs(50, 3, HERE
<< "Attempt open socket for: " << addr
->sun_path
);
1895 if ((new_socket
= socket(AI
.ai_family
, AI
.ai_socktype
, AI
.ai_protocol
)) < 0) {
1897 /* Increase the number of reserved fd's if calls to socket()
1898 * are failing because the open file table is full. This
1899 * limits the number of simultaneous clients */
1901 if (limitError(xerrno
)) {
1902 debugs(50, DBG_IMPORTANT
, MYNAME
<< "socket failure: " << xstrerr(xerrno
));
1905 debugs(50, DBG_CRITICAL
, MYNAME
<< "socket failure: " << xstrerr(xerrno
));
1908 PROF_stop(comm_open
);
1912 debugs(50, 3, "Opened UDS FD " << new_socket
<< " : family=" << AI
.ai_family
<< ", type=" << AI
.ai_socktype
<< ", protocol=" << AI
.ai_protocol
);
1915 debugs(50, 5, HERE
<< "FD " << new_socket
<< " is a new socket");
1917 assert(!isOpen(new_socket
));
1918 fd_open(new_socket
, FD_MSGHDR
, addr
->sun_path
);
1920 fd_table
[new_socket
].sock_family
= AI
.ai_family
;
1922 if (!(flags
& COMM_NOCLOEXEC
))
1923 commSetCloseOnExec(new_socket
);
1925 if (flags
& COMM_REUSEADDR
)
1926 commSetReuseAddr(new_socket
);
1928 if (flags
& COMM_NONBLOCKING
) {
1929 if (commSetNonBlocking(new_socket
) != Comm::OK
) {
1930 comm_close(new_socket
);
1931 PROF_stop(comm_open
);
1936 if (flags
& COMM_DOBIND
) {
1937 if (commBind(new_socket
, AI
) != Comm::OK
) {
1938 comm_close(new_socket
);
1939 PROF_stop(comm_open
);
1945 if (sock_type
== SOCK_STREAM
)
1946 commSetTcpNoDelay(new_socket
);
1950 if (Config
.tcpRcvBufsz
> 0 && sock_type
== SOCK_STREAM
)
1951 commSetTcpRcvbuf(new_socket
, Config
.tcpRcvBufsz
);
1953 PROF_stop(comm_open
);