2 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 05 Socket Functions */
12 #include "base/AsyncFunCalls.h"
13 #include "base/OnOff.h"
14 #include "ClientInfo.h"
15 #include "comm/AcceptLimiter.h"
16 #include "comm/comm_internal.h"
17 #include "comm/Connection.h"
18 #include "comm/IoCallback.h"
19 #include "comm/Loops.h"
20 #include "comm/Read.h"
21 #include "comm/TcpAcceptor.h"
22 #include "comm/Write.h"
23 #include "compat/cmsg.h"
24 #include "DescriptorSet.h"
29 #include "icmp/net_db.h"
30 #include "ip/Intercept.h"
31 #include "ip/QosConfig.h"
34 #include "sbuf/SBuf.h"
35 #include "sbuf/Stream.h"
36 #include "SquidConfig.h"
37 #include "StatCounters.h"
38 #include "StoreIOBuffer.h"
42 #include "ssl/support.h"
48 #include <sys/ioctl.h>
50 #ifdef HAVE_NETINET_TCP_H
51 #include <netinet/tcp.h>
58 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
61 static IOCB commHalfClosedReader
;
62 static int comm_openex(int sock_type
, int proto
, Ip::Address
&, int flags
, const char *note
);
63 static void comm_init_opened(const Comm::ConnectionPointer
&conn
, const char *note
, struct addrinfo
*AI
);
64 static int comm_apply_flags(int new_socket
, Ip::Address
&addr
, int flags
, struct addrinfo
*AI
);
67 CBDATA_CLASS_INIT(CommQuotaQueue
);
69 static void commHandleWriteHelper(void * data
);
74 static DescriptorSet
*TheHalfClosed
= nullptr; /// the set of half-closed FDs
75 static bool WillCheckHalfClosed
= false; /// true if check is scheduled
76 static EVH commHalfClosedCheck
;
77 static void commPlanHalfClosedCheck();
79 static Comm::Flag
commBind(int s
, struct addrinfo
&);
80 static void commSetBindAddressNoPort(int);
81 static void commSetReuseAddr(int);
82 static void commConfigureLinger(int fd
, OnOff
);
84 static void commSetTcpNoDelay(int);
86 static void commSetTcpRcvbuf(int, int);
91 return fd
>= 0 && fd_table
&& fd_table
[fd
].flags
.open
!= 0;
95 * Empty the read buffers
97 * This is a magical routine that empties the read buffers.
98 * Under some platforms (Linux) if a buffer has data in it before
99 * you call close(), the socket will hang and take quite a while
103 comm_empty_os_read_buffers(int fd
)
107 // Bug 4146: SSL-Bump BIO does not release sockets on close.
108 if (fd_table
[fd
].ssl
)
112 /* prevent those nasty RST packets */
113 char buf
[SQUID_TCP_SO_RCVBUF
];
114 if (fd_table
[fd
].flags
.nonblocking
&& fd_table
[fd
].type
!= FD_MSGHDR
) {
115 while (FD_READ_METHOD(fd
, buf
, SQUID_TCP_SO_RCVBUF
) > 0) {};
123 * synchronous wrapper around udp socket functions
126 comm_udp_recvfrom(int fd
, void *buf
, size_t len
, int flags
, Ip::Address
&from
)
128 ++ statCounter
.syscalls
.sock
.recvfroms
;
129 debugs(5,8, "comm_udp_recvfrom: FD " << fd
<< " from " << from
);
130 struct addrinfo
*AI
= nullptr;
131 Ip::Address::InitAddr(AI
);
132 int x
= recvfrom(fd
, buf
, len
, flags
, AI
->ai_addr
, &AI
->ai_addrlen
);
134 Ip::Address::FreeAddr(AI
);
139 comm_udp_recv(int fd
, void *buf
, size_t len
, int flags
)
142 return comm_udp_recvfrom(fd
, buf
, len
, flags
, nul
);
/// Synchronous wrapper around the send(2) socket call for UDP sockets.
/// \return bytes sent, or negative on error (errno is left set by send)
int
comm_udp_send(int s, const void *buf, size_t len, int flags)
{
    return send(s, buf, len, flags);
}
152 comm_has_incomplete_write(int fd
)
154 assert(isOpen(fd
) && COMMIO_FD_WRITECB(fd
) != nullptr);
155 return COMMIO_FD_WRITECB(fd
)->active();
159 * Queue a write. handler/handler_data are called when the write fully
160 * completes, on error, or on file descriptor close.
163 /* Return the local port associated with fd. */
165 comm_local_port(int fd
)
168 struct addrinfo
*addr
= nullptr;
169 fde
*F
= &fd_table
[fd
];
171 /* If the fd is closed already, just return */
173 if (!F
->flags
.open
) {
174 debugs(5, 0, "comm_local_port: FD " << fd
<< " has been closed.");
178 if (F
->local_addr
.port())
179 return F
->local_addr
.port();
181 if (F
->sock_family
== AF_INET
)
184 Ip::Address::InitAddr(addr
);
186 if (getsockname(fd
, addr
->ai_addr
, &(addr
->ai_addrlen
)) ) {
188 debugs(50, DBG_IMPORTANT
, "ERROR: " << MYNAME
<< "Failed to retrieve TCP/UDP port number for socket: FD " << fd
<< ": " << xstrerr(xerrno
));
189 Ip::Address::FreeAddr(addr
);
194 Ip::Address::FreeAddr(addr
);
196 if (F
->local_addr
.isAnyAddr()) {
197 /* save the whole local address, not just the port. */
198 F
->local_addr
= temp
;
200 F
->local_addr
.port(temp
.port());
203 debugs(5, 6, "comm_local_port: FD " << fd
<< ": port " << F
->local_addr
.port() << "(family=" << F
->sock_family
<< ")");
204 return F
->local_addr
.port();
207 /// sets the IP_BIND_ADDRESS_NO_PORT socket option to optimize ephemeral port
208 /// reuse by outgoing TCP connections that must bind(2) to a source IP address
210 commSetBindAddressNoPort(const int fd
)
212 #if defined(IP_BIND_ADDRESS_NO_PORT)
214 if (setsockopt(fd
, IPPROTO_IP
, IP_BIND_ADDRESS_NO_PORT
, reinterpret_cast<char*>(&flag
), sizeof(flag
)) < 0) {
215 const auto savedErrno
= errno
;
216 debugs(50, DBG_IMPORTANT
, "ERROR: setsockopt(IP_BIND_ADDRESS_NO_PORT) failure: " << xstrerr(savedErrno
));
224 commBind(int s
, struct addrinfo
&inaddr
)
226 ++ statCounter
.syscalls
.sock
.binds
;
228 if (bind(s
, inaddr
.ai_addr
, inaddr
.ai_addrlen
) == 0) {
229 debugs(50, 6, "bind socket FD " << s
<< " to " << fd_table
[s
].local_addr
);
233 debugs(50, DBG_CRITICAL
, "ERROR: " << MYNAME
<< "Cannot bind socket FD " << s
<< " to " << fd_table
[s
].local_addr
<< ": " << xstrerr(xerrno
));
235 return Comm::COMM_ERROR
;
239 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
240 * is OR of flags specified in comm.h. Defaults TOS
243 comm_open(int sock_type
,
249 // assume zero-port callers do not need to know the assigned port right away
250 if (sock_type
== SOCK_STREAM
&& addr
.port() == 0 && ((flags
& COMM_DOBIND
) || !addr
.isAnyAddr()))
251 flags
|= COMM_DOBIND_PORT_LATER
;
253 return comm_openex(sock_type
, proto
, addr
, flags
, note
);
257 comm_open_listener(int sock_type
,
259 Comm::ConnectionPointer
&conn
,
262 /* all listener sockets require bind() */
263 conn
->flags
|= COMM_DOBIND
;
265 /* attempt native enabled port. */
266 conn
->fd
= comm_openex(sock_type
, proto
, conn
->local
, conn
->flags
, note
);
270 comm_open_listener(int sock_type
,
278 /* all listener sockets require bind() */
279 flags
|= COMM_DOBIND
;
281 /* attempt native enabled port. */
282 sock
= comm_openex(sock_type
, proto
, addr
, flags
, note
);
/// whether the given errno value indicates file descriptor table
/// exhaustion (per-process EMFILE or system-wide ENFILE)
static bool
limitError(int const anErrno)
{
    return anErrno == ENFILE || anErrno == EMFILE;
}
294 comm_set_v6only(int fd
, int tos
)
297 if (setsockopt(fd
, IPPROTO_IPV6
, IPV6_V6ONLY
, (char *) &tos
, sizeof(int)) < 0) {
299 debugs(50, DBG_IMPORTANT
, MYNAME
<< "setsockopt(IPV6_V6ONLY) " << (tos
?"ON":"OFF") << " for FD " << fd
<< ": " << xstrerr(xerrno
));
302 debugs(50, DBG_CRITICAL
, MYNAME
<< "WARNING: setsockopt(IPV6_V6ONLY) not supported on this platform");
307 * Set the socket option required for TPROXY spoofing for:
308 * - Linux TPROXY v4 support,
309 * - OpenBSD divert-to support,
310 * - FreeBSD IPFW TPROXY v4 support.
313 comm_set_transparent(int fd
)
315 #if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
316 # define soLevel SOL_IP
317 # define soFlag IP_TRANSPARENT
318 bool doneSuid
= false;
320 #elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
321 # define soLevel SOL_SOCKET
322 # define soFlag SO_BINDANY
324 bool doneSuid
= true;
326 #elif defined(IP_BINDANY) // FreeBSD with IPFW
327 # define soLevel IPPROTO_IP
328 # define soFlag IP_BINDANY
330 bool doneSuid
= true;
333 debugs(50, DBG_CRITICAL
, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
337 #if defined(soLevel) && defined(soFlag)
339 if (setsockopt(fd
, soLevel
, soFlag
, (char *) &tos
, sizeof(int)) < 0) {
341 debugs(50, DBG_IMPORTANT
, MYNAME
<< "setsockopt(TPROXY) on FD " << fd
<< ": " << xstrerr(xerrno
));
343 /* mark the socket as having transparent options */
344 fd_table
[fd
].flags
.transparent
= true;
352 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
353 * is OR of flags specified in defines.h:COMM_*
356 comm_openex(int sock_type
,
363 struct addrinfo
*AI
= nullptr;
365 /* Create socket for accepting new connections. */
366 ++ statCounter
.syscalls
.sock
.sockets
;
368 if (!Ip::EnableIpv6
&& addr
.isIPv6()) {
369 debugs(50, 2, "refusing to open an IPv6 socket when IPv6 support is disabled: " << addr
);
374 /* Setup the socket addrinfo details for use */
375 addr
.getAddrInfo(AI
);
376 AI
->ai_socktype
= sock_type
;
377 AI
->ai_protocol
= proto
;
379 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr
);
381 new_socket
= socket(AI
->ai_family
, AI
->ai_socktype
, AI
->ai_protocol
);
382 const auto firstErrNo
= errno
;
384 /* under IPv6 there is the possibility IPv6 is present but disabled. */
385 /* try again as IPv4-native if possible */
386 if ( new_socket
< 0 && Ip::EnableIpv6
&& addr
.isIPv6() && addr
.setIPv4() ) {
387 /* attempt to open this IPv4-only. */
388 Ip::Address::FreeAddr(AI
);
389 /* Setup the socket addrinfo details for use */
390 addr
.getAddrInfo(AI
);
391 AI
->ai_socktype
= sock_type
;
392 AI
->ai_protocol
= proto
;
393 debugs(50, 3, "Attempt fallback open socket for: " << addr
);
394 new_socket
= socket(AI
->ai_family
, AI
->ai_socktype
, AI
->ai_protocol
);
395 // TODO: Report failures of this second socket() call.
396 // if both socket() calls fail, we use firstErrNo
397 debugs(50, 2, "attempt open " << note
<< " socket on: " << addr
);
400 if (new_socket
< 0) {
401 /* Increase the number of reserved fd's if calls to socket()
402 * are failing because the open file table is full. This
403 * limits the number of simultaneous clients */
405 if (limitError(firstErrNo
)) {
406 debugs(50, DBG_IMPORTANT
, MYNAME
<< "socket failure: " << xstrerr(firstErrNo
));
409 debugs(50, DBG_CRITICAL
, MYNAME
<< "socket failure: " << xstrerr(firstErrNo
));
412 Ip::Address::FreeAddr(AI
);
414 errno
= firstErrNo
; // restore for caller
418 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
419 Comm::ConnectionPointer conn
= new Comm::Connection
;
421 conn
->fd
= new_socket
;
423 debugs(50, 3, "comm_openex: Opened socket " << conn
<< " : family=" << AI
->ai_family
<< ", type=" << AI
->ai_socktype
<< ", protocol=" << AI
->ai_protocol
);
425 if ( Ip::EnableIpv6
&IPV6_SPECIAL_SPLITSTACK
&& addr
.isIPv6() )
426 comm_set_v6only(conn
->fd
, 1);
428 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
429 /* Other OS may have this administratively disabled for general use. Same deal. */
430 if ( Ip::EnableIpv6
&IPV6_SPECIAL_V4MAPPING
&& addr
.isIPv6() )
431 comm_set_v6only(conn
->fd
, 0);
433 comm_init_opened(conn
, note
, AI
);
434 new_socket
= comm_apply_flags(conn
->fd
, addr
, flags
, AI
);
436 Ip::Address::FreeAddr(AI
);
438 // XXX transition only. prevent conn from closing the new FD on function exit.
440 // XXX: firstErrNo is not applicable here -- socket() calls succeeded above!
441 // TODO: Stop reporting error codes via errno.
446 /// update FD tables after a local or remote (IPC) comm_openex();
448 comm_init_opened(const Comm::ConnectionPointer
&conn
,
452 assert(Comm::IsConnOpen(conn
));
456 debugs(5, 5, conn
<< " is a new socket");
458 assert(!isOpen(conn
->fd
)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
459 fd_open(conn
->fd
, FD_SOCKET
, note
);
461 fde
*F
= &fd_table
[conn
->fd
];
462 F
->local_addr
= conn
->local
;
464 F
->sock_family
= AI
->ai_family
;
467 /// apply flags after a local comm_open*() call;
468 /// returns new_socket or -1 on error
470 comm_apply_flags(int new_socket
,
475 assert(new_socket
>= 0);
477 const int sock_type
= AI
->ai_socktype
;
479 if (!(flags
& COMM_NOCLOEXEC
))
480 commSetCloseOnExec(new_socket
);
482 if ((flags
& COMM_REUSEADDR
))
483 commSetReuseAddr(new_socket
);
485 if (addr
.port() > (unsigned short) 0) {
487 if (sock_type
!= SOCK_DGRAM
)
489 commConfigureLinger(new_socket
, OnOff::off
);
492 commSetReuseAddr(new_socket
);
495 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
496 if ((flags
& COMM_TRANSPARENT
)) {
497 comm_set_transparent(new_socket
);
500 if ( (flags
& COMM_DOBIND
) || addr
.port() > 0 || !addr
.isAnyAddr() ) {
501 if ( !(flags
& COMM_DOBIND
) && addr
.isAnyAddr() )
502 debugs(5, DBG_IMPORTANT
,"WARNING: Squid is attempting to bind() port " << addr
<< " without being a listener.");
503 if ( addr
.isNoAddr() )
504 debugs(5, DBG_CRITICAL
, "ERROR: Squid is attempting to bind() port " << addr
<< "!!");
506 #if defined(SO_REUSEPORT)
507 if (flags
& COMM_REUSEPORT
) {
509 if (setsockopt(new_socket
, SOL_SOCKET
, SO_REUSEPORT
, reinterpret_cast<char*>(&on
), sizeof(on
)) < 0) {
510 const auto savedErrno
= errno
;
511 const auto errorMessage
= ToSBuf("cannot enable SO_REUSEPORT socket option when binding to ",
512 addr
, ": ", xstrerr(savedErrno
));
514 debugs(5, DBG_IMPORTANT
, "ERROR: " << errorMessage
);
516 throw TexcHere(errorMessage
);
521 if ((flags
& COMM_DOBIND_PORT_LATER
))
522 commSetBindAddressNoPort(new_socket
);
524 if (commBind(new_socket
, *AI
) != Comm::OK
) {
525 comm_close(new_socket
);
530 if (flags
& COMM_NONBLOCKING
)
531 if (commSetNonBlocking(new_socket
) == Comm::COMM_ERROR
) {
532 comm_close(new_socket
);
537 if (sock_type
== SOCK_STREAM
)
538 commSetTcpNoDelay(new_socket
);
542 if (Config
.tcpRcvBufsz
> 0 && sock_type
== SOCK_STREAM
)
543 commSetTcpRcvbuf(new_socket
, Config
.tcpRcvBufsz
);
549 comm_import_opened(const Comm::ConnectionPointer
&conn
,
554 assert(Comm::IsConnOpen(conn
));
557 comm_init_opened(conn
, note
, AI
);
559 if ((conn
->flags
& COMM_TRANSPARENT
))
560 fd_table
[conn
->fd
].flags
.transparent
= true;
562 if (conn
->flags
& COMM_NONBLOCKING
)
563 fd_table
[conn
->fd
].flags
.nonblocking
= true;
566 if (AI
->ai_socktype
== SOCK_STREAM
)
567 fd_table
[conn
->fd
].flags
.nodelay
= true;
570 /* no fd_table[fd].flags. updates needed for these conditions:
571 * if ((flags & COMM_REUSEADDR)) ...
572 * if ((flags & COMM_DOBIND) ...) ...
576 // XXX: now that raw-FD timeouts are only unset for pipes and files this SHOULD be a no-op.
577 // With handler already unset. Leaving this present until that can be verified for all code paths.
579 commUnsetFdTimeout(int fd
)
581 debugs(5, 3, "Remove timeout for FD " << fd
);
583 assert(fd
< Squid_MaxFD
);
584 fde
*F
= &fd_table
[fd
];
585 assert(F
->flags
.open
);
587 F
->timeoutHandler
= nullptr;
592 commSetConnTimeout(const Comm::ConnectionPointer
&conn
, time_t timeout
, AsyncCall::Pointer
&callback
)
594 debugs(5, 3, conn
<< " timeout " << timeout
);
595 assert(Comm::IsConnOpen(conn
));
596 assert(conn
->fd
< Squid_MaxFD
);
597 fde
*F
= &fd_table
[conn
->fd
];
598 assert(F
->flags
.open
);
601 F
->timeoutHandler
= nullptr;
604 if (callback
!= nullptr) {
605 typedef CommTimeoutCbParams Params
;
606 Params
¶ms
= GetCommParams
<Params
>(callback
);
608 F
->timeoutHandler
= callback
;
611 F
->timeout
= squid_curtime
+ timeout
;
616 commUnsetConnTimeout(const Comm::ConnectionPointer
&conn
)
618 debugs(5, 3, "Remove timeout for " << conn
);
619 AsyncCall::Pointer nil
;
620 commSetConnTimeout(conn
, -1, nil
);
624 * Connect socket FD to given remote address.
625 * If return value is an error flag (COMM_ERROR, ERR_CONNECT, ERR_PROTOCOL, etc.),
626 * then error code will also be returned in errno.
629 comm_connect_addr(int sock
, const Ip::Address
&address
)
631 Comm::Flag status
= Comm::OK
;
632 fde
*F
= &fd_table
[sock
];
636 struct addrinfo
*AI
= nullptr;
638 assert(address
.port() != 0);
640 debugs(5, 9, "connecting socket FD " << sock
<< " to " << address
<< " (want family: " << F
->sock_family
<< ")");
642 /* Handle IPv6 over IPv4-only socket case.
643 * this case must presently be handled here since the getAddrInfo asserts on bad mappings.
644 * NP: because commResetFD is private to ConnStateData we have to return an error and
645 * trust its handled properly.
647 if (F
->sock_family
== AF_INET
&& !address
.isIPv4()) {
649 return Comm::ERR_PROTOCOL
;
652 /* Handle IPv4 over IPv6-only socket case.
653 * This case is presently handled here as it's both a known case and it's
654 * uncertain what error will be returned by the IPv6 stack in such case. It's
655 * possible this will also be handled by the errno checks below after connect()
656 * but needs careful cross-platform verification, and verifying the address
657 * condition here is simple.
659 if (!F
->local_addr
.isIPv4() && address
.isIPv4()) {
661 return Comm::ERR_PROTOCOL
;
664 address
.getAddrInfo(AI
, F
->sock_family
);
666 /* Establish connection. */
669 if (!F
->flags
.called_connect
) {
670 F
->flags
.called_connect
= true;
671 ++ statCounter
.syscalls
.sock
.connects
;
674 if ((x
= connect(sock
, AI
->ai_addr
, AI
->ai_addrlen
)) < 0) {
676 debugs(5,5, "sock=" << sock
<< ", addrinfo(" <<
677 " flags=" << AI
->ai_flags
<<
678 ", family=" << AI
->ai_family
<<
679 ", socktype=" << AI
->ai_socktype
<<
680 ", protocol=" << AI
->ai_protocol
<<
681 ", &addr=" << AI
->ai_addr
<<
682 ", addrlen=" << AI
->ai_addrlen
<< " )");
683 debugs(5, 9, "connect FD " << sock
<< ": (" << x
<< ") " << xstrerr(xerrno
));
684 debugs(14,9, "connecting to: " << address
);
687 // XXX: ICAP code refuses callbacks during a pending comm_ call
688 // Async calls development will fix this.
690 xerrno
= EINPROGRESS
;
695 errlen
= sizeof(err
);
696 x
= getsockopt(sock
, SOL_SOCKET
, SO_ERROR
, &err
, &errlen
);
702 * Solaris 2.4's socket emulation doesn't allow you
703 * to determine the error from a failed non-blocking
704 * connect and just returns EPIPE. Create a fake
705 * error message for connect. -- fenner@parc.xerox.com
707 if (x
< 0 && xerrno
== EPIPE
)
714 Ip::Address::FreeAddr(AI
);
717 if (xerrno
== 0 || xerrno
== EISCONN
)
719 else if (ignoreErrno(xerrno
))
720 status
= Comm::INPROGRESS
;
721 else if (xerrno
== EAFNOSUPPORT
|| xerrno
== EINVAL
)
722 return Comm::ERR_PROTOCOL
;
724 return Comm::COMM_ERROR
;
726 address
.toStr(F
->ipaddr
, MAX_IPSTRLEN
);
728 F
->remote_port
= address
.port(); /* remote_port is HS */
730 if (status
== Comm::OK
) {
731 debugs(5, DBG_DATA
, "comm_connect_addr: FD " << sock
<< " connected to " << address
);
732 } else if (status
== Comm::INPROGRESS
) {
733 debugs(5, DBG_DATA
, "comm_connect_addr: FD " << sock
<< " connection pending");
741 commCallCloseHandlers(int fd
)
743 fde
*F
= &fd_table
[fd
];
744 debugs(5, 5, "commCallCloseHandlers: FD " << fd
);
746 while (F
->closeHandler
!= nullptr) {
747 AsyncCall::Pointer call
= F
->closeHandler
;
748 F
->closeHandler
= call
->Next();
749 call
->setNext(nullptr);
750 // If call is not canceled schedule it for execution else ignore it
751 if (!call
->canceled()) {
752 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call
);
753 // XXX: Without the following code, callback fd may be -1.
754 // typedef CommCloseCbParams Params;
755 // auto ¶ms = GetCommParams<Params>(call);
757 ScheduleCallHere(call
);
762 /// sets SO_LINGER socket(7) option
763 /// \param enabled -- whether linger will be active (sets linger::l_onoff)
765 commConfigureLinger(const int fd
, const OnOff enabled
)
767 struct linger l
= {};
768 l
.l_onoff
= (enabled
== OnOff::on
? 1 : 0);
769 l
.l_linger
= 0; // how long to linger for, in seconds
771 fd_table
[fd
].flags
.harshClosureRequested
= (l
.l_onoff
&& !l
.l_linger
); // close(2) sends TCP RST if true
773 if (setsockopt(fd
, SOL_SOCKET
, SO_LINGER
, reinterpret_cast<char*>(&l
), sizeof(l
)) < 0) {
774 const auto xerrno
= errno
;
775 debugs(50, DBG_CRITICAL
, "ERROR: Failed to set closure behavior (SO_LINGER) for FD " << fd
<< ": " << xstrerr(xerrno
));
780 * enable linger with time of 0 so that when the socket is
781 * closed, TCP generates a RESET
784 comm_reset_close(const Comm::ConnectionPointer
&conn
)
786 if (Comm::IsConnOpen(conn
)) {
787 commConfigureLinger(conn
->fd
, OnOff::on
);
788 debugs(5, 7, conn
->id
);
793 // Legacy close function.
795 old_comm_reset_close(int fd
)
798 commConfigureLinger(fd
, OnOff::on
);
804 commStartTlsClose(const int fd
)
806 Security::SessionSendGoodbye(fd_table
[fd
].ssl
);
810 comm_close_complete(const int fd
)
812 auto F
= &fd_table
[fd
];
814 F
->dynamicTlsContext
.reset();
815 fd_close(fd
); /* update fdstat */
818 ++ statCounter
.syscalls
.sock
.closes
;
820 /* When one connection closes, give accept() a chance, if need be */
821 CodeContext::Reset(); // exit FD-specific context
822 Comm::AcceptLimiter::Instance().kick();
826 * Close the socket fd.
828 * + call write handlers with ERR_CLOSING
829 * + call read handlers with ERR_CLOSING
830 * + call closing handlers
832 * A deferred reader has no Comm read handler mentioned above. To stay in sync,
833 * such a reader must register a Comm closing handler.
836 _comm_close(int fd
, char const *file
, int line
)
838 debugs(5, 3, "start closing FD " << fd
<< " by " << file
<< ":" << line
);
840 assert(fd
< Squid_MaxFD
);
842 fde
*F
= &fd_table
[fd
];
847 /* XXX: is this obsolete behind F->closing() ? */
848 if ( (shutting_down
|| reconfiguring
) && (!F
->flags
.open
|| F
->type
== FD_FILE
))
851 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
853 debugs(50, DBG_IMPORTANT
, "ERROR: Squid BUG #3556: FD " << fd
<< " is not an open socket.");
854 // XXX: do we need to run close(fd) or fd_close(fd) here?
858 assert(F
->type
!= FD_FILE
);
860 F
->flags
.close_request
= true;
862 // We have caller's context and fde::codeContext. In the unlikely event they
863 // differ, it is not clear which context is more applicable to this closure.
864 // For simplicity sake, we remain in the caller's context while still
865 // allowing individual advanced callbacks to overwrite it.
867 if (F
->ssl
&& !F
->flags
.harshClosureRequested
) {
868 const auto startCall
= asyncCall(5, 4, "commStartTlsClose",
869 callDialer(commStartTlsClose
, fd
));
870 ScheduleCallHere(startCall
);
873 // a half-closed fd may lack a reader, so we stop monitoring explicitly
874 if (commHasHalfClosedMonitor(fd
))
875 commStopHalfClosedMonitor(fd
);
876 commUnsetFdTimeout(fd
);
878 // notify read/write handlers after canceling select reservations, if any
879 if (COMMIO_FD_WRITECB(fd
)->active()) {
880 Comm::SetSelect(fd
, COMM_SELECT_WRITE
, nullptr, nullptr, 0);
881 COMMIO_FD_WRITECB(fd
)->finish(Comm::ERR_CLOSING
, 0);
883 if (COMMIO_FD_READCB(fd
)->active()) {
884 Comm::SetSelect(fd
, COMM_SELECT_READ
, nullptr, nullptr, 0);
885 COMMIO_FD_READCB(fd
)->finish(Comm::ERR_CLOSING
, 0);
889 if (BandwidthBucket
*bucket
= BandwidthBucket::SelectBucket(F
)) {
890 if (bucket
->selectWaiting
)
891 bucket
->onFdClosed();
895 commCallCloseHandlers(fd
);
897 comm_empty_os_read_buffers(fd
);
899 // must use async call to wait for all callbacks
900 // scheduled before comm_close() to finish
901 const auto completeCall
= asyncCall(5, 4, "comm_close_complete",
902 callDialer(comm_close_complete
, fd
));
903 ScheduleCallHere(completeCall
);
906 /* Send a udp datagram to specified TO_ADDR. */
908 comm_udp_sendto(int fd
,
909 const Ip::Address
&to_addr
,
913 ++ statCounter
.syscalls
.sock
.sendtos
;
915 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr
<<
916 " using FD " << fd
<< " using Port " << comm_local_port(fd
) );
918 struct addrinfo
*AI
= nullptr;
919 to_addr
.getAddrInfo(AI
, fd_table
[fd
].sock_family
);
920 int x
= sendto(fd
, buf
, len
, 0, AI
->ai_addr
, AI
->ai_addrlen
);
922 Ip::Address::FreeAddr(AI
);
925 errno
= xerrno
; // restore for caller to use
930 if (ECONNREFUSED
!= xerrno
)
932 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ", (family=" << fd_table
[fd
].sock_family
<< ") " << to_addr
<< ": " << xstrerr(xerrno
));
934 errno
= xerrno
; // restore for caller to use
935 return Comm::COMM_ERROR
;
939 comm_add_close_handler(int fd
, CLCB
* handler
, void *data
)
941 debugs(5, 5, "comm_add_close_handler: FD " << fd
<< ", handler=" <<
942 handler
<< ", data=" << data
);
944 AsyncCall::Pointer call
=commCbCall(5,4, "SomeCloseHandler",
945 CommCloseCbPtrFun(handler
, data
));
946 comm_add_close_handler(fd
, call
);
951 comm_add_close_handler(int fd
, AsyncCall::Pointer
&call
)
953 debugs(5, 5, "comm_add_close_handler: FD " << fd
<< ", AsyncCall=" << call
);
955 /*TODO:Check for a similar scheduled AsyncCall*/
956 // for (c = fd_table[fd].closeHandler; c; c = c->next)
957 // assert(c->handler != handler || c->data != data);
959 // TODO: Consider enhancing AsyncCallList to support random-access close
960 // handlers, perhaps after upgrading the remaining legacy CLCB handlers.
961 call
->setNext(fd_table
[fd
].closeHandler
);
963 fd_table
[fd
].closeHandler
= call
;
966 // remove function-based close handler
968 comm_remove_close_handler(int fd
, CLCB
* handler
, void *data
)
971 /* Find handler in list */
972 debugs(5, 5, "comm_remove_close_handler: FD " << fd
<< ", handler=" <<
973 handler
<< ", data=" << data
);
975 AsyncCall::Pointer p
, prev
= nullptr;
976 for (p
= fd_table
[fd
].closeHandler
; p
!= nullptr; prev
= p
, p
= p
->Next()) {
977 typedef CommCbFunPtrCallT
<CommCloseCbPtrFun
> Call
;
978 const Call
*call
= dynamic_cast<const Call
*>(p
.getRaw());
979 if (!call
) // method callbacks have their own comm_remove_close_handler
982 typedef CommCloseCbParams Params
;
983 const Params
¶ms
= GetCommParams
<Params
>(p
);
984 if (call
->dialer
.handler
== handler
&& params
.data
== data
)
985 break; /* This is our handler */
988 // comm_close removes all close handlers so our handler may be gone
990 p
->dequeue(fd_table
[fd
].closeHandler
, prev
);
991 p
->cancel("comm_remove_close_handler");
995 // remove method-based close handler
997 comm_remove_close_handler(int fd
, AsyncCall::Pointer
&call
)
1000 debugs(5, 5, "comm_remove_close_handler: FD " << fd
<< ", AsyncCall=" << call
);
1002 // comm_close removes all close handlers so our handler may be gone
1003 AsyncCall::Pointer p
, prev
= nullptr;
1004 for (p
= fd_table
[fd
].closeHandler
; p
!= nullptr && p
!= call
; prev
= p
, p
= p
->Next());
1007 p
->dequeue(fd_table
[fd
].closeHandler
, prev
);
1008 call
->cancel("comm_remove_close_handler");
1012 commSetReuseAddr(int fd
)
1015 if (setsockopt(fd
, SOL_SOCKET
, SO_REUSEADDR
, (char *) &on
, sizeof(on
)) < 0) {
1017 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1022 commSetTcpRcvbuf(int fd
, int size
)
1024 if (setsockopt(fd
, SOL_SOCKET
, SO_RCVBUF
, (char *) &size
, sizeof(size
)) < 0) {
1026 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ", SIZE " << size
<< ": " << xstrerr(xerrno
));
1028 if (setsockopt(fd
, SOL_SOCKET
, SO_SNDBUF
, (char *) &size
, sizeof(size
)) < 0) {
1030 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ", SIZE " << size
<< ": " << xstrerr(xerrno
));
1032 #ifdef TCP_WINDOW_CLAMP
1033 if (setsockopt(fd
, SOL_TCP
, TCP_WINDOW_CLAMP
, (char *) &size
, sizeof(size
)) < 0) {
1035 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ", SIZE " << size
<< ": " << xstrerr(xerrno
));
1041 commSetNonBlocking(int fd
)
1044 int nonblocking
= TRUE
;
1046 if (ioctl(fd
, FIONBIO
, &nonblocking
) < 0) {
1048 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
) << " " << fd_table
[fd
].type
);
1049 return Comm::COMM_ERROR
;
1056 if ((flags
= fcntl(fd
, F_GETFL
, dummy
)) < 0) {
1058 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": fcntl F_GETFL: " << xstrerr(xerrno
));
1059 return Comm::COMM_ERROR
;
1062 if (fcntl(fd
, F_SETFL
, flags
| SQUID_NONBLOCK
) < 0) {
1064 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1065 return Comm::COMM_ERROR
;
1069 fd_table
[fd
].flags
.nonblocking
= true;
1074 commUnsetNonBlocking(int fd
)
1077 int nonblocking
= FALSE
;
1079 if (ioctlsocket(fd
, FIONBIO
, (unsigned long *) &nonblocking
) < 0) {
1084 if ((flags
= fcntl(fd
, F_GETFL
, dummy
)) < 0) {
1086 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": fcntl F_GETFL: " << xstrerr(xerrno
));
1087 return Comm::COMM_ERROR
;
1090 if (fcntl(fd
, F_SETFL
, flags
& (~SQUID_NONBLOCK
)) < 0) {
1093 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1094 return Comm::COMM_ERROR
;
1097 fd_table
[fd
].flags
.nonblocking
= false;
1102 commSetCloseOnExec(int fd
)
1108 if ((flags
= fcntl(fd
, F_GETFD
, dummy
)) < 0) {
1110 debugs(50, DBG_CRITICAL
, MYNAME
<< "FD " << fd
<< ": fcntl F_GETFD: " << xstrerr(xerrno
));
1114 if (fcntl(fd
, F_SETFD
, flags
| FD_CLOEXEC
) < 0) {
1116 debugs(50, DBG_CRITICAL
, "ERROR: " << MYNAME
<< "FD " << fd
<< ": set close-on-exec failed: " << xstrerr(xerrno
));
1123 commSetTcpNoDelay(int fd
)
1127 if (setsockopt(fd
, IPPROTO_TCP
, TCP_NODELAY
, (char *) &on
, sizeof(on
)) < 0) {
1129 debugs(50, DBG_IMPORTANT
, MYNAME
<< "FD " << fd
<< ": " << xstrerr(xerrno
));
1132 fd_table
[fd
].flags
.nodelay
= true;
1142 /* XXX account fd_table */
1143 /* Keep a few file descriptors free so that we don't run out of FD's
1144 * after accepting a client but before it opens a socket or a file.
1145 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1146 RESERVED_FD
= min(100, Squid_MaxFD
/ 4);
1148 TheHalfClosed
= new DescriptorSet
;
1150 /* setup the select loop module */
1151 Comm::SelectLoopInit();
1157 delete TheHalfClosed
;
1158 TheHalfClosed
= nullptr;
1162 // called when the queue is done waiting for the client bucket to fill
1164 commHandleWriteHelper(void * data
)
1166 CommQuotaQueue
*queue
= static_cast<CommQuotaQueue
*>(data
);
1169 ClientInfo
*clientInfo
= queue
->clientInfo
;
1170 // ClientInfo invalidates queue if freed, so if we got here through,
1171 // evenAdd cbdata protections, everything should be valid and consistent
1173 assert(clientInfo
->hasQueue());
1174 assert(clientInfo
->hasQueue(queue
));
1175 assert(clientInfo
->eventWaiting
);
1176 clientInfo
->eventWaiting
= false;
1179 clientInfo
->writeOrDequeue();
1180 if (clientInfo
->selectWaiting
)
1182 } while (clientInfo
->hasQueue());
1184 debugs(77, 3, "emptied queue");
1188 ClientInfo::writeOrDequeue()
1190 assert(!selectWaiting
);
1191 const auto head
= quotaPeekFd();
1192 const auto &headFde
= fd_table
[head
];
1193 CallBack(headFde
.codeContext
, [&] {
1194 const auto ccb
= COMMIO_FD_WRITECB(head
);
1195 // check that the head descriptor is still relevant
1196 if (headFde
.clientInfo
== this &&
1197 quotaPeekReserv() == ccb
->quotaQueueReserv
&&
1198 !headFde
.closing()) {
1200 // wait for the head descriptor to become ready for writing
1201 Comm::SetSelect(head
, COMM_SELECT_WRITE
, Comm::HandleWrite
, ccb
, 0);
1202 selectWaiting
= true;
1204 quotaDequeue(); // remove the no longer relevant descriptor
1210 ClientInfo::hasQueue() const
1213 return !quotaQueue
->empty();
1217 ClientInfo::hasQueue(const CommQuotaQueue
*q
) const
1220 return quotaQueue
== q
;
1223 /// returns the first descriptor to be dequeued
1225 ClientInfo::quotaPeekFd() const
1228 return quotaQueue
->front();
1231 /// returns the reservation ID of the first descriptor to be dequeued
1233 ClientInfo::quotaPeekReserv() const
1236 return quotaQueue
->outs
+ 1;
1239 /// queues a given fd, creating the queue if necessary; returns reservation ID
1241 ClientInfo::quotaEnqueue(int fd
)
1244 return quotaQueue
->enqueue(fd
);
1247 /// removes queue head
1249 ClientInfo::quotaDequeue()
1252 quotaQueue
->dequeue();
1256 ClientInfo::kickQuotaQueue()
1258 if (!eventWaiting
&& !selectWaiting
&& hasQueue()) {
1259 // wait at least a second if the bucket is empty
1260 const double delay
= (bucketLevel
< 1.0) ? 1.0 : 0.0;
1261 eventAdd("commHandleWriteHelper", &commHandleWriteHelper
,
1262 quotaQueue
, delay
, 0, true);
1263 eventWaiting
= true;
1267 /// calculates how much to write for a single dequeued client
1271 /* If we have multiple clients and give full bucketSize to each client then
1272 * clt1 may often get a lot more because clt1->clt2 time distance in the
1273 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1274 * We divide quota evenly to be more fair. */
1276 if (!rationedCount
) {
1277 rationedCount
= quotaQueue
->size() + 1;
1279 // The delay in ration recalculation _temporary_ deprives clients from
1280 // bytes that should have trickled in while rationedCount was positive.
1283 // Rounding errors do not accumulate here, but we round down to avoid
1284 // negative bucket sizes after write with rationedCount=1.
1285 rationedQuota
= static_cast<int>(floor(bucketLevel
/rationedCount
));
1286 debugs(77,5, "new rationedQuota: " << rationedQuota
<<
1287 '*' << rationedCount
);
1291 debugs(77,7, "rationedQuota: " << rationedQuota
<<
1292 " rations remaining: " << rationedCount
);
1294 // update 'last seen' time to prevent clientdb GC from dropping us
1295 last_seen
= squid_curtime
;
1296 return rationedQuota
;
1300 ClientInfo::applyQuota(int &nleft
, Comm::IoCallback
*state
)
1303 assert(quotaPeekFd() == state
->conn
->fd
);
1304 quotaDequeue(); // we will write or requeue below
1305 if (nleft
> 0 && !BandwidthBucket::applyQuota(nleft
, state
)) {
1306 state
->quotaQueueReserv
= quotaEnqueue(state
->conn
->fd
);
1314 ClientInfo::scheduleWrite(Comm::IoCallback
*state
)
1316 if (writeLimitingActive
) {
1317 state
->quotaQueueReserv
= quotaEnqueue(state
->conn
->fd
);
1323 ClientInfo::onFdClosed()
1325 BandwidthBucket::onFdClosed();
1326 // kick queue or it will get stuck as commWriteHandle is not called
1331 ClientInfo::reduceBucket(const int len
)
1334 BandwidthBucket::reduceBucket(len
);
1335 // even if we wrote nothing, we were served; give others a chance
1340 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit
, const double anInitialBurst
, const double aHighWatermark
)
1342 debugs(77,5, "Write limits for " << (const char*)key
<<
1343 " speed=" << aWriteSpeedLimit
<< " burst=" << anInitialBurst
<<
1344 " highwatermark=" << aHighWatermark
);
1346 // set or possibly update traffic shaping parameters
1347 writeLimitingActive
= true;
1348 writeSpeedLimit
= aWriteSpeedLimit
;
1349 bucketSizeLimit
= aHighWatermark
;
1351 // but some members should only be set once for a newly activated bucket
1352 if (firstTimeConnection
) {
1353 firstTimeConnection
= false;
1355 assert(!selectWaiting
);
1356 assert(!quotaQueue
);
1357 quotaQueue
= new CommQuotaQueue(this);
1359 bucketLevel
= anInitialBurst
;
1360 prevTime
= current_dtime
;
1364 CommQuotaQueue::CommQuotaQueue(ClientInfo
*info
): clientInfo(info
),
1370 CommQuotaQueue::~CommQuotaQueue()
1372 assert(!clientInfo
); // ClientInfo should clear this before destroying us
1375 /// places the given fd at the end of the queue; returns reservation ID
1377 CommQuotaQueue::enqueue(int fd
)
1379 debugs(77,5, "clt" << (const char*)clientInfo
->key
<<
1380 ": FD " << fd
<< " with qqid" << (ins
+1) << ' ' << fds
.size());
1382 fd_table
[fd
].codeContext
= CodeContext::Current();
1386 /// removes queue head
1388 CommQuotaQueue::dequeue()
1390 assert(!fds
.empty());
1391 debugs(77,5, "clt" << (const char*)clientInfo
->key
<<
1392 ": FD " << fds
.front() << " with qqid" << (outs
+1) << ' ' <<
1397 #endif /* USE_DELAY_POOLS */
1400 * hm, this might be too general-purpose for all the places we'd
1404 ignoreErrno(int ierrno
)
1411 #if EAGAIN != EWOULDBLOCK
1434 commCloseAllSockets(void)
1439 for (fd
= 0; fd
<= Biggest_FD
; ++fd
) {
1445 if (F
->type
!= FD_SOCKET
)
1448 if (F
->flags
.ipc
) /* don't close inter-process sockets */
1451 if (F
->timeoutHandler
!= nullptr) {
1452 AsyncCall::Pointer callback
= F
->timeoutHandler
;
1453 F
->timeoutHandler
= nullptr;
1454 debugs(5, 5, "commCloseAllSockets: FD " << fd
<< ": Calling timeout handler");
1455 ScheduleCallHere(callback
);
1457 debugs(5, 5, "commCloseAllSockets: FD " << fd
<< ": calling comm_reset_close()");
1458 old_comm_reset_close(fd
);
1464 AlreadyTimedOut(fde
*F
)
1469 if (F
->timeout
== 0)
1472 if (F
->timeout
> squid_curtime
)
1479 writeTimedOut(int fd
)
1481 if (!COMMIO_FD_WRITECB(fd
)->active())
1484 if ((squid_curtime
- fd_table
[fd
].writeStart
) < Config
.Timeout
.write
)
1495 AsyncCall::Pointer callback
;
1497 for (fd
= 0; fd
<= Biggest_FD
; ++fd
) {
1500 if (writeTimedOut(fd
)) {
1501 // We have an active write callback and we are timed out
1502 CodeContext::Reset(F
->codeContext
);
1503 debugs(5, 5, "checkTimeouts: FD " << fd
<< " auto write timeout");
1504 Comm::SetSelect(fd
, COMM_SELECT_WRITE
, nullptr, nullptr, 0);
1505 COMMIO_FD_WRITECB(fd
)->finish(Comm::COMM_ERROR
, ETIMEDOUT
);
1506 CodeContext::Reset();
1509 } else if (F
->writeQuotaHandler
!= nullptr && COMMIO_FD_WRITECB(fd
)->conn
!= nullptr) {
1510 // TODO: Move and extract quota() call to place it inside F->codeContext.
1511 if (!F
->writeQuotaHandler
->selectWaiting
&& F
->writeQuotaHandler
->quota() && !F
->closing()) {
1512 CodeContext::Reset(F
->codeContext
);
1513 F
->writeQuotaHandler
->selectWaiting
= true;
1514 Comm::SetSelect(fd
, COMM_SELECT_WRITE
, Comm::HandleWrite
, COMMIO_FD_WRITECB(fd
), 0);
1515 CodeContext::Reset();
1520 else if (AlreadyTimedOut(F
))
1523 CodeContext::Reset(F
->codeContext
);
1524 debugs(5, 5, "checkTimeouts: FD " << fd
<< " Expired");
1526 if (F
->timeoutHandler
!= nullptr) {
1527 debugs(5, 5, "checkTimeouts: FD " << fd
<< ": Call timeout handler");
1528 callback
= F
->timeoutHandler
;
1529 F
->timeoutHandler
= nullptr;
1530 ScheduleCallHere(callback
);
1532 debugs(5, 5, "checkTimeouts: FD " << fd
<< ": Forcing comm_close()");
1536 CodeContext::Reset();
1540 /// Start waiting for a possibly half-closed connection to close
1541 // by scheduling a read callback to a monitoring handler that
1542 // will close the connection on read errors.
1544 commStartHalfClosedMonitor(int fd
)
1546 debugs(5, 5, "adding FD " << fd
<< " to " << *TheHalfClosed
);
1547 assert(isOpen(fd
) && !commHasHalfClosedMonitor(fd
));
1548 (void)TheHalfClosed
->add(fd
); // could also assert the result
1549 fd_table
[fd
].codeContext
= CodeContext::Current();
1550 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1555 commPlanHalfClosedCheck()
1557 if (!WillCheckHalfClosed
&& !TheHalfClosed
->empty()) {
1558 eventAdd("commHalfClosedCheck", &commHalfClosedCheck
, nullptr, 1.0, 1);
1559 WillCheckHalfClosed
= true;
1563 /// iterates over all descriptors that may need half-closed tests and
1564 /// calls comm_read for those that do; re-schedules the check if needed
1567 commHalfClosedCheck(void *)
1569 debugs(5, 5, "checking " << *TheHalfClosed
);
1571 typedef DescriptorSet::const_iterator DSCI
;
1572 const DSCI end
= TheHalfClosed
->end();
1573 for (DSCI i
= TheHalfClosed
->begin(); i
!= end
; ++i
) {
1574 Comm::ConnectionPointer c
= new Comm::Connection
; // XXX: temporary. make HalfClosed a list of these.
1576 if (!fd_table
[c
->fd
].halfClosedReader
) { // not reading already
1577 CallBack(fd_table
[c
->fd
].codeContext
, [&c
] {
1578 AsyncCall::Pointer call
= commCbCall(5,4, "commHalfClosedReader",
1579 CommIoCbPtrFun(&commHalfClosedReader
, nullptr));
1580 Comm::Read(c
, call
);
1581 fd_table
[c
->fd
].halfClosedReader
= call
;
1584 c
->fd
= -1; // XXX: temporary. prevent c replacement erase closing listed FD
1587 WillCheckHalfClosed
= false; // as far as we know
1588 commPlanHalfClosedCheck(); // may need to check again
1591 /// checks whether we are waiting for possibly half-closed connection to close
1592 // We are monitoring if the read handler for the fd is the monitoring handler.
1594 commHasHalfClosedMonitor(int fd
)
1596 return TheHalfClosed
->has(fd
);
1599 /// stop waiting for possibly half-closed connection to close
1601 commStopHalfClosedMonitor(int const fd
)
1603 debugs(5, 5, "removing FD " << fd
<< " from " << *TheHalfClosed
);
1605 // cancel the read if one was scheduled
1606 AsyncCall::Pointer reader
= fd_table
[fd
].halfClosedReader
;
1607 if (reader
!= nullptr)
1608 Comm::ReadCancel(fd
, reader
);
1609 fd_table
[fd
].halfClosedReader
= nullptr;
1611 TheHalfClosed
->del(fd
);
1614 /// I/O handler for the possibly half-closed connection monitoring code
1616 commHalfClosedReader(const Comm::ConnectionPointer
&conn
, char *, size_t size
, Comm::Flag flag
, int, void *)
1618 // there cannot be more data coming in on half-closed connections
1620 assert(conn
!= nullptr);
1621 assert(commHasHalfClosedMonitor(conn
->fd
)); // or we would have canceled the read
1623 fd_table
[conn
->fd
].halfClosedReader
= nullptr; // done reading, for now
1625 // nothing to do if fd is being closed
1626 if (flag
== Comm::ERR_CLOSING
)
1629 // if read failed, close the connection
1630 if (flag
!= Comm::OK
) {
1631 debugs(5, 3, "closing " << conn
);
1636 // continue waiting for close or error
1637 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1641 CommSelectEngine::checkEvents(int timeout
)
1643 static time_t last_timeout
= 0;
1645 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
1646 if (squid_curtime
> last_timeout
) {
1647 last_timeout
= squid_curtime
;
1651 switch (Comm::DoSelect(timeout
)) {
1660 case Comm::SHUTDOWN
:
1663 case Comm::COMM_ERROR
:
1667 fatal_dump("comm.cc: Internal error -- this should never happen.");
1672 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
1674 comm_open_uds(int sock_type
,
1676 struct sockaddr_un
* addr
,
1679 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
1683 /* Create socket for accepting new connections. */
1684 ++ statCounter
.syscalls
.sock
.sockets
;
1686 /* Setup the socket addrinfo details for use */
1689 AI
.ai_family
= PF_UNIX
;
1690 AI
.ai_socktype
= sock_type
;
1691 AI
.ai_protocol
= proto
;
1692 AI
.ai_addrlen
= SUN_LEN(addr
);
1693 AI
.ai_addr
= (sockaddr
*)addr
;
1694 AI
.ai_canonname
= nullptr;
1695 AI
.ai_next
= nullptr;
1697 debugs(50, 3, "Attempt open socket for: " << addr
->sun_path
);
1699 if ((new_socket
= socket(AI
.ai_family
, AI
.ai_socktype
, AI
.ai_protocol
)) < 0) {
1701 /* Increase the number of reserved fd's if calls to socket()
1702 * are failing because the open file table is full. This
1703 * limits the number of simultaneous clients */
1705 if (limitError(xerrno
)) {
1706 debugs(50, DBG_IMPORTANT
, MYNAME
<< "socket failure: " << xstrerr(xerrno
));
1709 debugs(50, DBG_CRITICAL
, MYNAME
<< "socket failure: " << xstrerr(xerrno
));
1714 debugs(50, 3, "Opened UDS FD " << new_socket
<< " : family=" << AI
.ai_family
<< ", type=" << AI
.ai_socktype
<< ", protocol=" << AI
.ai_protocol
);
1717 debugs(50, 5, "FD " << new_socket
<< " is a new socket");
1719 assert(!isOpen(new_socket
));
1720 fd_open(new_socket
, FD_MSGHDR
, addr
->sun_path
);
1722 fd_table
[new_socket
].sock_family
= AI
.ai_family
;
1724 if (!(flags
& COMM_NOCLOEXEC
))
1725 commSetCloseOnExec(new_socket
);
1727 if (flags
& COMM_REUSEADDR
)
1728 commSetReuseAddr(new_socket
);
1730 if (flags
& COMM_NONBLOCKING
) {
1731 if (commSetNonBlocking(new_socket
) != Comm::OK
) {
1732 comm_close(new_socket
);
1737 if (flags
& COMM_DOBIND
) {
1738 if (commBind(new_socket
, AI
) != Comm::OK
) {
1739 comm_close(new_socket
);
1745 if (sock_type
== SOCK_STREAM
)
1746 commSetTcpNoDelay(new_socket
);
1750 if (Config
.tcpRcvBufsz
> 0 && sock_type
== SOCK_STREAM
)
1751 commSetTcpRcvbuf(new_socket
, Config
.tcpRcvBufsz
);