1 /*
2 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 05 Socket Functions */
10
11 #include "squid.h"
12 #include "ClientInfo.h"
13 #include "comm/AcceptLimiter.h"
14 #include "comm/comm_internal.h"
15 #include "comm/Connection.h"
16 #include "comm/IoCallback.h"
17 #include "comm/Loops.h"
18 #include "comm/Read.h"
19 #include "comm/TcpAcceptor.h"
20 #include "comm/Write.h"
21 #include "CommRead.h"
22 #include "compat/cmsg.h"
23 #include "DescriptorSet.h"
24 #include "event.h"
25 #include "fd.h"
26 #include "fde.h"
27 #include "globals.h"
28 #include "icmp/net_db.h"
29 #include "ip/Intercept.h"
30 #include "ip/QosConfig.h"
31 #include "ip/tools.h"
32 #include "pconn.h"
33 #include "profiler/Profiler.h"
34 #include "sbuf/SBuf.h"
35 #include "SquidConfig.h"
36 #include "StatCounters.h"
37 #include "StoreIOBuffer.h"
38 #include "tools.h"
39
40 #if USE_OPENSSL
41 #include "ssl/support.h"
42 #endif
43
44 #include <cerrno>
45 #include <cmath>
46 #if _SQUID_CYGWIN_
47 #include <sys/ioctl.h>
48 #endif
49 #ifdef HAVE_NETINET_TCP_H
50 #include <netinet/tcp.h>
51 #endif
52 #if HAVE_SYS_UN_H
53 #include <sys/un.h>
54 #endif
55
56 /*
57 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
58 */
59
60 static IOCB commHalfClosedReader;
61 static void comm_init_opened(const Comm::ConnectionPointer &conn, const char *note, struct addrinfo *AI);
62 static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
63
64 #if USE_DELAY_POOLS
65 CBDATA_CLASS_INIT(CommQuotaQueue);
66
67 static void commHandleWriteHelper(void * data);
68 #endif
69
70 /* STATIC */
71
72 static DescriptorSet *TheHalfClosed = NULL; /// the set of half-closed FDs
73 static bool WillCheckHalfClosed = false; /// true if check is scheduled
74 static EVH commHalfClosedCheck;
75 static void commPlanHalfClosedCheck();
76
77 static Comm::Flag commBind(int s, struct addrinfo &);
78 static void commSetReuseAddr(int);
79 static void commSetNoLinger(int);
80 #ifdef TCP_NODELAY
81 static void commSetTcpNoDelay(int);
82 #endif
83 static void commSetTcpRcvbuf(int, int);
84
85 bool
86 isOpen(const int fd)
87 {
88 return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
89 }
90
91 /**
92 * Empty the read buffers
93 *
94 * This is a magical routine that empties the read buffers.
95 * On some platforms (Linux), if a buffer still has data in it when
96 * you call close(), the socket will hang and take quite a while
97 * to time out.
98 */
99 static void
100 comm_empty_os_read_buffers(int fd)
101 {
102 #if _SQUID_LINUX_
103 #if USE_OPENSSL
104 // Bug 4146: SSL-Bump BIO does not release sockets on close.
105 if (fd_table[fd].ssl)
106 return;
107 #endif
108
109 /* prevent those nasty RST packets */
110 char buf[SQUID_TCP_SO_RCVBUF];
111 if (fd_table[fd].flags.nonblocking && fd_table[fd].type != FD_MSGHDR) {
112 while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
113 }
114 #endif
115 }
116
117 /**
118 * Synchronous wrapper around UDP socket functions.
119 */
120 int
121 comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
122 {
123 ++ statCounter.syscalls.sock.recvfroms;
124 debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
125 struct addrinfo *AI = NULL;
126 Ip::Address::InitAddr(AI);
127 int x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
128 from = *AI;
129 Ip::Address::FreeAddr(AI);
130 return x;
131 }
132
133 int
134 comm_udp_recv(int fd, void *buf, size_t len, int flags)
135 {
136 Ip::Address nul;
137 return comm_udp_recvfrom(fd, buf, len, flags, nul);
138 }
139
140 ssize_t
141 comm_udp_send(int s, const void *buf, size_t len, int flags)
142 {
143 return send(s, buf, len, flags);
144 }
145
146 bool
147 comm_has_incomplete_write(int fd)
148 {
149 assert(isOpen(fd) && COMMIO_FD_WRITECB(fd) != NULL);
150 return COMMIO_FD_WRITECB(fd)->active();
151 }
152
153 /**
154 * Queue a write. handler/handler_data are called when the write fully
155 * completes, on error, or on file descriptor close.
156 */
157
158 /* Return the local port associated with fd. */
159 unsigned short
160 comm_local_port(int fd)
161 {
162 Ip::Address temp;
163 struct addrinfo *addr = NULL;
164 fde *F = &fd_table[fd];
165
166 /* If the fd is closed already, just return */
167
168 if (!F->flags.open) {
169 debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
170 return 0;
171 }
172
173 if (F->local_addr.port())
174 return F->local_addr.port();
175
176 if (F->sock_family == AF_INET)
177 temp.setIPv4();
178
179 Ip::Address::InitAddr(addr);
180
181 if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
182 int xerrno = errno;
183 debugs(50, DBG_IMPORTANT, MYNAME << "Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerr(xerrno));
184 Ip::Address::FreeAddr(addr);
185 return 0;
186 }
187 temp = *addr;
188
189 Ip::Address::FreeAddr(addr);
190
191 if (F->local_addr.isAnyAddr()) {
192 /* save the whole local address, not just the port. */
193 F->local_addr = temp;
194 } else {
195 F->local_addr.port(temp.port());
196 }
197
198 debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.port() << "(family=" << F->sock_family << ")");
199 return F->local_addr.port();
200 }
201
202 static Comm::Flag
203 commBind(int s, struct addrinfo &inaddr)
204 {
205 ++ statCounter.syscalls.sock.binds;
206
207 if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
208 debugs(50, 6, "bind socket FD " << s << " to " << fd_table[s].local_addr);
209 return Comm::OK;
210 }
211 int xerrno = errno;
212 debugs(50, DBG_CRITICAL, MYNAME << "Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerr(xerrno));
213
214 return Comm::COMM_ERROR;
215 }
216
217 /**
218 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
219 * is OR of flags specified in comm.h. Defaults TOS
220 */
221 int
222 comm_open(int sock_type,
223 int proto,
224 Ip::Address &addr,
225 int flags,
226 const char *note)
227 {
228 return comm_openex(sock_type, proto, addr, flags, note);
229 }
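
/*
 * Usage sketch (not from the original file; names are illustrative): opening a
 * non-blocking TCP socket with comm_open(). listenAddr and the "HTTP Socket"
 * note are hypothetical placeholders.
 *
 *   Ip::Address listenAddr;          // caller fills in address and port
 *   listenAddr.port(3128);
 *   const int fd = comm_open(SOCK_STREAM, IPPROTO_TCP, listenAddr,
 *                            COMM_NONBLOCKING, "HTTP Socket");
 *   if (fd < 0)
 *       debugs(5, DBG_IMPORTANT, "example open failed: " << xstrerr(errno));
 */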
230
231 void
232 comm_open_listener(int sock_type,
233 int proto,
234 Comm::ConnectionPointer &conn,
235 const char *note)
236 {
237 /* all listener sockets require bind() */
238 conn->flags |= COMM_DOBIND;
239
240 /* attempt native enabled port. */
241 conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, note);
242 }
243
244 int
245 comm_open_listener(int sock_type,
246 int proto,
247 Ip::Address &addr,
248 int flags,
249 const char *note)
250 {
251 int sock = -1;
252
253 /* all listener sockets require bind() */
254 flags |= COMM_DOBIND;
255
256 /* attempt native enabled port. */
257 sock = comm_openex(sock_type, proto, addr, flags, note);
258
259 return sock;
260 }
261
262 static bool
263 limitError(int const anErrno)
264 {
265 return anErrno == ENFILE || anErrno == EMFILE;
266 }
267
268 void
269 comm_set_v6only(int fd, int tos)
270 {
271 #ifdef IPV6_V6ONLY
272 if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
273 int xerrno = errno;
274 debugs(50, DBG_IMPORTANT, MYNAME << "setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerr(xerrno));
275 }
276 #else
277 debugs(50, DBG_CRITICAL, MYNAME << "WARNING: setsockopt(IPV6_V6ONLY) not supported on this platform");
278 #endif /* sockopt */
279 }
280
281 /**
282 * Set the socket option required for TPROXY spoofing for:
283 * - Linux TPROXY v4 support,
284 * - OpenBSD divert-to support,
285 * - FreeBSD IPFW TPROXY v4 support.
286 */
287 void
288 comm_set_transparent(int fd)
289 {
290 #if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
291 # define soLevel SOL_IP
292 # define soFlag IP_TRANSPARENT
293 bool doneSuid = false;
294
295 #elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
296 # define soLevel SOL_SOCKET
297 # define soFlag SO_BINDANY
298 enter_suid();
299 bool doneSuid = true;
300
301 #elif defined(IP_BINDANY) // FreeBSD with IPFW
302 # define soLevel IPPROTO_IP
303 # define soFlag IP_BINDANY
304 enter_suid();
305 bool doneSuid = true;
306
307 #else
308 debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
309 #endif /* sockopt */
310
311 #if defined(soLevel) && defined(soFlag)
312 int tos = 1;
313 if (setsockopt(fd, soLevel, soFlag, (char *) &tos, sizeof(int)) < 0) {
314 int xerrno = errno;
315 debugs(50, DBG_IMPORTANT, MYNAME << "setsockopt(TPROXY) on FD " << fd << ": " << xstrerr(xerrno));
316 } else {
317 /* mark the socket as having transparent options */
318 fd_table[fd].flags.transparent = true;
319 }
320 if (doneSuid)
321 leave_suid();
322 #endif
323 }
324
325 /**
326 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
327 * is OR of flags specified in defines.h:COMM_*
328 */
329 int
330 comm_openex(int sock_type,
331 int proto,
332 Ip::Address &addr,
333 int flags,
334 const char *note)
335 {
336 int new_socket;
337 struct addrinfo *AI = NULL;
338
339 PROF_start(comm_open);
340 /* Create socket for accepting new connections. */
341 ++ statCounter.syscalls.sock.sockets;
342
343 /* Setup the socket addrinfo details for use */
344 addr.getAddrInfo(AI);
345 AI->ai_socktype = sock_type;
346 AI->ai_protocol = proto;
347
348 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );
349
350 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
351 int xerrno = errno;
352
353 /* under IPv6 there is the possibility IPv6 is present but disabled. */
354 /* try again as IPv4-native if possible */
355 if ( new_socket < 0 && Ip::EnableIpv6 && addr.isIPv6() && addr.setIPv4() ) {
356 /* attempt to open this IPv4-only. */
357 Ip::Address::FreeAddr(AI);
358 /* Setup the socket addrinfo details for use */
359 addr.getAddrInfo(AI);
360 AI->ai_socktype = sock_type;
361 AI->ai_protocol = proto;
362 debugs(50, 3, "Attempt fallback open socket for: " << addr );
363 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
364 debugs(50, 2, "attempt open " << note << " socket on: " << addr);
365 }
366
367 if (new_socket < 0) {
368 /* Increase the number of reserved fd's if calls to socket()
369 * are failing because the open file table is full. This
370 * limits the number of simultaneous clients */
371
372 if (limitError(xerrno)) {
373 debugs(50, DBG_IMPORTANT, MYNAME << "socket failure: " << xstrerr(xerrno));
374 fdAdjustReserved();
375 } else {
376 debugs(50, DBG_CRITICAL, MYNAME << "socket failure: " << xstrerr(xerrno));
377 }
378
379 Ip::Address::FreeAddr(AI);
380
381 PROF_stop(comm_open);
382 errno = xerrno; // restore for caller
383 return -1;
384 }
385
386 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
387 Comm::ConnectionPointer conn = new Comm::Connection;
388 conn->local = addr;
389 conn->fd = new_socket;
390
391 debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );
392
393 if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.isIPv6() )
394 comm_set_v6only(conn->fd, 1);
395
396 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
397 /* Other OS may have this administratively disabled for general use. Same deal. */
398 if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.isIPv6() )
399 comm_set_v6only(conn->fd, 0);
400
401 comm_init_opened(conn, note, AI);
402 new_socket = comm_apply_flags(conn->fd, addr, flags, AI);
403
404 Ip::Address::FreeAddr(AI);
405
406 PROF_stop(comm_open);
407
408 // XXX transition only. prevent conn from closing the new FD on function exit.
409 conn->fd = -1;
410 errno = xerrno; // restore for caller
411 return new_socket;
412 }
413
414 /// update FD tables after a local or remote (IPC) comm_openex();
415 void
416 comm_init_opened(const Comm::ConnectionPointer &conn,
417 const char *note,
418 struct addrinfo *AI)
419 {
420 assert(Comm::IsConnOpen(conn));
421 assert(AI);
422
423 /* update fdstat */
424 debugs(5, 5, HERE << conn << " is a new socket");
425
426 assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
427 fd_open(conn->fd, FD_SOCKET, note);
428
429 fde *F = &fd_table[conn->fd];
430 F->local_addr = conn->local;
431
432 F->sock_family = AI->ai_family;
433 }
434
435 /// apply flags after a local comm_open*() call;
436 /// returns new_socket or -1 on error
437 static int
438 comm_apply_flags(int new_socket,
439 Ip::Address &addr,
440 int flags,
441 struct addrinfo *AI)
442 {
443 assert(new_socket >= 0);
444 assert(AI);
445 const int sock_type = AI->ai_socktype;
446
447 if (!(flags & COMM_NOCLOEXEC))
448 commSetCloseOnExec(new_socket);
449
450 if ((flags & COMM_REUSEADDR))
451 commSetReuseAddr(new_socket);
452
453 if (addr.port() > (unsigned short) 0) {
454 #if _SQUID_WINDOWS_
455 if (sock_type != SOCK_DGRAM)
456 #endif
457 commSetNoLinger(new_socket);
458
459 if (opt_reuseaddr)
460 commSetReuseAddr(new_socket);
461 }
462
463 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
464 if ((flags & COMM_TRANSPARENT)) {
465 comm_set_transparent(new_socket);
466 }
467
468 if ( (flags & COMM_DOBIND) || addr.port() > 0 || !addr.isAnyAddr() ) {
469 if ( !(flags & COMM_DOBIND) && addr.isAnyAddr() )
470 debugs(5, DBG_IMPORTANT,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
471 if ( addr.isNoAddr() )
472 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");
473
474 if (commBind(new_socket, *AI) != Comm::OK) {
475 comm_close(new_socket);
476 return -1;
477 }
478 }
479
480 if (flags & COMM_NONBLOCKING)
481 if (commSetNonBlocking(new_socket) == Comm::COMM_ERROR) {
482 comm_close(new_socket);
483 return -1;
484 }
485
486 #ifdef TCP_NODELAY
487 if (sock_type == SOCK_STREAM)
488 commSetTcpNoDelay(new_socket);
489
490 #endif
491
492 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
493 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
494
495 return new_socket;
496 }
497
498 void
499 comm_import_opened(const Comm::ConnectionPointer &conn,
500 const char *note,
501 struct addrinfo *AI)
502 {
503 debugs(5, 2, HERE << conn);
504 assert(Comm::IsConnOpen(conn));
505 assert(AI);
506
507 comm_init_opened(conn, note, AI);
508
509 if (!(conn->flags & COMM_NOCLOEXEC))
510 fd_table[conn->fd].flags.close_on_exec = true;
511
512 if (conn->local.port() > (unsigned short) 0) {
513 #if _SQUID_WINDOWS_
514 if (AI->ai_socktype != SOCK_DGRAM)
515 #endif
516 fd_table[conn->fd].flags.nolinger = true;
517 }
518
519 if ((conn->flags & COMM_TRANSPARENT))
520 fd_table[conn->fd].flags.transparent = true;
521
522 if (conn->flags & COMM_NONBLOCKING)
523 fd_table[conn->fd].flags.nonblocking = true;
524
525 #ifdef TCP_NODELAY
526 if (AI->ai_socktype == SOCK_STREAM)
527 fd_table[conn->fd].flags.nodelay = true;
528 #endif
529
530 /* no fd_table[fd].flags. updates needed for these conditions:
531 * if ((flags & COMM_REUSEADDR)) ...
532 * if ((flags & COMM_DOBIND) ...) ...
533 */
534 }
535
536 // XXX: now that raw-FD timeouts are only unset for pipes and files, this SHOULD be a no-op
537 // with the handler already unset. Leaving this in place until that can be verified for all code paths.
538 void
539 commUnsetFdTimeout(int fd)
540 {
541 debugs(5, 3, HERE << "Remove timeout for FD " << fd);
542 assert(fd >= 0);
543 assert(fd < Squid_MaxFD);
544 fde *F = &fd_table[fd];
545 assert(F->flags.open);
546
547 F->timeoutHandler = NULL;
548 F->timeout = 0;
549 }
550
551 int
552 commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
553 {
554 debugs(5, 3, HERE << conn << " timeout " << timeout);
555 assert(Comm::IsConnOpen(conn));
556 assert(conn->fd < Squid_MaxFD);
557 fde *F = &fd_table[conn->fd];
558 assert(F->flags.open);
559
560 if (timeout < 0) {
561 F->timeoutHandler = NULL;
562 F->timeout = 0;
563 } else {
564 if (callback != NULL) {
565 typedef CommTimeoutCbParams Params;
566 Params &params = GetCommParams<Params>(callback);
567 params.conn = conn;
568 F->timeoutHandler = callback;
569 }
570
571 F->timeout = squid_curtime + (time_t) timeout;
572 }
573
574 return F->timeout;
575 }
576
577 int
578 commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
579 {
580 debugs(5, 3, HERE << "Remove timeout for " << conn);
581 AsyncCall::Pointer nil;
582 return commSetConnTimeout(conn, -1, nil);
583 }
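
/*
 * Usage sketch (assumption; exampleTimeoutHandler and its data are hypothetical):
 * attaching a timeout handler to an open connection. A negative timeout, as used
 * by commUnsetConnTimeout() above, clears both the handler and the deadline.
 *
 *   static void exampleTimeoutHandler(const CommTimeoutCbParams &io);
 *   ...
 *   AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "exampleTimeoutHandler",
 *       CommTimeoutCbPtrFun(exampleTimeoutHandler, nullptr));
 *   commSetConnTimeout(conn, Config.Timeout.connect, timeoutCall);
 */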
584
585 /**
586 * Connect socket FD to given remote address.
587 * If return value is an error flag (COMM_ERROR, ERR_CONNECT, ERR_PROTOCOL, etc.),
588 * then error code will also be returned in errno.
589 */
590 int
591 comm_connect_addr(int sock, const Ip::Address &address)
592 {
593 Comm::Flag status = Comm::OK;
594 fde *F = &fd_table[sock];
595 int x = 0;
596 int err = 0;
597 socklen_t errlen;
598 struct addrinfo *AI = NULL;
599 PROF_start(comm_connect_addr);
600
601 assert(address.port() != 0);
602
603 debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");
604
605 /* Handle IPv6 over IPv4-only socket case.
606 * this case must presently be handled here since the getAddrInfo asserts on bad mappings.
607 * NP: because commResetFD is private to ConnStateData we have to return an error and
608 * trust it is handled properly.
609 */
610 if (F->sock_family == AF_INET && !address.isIPv4()) {
611 errno = ENETUNREACH;
612 return Comm::ERR_PROTOCOL;
613 }
614
615 /* Handle IPv4 over IPv6-only socket case.
616 * This case is presently handled here as it's both a known case and it's
617 * uncertain what error will be returned by the IPv6 stack in such a case. It's
618 * possible this will also be handled by the errno checks below after connect(),
619 * but that needs careful cross-platform verification, and verifying the address
620 * condition here is simple.
621 */
622 if (!F->local_addr.isIPv4() && address.isIPv4()) {
623 errno = ENETUNREACH;
624 return Comm::ERR_PROTOCOL;
625 }
626
627 address.getAddrInfo(AI, F->sock_family);
628
629 /* Establish connection. */
630 int xerrno = 0;
631
632 if (!F->flags.called_connect) {
633 F->flags.called_connect = true;
634 ++ statCounter.syscalls.sock.connects;
635
636 errno = 0;
637 if ((x = connect(sock, AI->ai_addr, AI->ai_addrlen)) < 0) {
638 xerrno = errno;
639 debugs(5,5, "sock=" << sock << ", addrinfo(" <<
640 " flags=" << AI->ai_flags <<
641 ", family=" << AI->ai_family <<
642 ", socktype=" << AI->ai_socktype <<
643 ", protocol=" << AI->ai_protocol <<
644 ", &addr=" << AI->ai_addr <<
645 ", addrlen=" << AI->ai_addrlen << " )");
646 debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerr(xerrno));
647 debugs(14,9, "connecting to: " << address);
648
649 } else if (x == 0) {
650 // XXX: ICAP code refuses callbacks during a pending comm_ call
651 // Async calls development will fix this.
652 x = -1;
653 xerrno = EINPROGRESS;
654 }
655
656 } else {
657 errno = 0;
658 #if _SQUID_NEWSOS6_
659 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
660 if (connect(sock, AI->ai_addr, AI->ai_addrlen) < 0)
661 xerrno = errno;
662
663 if (xerrno == EINVAL) {
664 errlen = sizeof(err);
665 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
666 if (x >= 0)
667 xerrno = x;
668 }
669 #else
670 errlen = sizeof(err);
671 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
672 if (x == 0)
673 xerrno = err;
674
675 #if _SQUID_SOLARIS_
676 /*
677 * Solaris 2.4's socket emulation doesn't allow you
678 * to determine the error from a failed non-blocking
679 * connect and just returns EPIPE. Create a fake
680 * error message for connect. -- fenner@parc.xerox.com
681 */
682 if (x < 0 && xerrno == EPIPE)
683 xerrno = ENOTCONN;
684 else
685 xerrno = errno;
686 #endif
687 #endif
688 }
689
690 Ip::Address::FreeAddr(AI);
691
692 PROF_stop(comm_connect_addr);
693
694 errno = xerrno;
695 if (xerrno == 0 || xerrno == EISCONN)
696 status = Comm::OK;
697 else if (ignoreErrno(xerrno))
698 status = Comm::INPROGRESS;
699 else if (xerrno == EAFNOSUPPORT || xerrno == EINVAL)
700 return Comm::ERR_PROTOCOL;
701 else
702 return Comm::COMM_ERROR;
703
704 address.toStr(F->ipaddr, MAX_IPSTRLEN);
705
706 F->remote_port = address.port(); /* remote_port is HS */
707
708 if (status == Comm::OK) {
709 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connected to " << address);
710 } else if (status == Comm::INPROGRESS) {
711 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connection pending");
712 }
713
714 errno = xerrno;
715 return status;
716 }
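
/*
 * Usage sketch (assumption; peerAddr is hypothetical): non-blocking callers
 * typically treat Comm::INPROGRESS as "retry when writable" and anything else
 * that is not Comm::OK as fatal, since errno is set on error returns.
 *
 *   switch (comm_connect_addr(sock, peerAddr)) {
 *   case Comm::OK:
 *       break;                          // connected immediately
 *   case Comm::INPROGRESS:
 *       break;                          // wait for COMM_SELECT_WRITE, then retry
 *   default:
 *       comm_close(sock);               // ERR_PROTOCOL or COMM_ERROR
 *   }
 */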
717
718 void
719 commCallCloseHandlers(int fd)
720 {
721 fde *F = &fd_table[fd];
722 debugs(5, 5, "commCallCloseHandlers: FD " << fd);
723
724 while (F->closeHandler != NULL) {
725 AsyncCall::Pointer call = F->closeHandler;
726 F->closeHandler = call->Next();
727 call->setNext(NULL);
728 // If the call is not canceled, schedule it for execution; otherwise ignore it
729 if (!call->canceled()) {
730 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
731 ScheduleCallHere(call);
732 }
733 }
734 }
735
736 #if LINGERING_CLOSE
737 static void
738 commLingerClose(int fd, void *unused)
739 {
740 LOCAL_ARRAY(char, buf, 1024);
741 int n = FD_READ_METHOD(fd, buf, 1024);
742 if (n < 0) {
743 int xerrno = errno;
744 debugs(5, 3, "FD " << fd << " read: " << xstrerr(xerrno));
745 }
746 comm_close(fd);
747 }
748
749 static void
750 commLingerTimeout(const FdeCbParams &params)
751 {
752 debugs(5, 3, "commLingerTimeout: FD " << params.fd);
753 comm_close(params.fd);
754 }
755
756 /*
757 * Inspired by apache
758 */
759 void
760 comm_lingering_close(int fd)
761 {
762 Security::SessionSendGoodbye(fd_table[fd].ssl);
763
764 if (shutdown(fd, 1) < 0) {
765 comm_close(fd);
766 return;
767 }
768
769 fd_note(fd, "lingering close");
770 AsyncCall::Pointer call = commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout, NULL));
771
772 debugs(5, 3, HERE << "FD " << fd << " timeout 10");
773 assert(fd_table[fd].flags.open);
774 if (call != NULL) {
775 typedef FdeCbParams Params;
776 Params &params = GetCommParams<Params>(call);
777 params.fd = fd;
778 fd_table[fd].timeoutHandler = call;
779 fd_table[fd].timeout = squid_curtime + static_cast<time_t>(10);
780 }
781
782 Comm::SetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
783 }
784
785 #endif
786
787 /**
788 * enable linger with time of 0 so that when the socket is
789 * closed, TCP generates a RESET
790 */
791 void
792 comm_reset_close(const Comm::ConnectionPointer &conn)
793 {
794 struct linger L;
795 L.l_onoff = 1;
796 L.l_linger = 0;
797
798 if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
799 int xerrno = errno;
800 debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerr(xerrno));
801 }
802 conn->close();
803 }
804
805 // Legacy close function.
806 void
807 old_comm_reset_close(int fd)
808 {
809 struct linger L;
810 L.l_onoff = 1;
811 L.l_linger = 0;
812
813 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
814 int xerrno = errno;
815 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerr(xerrno));
816 }
817 comm_close(fd);
818 }
819
820 void
821 commStartTlsClose(const FdeCbParams &params)
822 {
823 Security::SessionSendGoodbye(fd_table[params.fd].ssl);
824 }
825
826 void
827 comm_close_complete(const FdeCbParams &params)
828 {
829 fde *F = &fd_table[params.fd];
830 F->ssl.reset();
831 F->dynamicTlsContext.reset();
832 fd_close(params.fd); /* update fdstat */
833 close(params.fd);
834
835 ++ statCounter.syscalls.sock.closes;
836
837 /* When one connection closes, give accept() a chance, if need be */
838 Comm::AcceptLimiter::Instance().kick();
839 }
840
841 /*
842 * Close the socket fd.
843 *
844 * + call write handlers with ERR_CLOSING
845 * + call read handlers with ERR_CLOSING
846 * + call closing handlers
847 *
848 * NOTE: Comm::ERR_CLOSING will NOT be called for CommReads' sitting in a
849 * DeferredReadManager.
850 */
851 void
852 _comm_close(int fd, char const *file, int line)
853 {
854 debugs(5, 3, "start closing FD " << fd << " by " << file << ":" << line);
855 assert(fd >= 0);
856 assert(fd < Squid_MaxFD);
857
858 fde *F = &fd_table[fd];
859
860 if (F->closing())
861 return;
862
863 /* XXX: is this obsolete behind F->closing() ? */
864 if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
865 return;
866
867 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
868 if (!isOpen(fd)) {
869 debugs(50, DBG_IMPORTANT, HERE << "BUG 3556: FD " << fd << " is not an open socket.");
870 // XXX: do we need to run close(fd) or fd_close(fd) here?
871 return;
872 }
873
874 assert(F->type != FD_FILE);
875
876 PROF_start(comm_close);
877
878 F->flags.close_request = true;
879
880 if (F->ssl) {
881 AsyncCall::Pointer startCall=commCbCall(5,4, "commStartTlsClose",
882 FdeCbPtrFun(commStartTlsClose, nullptr));
883 FdeCbParams &startParams = GetCommParams<FdeCbParams>(startCall);
884 startParams.fd = fd;
885 ScheduleCallHere(startCall);
886 }
887
888 // a half-closed fd may lack a reader, so we stop monitoring explicitly
889 if (commHasHalfClosedMonitor(fd))
890 commStopHalfClosedMonitor(fd);
891 commUnsetFdTimeout(fd);
892
893 // notify read/write handlers after canceling select reservations, if any
894 if (COMMIO_FD_WRITECB(fd)->active()) {
895 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
896 COMMIO_FD_WRITECB(fd)->finish(Comm::ERR_CLOSING, errno);
897 }
898 if (COMMIO_FD_READCB(fd)->active()) {
899 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
900 COMMIO_FD_READCB(fd)->finish(Comm::ERR_CLOSING, errno);
901 }
902
903 #if USE_DELAY_POOLS
904 if (BandwidthBucket *bucket = BandwidthBucket::SelectBucket(F)) {
905 if (bucket->selectWaiting)
906 bucket->onFdClosed();
907 }
908 #endif
909
910 commCallCloseHandlers(fd);
911
912 comm_empty_os_read_buffers(fd);
913
914 AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
915 FdeCbPtrFun(comm_close_complete, NULL));
916 FdeCbParams &completeParams = GetCommParams<FdeCbParams>(completeCall);
917 completeParams.fd = fd;
918 // must use async call to wait for all callbacks
919 // scheduled before comm_close() to finish
920 ScheduleCallHere(completeCall);
921
922 PROF_stop(comm_close);
923 }
924
925 /* Send a UDP datagram to the specified TO_ADDR. */
926 int
927 comm_udp_sendto(int fd,
928 const Ip::Address &to_addr,
929 const void *buf,
930 int len)
931 {
932 PROF_start(comm_udp_sendto);
933 ++ statCounter.syscalls.sock.sendtos;
934
935 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
936 " using FD " << fd << " using Port " << comm_local_port(fd) );
937
938 struct addrinfo *AI = NULL;
939 to_addr.getAddrInfo(AI, fd_table[fd].sock_family);
940 int x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
941 int xerrno = errno;
942 Ip::Address::FreeAddr(AI);
943
944 PROF_stop(comm_udp_sendto);
945
946 if (x >= 0) {
947 errno = xerrno; // restore for caller to use
948 return x;
949 }
950
951 #if _SQUID_LINUX_
952 if (ECONNREFUSED != xerrno)
953 #endif
954 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerr(xerrno));
955
956 errno = xerrno; // restore for caller to use
957 return Comm::COMM_ERROR;
958 }
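
/*
 * Usage sketch (assumption; dest and payload are illustrative): sending one
 * datagram and checking for the Comm::COMM_ERROR return, which leaves the
 * failure code in errno.
 *
 *   Ip::Address dest;                   // filled in by the caller
 *   static const char payload[] = "ping";
 *   if (comm_udp_sendto(fd, dest, payload, sizeof(payload)) == Comm::COMM_ERROR)
 *       debugs(50, DBG_IMPORTANT, "example send failed: " << xstrerr(errno));
 */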
959
960 AsyncCall::Pointer
961 comm_add_close_handler(int fd, CLCB * handler, void *data)
962 {
963 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
964 handler << ", data=" << data);
965
966 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
967 CommCloseCbPtrFun(handler, data));
968 comm_add_close_handler(fd, call);
969 return call;
970 }
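
/*
 * Usage sketch (assumption; exampleClosed and myData are hypothetical):
 * registering a function-based close handler and removing it later if the
 * descriptor is still open.
 *
 *   static void exampleClosed(const CommCloseCbParams &params);
 *   ...
 *   comm_add_close_handler(fd, &exampleClosed, myData);
 *   ...
 *   comm_remove_close_handler(fd, &exampleClosed, myData);
 */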
971
972 void
973 comm_add_close_handler(int fd, AsyncCall::Pointer &call)
974 {
975 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
976
977 /*TODO:Check for a similar scheduled AsyncCall*/
978 // for (c = fd_table[fd].closeHandler; c; c = c->next)
979 // assert(c->handler != handler || c->data != data);
980
981 call->setNext(fd_table[fd].closeHandler);
982
983 fd_table[fd].closeHandler = call;
984 }
985
986 // remove function-based close handler
987 void
988 comm_remove_close_handler(int fd, CLCB * handler, void *data)
989 {
990 assert(isOpen(fd));
991 /* Find handler in list */
992 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
993 handler << ", data=" << data);
994
995 AsyncCall::Pointer p, prev = NULL;
996 for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
997 typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
998 const Call *call = dynamic_cast<const Call*>(p.getRaw());
999 if (!call) // method callbacks have their own comm_remove_close_handler
1000 continue;
1001
1002 typedef CommCloseCbParams Params;
1003 const Params &params = GetCommParams<Params>(p);
1004 if (call->dialer.handler == handler && params.data == data)
1005 break; /* This is our handler */
1006 }
1007
1008 // comm_close removes all close handlers so our handler may be gone
1009 if (p != NULL) {
1010 p->dequeue(fd_table[fd].closeHandler, prev);
1011 p->cancel("comm_remove_close_handler");
1012 }
1013 }
1014
1015 // remove method-based close handler
1016 void
1017 comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1018 {
1019 assert(isOpen(fd));
1020 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1021
1022 // comm_close removes all close handlers so our handler may be gone
1023 AsyncCall::Pointer p, prev = NULL;
1024 for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());
1025
1026 if (p != NULL)
1027 p->dequeue(fd_table[fd].closeHandler, prev);
1028 call->cancel("comm_remove_close_handler");
1029 }
1030
1031 static void
1032 commSetNoLinger(int fd)
1033 {
1034
1035 struct linger L;
1036 L.l_onoff = 0; /* off */
1037 L.l_linger = 0;
1038
1039 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
1040 int xerrno = errno;
1041 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1042 }
1043 fd_table[fd].flags.nolinger = true;
1044 }
1045
1046 static void
1047 commSetReuseAddr(int fd)
1048 {
1049 int on = 1;
1050 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0) {
1051 int xerrno = errno;
1052 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1053 }
1054 }
1055
1056 static void
1057 commSetTcpRcvbuf(int fd, int size)
1058 {
1059 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0) {
1060 int xerrno = errno;
1061 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1062 }
1063 if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0) {
1064 int xerrno = errno;
1065 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1066 }
1067 #ifdef TCP_WINDOW_CLAMP
1068 if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0) {
1069 int xerrno = errno;
1070 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1071 }
1072 #endif
1073 }
1074
1075 int
1076 commSetNonBlocking(int fd)
1077 {
1078 #if _SQUID_WINDOWS_
1079 int nonblocking = TRUE;
1080
1081 if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
1082 int xerrno = errno;
1083 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno) << " " << fd_table[fd].type);
1084 return Comm::COMM_ERROR;
1085 }
1086
1087 #else
1088 int flags;
1089 int dummy = 0;
1090
1091 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1092 int xerrno = errno;
1093 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFL: " << xstrerr(xerrno));
1094 return Comm::COMM_ERROR;
1095 }
1096
1097 if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
1098 int xerrno = errno;
1099 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1100 return Comm::COMM_ERROR;
1101 }
1102 #endif
1103
1104 fd_table[fd].flags.nonblocking = true;
1105 return 0;
1106 }
1107
1108 int
1109 commUnsetNonBlocking(int fd)
1110 {
1111 #if _SQUID_WINDOWS_
1112 int nonblocking = FALSE;
1113
1114 if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
1115 #else
1116 int flags;
1117 int dummy = 0;
1118
1119 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1120 int xerrno = errno;
1121 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFL: " << xstrerr(xerrno));
1122 return Comm::COMM_ERROR;
1123 }
1124
1125 if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
1126 #endif
1127 int xerrno = errno;
1128 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1129 return Comm::COMM_ERROR;
1130 }
1131
1132 fd_table[fd].flags.nonblocking = false;
1133 return 0;
1134 }
1135
1136 void
1137 commSetCloseOnExec(int fd)
1138 {
1139 #ifdef FD_CLOEXEC
1140 int flags;
1141 int dummy = 0;
1142
1143 if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1144 int xerrno = errno;
1145 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFD: " << xstrerr(xerrno));
1146 return;
1147 }
1148
1149 if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0) {
1150 int xerrno = errno;
1151 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": set close-on-exec failed: " << xstrerr(xerrno));
1152 }
1153
1154 fd_table[fd].flags.close_on_exec = true;
1155
1156 #endif
1157 }
1158
1159 #ifdef TCP_NODELAY
1160 static void
1161 commSetTcpNoDelay(int fd)
1162 {
1163 int on = 1;
1164
1165 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0) {
1166 int xerrno = errno;
1167 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1168 }
1169
1170 fd_table[fd].flags.nodelay = true;
1171 }
1172
1173 #endif
1174
1175 void
1176 commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1177 {
1178 int on = 1;
1179 #ifdef TCP_KEEPCNT
1180 if (timeout && interval) {
1181 int count = (timeout + interval - 1) / interval;
1182 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(on)) < 0) {
1183 int xerrno = errno;
1184 debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1185 }
1186 }
1187 #endif
1188 #ifdef TCP_KEEPIDLE
1189 if (idle) {
1190 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(on)) < 0) {
1191 int xerrno = errno;
1192 debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1193 }
1194 }
1195 #endif
1196 #ifdef TCP_KEEPINTVL
1197 if (interval) {
1198 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(on)) < 0) {
1199 int xerrno = errno;
1200 debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1201 }
1202 }
1203 #endif
1204 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0) {
1205 int xerrno = errno;
1206 debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1207 }
1208 }
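
/*
 * Worked example (assumption; numbers are illustrative): with interval=10s and
 * timeout=30s the probe count computed above is (30 + 10 - 1) / 10 = 3, so the
 * kernel gives up after roughly three unanswered keepalive probes.
 */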
1209
1210 void
1211 comm_init(void)
1212 {
1213 fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
1214
1215 /* make sure the accept() socket FIFO delay queue exists */
1216 Comm::AcceptLimiter::Instance();
1217
1218 // make sure the IO pending callback table exists
1219 Comm::CallbackTableInit();
1220
1221 /* XXX account fd_table */
1222 /* Keep a few file descriptors free so that we don't run out of FD's
1223 * after accepting a client but before it opens a socket or a file.
1224 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1225 RESERVED_FD = min(100, Squid_MaxFD / 4);
1226
1227 TheHalfClosed = new DescriptorSet;
1228
1229 /* setup the select loop module */
1230 Comm::SelectLoopInit();
1231 }
1232
1233 void
1234 comm_exit(void)
1235 {
1236 delete TheHalfClosed;
1237 TheHalfClosed = NULL;
1238
1239 safe_free(fd_table);
1240 Comm::CallbackTableDestruct();
1241 }
1242
1243 #if USE_DELAY_POOLS
1244 // called when the queue is done waiting for the client bucket to fill
1245 void
1246 commHandleWriteHelper(void * data)
1247 {
1248 CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
1249 assert(queue);
1250
1251 ClientInfo *clientInfo = queue->clientInfo;
1252 // ClientInfo invalidates the queue if freed, so if we got here through
1253 // eventAdd cbdata protections, everything should be valid and consistent
1254 assert(clientInfo);
1255 assert(clientInfo->hasQueue());
1256 assert(clientInfo->hasQueue(queue));
1257 assert(!clientInfo->selectWaiting);
1258 assert(clientInfo->eventWaiting);
1259 clientInfo->eventWaiting = false;
1260
1261 do {
1262 // check that the head descriptor is still relevant
1263 const int head = clientInfo->quotaPeekFd();
1264 Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);
1265
1266 if (fd_table[head].clientInfo == clientInfo &&
1267 clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
1268 !fd_table[head].closing()) {
1269
1270 // wait for the head descriptor to become ready for writing
1271 Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
1272 clientInfo->selectWaiting = true;
1273 return;
1274 }
1275
1276 clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
1277 // and continue looking for a relevant one
1278 } while (clientInfo->hasQueue());
1279
1280 debugs(77,3, HERE << "emptied queue");
1281 }
1282
1283 bool
1284 ClientInfo::hasQueue() const
1285 {
1286 assert(quotaQueue);
1287 return !quotaQueue->empty();
1288 }
1289
1290 bool
1291 ClientInfo::hasQueue(const CommQuotaQueue *q) const
1292 {
1293 assert(quotaQueue);
1294 return quotaQueue == q;
1295 }
1296
1297 /// returns the first descriptor to be dequeued
1298 int
1299 ClientInfo::quotaPeekFd() const
1300 {
1301 assert(quotaQueue);
1302 return quotaQueue->front();
1303 }
1304
1305 /// returns the reservation ID of the first descriptor to be dequeued
1306 unsigned int
1307 ClientInfo::quotaPeekReserv() const
1308 {
1309 assert(quotaQueue);
1310 return quotaQueue->outs + 1;
1311 }
1312
1313 /// queues a given fd, creating the queue if necessary; returns reservation ID
1314 unsigned int
1315 ClientInfo::quotaEnqueue(int fd)
1316 {
1317 assert(quotaQueue);
1318 return quotaQueue->enqueue(fd);
1319 }
1320
1321 /// removes queue head
1322 void
1323 ClientInfo::quotaDequeue()
1324 {
1325 assert(quotaQueue);
1326 quotaQueue->dequeue();
1327 }
1328
1329 void
1330 ClientInfo::kickQuotaQueue()
1331 {
1332 if (!eventWaiting && !selectWaiting && hasQueue()) {
1333 // wait at least a second if the bucket is empty
1334 const double delay = (bucketLevel < 1.0) ? 1.0 : 0.0;
1335 eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1336 quotaQueue, delay, 0, true);
1337 eventWaiting = true;
1338 }
1339 }
1340
1341 /// calculates how much to write for a single dequeued client
1342 int
1343 ClientInfo::quota()
1344 {
1345 /* If we have multiple clients and give full bucketSize to each client then
1346 * clt1 may often get a lot more because clt1->clt2 time distance in the
1347 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1348 * We divide quota evenly to be more fair. */
1349
1350 if (!rationedCount) {
1351 rationedCount = quotaQueue->size() + 1;
1352
1353 // The delay in ration recalculation _temporarily_ deprives clients of
1354 // bytes that should have trickled in while rationedCount was positive.
1355 refillBucket();
1356
1357 // Rounding errors do not accumulate here, but we round down to avoid
1358 // negative bucket sizes after write with rationedCount=1.
1359 rationedQuota = static_cast<int>(floor(bucketLevel/rationedCount));
1360 debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
1361 '*' << rationedCount);
1362 }
1363
1364 --rationedCount;
1365 debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
1366 " rations remaining: " << rationedCount);
1367
1368 // update 'last seen' time to prevent clientdb GC from dropping us
1369 last_seen = squid_curtime;
1370 return rationedQuota;
1371 }
1372
1373 bool
1374 ClientInfo::applyQuota(int &nleft, Comm::IoCallback *state)
1375 {
1376 assert(hasQueue());
1377 assert(quotaPeekFd() == state->conn->fd);
1378 quotaDequeue(); // we will write or requeue below
1379 if (nleft > 0 && !BandwidthBucket::applyQuota(nleft, state)) {
1380 state->quotaQueueReserv = quotaEnqueue(state->conn->fd);
1381 kickQuotaQueue();
1382 return false;
1383 }
1384 return true;
1385 }
1386
1387 void
1388 ClientInfo::scheduleWrite(Comm::IoCallback *state)
1389 {
1390 if (writeLimitingActive) {
1391 state->quotaQueueReserv = quotaEnqueue(state->conn->fd);
1392 kickQuotaQueue();
1393 }
1394 }
1395
1396 void
1397 ClientInfo::onFdClosed()
1398 {
1399 BandwidthBucket::onFdClosed();
1400 // kick queue or it will get stuck as commWriteHandle is not called
1401 kickQuotaQueue();
1402 }
1403
1404 void
1405 ClientInfo::reduceBucket(const int len)
1406 {
1407 if (len > 0)
1408 BandwidthBucket::reduceBucket(len);
1409 // even if we wrote nothing, we were served; give others a chance
1410 kickQuotaQueue();
1411 }
1412
1413 void
1414 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
1415 {
1416 debugs(77,5, "Write limits for " << (const char*)key <<
1417 " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
1418 " highwatermark=" << aHighWatermark);
1419
1420 // set or possibly update traffic shaping parameters
1421 writeLimitingActive = true;
1422 writeSpeedLimit = aWriteSpeedLimit;
1423 bucketSizeLimit = aHighWatermark;
1424
1425 // but some members should only be set once for a newly activated bucket
1426 if (firstTimeConnection) {
1427 firstTimeConnection = false;
1428
1429 assert(!selectWaiting);
1430 assert(!quotaQueue);
1431 quotaQueue = new CommQuotaQueue(this);
1432
1433 bucketLevel = anInitialBurst;
1434 prevTime = current_dtime;
1435 }
1436 }
1437
1438 CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1439 ins(0), outs(0)
1440 {
1441 assert(clientInfo);
1442 }
1443
1444 CommQuotaQueue::~CommQuotaQueue()
1445 {
1446 assert(!clientInfo); // ClientInfo should clear this before destroying us
1447 }
1448
1449 /// places the given fd at the end of the queue; returns reservation ID
1450 unsigned int
1451 CommQuotaQueue::enqueue(int fd)
1452 {
1453 debugs(77,5, "clt" << (const char*)clientInfo->key <<
1454 ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1455 fds.push_back(fd);
1456 return ++ins;
1457 }
1458
1459 /// removes queue head
1460 void
1461 CommQuotaQueue::dequeue()
1462 {
1463 assert(!fds.empty());
1464 debugs(77,5, "clt" << (const char*)clientInfo->key <<
1465 ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1466 fds.size());
1467 fds.pop_front();
1468 ++outs;
1469 }
1470 #endif /* USE_DELAY_POOLS */
1471
1472 /*
1473 * hm, this might be too general-purpose for all the places we'd
1474 * like to use it.
1475 */
1476 int
1477 ignoreErrno(int ierrno)
1478 {
1479 switch (ierrno) {
1480
1481 case EINPROGRESS:
1482
1483 case EWOULDBLOCK:
1484 #if EAGAIN != EWOULDBLOCK
1485
1486 case EAGAIN:
1487 #endif
1488
1489 case EALREADY:
1490
1491 case EINTR:
1492 #ifdef ERESTART
1493
1494 case ERESTART:
1495 #endif
1496
1497 return 1;
1498
1499 default:
1500 return 0;
1501 }
1502
1503 /* NOTREACHED */
1504 }
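
/*
 * Usage sketch (assumption; doRead() is a hypothetical wrapper): the usual
 * retry pattern around a non-blocking I/O call.
 *
 *   const int n = doRead(fd, buf, sizeof(buf));
 *   if (n < 0 && ignoreErrno(errno))
 *       return;                         // transient; the select loop retries
 *   if (n < 0)
 *       comm_close(fd);                 // a real error
 */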
1505
1506 void
1507 commCloseAllSockets(void)
1508 {
1509 int fd;
1510 fde *F = NULL;
1511
1512 for (fd = 0; fd <= Biggest_FD; ++fd) {
1513 F = &fd_table[fd];
1514
1515 if (!F->flags.open)
1516 continue;
1517
1518 if (F->type != FD_SOCKET)
1519 continue;
1520
1521 if (F->flags.ipc) /* don't close inter-process sockets */
1522 continue;
1523
1524 if (F->timeoutHandler != NULL) {
1525 AsyncCall::Pointer callback = F->timeoutHandler;
1526 F->timeoutHandler = NULL;
1527 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1528 ScheduleCallHere(callback);
1529 } else {
1530 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1531 old_comm_reset_close(fd);
1532 }
1533 }
1534 }
1535
1536 static bool
1537 AlreadyTimedOut(fde *F)
1538 {
1539 if (!F->flags.open)
1540 return true;
1541
1542 if (F->timeout == 0)
1543 return true;
1544
1545 if (F->timeout > squid_curtime)
1546 return true;
1547
1548 return false;
1549 }
1550
1551 static bool
1552 writeTimedOut(int fd)
1553 {
1554 if (!COMMIO_FD_WRITECB(fd)->active())
1555 return false;
1556
1557 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1558 return false;
1559
1560 return true;
1561 }
1562
1563 void
1564 checkTimeouts(void)
1565 {
1566 int fd;
1567 fde *F = NULL;
1568 AsyncCall::Pointer callback;
1569
1570 for (fd = 0; fd <= Biggest_FD; ++fd) {
1571 F = &fd_table[fd];
1572
1573 if (writeTimedOut(fd)) {
1574 // We have an active write callback and we are timed out
1575 debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
1576 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1577 COMMIO_FD_WRITECB(fd)->finish(Comm::COMM_ERROR, ETIMEDOUT);
1578 #if USE_DELAY_POOLS
1579 } else if (F->writeQuotaHandler != nullptr && COMMIO_FD_WRITECB(fd)->conn != nullptr) {
1580 if (!F->writeQuotaHandler->selectWaiting && F->writeQuotaHandler->quota() && !F->closing()) {
1581 F->writeQuotaHandler->selectWaiting = true;
1582 Comm::SetSelect(fd, COMM_SELECT_WRITE, Comm::HandleWrite, COMMIO_FD_WRITECB(fd), 0);
1583 }
1584 continue;
1585 #endif
1586 }
1587 else if (AlreadyTimedOut(F))
1588 continue;
1589
1590 debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");
1591
1592 if (F->timeoutHandler != NULL) {
1593 debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
1594 callback = F->timeoutHandler;
1595 F->timeoutHandler = NULL;
1596 ScheduleCallHere(callback);
1597 } else {
1598 debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
1599 comm_close(fd);
1600 }
1601 }
1602 }
1603
1604 /// Start waiting for a possibly half-closed connection to close
1605 // by scheduling a read callback to a monitoring handler that
1606 // will close the connection on read errors.
1607 void
1608 commStartHalfClosedMonitor(int fd)
1609 {
1610 debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
1611 assert(isOpen(fd) && !commHasHalfClosedMonitor(fd));
1612 (void)TheHalfClosed->add(fd); // could also assert the result
1613 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1614 }
1615
1616 static
1617 void
1618 commPlanHalfClosedCheck()
1619 {
1620 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1621 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1622 WillCheckHalfClosed = true;
1623 }
1624 }
1625
1626 /// iterates over all descriptors that may need half-closed tests and
1627 /// calls comm_read for those that do; re-schedules the check if needed
1628 static
1629 void
1630 commHalfClosedCheck(void *)
1631 {
1632 debugs(5, 5, HERE << "checking " << *TheHalfClosed);
1633
1634 typedef DescriptorSet::const_iterator DSCI;
1635 const DSCI end = TheHalfClosed->end();
1636 for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
1637 Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
1638 c->fd = *i;
1639 if (!fd_table[c->fd].halfClosedReader) { // not reading already
1640 AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
1641 CommIoCbPtrFun(&commHalfClosedReader, NULL));
1642 Comm::Read(c, call);
1643 fd_table[c->fd].halfClosedReader = call;
1644 } else
1645 c->fd = -1; // XXX: temporary. prevent c replacement erase closing listed FD
1646 }
1647
1648 WillCheckHalfClosed = false; // as far as we know
1649 commPlanHalfClosedCheck(); // may need to check again
1650 }
1651
1652 /// checks whether we are waiting for a possibly half-closed connection to close
1653 // We are monitoring if the read handler for the fd is the monitoring handler.
1654 bool
1655 commHasHalfClosedMonitor(int fd)
1656 {
1657 return TheHalfClosed->has(fd);
1658 }
1659
1660 /// stop waiting for a possibly half-closed connection to close
1661 void
1662 commStopHalfClosedMonitor(int const fd)
1663 {
1664 debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
1665
1666 // cancel the read if one was scheduled
1667 AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1668 if (reader != NULL)
1669 Comm::ReadCancel(fd, reader);
1670 fd_table[fd].halfClosedReader = NULL;
1671
1672 TheHalfClosed->del(fd);
1673 }
1674
1675 /// I/O handler for the possibly half-closed connection monitoring code
1676 static void
1677 commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, Comm::Flag flag, int, void *)
1678 {
1679 // there cannot be more data coming in on half-closed connections
1680 assert(size == 0);
1681 assert(conn != NULL);
1682 assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read
1683
1684 fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now
1685
1686 // nothing to do if fd is being closed
1687 if (flag == Comm::ERR_CLOSING)
1688 return;
1689
1690 // if read failed, close the connection
1691 if (flag != Comm::OK) {
1692 debugs(5, 3, HERE << "closing " << conn);
1693 conn->close();
1694 return;
1695 }
1696
1697 // continue waiting for close or error
1698 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1699 }
1700
1701 CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}
1702
1703 CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
1704 : conn(c), buf(buf_), len(len_), callback(callback_) {}
1705
1706 DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}
1707
1708 DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
1709
1710 DeferredReadManager::~DeferredReadManager()
1711 {
1712 flushReads();
1713 assert (deferredReads.empty());
1714 }
1715
1716 /* explicit instantiation required for some systems */
1717
1718 /// \cond AUTODOCS_IGNORE
1719 template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
1720 /// \endcond
1721
1722 void
1723 DeferredReadManager::delayRead(DeferredRead const &aRead)
1724 {
1725 debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
1726 CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);
1727
1728 // We have to use a global function as a closer and point to temp
1729 // instead of "this" because DeferredReadManager is not a job and
1730 // is not even cbdata protected
1731 // XXX: and yet we use cbdata protection functions on it??
1732 AsyncCall::Pointer closer = commCbCall(5,4,
1733 "DeferredReadManager::CloseHandler",
1734 CommCloseCbPtrFun(&CloseHandler, temp));
1735 comm_add_close_handler(aRead.theRead.conn->fd, closer);
1736 temp->element.closer = closer; // remember so that we can cancel
1737 }
1738
1739 void
1740 DeferredReadManager::CloseHandler(const CommCloseCbParams &params)
1741 {
1742 if (!cbdataReferenceValid(params.data))
1743 return;
1744
1745 CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)params.data;
1746
1747 temp->element.closer = NULL;
1748 temp->element.markCancelled();
1749 }
1750
1751 DeferredRead
1752 DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
1753 {
1754 assert (!deferredReads.empty());
1755
1756 DeferredRead &read = deferredReads.head->element;
1757
1758 // NOTE: at this point the connection has been paused/stalled for an unknown
1759 // amount of time. We must re-validate that it is active and usable.
1760
1761 // If the connection has been closed already, cancel this read.
1762 if (!fd_table || !Comm::IsConnOpen(read.theRead.conn)) {
1763 if (read.closer != NULL) {
1764 read.closer->cancel("Connection closed before.");
1765 read.closer = NULL;
1766 }
1767 read.markCancelled();
1768 }
1769
1770 if (!read.cancelled) {
1771 comm_remove_close_handler(read.theRead.conn->fd, read.closer);
1772 read.closer = NULL;
1773 }
1774
1775 DeferredRead result = deferredReads.pop_front();
1776
1777 return result;
1778 }
1779
1780 void
1781 DeferredReadManager::kickReads(int const count)
1782 {
1783 /* if we had CbDataList::size() we could consolidate this and flushReads */
1784
1785 if (count < 1) {
1786 flushReads();
1787 return;
1788 }
1789
1790 size_t remaining = count;
1791
1792 while (!deferredReads.empty() && remaining) {
1793 DeferredRead aRead = popHead(deferredReads);
1794 kickARead(aRead);
1795
1796 if (!aRead.cancelled)
1797 --remaining;
1798 }
1799 }
1800
1801 void
1802 DeferredReadManager::flushReads()
1803 {
1804 CbDataListContainer<DeferredRead> reads;
1805 reads = deferredReads;
1806 deferredReads = CbDataListContainer<DeferredRead>();
1807
1808 // XXX: For fairness this SHOULD randomize the order
1809 while (!reads.empty()) {
1810 DeferredRead aRead = popHead(reads);
1811 kickARead(aRead);
1812 }
1813 }
1814
1815 void
1816 DeferredReadManager::kickARead(DeferredRead const &aRead)
1817 {
1818 if (aRead.cancelled)
1819 return;
1820
1821 if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
1822 return;
1823
1824 debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);
1825
1826 aRead.theReader(aRead.theContext, aRead.theRead);
1827 }
1828
1829 void
1830 DeferredRead::markCancelled()
1831 {
1832 cancelled = true;
1833 }
1834
1835 int
1836 CommSelectEngine::checkEvents(int timeout)
1837 {
1838 static time_t last_timeout = 0;
1839
1840 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
1841 if (squid_curtime > last_timeout) {
1842 last_timeout = squid_curtime;
1843 checkTimeouts();
1844 }
1845
1846 switch (Comm::DoSelect(timeout)) {
1847
1848 case Comm::OK:
1849
1850 case Comm::TIMEOUT:
1851 return 0;
1852
1853 case Comm::IDLE:
1854
1855 case Comm::SHUTDOWN:
1856 return EVENT_IDLE;
1857
1858 case Comm::COMM_ERROR:
1859 return EVENT_ERROR;
1860
1861 default:
1862 fatal_dump("comm.cc: Internal error -- this should never happen.");
1863 return EVENT_ERROR;
1864 };
1865 }
1866
1867 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
1868 int
1869 comm_open_uds(int sock_type,
1870 int proto,
1871 struct sockaddr_un* addr,
1872 int flags)
1873 {
1874 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
1875
1876 int new_socket;
1877
1878 PROF_start(comm_open);
1879 /* Create socket for accepting new connections. */
1880 ++ statCounter.syscalls.sock.sockets;
1881
1882 /* Setup the socket addrinfo details for use */
1883 struct addrinfo AI;
1884 AI.ai_flags = 0;
1885 AI.ai_family = PF_UNIX;
1886 AI.ai_socktype = sock_type;
1887 AI.ai_protocol = proto;
1888 AI.ai_addrlen = SUN_LEN(addr);
1889 AI.ai_addr = (sockaddr*)addr;
1890 AI.ai_canonname = NULL;
1891 AI.ai_next = NULL;
1892
1893 debugs(50, 3, HERE << "Attempt open socket for: " << addr->sun_path);
1894
1895 if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
1896 int xerrno = errno;
1897 /* Increase the number of reserved fd's if calls to socket()
1898 * are failing because the open file table is full. This
1899 * limits the number of simultaneous clients */
1900
1901 if (limitError(xerrno)) {
1902 debugs(50, DBG_IMPORTANT, MYNAME << "socket failure: " << xstrerr(xerrno));
1903 fdAdjustReserved();
1904 } else {
1905 debugs(50, DBG_CRITICAL, MYNAME << "socket failure: " << xstrerr(xerrno));
1906 }
1907
1908 PROF_stop(comm_open);
1909 return -1;
1910 }
1911
1912 debugs(50, 3, "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);
1913
1914 /* update fdstat */
1915 debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");
1916
1917 assert(!isOpen(new_socket));
1918 fd_open(new_socket, FD_MSGHDR, addr->sun_path);
1919
1920 fd_table[new_socket].sock_family = AI.ai_family;
1921
1922 if (!(flags & COMM_NOCLOEXEC))
1923 commSetCloseOnExec(new_socket);
1924
1925 if (flags & COMM_REUSEADDR)
1926 commSetReuseAddr(new_socket);
1927
1928 if (flags & COMM_NONBLOCKING) {
1929 if (commSetNonBlocking(new_socket) != Comm::OK) {
1930 comm_close(new_socket);
1931 PROF_stop(comm_open);
1932 return -1;
1933 }
1934 }
1935
1936 if (flags & COMM_DOBIND) {
1937 if (commBind(new_socket, AI) != Comm::OK) {
1938 comm_close(new_socket);
1939 PROF_stop(comm_open);
1940 return -1;
1941 }
1942 }
1943
1944 #ifdef TCP_NODELAY
1945 if (sock_type == SOCK_STREAM)
1946 commSetTcpNoDelay(new_socket);
1947
1948 #endif
1949
1950 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
1951 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
1952
1953 PROF_stop(comm_open);
1954
1955 return new_socket;
1956 }
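
/*
 * Usage sketch (assumption; the socket path is hypothetical): opening a
 * non-blocking, bound UDS datagram socket. Real callers live in the IPC code.
 *
 *   struct sockaddr_un addr;
 *   memset(&addr, 0, sizeof(addr));
 *   addr.sun_family = AF_UNIX;
 *   xstrncpy(addr.sun_path, "/var/run/squid/example.sock", sizeof(addr.sun_path));
 *   const int fd = comm_open_uds(SOCK_DGRAM, 0, &addr,
 *                                COMM_NONBLOCKING | COMM_DOBIND);
 */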
1957