1 /*
2 * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 05 Socket Functions */
10
11 #include "squid.h"
12 #include "ClientInfo.h"
13 #include "comm/AcceptLimiter.h"
14 #include "comm/comm_internal.h"
15 #include "comm/Connection.h"
16 #include "comm/IoCallback.h"
17 #include "comm/Loops.h"
18 #include "comm/Read.h"
19 #include "comm/TcpAcceptor.h"
20 #include "comm/Write.h"
21 #include "CommRead.h"
22 #include "compat/cmsg.h"
23 #include "DescriptorSet.h"
24 #include "event.h"
25 #include "fd.h"
26 #include "fde.h"
27 #include "globals.h"
28 #include "icmp/net_db.h"
29 #include "ip/Intercept.h"
30 #include "ip/QosConfig.h"
31 #include "ip/tools.h"
32 #include "pconn.h"
33 #include "profiler/Profiler.h"
34 #include "sbuf/SBuf.h"
35 #include "SquidConfig.h"
36 #include "StatCounters.h"
37 #include "StoreIOBuffer.h"
38 #include "tools.h"
39
40 #if USE_OPENSSL
41 #include "ssl/support.h"
42 #endif
43
44 #include <cerrno>
45 #include <cmath>
46 #if _SQUID_CYGWIN_
47 #include <sys/ioctl.h>
48 #endif
49 #ifdef HAVE_NETINET_TCP_H
50 #include <netinet/tcp.h>
51 #endif
52 #if HAVE_SYS_UN_H
53 #include <sys/un.h>
54 #endif
55
56 /*
57 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
58 */
59
60 static IOCB commHalfClosedReader;
61 static void comm_init_opened(const Comm::ConnectionPointer &conn, const char *note, struct addrinfo *AI);
62 static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
63
64 #if USE_DELAY_POOLS
65 CBDATA_CLASS_INIT(CommQuotaQueue);
66
67 static void commHandleWriteHelper(void * data);
68 #endif
69
70 /* STATIC */
71
72 static DescriptorSet *TheHalfClosed = NULL; /// the set of half-closed FDs
73 static bool WillCheckHalfClosed = false; /// true if check is scheduled
74 static EVH commHalfClosedCheck;
75 static void commPlanHalfClosedCheck();
76
77 static Comm::Flag commBind(int s, struct addrinfo &);
78 static void commSetReuseAddr(int);
79 static void commSetNoLinger(int);
80 #ifdef TCP_NODELAY
81 static void commSetTcpNoDelay(int);
82 #endif
83 static void commSetTcpRcvbuf(int, int);
84
85 fd_debug_t *fdd_table = NULL;
86
87 bool
88 isOpen(const int fd)
89 {
90 return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
91 }
92
93 /**
94 * Empty the read buffers
95 *
96 * This is a magical routine that empties the read buffers.
97 * On some platforms (Linux), if a buffer has data in it before
98 * you call close(), the socket will hang and take quite a while
99 * to time out.
100 */
101 static void
102 comm_empty_os_read_buffers(int fd)
103 {
104 #if _SQUID_LINUX_
105 #if USE_OPENSSL
106 // Bug 4146: SSL-Bump BIO does not release sockets on close.
107 if (fd_table[fd].ssl)
108 return;
109 #endif
110
111 /* prevent those nasty RST packets */
112 char buf[SQUID_TCP_SO_RCVBUF];
113 if (fd_table[fd].flags.nonblocking && fd_table[fd].type != FD_MSGHDR) {
114 while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
115 }
116 #endif
117 }
118
119 /**
120 * synchronous wrapper around udp socket functions
121 */
122 int
123 comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
124 {
125 ++ statCounter.syscalls.sock.recvfroms;
126 debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
127 struct addrinfo *AI = NULL;
128 Ip::Address::InitAddr(AI);
129 int x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
130 from = *AI;
131 Ip::Address::FreeAddr(AI);
132 return x;
133 }
134
135 int
136 comm_udp_recv(int fd, void *buf, size_t len, int flags)
137 {
138 Ip::Address nul;
139 return comm_udp_recvfrom(fd, buf, len, flags, nul);
140 }
141
142 ssize_t
143 comm_udp_send(int s, const void *buf, size_t len, int flags)
144 {
145 return send(s, buf, len, flags);
146 }
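
/*
 * Usage sketch for the UDP helpers above (assumes "fd" is an already-open
 * SOCK_DGRAM socket from comm_open()): receive one datagram and echo it back
 * to the sender with comm_udp_sendto(), defined later in this file.
 *
 *   char buf[4096];
 *   Ip::Address from;
 *   const int n = comm_udp_recvfrom(fd, buf, sizeof(buf), 0, from);
 *   if (n > 0)
 *       comm_udp_sendto(fd, from, buf, n); // echo the payload back
 */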
147
148 bool
149 comm_has_incomplete_write(int fd)
150 {
151 assert(isOpen(fd) && COMMIO_FD_WRITECB(fd) != NULL);
152 return COMMIO_FD_WRITECB(fd)->active();
153 }
154
155 /**
156 * Queue a write. handler/handler_data are called when the write fully
157 * completes, on error, or on file descriptor close.
158 */
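
/*
 * Rough sketch of queueing such a write (assumes the Comm::Write() API from
 * comm/Write.h and a hypothetical IOCB completion handler named myWriteDone;
 * the trailing NULL is the optional free function for the buffer):
 *
 *   static IOCB myWriteDone; // (const Comm::ConnectionPointer &, char *, size_t, Comm::Flag, int, void *)
 *
 *   AsyncCall::Pointer call = commCbCall(5, 5, "myWriteDone",
 *                                        CommIoCbPtrFun(myWriteDone, data));
 *   Comm::Write(conn, buffer, bufferLength, call, NULL);
 */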
159
160 /* Return the local port associated with fd. */
161 unsigned short
162 comm_local_port(int fd)
163 {
164 Ip::Address temp;
165 struct addrinfo *addr = NULL;
166 fde *F = &fd_table[fd];
167
168 /* If the fd is closed already, just return */
169
170 if (!F->flags.open) {
171 debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
172 return 0;
173 }
174
175 if (F->local_addr.port())
176 return F->local_addr.port();
177
178 if (F->sock_family == AF_INET)
179 temp.setIPv4();
180
181 Ip::Address::InitAddr(addr);
182
183 if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
184 int xerrno = errno;
185 debugs(50, DBG_IMPORTANT, MYNAME << "Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerr(xerrno));
186 Ip::Address::FreeAddr(addr);
187 return 0;
188 }
189 temp = *addr;
190
191 Ip::Address::FreeAddr(addr);
192
193 if (F->local_addr.isAnyAddr()) {
194 /* save the whole local address, not just the port. */
195 F->local_addr = temp;
196 } else {
197 F->local_addr.port(temp.port());
198 }
199
200 debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.port() << "(family=" << F->sock_family << ")");
201 return F->local_addr.port();
202 }
203
204 static Comm::Flag
205 commBind(int s, struct addrinfo &inaddr)
206 {
207 ++ statCounter.syscalls.sock.binds;
208
209 if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
210 debugs(50, 6, "bind socket FD " << s << " to " << fd_table[s].local_addr);
211 return Comm::OK;
212 }
213 int xerrno = errno;
214 debugs(50, DBG_CRITICAL, MYNAME << "Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerr(xerrno));
215
216 return Comm::COMM_ERROR;
217 }
218
219 /**
220 * Create a socket. Default is a blocking, stream (TCP) socket. FLAGS
221 * is a bitwise OR of the COMM_* flags specified in comm.h. The default TOS is used.
222 */
223 int
224 comm_open(int sock_type,
225 int proto,
226 Ip::Address &addr,
227 int flags,
228 const char *note)
229 {
230 return comm_openex(sock_type, proto, addr, flags, note);
231 }
232
233 void
234 comm_open_listener(int sock_type,
235 int proto,
236 Comm::ConnectionPointer &conn,
237 const char *note)
238 {
239 /* all listener sockets require bind() */
240 conn->flags |= COMM_DOBIND;
241
242 /* attempt native enabled port. */
243 conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, note);
244 }
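
/*
 * Sketch of preparing and opening a listening TCP socket with the
 * Connection-based variant above (the port and flags are illustrative):
 *
 *   Comm::ConnectionPointer listenConn = new Comm::Connection;
 *   listenConn->local.port(3128);
 *   listenConn->flags = COMM_NONBLOCKING;
 *   comm_open_listener(SOCK_STREAM, IPPROTO_TCP, listenConn, "HTTP Socket");
 *   if (!Comm::IsConnOpen(listenConn))
 *       debugs(5, DBG_CRITICAL, "cannot open listening socket");
 */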
245
246 int
247 comm_open_listener(int sock_type,
248 int proto,
249 Ip::Address &addr,
250 int flags,
251 const char *note)
252 {
253 int sock = -1;
254
255 /* all listener sockets require bind() */
256 flags |= COMM_DOBIND;
257
258 /* attempt native enabled port. */
259 sock = comm_openex(sock_type, proto, addr, flags, note);
260
261 return sock;
262 }
263
264 static bool
265 limitError(int const anErrno)
266 {
267 return anErrno == ENFILE || anErrno == EMFILE;
268 }
269
270 void
271 comm_set_v6only(int fd, int tos)
272 {
273 #ifdef IPV6_V6ONLY
274 if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
275 int xerrno = errno;
276 debugs(50, DBG_IMPORTANT, MYNAME << "setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerr(xerrno));
277 }
278 #else
279 debugs(50, DBG_CRITICAL, MYNAME << "WARNING: setsockopt(IPV6_V6ONLY) not supported on this platform");
280 #endif /* sockopt */
281 }
282
283 /**
284 * Set the socket option required for TPROXY spoofing for:
285 * - Linux TPROXY v4 support,
286 * - OpenBSD divert-to support,
287 * - FreeBSD IPFW TPROXY v4 support.
288 */
289 void
290 comm_set_transparent(int fd)
291 {
292 #if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
293 # define soLevel SOL_IP
294 # define soFlag IP_TRANSPARENT
295 bool doneSuid = false;
296
297 #elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
298 # define soLevel SOL_SOCKET
299 # define soFlag SO_BINDANY
300 enter_suid();
301 bool doneSuid = true;
302
303 #elif defined(IP_BINDANY) // FreeBSD with IPFW
304 # define soLevel IPPROTO_IP
305 # define soFlag IP_BINDANY
306 enter_suid();
307 bool doneSuid = true;
308
309 #else
310 debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
311 #endif /* sockopt */
312
313 #if defined(soLevel) && defined(soFlag)
314 int tos = 1;
315 if (setsockopt(fd, soLevel, soFlag, (char *) &tos, sizeof(int)) < 0) {
316 int xerrno = errno;
317 debugs(50, DBG_IMPORTANT, MYNAME << "setsockopt(TPROXY) on FD " << fd << ": " << xstrerr(xerrno));
318 } else {
319 /* mark the socket as having transparent options */
320 fd_table[fd].flags.transparent = true;
321 }
322 if (doneSuid)
323 leave_suid();
324 #endif
325 }
326
327 /**
328 * Create a socket. Default is a blocking, stream (TCP) socket. FLAGS
329 * is a bitwise OR of the COMM_* flags specified in defines.h.
330 */
331 int
332 comm_openex(int sock_type,
333 int proto,
334 Ip::Address &addr,
335 int flags,
336 const char *note)
337 {
338 int new_socket;
339 struct addrinfo *AI = NULL;
340
341 PROF_start(comm_open);
342 /* Create socket for accepting new connections. */
343 ++ statCounter.syscalls.sock.sockets;
344
345 /* Setup the socket addrinfo details for use */
346 addr.getAddrInfo(AI);
347 AI->ai_socktype = sock_type;
348 AI->ai_protocol = proto;
349
350 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );
351
352 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
353 int xerrno = errno;
354
355 /* under IPv6 there is the possibility IPv6 is present but disabled. */
356 /* try again as IPv4-native if possible */
357 if ( new_socket < 0 && Ip::EnableIpv6 && addr.isIPv6() && addr.setIPv4() ) {
358 /* attempt to open this IPv4-only. */
359 Ip::Address::FreeAddr(AI);
360 /* Setup the socket addrinfo details for use */
361 addr.getAddrInfo(AI);
362 AI->ai_socktype = sock_type;
363 AI->ai_protocol = proto;
364 debugs(50, 3, "Attempt fallback open socket for: " << addr );
365 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
366 debugs(50, 2, "attempt open " << note << " socket on: " << addr);
367 }
368
369 if (new_socket < 0) {
370 /* Increase the number of reserved fd's if calls to socket()
371 * are failing because the open file table is full. This
372 * limits the number of simultaneous clients */
373
374 if (limitError(errno)) {
375 debugs(50, DBG_IMPORTANT, MYNAME << "socket failure: " << xstrerr(xerrno));
376 fdAdjustReserved();
377 } else {
378 debugs(50, DBG_CRITICAL, MYNAME << "socket failure: " << xstrerr(xerrno));
379 }
380
381 Ip::Address::FreeAddr(AI);
382
383 PROF_stop(comm_open);
384 errno = xerrno; // restore for caller
385 return -1;
386 }
387
388 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
389 Comm::ConnectionPointer conn = new Comm::Connection;
390 conn->local = addr;
391 conn->fd = new_socket;
392
393 debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );
394
395 if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.isIPv6() )
396 comm_set_v6only(conn->fd, 1);
397
398 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
399 /* Other OS may have this administratively disabled for general use. Same deal. */
400 if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.isIPv6() )
401 comm_set_v6only(conn->fd, 0);
402
403 comm_init_opened(conn, note, AI);
404 new_socket = comm_apply_flags(conn->fd, addr, flags, AI);
405
406 Ip::Address::FreeAddr(AI);
407
408 PROF_stop(comm_open);
409
410 // XXX transition only. prevent conn from closing the new FD on function exit.
411 conn->fd = -1;
412 errno = xerrno; // restore for caller
413 return new_socket;
414 }
415
416 /// update FD tables after a local or remote (IPC) comm_openex();
417 void
418 comm_init_opened(const Comm::ConnectionPointer &conn,
419 const char *note,
420 struct addrinfo *AI)
421 {
422 assert(Comm::IsConnOpen(conn));
423 assert(AI);
424
425 /* update fdstat */
426 debugs(5, 5, HERE << conn << " is a new socket");
427
428 assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
429 fd_open(conn->fd, FD_SOCKET, note);
430
431 fdd_table[conn->fd].close_file = NULL;
432 fdd_table[conn->fd].close_line = 0;
433
434 fde *F = &fd_table[conn->fd];
435 F->local_addr = conn->local;
436
437 F->sock_family = AI->ai_family;
438 }
439
440 /// apply flags after a local comm_open*() call;
441 /// returns new_socket or -1 on error
442 static int
443 comm_apply_flags(int new_socket,
444 Ip::Address &addr,
445 int flags,
446 struct addrinfo *AI)
447 {
448 assert(new_socket >= 0);
449 assert(AI);
450 const int sock_type = AI->ai_socktype;
451
452 if (!(flags & COMM_NOCLOEXEC))
453 commSetCloseOnExec(new_socket);
454
455 if ((flags & COMM_REUSEADDR))
456 commSetReuseAddr(new_socket);
457
458 if (addr.port() > (unsigned short) 0) {
459 #if _SQUID_WINDOWS_
460 if (sock_type != SOCK_DGRAM)
461 #endif
462 commSetNoLinger(new_socket);
463
464 if (opt_reuseaddr)
465 commSetReuseAddr(new_socket);
466 }
467
468 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
469 if ((flags & COMM_TRANSPARENT)) {
470 comm_set_transparent(new_socket);
471 }
472
473 if ( (flags & COMM_DOBIND) || addr.port() > 0 || !addr.isAnyAddr() ) {
474 if ( !(flags & COMM_DOBIND) && addr.isAnyAddr() )
475 debugs(5, DBG_IMPORTANT,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
476 if ( addr.isNoAddr() )
477 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");
478
479 if (commBind(new_socket, *AI) != Comm::OK) {
480 comm_close(new_socket);
481 return -1;
482 }
483 }
484
485 if (flags & COMM_NONBLOCKING)
486 if (commSetNonBlocking(new_socket) == Comm::COMM_ERROR) {
487 comm_close(new_socket);
488 return -1;
489 }
490
491 #ifdef TCP_NODELAY
492 if (sock_type == SOCK_STREAM)
493 commSetTcpNoDelay(new_socket);
494
495 #endif
496
497 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
498 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
499
500 return new_socket;
501 }
502
503 void
504 comm_import_opened(const Comm::ConnectionPointer &conn,
505 const char *note,
506 struct addrinfo *AI)
507 {
508 debugs(5, 2, HERE << conn);
509 assert(Comm::IsConnOpen(conn));
510 assert(AI);
511
512 comm_init_opened(conn, note, AI);
513
514 if (!(conn->flags & COMM_NOCLOEXEC))
515 fd_table[conn->fd].flags.close_on_exec = true;
516
517 if (conn->local.port() > (unsigned short) 0) {
518 #if _SQUID_WINDOWS_
519 if (AI->ai_socktype != SOCK_DGRAM)
520 #endif
521 fd_table[conn->fd].flags.nolinger = true;
522 }
523
524 if ((conn->flags & COMM_TRANSPARENT))
525 fd_table[conn->fd].flags.transparent = true;
526
527 if (conn->flags & COMM_NONBLOCKING)
528 fd_table[conn->fd].flags.nonblocking = true;
529
530 #ifdef TCP_NODELAY
531 if (AI->ai_socktype == SOCK_STREAM)
532 fd_table[conn->fd].flags.nodelay = true;
533 #endif
534
535 /* no fd_table[fd].flags. updates needed for these conditions:
536 * if ((flags & COMM_REUSEADDR)) ...
537 * if ((flags & COMM_DOBIND) ...) ...
538 */
539 }
540
541 // XXX: now that raw-FD timeouts are only unset for pipes and files, this SHOULD be a no-op
542 // with the handler already unset. Leaving this present until that can be verified for all code paths.
543 void
544 commUnsetFdTimeout(int fd)
545 {
546 debugs(5, 3, HERE << "Remove timeout for FD " << fd);
547 assert(fd >= 0);
548 assert(fd < Squid_MaxFD);
549 fde *F = &fd_table[fd];
550 assert(F->flags.open);
551
552 F->timeoutHandler = NULL;
553 F->timeout = 0;
554 }
555
556 int
557 commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
558 {
559 debugs(5, 3, HERE << conn << " timeout " << timeout);
560 assert(Comm::IsConnOpen(conn));
561 assert(conn->fd < Squid_MaxFD);
562 fde *F = &fd_table[conn->fd];
563 assert(F->flags.open);
564
565 if (timeout < 0) {
566 F->timeoutHandler = NULL;
567 F->timeout = 0;
568 } else {
569 if (callback != NULL) {
570 typedef CommTimeoutCbParams Params;
571 Params &params = GetCommParams<Params>(callback);
572 params.conn = conn;
573 F->timeoutHandler = callback;
574 }
575
576 F->timeout = squid_curtime + (time_t) timeout;
577 }
578
579 return F->timeout;
580 }
581
582 int
583 commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
584 {
585 debugs(5, 3, HERE << "Remove timeout for " << conn);
586 AsyncCall::Pointer nil;
587 return commSetConnTimeout(conn, -1, nil);
588 }
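
/*
 * Sketch of arming and later clearing a connection timeout (assumes a
 * hypothetical CTCB handler named myTimeout; CommTimeoutCbPtrFun comes from
 * CommCalls.h):
 *
 *   static CTCB myTimeout; // void myTimeout(const CommTimeoutCbParams &params)
 *
 *   AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "myTimeout",
 *                                               CommTimeoutCbPtrFun(myTimeout, data));
 *   commSetConnTimeout(conn, Config.Timeout.read, timeoutCall);
 *   ...
 *   commUnsetConnTimeout(conn); // when the transaction completes
 */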
589
590 /**
591 * Connect socket FD to given remote address.
592 * If return value is an error flag (COMM_ERROR, ERR_CONNECT, ERR_PROTOCOL, etc.),
593 * then error code will also be returned in errno.
594 */
595 int
596 comm_connect_addr(int sock, const Ip::Address &address)
597 {
598 Comm::Flag status = Comm::OK;
599 fde *F = &fd_table[sock];
600 int x = 0;
601 int err = 0;
602 socklen_t errlen;
603 struct addrinfo *AI = NULL;
604 PROF_start(comm_connect_addr);
605
606 assert(address.port() != 0);
607
608 debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");
609
610 /* Handle IPv6 over IPv4-only socket case.
611 * this case must presently be handled here since the getAddrInfo asserts on bad mappings.
612 * NP: because commResetFD is private to ConnStateData we have to return an error and
613 * trust that it is handled properly.
614 */
615 if (F->sock_family == AF_INET && !address.isIPv4()) {
616 errno = ENETUNREACH;
617 return Comm::ERR_PROTOCOL;
618 }
619
620 /* Handle IPv4 over IPv6-only socket case.
621 * This case is presently handled here as it's both a known case and it's
622 * uncertain what error will be returned by the IPv6 stack in such a case. It's
623 * possible this will also be handled by the errno checks below after connect()
624 * but needs careful cross-platform verification, and verifying the address
625 * condition here is simple.
626 */
627 if (!F->local_addr.isIPv4() && address.isIPv4()) {
628 errno = ENETUNREACH;
629 return Comm::ERR_PROTOCOL;
630 }
631
632 address.getAddrInfo(AI, F->sock_family);
633
634 /* Establish connection. */
635 int xerrno = 0;
636
637 if (!F->flags.called_connect) {
638 F->flags.called_connect = true;
639 ++ statCounter.syscalls.sock.connects;
640
641 errno = 0;
642 if ((x = connect(sock, AI->ai_addr, AI->ai_addrlen)) < 0) {
643 xerrno = errno;
644 debugs(5,5, "sock=" << sock << ", addrinfo(" <<
645 " flags=" << AI->ai_flags <<
646 ", family=" << AI->ai_family <<
647 ", socktype=" << AI->ai_socktype <<
648 ", protocol=" << AI->ai_protocol <<
649 ", &addr=" << AI->ai_addr <<
650 ", addrlen=" << AI->ai_addrlen << " )");
651 debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerr(xerrno));
652 debugs(14,9, "connecting to: " << address);
653
654 } else if (x == 0) {
655 // XXX: ICAP code refuses callbacks during a pending comm_ call
656 // Async calls development will fix this.
657 x = -1;
658 xerrno = EINPROGRESS;
659 }
660
661 } else {
662 errno = 0;
663 #if _SQUID_NEWSOS6_
664 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
665 if (connect(sock, AI->ai_addr, AI->ai_addrlen) < 0)
666 xerrno = errno;
667
668 if (xerrno == EINVAL) {
669 errlen = sizeof(err);
670 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
671 if (x >= 0)
672 xerrno = x;
673 }
674 #else
675 errlen = sizeof(err);
676 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
677 if (x == 0)
678 xerrno = err;
679
680 #if _SQUID_SOLARIS_
681 /*
682 * Solaris 2.4's socket emulation doesn't allow you
683 * to determine the error from a failed non-blocking
684 * connect and just returns EPIPE. Create a fake
685 * error message for connect. -- fenner@parc.xerox.com
686 */
687 if (x < 0 && xerrno == EPIPE)
688 xerrno = ENOTCONN;
689 else
690 xerrno = errno;
691 #endif
692 #endif
693 }
694
695 Ip::Address::FreeAddr(AI);
696
697 PROF_stop(comm_connect_addr);
698
699 errno = xerrno;
700 if (xerrno == 0 || xerrno == EISCONN)
701 status = Comm::OK;
702 else if (ignoreErrno(xerrno))
703 status = Comm::INPROGRESS;
704 else if (xerrno == EAFNOSUPPORT || xerrno == EINVAL)
705 return Comm::ERR_PROTOCOL;
706 else
707 return Comm::COMM_ERROR;
708
709 address.toStr(F->ipaddr, MAX_IPSTRLEN);
710
711 F->remote_port = address.port(); /* remote_port is HS */
712
713 if (status == Comm::OK) {
714 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connected to " << address);
715 } else if (status == Comm::INPROGRESS) {
716 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connection pending");
717 }
718
719 errno = xerrno;
720 return status;
721 }
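
/*
 * Sketch of driving a non-blocking connect with the helper above (callers
 * normally go through Comm::ConnOpener; fd, address and retryHandler are
 * assumed to exist):
 *
 *   const int rc = comm_connect_addr(fd, address);
 *   if (rc == Comm::OK)
 *       ... // connected immediately
 *   else if (rc == Comm::INPROGRESS)
 *       Comm::SetSelect(fd, COMM_SELECT_WRITE, retryHandler, data, 0); // call again when writable
 *   else
 *       ... // Comm::ERR_PROTOCOL or Comm::COMM_ERROR; details are in errno
 */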
722
723 void
724 commCallCloseHandlers(int fd)
725 {
726 fde *F = &fd_table[fd];
727 debugs(5, 5, "commCallCloseHandlers: FD " << fd);
728
729 while (F->closeHandler != NULL) {
730 AsyncCall::Pointer call = F->closeHandler;
731 F->closeHandler = call->Next();
732 call->setNext(NULL);
733 // If the call has not been canceled, schedule it for execution; otherwise ignore it
734 if (!call->canceled()) {
735 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
736 ScheduleCallHere(call);
737 }
738 }
739 }
740
741 #if LINGERING_CLOSE
742 static void
743 commLingerClose(int fd, void *unused)
744 {
745 LOCAL_ARRAY(char, buf, 1024);
746 int n = FD_READ_METHOD(fd, buf, 1024);
747 if (n < 0) {
748 int xerrno = errno;
749 debugs(5, 3, "FD " << fd << " read: " << xstrerr(xerrno));
750 }
751 comm_close(fd);
752 }
753
754 static void
755 commLingerTimeout(const FdeCbParams &params)
756 {
757 debugs(5, 3, "commLingerTimeout: FD " << params.fd);
758 comm_close(params.fd);
759 }
760
761 /*
762 * Inspired by apache
763 */
764 void
765 comm_lingering_close(int fd)
766 {
767 #if USE_OPENSSL
768 if (fd_table[fd].ssl)
769 ssl_shutdown_method(fd_table[fd].ssl);
770 #endif
771
772 if (shutdown(fd, 1) < 0) {
773 comm_close(fd);
774 return;
775 }
776
777 fd_note(fd, "lingering close");
778 AsyncCall::Pointer call = commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout, NULL));
779
780 debugs(5, 3, HERE << "FD " << fd << " timeout 10");
781 assert(fd_table[fd].flags.open);
782 if (call != NULL) {
783 typedef FdeCbParams Params;
784 Params &params = GetCommParams<Params>(call);
785 params.fd = fd;
786 fd_table[fd].timeoutHandler = call;
787 fd_table[fd].timeout = squid_curtime + static_cast<time_t>(10);
788 }
789
790 Comm::SetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
791 }
792
793 #endif
794
795 /**
796 * enable linger with time of 0 so that when the socket is
797 * closed, TCP generates a RESET
798 */
799 void
800 comm_reset_close(const Comm::ConnectionPointer &conn)
801 {
802 struct linger L;
803 L.l_onoff = 1;
804 L.l_linger = 0;
805
806 if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
807 int xerrno = errno;
808 debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerr(xerrno));
809 }
810 conn->close();
811 }
812
813 // Legacy close function.
814 void
815 old_comm_reset_close(int fd)
816 {
817 struct linger L;
818 L.l_onoff = 1;
819 L.l_linger = 0;
820
821 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
822 int xerrno = errno;
823 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerr(xerrno));
824 }
825 comm_close(fd);
826 }
827
828 #if USE_OPENSSL
829 void
830 commStartSslClose(const FdeCbParams &params)
831 {
832 assert(fd_table[params.fd].ssl);
833 ssl_shutdown_method(fd_table[params.fd].ssl.get());
834 }
835 #endif
836
837 void
838 comm_close_complete(const FdeCbParams &params)
839 {
840 fde *F = &fd_table[params.fd];
841 F->ssl.reset();
842 F->dynamicTlsContext.reset();
843 fd_close(params.fd); /* update fdstat */
844 close(params.fd);
845
846 ++ statCounter.syscalls.sock.closes;
847
848 /* When one connection closes, give accept() a chance, if need be */
849 Comm::AcceptLimiter::Instance().kick();
850 }
851
852 /*
853 * Close the socket fd.
854 *
855 * + call write handlers with ERR_CLOSING
856 * + call read handlers with ERR_CLOSING
857 * + call closing handlers
858 *
859 * NOTE: Comm::ERR_CLOSING will NOT be delivered for CommReads sitting in a
860 * DeferredReadManager.
861 */
862 void
863 _comm_close(int fd, char const *file, int line)
864 {
865 debugs(5, 3, "comm_close: start closing FD " << fd);
866 assert(fd >= 0);
867 assert(fd < Squid_MaxFD);
868
869 fde *F = &fd_table[fd];
870 fdd_table[fd].close_file = file;
871 fdd_table[fd].close_line = line;
872
873 if (F->closing())
874 return;
875
876 /* XXX: is this obsolete behind F->closing() ? */
877 if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
878 return;
879
880 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
881 if (!isOpen(fd)) {
882 debugs(50, DBG_IMPORTANT, HERE << "BUG 3556: FD " << fd << " is not an open socket.");
883 // XXX: do we need to run close(fd) or fd_close(fd) here?
884 return;
885 }
886
887 assert(F->type != FD_FILE);
888
889 PROF_start(comm_close);
890
891 F->flags.close_request = true;
892
893 #if USE_OPENSSL
894 if (F->ssl) {
895 AsyncCall::Pointer startCall=commCbCall(5,4, "commStartSslClose",
896 FdeCbPtrFun(commStartSslClose, NULL));
897 FdeCbParams &startParams = GetCommParams<FdeCbParams>(startCall);
898 startParams.fd = fd;
899 ScheduleCallHere(startCall);
900 }
901 #endif
902
903 // a half-closed fd may lack a reader, so we stop monitoring explicitly
904 if (commHasHalfClosedMonitor(fd))
905 commStopHalfClosedMonitor(fd);
906 commUnsetFdTimeout(fd);
907
908 // notify read/write handlers after canceling select reservations, if any
909 if (COMMIO_FD_WRITECB(fd)->active()) {
910 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
911 COMMIO_FD_WRITECB(fd)->finish(Comm::ERR_CLOSING, errno);
912 }
913 if (COMMIO_FD_READCB(fd)->active()) {
914 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
915 COMMIO_FD_READCB(fd)->finish(Comm::ERR_CLOSING, errno);
916 }
917
918 #if USE_DELAY_POOLS
919 if (ClientInfo *clientInfo = F->clientInfo) {
920 if (clientInfo->selectWaiting) {
921 clientInfo->selectWaiting = false;
922 // kick the queue or it will get stuck as Comm::HandleWrite is not called
923 clientInfo->kickQuotaQueue();
924 }
925 }
926 #endif
927
928 commCallCloseHandlers(fd);
929
930 comm_empty_os_read_buffers(fd);
931
932 AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
933 FdeCbPtrFun(comm_close_complete, NULL));
934 FdeCbParams &completeParams = GetCommParams<FdeCbParams>(completeCall);
935 completeParams.fd = fd;
936 // must use async call to wait for all callbacks
937 // scheduled before comm_close() to finish
938 ScheduleCallHere(completeCall);
939
940 PROF_stop(comm_close);
941 }
942
943 /* Send a udp datagram to specified TO_ADDR. */
944 int
945 comm_udp_sendto(int fd,
946 const Ip::Address &to_addr,
947 const void *buf,
948 int len)
949 {
950 PROF_start(comm_udp_sendto);
951 ++ statCounter.syscalls.sock.sendtos;
952
953 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
954 " using FD " << fd << " using Port " << comm_local_port(fd) );
955
956 struct addrinfo *AI = NULL;
957 to_addr.getAddrInfo(AI, fd_table[fd].sock_family);
958 int x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
959 int xerrno = errno;
960 Ip::Address::FreeAddr(AI);
961
962 PROF_stop(comm_udp_sendto);
963
964 if (x >= 0) {
965 errno = xerrno; // restore for caller to use
966 return x;
967 }
968
969 #if _SQUID_LINUX_
970 if (ECONNREFUSED != xerrno)
971 #endif
972 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerr(xerrno));
973
974 errno = xerrno; // restore for caller to use
975 return Comm::COMM_ERROR;
976 }
977
978 AsyncCall::Pointer
979 comm_add_close_handler(int fd, CLCB * handler, void *data)
980 {
981 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
982 handler << ", data=" << data);
983
984 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
985 CommCloseCbPtrFun(handler, data));
986 comm_add_close_handler(fd, call);
987 return call;
988 }
989
990 void
991 comm_add_close_handler(int fd, AsyncCall::Pointer &call)
992 {
993 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
994
995 /*TODO:Check for a similar scheduled AsyncCall*/
996 // for (c = fd_table[fd].closeHandler; c; c = c->next)
997 // assert(c->handler != handler || c->data != data);
998
999 call->setNext(fd_table[fd].closeHandler);
1000
1001 fd_table[fd].closeHandler = call;
1002 }
1003
1004 // remove function-based close handler
1005 void
1006 comm_remove_close_handler(int fd, CLCB * handler, void *data)
1007 {
1008 assert(isOpen(fd));
1009 /* Find handler in list */
1010 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
1011 handler << ", data=" << data);
1012
1013 AsyncCall::Pointer p, prev = NULL;
1014 for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
1015 typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
1016 const Call *call = dynamic_cast<const Call*>(p.getRaw());
1017 if (!call) // method callbacks have their own comm_remove_close_handler
1018 continue;
1019
1020 typedef CommCloseCbParams Params;
1021 const Params &params = GetCommParams<Params>(p);
1022 if (call->dialer.handler == handler && params.data == data)
1023 break; /* This is our handler */
1024 }
1025
1026 // comm_close removes all close handlers so our handler may be gone
1027 if (p != NULL) {
1028 p->dequeue(fd_table[fd].closeHandler, prev);
1029 p->cancel("comm_remove_close_handler");
1030 }
1031 }
1032
1033 // remove method-based close handler
1034 void
1035 comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1036 {
1037 assert(isOpen(fd));
1038 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1039
1040 // comm_close removes all close handlers so our handler may be gone
1041 AsyncCall::Pointer p, prev = NULL;
1042 for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());
1043
1044 if (p != NULL)
1045 p->dequeue(fd_table[fd].closeHandler, prev);
1046 call->cancel("comm_remove_close_handler");
1047 }
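
/*
 * Sketch of pairing the function-based add/remove calls above (assumes a
 * hypothetical CLCB handler named myConnClosed):
 *
 *   static CLCB myConnClosed; // void myConnClosed(const CommCloseCbParams &params)
 *
 *   comm_add_close_handler(fd, myConnClosed, myData);
 *   ...
 *   comm_remove_close_handler(fd, myConnClosed, myData); // if we no longer care
 */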
1048
1049 static void
1050 commSetNoLinger(int fd)
1051 {
1052
1053 struct linger L;
1054 L.l_onoff = 0; /* off */
1055 L.l_linger = 0;
1056
1057 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0) {
1058 int xerrno = errno;
1059 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1060 }
1061 fd_table[fd].flags.nolinger = true;
1062 }
1063
1064 static void
1065 commSetReuseAddr(int fd)
1066 {
1067 int on = 1;
1068 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0) {
1069 int xerrno = errno;
1070 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1071 }
1072 }
1073
1074 static void
1075 commSetTcpRcvbuf(int fd, int size)
1076 {
1077 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0) {
1078 int xerrno = errno;
1079 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1080 }
1081 if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0) {
1082 int xerrno = errno;
1083 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1084 }
1085 #ifdef TCP_WINDOW_CLAMP
1086 if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0) {
1087 int xerrno = errno;
1088 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ", SIZE " << size << ": " << xstrerr(xerrno));
1089 }
1090 #endif
1091 }
1092
1093 int
1094 commSetNonBlocking(int fd)
1095 {
1096 #if _SQUID_WINDOWS_
1097 int nonblocking = TRUE;
1098
1099 if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
1100 int xerrno = errno;
1101 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno) << " " << fd_table[fd].type);
1102 return Comm::COMM_ERROR;
1103 }
1104
1105 #else
1106 int flags;
1107 int dummy = 0;
1108
1109 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1110 int xerrno = errno;
1111 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFL: " << xstrerr(xerrno));
1112 return Comm::COMM_ERROR;
1113 }
1114
1115 if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
1116 int xerrno = errno;
1117 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1118 return Comm::COMM_ERROR;
1119 }
1120 #endif
1121
1122 fd_table[fd].flags.nonblocking = true;
1123 return 0;
1124 }
1125
1126 int
1127 commUnsetNonBlocking(int fd)
1128 {
1129 #if _SQUID_WINDOWS_
1130 int nonblocking = FALSE;
1131
1132 if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
1133 #else
1134 int flags;
1135 int dummy = 0;
1136
1137 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1138 int xerrno = errno;
1139 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFL: " << xstrerr(xerrno));
1140 return Comm::COMM_ERROR;
1141 }
1142
1143 if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
1144 #endif
1145 int xerrno = errno;
1146 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1147 return Comm::COMM_ERROR;
1148 }
1149
1150 fd_table[fd].flags.nonblocking = false;
1151 return 0;
1152 }
1153
1154 void
1155 commSetCloseOnExec(int fd)
1156 {
1157 #ifdef FD_CLOEXEC
1158 int flags;
1159 int dummy = 0;
1160
1161 if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1162 int xerrno = errno;
1163 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": fcntl F_GETFD: " << xstrerr(xerrno));
1164 return;
1165 }
1166
1167 if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0) {
1168 int xerrno = errno;
1169 debugs(50, DBG_CRITICAL, MYNAME << "FD " << fd << ": set close-on-exec failed: " << xstrerr(xerrno));
1170 }
1171
1172 fd_table[fd].flags.close_on_exec = true;
1173
1174 #endif
1175 }
1176
1177 #ifdef TCP_NODELAY
1178 static void
1179 commSetTcpNoDelay(int fd)
1180 {
1181 int on = 1;
1182
1183 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0) {
1184 int xerrno = errno;
1185 debugs(50, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1186 }
1187
1188 fd_table[fd].flags.nodelay = true;
1189 }
1190
1191 #endif
1192
1193 void
1194 commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1195 {
1196 int on = 1;
1197 #ifdef TCP_KEEPCNT
1198 if (timeout && interval) {
1199 int count = (timeout + interval - 1) / interval;
1200 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(on)) < 0) {
1201 int xerrno = errno;
1202 debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1203 }
1204 }
1205 #endif
1206 #ifdef TCP_KEEPIDLE
1207 if (idle) {
1208 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(on)) < 0) {
1209 int xerrno = errno;
1210 debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1211 }
1212 }
1213 #endif
1214 #ifdef TCP_KEEPINTVL
1215 if (interval) {
1216 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(on)) < 0) {
1217 int xerrno = errno;
1218 debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1219 }
1220 }
1221 #endif
1222 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0) {
1223 int xerrno = errno;
1224 debugs(5, DBG_IMPORTANT, MYNAME << "FD " << fd << ": " << xstrerr(xerrno));
1225 }
1226 }
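
/*
 * Worked example: commSetTcpKeepalive(fd, 60, 10, 30) asks the kernel to start
 * probing after 60s of idleness, probe every 10s, and give up after roughly
 * 30s of unanswered probes, i.e. TCP_KEEPCNT = (30 + 10 - 1) / 10 = 3 probes
 * (where the TCP_KEEP* options are supported).
 */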
1227
1228 void
1229 comm_init(void)
1230 {
1231 fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
1232 fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));
1233
1234 /* make sure the accept() socket FIFO delay queue exists */
1235 Comm::AcceptLimiter::Instance();
1236
1237 // make sure the IO pending callback table exists
1238 Comm::CallbackTableInit();
1239
1240 /* XXX account fd_table */
1241 /* Keep a few file descriptors free so that we don't run out of FD's
1242 * after accepting a client but before it opens a socket or a file.
1243 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1244 RESERVED_FD = min(100, Squid_MaxFD / 4);
1245
1246 TheHalfClosed = new DescriptorSet;
1247
1248 /* setup the select loop module */
1249 Comm::SelectLoopInit();
1250 }
1251
1252 void
1253 comm_exit(void)
1254 {
1255 delete TheHalfClosed;
1256 TheHalfClosed = NULL;
1257
1258 safe_free(fd_table);
1259 safe_free(fdd_table);
1260 Comm::CallbackTableDestruct();
1261 }
1262
1263 #if USE_DELAY_POOLS
1264 // called when the queue is done waiting for the client bucket to fill
1265 void
1266 commHandleWriteHelper(void * data)
1267 {
1268 CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
1269 assert(queue);
1270
1271 ClientInfo *clientInfo = queue->clientInfo;
1272 // ClientInfo invalidates the queue if freed, so if we got here through
1273 // eventAdd's cbdata protections, everything should be valid and consistent
1274 assert(clientInfo);
1275 assert(clientInfo->hasQueue());
1276 assert(clientInfo->hasQueue(queue));
1277 assert(!clientInfo->selectWaiting);
1278 assert(clientInfo->eventWaiting);
1279 clientInfo->eventWaiting = false;
1280
1281 do {
1282 // check that the head descriptor is still relevant
1283 const int head = clientInfo->quotaPeekFd();
1284 Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);
1285
1286 if (fd_table[head].clientInfo == clientInfo &&
1287 clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
1288 !fd_table[head].closing()) {
1289
1290 // wait for the head descriptor to become ready for writing
1291 Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
1292 clientInfo->selectWaiting = true;
1293 return;
1294 }
1295
1296 clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
1297 // and continue looking for a relevant one
1298 } while (clientInfo->hasQueue());
1299
1300 debugs(77,3, HERE << "emptied queue");
1301 }
1302
1303 bool
1304 ClientInfo::hasQueue() const
1305 {
1306 assert(quotaQueue);
1307 return !quotaQueue->empty();
1308 }
1309
1310 bool
1311 ClientInfo::hasQueue(const CommQuotaQueue *q) const
1312 {
1313 assert(quotaQueue);
1314 return quotaQueue == q;
1315 }
1316
1317 /// returns the first descriptor to be dequeued
1318 int
1319 ClientInfo::quotaPeekFd() const
1320 {
1321 assert(quotaQueue);
1322 return quotaQueue->front();
1323 }
1324
1325 /// returns the reservation ID of the first descriptor to be dequeued
1326 unsigned int
1327 ClientInfo::quotaPeekReserv() const
1328 {
1329 assert(quotaQueue);
1330 return quotaQueue->outs + 1;
1331 }
1332
1333 /// queues a given fd, creating the queue if necessary; returns reservation ID
1334 unsigned int
1335 ClientInfo::quotaEnqueue(int fd)
1336 {
1337 assert(quotaQueue);
1338 return quotaQueue->enqueue(fd);
1339 }
1340
1341 /// removes queue head
1342 void
1343 ClientInfo::quotaDequeue()
1344 {
1345 assert(quotaQueue);
1346 quotaQueue->dequeue();
1347 }
1348
1349 void
1350 ClientInfo::kickQuotaQueue()
1351 {
1352 if (!eventWaiting && !selectWaiting && hasQueue()) {
1353 // wait at least a second if the bucket is empty
1354 const double delay = (bucketSize < 1.0) ? 1.0 : 0.0;
1355 eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1356 quotaQueue, delay, 0, true);
1357 eventWaiting = true;
1358 }
1359 }
1360
1361 /// calculates how much to write for a single dequeued client
1362 int
1363 ClientInfo::quotaForDequed()
1364 {
1365 /* If we have multiple clients and give full bucketSize to each client then
1366 * clt1 may often get a lot more because clt1->clt2 time distance in the
1367 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1368 * We divide quota evenly to be more fair. */
1369
1370 if (!rationedCount) {
1371 rationedCount = quotaQueue->size() + 1;
1372
1373 // The delay in ration recalculation _temporarily_ deprives clients of
1374 // bytes that should have trickled in while rationedCount was positive.
1375 refillBucket();
1376
1377 // Rounding errors do not accumulate here, but we round down to avoid
1378 // negative bucket sizes after write with rationedCount=1.
1379 rationedQuota = static_cast<int>(floor(bucketSize/rationedCount));
1380 debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
1381 '*' << rationedCount);
1382 }
1383
1384 --rationedCount;
1385 debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
1386 " rations remaining: " << rationedCount);
1387
1388 // update 'last seen' time to prevent clientdb GC from dropping us
1389 last_seen = squid_curtime;
1390 return rationedQuota;
1391 }
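
/*
 * Worked example (illustrative numbers): with three queued descriptors and
 * bucketSize = 900 after the refill, rationedCount becomes 3 + 1 = 4 and each
 * of the next four dequeued clients gets rationedQuota = floor(900/4) = 225 bytes.
 */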
1392
1393 ///< adds bytes to the quota bucket based on the rate and passed time
1394 void
1395 ClientInfo::refillBucket()
1396 {
1397 // all these times are in seconds, with double precision
1398 const double currTime = current_dtime;
1399 const double timePassed = currTime - prevTime;
1400
1401 // Calculate allowance for the time passed. Use double to avoid
1402 // accumulating rounding errors for small intervals. For example, always
1403 // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
1404 const double gain = timePassed * writeSpeedLimit;
1405
1406 debugs(77,5, HERE << currTime << " clt" << (const char*)hash.key << ": " <<
1407 bucketSize << " + (" << timePassed << " * " << writeSpeedLimit <<
1408 " = " << gain << ')');
1409
1410 // to further combat error accumulation during micro updates,
1411 // quit before updating time if we cannot add at least one byte
1412 if (gain < 1.0)
1413 return;
1414
1415 prevTime = currTime;
1416
1417 // for "first" connections, drain initial fat before refilling but keep
1418 // updating prevTime to avoid bursts after the fat is gone
1419 if (bucketSize > bucketSizeLimit) {
1420 debugs(77,4, HERE << "not refilling while draining initial fat");
1421 return;
1422 }
1423
1424 bucketSize += gain;
1425
1426 // obey quota limits
1427 if (bucketSize > bucketSizeLimit)
1428 bucketSize = bucketSizeLimit;
1429 }
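
/*
 * Worked example (illustrative numbers): with writeSpeedLimit = 1000 bytes/s
 * and 0.25s since prevTime, gain = 0.25 * 1000 = 250 bytes, so bucketSize grows
 * by 250 and is then clamped to bucketSizeLimit. Had only 0.0005s passed, gain
 * would be 0.5 < 1.0 and the method would return without touching prevTime,
 * letting sub-byte allowances accumulate across calls.
 */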
1430
1431 void
1432 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
1433 {
1434 debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
1435 " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
1436 " highwatermark=" << aHighWatermark);
1437
1438 // set or possibly update traffic shaping parameters
1439 writeLimitingActive = true;
1440 writeSpeedLimit = aWriteSpeedLimit;
1441 bucketSizeLimit = aHighWatermark;
1442
1443 // but some members should only be set once for a newly activated bucket
1444 if (firstTimeConnection) {
1445 firstTimeConnection = false;
1446
1447 assert(!selectWaiting);
1448 assert(!quotaQueue);
1449 quotaQueue = new CommQuotaQueue(this);
1450
1451 bucketSize = anInitialBurst;
1452 prevTime = current_dtime;
1453 }
1454 }
1455
1456 CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1457 ins(0), outs(0)
1458 {
1459 assert(clientInfo);
1460 }
1461
1462 CommQuotaQueue::~CommQuotaQueue()
1463 {
1464 assert(!clientInfo); // ClientInfo should clear this before destroying us
1465 }
1466
1467 /// places the given fd at the end of the queue; returns reservation ID
1468 unsigned int
1469 CommQuotaQueue::enqueue(int fd)
1470 {
1471 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1472 ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1473 fds.push_back(fd);
1474 return ++ins;
1475 }
1476
1477 /// removes queue head
1478 void
1479 CommQuotaQueue::dequeue()
1480 {
1481 assert(!fds.empty());
1482 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1483 ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1484 fds.size());
1485 fds.pop_front();
1486 ++outs;
1487 }
1488 #endif
1489
1490 /*
1491 * hm, this might be too general-purpose for all the places we'd
1492 * like to use it.
1493 */
1494 int
1495 ignoreErrno(int ierrno)
1496 {
1497 switch (ierrno) {
1498
1499 case EINPROGRESS:
1500
1501 case EWOULDBLOCK:
1502 #if EAGAIN != EWOULDBLOCK
1503
1504 case EAGAIN:
1505 #endif
1506
1507 case EALREADY:
1508
1509 case EINTR:
1510 #ifdef ERESTART
1511
1512 case ERESTART:
1513 #endif
1514
1515 return 1;
1516
1517 default:
1518 return 0;
1519 }
1520
1521 /* NOTREACHED */
1522 }
1523
1524 void
1525 commCloseAllSockets(void)
1526 {
1527 int fd;
1528 fde *F = NULL;
1529
1530 for (fd = 0; fd <= Biggest_FD; ++fd) {
1531 F = &fd_table[fd];
1532
1533 if (!F->flags.open)
1534 continue;
1535
1536 if (F->type != FD_SOCKET)
1537 continue;
1538
1539 if (F->flags.ipc) /* don't close inter-process sockets */
1540 continue;
1541
1542 if (F->timeoutHandler != NULL) {
1543 AsyncCall::Pointer callback = F->timeoutHandler;
1544 F->timeoutHandler = NULL;
1545 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1546 ScheduleCallHere(callback);
1547 } else {
1548 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1549 old_comm_reset_close(fd);
1550 }
1551 }
1552 }
1553
1554 static bool
1555 AlreadyTimedOut(fde *F)
1556 {
1557 if (!F->flags.open)
1558 return true;
1559
1560 if (F->timeout == 0)
1561 return true;
1562
1563 if (F->timeout > squid_curtime)
1564 return true;
1565
1566 return false;
1567 }
1568
1569 static bool
1570 writeTimedOut(int fd)
1571 {
1572 if (!COMMIO_FD_WRITECB(fd)->active())
1573 return false;
1574
1575 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1576 return false;
1577
1578 return true;
1579 }
1580
1581 void
1582 checkTimeouts(void)
1583 {
1584 int fd;
1585 fde *F = NULL;
1586 AsyncCall::Pointer callback;
1587
1588 for (fd = 0; fd <= Biggest_FD; ++fd) {
1589 F = &fd_table[fd];
1590
1591 if (writeTimedOut(fd)) {
1592 // We have an active write callback and we are timed out
1593 debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
1594 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1595 COMMIO_FD_WRITECB(fd)->finish(Comm::COMM_ERROR, ETIMEDOUT);
1596 } else if (AlreadyTimedOut(F))
1597 continue;
1598
1599 debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");
1600
1601 if (F->timeoutHandler != NULL) {
1602 debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
1603 callback = F->timeoutHandler;
1604 F->timeoutHandler = NULL;
1605 ScheduleCallHere(callback);
1606 } else {
1607 debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
1608 comm_close(fd);
1609 }
1610 }
1611 }
1612
1613 /// Start waiting for a possibly half-closed connection to close
1614 // by scheduling a read callback to a monitoring handler that
1615 // will close the connection on read errors.
1616 void
1617 commStartHalfClosedMonitor(int fd)
1618 {
1619 debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
1620 assert(isOpen(fd) && !commHasHalfClosedMonitor(fd));
1621 (void)TheHalfClosed->add(fd); // could also assert the result
1622 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1623 }
1624
1625 static
1626 void
1627 commPlanHalfClosedCheck()
1628 {
1629 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1630 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1631 WillCheckHalfClosed = true;
1632 }
1633 }
1634
1635 /// iterates over all descriptors that may need half-closed tests and
1636 /// calls comm_read for those that do; re-schedules the check if needed
1637 static
1638 void
1639 commHalfClosedCheck(void *)
1640 {
1641 debugs(5, 5, HERE << "checking " << *TheHalfClosed);
1642
1643 typedef DescriptorSet::const_iterator DSCI;
1644 const DSCI end = TheHalfClosed->end();
1645 for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
1646 Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
1647 c->fd = *i;
1648 if (!fd_table[c->fd].halfClosedReader) { // not reading already
1649 AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
1650 CommIoCbPtrFun(&commHalfClosedReader, NULL));
1651 Comm::Read(c, call);
1652 fd_table[c->fd].halfClosedReader = call;
1653 } else
1654 c->fd = -1; // XXX: temporary. prevent c replacement erase closing listed FD
1655 }
1656
1657 WillCheckHalfClosed = false; // as far as we know
1658 commPlanHalfClosedCheck(); // may need to check again
1659 }
1660
1661 /// checks whether we are waiting for a possibly half-closed connection to close
1662 // We are monitoring if the read handler for the fd is the monitoring handler.
1663 bool
1664 commHasHalfClosedMonitor(int fd)
1665 {
1666 return TheHalfClosed->has(fd);
1667 }
1668
1669 /// stop waiting for a possibly half-closed connection to close
1670 void
1671 commStopHalfClosedMonitor(int const fd)
1672 {
1673 debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
1674
1675 // cancel the read if one was scheduled
1676 AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1677 if (reader != NULL)
1678 Comm::ReadCancel(fd, reader);
1679 fd_table[fd].halfClosedReader = NULL;
1680
1681 TheHalfClosed->del(fd);
1682 }
1683
1684 /// I/O handler for the possibly half-closed connection monitoring code
1685 static void
1686 commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, Comm::Flag flag, int, void *)
1687 {
1688 // there cannot be more data coming in on half-closed connections
1689 assert(size == 0);
1690 assert(conn != NULL);
1691 assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read
1692
1693 fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now
1694
1695 // nothing to do if fd is being closed
1696 if (flag == Comm::ERR_CLOSING)
1697 return;
1698
1699 // if read failed, close the connection
1700 if (flag != Comm::OK) {
1701 debugs(5, 3, HERE << "closing " << conn);
1702 conn->close();
1703 return;
1704 }
1705
1706 // continue waiting for close or error
1707 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1708 }
1709
1710 CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}
1711
1712 CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
1713 : conn(c), buf(buf_), len(len_), callback(callback_) {}
1714
1715 DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}
1716
1717 DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
1718
1719 DeferredReadManager::~DeferredReadManager()
1720 {
1721 flushReads();
1722 assert (deferredReads.empty());
1723 }
1724
1725 /* explicit instantiation required for some systems */
1726
1727 /// \cond AUTODOCS_IGNORE
1728 template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
1729 /// \endcond
1730
1731 void
1732 DeferredReadManager::delayRead(DeferredRead const &aRead)
1733 {
1734 debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
1735 CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);
1736
1737 // We have to use a global function as a closer and point to temp
1738 // instead of "this" because DeferredReadManager is not a job and
1739 // is not even cbdata protected
1740 // XXX: and yet we use cbdata protection functions on it??
1741 AsyncCall::Pointer closer = commCbCall(5,4,
1742 "DeferredReadManager::CloseHandler",
1743 CommCloseCbPtrFun(&CloseHandler, temp));
1744 comm_add_close_handler(aRead.theRead.conn->fd, closer);
1745 temp->element.closer = closer; // remember so that we can cancel
1746 }
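
/*
 * Sketch of parking a read until later (the reader function, context and
 * manager instance are assumed; real users own a DeferredReadManager member):
 *
 *   static DeferrableRead myReader; // void myReader(void *context, CommRead const &rd)
 *
 *   CommRead rd(conn, buffer, bufferLength, readCallback);
 *   readManager.delayRead(DeferredRead(&myReader, contextData, rd));
 *   ...
 *   readManager.kickReads(-1); // later: flush everything that was deferred
 */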
1747
1748 void
1749 DeferredReadManager::CloseHandler(const CommCloseCbParams &params)
1750 {
1751 if (!cbdataReferenceValid(params.data))
1752 return;
1753
1754 CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)params.data;
1755
1756 temp->element.closer = NULL;
1757 temp->element.markCancelled();
1758 }
1759
1760 DeferredRead
1761 DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
1762 {
1763 assert (!deferredReads.empty());
1764
1765 DeferredRead &read = deferredReads.head->element;
1766
1767 // NOTE: at this point the connection has been paused/stalled for an unknown
1768 // amount of time. We must re-validate that it is active and usable.
1769
1770 // If the connection has been closed already, cancel this read.
1771 if (!fd_table || !Comm::IsConnOpen(read.theRead.conn)) {
1772 if (read.closer != NULL) {
1773 read.closer->cancel("Connection closed before.");
1774 read.closer = NULL;
1775 }
1776 read.markCancelled();
1777 }
1778
1779 if (!read.cancelled) {
1780 comm_remove_close_handler(read.theRead.conn->fd, read.closer);
1781 read.closer = NULL;
1782 }
1783
1784 DeferredRead result = deferredReads.pop_front();
1785
1786 return result;
1787 }
1788
1789 void
1790 DeferredReadManager::kickReads(int const count)
1791 {
1792 /* if we had CbDataList::size() we could consolidate this and flushReads */
1793
1794 if (count < 1) {
1795 flushReads();
1796 return;
1797 }
1798
1799 size_t remaining = count;
1800
1801 while (!deferredReads.empty() && remaining) {
1802 DeferredRead aRead = popHead(deferredReads);
1803 kickARead(aRead);
1804
1805 if (!aRead.cancelled)
1806 --remaining;
1807 }
1808 }
1809
1810 void
1811 DeferredReadManager::flushReads()
1812 {
1813 CbDataListContainer<DeferredRead> reads;
1814 reads = deferredReads;
1815 deferredReads = CbDataListContainer<DeferredRead>();
1816
1817 // XXX: For fairness this SHOULD randomize the order
1818 while (!reads.empty()) {
1819 DeferredRead aRead = popHead(reads);
1820 kickARead(aRead);
1821 }
1822 }
1823
1824 void
1825 DeferredReadManager::kickARead(DeferredRead const &aRead)
1826 {
1827 if (aRead.cancelled)
1828 return;
1829
1830 if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
1831 return;
1832
1833 debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);
1834
1835 aRead.theReader(aRead.theContext, aRead.theRead);
1836 }
1837
1838 void
1839 DeferredRead::markCancelled()
1840 {
1841 cancelled = true;
1842 }
1843
1844 int
1845 CommSelectEngine::checkEvents(int timeout)
1846 {
1847 static time_t last_timeout = 0;
1848
1849 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
1850 if (squid_curtime > last_timeout) {
1851 last_timeout = squid_curtime;
1852 checkTimeouts();
1853 }
1854
1855 switch (Comm::DoSelect(timeout)) {
1856
1857 case Comm::OK:
1858
1859 case Comm::TIMEOUT:
1860 return 0;
1861
1862 case Comm::IDLE:
1863
1864 case Comm::SHUTDOWN:
1865 return EVENT_IDLE;
1866
1867 case Comm::COMM_ERROR:
1868 return EVENT_ERROR;
1869
1870 default:
1871 fatal_dump("comm.cc: Internal error -- this should never happen.");
1872 return EVENT_ERROR;
1873 };
1874 }
1875
1876 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
1877 int
1878 comm_open_uds(int sock_type,
1879 int proto,
1880 struct sockaddr_un* addr,
1881 int flags)
1882 {
1883 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
1884
1885 int new_socket;
1886
1887 PROF_start(comm_open);
1888 /* Create socket for accepting new connections. */
1889 ++ statCounter.syscalls.sock.sockets;
1890
1891 /* Setup the socket addrinfo details for use */
1892 struct addrinfo AI;
1893 AI.ai_flags = 0;
1894 AI.ai_family = PF_UNIX;
1895 AI.ai_socktype = sock_type;
1896 AI.ai_protocol = proto;
1897 AI.ai_addrlen = SUN_LEN(addr);
1898 AI.ai_addr = (sockaddr*)addr;
1899 AI.ai_canonname = NULL;
1900 AI.ai_next = NULL;
1901
1902 debugs(50, 3, HERE << "Attempt open socket for: " << addr->sun_path);
1903
1904 if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
1905 int xerrno = errno;
1906 /* Increase the number of reserved fd's if calls to socket()
1907 * are failing because the open file table is full. This
1908 * limits the number of simultaneous clients */
1909
1910 if (limitError(xerrno)) {
1911 debugs(50, DBG_IMPORTANT, MYNAME << "socket failure: " << xstrerr(xerrno));
1912 fdAdjustReserved();
1913 } else {
1914 debugs(50, DBG_CRITICAL, MYNAME << "socket failure: " << xstrerr(xerrno));
1915 }
1916
1917 PROF_stop(comm_open);
1918 return -1;
1919 }
1920
1921 debugs(50, 3, "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);
1922
1923 /* update fdstat */
1924 debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");
1925
1926 assert(!isOpen(new_socket));
1927 fd_open(new_socket, FD_MSGHDR, addr->sun_path);
1928
1929 fdd_table[new_socket].close_file = NULL;
1930
1931 fdd_table[new_socket].close_line = 0;
1932
1933 fd_table[new_socket].sock_family = AI.ai_family;
1934
1935 if (!(flags & COMM_NOCLOEXEC))
1936 commSetCloseOnExec(new_socket);
1937
1938 if (flags & COMM_REUSEADDR)
1939 commSetReuseAddr(new_socket);
1940
1941 if (flags & COMM_NONBLOCKING) {
1942 if (commSetNonBlocking(new_socket) != Comm::OK) {
1943 comm_close(new_socket);
1944 PROF_stop(comm_open);
1945 return -1;
1946 }
1947 }
1948
1949 if (flags & COMM_DOBIND) {
1950 if (commBind(new_socket, AI) != Comm::OK) {
1951 comm_close(new_socket);
1952 PROF_stop(comm_open);
1953 return -1;
1954 }
1955 }
1956
1957 #ifdef TCP_NODELAY
1958 if (sock_type == SOCK_STREAM)
1959 commSetTcpNoDelay(new_socket);
1960
1961 #endif
1962
1963 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
1964 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
1965
1966 PROF_stop(comm_open);
1967
1968 return new_socket;
1969 }
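
/*
 * Sketch of opening a datagram UDS endpoint with the helper above (the path
 * and flags are illustrative):
 *
 *   struct sockaddr_un addr;
 *   memset(&addr, 0, sizeof(addr));
 *   addr.sun_family = AF_UNIX;
 *   xstrncpy(addr.sun_path, "/var/run/squid/example.sock", sizeof(addr.sun_path));
 *   const int fd = comm_open_uds(SOCK_DGRAM, 0, &addr, COMM_NONBLOCKING);
 *   if (fd < 0)
 *       debugs(50, DBG_CRITICAL, "cannot open UDS socket");
 */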
1970