/*
 * src/comm.cc — from thirdparty/squid.git
 * (gitweb snapshot at commit "Fix printf format for size_t and mb_size_t")
 */
1 /*
2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 05 Socket Functions */
10
11 #include "squid.h"
12 #include "ClientInfo.h"
13 #include "comm/AcceptLimiter.h"
14 #include "comm/comm_internal.h"
15 #include "comm/Connection.h"
16 #include "comm/IoCallback.h"
17 #include "comm/Loops.h"
18 #include "comm/Read.h"
19 #include "comm/TcpAcceptor.h"
20 #include "comm/Write.h"
21 #include "CommRead.h"
22 #include "compat/cmsg.h"
23 #include "DescriptorSet.h"
24 #include "event.h"
25 #include "fd.h"
26 #include "fde.h"
27 #include "globals.h"
28 #include "icmp/net_db.h"
29 #include "ip/Intercept.h"
30 #include "ip/QosConfig.h"
31 #include "ip/tools.h"
32 #include "pconn.h"
33 #include "profiler/Profiler.h"
34 #include "SBuf.h"
35 #include "SquidConfig.h"
36 #include "StatCounters.h"
37 #include "StoreIOBuffer.h"
38 #include "tools.h"
39
40 #if USE_OPENSSL
41 #include "ssl/support.h"
42 #endif
43
44 #include <cerrno>
45 #include <cmath>
46 #if _SQUID_CYGWIN_
47 #include <sys/ioctl.h>
48 #endif
49 #ifdef HAVE_NETINET_TCP_H
50 #include <netinet/tcp.h>
51 #endif
52 #if HAVE_SYS_UN_H
53 #include <sys/un.h>
54 #endif
55
56 /*
57 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
58 */
59
60 static IOCB commHalfClosedReader;
61 static void comm_init_opened(const Comm::ConnectionPointer &conn, const char *note, struct addrinfo *AI);
62 static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
63
64 #if USE_DELAY_POOLS
65 CBDATA_CLASS_INIT(CommQuotaQueue);
66
67 static void commHandleWriteHelper(void * data);
68 #endif
69
70 /* STATIC */
71
72 static DescriptorSet *TheHalfClosed = NULL; /// the set of half-closed FDs
73 static bool WillCheckHalfClosed = false; /// true if check is scheduled
74 static EVH commHalfClosedCheck;
75 static void commPlanHalfClosedCheck();
76
77 static Comm::Flag commBind(int s, struct addrinfo &);
78 static void commSetReuseAddr(int);
79 static void commSetNoLinger(int);
80 #ifdef TCP_NODELAY
81 static void commSetTcpNoDelay(int);
82 #endif
83 static void commSetTcpRcvbuf(int, int);
84
85 fd_debug_t *fdd_table = NULL;
86
87 bool
88 isOpen(const int fd)
89 {
90 return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
91 }
92
/**
 * Empty the read buffers
 *
 * This is a magical routine that empties the read buffers.
 * Under some platforms (Linux) if a buffer has data in it before
 * you call close(), the socket will hang and take quite a while
 * to timeout.
 */
static void
comm_empty_os_read_buffers(int fd)
{
#if _SQUID_LINUX_
#if USE_OPENSSL
    // Bug 4146: SSL-Bump BIO does not release sockets on close.
    if (fd_table[fd].ssl)
        return;
#endif

    /* prevent those nasty RST packets */
    char buf[SQUID_TCP_SO_RCVBUF];
    // only drain non-blocking sockets; a blocking read here could stall Squid
    if (fd_table[fd].flags.nonblocking) {
        while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
    }
#endif
}
118
/**
 * synchronous wrapper around udp socket functions
 *
 * \param from receives the sender's address as filled in by recvfrom()
 * \returns the recvfrom() result: bytes received, or -1 (errno set)
 */
int
comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
{
    ++ statCounter.syscalls.sock.recvfroms;
    debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
    struct addrinfo *AI = NULL;
    Ip::Address::InitAddr(AI);
    int x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
    // copy whatever recvfrom() placed in AI back to the caller, then free AI
    from = *AI;
    Ip::Address::FreeAddr(AI);
    return x;
}
134
135 int
136 comm_udp_recv(int fd, void *buf, size_t len, int flags)
137 {
138 Ip::Address nul;
139 return comm_udp_recvfrom(fd, buf, len, flags, nul);
140 }
141
/// thin wrapper over the send(2) system call for UDP sockets
ssize_t
comm_udp_send(int s, const void *buf, size_t len, int flags)
{
    const ssize_t wrote = send(s, buf, len, flags);
    return wrote;
}
147
/// whether an asynchronous write is still pending on fd
bool
comm_has_incomplete_write(int fd)
{
    assert(isOpen(fd) && COMMIO_FD_WRITECB(fd) != NULL);
    return COMMIO_FD_WRITECB(fd)->active();
}
154
155 /**
156 * Queue a write. handler/handler_data are called when the write fully
157 * completes, on error, or on file descriptor close.
158 */
159
/* Return the local port associated with fd. */
/// Looks up (and caches in fd_table) the local port of an open socket.
/// \returns the port number, or 0 when fd is closed or getsockname() fails
unsigned short
comm_local_port(int fd)
{
    Ip::Address temp;
    struct addrinfo *addr = NULL;
    fde *F = &fd_table[fd];

    /* If the fd is closed already, just return */

    if (!F->flags.open) {
        debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
        return 0;
    }

    // use the cached port when we already know it
    if (F->local_addr.port())
        return F->local_addr.port();

    // match the socket's family so getsockname() fills a compatible sockaddr
    if (F->sock_family == AF_INET)
        temp.setIPv4();

    Ip::Address::InitAddr(addr);

    if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
        debugs(50, DBG_IMPORTANT, "comm_local_port: Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerror());
        Ip::Address::FreeAddr(addr);
        return 0;
    }
    temp = *addr;

    Ip::Address::FreeAddr(addr);

    if (F->local_addr.isAnyAddr()) {
        /* save the whole local address, not just the port. */
        F->local_addr = temp;
    } else {
        F->local_addr.port(temp.port());
    }

    debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.port() << "(family=" << F->sock_family << ")");
    return F->local_addr.port();
}
202
/// bind socket s to inaddr; returns Comm::OK on success, Comm::COMM_ERROR otherwise
static Comm::Flag
commBind(int s, struct addrinfo &inaddr)
{
    ++ statCounter.syscalls.sock.binds;

    if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
        debugs(50, 6, "commBind: bind socket FD " << s << " to " << fd_table[s].local_addr);
        return Comm::OK;
    }

    debugs(50, 0, "commBind: Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerror());

    return Comm::COMM_ERROR;
}
217
/**
 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
 * is OR of flags specified in comm.h. Defaults TOS
 *
 * \returns the new socket FD, or -1 on failure (see comm_openex)
 */
int
comm_open(int sock_type,
          int proto,
          Ip::Address &addr,
          int flags,
          const char *note)
{
    // thin convenience wrapper; all the work happens in comm_openex()
    return comm_openex(sock_type, proto, addr, flags, note);
}
231
/// Open a listener socket for conn->local, forcing COMM_DOBIND.
/// On failure conn->fd receives -1 from comm_openex().
void
comm_open_listener(int sock_type,
                   int proto,
                   Comm::ConnectionPointer &conn,
                   const char *note)
{
    /* all listener sockets require bind() */
    conn->flags |= COMM_DOBIND;

    /* attempt native enabled port. */
    conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, note);
}
244
245 int
246 comm_open_listener(int sock_type,
247 int proto,
248 Ip::Address &addr,
249 int flags,
250 const char *note)
251 {
252 int sock = -1;
253
254 /* all listener sockets require bind() */
255 flags |= COMM_DOBIND;
256
257 /* attempt native enabled port. */
258 sock = comm_openex(sock_type, proto, addr, flags, note);
259
260 return sock;
261 }
262
/// true when anErrno reports file-descriptor exhaustion
/// (per-process EMFILE or system-wide ENFILE)
static bool
limitError(int const anErrno)
{
    switch (anErrno) {
    case ENFILE:
    case EMFILE:
        return true;
    default:
        return false;
    }
}
268
/// Set or clear IPV6_V6ONLY on fd.
/// NOTE: the second parameter is a boolean toggle (1=ON, 0=OFF) despite
/// its misleading 'tos' name.
void
comm_set_v6only(int fd, int tos)
{
#ifdef IPV6_V6ONLY
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
        debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerror());
    }
#else
    debugs(50, 0, "WARNING: comm_open: setsockopt(IPV6_V6ONLY) not supported on this platform");
#endif /* sockopt */
}
280
/**
 * Set the socket option required for TPROXY spoofing for:
 * - Linux TPROXY v4 support,
 * - OpenBSD divert-to support,
 * - FreeBSD IPFW TPROXY v4 support.
 */
void
comm_set_transparent(int fd)
{
#if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
# define soLevel SOL_IP
# define soFlag IP_TRANSPARENT
    bool doneSuid = false; // Linux needs no privilege escalation for this option

#elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
# define soLevel SOL_SOCKET
# define soFlag SO_BINDANY
    enter_suid();
    bool doneSuid = true;

#elif defined(IP_BINDANY) // FreeBSD with IPFW
# define soLevel IPPROTO_IP
# define soFlag IP_BINDANY
    enter_suid();
    bool doneSuid = true;

#else
    debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
#endif /* sockopt */

#if defined(soLevel) && defined(soFlag)
    int tos = 1;
    if (setsockopt(fd, soLevel, soFlag, (char *) &tos, sizeof(int)) < 0) {
        debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(TPROXY) on FD " << fd << ": " << xstrerror());
    } else {
        /* mark the socket as having transparent options */
        fd_table[fd].flags.transparent = true;
    }
    if (doneSuid)
        leave_suid(); // drop the privileges raised by enter_suid() above
#endif
}
323
/**
 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
 * is OR of flags specified in defines.h:COMM_*
 *
 * On IPv6 hosts where the stack is present but disabled, falls back to an
 * IPv4-native socket (mutating addr to its IPv4 form).
 * \returns the new socket FD, or -1 on failure
 */
int
comm_openex(int sock_type,
            int proto,
            Ip::Address &addr,
            int flags,
            const char *note)
{
    int new_socket;
    struct addrinfo *AI = NULL;

    PROF_start(comm_open);
    /* Create socket for accepting new connections. */
    ++ statCounter.syscalls.sock.sockets;

    /* Setup the socket addrinfo details for use */
    addr.getAddrInfo(AI);
    AI->ai_socktype = sock_type;
    AI->ai_protocol = proto;

    debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );

    new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);

    /* under IPv6 there is the possibility IPv6 is present but disabled. */
    /* try again as IPv4-native if possible */
    if ( new_socket < 0 && Ip::EnableIpv6 && addr.isIPv6() && addr.setIPv4() ) {
        /* attempt to open this IPv4-only. */
        Ip::Address::FreeAddr(AI);
        /* Setup the socket addrinfo details for use */
        addr.getAddrInfo(AI);
        AI->ai_socktype = sock_type;
        AI->ai_protocol = proto;
        debugs(50, 3, "comm_openex: Attempt fallback open socket for: " << addr );
        new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
        debugs(50, 2, HERE << "attempt open " << note << " socket on: " << addr);
    }

    if (new_socket < 0) {
        /* Increase the number of reserved fd's if calls to socket()
         * are failing because the open file table is full.  This
         * limits the number of simultaneous clients */

        if (limitError(errno)) {
            debugs(50, DBG_IMPORTANT, "comm_open: socket failure: " << xstrerror());
            fdAdjustReserved();
        } else {
            debugs(50, DBG_CRITICAL, "comm_open: socket failure: " << xstrerror());
        }

        Ip::Address::FreeAddr(AI);

        PROF_stop(comm_open);
        return -1;
    }

    // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
    Comm::ConnectionPointer conn = new Comm::Connection;
    conn->local = addr;
    conn->fd = new_socket;

    debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );

    // split-stack IPv6 sockets must be forced into v6-only mode
    if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.isIPv6() )
        comm_set_v6only(conn->fd, 1);

    /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
    /* Other OS may have this administratively disabled for general use. Same deal. */
    if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.isIPv6() )
        comm_set_v6only(conn->fd, 0);

    comm_init_opened(conn, note, AI);
    // comm_apply_flags() may close the FD and return -1 (e.g. bind failure)
    new_socket = comm_apply_flags(conn->fd, addr, flags, AI);

    Ip::Address::FreeAddr(AI);

    PROF_stop(comm_open);

    // XXX transition only. prevent conn from closing the new FD on function exit.
    conn->fd = -1;
    return new_socket;
}
409
/// update FD tables after a local or remote (IPC) comm_openex();
/// records the FD as open and stores its local address and family
void
comm_init_opened(const Comm::ConnectionPointer &conn,
                 const char *note,
                 struct addrinfo *AI)
{
    assert(Comm::IsConnOpen(conn));
    assert(AI);

    /* update fdstat */
    debugs(5, 5, HERE << conn << " is a new socket");

    assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
    fd_open(conn->fd, FD_SOCKET, note);

    // reset the close-location debugging info for the (re)used table slot
    fdd_table[conn->fd].close_file = NULL;
    fdd_table[conn->fd].close_line = 0;

    fde *F = &fd_table[conn->fd];
    F->local_addr = conn->local;

    F->sock_family = AI->ai_family;
}
433
/// apply flags after a local comm_open*() call;
/// returns new_socket or -1 on error (the socket is closed on error)
static int
comm_apply_flags(int new_socket,
                 Ip::Address &addr,
                 int flags,
                 struct addrinfo *AI)
{
    assert(new_socket >= 0);
    assert(AI);
    const int sock_type = AI->ai_socktype;

    // close-on-exec is the default; COMM_NOCLOEXEC opts out
    if (!(flags & COMM_NOCLOEXEC))
        commSetCloseOnExec(new_socket);

    if ((flags & COMM_REUSEADDR))
        commSetReuseAddr(new_socket);

    if (addr.port() > (unsigned short) 0) {
#if _SQUID_WINDOWS_
        if (sock_type != SOCK_DGRAM)
#endif
            commSetNoLinger(new_socket);

        if (opt_reuseaddr)
            commSetReuseAddr(new_socket);
    }

    /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
    if ((flags & COMM_TRANSPARENT)) {
        comm_set_transparent(new_socket);
    }

    if ( (flags & COMM_DOBIND) || addr.port() > 0 || !addr.isAnyAddr() ) {
        if ( !(flags & COMM_DOBIND) && addr.isAnyAddr() )
            debugs(5, DBG_IMPORTANT,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
        if ( addr.isNoAddr() )
            debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");

        if (commBind(new_socket, *AI) != Comm::OK) {
            comm_close(new_socket);
            return -1;
        }
    }

    if (flags & COMM_NONBLOCKING)
        if (commSetNonBlocking(new_socket) == Comm::COMM_ERROR) {
            comm_close(new_socket);
            return -1;
        }

#ifdef TCP_NODELAY
    if (sock_type == SOCK_STREAM)
        commSetTcpNoDelay(new_socket);

#endif

    // honor the configured TCP receive buffer size for stream sockets
    if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
        commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);

    return new_socket;
}
496
/// Record bookkeeping for a socket that was opened and configured elsewhere
/// (e.g. received over IPC). Mirrors comm_apply_flags() but only updates
/// fd_table flags; it does not change the socket itself.
void
comm_import_opened(const Comm::ConnectionPointer &conn,
                   const char *note,
                   struct addrinfo *AI)
{
    debugs(5, 2, HERE << conn);
    assert(Comm::IsConnOpen(conn));
    assert(AI);

    comm_init_opened(conn, note, AI);

    if (!(conn->flags & COMM_NOCLOEXEC))
        fd_table[conn->fd].flags.close_on_exec = true;

    if (conn->local.port() > (unsigned short) 0) {
#if _SQUID_WINDOWS_
        if (AI->ai_socktype != SOCK_DGRAM)
#endif
            fd_table[conn->fd].flags.nolinger = true;
    }

    if ((conn->flags & COMM_TRANSPARENT))
        fd_table[conn->fd].flags.transparent = true;

    if (conn->flags & COMM_NONBLOCKING)
        fd_table[conn->fd].flags.nonblocking = true;

#ifdef TCP_NODELAY
    if (AI->ai_socktype == SOCK_STREAM)
        fd_table[conn->fd].flags.nodelay = true;
#endif

    /* no fd_table[fd].flags. updates needed for these conditions:
     * if ((flags & COMM_REUSEADDR)) ...
     * if ((flags & COMM_DOBIND) ...) ...
     */
}
534
535 // XXX: now that raw-FD timeouts are only unset for pipes and files this SHOULD be a no-op.
536 // With handler already unset. Leaving this present until that can be verified for all code paths.
537 void
538 commUnsetFdTimeout(int fd)
539 {
540 debugs(5, 3, HERE << "Remove timeout for FD " << fd);
541 assert(fd >= 0);
542 assert(fd < Squid_MaxFD);
543 fde *F = &fd_table[fd];
544 assert(F->flags.open);
545
546 F->timeoutHandler = NULL;
547 F->timeout = 0;
548 }
549
/// Update the timeout on conn's FD.
/// A negative timeout clears the handler and disables the timeout;
/// otherwise the deadline is set 'timeout' seconds from now, and
/// 'callback' (when non-NULL) replaces the timeout handler.
/// \returns the new absolute timeout value (0 when disabled)
int
commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
{
    debugs(5, 3, HERE << conn << " timeout " << timeout);
    assert(Comm::IsConnOpen(conn));
    assert(conn->fd < Squid_MaxFD);
    fde *F = &fd_table[conn->fd];
    assert(F->flags.open);

    if (timeout < 0) {
        F->timeoutHandler = NULL;
        F->timeout = 0;
    } else {
        if (callback != NULL) {
            typedef CommTimeoutCbParams Params;
            Params &params = GetCommParams<Params>(callback);
            params.conn = conn;
            F->timeoutHandler = callback;
        }

        // stored as absolute wall-clock time
        F->timeout = squid_curtime + (time_t) timeout;
    }

    return F->timeout;
}
575
576 int
577 commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
578 {
579 debugs(5, 3, HERE << "Remove timeout for " << conn);
580 AsyncCall::Pointer nil;
581 return commSetConnTimeout(conn, -1, nil);
582 }
583
/**
 * Connect socket FD to given remote address.
 * If return value is an error flag (COMM_ERROR, ERR_CONNECT, ERR_PROTOCOL, etc.),
 * then error code will also be returned in errno.
 *
 * First call issues the (typically non-blocking) connect(); subsequent
 * calls poll completion via getsockopt(SO_ERROR).
 */
int
comm_connect_addr(int sock, const Ip::Address &address)
{
    Comm::Flag status = Comm::OK;
    fde *F = &fd_table[sock];
    int x = 0;
    int err = 0;
    socklen_t errlen;
    struct addrinfo *AI = NULL;
    PROF_start(comm_connect_addr);

    assert(address.port() != 0);

    debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");

    /* Handle IPv6 over IPv4-only socket case.
     * this case must presently be handled here since the getAddrInfo asserts on bad mappings.
     * NP: because commResetFD is private to ConnStateData we have to return an error and
     * trust its handled properly.
     */
    if (F->sock_family == AF_INET && !address.isIPv4()) {
        errno = ENETUNREACH;
        return Comm::ERR_PROTOCOL;
    }

    /* Handle IPv4 over IPv6-only socket case.
     * This case is presently handled here as it's both a known case and it's
     * uncertain what error will be returned by the IPv6 stack in such case. It's
     * possible this will also be handled by the errno checks below after connect()
     * but needs carefull cross-platform verification, and verifying the address
     * condition here is simple.
     */
    if (!F->local_addr.isIPv4() && address.isIPv4()) {
        errno = ENETUNREACH;
        return Comm::ERR_PROTOCOL;
    }

    address.getAddrInfo(AI, F->sock_family);

    /* Establish connection. */
    int xerrno = 0;

    if (!F->flags.called_connect) {
        // first attempt: issue the actual connect()
        F->flags.called_connect = true;
        ++ statCounter.syscalls.sock.connects;

        x = connect(sock, AI->ai_addr, AI->ai_addrlen);

        // XXX: ICAP code refuses callbacks during a pending comm_ call
        // Async calls development will fix this.
        if (x == 0) {
            // pretend an immediate success is still in progress (see XXX above)
            x = -1;
            xerrno = EINPROGRESS;
        } else if (x < 0) {
            debugs(5,5, "comm_connect_addr: sock=" << sock << ", addrinfo( " <<
                   " flags=" << AI->ai_flags <<
                   ", family=" << AI->ai_family <<
                   ", socktype=" << AI->ai_socktype <<
                   ", protocol=" << AI->ai_protocol <<
                   ", &addr=" << AI->ai_addr <<
                   ", addrlen=" << AI->ai_addrlen <<
                   " )" );
            debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerr(xerrno));
            debugs(14,9, "connecting to: " << address );
        }

    } else {
        // connect() already issued earlier: poll its completion status
        errno = 0;
#if _SQUID_NEWSOS6_
        /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
        if (connect(sock, AI->ai_addr, AI->ai_addrlen) < 0)
            xerrno = errno;

        if (xerrno == EINVAL) {
            errlen = sizeof(err);
            x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
            if (x >= 0)
                xerrno = x;
        }
#else
        errlen = sizeof(err);
        x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
        if (x == 0)
            xerrno = err;

#if _SQUID_SOLARIS_
        /*
         * Solaris 2.4's socket emulation doesn't allow you
         * to determine the error from a failed non-blocking
         * connect and just returns EPIPE.  Create a fake
         * error message for connect.   -- fenner@parc.xerox.com
         */
        // NOTE(review): the else branch overwrites the SO_ERROR result with
        // errno on Solaris — confirm this is intended.
        if (x < 0 && xerrno == EPIPE)
            xerrno = ENOTCONN;
        else
            xerrno = errno;
#endif
#endif
    }

    Ip::Address::FreeAddr(AI);

    PROF_stop(comm_connect_addr);

    // map the saved connect()/SO_ERROR result to a Comm::Flag
    errno = xerrno;
    if (xerrno == 0 || xerrno == EISCONN)
        status = Comm::OK;
    else if (ignoreErrno(xerrno))
        status = Comm::INPROGRESS;
    else if (xerrno == EAFNOSUPPORT || xerrno == EINVAL)
        return Comm::ERR_PROTOCOL;
    else
        return Comm::COMM_ERROR;

    address.toStr(F->ipaddr, MAX_IPSTRLEN);

    F->remote_port = address.port(); /* remote_port is HS */

    if (status == Comm::OK) {
        debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connected to " << address);
    } else if (status == Comm::INPROGRESS) {
        debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connection pending");
    }

    errno = xerrno;
    return status;
}
716
717 void
718 commCallCloseHandlers(int fd)
719 {
720 fde *F = &fd_table[fd];
721 debugs(5, 5, "commCallCloseHandlers: FD " << fd);
722
723 while (F->closeHandler != NULL) {
724 AsyncCall::Pointer call = F->closeHandler;
725 F->closeHandler = call->Next();
726 call->setNext(NULL);
727 // If call is not canceled schedule it for execution else ignore it
728 if (!call->canceled()) {
729 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
730 ScheduleCallHere(call);
731 }
732 }
733 }
734
735 #if LINGERING_CLOSE
/// Read handler for a lingering close: read (and discard) one buffer of
/// pending data from the peer, then finish closing the descriptor.
static void
commLingerClose(int fd, void *unused)
{
    LOCAL_ARRAY(char, buf, 1024);
    int n;
    n = FD_READ_METHOD(fd, buf, 1024);

    if (n < 0)
        debugs(5, 3, "commLingerClose: FD " << fd << " read: " << xstrerror());

    comm_close(fd);
}
748
/// Timeout handler for a lingering close: give up waiting on the peer
/// and force the close.
static void
commLingerTimeout(const FdeCbParams &params)
{
    debugs(5, 3, "commLingerTimeout: FD " << params.fd);
    comm_close(params.fd);
}
755
756 /*
757 * Inspired by apache
758 */
759 void
760 comm_lingering_close(int fd)
761 {
762 #if USE_OPENSSL
763 if (fd_table[fd].ssl)
764 ssl_shutdown_method(fd_table[fd].ssl);
765 #endif
766
767 if (shutdown(fd, 1) < 0) {
768 comm_close(fd);
769 return;
770 }
771
772 fd_note(fd, "lingering close");
773 AsyncCall::Pointer call = commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout, NULL));
774
775 debugs(5, 3, HERE << "FD " << fd << " timeout " << timeout);
776 assert(fd_table[fd].flags.open);
777 if (callback != NULL) {
778 typedef FdeCbParams Params;
779 Params &params = GetCommParams<Params>(callback);
780 params.fd = fd;
781 fd_table[fd].timeoutHandler = callback;
782 fd_table[fd].timeout = squid_curtime + static_cast<time_t>(10);
783 }
784
785 Comm::SetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
786 }
787
788 #endif
789
/**
 * enable linger with time of 0 so that when the socket is
 * closed, TCP generates a RESET
 */
void
comm_reset_close(const Comm::ConnectionPointer &conn)
{
    struct linger L;
    L.l_onoff = 1;
    L.l_linger = 0;

    if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
        debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerror());

    // close regardless; without SO_LINGER the close sends a normal FIN
    conn->close();
}
806
807 // Legacy close function.
808 void
809 old_comm_reset_close(int fd)
810 {
811 struct linger L;
812 L.l_onoff = 1;
813 L.l_linger = 0;
814
815 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
816 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerror());
817
818 comm_close(fd);
819 }
820
#if USE_OPENSSL
/// begin an orderly SSL shutdown on the FD before it is fully closed
void
commStartSslClose(const FdeCbParams &params)
{
    assert(fd_table[params.fd].ssl != NULL);
    ssl_shutdown_method(fd_table[params.fd].ssl);
}
#endif
829
/// Final stage of comm_close(): release per-FD SSL state, update the FD
/// tables, and close the OS-level descriptor.
void
comm_close_complete(const FdeCbParams &params)
{
#if USE_OPENSSL
    fde *F = &fd_table[params.fd];

    if (F->ssl) {
        SSL_free(F->ssl);
        F->ssl = NULL;
    }

    if (F->dynamicSslContext) {
        SSL_CTX_free(F->dynamicSslContext);
        F->dynamicSslContext = NULL;
    }
#endif
    fd_close(params.fd);        /* update fdstat */
    close(params.fd);

    ++ statCounter.syscalls.sock.closes;

    /* When one connection closes, give accept() a chance, if need be */
    Comm::AcceptLimiter::Instance().kick();
}
854
/*
 * Close the socket fd.
 *
 * + call write handlers with ERR_CLOSING
 * + call read handlers with ERR_CLOSING
 * + call closing handlers
 *
 * NOTE: Comm::ERR_CLOSING will NOT be called for CommReads' sitting in a
 * DeferredReadManager.
 */
void
_comm_close(int fd, char const *file, int line)
{
    debugs(5, 3, "comm_close: start closing FD " << fd);
    assert(fd >= 0);
    assert(fd < Squid_MaxFD);

    fde *F = &fd_table[fd];
    // remember who requested the close, for debugging repeated closes
    fdd_table[fd].close_file = file;
    fdd_table[fd].close_line = line;

    // a close is already in progress; nothing more to do
    if (F->closing())
        return;

    /* XXX: is this obsolete behind F->closing() ? */
    if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
        return;

    /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
    if (!isOpen(fd)) {
        debugs(50, DBG_IMPORTANT, HERE << "BUG 3556: FD " << fd << " is not an open socket.");
        // XXX: do we need to run close(fd) or fd_close(fd) here?
        return;
    }

    assert(F->type != FD_FILE);

    PROF_start(comm_close);

    F->flags.close_request = true;

#if USE_OPENSSL
    if (F->ssl) {
        // schedule the SSL shutdown to run before comm_close_complete below
        AsyncCall::Pointer startCall=commCbCall(5,4, "commStartSslClose",
                                                FdeCbPtrFun(commStartSslClose, NULL));
        FdeCbParams &startParams = GetCommParams<FdeCbParams>(startCall);
        startParams.fd = fd;
        ScheduleCallHere(startCall);
    }
#endif

    // a half-closed fd may lack a reader, so we stop monitoring explicitly
    if (commHasHalfClosedMonitor(fd))
        commStopHalfClosedMonitor(fd);
    commUnsetFdTimeout(fd);

    // notify read/write handlers after canceling select reservations, if any
    if (COMMIO_FD_WRITECB(fd)->active()) {
        Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
        COMMIO_FD_WRITECB(fd)->finish(Comm::ERR_CLOSING, errno);
    }
    if (COMMIO_FD_READCB(fd)->active()) {
        Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
        COMMIO_FD_READCB(fd)->finish(Comm::ERR_CLOSING, errno);
    }

#if USE_DELAY_POOLS
    if (ClientInfo *clientInfo = F->clientInfo) {
        if (clientInfo->selectWaiting) {
            clientInfo->selectWaiting = false;
            // kick queue or it will get stuck as commWriteHandle is not called
            clientInfo->kickQuotaQueue();
        }
    }
#endif

    commCallCloseHandlers(fd);

    comm_empty_os_read_buffers(fd);

    AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
                                    FdeCbPtrFun(comm_close_complete, NULL));
    FdeCbParams &completeParams = GetCommParams<FdeCbParams>(completeCall);
    completeParams.fd = fd;
    // must use async call to wait for all callbacks
    // scheduled before comm_close() to finish
    ScheduleCallHere(completeCall);

    PROF_stop(comm_close);
}
945
/* Send a udp datagram to specified TO_ADDR. */
/// \returns bytes sent, or Comm::COMM_ERROR on failure (errno set by sendto)
int
comm_udp_sendto(int fd,
                const Ip::Address &to_addr,
                const void *buf,
                int len)
{
    PROF_start(comm_udp_sendto);
    ++ statCounter.syscalls.sock.sendtos;

    debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
           " using FD " << fd << " using Port " << comm_local_port(fd) );

    struct addrinfo *AI = NULL;
    to_addr.getAddrInfo(AI, fd_table[fd].sock_family);
    int x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
    Ip::Address::FreeAddr(AI);

    PROF_stop(comm_udp_sendto);

    if (x >= 0)
        return x;

#if _SQUID_LINUX_

    // on Linux, ECONNREFUSED here merely echoes an ICMP port-unreachable
    // from an earlier datagram, so do not log it
    if (ECONNREFUSED != errno)
#endif

        debugs(50, DBG_IMPORTANT, "comm_udp_sendto: FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerror());

    return Comm::COMM_ERROR;
}
978
979 void
980 comm_add_close_handler(int fd, CLCB * handler, void *data)
981 {
982 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
983 handler << ", data=" << data);
984
985 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
986 CommCloseCbPtrFun(handler, data));
987 comm_add_close_handler(fd, call);
988 }
989
/// Register an AsyncCall to be scheduled when fd closes.
/// The call is prepended to fd's singly-linked close-handler chain.
void
comm_add_close_handler(int fd, AsyncCall::Pointer &call)
{
    debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);

    /*TODO:Check for a similar scheduled AsyncCall*/
//    for (c = fd_table[fd].closeHandler; c; c = c->next)
//        assert(c->handler != handler || c->data != data);

    call->setNext(fd_table[fd].closeHandler);

    fd_table[fd].closeHandler = call;
}
1003
// remove function-based close handler
/// Find the close handler matching (handler, data) on fd's chain; if found,
/// unlink and cancel it. Method-based (job) callbacks are skipped.
void
comm_remove_close_handler(int fd, CLCB * handler, void *data)
{
    assert(isOpen(fd));
    /* Find handler in list */
    debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
           handler << ", data=" << data);

    AsyncCall::Pointer p, prev = NULL;
    for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
        typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
        const Call *call = dynamic_cast<const Call*>(p.getRaw());
        if (!call) // method callbacks have their own comm_remove_close_handler
            continue;

        typedef CommCloseCbParams Params;
        const Params &params = GetCommParams<Params>(p);
        if (call->dialer.handler == handler && params.data == data)
            break;      /* This is our handler */
    }

    // comm_close removes all close handlers so our handler may be gone
    if (p != NULL) {
        p->dequeue(fd_table[fd].closeHandler, prev);
        p->cancel("comm_remove_close_handler");
    }
}
1032
// remove method-based close handler
/// Unlink the given AsyncCall from fd's close-handler chain (when still
/// present) and cancel it.
void
comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
{
    assert(isOpen(fd));
    debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);

    // comm_close removes all close handlers so our handler may be gone
    AsyncCall::Pointer p, prev = NULL;
    for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());

    if (p != NULL)
        p->dequeue(fd_table[fd].closeHandler, prev);
    // NB: the call is canceled even when it was not found on the chain
    call->cancel("comm_remove_close_handler");
}
1048
1049 static void
1050 commSetNoLinger(int fd)
1051 {
1052
1053 struct linger L;
1054 L.l_onoff = 0; /* off */
1055 L.l_linger = 0;
1056
1057 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1058 debugs(50, 0, "commSetNoLinger: FD " << fd << ": " << xstrerror());
1059
1060 fd_table[fd].flags.nolinger = true;
1061 }
1062
1063 static void
1064 commSetReuseAddr(int fd)
1065 {
1066 int on = 1;
1067
1068 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0)
1069 debugs(50, DBG_IMPORTANT, "commSetReuseAddr: FD " << fd << ": " << xstrerror());
1070 }
1071
/// Set socket buffer sizes. Despite the name, this adjusts both the
/// receive (SO_RCVBUF) and send (SO_SNDBUF) buffers, and clamps the TCP
/// window where TCP_WINDOW_CLAMP is available.
static void
commSetTcpRcvbuf(int fd, int size)
{
    if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0)
        debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
    if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0)
        debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
#ifdef TCP_WINDOW_CLAMP
    if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0)
        debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
#endif
}
1084
/// Put fd into non-blocking mode and record the flag in fd_table.
/// \returns 0 on success, Comm::COMM_ERROR on failure
int
commSetNonBlocking(int fd)
{
#if _SQUID_WINDOWS_
    int nonblocking = TRUE;

    if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
        debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror() << " " << fd_table[fd].type);
        return Comm::COMM_ERROR;
    }

#else
    int flags;
    int dummy = 0;

    // read-modify-write the descriptor status flags
    if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
        debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
        return Comm::COMM_ERROR;
    }

    if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
        debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror());
        return Comm::COMM_ERROR;
    }
#endif

    fd_table[fd].flags.nonblocking = true;
    return 0;
}
1114
/// Restore blocking mode on fd, undoing commSetNonBlocking().
/// NOTE: the error-handling block after the last #endif is shared by the
/// Windows ioctlsocket() branch and the POSIX fcntl(F_SETFL) branch --
/// the opening brace of each platform's `if` pairs with the same close.
/// \retval 0 on success, Comm::COMM_ERROR on failure
int
commUnsetNonBlocking(int fd)
{
#if _SQUID_WINDOWS_
    int nonblocking = FALSE;

    if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
#else
    int flags;
    int dummy = 0;

    if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
        debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
        return Comm::COMM_ERROR;
    }

    // clear only the non-blocking bit, keeping other status flags intact
    if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
#endif
        debugs(50, 0, "commUnsetNonBlocking: FD " << fd << ": " << xstrerror());
        return Comm::COMM_ERROR;
    }

    fd_table[fd].flags.nonblocking = false;
    return 0;
}
1140
/// Set the close-on-exec flag on fd so helper processes spawned via
/// exec() do not inherit it. No-op on platforms without FD_CLOEXEC.
void
commSetCloseOnExec(int fd)
{
#ifdef FD_CLOEXEC
    int flags;
    int dummy = 0;

    // fetch current descriptor flags so we only add FD_CLOEXEC
    if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
        debugs(50, 0, "FD " << fd << ": fcntl F_GETFD: " << xstrerror());
        return;
    }

    if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
        debugs(50, 0, "FD " << fd << ": set close-on-exec failed: " << xstrerror());

    // recorded even if F_SETFD failed; only the F_GETFD failure bails out early
    fd_table[fd].flags.close_on_exec = true;

#endif
}
1160
1161 #ifdef TCP_NODELAY
1162 static void
1163 commSetTcpNoDelay(int fd)
1164 {
1165 int on = 1;
1166
1167 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0)
1168 debugs(50, DBG_IMPORTANT, "commSetTcpNoDelay: FD " << fd << ": " << xstrerror());
1169
1170 fd_table[fd].flags.nodelay = true;
1171 }
1172
1173 #endif
1174
1175 void
1176 commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1177 {
1178 int on = 1;
1179 #ifdef TCP_KEEPCNT
1180 if (timeout && interval) {
1181 int count = (timeout + interval - 1) / interval;
1182 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(on)) < 0)
1183 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1184 }
1185 #endif
1186 #ifdef TCP_KEEPIDLE
1187 if (idle) {
1188 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(on)) < 0)
1189 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1190 }
1191 #endif
1192 #ifdef TCP_KEEPINTVL
1193 if (interval) {
1194 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(on)) < 0)
1195 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1196 }
1197 #endif
1198 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0)
1199 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1200 }
1201
/// One-time startup initialization of the comm layer: allocates the
/// global descriptor tables and the helper singletons used by I/O code.
/// Must run before any socket is opened; undone by comm_exit().
void
comm_init(void)
{
    fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
    fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));

    /* make sure the accept() socket FIFO delay queue exists */
    Comm::AcceptLimiter::Instance();

    // make sure the IO pending callback table exists
    Comm::CallbackTableInit();

    /* XXX account fd_table */
    /* Keep a few file descriptors free so that we don't run out of FD's
     * after accepting a client but before it opens a socket or a file.
     * Since Squid_MaxFD can be as high as several thousand, don't waste them */
    RESERVED_FD = min(100, Squid_MaxFD / 4);

    // registry of descriptors being monitored for half-closed peers
    TheHalfClosed = new DescriptorSet;

    /* setup the select loop module */
    Comm::SelectLoopInit();
}
1225
/// Shutdown counterpart of comm_init(): releases the half-closed
/// descriptor registry, the global fd tables, and the I/O callback table.
void
comm_exit(void)
{
    delete TheHalfClosed;
    TheHalfClosed = NULL;

    safe_free(fd_table);
    safe_free(fdd_table);
    Comm::CallbackTableDestruct();
}
1236
1237 #if USE_DELAY_POOLS
// called when the queue is done waiting for the client bucket to fill
/// Event callback: finds the first still-relevant descriptor in the
/// client's write quota queue and registers it for write-readiness;
/// stale queue entries are dropped along the way.
void
commHandleWriteHelper(void * data)
{
    CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
    assert(queue);

    ClientInfo *clientInfo = queue->clientInfo;
    // ClientInfo invalidates queue if freed, so if we got here through
    // eventAdd cbdata protections, everything should be valid and consistent
    assert(clientInfo);
    assert(clientInfo->hasQueue());
    assert(clientInfo->hasQueue(queue));
    assert(!clientInfo->selectWaiting);
    assert(clientInfo->eventWaiting);
    clientInfo->eventWaiting = false;

    do {
        // check that the head descriptor is still relevant
        const int head = clientInfo->quotaPeekFd();
        Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);

        // relevant = same client owns the fd, the reservation matches,
        // and the descriptor is not already being closed
        if (fd_table[head].clientInfo == clientInfo &&
                clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
                !fd_table[head].closing()) {

            // wait for the head descriptor to become ready for writing
            Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
            clientInfo->selectWaiting = true;
            return;
        }

        clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
        // and continue looking for a relevant one
    } while (clientInfo->hasQueue());

    debugs(77,3, HERE << "emptied queue");
}
1276
1277 bool
1278 ClientInfo::hasQueue() const
1279 {
1280 assert(quotaQueue);
1281 return !quotaQueue->empty();
1282 }
1283
1284 bool
1285 ClientInfo::hasQueue(const CommQuotaQueue *q) const
1286 {
1287 assert(quotaQueue);
1288 return quotaQueue == q;
1289 }
1290
1291 /// returns the first descriptor to be dequeued
1292 int
1293 ClientInfo::quotaPeekFd() const
1294 {
1295 assert(quotaQueue);
1296 return quotaQueue->front();
1297 }
1298
/// returns the reservation ID of the first descriptor to be dequeued
// Reservation IDs are handed out sequentially by enqueue(), so the head
// entry's ID is always one past the number of completed dequeues (outs).
unsigned int
ClientInfo::quotaPeekReserv() const
{
    assert(quotaQueue);
    return quotaQueue->outs + 1;
}
1306
1307 /// queues a given fd, creating the queue if necessary; returns reservation ID
1308 unsigned int
1309 ClientInfo::quotaEnqueue(int fd)
1310 {
1311 assert(quotaQueue);
1312 return quotaQueue->enqueue(fd);
1313 }
1314
/// removes queue head
// Thin forwarder; the bookkeeping (outs counter) lives in CommQuotaQueue.
void
ClientInfo::quotaDequeue()
{
    assert(quotaQueue);
    quotaQueue->dequeue();
}
1322
/// Schedule commHandleWriteHelper() for this client's queue unless a
/// helper event or a select registration is already pending.
void
ClientInfo::kickQuotaQueue()
{
    if (!eventWaiting && !selectWaiting && hasQueue()) {
        // wait at least a second if the bucket is empty
        const double delay = (bucketSize < 1.0) ? 1.0 : 0.0;
        eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
                 quotaQueue, delay, 0, true);
        eventWaiting = true;
    }
}
1334
/// calculates how much to write for a single dequeued client
/// \returns the per-descriptor byte ration for this scheduling round
int
ClientInfo::quotaForDequed()
{
    /* If we have multiple clients and give full bucketSize to each client then
     * clt1 may often get a lot more because clt1->clt2 time distance in the
     * select(2) callback order may be a lot smaller than cltN->clt1 distance.
     * We divide quota evenly to be more fair. */

    // rationedCount == 0 means the previous round is exhausted: start a
    // new round sized for everybody currently queued plus this caller
    if (!rationedCount) {
        rationedCount = quotaQueue->size() + 1;

        // The delay in ration recalculation _temporary_ deprives clients from
        // bytes that should have trickled in while rationedCount was positive.
        refillBucket();

        // Rounding errors do not accumulate here, but we round down to avoid
        // negative bucket sizes after write with rationedCount=1.
        rationedQuota = static_cast<int>(floor(bucketSize/rationedCount));
        debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
               '*' << rationedCount);
    }

    --rationedCount;
    debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
           " rations remaining: " << rationedCount);

    // update 'last seen' time to prevent clientdb GC from dropping us
    last_seen = squid_curtime;
    return rationedQuota;
}
1366
///< adds bytes to the quota bucket based on the rate and passed time
void
ClientInfo::refillBucket()
{
    // all these times are in seconds, with double precision
    const double currTime = current_dtime;
    const double timePassed = currTime - prevTime;

    // Calculate allowance for the time passed. Use double to avoid
    // accumulating rounding errors for small intervals. For example, always
    // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
    const double gain = timePassed * writeSpeedLimit;

    debugs(77,5, HERE << currTime << " clt" << (const char*)hash.key << ": " <<
           bucketSize << " + (" << timePassed << " * " << writeSpeedLimit <<
           " = " << gain << ')');

    // to further combat error accumulation during micro updates,
    // quit before updating time if we cannot add at least one byte
    if (gain < 1.0)
        return;

    prevTime = currTime;

    // for "first" connections, drain initial fat before refilling but keep
    // updating prevTime to avoid bursts after the fat is gone
    if (bucketSize > bucketSizeLimit) {
        debugs(77,4, HERE << "not refilling while draining initial fat");
        return;
    }

    bucketSize += gain;

    // obey quota limits
    if (bucketSize > bucketSizeLimit)
        bucketSize = bucketSizeLimit;
}
1404
/// Activate (or retune) per-client write rate limiting.
/// \param aWriteSpeedLimit bytes-per-second refill rate for the bucket
/// \param anInitialBurst   starting bucket level for a fresh client
/// \param aHighWatermark   maximum bucket level (bucketSizeLimit)
void
ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
{
    debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
           " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
           " highwatermark=" << aHighWatermark);

    // set or possibly update traffic shaping parameters
    writeLimitingActive = true;
    writeSpeedLimit = aWriteSpeedLimit;
    bucketSizeLimit = aHighWatermark;

    // but some members should only be set once for a newly activated bucket
    if (firstTimeConnection) {
        firstTimeConnection = false;

        assert(!selectWaiting);
        assert(!quotaQueue);
        quotaQueue = new CommQuotaQueue(this);

        bucketSize = anInitialBurst;
        prevTime = current_dtime;
    }
}
1429
1430 CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1431 ins(0), outs(0)
1432 {
1433 assert(clientInfo);
1434 }
1435
/// Destructor: only legal after the owner detached itself (see assert).
CommQuotaQueue::~CommQuotaQueue()
{
    assert(!clientInfo); // ClientInfo should clear this before destroying us
}
1440
/// places the given fd at the end of the queue; returns reservation ID
// NB: the debugs() call reads fds.size() before push_back on purpose,
// so the logged size is the pre-insertion queue length.
unsigned int
CommQuotaQueue::enqueue(int fd)
{
    debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
           ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
    fds.push_back(fd);
    return ++ins;
}
1450
/// removes queue head
// Increments outs so quotaPeekReserv() keeps matching the new head entry.
void
CommQuotaQueue::dequeue()
{
    assert(!fds.empty());
    debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
           ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
           fds.size());
    fds.pop_front();
    ++outs;
}
1462 #endif
1463
/*
 * hm, this might be too general-purpose for all the places we'd
 * like to use it.
 */
/// Classify an errno value as transient ("try again later") or fatal.
/// \retval 1 the error is benign/retryable
/// \retval 0 the error is a real failure
int
ignoreErrno(int ierrno)
{
    if (ierrno == EINPROGRESS || ierrno == EWOULDBLOCK)
        return 1;

#if EAGAIN != EWOULDBLOCK
    if (ierrno == EAGAIN)
        return 1;
#endif

    if (ierrno == EALREADY || ierrno == EINTR)
        return 1;

#ifdef ERESTART
    if (ierrno == ERESTART)
        return 1;
#endif

    return 0;
}
1497
/// Forcefully shut down every open network socket (used at shutdown).
/// Descriptors with a timeout handler get that handler scheduled;
/// others are reset-closed immediately. IPC sockets are left alone.
void
commCloseAllSockets(void)
{
    int fd;
    fde *F = NULL;

    for (fd = 0; fd <= Biggest_FD; ++fd) {
        F = &fd_table[fd];

        if (!F->flags.open)
            continue;

        if (F->type != FD_SOCKET)
            continue;

        if (F->flags.ipc) /* don't close inter-process sockets */
            continue;

        if (F->timeoutHandler != NULL) {
            // let the owner clean up via its own timeout path
            AsyncCall::Pointer callback = F->timeoutHandler;
            F->timeoutHandler = NULL;
            debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
            ScheduleCallHere(callback);
        } else {
            debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
            old_comm_reset_close(fd);
        }
    }
}
1527
1528 static bool
1529 AlreadyTimedOut(fde *F)
1530 {
1531 if (!F->flags.open)
1532 return true;
1533
1534 if (F->timeout == 0)
1535 return true;
1536
1537 if (F->timeout > squid_curtime)
1538 return true;
1539
1540 return false;
1541 }
1542
1543 static bool
1544 writeTimedOut(int fd)
1545 {
1546 if (!COMMIO_FD_WRITECB(fd)->active())
1547 return false;
1548
1549 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1550 return false;
1551
1552 return true;
1553 }
1554
/// Periodic sweep over all descriptors: abort writes that exceeded the
/// write timeout, then fire the timeout handler (or force-close) every
/// descriptor whose deadline has passed.
void
checkTimeouts(void)
{
    int fd;
    fde *F = NULL;
    AsyncCall::Pointer callback;

    for (fd = 0; fd <= Biggest_FD; ++fd) {
        F = &fd_table[fd];

        if (writeTimedOut(fd)) {
            // We have an active write callback and we are timed out
            debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
            Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
            COMMIO_FD_WRITECB(fd)->finish(Comm::COMM_ERROR, ETIMEDOUT);
            // NOTE: a write-timed-out fd falls through to the general
            // timeout handling below as well
        } else if (AlreadyTimedOut(F))
            continue;

        debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");

        if (F->timeoutHandler != NULL) {
            debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
            callback = F->timeoutHandler;
            F->timeoutHandler = NULL;
            ScheduleCallHere(callback);
        } else {
            debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
            comm_close(fd);
        }
    }
}
1586
/// Start waiting for a possibly half-closed connection to close
// by scheduling a read callback to a monitoring handler that
// will close the connection on read errors.
// Precondition: fd is open and not already monitored (asserted below).
void
commStartHalfClosedMonitor(int fd)
{
    debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
    assert(isOpen(fd) && !commHasHalfClosedMonitor(fd));
    (void)TheHalfClosed->add(fd); // could also assert the result
    commPlanHalfClosedCheck(); // may schedule check if we added the first FD
}
1598
1599 static
1600 void
1601 commPlanHalfClosedCheck()
1602 {
1603 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1604 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1605 WillCheckHalfClosed = true;
1606 }
1607 }
1608
/// iterates over all descriptors that may need half-closed tests and
/// calls comm_read for those that do; re-schedules the check if needed
static
void
commHalfClosedCheck(void *)
{
    debugs(5, 5, HERE << "checking " << *TheHalfClosed);

    typedef DescriptorSet::const_iterator DSCI;
    const DSCI end = TheHalfClosed->end();
    for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
        Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
        c->fd = *i;
        if (!fd_table[c->fd].halfClosedReader) { // not reading already
            // schedule a zero-size read whose completion (error or EOF)
            // tells us the peer fully closed the connection
            AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
                                                 CommIoCbPtrFun(&commHalfClosedReader, NULL));
            Comm::Read(c, call);
            fd_table[c->fd].halfClosedReader = call;
        } else
            c->fd = -1; // XXX: temporary. prevent c replacement erase closing listed FD
    }

    WillCheckHalfClosed = false; // as far as we know
    commPlanHalfClosedCheck(); // may need to check again
}
1634
/// checks whether we are waiting for possibly half-closed connection to close
// Membership in TheHalfClosed is the authoritative "monitored" state.
bool
commHasHalfClosedMonitor(int fd)
{
    return TheHalfClosed->has(fd);
}
1642
/// stop waiting for possibly half-closed connection to close
// Cancels any pending monitoring read and removes fd from the registry.
void
commStopHalfClosedMonitor(int const fd)
{
    debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);

    // cancel the read if one was scheduled
    AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
    if (reader != NULL)
        Comm::ReadCancel(fd, reader);
    fd_table[fd].halfClosedReader = NULL;

    TheHalfClosed->del(fd);
}
1657
/// I/O handler for the possibly half-closed connection monitoring code
// A successful zero-byte read means the peer is still only half-closed:
// keep monitoring. Any read error means the connection is fully dead.
static void
commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, Comm::Flag flag, int, void *)
{
    // there cannot be more data coming in on half-closed connections
    assert(size == 0);
    assert(conn != NULL);
    assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read

    fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now

    // nothing to do if fd is being closed
    if (flag == Comm::ERR_CLOSING)
        return;

    // if read failed, close the connection
    if (flag != Comm::OK) {
        debugs(5, 3, HERE << "closing " << conn);
        conn->close();
        return;
    }

    // continue waiting for close or error
    commPlanHalfClosedCheck(); // make sure this fd will be checked again
}
1683
/// default: an empty read request with no connection, buffer, or callback
CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}

/// a read request of up to len_ bytes into buf_ on connection c
CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
    : conn(c), buf(buf_), len(len_), callback(callback_) {}
1688
/// default: an inert deferred-read record with no reader or context
DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}

/// a deferred read: aReader(data, aRead) will run when the read is kicked
DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
1692
/// Destructor flushes (kicks or cancels) every still-deferred read so
/// none is silently dropped; the queue must be empty afterwards.
DeferredReadManager::~DeferredReadManager()
{
    flushReads();
    assert (deferredReads.empty());
}
1698
1699 /* explicit instantiation required for some systems */
1700
1701 /// \cond AUTODOCS_IGNORE
1702 template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
1703 /// \endcond
1704
/// Queue a read to be performed later and register a close handler that
/// cancels the queued entry if its connection closes while waiting.
void
DeferredReadManager::delayRead(DeferredRead const &aRead)
{
    debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
    CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);

    // We have to use a global function as a closer and point to temp
    // instead of "this" because DeferredReadManager is not a job and
    // is not even cbdata protected
    // XXX: and yet we use cbdata protection functions on it??
    AsyncCall::Pointer closer = commCbCall(5,4,
                                           "DeferredReadManager::CloseHandler",
                                           CommCloseCbPtrFun(&CloseHandler, temp));
    comm_add_close_handler(aRead.theRead.conn->fd, closer);
    temp->element.closer = closer; // remember so that we can cancel
}
1721
1722 void
1723 DeferredReadManager::CloseHandler(const CommCloseCbParams &params)
1724 {
1725 if (!cbdataReferenceValid(params.data))
1726 return;
1727
1728 CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)params.data;
1729
1730 temp->element.closer = NULL;
1731 temp->element.markCancelled();
1732 }
1733
/// Remove and return the first queued deferred read, re-validating the
/// (possibly long-stalled) connection and detaching its close handler.
DeferredRead
DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
{
    assert (!deferredReads.empty());

    DeferredRead &read = deferredReads.head->element;

    // NOTE: at this point the connection has been paused/stalled for an unknown
    // amount of time. We must re-validate that it is active and usable.

    // If the connection has been closed already. Cancel this read.
    if (!Comm::IsConnOpen(read.theRead.conn)) {
        if (read.closer != NULL) {
            read.closer->cancel("Connection closed before.");
            read.closer = NULL;
        }
        read.markCancelled();
    }

    // still-live entries must drop their close handler before dequeuing,
    // or the handler would later fire against a popped list node
    if (!read.cancelled) {
        comm_remove_close_handler(read.theRead.conn->fd, read.closer);
        read.closer = NULL;
    }

    DeferredRead result = deferredReads.pop_front();

    return result;
}
1762
1763 void
1764 DeferredReadManager::kickReads(int const count)
1765 {
1766 /* if we had CbDataList::size() we could consolidate this and flushReads */
1767
1768 if (count < 1) {
1769 flushReads();
1770 return;
1771 }
1772
1773 size_t remaining = count;
1774
1775 while (!deferredReads.empty() && remaining) {
1776 DeferredRead aRead = popHead(deferredReads);
1777 kickARead(aRead);
1778
1779 if (!aRead.cancelled)
1780 --remaining;
1781 }
1782 }
1783
/// Resume every deferred read. The queue is swapped into a local
/// container first so reads re-deferred during the flush land in a
/// fresh queue instead of being processed again in this pass.
void
DeferredReadManager::flushReads()
{
    CbDataListContainer<DeferredRead> reads;
    reads = deferredReads;
    deferredReads = CbDataListContainer<DeferredRead>();

    // XXX: For fairness this SHOULD randomize the order
    while (!reads.empty()) {
        DeferredRead aRead = popHead(reads);
        kickARead(aRead);
    }
}
1797
/// Run a single deferred read's callback unless it was cancelled or its
/// connection is being torn down.
void
DeferredReadManager::kickARead(DeferredRead const &aRead)
{
    if (aRead.cancelled)
        return;

    // NOTE(review): this only skips open-but-closing connections; a read
    // whose connection is no longer open at all proceeds to the callback
    // below -- presumably popHead() already cancelled those. Verify.
    if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
        return;

    debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);

    aRead.theReader(aRead.theContext, aRead.theRead);
}
1811
/// Flag this deferred read so kickARead()/kickReads() will skip it.
void
DeferredRead::markCancelled()
{
    cancelled = true;
}
1817
/// Event-engine hook: run descriptor timeouts at most once per second,
/// then poll the I/O select loop.
/// \returns 0 when work was done (or a poll timeout), EVENT_IDLE when
///          idle/shutting down, EVENT_ERROR on select failure
int
CommSelectEngine::checkEvents(int timeout)
{
    static time_t last_timeout = 0;

    /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
    if (squid_curtime > last_timeout) {
        last_timeout = squid_curtime;
        checkTimeouts();
    }

    switch (Comm::DoSelect(timeout)) {

    case Comm::OK:
        /* fallthrough */
    case Comm::TIMEOUT:
        return 0;

    case Comm::IDLE:
        /* fallthrough */
    case Comm::SHUTDOWN:
        return EVENT_IDLE;

    case Comm::COMM_ERROR:
        return EVENT_ERROR;

    default:
        fatal_dump("comm.cc: Internal error -- this should never happen.");
        return EVENT_ERROR;
    };
}
1849
/// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
/// \param sock_type socket() type, e.g. SOCK_STREAM or SOCK_DGRAM
/// \param proto     socket() protocol (usually 0 for UDS)
/// \param addr      filesystem address; addr->sun_path names the socket
/// \param flags     COMM_* option bits (CLOEXEC, REUSEADDR, NONBLOCKING, DOBIND)
/// \returns the new descriptor, or -1 on failure
int
comm_open_uds(int sock_type,
              int proto,
              struct sockaddr_un* addr,
              int flags)
{
    // TODO: merge with comm_openex() when Ip::Address becomes NetAddress

    int new_socket;

    PROF_start(comm_open);
    /* Create socket for accepting new connections. */
    ++ statCounter.syscalls.sock.sockets;

    /* Setup the socket addrinfo details for use */
    // every field is assigned explicitly, so no zero-initialization needed
    struct addrinfo AI;
    AI.ai_flags = 0;
    AI.ai_family = PF_UNIX;
    AI.ai_socktype = sock_type;
    AI.ai_protocol = proto;
    AI.ai_addrlen = SUN_LEN(addr);
    AI.ai_addr = (sockaddr*)addr;
    AI.ai_canonname = NULL;
    AI.ai_next = NULL;

    debugs(50, 3, HERE << "Attempt open socket for: " << addr->sun_path);

    if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
        /* Increase the number of reserved fd's if calls to socket()
         * are failing because the open file table is full. This
         * limits the number of simultaneous clients */

        if (limitError(errno)) {
            debugs(50, DBG_IMPORTANT, HERE << "socket failure: " << xstrerror());
            fdAdjustReserved();
        } else {
            debugs(50, DBG_CRITICAL, HERE << "socket failure: " << xstrerror());
        }

        PROF_stop(comm_open);
        return -1;
    }

    debugs(50, 3, "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);

    /* update fdstat */
    debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");

    assert(!isOpen(new_socket));
    fd_open(new_socket, FD_MSGHDR, addr->sun_path);

    fdd_table[new_socket].close_file = NULL;

    fdd_table[new_socket].close_line = 0;

    fd_table[new_socket].sock_family = AI.ai_family;

    // apply requested option flags; each failure path closes the socket
    if (!(flags & COMM_NOCLOEXEC))
        commSetCloseOnExec(new_socket);

    if (flags & COMM_REUSEADDR)
        commSetReuseAddr(new_socket);

    if (flags & COMM_NONBLOCKING) {
        if (commSetNonBlocking(new_socket) != Comm::OK) {
            comm_close(new_socket);
            PROF_stop(comm_open);
            return -1;
        }
    }

    if (flags & COMM_DOBIND) {
        if (commBind(new_socket, AI) != Comm::OK) {
            comm_close(new_socket);
            PROF_stop(comm_open);
            return -1;
        }
    }

#ifdef TCP_NODELAY
    if (sock_type == SOCK_STREAM)
        commSetTcpNoDelay(new_socket);

#endif

    if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
        commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);

    PROF_stop(comm_open);

    return new_socket;
}
1943