1 /*
2 * DEBUG: section 05 Socket Functions
3 * AUTHOR: Harvest Derived
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 *
32 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
33 */
34
35 #include "squid.h"
36 #include "ClientInfo.h"
37 #include "comm/AcceptLimiter.h"
38 #include "comm/comm_internal.h"
39 #include "comm/Connection.h"
40 #include "comm/IoCallback.h"
41 #include "comm/Loops.h"
42 #include "comm/Read.h"
43 #include "comm/TcpAcceptor.h"
44 #include "comm/Write.h"
45 #include "CommRead.h"
46 #include "compat/cmsg.h"
47 #include "DescriptorSet.h"
48 #include "event.h"
49 #include "fd.h"
50 #include "fde.h"
51 #include "globals.h"
52 #include "icmp/net_db.h"
53 #include "ip/Intercept.h"
54 #include "ip/QosConfig.h"
55 #include "ip/tools.h"
56 #include "pconn.h"
57 #include "profiler/Profiler.h"
58 #include "SBuf.h"
59 #include "SquidConfig.h"
60 #include "StatCounters.h"
61 #include "StoreIOBuffer.h"
62 #include "tools.h"
63
64 #if USE_OPENSSL
65 #include "ssl/support.h"
66 #endif
67
68 #include <cerrno>
69 #include <cmath>
70 #if _SQUID_CYGWIN_
71 #include <sys/ioctl.h>
72 #endif
73 #ifdef HAVE_NETINET_TCP_H
74 #include <netinet/tcp.h>
75 #endif
76 #if HAVE_SYS_UN_H
77 #include <sys/un.h>
78 #endif
79
80 /*
81 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
82 */
83
84 static IOCB commHalfClosedReader;
85 static void comm_init_opened(const Comm::ConnectionPointer &conn, const char *note, struct addrinfo *AI);
86 static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
87
88 #if USE_DELAY_POOLS
89 CBDATA_CLASS_INIT(CommQuotaQueue);
90
91 static void commHandleWriteHelper(void * data);
92 #endif
93
94 /* STATIC */
95
96 static DescriptorSet *TheHalfClosed = NULL; ///< the set of half-closed FDs
97 static bool WillCheckHalfClosed = false; ///< true if check is scheduled
98 static EVH commHalfClosedCheck;
99 static void commPlanHalfClosedCheck();
100
101 static Comm::Flag commBind(int s, struct addrinfo &);
102 static void commSetReuseAddr(int);
103 static void commSetNoLinger(int);
104 #ifdef TCP_NODELAY
105 static void commSetTcpNoDelay(int);
106 #endif
107 static void commSetTcpRcvbuf(int, int);
108
109 fd_debug_t *fdd_table = NULL;
110
111 bool
112 isOpen(const int fd)
113 {
114 return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
115 }
116
117 /**
118 * Empty the read buffers
119 *
120 * This is a magical routine that empties the read buffers.
121 * Under some platforms (Linux) if a buffer has data in it before
122 * you call close(), the socket will hang and take quite a while
123 * to timeout.
124 */
125 static void
126 comm_empty_os_read_buffers(int fd)
127 {
128 #if _SQUID_LINUX_
129 /* prevent those nasty RST packets */
130 char buf[SQUID_TCP_SO_RCVBUF];
131
132 if (fd_table[fd].flags.nonblocking) {
133 while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
134 }
135 #endif
136 }
137
138 /**
139 * synchronous wrapper around udp socket functions
140 */
141 int
142 comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
143 {
144 ++ statCounter.syscalls.sock.recvfroms;
145 debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
146 struct addrinfo *AI = NULL;
147 Ip::Address::InitAddrInfo(AI);
148 int x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
149 from = *AI;
150 Ip::Address::FreeAddrInfo(AI);
151 return x;
152 }
153
154 int
155 comm_udp_recv(int fd, void *buf, size_t len, int flags)
156 {
157 Ip::Address nul;
158 return comm_udp_recvfrom(fd, buf, len, flags, nul);
159 }
160
161 ssize_t
162 comm_udp_send(int s, const void *buf, size_t len, int flags)
163 {
164 return send(s, buf, len, flags);
165 }
166
167 bool
168 comm_has_incomplete_write(int fd)
169 {
170 assert(isOpen(fd) && COMMIO_FD_WRITECB(fd));
171 return COMMIO_FD_WRITECB(fd)->active();
172 }
173
174 /**
175 * Queue a write. handler/handler_data are called when the write fully
176 * completes, on error, or on file descriptor close.
177 */
178
179 /* Return the local port associated with fd. */
180 unsigned short
181 comm_local_port(int fd)
182 {
183 Ip::Address temp;
184 struct addrinfo *addr = NULL;
185 fde *F = &fd_table[fd];
186
187 /* If the fd is closed already, just return */
188
189 if (!F->flags.open) {
190 debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
191 return 0;
192 }
193
194 if (F->local_addr.port())
195 return F->local_addr.port();
196
197 if (F->sock_family == AF_INET)
198 temp.setIPv4();
199
200 Ip::Address::InitAddrInfo(addr);
201
202 if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
203 debugs(50, DBG_IMPORTANT, "comm_local_port: Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerror());
204 Ip::Address::FreeAddrInfo(addr);
205 return 0;
206 }
207 temp = *addr;
208
209 Ip::Address::FreeAddrInfo(addr);
210
211 if (F->local_addr.isAnyAddr()) {
212 /* save the whole local address, not just the port. */
213 F->local_addr = temp;
214 } else {
215 F->local_addr.port(temp.port());
216 }
217
218 debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.port() << "(family=" << F->sock_family << ")");
219 return F->local_addr.port();
220 }
221
222 static Comm::Flag
223 commBind(int s, struct addrinfo &inaddr)
224 {
225 ++ statCounter.syscalls.sock.binds;
226
227 if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
228 debugs(50, 6, "commBind: bind socket FD " << s << " to " << fd_table[s].local_addr);
229 return Comm::OK;
230 }
231
232 debugs(50, 0, "commBind: Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerror());
233
234 return Comm::COMM_ERROR;
235 }
236
237 /**
238 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
239 * is OR of flags specified in comm.h. Defaults TOS
240 */
241 int
242 comm_open(int sock_type,
243 int proto,
244 Ip::Address &addr,
245 int flags,
246 const char *note)
247 {
248 return comm_openex(sock_type, proto, addr, flags, note);
249 }
250
251 void
252 comm_open_listener(int sock_type,
253 int proto,
254 Comm::ConnectionPointer &conn,
255 const char *note)
256 {
257 /* all listener sockets require bind() */
258 conn->flags |= COMM_DOBIND;
259
260 /* attempt native enabled port. */
261 conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, note);
262 }
263
264 int
265 comm_open_listener(int sock_type,
266 int proto,
267 Ip::Address &addr,
268 int flags,
269 const char *note)
270 {
271 int sock = -1;
272
273 /* all listener sockets require bind() */
274 flags |= COMM_DOBIND;
275
276 /* attempt native enabled port. */
277 sock = comm_openex(sock_type, proto, addr, flags, note);
278
279 return sock;
280 }
281
282 static bool
283 limitError(int const anErrno)
284 {
285 return anErrno == ENFILE || anErrno == EMFILE;
286 }
287
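/// Set or clear the IPV6_V6ONLY option on an IPv6 socket.
/// NB: despite its name, the second argument is an on/off flag
/// (1 = IPv6-only, 0 = also accept IPv4-mapped connections), not a TOS value.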
288 void
289 comm_set_v6only(int fd, int tos)
290 {
291 #ifdef IPV6_V6ONLY
292 if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
293 debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerror());
294 }
295 #else
296 debugs(50, 0, "WARNING: comm_open: setsockopt(IPV6_V6ONLY) not supported on this platform");
297 #endif /* sockopt */
298 }
299
300 /**
301 * Set the socket option required for TPROXY spoofing for:
302 * - Linux TPROXY v4 support,
303 * - OpenBSD divert-to support,
304 * - FreeBSD IPFW TPROXY v4 support.
305 */
306 void
307 comm_set_transparent(int fd)
308 {
309 #if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
310 # define soLevel SOL_IP
311 # define soFlag IP_TRANSPARENT
312 bool doneSuid = false;
313
314 #elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
315 # define soLevel SOL_SOCKET
316 # define soFlag SO_BINDANY
317 enter_suid();
318 bool doneSuid = true;
319
320 #elif defined(IP_BINDANY) // FreeBSD with IPFW
321 # define soLevel IPPROTO_IP
322 # define soFlag IP_BINDANY
323 enter_suid();
324 bool doneSuid = true;
325
326 #else
327 debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
328 #endif /* sockopt */
329
330 #if defined(soLevel) && defined(soFlag)
331 int tos = 1;
332 if (setsockopt(fd, soLevel, soFlag, (char *) &tos, sizeof(int)) < 0) {
333 debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(TPROXY) on FD " << fd << ": " << xstrerror());
334 } else {
335 /* mark the socket as having transparent options */
336 fd_table[fd].flags.transparent = true;
337 }
338 if (doneSuid)
339 leave_suid();
340 #endif
341 }
342
343 /**
344 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
345 * is OR of flags specified in defines.h:COMM_*
346 */
347 int
348 comm_openex(int sock_type,
349 int proto,
350 Ip::Address &addr,
351 int flags,
352 const char *note)
353 {
354 int new_socket;
355 struct addrinfo *AI = NULL;
356
357 PROF_start(comm_open);
358 /* Create socket for accepting new connections. */
359 ++ statCounter.syscalls.sock.sockets;
360
361 /* Setup the socket addrinfo details for use */
362 addr.getAddrInfo(AI);
363 AI->ai_socktype = sock_type;
364 AI->ai_protocol = proto;
365
366 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );
367
368 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
369
370 /* under IPv6 there is the possibility IPv6 is present but disabled. */
371 /* try again as IPv4-native if possible */
372 if ( new_socket < 0 && Ip::EnableIpv6 && addr.isIPv6() && addr.setIPv4() ) {
373 /* attempt to open this IPv4-only. */
374 Ip::Address::FreeAddrInfo(AI);
375 /* Setup the socket addrinfo details for use */
376 addr.getAddrInfo(AI);
377 AI->ai_socktype = sock_type;
378 AI->ai_protocol = proto;
379 debugs(50, 3, "comm_openex: Attempt fallback open socket for: " << addr );
380 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
381 debugs(50, 2, HERE << "attempt open " << note << " socket on: " << addr);
382 }
383
384 if (new_socket < 0) {
385 /* Increase the number of reserved fd's if calls to socket()
386 * are failing because the open file table is full. This
387 * limits the number of simultaneous clients */
388
389 if (limitError(errno)) {
390 debugs(50, DBG_IMPORTANT, "comm_open: socket failure: " << xstrerror());
391 fdAdjustReserved();
392 } else {
393 debugs(50, DBG_CRITICAL, "comm_open: socket failure: " << xstrerror());
394 }
395
396 Ip::Address::FreeAddrInfo(AI);
397
398 PROF_stop(comm_open);
399 return -1;
400 }
401
402 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
403 Comm::ConnectionPointer conn = new Comm::Connection;
404 conn->local = addr;
405 conn->fd = new_socket;
406
407 debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );
408
409 if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.isIPv6() )
410 comm_set_v6only(conn->fd, 1);
411
412 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
413 /* Other OS may have this administratively disabled for general use. Same deal. */
414 if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.isIPv6() )
415 comm_set_v6only(conn->fd, 0);
416
417 comm_init_opened(conn, note, AI);
418 new_socket = comm_apply_flags(conn->fd, addr, flags, AI);
419
420 Ip::Address::FreeAddrInfo(AI);
421
422 PROF_stop(comm_open);
423
424 // XXX transition only. prevent conn from closing the new FD on function exit.
425 conn->fd = -1;
426 return new_socket;
427 }
428
429 /// update FD tables after a local or remote (IPC) comm_openex();
430 void
431 comm_init_opened(const Comm::ConnectionPointer &conn,
432 const char *note,
433 struct addrinfo *AI)
434 {
435 assert(Comm::IsConnOpen(conn));
436 assert(AI);
437
438 /* update fdstat */
439 debugs(5, 5, HERE << conn << " is a new socket");
440
441 assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
442 fd_open(conn->fd, FD_SOCKET, note);
443
444 fdd_table[conn->fd].close_file = NULL;
445 fdd_table[conn->fd].close_line = 0;
446
447 fde *F = &fd_table[conn->fd];
448 F->local_addr = conn->local;
449
450 F->sock_family = AI->ai_family;
451 }
452
453 /// apply flags after a local comm_open*() call;
454 /// returns new_socket or -1 on error
455 static int
456 comm_apply_flags(int new_socket,
457 Ip::Address &addr,
458 int flags,
459 struct addrinfo *AI)
460 {
461 assert(new_socket >= 0);
462 assert(AI);
463 const int sock_type = AI->ai_socktype;
464
465 if (!(flags & COMM_NOCLOEXEC))
466 commSetCloseOnExec(new_socket);
467
468 if ((flags & COMM_REUSEADDR))
469 commSetReuseAddr(new_socket);
470
471 if (addr.port() > (unsigned short) 0) {
472 #if _SQUID_WINDOWS_
473 if (sock_type != SOCK_DGRAM)
474 #endif
475 commSetNoLinger(new_socket);
476
477 if (opt_reuseaddr)
478 commSetReuseAddr(new_socket);
479 }
480
481 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
482 if ((flags & COMM_TRANSPARENT)) {
483 comm_set_transparent(new_socket);
484 }
485
486 if ( (flags & COMM_DOBIND) || addr.port() > 0 || !addr.isAnyAddr() ) {
487 if ( !(flags & COMM_DOBIND) && addr.isAnyAddr() )
488 debugs(5, DBG_IMPORTANT,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
489 if ( addr.isNoAddr() )
490 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");
491
492 if (commBind(new_socket, *AI) != Comm::OK) {
493 comm_close(new_socket);
494 return -1;
495 }
496 }
497
498 if (flags & COMM_NONBLOCKING)
499 if (commSetNonBlocking(new_socket) == Comm::COMM_ERROR) {
500 comm_close(new_socket);
501 return -1;
502 }
503
504 #ifdef TCP_NODELAY
505 if (sock_type == SOCK_STREAM)
506 commSetTcpNoDelay(new_socket);
507
508 #endif
509
510 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
511 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
512
513 return new_socket;
514 }
515
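/// Adopt an already-open socket (e.g., one created by another process and
/// passed over IPC) into the comm layer: register it in the FD tables and
/// mirror the connection flags into the fde flags without re-applying them
/// via setsockopt().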
516 void
517 comm_import_opened(const Comm::ConnectionPointer &conn,
518 const char *note,
519 struct addrinfo *AI)
520 {
521 debugs(5, 2, HERE << conn);
522 assert(Comm::IsConnOpen(conn));
523 assert(AI);
524
525 comm_init_opened(conn, note, AI);
526
527 if (!(conn->flags & COMM_NOCLOEXEC))
528 fd_table[conn->fd].flags.close_on_exec = true;
529
530 if (conn->local.port() > (unsigned short) 0) {
531 #if _SQUID_WINDOWS_
532 if (AI->ai_socktype != SOCK_DGRAM)
533 #endif
534 fd_table[conn->fd].flags.nolinger = true;
535 }
536
537 if ((conn->flags & COMM_TRANSPARENT))
538 fd_table[conn->fd].flags.transparent = true;
539
540 if (conn->flags & COMM_NONBLOCKING)
541 fd_table[conn->fd].flags.nonblocking = true;
542
543 #ifdef TCP_NODELAY
544 if (AI->ai_socktype == SOCK_STREAM)
545 fd_table[conn->fd].flags.nodelay = true;
546 #endif
547
548 /* no fd_table[fd].flags. updates needed for these conditions:
549 * if ((flags & COMM_REUSEADDR)) ...
550 * if ((flags & COMM_DOBIND) ...) ...
551 */
552 }
553
554 // XXX: now that raw-FD timeouts are only unset for pipes and files, this SHOULD be a no-op
555 // with the handler already unset. Leaving this present until that can be verified for all code paths.
556 void
557 commUnsetFdTimeout(int fd)
558 {
559 debugs(5, 3, HERE << "Remove timeout for FD " << fd);
560 assert(fd >= 0);
561 assert(fd < Squid_MaxFD);
562 fde *F = &fd_table[fd];
563 assert(F->flags.open);
564
565 F->timeoutHandler = NULL;
566 F->timeout = 0;
567 }
568
569 int
570 commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
571 {
572 debugs(5, 3, HERE << conn << " timeout " << timeout);
573 assert(Comm::IsConnOpen(conn));
574 assert(conn->fd < Squid_MaxFD);
575 fde *F = &fd_table[conn->fd];
576 assert(F->flags.open);
577
578 if (timeout < 0) {
579 F->timeoutHandler = NULL;
580 F->timeout = 0;
581 } else {
582 if (callback != NULL) {
583 typedef CommTimeoutCbParams Params;
584 Params &params = GetCommParams<Params>(callback);
585 params.conn = conn;
586 F->timeoutHandler = callback;
587 }
588
589 F->timeout = squid_curtime + (time_t) timeout;
590 }
591
592 return F->timeout;
593 }
594
595 int
596 commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
597 {
598 debugs(5, 3, HERE << "Remove timeout for " << conn);
599 AsyncCall::Pointer nil;
600 return commSetConnTimeout(conn, -1, nil);
601 }
602
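/// Start or re-check a connect(2) on an already-open socket.
/// Returns Comm::OK once connected, Comm::INPROGRESS while a non-blocking
/// connect is still pending, Comm::ERR_PROTOCOL for address-family mismatches
/// (and EAFNOSUPPORT/EINVAL), and Comm::COMM_ERROR for other failures.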
603 int
604 comm_connect_addr(int sock, const Ip::Address &address)
605 {
606 Comm::Flag status = Comm::OK;
607 fde *F = &fd_table[sock];
608 int x = 0;
609 int err = 0;
610 socklen_t errlen;
611 struct addrinfo *AI = NULL;
612 PROF_start(comm_connect_addr);
613
614 assert(address.port() != 0);
615
616 debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");
617
618 /* Handle IPv6 over IPv4-only socket case.
619 * this case must presently be handled here since the getAddrInfo asserts on bad mappings.
620 * NP: because commResetFD is private to ConnStateData we have to return an error and
621 * trust it's handled properly.
622 */
623 if (F->sock_family == AF_INET && !address.isIPv4()) {
624 errno = ENETUNREACH;
625 return Comm::ERR_PROTOCOL;
626 }
627
628 /* Handle IPv4 over IPv6-only socket case.
629 * This case is presently handled here as it's both a known case and it's
630 * uncertain what error will be returned by the IPv6 stack in such case. It's
631 * possible this will also be handled by the errno checks below after connect()
632 * but needs careful cross-platform verification, and verifying the address
633 * condition here is simple.
634 */
635 if (!F->local_addr.isIPv4() && address.isIPv4()) {
636 errno = ENETUNREACH;
637 return Comm::ERR_PROTOCOL;
638 }
639
640 address.getAddrInfo(AI, F->sock_family);
641
642 /* Establish connection. */
643 errno = 0;
644
645 if (!F->flags.called_connect) {
646 F->flags.called_connect = true;
647 ++ statCounter.syscalls.sock.connects;
648
649 x = connect(sock, AI->ai_addr, AI->ai_addrlen);
650
651 // XXX: ICAP code refuses callbacks during a pending comm_ call
652 // Async calls development will fix this.
653 if (x == 0) {
654 x = -1;
655 errno = EINPROGRESS;
656 }
657
658 if (x < 0) {
659 debugs(5,5, "comm_connect_addr: sock=" << sock << ", addrinfo( " <<
660 " flags=" << AI->ai_flags <<
661 ", family=" << AI->ai_family <<
662 ", socktype=" << AI->ai_socktype <<
663 ", protocol=" << AI->ai_protocol <<
664 ", &addr=" << AI->ai_addr <<
665 ", addrlen=" << AI->ai_addrlen <<
666 " )" );
667 debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerror());
668 debugs(14,9, "connecting to: " << address );
669 }
670 } else {
671 #if _SQUID_NEWSOS6_
672 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
673
674 connect(sock, AI->ai_addr, AI->ai_addrlen);
675
676 if (errno == EINVAL) {
677 errlen = sizeof(err);
678 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
679
680 if (x >= 0)
681 errno = x;
682 }
683
684 #else
685 errlen = sizeof(err);
686
687 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
688
689 if (x == 0)
690 errno = err;
691
692 #if _SQUID_SOLARIS_
693 /*
694 * Solaris 2.4's socket emulation doesn't allow you
695 * to determine the error from a failed non-blocking
696 * connect and just returns EPIPE. Create a fake
697 * error message for connect. -- fenner@parc.xerox.com
698 */
699 if (x < 0 && errno == EPIPE)
700 errno = ENOTCONN;
701
702 #endif
703 #endif
704
705 }
706
707 Ip::Address::FreeAddrInfo(AI);
708
709 PROF_stop(comm_connect_addr);
710
711 if (errno == 0 || errno == EISCONN)
712 status = Comm::OK;
713 else if (ignoreErrno(errno))
714 status = Comm::INPROGRESS;
715 else if (errno == EAFNOSUPPORT || errno == EINVAL)
716 return Comm::ERR_PROTOCOL;
717 else
718 return Comm::COMM_ERROR;
719
720 address.toStr(F->ipaddr, MAX_IPSTRLEN);
721
722 F->remote_port = address.port(); /* remote_port is HS */
723
724 if (status == Comm::OK) {
725 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connected to " << address);
726 } else if (status == Comm::INPROGRESS) {
727 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connection pending");
728 }
729
730 return status;
731 }
732
733 void
734 commCallCloseHandlers(int fd)
735 {
736 fde *F = &fd_table[fd];
737 debugs(5, 5, "commCallCloseHandlers: FD " << fd);
738
739 while (F->closeHandler != NULL) {
740 AsyncCall::Pointer call = F->closeHandler;
741 F->closeHandler = call->Next();
742 call->setNext(NULL);
743 // If call is not canceled schedule it for execution else ignore it
744 if (!call->canceled()) {
745 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
746 ScheduleCallHere(call);
747 }
748 }
749 }
750
751 #if LINGERING_CLOSE
752 static void
753 commLingerClose(int fd, void *unused)
754 {
755 LOCAL_ARRAY(char, buf, 1024);
756 int n;
757 n = FD_READ_METHOD(fd, buf, 1024);
758
759 if (n < 0)
760 debugs(5, 3, "commLingerClose: FD " << fd << " read: " << xstrerror());
761
762 comm_close(fd);
763 }
764
765 static void
766 commLingerTimeout(const FdeCbParams &params)
767 {
768 debugs(5, 3, "commLingerTimeout: FD " << params.fd);
769 comm_close(params.fd);
770 }
771
772 /*
773 * Inspired by apache
774 */
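/*
 * A "lingering close": shut down our sending side, keep draining whatever the
 * peer still has in flight for up to 10 seconds, and only then close the
 * socket, so the tail of the response is not lost to an early TCP RST.
 */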
775 void
776 comm_lingering_close(int fd)
777 {
778 #if USE_OPENSSL
779 if (fd_table[fd].ssl)
780 ssl_shutdown_method(fd_table[fd].ssl);
781 #endif
782
783 if (shutdown(fd, 1) < 0) {
784 comm_close(fd);
785 return;
786 }
787
788 fd_note(fd, "lingering close");
789 AsyncCall::Pointer call = commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout, NULL));
790
791 debugs(5, 3, HERE << "FD " << fd << " lingering close timeout: 10 seconds");
792 assert(fd_table[fd].flags.open);
793 if (call != NULL) {
794 typedef FdeCbParams Params;
795 Params &params = GetCommParams<Params>(call);
796 params.fd = fd;
797 fd_table[fd].timeoutHandler = call;
798 fd_table[fd].timeout = squid_curtime + static_cast<time_t>(10);
799 }
800
801 Comm::SetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
802 }
803
804 #endif
805
806 /**
807 * enable linger with time of 0 so that when the socket is
808 * closed, TCP generates a RESET
809 */
810 void
811 comm_reset_close(const Comm::ConnectionPointer &conn)
812 {
813 struct linger L;
814 L.l_onoff = 1;
815 L.l_linger = 0;
816
817 if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
818 debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerror());
819
820 conn->close();
821 }
822
823 // Legacy close function.
824 void
825 old_comm_reset_close(int fd)
826 {
827 struct linger L;
828 L.l_onoff = 1;
829 L.l_linger = 0;
830
831 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
832 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerror());
833
834 comm_close(fd);
835 }
836
837 #if USE_OPENSSL
838 void
839 commStartSslClose(const FdeCbParams &params)
840 {
841 assert(fd_table[params.fd].ssl);
842 ssl_shutdown_method(fd_table[params.fd].ssl);
843 }
844 #endif
845
846 void
847 comm_close_complete(const FdeCbParams &params)
848 {
849 #if USE_OPENSSL
850 fde *F = &fd_table[params.fd];
851
852 if (F->ssl) {
853 SSL_free(F->ssl);
854 F->ssl = NULL;
855 }
856
857 if (F->dynamicSslContext) {
858 SSL_CTX_free(F->dynamicSslContext);
859 F->dynamicSslContext = NULL;
860 }
861 #endif
862 fd_close(params.fd); /* update fdstat */
863 close(params.fd);
864
865 ++ statCounter.syscalls.sock.closes;
866
867 /* When one connection closes, give accept() a chance, if need be */
868 Comm::AcceptLimiter::Instance().kick();
869 }
870
871 /*
872 * Close the socket fd.
873 *
874 * + call write handlers with ERR_CLOSING
875 * + call read handlers with ERR_CLOSING
876 * + call closing handlers
877 *
878 * NOTE: Comm::ERR_CLOSING will NOT be called for CommReads' sitting in a
879 * DeferredReadManager.
880 */
881 void
882 _comm_close(int fd, char const *file, int line)
883 {
884 debugs(5, 3, "comm_close: start closing FD " << fd);
885 assert(fd >= 0);
886 assert(fd < Squid_MaxFD);
887
888 fde *F = &fd_table[fd];
889 fdd_table[fd].close_file = file;
890 fdd_table[fd].close_line = line;
891
892 if (F->closing())
893 return;
894
895 /* XXX: is this obsolete behind F->closing() ? */
896 if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
897 return;
898
899 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
900 if (!isOpen(fd)) {
901 debugs(50, DBG_IMPORTANT, HERE << "BUG 3556: FD " << fd << " is not an open socket.");
902 // XXX: do we need to run close(fd) or fd_close(fd) here?
903 return;
904 }
905
906 assert(F->type != FD_FILE);
907
908 PROF_start(comm_close);
909
910 F->flags.close_request = true;
911
912 #if USE_OPENSSL
913 if (F->ssl) {
914 AsyncCall::Pointer startCall=commCbCall(5,4, "commStartSslClose",
915 FdeCbPtrFun(commStartSslClose, NULL));
916 FdeCbParams &startParams = GetCommParams<FdeCbParams>(startCall);
917 startParams.fd = fd;
918 ScheduleCallHere(startCall);
919 }
920 #endif
921
922 // a half-closed fd may lack a reader, so we stop monitoring explicitly
923 if (commHasHalfClosedMonitor(fd))
924 commStopHalfClosedMonitor(fd);
925 commUnsetFdTimeout(fd);
926
927 // notify read/write handlers after canceling select reservations, if any
928 if (COMMIO_FD_WRITECB(fd)->active()) {
929 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
930 COMMIO_FD_WRITECB(fd)->finish(Comm::ERR_CLOSING, errno);
931 }
932 if (COMMIO_FD_READCB(fd)->active()) {
933 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
934 COMMIO_FD_READCB(fd)->finish(Comm::ERR_CLOSING, errno);
935 }
936
937 #if USE_DELAY_POOLS
938 if (ClientInfo *clientInfo = F->clientInfo) {
939 if (clientInfo->selectWaiting) {
940 clientInfo->selectWaiting = false;
941 // kick queue or it will get stuck as commWriteHandle is not called
942 clientInfo->kickQuotaQueue();
943 }
944 }
945 #endif
946
947 commCallCloseHandlers(fd);
948
949 comm_empty_os_read_buffers(fd);
950
951 AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
952 FdeCbPtrFun(comm_close_complete, NULL));
953 FdeCbParams &completeParams = GetCommParams<FdeCbParams>(completeCall);
954 completeParams.fd = fd;
955 // must use async call to wait for all callbacks
956 // scheduled before comm_close() to finish
957 ScheduleCallHere(completeCall);
958
959 PROF_stop(comm_close);
960 }
961
962 /* Send a udp datagram to specified TO_ADDR. */
963 int
964 comm_udp_sendto(int fd,
965 const Ip::Address &to_addr,
966 const void *buf,
967 int len)
968 {
969 PROF_start(comm_udp_sendto);
970 ++ statCounter.syscalls.sock.sendtos;
971
972 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
973 " using FD " << fd << " using Port " << comm_local_port(fd) );
974
975 struct addrinfo *AI = NULL;
976 to_addr.getAddrInfo(AI, fd_table[fd].sock_family);
977 int x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
978 Ip::Address::FreeAddrInfo(AI);
979
980 PROF_stop(comm_udp_sendto);
981
982 if (x >= 0)
983 return x;
984
985 #if _SQUID_LINUX_
986
987 if (ECONNREFUSED != errno)
988 #endif
989
990 debugs(50, DBG_IMPORTANT, "comm_udp_sendto: FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerror());
991
992 return Comm::COMM_ERROR;
993 }
994
995 void
996 comm_add_close_handler(int fd, CLCB * handler, void *data)
997 {
998 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
999 handler << ", data=" << data);
1000
1001 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
1002 CommCloseCbPtrFun(handler, data));
1003 comm_add_close_handler(fd, call);
1004 }
1005
1006 void
1007 comm_add_close_handler(int fd, AsyncCall::Pointer &call)
1008 {
1009 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
1010
1011 /*TODO:Check for a similar scheduled AsyncCall*/
1012 // for (c = fd_table[fd].closeHandler; c; c = c->next)
1013 // assert(c->handler != handler || c->data != data);
1014
1015 call->setNext(fd_table[fd].closeHandler);
1016
1017 fd_table[fd].closeHandler = call;
1018 }
1019
1020 // remove function-based close handler
1021 void
1022 comm_remove_close_handler(int fd, CLCB * handler, void *data)
1023 {
1024 assert(isOpen(fd));
1025 /* Find handler in list */
1026 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
1027 handler << ", data=" << data);
1028
1029 AsyncCall::Pointer p, prev = NULL;
1030 for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
1031 typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
1032 const Call *call = dynamic_cast<const Call*>(p.getRaw());
1033 if (!call) // method callbacks have their own comm_remove_close_handler
1034 continue;
1035
1036 typedef CommCloseCbParams Params;
1037 const Params &params = GetCommParams<Params>(p);
1038 if (call->dialer.handler == handler && params.data == data)
1039 break; /* This is our handler */
1040 }
1041
1042 // comm_close removes all close handlers so our handler may be gone
1043 if (p != NULL) {
1044 p->dequeue(fd_table[fd].closeHandler, prev);
1045 p->cancel("comm_remove_close_handler");
1046 }
1047 }
1048
1049 // remove method-based close handler
1050 void
1051 comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1052 {
1053 assert(isOpen(fd));
1054 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1055
1056 // comm_close removes all close handlers so our handler may be gone
1057 AsyncCall::Pointer p, prev = NULL;
1058 for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());
1059
1060 if (p != NULL)
1061 p->dequeue(fd_table[fd].closeHandler, prev);
1062 call->cancel("comm_remove_close_handler");
1063 }
1064
1065 static void
1066 commSetNoLinger(int fd)
1067 {
1068
1069 struct linger L;
1070 L.l_onoff = 0; /* off */
1071 L.l_linger = 0;
1072
1073 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1074 debugs(50, 0, "commSetNoLinger: FD " << fd << ": " << xstrerror());
1075
1076 fd_table[fd].flags.nolinger = true;
1077 }
1078
1079 static void
1080 commSetReuseAddr(int fd)
1081 {
1082 int on = 1;
1083
1084 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0)
1085 debugs(50, DBG_IMPORTANT, "commSetReuseAddr: FD " << fd << ": " << xstrerror());
1086 }
1087
1088 static void
1089 commSetTcpRcvbuf(int fd, int size)
1090 {
1091 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0)
1092 debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1093 if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0)
1094 debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1095 #ifdef TCP_WINDOW_CLAMP
1096 if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0)
1097 debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1098 #endif
1099 }
1100
1101 int
1102 commSetNonBlocking(int fd)
1103 {
1104 #if !_SQUID_WINDOWS_
1105 int flags;
1106 int dummy = 0;
1107 #endif
1108 #if _SQUID_WINDOWS_
1109 int nonblocking = TRUE;
1110
1111 #if _SQUID_CYGWIN_
1112 if (fd_table[fd].type != FD_PIPE) {
1113 #endif
1114
1115 if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
1116 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror() << " " << fd_table[fd].type);
1117 return Comm::COMM_ERROR;
1118 }
1119
1120 #if _SQUID_CYGWIN_
1121 } else {
1122 #endif
1123 #endif
1124 #if !_SQUID_WINDOWS_
1125
1126 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1127 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1128 return Comm::COMM_ERROR;
1129 }
1130
1131 if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
1132 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror());
1133 return Comm::COMM_ERROR;
1134 }
1135
1136 #endif
1137 #if _SQUID_CYGWIN_
1138 }
1139 #endif
1140 fd_table[fd].flags.nonblocking = true;
1141
1142 return 0;
1143 }
1144
1145 int
1146 commUnsetNonBlocking(int fd)
1147 {
1148 #if _SQUID_WINDOWS_
1149 int nonblocking = FALSE;
1150
1151 if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
1152 #else
1153 int flags;
1154 int dummy = 0;
1155
1156 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1157 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1158 return Comm::COMM_ERROR;
1159 }
1160
1161 if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
1162 #endif
1163 debugs(50, 0, "commUnsetNonBlocking: FD " << fd << ": " << xstrerror());
1164 return Comm::COMM_ERROR;
1165 }
1166
1167 fd_table[fd].flags.nonblocking = false;
1168 return 0;
1169 }
1170
1171 void
1172 commSetCloseOnExec(int fd)
1173 {
1174 #ifdef FD_CLOEXEC
1175 int flags;
1176 int dummy = 0;
1177
1178 if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1179 debugs(50, 0, "FD " << fd << ": fcntl F_GETFD: " << xstrerror());
1180 return;
1181 }
1182
1183 if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
1184 debugs(50, 0, "FD " << fd << ": set close-on-exec failed: " << xstrerror());
1185
1186 fd_table[fd].flags.close_on_exec = true;
1187
1188 #endif
1189 }
1190
1191 #ifdef TCP_NODELAY
1192 static void
1193 commSetTcpNoDelay(int fd)
1194 {
1195 int on = 1;
1196
1197 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0)
1198 debugs(50, DBG_IMPORTANT, "commSetTcpNoDelay: FD " << fd << ": " << xstrerror());
1199
1200 fd_table[fd].flags.nodelay = true;
1201 }
1202
1203 #endif
1204
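/// Enable TCP keep-alive probing on the socket.
/// idle     - seconds of inactivity before the first probe (TCP_KEEPIDLE)
/// interval - seconds between unanswered probes (TCP_KEEPINTVL)
/// timeout  - overall probing budget; the probe count (TCP_KEEPCNT) is
///            derived as ceil(timeout / interval)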
1205 void
1206 commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1207 {
1208 int on = 1;
1209 #ifdef TCP_KEEPCNT
1210 if (timeout && interval) {
1211 int count = (timeout + interval - 1) / interval;
1212 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(on)) < 0)
1213 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1214 }
1215 #endif
1216 #ifdef TCP_KEEPIDLE
1217 if (idle) {
1218 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(on)) < 0)
1219 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1220 }
1221 #endif
1222 #ifdef TCP_KEEPINTVL
1223 if (interval) {
1224 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(on)) < 0)
1225 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1226 }
1227 #endif
1228 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0)
1229 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1230 }
1231
1232 void
1233 comm_init(void)
1234 {
1235 fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
1236 fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));
1237
1238 /* make sure the accept() socket FIFO delay queue exists */
1239 Comm::AcceptLimiter::Instance();
1240
1241 // make sure the IO pending callback table exists
1242 Comm::CallbackTableInit();
1243
1244 /* XXX account fd_table */
1245 /* Keep a few file descriptors free so that we don't run out of FD's
1246 * after accepting a client but before it opens a socket or a file.
1247 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1248 RESERVED_FD = min(100, Squid_MaxFD / 4);
1249
1250 TheHalfClosed = new DescriptorSet;
1251
1252 /* setup the select loop module */
1253 Comm::SelectLoopInit();
1254 }
1255
1256 void
1257 comm_exit(void)
1258 {
1259 delete TheHalfClosed;
1260 TheHalfClosed = NULL;
1261
1262 safe_free(fd_table);
1263 safe_free(fdd_table);
1264 Comm::CallbackTableDestruct();
1265 }
1266
1267 #if USE_DELAY_POOLS
1268 // called when the queue is done waiting for the client bucket to fill
1269 void
1270 commHandleWriteHelper(void * data)
1271 {
1272 CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
1273 assert(queue);
1274
1275 ClientInfo *clientInfo = queue->clientInfo;
1276 // ClientInfo invalidates the queue if freed, so if we got here through
1277 // eventAdd cbdata protections, everything should be valid and consistent
1278 assert(clientInfo);
1279 assert(clientInfo->hasQueue());
1280 assert(clientInfo->hasQueue(queue));
1281 assert(!clientInfo->selectWaiting);
1282 assert(clientInfo->eventWaiting);
1283 clientInfo->eventWaiting = false;
1284
1285 do {
1286 // check that the head descriptor is still relevant
1287 const int head = clientInfo->quotaPeekFd();
1288 Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);
1289
1290 if (fd_table[head].clientInfo == clientInfo &&
1291 clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
1292 !fd_table[head].closing()) {
1293
1294 // wait for the head descriptor to become ready for writing
1295 Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
1296 clientInfo->selectWaiting = true;
1297 return;
1298 }
1299
1300 clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
1301 // and continue looking for a relevant one
1302 } while (clientInfo->hasQueue());
1303
1304 debugs(77,3, HERE << "emptied queue");
1305 }
1306
1307 bool
1308 ClientInfo::hasQueue() const
1309 {
1310 assert(quotaQueue);
1311 return !quotaQueue->empty();
1312 }
1313
1314 bool
1315 ClientInfo::hasQueue(const CommQuotaQueue *q) const
1316 {
1317 assert(quotaQueue);
1318 return quotaQueue == q;
1319 }
1320
1321 /// returns the first descriptor to be dequeued
1322 int
1323 ClientInfo::quotaPeekFd() const
1324 {
1325 assert(quotaQueue);
1326 return quotaQueue->front();
1327 }
1328
1329 /// returns the reservation ID of the first descriptor to be dequeued
1330 unsigned int
1331 ClientInfo::quotaPeekReserv() const
1332 {
1333 assert(quotaQueue);
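// reservation IDs are sequence numbers: enqueue() returns ++ins for each
// queued FD and dequeue() bumps outs, so the queue head holds reservation outs+1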
1334 return quotaQueue->outs + 1;
1335 }
1336
1337 /// queues a given fd, creating the queue if necessary; returns reservation ID
1338 unsigned int
1339 ClientInfo::quotaEnqueue(int fd)
1340 {
1341 assert(quotaQueue);
1342 return quotaQueue->enqueue(fd);
1343 }
1344
1345 /// removes queue head
1346 void
1347 ClientInfo::quotaDequeue()
1348 {
1349 assert(quotaQueue);
1350 quotaQueue->dequeue();
1351 }
1352
1353 void
1354 ClientInfo::kickQuotaQueue()
1355 {
1356 if (!eventWaiting && !selectWaiting && hasQueue()) {
1357 // wait at least a second if the bucket is empty
1358 const double delay = (bucketSize < 1.0) ? 1.0 : 0.0;
1359 eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1360 quotaQueue, delay, 0, true);
1361 eventWaiting = true;
1362 }
1363 }
1364
1365 /// calculates how much to write for a single dequeued client
1366 int
1367 ClientInfo::quotaForDequed()
1368 {
1369 /* If we have multiple clients and give full bucketSize to each client then
1370 * clt1 may often get a lot more because clt1->clt2 time distance in the
1371 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1372 * We divide quota evenly to be more fair. */
1373
1374 if (!rationedCount) {
1375 rationedCount = quotaQueue->size() + 1;
1376
1377 // The delay in ration recalculation temporarily deprives clients of
1378 // bytes that should have trickled in while rationedCount was positive.
1379 refillBucket();
1380
1381 // Rounding errors do not accumulate here, but we round down to avoid
1382 // negative bucket sizes after write with rationedCount=1.
1383 rationedQuota = static_cast<int>(floor(bucketSize/rationedCount));
1384 debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
1385 '*' << rationedCount);
1386 }
1387
1388 --rationedCount;
1389 debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
1390 " rations remaining: " << rationedCount);
1391
1392 // update 'last seen' time to prevent clientdb GC from dropping us
1393 last_seen = squid_curtime;
1394 return rationedQuota;
1395 }
1396
1397 ///< adds bytes to the quota bucket based on the rate and passed time
1398 void
1399 ClientInfo::refillBucket()
1400 {
1401 // all these times are in seconds, with double precision
1402 const double currTime = current_dtime;
1403 const double timePassed = currTime - prevTime;
1404
1405 // Calculate allowance for the time passed. Use double to avoid
1406 // accumulating rounding errors for small intervals. For example, always
1407 // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
1408 const double gain = timePassed * writeSpeedLimit;
1409
1410 debugs(77,5, HERE << currTime << " clt" << (const char*)hash.key << ": " <<
1411 bucketSize << " + (" << timePassed << " * " << writeSpeedLimit <<
1412 " = " << gain << ')');
1413
1414 // to further combat error accumulation during micro updates,
1415 // quit before updating time if we cannot add at least one byte
1416 if (gain < 1.0)
1417 return;
1418
1419 prevTime = currTime;
1420
1421 // for "first" connections, drain initial fat before refilling but keep
1422 // updating prevTime to avoid bursts after the fat is gone
1423 if (bucketSize > bucketSizeLimit) {
1424 debugs(77,4, HERE << "not refilling while draining initial fat");
1425 return;
1426 }
1427
1428 bucketSize += gain;
1429
1430 // obey quota limits
1431 if (bucketSize > bucketSizeLimit)
1432 bucketSize = bucketSizeLimit;
1433 }
1434
1435 void
1436 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
1437 {
1438 debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
1439 " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
1440 " highwatermark=" << aHighWatermark);
1441
1442 // set or possibly update traffic shaping parameters
1443 writeLimitingActive = true;
1444 writeSpeedLimit = aWriteSpeedLimit;
1445 bucketSizeLimit = aHighWatermark;
1446
1447 // but some members should only be set once for a newly activated bucket
1448 if (firstTimeConnection) {
1449 firstTimeConnection = false;
1450
1451 assert(!selectWaiting);
1452 assert(!quotaQueue);
1453 quotaQueue = new CommQuotaQueue(this);
1454
1455 bucketSize = anInitialBurst;
1456 prevTime = current_dtime;
1457 }
1458 }
1459
1460 CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1461 ins(0), outs(0)
1462 {
1463 assert(clientInfo);
1464 }
1465
1466 CommQuotaQueue::~CommQuotaQueue()
1467 {
1468 assert(!clientInfo); // ClientInfo should clear this before destroying us
1469 }
1470
1471 /// places the given fd at the end of the queue; returns reservation ID
1472 unsigned int
1473 CommQuotaQueue::enqueue(int fd)
1474 {
1475 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1476 ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1477 fds.push_back(fd);
1478 return ++ins;
1479 }
1480
1481 /// removes queue head
1482 void
1483 CommQuotaQueue::dequeue()
1484 {
1485 assert(!fds.empty());
1486 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1487 ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1488 fds.size());
1489 fds.pop_front();
1490 ++outs;
1491 }
1492 #endif
1493
1494 /*
1495 * hm, this might be too general-purpose for all the places we'd
1496 * like to use it.
1497 */
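/// Returns non-zero for transient errno values (EINPROGRESS, EWOULDBLOCK/EAGAIN,
/// EALREADY, EINTR, ERESTART) that callers should retry or wait out rather than
/// treat as hard failures.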
1498 int
1499 ignoreErrno(int ierrno)
1500 {
1501 switch (ierrno) {
1502
1503 case EINPROGRESS:
1504
1505 case EWOULDBLOCK:
1506 #if EAGAIN != EWOULDBLOCK
1507
1508 case EAGAIN:
1509 #endif
1510
1511 case EALREADY:
1512
1513 case EINTR:
1514 #ifdef ERESTART
1515
1516 case ERESTART:
1517 #endif
1518
1519 return 1;
1520
1521 default:
1522 return 0;
1523 }
1524
1525 /* NOTREACHED */
1526 }
1527
1528 void
1529 commCloseAllSockets(void)
1530 {
1531 int fd;
1532 fde *F = NULL;
1533
1534 for (fd = 0; fd <= Biggest_FD; ++fd) {
1535 F = &fd_table[fd];
1536
1537 if (!F->flags.open)
1538 continue;
1539
1540 if (F->type != FD_SOCKET)
1541 continue;
1542
1543 if (F->flags.ipc) /* don't close inter-process sockets */
1544 continue;
1545
1546 if (F->timeoutHandler != NULL) {
1547 AsyncCall::Pointer callback = F->timeoutHandler;
1548 F->timeoutHandler = NULL;
1549 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1550 ScheduleCallHere(callback);
1551 } else {
1552 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1553 old_comm_reset_close(fd);
1554 }
1555 }
1556 }
1557
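/// Whether checkTimeouts() can skip this FD: it is already closed,
/// has no timeout configured, or its timeout has not expired yet.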
1558 static bool
1559 AlreadyTimedOut(fde *F)
1560 {
1561 if (!F->flags.open)
1562 return true;
1563
1564 if (F->timeout == 0)
1565 return true;
1566
1567 if (F->timeout > squid_curtime)
1568 return true;
1569
1570 return false;
1571 }
1572
1573 static bool
1574 writeTimedOut(int fd)
1575 {
1576 if (!COMMIO_FD_WRITECB(fd)->active())
1577 return false;
1578
1579 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1580 return false;
1581
1582 return true;
1583 }
1584
1585 void
1586 checkTimeouts(void)
1587 {
1588 int fd;
1589 fde *F = NULL;
1590 AsyncCall::Pointer callback;
1591
1592 for (fd = 0; fd <= Biggest_FD; ++fd) {
1593 F = &fd_table[fd];
1594
1595 if (writeTimedOut(fd)) {
1596 // We have an active write callback and we are timed out
1597 debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
1598 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1599 COMMIO_FD_WRITECB(fd)->finish(Comm::COMM_ERROR, ETIMEDOUT);
1600 } else if (AlreadyTimedOut(F))
1601 continue;
1602
1603 debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");
1604
1605 if (F->timeoutHandler != NULL) {
1606 debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
1607 callback = F->timeoutHandler;
1608 F->timeoutHandler = NULL;
1609 ScheduleCallHere(callback);
1610 } else {
1611 debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
1612 comm_close(fd);
1613 }
1614 }
1615 }
1616
1617 /// Start waiting for a possibly half-closed connection to close
1618 // by scheduling a read callback to a monitoring handler that
1619 // will close the connection on read errors.
1620 void
1621 commStartHalfClosedMonitor(int fd)
1622 {
1623 debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
1624 assert(isOpen(fd) && !commHasHalfClosedMonitor(fd));
1625 (void)TheHalfClosed->add(fd); // could also assert the result
1626 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1627 }
1628
1629 static
1630 void
1631 commPlanHalfClosedCheck()
1632 {
1633 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1634 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1635 WillCheckHalfClosed = true;
1636 }
1637 }
1638
1639 /// iterates over all descriptors that may need half-closed tests and
1640 /// calls comm_read for those that do; re-schedules the check if needed
1641 static
1642 void
1643 commHalfClosedCheck(void *)
1644 {
1645 debugs(5, 5, HERE << "checking " << *TheHalfClosed);
1646
1647 typedef DescriptorSet::const_iterator DSCI;
1648 const DSCI end = TheHalfClosed->end();
1649 for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
1650 Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
1651 c->fd = *i;
1652 if (!fd_table[c->fd].halfClosedReader) { // not reading already
1653 AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
1654 CommIoCbPtrFun(&commHalfClosedReader, NULL));
1655 Comm::Read(c, call);
1656 fd_table[c->fd].halfClosedReader = call;
1657 } else
1658 c->fd = -1; // XXX: temporary. prevent c replacement erase closing listed FD
1659 }
1660
1661 WillCheckHalfClosed = false; // as far as we know
1662 commPlanHalfClosedCheck(); // may need to check again
1663 }
1664
1665 /// checks whether we are waiting for possibly half-closed connection to close
1666 // We are monitoring if the read handler for the fd is the monitoring handler.
1667 bool
1668 commHasHalfClosedMonitor(int fd)
1669 {
1670 return TheHalfClosed->has(fd);
1671 }
1672
1673 /// stop waiting for possibly half-closed connection to close
1674 void
1675 commStopHalfClosedMonitor(int const fd)
1676 {
1677 debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
1678
1679 // cancel the read if one was scheduled
1680 AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1681 if (reader != NULL)
1682 Comm::ReadCancel(fd, reader);
1683 fd_table[fd].halfClosedReader = NULL;
1684
1685 TheHalfClosed->del(fd);
1686 }
1687
1688 /// I/O handler for the possibly half-closed connection monitoring code
1689 static void
1690 commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, Comm::Flag flag, int, void *)
1691 {
1692 // there cannot be more data coming in on half-closed connections
1693 assert(size == 0);
1694 assert(conn != NULL);
1695 assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read
1696
1697 fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now
1698
1699 // nothing to do if fd is being closed
1700 if (flag == Comm::ERR_CLOSING)
1701 return;
1702
1703 // if read failed, close the connection
1704 if (flag != Comm::OK) {
1705 debugs(5, 3, HERE << "closing " << conn);
1706 conn->close();
1707 return;
1708 }
1709
1710 // continue waiting for close or error
1711 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1712 }
1713
1714 CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}
1715
1716 CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
1717 : conn(c), buf(buf_), len(len_), callback(callback_) {}
1718
1719 DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}
1720
1721 DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
1722
1723 DeferredReadManager::~DeferredReadManager()
1724 {
1725 flushReads();
1726 assert (deferredReads.empty());
1727 }
1728
1729 /* explicit instantiation required for some systems */
1730
1731 /// \cond AUTODOCS_IGNORE
1732 template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
1733 /// \endcond
1734
1735 void
1736 DeferredReadManager::delayRead(DeferredRead const &aRead)
1737 {
1738 debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
1739 CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);
1740
1741 // We have to use a global function as a closer and point to temp
1742 // instead of "this" because DeferredReadManager is not a job and
1743 // is not even cbdata protected
1744 // XXX: and yet we use cbdata protection functions on it??
1745 AsyncCall::Pointer closer = commCbCall(5,4,
1746 "DeferredReadManager::CloseHandler",
1747 CommCloseCbPtrFun(&CloseHandler, temp));
1748 comm_add_close_handler(aRead.theRead.conn->fd, closer);
1749 temp->element.closer = closer; // remember so that we can cancel
1750 }
1751
1752 void
1753 DeferredReadManager::CloseHandler(const CommCloseCbParams &params)
1754 {
1755 if (!cbdataReferenceValid(params.data))
1756 return;
1757
1758 CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)params.data;
1759
1760 temp->element.closer = NULL;
1761 temp->element.markCancelled();
1762 }
1763
1764 DeferredRead
1765 DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
1766 {
1767 assert (!deferredReads.empty());
1768
1769 DeferredRead &read = deferredReads.head->element;
1770
1771 // NOTE: at this point the connection has been paused/stalled for an unknown
1772 // amount of time. We must re-validate that it is active and usable.
1773
1774 // If the connection has been closed already, cancel this read.
1775 if (!Comm::IsConnOpen(read.theRead.conn)) {
1776 if (read.closer != NULL) {
1777 read.closer->cancel("Connection closed before.");
1778 read.closer = NULL;
1779 }
1780 read.markCancelled();
1781 }
1782
1783 if (!read.cancelled) {
1784 comm_remove_close_handler(read.theRead.conn->fd, read.closer);
1785 read.closer = NULL;
1786 }
1787
1788 DeferredRead result = deferredReads.pop_front();
1789
1790 return result;
1791 }
1792
1793 void
1794 DeferredReadManager::kickReads(int const count)
1795 {
1796 /* if we had CbDataList::size() we could consolidate this and flushReads */
1797
1798 if (count < 1) {
1799 flushReads();
1800 return;
1801 }
1802
1803 size_t remaining = count;
1804
1805 while (!deferredReads.empty() && remaining) {
1806 DeferredRead aRead = popHead(deferredReads);
1807 kickARead(aRead);
1808
1809 if (!aRead.cancelled)
1810 --remaining;
1811 }
1812 }
1813
1814 void
1815 DeferredReadManager::flushReads()
1816 {
1817 CbDataListContainer<DeferredRead> reads;
1818 reads = deferredReads;
1819 deferredReads = CbDataListContainer<DeferredRead>();
1820
1821 // XXX: For fairness this SHOULD randomize the order
1822 while (!reads.empty()) {
1823 DeferredRead aRead = popHead(reads);
1824 kickARead(aRead);
1825 }
1826 }
1827
1828 void
1829 DeferredReadManager::kickARead(DeferredRead const &aRead)
1830 {
1831 if (aRead.cancelled)
1832 return;
1833
1834 if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
1835 return;
1836
1837 debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);
1838
1839 aRead.theReader(aRead.theContext, aRead.theRead);
1840 }
1841
1842 void
1843 DeferredRead::markCancelled()
1844 {
1845 cancelled = true;
1846 }
1847
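/// Event-engine hook: runs checkTimeouts() at most once per second, then
/// maps Comm::DoSelect() results onto the event-engine return codes.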
1848 int
1849 CommSelectEngine::checkEvents(int timeout)
1850 {
1851 static time_t last_timeout = 0;
1852
1853 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
1854 if (squid_curtime > last_timeout) {
1855 last_timeout = squid_curtime;
1856 checkTimeouts();
1857 }
1858
1859 switch (Comm::DoSelect(timeout)) {
1860
1861 case Comm::OK:
1862
1863 case Comm::TIMEOUT:
1864 return 0;
1865
1866 case Comm::IDLE:
1867
1868 case Comm::SHUTDOWN:
1869 return EVENT_IDLE;
1870
1871 case Comm::COMM_ERROR:
1872 return EVENT_ERROR;
1873
1874 default:
1875 fatal_dump("comm.cc: Internal error -- this should never happen.");
1876 return EVENT_ERROR;
1877 };
1878 }
1879
1880 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
1881 int
1882 comm_open_uds(int sock_type,
1883 int proto,
1884 struct sockaddr_un* addr,
1885 int flags)
1886 {
1887 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
1888
1889 int new_socket;
1890
1891 PROF_start(comm_open);
1892 /* Create socket for accepting new connections. */
1893 ++ statCounter.syscalls.sock.sockets;
1894
1895 /* Setup the socket addrinfo details for use */
1896 struct addrinfo AI;
1897 AI.ai_flags = 0;
1898 AI.ai_family = PF_UNIX;
1899 AI.ai_socktype = sock_type;
1900 AI.ai_protocol = proto;
1901 AI.ai_addrlen = SUN_LEN(addr);
1902 AI.ai_addr = (sockaddr*)addr;
1903 AI.ai_canonname = NULL;
1904 AI.ai_next = NULL;
1905
1906 debugs(50, 3, HERE << "Attempt open socket for: " << addr->sun_path);
1907
1908 if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
1909 /* Increase the number of reserved fd's if calls to socket()
1910 * are failing because the open file table is full. This
1911 * limits the number of simultaneous clients */
1912
1913 if (limitError(errno)) {
1914 debugs(50, DBG_IMPORTANT, HERE << "socket failure: " << xstrerror());
1915 fdAdjustReserved();
1916 } else {
1917 debugs(50, DBG_CRITICAL, HERE << "socket failure: " << xstrerror());
1918 }
1919
1920 PROF_stop(comm_open);
1921 return -1;
1922 }
1923
1924 debugs(50, 3, "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);
1925
1926 /* update fdstat */
1927 debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");
1928
1929 assert(!isOpen(new_socket));
1930 fd_open(new_socket, FD_MSGHDR, NULL);
1931
1932 fdd_table[new_socket].close_file = NULL;
1933
1934 fdd_table[new_socket].close_line = 0;
1935
1936 fd_table[new_socket].sock_family = AI.ai_family;
1937
1938 if (!(flags & COMM_NOCLOEXEC))
1939 commSetCloseOnExec(new_socket);
1940
1941 if (flags & COMM_REUSEADDR)
1942 commSetReuseAddr(new_socket);
1943
1944 if (flags & COMM_NONBLOCKING) {
1945 if (commSetNonBlocking(new_socket) != Comm::OK) {
1946 comm_close(new_socket);
1947 PROF_stop(comm_open);
1948 return -1;
1949 }
1950 }
1951
1952 if (flags & COMM_DOBIND) {
1953 if (commBind(new_socket, AI) != Comm::OK) {
1954 comm_close(new_socket);
1955 PROF_stop(comm_open);
1956 return -1;
1957 }
1958 }
1959
1960 #ifdef TCP_NODELAY
1961 if (sock_type == SOCK_STREAM)
1962 commSetTcpNoDelay(new_socket);
1963
1964 #endif
1965
1966 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
1967 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
1968
1969 PROF_stop(comm_open);
1970
1971 return new_socket;
1972 }