1 /*
2 * DEBUG: section 05 Socket Functions
3 * AUTHOR: Harvest Derived
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 *
32 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
33 */
34
35 #include "squid.h"
36 #include "ClientInfo.h"
37 #include "comm/AcceptLimiter.h"
38 #include "comm/comm_internal.h"
39 #include "comm/Connection.h"
40 #include "comm/IoCallback.h"
41 #include "comm/Loops.h"
42 #include "comm/Read.h"
43 #include "comm/TcpAcceptor.h"
44 #include "comm/Write.h"
45 #include "CommRead.h"
46 #include "compat/cmsg.h"
47 #include "DescriptorSet.h"
48 #include "event.h"
49 #include "fd.h"
50 #include "fde.h"
51 #include "globals.h"
52 #include "icmp/net_db.h"
53 #include "ip/Intercept.h"
54 #include "ip/QosConfig.h"
55 #include "ip/tools.h"
56 #include "pconn.h"
57 #include "profiler/Profiler.h"
58 #include "SBuf.h"
59 #include "SquidConfig.h"
60 #include "StatCounters.h"
61 #include "StoreIOBuffer.h"
62 #include "tools.h"
63
64 #if USE_OPENSSL
65 #include "ssl/support.h"
66 #endif
67
68 #include <cerrno>
69 #include <cmath>
70 #if _SQUID_CYGWIN_
71 #include <sys/ioctl.h>
72 #endif
73 #if HAVE_NETINET_TCP_H
74 #include <netinet/tcp.h>
75 #endif
76 #if HAVE_SYS_UN_H
77 #include <sys/un.h>
78 #endif
79
80 /*
81 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
82 */
83
84 static IOCB commHalfClosedReader;
85 static void comm_init_opened(const Comm::ConnectionPointer &conn, tos_t tos, nfmark_t nfmark, const char *note, struct addrinfo *AI);
86 static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
87
88 #if USE_DELAY_POOLS
89 CBDATA_CLASS_INIT(CommQuotaQueue);
90
91 static void commHandleWriteHelper(void * data);
92 #endif
93
94 /* STATIC */
95
96 static DescriptorSet *TheHalfClosed = NULL; /// the set of half-closed FDs
97 static bool WillCheckHalfClosed = false; /// true if check is scheduled
98 static EVH commHalfClosedCheck;
99 static void commPlanHalfClosedCheck();
100
101 static comm_err_t commBind(int s, struct addrinfo &);
102 static void commSetReuseAddr(int);
103 static void commSetNoLinger(int);
104 #ifdef TCP_NODELAY
105 static void commSetTcpNoDelay(int);
106 #endif
107 static void commSetTcpRcvbuf(int, int);
108
109 fd_debug_t *fdd_table = NULL;
110
111 bool
112 isOpen(const int fd)
113 {
114 return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
115 }
116
117 /**
118 * Empty the read buffers
119 *
120 * This is a magical routine that empties the read buffers.
121 * On some platforms (Linux), if a buffer has data in it before
122 * you call close(), the socket will hang and take quite a while
123 * to time out.
124 */
125 static void
126 comm_empty_os_read_buffers(int fd)
127 {
128 #if _SQUID_LINUX_
129 /* prevent those nasty RST packets */
130 char buf[SQUID_TCP_SO_RCVBUF];
131
132 if (fd_table[fd].flags.nonblocking) {
133 while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
134 }
135 #endif
136 }
137
138 /**
139 * Synchronous wrapper around UDP socket functions
140 */
141 int
142 comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
143 {
144 ++ statCounter.syscalls.sock.recvfroms;
145 debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
146 struct addrinfo *AI = NULL;
147 Ip::Address::InitAddrInfo(AI);
148 int x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
149 from = *AI;
150 Ip::Address::FreeAddrInfo(AI);
151 return x;
152 }
153
154 int
155 comm_udp_recv(int fd, void *buf, size_t len, int flags)
156 {
157 Ip::Address nul;
158 return comm_udp_recvfrom(fd, buf, len, flags, nul);
159 }
160
161 ssize_t
162 comm_udp_send(int s, const void *buf, size_t len, int flags)
163 {
164 return send(s, buf, len, flags);
165 }
166
167 bool
168 comm_has_incomplete_write(int fd)
169 {
170 assert(isOpen(fd) && COMMIO_FD_WRITECB(fd));
171 return COMMIO_FD_WRITECB(fd)->active();
172 }
173
174 /**
175 * Queue a write. handler/handler_data are called when the write fully
176 * completes, on error, or on file descriptor close.
177 */
178
179 /* Return the local port associated with fd. */
180 unsigned short
181 comm_local_port(int fd)
182 {
183 Ip::Address temp;
184 struct addrinfo *addr = NULL;
185 fde *F = &fd_table[fd];
186
187 /* If the fd is closed already, just return */
188
189 if (!F->flags.open) {
190 debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
191 return 0;
192 }
193
194 if (F->local_addr.port())
195 return F->local_addr.port();
196
197 if (F->sock_family == AF_INET)
198 temp.setIPv4();
199
200 Ip::Address::InitAddrInfo(addr);
201
202 if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
203 debugs(50, DBG_IMPORTANT, "comm_local_port: Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerror());
204 Ip::Address::FreeAddrInfo(addr);
205 return 0;
206 }
207 temp = *addr;
208
209 Ip::Address::FreeAddrInfo(addr);
210
211 if (F->local_addr.isAnyAddr()) {
212 /* save the whole local address, not just the port. */
213 F->local_addr = temp;
214 } else {
215 F->local_addr.port(temp.port());
216 }
217
218 debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.port() << "(family=" << F->sock_family << ")");
219 return F->local_addr.port();
220 }
221
222 static comm_err_t
223 commBind(int s, struct addrinfo &inaddr)
224 {
225 ++ statCounter.syscalls.sock.binds;
226
227 if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
228 debugs(50, 6, "commBind: bind socket FD " << s << " to " << fd_table[s].local_addr);
229 return COMM_OK;
230 }
231
232 debugs(50, 0, "commBind: Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerror());
233
234 return COMM_ERROR;
235 }
236
237 /**
238 * Create a socket. The default is a blocking, stream (TCP) socket. IO_TYPE
239 * is an OR of the COMM_* flags specified in comm.h. TOS defaults to 0.
240 */
241 int
242 comm_open(int sock_type,
243 int proto,
244 Ip::Address &addr,
245 int flags,
246 const char *note)
247 {
248 return comm_openex(sock_type, proto, addr, flags, 0, 0, note);
249 }
250
251 void
252 comm_open_listener(int sock_type,
253 int proto,
254 Comm::ConnectionPointer &conn,
255 const char *note)
256 {
257 /* all listener sockets require bind() */
258 conn->flags |= COMM_DOBIND;
259
260 /* attempt native enabled port. */
261 conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, 0, 0, note);
262 }
263
264 int
265 comm_open_listener(int sock_type,
266 int proto,
267 Ip::Address &addr,
268 int flags,
269 const char *note)
270 {
271 int sock = -1;
272
273 /* all listener sockets require bind() */
274 flags |= COMM_DOBIND;
275
276 /* attempt native enabled port. */
277 sock = comm_openex(sock_type, proto, addr, flags, 0, 0, note);
278
279 return sock;
280 }
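/*
 * Editor's illustrative sketch (not part of the original file): one way a
 * caller might use the Comm::ConnectionPointer flavour of
 * comm_open_listener() above. The port number, debug note and function name
 * are hypothetical, and the block is disabled with #if 0 so it cannot
 * affect the build.
 */
#if 0
static int
exampleOpenListener()
{
    Ip::Address any;                    // default-constructed (wildcard) local address
    any.port(3128);                     // hypothetical listening port
    Comm::ConnectionPointer conn = new Comm::Connection;
    conn->local = any;
    conn->flags = COMM_NONBLOCKING;     // listeners should not block
    comm_open_listener(SOCK_STREAM, IPPROTO_TCP, conn, "example listener");
    return Comm::IsConnOpen(conn) ? conn->fd : -1;
}
#endif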
281
282 static bool
283 limitError(int const anErrno)
284 {
285 return anErrno == ENFILE || anErrno == EMFILE;
286 }
287
288 void
289 comm_set_v6only(int fd, int tos)
290 {
291 #ifdef IPV6_V6ONLY
292 if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
293 debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerror());
294 }
295 #else
296 debugs(50, 0, "WARNING: comm_open: setsockopt(IPV6_V6ONLY) not supported on this platform");
297 #endif /* sockopt */
298 }
299
300 /**
301 * Set the socket option required for TPROXY spoofing for:
302 * - Linux TPROXY v4 support,
303 * - OpenBSD divert-to support,
304 * - FreeBSD IPFW TPROXY v4 support.
305 */
306 void
307 comm_set_transparent(int fd)
308 {
309 #if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
310 # define soLevel SOL_IP
311 # define soFlag IP_TRANSPARENT
312 bool doneSuid = false;
313
314 #elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
315 # define soLevel SOL_SOCKET
316 # define soFlag SO_BINDANY
317 enter_suid();
318 bool doneSuid = true;
319
320 #elif defined(IP_BINDANY) // FreeBSD with IPFW
321 # define soLevel IPPROTO_IP
322 # define soFlag IP_BINDANY
323 enter_suid();
324 bool doneSuid = true;
325
326 #else
327 debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
328 #endif /* sockopt */
329
330 #if defined(soLevel) && defined(soFlag)
331 int tos = 1;
332 if (setsockopt(fd, soLevel, soFlag, (char *) &tos, sizeof(int)) < 0) {
333 debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(TPROXY) on FD " << fd << ": " << xstrerror());
334 } else {
335 /* mark the socket as having transparent options */
336 fd_table[fd].flags.transparent = true;
337 }
338 if (doneSuid)
339 leave_suid();
340 #endif
341 }
342
343 /**
344 * Create a socket. The default is a blocking, stream (TCP) socket. IO_TYPE
345 * is an OR of the COMM_* flags specified in defines.h.
346 */
347 int
348 comm_openex(int sock_type,
349 int proto,
350 Ip::Address &addr,
351 int flags,
352 tos_t tos,
353 nfmark_t nfmark,
354 const char *note)
355 {
356 int new_socket;
357 struct addrinfo *AI = NULL;
358
359 PROF_start(comm_open);
360 /* Create socket for accepting new connections. */
361 ++ statCounter.syscalls.sock.sockets;
362
363 /* Setup the socket addrinfo details for use */
364 addr.getAddrInfo(AI);
365 AI->ai_socktype = sock_type;
366 AI->ai_protocol = proto;
367
368 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );
369
370 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
371
372 /* under IPv6 there is the possibility IPv6 is present but disabled. */
373 /* try again as IPv4-native if possible */
374 if ( new_socket < 0 && Ip::EnableIpv6 && addr.isIPv6() && addr.setIPv4() ) {
375 /* attempt to open this IPv4-only. */
376 Ip::Address::FreeAddrInfo(AI);
377 /* Setup the socket addrinfo details for use */
378 addr.getAddrInfo(AI);
379 AI->ai_socktype = sock_type;
380 AI->ai_protocol = proto;
381 debugs(50, 3, "comm_openex: Attempt fallback open socket for: " << addr );
382 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
383 debugs(50, 2, HERE << "attempt open " << note << " socket on: " << addr);
384 }
385
386 if (new_socket < 0) {
387 /* Increase the number of reserved fd's if calls to socket()
388 * are failing because the open file table is full. This
389 * limits the number of simultaneous clients */
390
391 if (limitError(errno)) {
392 debugs(50, DBG_IMPORTANT, "comm_open: socket failure: " << xstrerror());
393 fdAdjustReserved();
394 } else {
395 debugs(50, DBG_CRITICAL, "comm_open: socket failure: " << xstrerror());
396 }
397
398 Ip::Address::FreeAddrInfo(AI);
399
400 PROF_stop(comm_open);
401 return -1;
402 }
403
404 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
405 Comm::ConnectionPointer conn = new Comm::Connection;
406 conn->local = addr;
407 conn->fd = new_socket;
408
409 debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );
410
411 /* set TOS if needed */
412 if (tos)
413 Ip::Qos::setSockTos(conn, tos);
414
415 /* set netfilter mark if needed */
416 if (nfmark)
417 Ip::Qos::setSockNfmark(conn, nfmark);
418
419 if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.isIPv6() )
420 comm_set_v6only(conn->fd, 1);
421
422 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
423 /* Other OS may have this administratively disabled for general use. Same deal. */
424 if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.isIPv6() )
425 comm_set_v6only(conn->fd, 0);
426
427 comm_init_opened(conn, tos, nfmark, note, AI);
428 new_socket = comm_apply_flags(conn->fd, addr, flags, AI);
429
430 Ip::Address::FreeAddrInfo(AI);
431
432 PROF_stop(comm_open);
433
434 // XXX transition only. prevent conn from closing the new FD on function exit.
435 conn->fd = -1;
436 return new_socket;
437 }
438
439 /// update FD tables after a local or remote (IPC) comm_openex();
440 void
441 comm_init_opened(const Comm::ConnectionPointer &conn,
442 tos_t tos,
443 nfmark_t nfmark,
444 const char *note,
445 struct addrinfo *AI)
446 {
447 assert(Comm::IsConnOpen(conn));
448 assert(AI);
449
450 /* update fdstat */
451 debugs(5, 5, HERE << conn << " is a new socket");
452
453 assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
454 fd_open(conn->fd, FD_SOCKET, note);
455
456 fdd_table[conn->fd].close_file = NULL;
457 fdd_table[conn->fd].close_line = 0;
458
459 fde *F = &fd_table[conn->fd];
460 F->local_addr = conn->local;
461 F->tosToServer = tos;
462
463 F->nfmarkToServer = nfmark;
464
465 F->sock_family = AI->ai_family;
466 }
467
468 /// apply flags after a local comm_open*() call;
469 /// returns new_socket or -1 on error
470 static int
471 comm_apply_flags(int new_socket,
472 Ip::Address &addr,
473 int flags,
474 struct addrinfo *AI)
475 {
476 assert(new_socket >= 0);
477 assert(AI);
478 const int sock_type = AI->ai_socktype;
479
480 if (!(flags & COMM_NOCLOEXEC))
481 commSetCloseOnExec(new_socket);
482
483 if ((flags & COMM_REUSEADDR))
484 commSetReuseAddr(new_socket);
485
486 if (addr.port() > (unsigned short) 0) {
487 #if _SQUID_WINDOWS_
488 if (sock_type != SOCK_DGRAM)
489 #endif
490 commSetNoLinger(new_socket);
491
492 if (opt_reuseaddr)
493 commSetReuseAddr(new_socket);
494 }
495
496 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
497 if ((flags & COMM_TRANSPARENT)) {
498 comm_set_transparent(new_socket);
499 }
500
501 if ( (flags & COMM_DOBIND) || addr.port() > 0 || !addr.isAnyAddr() ) {
502 if ( !(flags & COMM_DOBIND) && addr.isAnyAddr() )
503 debugs(5, DBG_IMPORTANT,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
504 if ( addr.isNoAddr() )
505 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");
506
507 if (commBind(new_socket, *AI) != COMM_OK) {
508 comm_close(new_socket);
509 return -1;
510 }
511 }
512
513 if (flags & COMM_NONBLOCKING)
514 if (commSetNonBlocking(new_socket) == COMM_ERROR) {
515 comm_close(new_socket);
516 return -1;
517 }
518
519 #ifdef TCP_NODELAY
520 if (sock_type == SOCK_STREAM)
521 commSetTcpNoDelay(new_socket);
522
523 #endif
524
525 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
526 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
527
528 return new_socket;
529 }
530
531 void
532 comm_import_opened(const Comm::ConnectionPointer &conn,
533 const char *note,
534 struct addrinfo *AI)
535 {
536 debugs(5, 2, HERE << conn);
537 assert(Comm::IsConnOpen(conn));
538 assert(AI);
539
540 comm_init_opened(conn, 0, 0, note, AI);
541
542 if (!(conn->flags & COMM_NOCLOEXEC))
543 fd_table[conn->fd].flags.close_on_exec = true;
544
545 if (conn->local.port() > (unsigned short) 0) {
546 #if _SQUID_WINDOWS_
547 if (AI->ai_socktype != SOCK_DGRAM)
548 #endif
549 fd_table[conn->fd].flags.nolinger = true;
550 }
551
552 if ((conn->flags & COMM_TRANSPARENT))
553 fd_table[conn->fd].flags.transparent = true;
554
555 if (conn->flags & COMM_NONBLOCKING)
556 fd_table[conn->fd].flags.nonblocking = true;
557
558 #ifdef TCP_NODELAY
559 if (AI->ai_socktype == SOCK_STREAM)
560 fd_table[conn->fd].flags.nodelay = true;
561 #endif
562
563 /* no fd_table[fd].flags. updates needed for these conditions:
564 * if ((flags & COMM_REUSEADDR)) ...
565 * if ((flags & COMM_DOBIND) ...) ...
566 */
567 }
568
569 // XXX: now that raw-FD timeouts are only unset for pipes and files, this SHOULD be a no-op
570 // with the handler already unset. Leaving this in place until that can be verified for all code paths.
571 void
572 commUnsetFdTimeout(int fd)
573 {
574 debugs(5, 3, HERE << "Remove timeout for FD " << fd);
575 assert(fd >= 0);
576 assert(fd < Squid_MaxFD);
577 fde *F = &fd_table[fd];
578 assert(F->flags.open);
579
580 F->timeoutHandler = NULL;
581 F->timeout = 0;
582 }
583
584 int
585 commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
586 {
587 debugs(5, 3, HERE << conn << " timeout " << timeout);
588 assert(Comm::IsConnOpen(conn));
589 assert(conn->fd < Squid_MaxFD);
590 fde *F = &fd_table[conn->fd];
591 assert(F->flags.open);
592
593 if (timeout < 0) {
594 F->timeoutHandler = NULL;
595 F->timeout = 0;
596 } else {
597 if (callback != NULL) {
598 typedef CommTimeoutCbParams Params;
599 Params &params = GetCommParams<Params>(callback);
600 params.conn = conn;
601 F->timeoutHandler = callback;
602 }
603
604 F->timeout = squid_curtime + (time_t) timeout;
605 }
606
607 return F->timeout;
608 }
609
610 int
611 commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
612 {
613 debugs(5, 3, HERE << "Remove timeout for " << conn);
614 AsyncCall::Pointer nil;
615 return commSetConnTimeout(conn, -1, nil);
616 }
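/*
 * Editor's illustrative sketch (not part of the original file): arming an
 * idle timeout on an open connection with commSetConnTimeout() above. It
 * assumes the CommTimeoutCbPtrFun dialer and CommTimeoutCbParams declared
 * in CommCalls.h; the handler name and the 30-second value are
 * hypothetical. Disabled with #if 0 so it cannot affect the build.
 */
#if 0
static void
exampleConnTimedOut(const CommTimeoutCbParams &io)
{
    debugs(5, 3, "example: timeout on " << io.conn);
    if (Comm::IsConnOpen(io.conn))
        io.conn->close();               // give up on the idle connection
}

static void
exampleArmTimeout(const Comm::ConnectionPointer &conn)
{
    AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "exampleConnTimedOut",
                                     CommTimeoutCbPtrFun(&exampleConnTimedOut, NULL));
    commSetConnTimeout(conn, 30, timeoutCall);  // fires in 30 seconds unless reset
}
#endif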
617
618 int
619 comm_connect_addr(int sock, const Ip::Address &address)
620 {
621 comm_err_t status = COMM_OK;
622 fde *F = &fd_table[sock];
623 int x = 0;
624 int err = 0;
625 socklen_t errlen;
626 struct addrinfo *AI = NULL;
627 PROF_start(comm_connect_addr);
628
629 assert(address.port() != 0);
630
631 debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");
632
633 /* Handle IPv6 over IPv4-only socket case.
634 * This case must presently be handled here since getAddrInfo() asserts on bad mappings.
635 * NP: because commResetFD is private to ConnStateData we have to return an error and
636 * trust it's handled properly.
637 */
638 if (F->sock_family == AF_INET && !address.isIPv4()) {
639 errno = ENETUNREACH;
640 return COMM_ERR_PROTOCOL;
641 }
642
643 /* Handle IPv4 over IPv6-only socket case.
644 * This case is presently handled here because it is a known case and it is
645 * uncertain what error the IPv6 stack will return in such a case. It's
646 * possible this will also be handled by the errno checks below after connect(),
647 * but that needs careful cross-platform verification, and verifying the address
648 * condition here is simple.
649 */
650 if (!F->local_addr.isIPv4() && address.isIPv4()) {
651 errno = ENETUNREACH;
652 return COMM_ERR_PROTOCOL;
653 }
654
655 address.getAddrInfo(AI, F->sock_family);
656
657 /* Establish connection. */
658 errno = 0;
659
660 if (!F->flags.called_connect) {
661 F->flags.called_connect = true;
662 ++ statCounter.syscalls.sock.connects;
663
664 x = connect(sock, AI->ai_addr, AI->ai_addrlen);
665
666 // XXX: ICAP code refuses callbacks during a pending comm_ call
667 // Async calls development will fix this.
668 if (x == 0) {
669 x = -1;
670 errno = EINPROGRESS;
671 }
672
673 if (x < 0) {
674 debugs(5,5, "comm_connect_addr: sock=" << sock << ", addrinfo( " <<
675 " flags=" << AI->ai_flags <<
676 ", family=" << AI->ai_family <<
677 ", socktype=" << AI->ai_socktype <<
678 ", protocol=" << AI->ai_protocol <<
679 ", &addr=" << AI->ai_addr <<
680 ", addrlen=" << AI->ai_addrlen <<
681 " )" );
682 debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerror());
683 debugs(14,9, "connecting to: " << address );
684 }
685 } else {
686 #if _SQUID_NEWSOS6_
687 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
688
689 connect(sock, AI->ai_addr, AI->ai_addrlen);
690
691 if (errno == EINVAL) {
692 errlen = sizeof(err);
693 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
694
695 if (x >= 0)
696 errno = x;
697 }
698
699 #else
700 errlen = sizeof(err);
701
702 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
703
704 if (x == 0)
705 errno = err;
706
707 #if _SQUID_SOLARIS_
708 /*
709 * Solaris 2.4's socket emulation doesn't allow you
710 * to determine the error from a failed non-blocking
711 * connect and just returns EPIPE. Create a fake
712 * error message for connect. -- fenner@parc.xerox.com
713 */
714 if (x < 0 && errno == EPIPE)
715 errno = ENOTCONN;
716
717 #endif
718 #endif
719
720 }
721
722 Ip::Address::FreeAddrInfo(AI);
723
724 PROF_stop(comm_connect_addr);
725
726 if (errno == 0 || errno == EISCONN)
727 status = COMM_OK;
728 else if (ignoreErrno(errno))
729 status = COMM_INPROGRESS;
730 else if (errno == EAFNOSUPPORT || errno == EINVAL)
731 return COMM_ERR_PROTOCOL;
732 else
733 return COMM_ERROR;
734
735 address.toStr(F->ipaddr, MAX_IPSTRLEN);
736
737 F->remote_port = address.port(); /* remote_port is HS */
738
739 if (status == COMM_OK) {
740 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connected to " << address);
741 } else if (status == COMM_INPROGRESS) {
742 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connection pending");
743 }
744
745 return status;
746 }
747
748 void
749 commCallCloseHandlers(int fd)
750 {
751 fde *F = &fd_table[fd];
752 debugs(5, 5, "commCallCloseHandlers: FD " << fd);
753
754 while (F->closeHandler != NULL) {
755 AsyncCall::Pointer call = F->closeHandler;
756 F->closeHandler = call->Next();
757 call->setNext(NULL);
758 // If the call is not canceled, schedule it for execution; otherwise ignore it
759 if (!call->canceled()) {
760 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
761 ScheduleCallHere(call);
762 }
763 }
764 }
765
766 #if LINGERING_CLOSE
767 static void
768 commLingerClose(int fd, void *unused)
769 {
770 LOCAL_ARRAY(char, buf, 1024);
771 int n;
772 n = FD_READ_METHOD(fd, buf, 1024);
773
774 if (n < 0)
775 debugs(5, 3, "commLingerClose: FD " << fd << " read: " << xstrerror());
776
777 comm_close(fd);
778 }
779
780 static void
781 commLingerTimeout(const FdeCbParams &params)
782 {
783 debugs(5, 3, "commLingerTimeout: FD " << params.fd);
784 comm_close(params.fd);
785 }
786
787 /*
788 * Inspired by apache
789 */
790 void
791 comm_lingering_close(int fd)
792 {
793 #if USE_OPENSSL
794 if (fd_table[fd].ssl)
795 ssl_shutdown_method(fd_table[fd].ssl);
796 #endif
797
798 if (shutdown(fd, 1) < 0) {
799 comm_close(fd);
800 return;
801 }
802
803 fd_note(fd, "lingering close");
804 AsyncCall::Pointer call = commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout, NULL));
805
806 debugs(5, 3, HERE << "FD " << fd << " timeout 10");
807 assert(fd_table[fd].flags.open);
808 if (call != NULL) {
809 typedef FdeCbParams Params;
810 Params &params = GetCommParams<Params>(call);
811 params.fd = fd;
812 fd_table[fd].timeoutHandler = call;
813 fd_table[fd].timeout = squid_curtime + static_cast<time_t>(10);
814 }
815
816 Comm::SetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
817 }
818
819 #endif
820
821 /**
822 * enable linger with time of 0 so that when the socket is
823 * closed, TCP generates a RESET
824 */
825 void
826 comm_reset_close(const Comm::ConnectionPointer &conn)
827 {
828 struct linger L;
829 L.l_onoff = 1;
830 L.l_linger = 0;
831
832 if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
833 debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerror());
834
835 conn->close();
836 }
837
838 // Legacy close function.
839 void
840 old_comm_reset_close(int fd)
841 {
842 struct linger L;
843 L.l_onoff = 1;
844 L.l_linger = 0;
845
846 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
847 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerror());
848
849 comm_close(fd);
850 }
851
852 #if USE_OPENSSL
853 void
854 commStartSslClose(const FdeCbParams &params)
855 {
856 assert(fd_table[params.fd].ssl);
857 ssl_shutdown_method(fd_table[params.fd].ssl);
858 }
859 #endif
860
861 void
862 comm_close_complete(const FdeCbParams &params)
863 {
864 #if USE_OPENSSL
865 fde *F = &fd_table[params.fd];
866
867 if (F->ssl) {
868 SSL_free(F->ssl);
869 F->ssl = NULL;
870 }
871
872 if (F->dynamicSslContext) {
873 SSL_CTX_free(F->dynamicSslContext);
874 F->dynamicSslContext = NULL;
875 }
876 #endif
877 fd_close(params.fd); /* update fdstat */
878 close(params.fd);
879
880 ++ statCounter.syscalls.sock.closes;
881
882 /* When one connection closes, give accept() a chance, if need be */
883 Comm::AcceptLimiter::Instance().kick();
884 }
885
886 /*
887 * Close the socket fd.
888 *
889 * + call write handlers with ERR_CLOSING
890 * + call read handlers with ERR_CLOSING
891 * + call closing handlers
892 *
893 * NOTE: COMM_ERR_CLOSING will NOT be called for CommReads sitting in a
894 * DeferredReadManager.
895 */
896 void
897 _comm_close(int fd, char const *file, int line)
898 {
899 debugs(5, 3, "comm_close: start closing FD " << fd);
900 assert(fd >= 0);
901 assert(fd < Squid_MaxFD);
902
903 fde *F = &fd_table[fd];
904 fdd_table[fd].close_file = file;
905 fdd_table[fd].close_line = line;
906
907 if (F->closing())
908 return;
909
910 /* XXX: is this obsolete behind F->closing() ? */
911 if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
912 return;
913
914 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
915 if (!isOpen(fd)) {
916 debugs(50, DBG_IMPORTANT, HERE << "BUG 3556: FD " << fd << " is not an open socket.");
917 // XXX: do we need to run close(fd) or fd_close(fd) here?
918 return;
919 }
920
921 assert(F->type != FD_FILE);
922
923 PROF_start(comm_close);
924
925 F->flags.close_request = true;
926
927 #if USE_OPENSSL
928 if (F->ssl) {
929 AsyncCall::Pointer startCall=commCbCall(5,4, "commStartSslClose",
930 FdeCbPtrFun(commStartSslClose, NULL));
931 FdeCbParams &startParams = GetCommParams<FdeCbParams>(startCall);
932 startParams.fd = fd;
933 ScheduleCallHere(startCall);
934 }
935 #endif
936
937 // a half-closed fd may lack a reader, so we stop monitoring explicitly
938 if (commHasHalfClosedMonitor(fd))
939 commStopHalfClosedMonitor(fd);
940 commUnsetFdTimeout(fd);
941
942 // notify read/write handlers after canceling select reservations, if any
943 if (COMMIO_FD_WRITECB(fd)->active()) {
944 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
945 COMMIO_FD_WRITECB(fd)->finish(COMM_ERR_CLOSING, errno);
946 }
947 if (COMMIO_FD_READCB(fd)->active()) {
948 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
949 COMMIO_FD_READCB(fd)->finish(COMM_ERR_CLOSING, errno);
950 }
951
952 #if USE_DELAY_POOLS
953 if (ClientInfo *clientInfo = F->clientInfo) {
954 if (clientInfo->selectWaiting) {
955 clientInfo->selectWaiting = false;
956 // kick queue or it will get stuck as commWriteHandle is not called
957 clientInfo->kickQuotaQueue();
958 }
959 }
960 #endif
961
962 commCallCloseHandlers(fd);
963
964 comm_empty_os_read_buffers(fd);
965
966 AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
967 FdeCbPtrFun(comm_close_complete, NULL));
968 FdeCbParams &completeParams = GetCommParams<FdeCbParams>(completeCall);
969 completeParams.fd = fd;
970 // must use async call to wait for all callbacks
971 // scheduled before comm_close() to finish
972 ScheduleCallHere(completeCall);
973
974 PROF_stop(comm_close);
975 }
976
977 /* Send a udp datagram to specified TO_ADDR. */
978 int
979 comm_udp_sendto(int fd,
980 const Ip::Address &to_addr,
981 const void *buf,
982 int len)
983 {
984 PROF_start(comm_udp_sendto);
985 ++ statCounter.syscalls.sock.sendtos;
986
987 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
988 " using FD " << fd << " using Port " << comm_local_port(fd) );
989
990 struct addrinfo *AI = NULL;
991 to_addr.getAddrInfo(AI, fd_table[fd].sock_family);
992 int x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
993 Ip::Address::FreeAddrInfo(AI);
994
995 PROF_stop(comm_udp_sendto);
996
997 if (x >= 0)
998 return x;
999
1000 #if _SQUID_LINUX_
1001
1002 if (ECONNREFUSED != errno)
1003 #endif
1004
1005 debugs(50, DBG_IMPORTANT, "comm_udp_sendto: FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerror());
1006
1007 return COMM_ERROR;
1008 }
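/*
 * Editor's illustrative sketch (not part of the original file): sending a
 * single datagram with comm_udp_sendto() above. The payload and error
 * handling are hypothetical; the destination address is assumed to be
 * prepared by the caller. Disabled with #if 0 so it cannot affect the
 * build.
 */
#if 0
static void
exampleUdpPing(int fd, const Ip::Address &to)
{
    static const char payload[] = "ping";
    if (comm_udp_sendto(fd, to, payload, sizeof(payload)) == COMM_ERROR)
        debugs(50, 2, "example: failed to send datagram to " << to);
}
#endif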
1009
1010 void
1011 comm_add_close_handler(int fd, CLCB * handler, void *data)
1012 {
1013 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
1014 handler << ", data=" << data);
1015
1016 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
1017 CommCloseCbPtrFun(handler, data));
1018 comm_add_close_handler(fd, call);
1019 }
1020
1021 void
1022 comm_add_close_handler(int fd, AsyncCall::Pointer &call)
1023 {
1024 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
1025
1026 /*TODO:Check for a similar scheduled AsyncCall*/
1027 // for (c = fd_table[fd].closeHandler; c; c = c->next)
1028 // assert(c->handler != handler || c->data != data);
1029
1030 call->setNext(fd_table[fd].closeHandler);
1031
1032 fd_table[fd].closeHandler = call;
1033 }
1034
1035 // remove function-based close handler
1036 void
1037 comm_remove_close_handler(int fd, CLCB * handler, void *data)
1038 {
1039 assert(isOpen(fd));
1040 /* Find handler in list */
1041 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
1042 handler << ", data=" << data);
1043
1044 AsyncCall::Pointer p, prev = NULL;
1045 for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
1046 typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
1047 const Call *call = dynamic_cast<const Call*>(p.getRaw());
1048 if (!call) // method callbacks have their own comm_remove_close_handler
1049 continue;
1050
1051 typedef CommCloseCbParams Params;
1052 const Params &params = GetCommParams<Params>(p);
1053 if (call->dialer.handler == handler && params.data == data)
1054 break; /* This is our handler */
1055 }
1056
1057 // comm_close removes all close handlers so our handler may be gone
1058 if (p != NULL) {
1059 p->dequeue(fd_table[fd].closeHandler, prev);
1060 p->cancel("comm_remove_close_handler");
1061 }
1062 }
1063
1064 // remove method-based close handler
1065 void
1066 comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1067 {
1068 assert(isOpen(fd));
1069 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1070
1071 // comm_close removes all close handlers so our handler may be gone
1072 AsyncCall::Pointer p, prev = NULL;
1073 for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());
1074
1075 if (p != NULL)
1076 p->dequeue(fd_table[fd].closeHandler, prev);
1077 call->cancel("comm_remove_close_handler");
1078 }
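/*
 * Editor's illustrative sketch (not part of the original file): registering
 * and later removing a plain-function close handler with the
 * comm_add_close_handler() / comm_remove_close_handler() pair above. It
 * assumes the CLCB handler signature and CommCloseCbParams from
 * CommCalls.h; the handler name is hypothetical. Disabled with #if 0 so it
 * cannot affect the build.
 */
#if 0
static void
exampleFdClosed(const CommCloseCbParams &params)
{
    debugs(5, 3, "example: FD " << params.fd << " is closing");
}

static void
exampleWatchFd(int fd)
{
    comm_add_close_handler(fd, &exampleFdClosed, NULL);
    // ... and when the cleanup is no longer wanted:
    comm_remove_close_handler(fd, &exampleFdClosed, NULL);
}
#endif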
1079
1080 static void
1081 commSetNoLinger(int fd)
1082 {
1083
1084 struct linger L;
1085 L.l_onoff = 0; /* off */
1086 L.l_linger = 0;
1087
1088 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1089 debugs(50, 0, "commSetNoLinger: FD " << fd << ": " << xstrerror());
1090
1091 fd_table[fd].flags.nolinger = true;
1092 }
1093
1094 static void
1095 commSetReuseAddr(int fd)
1096 {
1097 int on = 1;
1098
1099 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0)
1100 debugs(50, DBG_IMPORTANT, "commSetReuseAddr: FD " << fd << ": " << xstrerror());
1101 }
1102
1103 static void
1104 commSetTcpRcvbuf(int fd, int size)
1105 {
1106 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0)
1107 debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1108 if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0)
1109 debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1110 #ifdef TCP_WINDOW_CLAMP
1111 if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0)
1112 debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1113 #endif
1114 }
1115
1116 int
1117 commSetNonBlocking(int fd)
1118 {
1119 #if !_SQUID_WINDOWS_
1120 int flags;
1121 int dummy = 0;
1122 #endif
1123 #if _SQUID_WINDOWS_
1124 int nonblocking = TRUE;
1125
1126 #if _SQUID_CYGWIN_
1127 if (fd_table[fd].type != FD_PIPE) {
1128 #endif
1129
1130 if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
1131 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror() << " " << fd_table[fd].type);
1132 return COMM_ERROR;
1133 }
1134
1135 #if _SQUID_CYGWIN_
1136 } else {
1137 #endif
1138 #endif
1139 #if !_SQUID_WINDOWS_
1140
1141 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1142 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1143 return COMM_ERROR;
1144 }
1145
1146 if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
1147 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror());
1148 return COMM_ERROR;
1149 }
1150
1151 #endif
1152 #if _SQUID_CYGWIN_
1153 }
1154 #endif
1155 fd_table[fd].flags.nonblocking = true;
1156
1157 return 0;
1158 }
1159
1160 int
1161 commUnsetNonBlocking(int fd)
1162 {
1163 #if _SQUID_WINDOWS_
1164 int nonblocking = FALSE;
1165
1166 if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
1167 #else
1168 int flags;
1169 int dummy = 0;
1170
1171 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1172 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1173 return COMM_ERROR;
1174 }
1175
1176 if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
1177 #endif
1178 debugs(50, 0, "commUnsetNonBlocking: FD " << fd << ": " << xstrerror());
1179 return COMM_ERROR;
1180 }
1181
1182 fd_table[fd].flags.nonblocking = false;
1183 return 0;
1184 }
1185
1186 void
1187 commSetCloseOnExec(int fd)
1188 {
1189 #ifdef FD_CLOEXEC
1190 int flags;
1191 int dummy = 0;
1192
1193 if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1194 debugs(50, 0, "FD " << fd << ": fcntl F_GETFD: " << xstrerror());
1195 return;
1196 }
1197
1198 if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
1199 debugs(50, 0, "FD " << fd << ": set close-on-exec failed: " << xstrerror());
1200
1201 fd_table[fd].flags.close_on_exec = true;
1202
1203 #endif
1204 }
1205
1206 #ifdef TCP_NODELAY
1207 static void
1208 commSetTcpNoDelay(int fd)
1209 {
1210 int on = 1;
1211
1212 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0)
1213 debugs(50, DBG_IMPORTANT, "commSetTcpNoDelay: FD " << fd << ": " << xstrerror());
1214
1215 fd_table[fd].flags.nodelay = true;
1216 }
1217
1218 #endif
1219
1220 void
1221 commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1222 {
1223 int on = 1;
1224 #ifdef TCP_KEEPCNT
1225 if (timeout && interval) {
1226 int count = (timeout + interval - 1) / interval;
1227 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(on)) < 0)
1228 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1229 }
1230 #endif
1231 #ifdef TCP_KEEPIDLE
1232 if (idle) {
1233 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(on)) < 0)
1234 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1235 }
1236 #endif
1237 #ifdef TCP_KEEPINTVL
1238 if (interval) {
1239 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(on)) < 0)
1240 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1241 }
1242 #endif
1243 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0)
1244 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1245 }
1246
1247 void
1248 comm_init(void)
1249 {
1250 fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
1251 fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));
1252
1253 /* make sure the accept() socket FIFO delay queue exists */
1254 Comm::AcceptLimiter::Instance();
1255
1256 // make sure the IO pending callback table exists
1257 Comm::CallbackTableInit();
1258
1259 /* XXX account fd_table */
1260 /* Keep a few file descriptors free so that we don't run out of FD's
1261 * after accepting a client but before it opens a socket or a file.
1262 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1263 RESERVED_FD = min(100, Squid_MaxFD / 4);
1264
1265 TheHalfClosed = new DescriptorSet;
1266
1267 /* setup the select loop module */
1268 Comm::SelectLoopInit();
1269 }
1270
1271 void
1272 comm_exit(void)
1273 {
1274 delete TheHalfClosed;
1275 TheHalfClosed = NULL;
1276
1277 safe_free(fd_table);
1278 safe_free(fdd_table);
1279 Comm::CallbackTableDestruct();
1280 }
1281
1282 #if USE_DELAY_POOLS
1283 // called when the queue is done waiting for the client bucket to fill
1284 void
1285 commHandleWriteHelper(void * data)
1286 {
1287 CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
1288 assert(queue);
1289
1290 ClientInfo *clientInfo = queue->clientInfo;
1291 // ClientInfo invalidates the queue if freed, so if we got here through
1292 // eventAdd cbdata protections, everything should be valid and consistent
1293 assert(clientInfo);
1294 assert(clientInfo->hasQueue());
1295 assert(clientInfo->hasQueue(queue));
1296 assert(!clientInfo->selectWaiting);
1297 assert(clientInfo->eventWaiting);
1298 clientInfo->eventWaiting = false;
1299
1300 do {
1301 // check that the head descriptor is still relevant
1302 const int head = clientInfo->quotaPeekFd();
1303 Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);
1304
1305 if (fd_table[head].clientInfo == clientInfo &&
1306 clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
1307 !fd_table[head].closing()) {
1308
1309 // wait for the head descriptor to become ready for writing
1310 Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
1311 clientInfo->selectWaiting = true;
1312 return;
1313 }
1314
1315 clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
1316 // and continue looking for a relevant one
1317 } while (clientInfo->hasQueue());
1318
1319 debugs(77,3, HERE << "emptied queue");
1320 }
1321
1322 bool
1323 ClientInfo::hasQueue() const
1324 {
1325 assert(quotaQueue);
1326 return !quotaQueue->empty();
1327 }
1328
1329 bool
1330 ClientInfo::hasQueue(const CommQuotaQueue *q) const
1331 {
1332 assert(quotaQueue);
1333 return quotaQueue == q;
1334 }
1335
1336 /// returns the first descriptor to be dequeued
1337 int
1338 ClientInfo::quotaPeekFd() const
1339 {
1340 assert(quotaQueue);
1341 return quotaQueue->front();
1342 }
1343
1344 /// returns the reservation ID of the first descriptor to be dequeued
1345 unsigned int
1346 ClientInfo::quotaPeekReserv() const
1347 {
1348 assert(quotaQueue);
1349 return quotaQueue->outs + 1;
1350 }
1351
1352 /// queues a given fd, creating the queue if necessary; returns reservation ID
1353 unsigned int
1354 ClientInfo::quotaEnqueue(int fd)
1355 {
1356 assert(quotaQueue);
1357 return quotaQueue->enqueue(fd);
1358 }
1359
1360 /// removes queue head
1361 void
1362 ClientInfo::quotaDequeue()
1363 {
1364 assert(quotaQueue);
1365 quotaQueue->dequeue();
1366 }
1367
1368 void
1369 ClientInfo::kickQuotaQueue()
1370 {
1371 if (!eventWaiting && !selectWaiting && hasQueue()) {
1372 // wait at least a second if the bucket is empty
1373 const double delay = (bucketSize < 1.0) ? 1.0 : 0.0;
1374 eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1375 quotaQueue, delay, 0, true);
1376 eventWaiting = true;
1377 }
1378 }
1379
1380 /// calculates how much to write for a single dequeued client
1381 int
1382 ClientInfo::quotaForDequed()
1383 {
1384 /* If we have multiple clients and give full bucketSize to each client then
1385 * clt1 may often get a lot more because clt1->clt2 time distance in the
1386 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1387 * We divide quota evenly to be more fair. */
1388
1389 if (!rationedCount) {
1390 rationedCount = quotaQueue->size() + 1;
1391
1392 // The delay in ration recalculation _temporarily_ deprives clients of
1393 // bytes that should have trickled in while rationedCount was positive.
1394 refillBucket();
1395
1396 // Rounding errors do not accumulate here, but we round down to avoid
1397 // negative bucket sizes after write with rationedCount=1.
1398 rationedQuota = static_cast<int>(floor(bucketSize/rationedCount));
1399 debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
1400 '*' << rationedCount);
1401 }
1402
1403 --rationedCount;
1404 debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
1405 " rations remaining: " << rationedCount);
1406
1407 // update 'last seen' time to prevent clientdb GC from dropping us
1408 last_seen = squid_curtime;
1409 return rationedQuota;
1410 }
1411
1412 ///< adds bytes to the quota bucket based on the rate and passed time
1413 void
1414 ClientInfo::refillBucket()
1415 {
1416 // all these times are in seconds, with double precision
1417 const double currTime = current_dtime;
1418 const double timePassed = currTime - prevTime;
1419
1420 // Calculate allowance for the time passed. Use double to avoid
1421 // accumulating rounding errors for small intervals. For example, always
1422 // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
1423 const double gain = timePassed * writeSpeedLimit;
1424
1425 debugs(77,5, HERE << currTime << " clt" << (const char*)hash.key << ": " <<
1426 bucketSize << " + (" << timePassed << " * " << writeSpeedLimit <<
1427 " = " << gain << ')');
1428
1429 // to further combat error accumulation during micro updates,
1430 // quit before updating time if we cannot add at least one byte
1431 if (gain < 1.0)
1432 return;
1433
1434 prevTime = currTime;
1435
1436 // for "first" connections, drain initial fat before refilling but keep
1437 // updating prevTime to avoid bursts after the fat is gone
1438 if (bucketSize > bucketSizeLimit) {
1439 debugs(77,4, HERE << "not refilling while draining initial fat");
1440 return;
1441 }
1442
1443 bucketSize += gain;
1444
1445 // obey quota limits
1446 if (bucketSize > bucketSizeLimit)
1447 bucketSize = bucketSizeLimit;
1448 }
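/*
 * Editor's note (illustrative arithmetic, not from the original file): with
 * writeSpeedLimit = 1000 bytes/sec and timePassed = 0.25 sec the refill gain
 * is 0.25 * 1000 = 250 bytes; with timePassed = 0.0005 sec the gain is only
 * 0.5 bytes, so refillBucket() returns before updating prevTime and the
 * half-byte is counted again on the next call, avoiding the rounding-error
 * accumulation described above.
 */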
1449
1450 void
1451 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
1452 {
1453 debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
1454 " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
1455 " highwatermark=" << aHighWatermark);
1456
1457 // set or possibly update traffic shaping parameters
1458 writeLimitingActive = true;
1459 writeSpeedLimit = aWriteSpeedLimit;
1460 bucketSizeLimit = aHighWatermark;
1461
1462 // but some members should only be set once for a newly activated bucket
1463 if (firstTimeConnection) {
1464 firstTimeConnection = false;
1465
1466 assert(!selectWaiting);
1467 assert(!quotaQueue);
1468 quotaQueue = new CommQuotaQueue(this);
1469
1470 bucketSize = anInitialBurst;
1471 prevTime = current_dtime;
1472 }
1473 }
1474
1475 CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1476 ins(0), outs(0)
1477 {
1478 assert(clientInfo);
1479 }
1480
1481 CommQuotaQueue::~CommQuotaQueue()
1482 {
1483 assert(!clientInfo); // ClientInfo should clear this before destroying us
1484 }
1485
1486 /// places the given fd at the end of the queue; returns reservation ID
1487 unsigned int
1488 CommQuotaQueue::enqueue(int fd)
1489 {
1490 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1491 ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1492 fds.push_back(fd);
1493 return ++ins;
1494 }
1495
1496 /// removes queue head
1497 void
1498 CommQuotaQueue::dequeue()
1499 {
1500 assert(!fds.empty());
1501 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1502 ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1503 fds.size());
1504 fds.pop_front();
1505 ++outs;
1506 }
1507 #endif
1508
1509 /*
1510 * hm, this might be too general-purpose for all the places we'd
1511 * like to use it.
1512 */
1513 int
1514 ignoreErrno(int ierrno)
1515 {
1516 switch (ierrno) {
1517
1518 case EINPROGRESS:
1519
1520 case EWOULDBLOCK:
1521 #if EAGAIN != EWOULDBLOCK
1522
1523 case EAGAIN:
1524 #endif
1525
1526 case EALREADY:
1527
1528 case EINTR:
1529 #ifdef ERESTART
1530
1531 case ERESTART:
1532 #endif
1533
1534 return 1;
1535
1536 default:
1537 return 0;
1538 }
1539
1540 /* NOTREACHED */
1541 }
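/*
 * Editor's illustrative sketch (not part of the original file): the typical
 * ignoreErrno() pattern after a failed non-blocking read. The retry comment
 * and the decision to comm_close() on hard errors are hypothetical
 * placeholders. Disabled with #if 0 so it cannot affect the build.
 */
#if 0
static void
exampleReadAttempt(int fd, char *buf, size_t bufSize)
{
    const int n = FD_READ_METHOD(fd, buf, bufSize);
    if (n < 0 && ignoreErrno(errno)) {
        // transient condition (EINPROGRESS, EAGAIN, EINTR, ...):
        // keep the descriptor and retry on the next select() pass
    } else if (n < 0) {
        comm_close(fd);                 // hard error: give up on this FD
    }
}
#endif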
1542
1543 void
1544 commCloseAllSockets(void)
1545 {
1546 int fd;
1547 fde *F = NULL;
1548
1549 for (fd = 0; fd <= Biggest_FD; ++fd) {
1550 F = &fd_table[fd];
1551
1552 if (!F->flags.open)
1553 continue;
1554
1555 if (F->type != FD_SOCKET)
1556 continue;
1557
1558 if (F->flags.ipc) /* don't close inter-process sockets */
1559 continue;
1560
1561 if (F->timeoutHandler != NULL) {
1562 AsyncCall::Pointer callback = F->timeoutHandler;
1563 F->timeoutHandler = NULL;
1564 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1565 ScheduleCallHere(callback);
1566 } else {
1567 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1568 old_comm_reset_close(fd);
1569 }
1570 }
1571 }
1572
1573 static bool
1574 AlreadyTimedOut(fde *F)
1575 {
1576 if (!F->flags.open)
1577 return true;
1578
1579 if (F->timeout == 0)
1580 return true;
1581
1582 if (F->timeout > squid_curtime)
1583 return true;
1584
1585 return false;
1586 }
1587
1588 static bool
1589 writeTimedOut(int fd)
1590 {
1591 if (!COMMIO_FD_WRITECB(fd)->active())
1592 return false;
1593
1594 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1595 return false;
1596
1597 return true;
1598 }
1599
1600 void
1601 checkTimeouts(void)
1602 {
1603 int fd;
1604 fde *F = NULL;
1605 AsyncCall::Pointer callback;
1606
1607 for (fd = 0; fd <= Biggest_FD; ++fd) {
1608 F = &fd_table[fd];
1609
1610 if (writeTimedOut(fd)) {
1611 // We have an active write callback and we are timed out
1612 debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
1613 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1614 COMMIO_FD_WRITECB(fd)->finish(COMM_ERROR, ETIMEDOUT);
1615 } else if (AlreadyTimedOut(F))
1616 continue;
1617
1618 debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");
1619
1620 if (F->timeoutHandler != NULL) {
1621 debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
1622 callback = F->timeoutHandler;
1623 F->timeoutHandler = NULL;
1624 ScheduleCallHere(callback);
1625 } else {
1626 debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
1627 comm_close(fd);
1628 }
1629 }
1630 }
1631
1632 /// Start waiting for a possibly half-closed connection to close
1633 // by scheduling a read callback to a monitoring handler that
1634 // will close the connection on read errors.
1635 void
1636 commStartHalfClosedMonitor(int fd)
1637 {
1638 debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
1639 assert(isOpen(fd) && !commHasHalfClosedMonitor(fd));
1640 (void)TheHalfClosed->add(fd); // could also assert the result
1641 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1642 }
1643
1644 static
1645 void
1646 commPlanHalfClosedCheck()
1647 {
1648 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1649 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1650 WillCheckHalfClosed = true;
1651 }
1652 }
1653
1654 /// iterates over all descriptors that may need half-closed tests and
1655 /// calls comm_read for those that do; re-schedules the check if needed
1656 static
1657 void
1658 commHalfClosedCheck(void *)
1659 {
1660 debugs(5, 5, HERE << "checking " << *TheHalfClosed);
1661
1662 typedef DescriptorSet::const_iterator DSCI;
1663 const DSCI end = TheHalfClosed->end();
1664 for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
1665 Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
1666 c->fd = *i;
1667 if (!fd_table[c->fd].halfClosedReader) { // not reading already
1668 AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
1669 CommIoCbPtrFun(&commHalfClosedReader, NULL));
1670 Comm::Read(c, call);
1671 fd_table[c->fd].halfClosedReader = call;
1672 } else
1673 c->fd = -1; // XXX: temporary. prevent c replacement erase closing listed FD
1674 }
1675
1676 WillCheckHalfClosed = false; // as far as we know
1677 commPlanHalfClosedCheck(); // may need to check again
1678 }
1679
1680 /// checks whether we are waiting for possibly half-closed connection to close
1681 // We are monitoring if the read handler for the fd is the monitoring handler.
1682 bool
1683 commHasHalfClosedMonitor(int fd)
1684 {
1685 return TheHalfClosed->has(fd);
1686 }
1687
1688 /// stop waiting for possibly half-closed connection to close
1689 void
1690 commStopHalfClosedMonitor(int const fd)
1691 {
1692 debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
1693
1694 // cancel the read if one was scheduled
1695 AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1696 if (reader != NULL)
1697 Comm::ReadCancel(fd, reader);
1698 fd_table[fd].halfClosedReader = NULL;
1699
1700 TheHalfClosed->del(fd);
1701 }
1702
1703 /// I/O handler for the possibly half-closed connection monitoring code
1704 static void
1705 commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, comm_err_t flag, int, void *)
1706 {
1707 // there cannot be more data coming in on half-closed connections
1708 assert(size == 0);
1709 assert(conn != NULL);
1710 assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read
1711
1712 fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now
1713
1714 // nothing to do if fd is being closed
1715 if (flag == COMM_ERR_CLOSING)
1716 return;
1717
1718 // if read failed, close the connection
1719 if (flag != COMM_OK) {
1720 debugs(5, 3, HERE << "closing " << conn);
1721 conn->close();
1722 return;
1723 }
1724
1725 // continue waiting for close or error
1726 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1727 }
1728
1729 CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}
1730
1731 CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
1732 : conn(c), buf(buf_), len(len_), callback(callback_) {}
1733
1734 DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}
1735
1736 DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
1737
1738 DeferredReadManager::~DeferredReadManager()
1739 {
1740 flushReads();
1741 assert (deferredReads.empty());
1742 }
1743
1744 /* explicit instantiation required for some systems */
1745
1746 /// \cond AUTODOCS_IGNORE
1747 template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
1748 /// \endcond
1749
1750 void
1751 DeferredReadManager::delayRead(DeferredRead const &aRead)
1752 {
1753 debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
1754 CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);
1755
1756 // We have to use a global function as a closer and point to temp
1757 // instead of "this" because DeferredReadManager is not a job and
1758 // is not even cbdata protected
1759 // XXX: and yet we use cbdata protection functions on it??
1760 AsyncCall::Pointer closer = commCbCall(5,4,
1761 "DeferredReadManager::CloseHandler",
1762 CommCloseCbPtrFun(&CloseHandler, temp));
1763 comm_add_close_handler(aRead.theRead.conn->fd, closer);
1764 temp->element.closer = closer; // remember so that we can cancel
1765 }
1766
1767 void
1768 DeferredReadManager::CloseHandler(const CommCloseCbParams &params)
1769 {
1770 if (!cbdataReferenceValid(params.data))
1771 return;
1772
1773 CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)params.data;
1774
1775 temp->element.closer = NULL;
1776 temp->element.markCancelled();
1777 }
1778
1779 DeferredRead
1780 DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
1781 {
1782 assert (!deferredReads.empty());
1783
1784 DeferredRead &read = deferredReads.head->element;
1785
1786 // NOTE: at this point the connection has been paused/stalled for an unknown
1787 // amount of time. We must re-validate that it is active and usable.
1788
1789 // If the connection has been closed already, cancel this read.
1790 if (!Comm::IsConnOpen(read.theRead.conn)) {
1791 if (read.closer != NULL) {
1792 read.closer->cancel("Connection closed before.");
1793 read.closer = NULL;
1794 }
1795 read.markCancelled();
1796 }
1797
1798 if (!read.cancelled) {
1799 comm_remove_close_handler(read.theRead.conn->fd, read.closer);
1800 read.closer = NULL;
1801 }
1802
1803 DeferredRead result = deferredReads.pop_front();
1804
1805 return result;
1806 }
1807
1808 void
1809 DeferredReadManager::kickReads(int const count)
1810 {
1811 /* if we had CbDataList::size() we could consolidate this and flushReads */
1812
1813 if (count < 1) {
1814 flushReads();
1815 return;
1816 }
1817
1818 size_t remaining = count;
1819
1820 while (!deferredReads.empty() && remaining) {
1821 DeferredRead aRead = popHead(deferredReads);
1822 kickARead(aRead);
1823
1824 if (!aRead.cancelled)
1825 --remaining;
1826 }
1827 }
1828
1829 void
1830 DeferredReadManager::flushReads()
1831 {
1832 CbDataListContainer<DeferredRead> reads;
1833 reads = deferredReads;
1834 deferredReads = CbDataListContainer<DeferredRead>();
1835
1836 // XXX: For fairness this SHOULD randomize the order
1837 while (!reads.empty()) {
1838 DeferredRead aRead = popHead(reads);
1839 kickARead(aRead);
1840 }
1841 }
1842
1843 void
1844 DeferredReadManager::kickARead(DeferredRead const &aRead)
1845 {
1846 if (aRead.cancelled)
1847 return;
1848
1849 if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
1850 return;
1851
1852 debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);
1853
1854 aRead.theReader(aRead.theContext, aRead.theRead);
1855 }
1856
1857 void
1858 DeferredRead::markCancelled()
1859 {
1860 cancelled = true;
1861 }
1862
1863 int
1864 CommSelectEngine::checkEvents(int timeout)
1865 {
1866 static time_t last_timeout = 0;
1867
1868 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
1869 if (squid_curtime > last_timeout) {
1870 last_timeout = squid_curtime;
1871 checkTimeouts();
1872 }
1873
1874 switch (Comm::DoSelect(timeout)) {
1875
1876 case COMM_OK:
1877
1878 case COMM_TIMEOUT:
1879 return 0;
1880
1881 case COMM_IDLE:
1882
1883 case COMM_SHUTDOWN:
1884 return EVENT_IDLE;
1885
1886 case COMM_ERROR:
1887 return EVENT_ERROR;
1888
1889 default:
1890 fatal_dump("comm.cc: Internal error -- this should never happen.");
1891 return EVENT_ERROR;
1892 };
1893 }
1894
1895 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
1896 int
1897 comm_open_uds(int sock_type,
1898 int proto,
1899 struct sockaddr_un* addr,
1900 int flags)
1901 {
1902 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
1903
1904 int new_socket;
1905
1906 PROF_start(comm_open);
1907 /* Create socket for accepting new connections. */
1908 ++ statCounter.syscalls.sock.sockets;
1909
1910 /* Setup the socket addrinfo details for use */
1911 struct addrinfo AI;
1912 AI.ai_flags = 0;
1913 AI.ai_family = PF_UNIX;
1914 AI.ai_socktype = sock_type;
1915 AI.ai_protocol = proto;
1916 AI.ai_addrlen = SUN_LEN(addr);
1917 AI.ai_addr = (sockaddr*)addr;
1918 AI.ai_canonname = NULL;
1919 AI.ai_next = NULL;
1920
1921 debugs(50, 3, HERE << "Attempt open socket for: " << addr->sun_path);
1922
1923 if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
1924 /* Increase the number of reserved fd's if calls to socket()
1925 * are failing because the open file table is full. This
1926 * limits the number of simultaneous clients */
1927
1928 if (limitError(errno)) {
1929 debugs(50, DBG_IMPORTANT, HERE << "socket failure: " << xstrerror());
1930 fdAdjustReserved();
1931 } else {
1932 debugs(50, DBG_CRITICAL, HERE << "socket failure: " << xstrerror());
1933 }
1934
1935 PROF_stop(comm_open);
1936 return -1;
1937 }
1938
1939 debugs(50, 3, "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);
1940
1941 /* update fdstat */
1942 debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");
1943
1944 assert(!isOpen(new_socket));
1945 fd_open(new_socket, FD_MSGHDR, NULL);
1946
1947 fdd_table[new_socket].close_file = NULL;
1948
1949 fdd_table[new_socket].close_line = 0;
1950
1951 fd_table[new_socket].sock_family = AI.ai_family;
1952
1953 if (!(flags & COMM_NOCLOEXEC))
1954 commSetCloseOnExec(new_socket);
1955
1956 if (flags & COMM_REUSEADDR)
1957 commSetReuseAddr(new_socket);
1958
1959 if (flags & COMM_NONBLOCKING) {
1960 if (commSetNonBlocking(new_socket) != COMM_OK) {
1961 comm_close(new_socket);
1962 PROF_stop(comm_open);
1963 return -1;
1964 }
1965 }
1966
1967 if (flags & COMM_DOBIND) {
1968 if (commBind(new_socket, AI) != COMM_OK) {
1969 comm_close(new_socket);
1970 PROF_stop(comm_open);
1971 return -1;
1972 }
1973 }
1974
1975 #ifdef TCP_NODELAY
1976 if (sock_type == SOCK_STREAM)
1977 commSetTcpNoDelay(new_socket);
1978
1979 #endif
1980
1981 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
1982 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
1983
1984 PROF_stop(comm_open);
1985
1986 return new_socket;
1987 }
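/*
 * Editor's illustrative sketch (not part of the original file): opening a
 * bound, non-blocking UDS datagram socket with comm_open_uds() above. The
 * socket path is hypothetical and the snippet assumes the xstrncpy() helper
 * from Squid's compat library. Disabled with #if 0 so it cannot affect the
 * build.
 */
#if 0
static int
exampleOpenUds()
{
    struct sockaddr_un addr;
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    xstrncpy(addr.sun_path, "/var/run/example.sock", sizeof(addr.sun_path));

    return comm_open_uds(SOCK_DGRAM, 0, &addr, COMM_NONBLOCKING | COMM_DOBIND);
}
#endif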