1/*
2 * DEBUG: section 05 Socket Functions
3 * AUTHOR: Harvest Derived
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 *
32 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
33 */
34
35#include "squid.h"
36#include "base/AsyncCall.h"
37#include "comm.h"
38#include "ClientInfo.h"
39#include "CommCalls.h"
40#include "comm/AcceptLimiter.h"
41#include "comm/comm_internal.h"
42#include "comm/Connection.h"
43#include "comm/IoCallback.h"
44#include "comm/Loops.h"
45#include "comm/Write.h"
46#include "comm/TcpAcceptor.h"
47#include "CommRead.h"
48#include "compat/cmsg.h"
49#include "DescriptorSet.h"
50#include "event.h"
51#include "fde.h"
52#include "globals.h"
53#include "icmp/net_db.h"
54#include "ip/Address.h"
55#include "ip/Intercept.h"
56#include "ip/QosConfig.h"
57#include "ip/tools.h"
58#include "MemBuf.h"
59#include "pconn.h"
60#include "protos.h"
61#include "profiler/Profiler.h"
62#include "SquidTime.h"
63#include "StatCounters.h"
64#include "StoreIOBuffer.h"
65#if USE_SSL
66#include "ssl/support.h"
67#endif
68
69#include "cbdata.h"
70#if _SQUID_CYGWIN_
71#include <sys/ioctl.h>
72#endif
73#if HAVE_NETINET_TCP_H
74#include <netinet/tcp.h>
75#endif
76#if HAVE_SYS_UN_H
77#include <sys/un.h>
78#endif
79#if HAVE_MATH_H
80#include <math.h>
81#endif
82#if HAVE_ERRNO_H
83#include <errno.h>
84#endif
85
86/*
87 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
88 */
89
90static void commStopHalfClosedMonitor(int fd);
91static IOCB commHalfClosedReader;
92static void comm_init_opened(const Comm::ConnectionPointer &conn, tos_t tos, nfmark_t nfmark, const char *note, struct addrinfo *AI);
93static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
94
95#if USE_DELAY_POOLS
96CBDATA_CLASS_INIT(CommQuotaQueue);
97
98static void commHandleWriteHelper(void * data);
99#endif
100
101/* STATIC */
102
103static DescriptorSet *TheHalfClosed = NULL; ///< the set of half-closed FDs
104static bool WillCheckHalfClosed = false; ///< true if check is scheduled
105static EVH commHalfClosedCheck;
106static void commPlanHalfClosedCheck();
107
108static comm_err_t commBind(int s, struct addrinfo &);
109static void commSetReuseAddr(int);
110static void commSetNoLinger(int);
111#ifdef TCP_NODELAY
112static void commSetTcpNoDelay(int);
113#endif
114static void commSetTcpRcvbuf(int, int);
115
116static MemAllocator *conn_close_pool = NULL;
117fd_debug_t *fdd_table = NULL;
118
119bool
120isOpen(const int fd)
121{
122 return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
123}
124
125/**
126 * Attempt a read
127 *
128 * If the read attempt succeeds or fails, call the callback.
129 * Else, wait for another IO notification.
130 */
131void
132commHandleRead(int fd, void *data)
133{
134 Comm::IoCallback *ccb = (Comm::IoCallback *) data;
135
136 assert(data == COMMIO_FD_READCB(fd));
137 assert(ccb->active());
138 /* Attempt a read */
139 ++ statCounter.syscalls.sock.reads;
140 errno = 0;
141 int retval;
142 retval = FD_READ_METHOD(fd, ccb->buf, ccb->size);
143 debugs(5, 3, "comm_read_try: FD " << fd << ", size " << ccb->size << ", retval " << retval << ", errno " << errno);
144
145 if (retval < 0 && !ignoreErrno(errno)) {
146 debugs(5, 3, "comm_read_try: scheduling COMM_ERROR");
147 ccb->offset = 0;
148 ccb->finish(COMM_ERROR, errno);
149 return;
150 };
151
152 /* See if we read anything */
153 /* Note - read 0 == socket EOF, which is a valid read */
154 if (retval >= 0) {
155 fd_bytes(fd, retval, FD_READ);
156 ccb->offset = retval;
157 ccb->finish(COMM_OK, errno);
158 return;
159 }
160
161 /* Nope, register for some more IO */
162 Comm::SetSelect(fd, COMM_SELECT_READ, commHandleRead, data, 0);
163}
164
165/**
166 * Queue a read. handler/handler_data are called when the read
167 * completes, on error, or on file descriptor close.
168 */
169void
170comm_read(const Comm::ConnectionPointer &conn, char *buf, int size, AsyncCall::Pointer &callback)
171{
172 debugs(5, 5, "comm_read, queueing read for " << conn << "; asyncCall " << callback);
173
174 /* Make sure we are open and not closing */
175 assert(Comm::IsConnOpen(conn));
176 assert(!fd_table[conn->fd].closing());
177 Comm::IoCallback *ccb = COMMIO_FD_READCB(conn->fd);
178
179 // Make sure we are either not reading or just passively monitoring.
180 // Active/passive conflicts are OK and simply cancel passive monitoring.
181 if (ccb->active()) {
182 // if the assertion below fails, we have an active comm_read conflict
183 assert(fd_table[conn->fd].halfClosedReader != NULL);
184 commStopHalfClosedMonitor(conn->fd);
185 assert(!ccb->active());
186 }
187 ccb->conn = conn;
188
189 /* Queue the read */
190 ccb->setCallback(Comm::IOCB_READ, callback, (char *)buf, NULL, size);
191 Comm::SetSelect(conn->fd, COMM_SELECT_READ, commHandleRead, ccb, 0);
192}
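/*
 * Editor's sketch (illustrative only): how a hypothetical caller might queue
 * a read. The handler signature mirrors commHalfClosedReader() further below;
 * myReadHandler, myBuf, myBufSize and myData are made-up names, not part of
 * this file.
 *
 *   static void
 *   myReadHandler(const Comm::ConnectionPointer &conn, char *buf, size_t size,
 *                 comm_err_t flag, int xerrno, void *data)
 *   {
 *       if (flag == COMM_ERR_CLOSING)
 *           return;                  // descriptor is going away; do nothing
 *       if (flag != COMM_OK || size == 0) {
 *           conn->close();           // I/O error or EOF
 *           return;
 *       }
 *       // consume size bytes from buf, then call comm_read() again if needed
 *   }
 *
 *   AsyncCall::Pointer call = commCbCall(5, 4, "myReadHandler",
 *                                        CommIoCbPtrFun(&myReadHandler, myData));
 *   comm_read(conn, myBuf, myBufSize, call);
 */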
193
194/**
195 * Empty the read buffers
196 *
197 * This is a magical routine that empties the read buffers.
198 * On some platforms (Linux), if the socket buffer still has data in it
199 * when you call close(), the socket will hang and take quite a while
200 * to time out.
201 */
202static void
203comm_empty_os_read_buffers(int fd)
204{
205#if _SQUID_LINUX_
206 /* prevent those nasty RST packets */
207 char buf[SQUID_TCP_SO_RCVBUF];
208
209 if (fd_table[fd].flags.nonblocking == 1) {
210 while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
211 }
212#endif
213}
214
215/**
216 * Return whether the FD has a pending completed callback.
217 * NP: does not work.
218 */
219int
220comm_has_pending_read_callback(int fd)
221{
222 assert(isOpen(fd));
223 // XXX: We do not know whether there is a read callback scheduled.
224 // This is used for pconn management that should probably be more
225 // tightly integrated into comm to minimize the chance that a
226 // closing pconn socket will be used for a new transaction.
227 return false;
228}
229
230// Does comm check this fd for read readiness?
231// Note that when comm is not monitoring, there can be a pending callback
232// call, which may resume comm monitoring once fired.
233bool
234comm_monitors_read(int fd)
235{
236 assert(isOpen(fd));
237 // Being active is usually the same as monitoring because we always
238 // start monitoring the FD when we configure Comm::IoCallback for I/O
239 // and we usually configure Comm::IoCallback for I/O when we start
240 // monitoring an FD for reading.
241 return COMMIO_FD_READCB(fd)->active();
242}
243
244/**
245 * Cancel a pending read. Assert that we have the right parameters,
246 * and that there are no pending read events!
247 *
248 * XXX: We do not assert that there are no pending read events and
249 * with async calls it becomes even more difficult.
250 * The whole interface should be reworked to do callback->cancel()
251 * instead of searching for places where the callback may be stored and
252 * updating the state of those places.
253 *
254 * AHC Don't call the comm handlers?
255 */
256void
257comm_read_cancel(int fd, IOCB *callback, void *data)
258{
259 if (!isOpen(fd)) {
260 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " closed");
261 return;
262 }
263
264 Comm::IoCallback *cb = COMMIO_FD_READCB(fd);
265 // TODO: is "active" == "monitors FD"?
266 if (!cb->active()) {
267 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " inactive");
268 return;
269 }
270
271 typedef CommCbFunPtrCallT<CommIoCbPtrFun> Call;
272 Call *call = dynamic_cast<Call*>(cb->callback.getRaw());
273 if (!call) {
274 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " lacks callback");
275 return;
276 }
277
278 call->cancel("old comm_read_cancel");
279
280 typedef CommIoCbParams Params;
281 const Params &params = GetCommParams<Params>(cb->callback);
282
283 /* Ok, we can be reasonably sure we won't lose any data here! */
284 assert(call->dialer.handler == callback);
285 assert(params.data == data);
286
287 /* Delete the callback */
288 cb->cancel("old comm_read_cancel");
289
290 /* And the IO event */
291 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
292}
293
294void
295comm_read_cancel(int fd, AsyncCall::Pointer &callback)
296{
297 callback->cancel("comm_read_cancel");
298
299 if (!isOpen(fd)) {
300 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " closed");
301 return;
302 }
303
304 Comm::IoCallback *cb = COMMIO_FD_READCB(fd);
305
306 if (!cb->active()) {
307 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " inactive");
308 return;
309 }
310
311 AsyncCall::Pointer call = cb->callback;
312 assert(call != NULL); // XXX: should never fail (active() checks for callback==NULL)
313
314 /* Ok, we can be reasonably sure we won't lose any data here! */
315 assert(call == callback);
316
317 /* Delete the callback */
318 cb->cancel("comm_read_cancel");
319
320 /* And the IO event */
321 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
322}
323
324/**
325 * synchronous wrapper around udp socket functions
326 */
327int
328comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
329{
330 ++ statCounter.syscalls.sock.recvfroms;
331 int x = 0;
332 struct addrinfo *AI = NULL;
333
334 debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
335
336 assert( NULL == AI );
337
338 from.InitAddrInfo(AI);
339
340 x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
341
342 from = *AI;
343
344 from.FreeAddrInfo(AI);
345
346 return x;
347}
348
349int
350comm_udp_recv(int fd, void *buf, size_t len, int flags)
351{
352 Ip::Address nul;
353 return comm_udp_recvfrom(fd, buf, len, flags, nul);
354}
355
356ssize_t
357comm_udp_send(int s, const void *buf, size_t len, int flags)
358{
359 return send(s, buf, len, flags);
360}
361
362bool
363comm_has_incomplete_write(int fd)
364{
365 assert(isOpen(fd));
366 return COMMIO_FD_WRITECB(fd)->active();
367}
368
369/**
370 * Queue a write. handler/handler_data are called when the write fully
371 * completes, on error, or on file descriptor close.
372 */
373
374/* Return the local port associated with fd. */
375unsigned short
376comm_local_port(int fd)
377{
378 Ip::Address temp;
379 struct addrinfo *addr = NULL;
380 fde *F = &fd_table[fd];
381
382 /* If the fd is closed already, just return */
383
384 if (!F->flags.open) {
385 debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
386 return 0;
387 }
388
389 if (F->local_addr.GetPort())
390 return F->local_addr.GetPort();
391
392 if (F->sock_family == AF_INET)
393 temp.SetIPv4();
394
395 temp.InitAddrInfo(addr);
396
397 if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
398 debugs(50, DBG_IMPORTANT, "comm_local_port: Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerror());
399 temp.FreeAddrInfo(addr);
400 return 0;
401 }
402 temp = *addr;
403
404 temp.FreeAddrInfo(addr);
405
406 if (F->local_addr.IsAnyAddr()) {
407 /* save the whole local address, not just the port. */
408 F->local_addr = temp;
409 } else {
410 F->local_addr.SetPort(temp.GetPort());
411 }
412
413 debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.GetPort() << "(family=" << F->sock_family << ")");
414 return F->local_addr.GetPort();
415}
416
417static comm_err_t
418commBind(int s, struct addrinfo &inaddr)
419{
420 ++ statCounter.syscalls.sock.binds;
421
422 if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
423 debugs(50, 6, "commBind: bind socket FD " << s << " to " << fd_table[s].local_addr);
424 return COMM_OK;
425 }
426
427 debugs(50, 0, "commBind: Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerror());
428
429 return COMM_ERROR;
430}
431
432/**
433 * Create a socket. Default is a blocking, stream (TCP) socket. IO_TYPE
434 * is an OR of the flags specified in comm.h. TOS defaults to 0.
435 */
436int
437comm_open(int sock_type,
438 int proto,
439 Ip::Address &addr,
440 int flags,
441 const char *note)
442{
443 return comm_openex(sock_type, proto, addr, flags, 0, 0, note);
444}
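/*
 * Editor's sketch (illustrative only): opening a non-blocking outgoing TCP
 * socket. "outAddr" is a made-up name; an unset Ip::Address (any-address,
 * port 0) lets the OS pick the local endpoint.
 *
 *   Ip::Address outAddr;
 *   const int fd = comm_open(SOCK_STREAM, IPPROTO_TCP, outAddr,
 *                            COMM_NONBLOCKING, "outgoing TCP socket");
 *   if (fd < 0)
 *       return;    // socket creation or flag application failed
 */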
445
446void
447comm_open_listener(int sock_type,
448 int proto,
449 Comm::ConnectionPointer &conn,
450 const char *note)
451{
452 /* all listener sockets require bind() */
453 conn->flags |= COMM_DOBIND;
454
455 /* attempt native enabled port. */
456 conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, 0, 0, note);
457}
458
459int
460comm_open_listener(int sock_type,
461 int proto,
462 Ip::Address &addr,
463 int flags,
464 const char *note)
465{
466 int sock = -1;
467
468 /* all listener sockets require bind() */
469 flags |= COMM_DOBIND;
470
471 /* attempt native enabled port. */
472 sock = comm_openex(sock_type, proto, addr, flags, 0, 0, note);
473
474 return sock;
475}
476
477static bool
478limitError(int const anErrno)
479{
480 return anErrno == ENFILE || anErrno == EMFILE;
481}
482
483void
484comm_set_v6only(int fd, int tos)
485{
486#ifdef IPV6_V6ONLY
487 if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
488 debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerror());
489 }
490#else
491 debugs(50, 0, "WARNING: comm_open: setsockopt(IPV6_V6ONLY) not supported on this platform");
492#endif /* sockopt */
493}
494
495/**
496 * Set the socket IP_TRANSPARENT option for Linux TPROXY v4 support.
497 */
498void
499comm_set_transparent(int fd)
500{
501#if defined(IP_TRANSPARENT)
502 int tos = 1;
503 if (setsockopt(fd, SOL_IP, IP_TRANSPARENT, (char *) &tos, sizeof(int)) < 0) {
504 debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(IP_TRANSPARENT) on FD " << fd << ": " << xstrerror());
505 } else {
506 /* mark the socket as having transparent options */
507 fd_table[fd].flags.transparent = 1;
508 }
509#else
510 debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(IP_TRANSPARENT) not supported on this platform");
511#endif /* sockopt */
512}
513
514/**
515 * Create a socket. Default is a blocking, stream (TCP) socket. IO_TYPE
516 * is an OR of the COMM_* flags specified in defines.h.
517 */
518int
519comm_openex(int sock_type,
520 int proto,
521 Ip::Address &addr,
522 int flags,
523 tos_t tos,
524 nfmark_t nfmark,
525 const char *note)
526{
527 int new_socket;
528 struct addrinfo *AI = NULL;
529
530 PROF_start(comm_open);
531 /* Create socket for accepting new connections. */
532 ++ statCounter.syscalls.sock.sockets;
533
534 /* Setup the socket addrinfo details for use */
535 addr.GetAddrInfo(AI);
536 AI->ai_socktype = sock_type;
537 AI->ai_protocol = proto;
538
539 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );
540
541 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
542
543 /* under IPv6 there is the possibility IPv6 is present but disabled. */
544 /* try again as IPv4-native if possible */
545 if ( new_socket < 0 && Ip::EnableIpv6 && addr.IsIPv6() && addr.SetIPv4() ) {
546 /* attempt to open this IPv4-only. */
547 addr.FreeAddrInfo(AI);
548 /* Setup the socket addrinfo details for use */
549 addr.GetAddrInfo(AI);
550 AI->ai_socktype = sock_type;
551 AI->ai_protocol = proto;
552 debugs(50, 3, "comm_openex: Attempt fallback open socket for: " << addr );
553 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
554 debugs(50, 2, HERE << "attempt open " << note << " socket on: " << addr);
555 }
556
557 if (new_socket < 0) {
558 /* Increase the number of reserved fd's if calls to socket()
559 * are failing because the open file table is full. This
560 * limits the number of simultaneous clients */
561
562 if (limitError(errno)) {
563 debugs(50, DBG_IMPORTANT, "comm_open: socket failure: " << xstrerror());
564 fdAdjustReserved();
565 } else {
566 debugs(50, DBG_CRITICAL, "comm_open: socket failure: " << xstrerror());
567 }
568
569 addr.FreeAddrInfo(AI);
570
571 PROF_stop(comm_open);
572 return -1;
573 }
574
575 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
576 Comm::ConnectionPointer conn = new Comm::Connection;
577 conn->local = addr;
578 conn->fd = new_socket;
579
580 debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );
581
582 /* set TOS if needed */
583 if (tos)
584 Ip::Qos::setSockTos(conn, tos);
585
586 /* set netfilter mark if needed */
587 if (nfmark)
588 Ip::Qos::setSockNfmark(conn, nfmark);
589
590 if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.IsIPv6() )
591 comm_set_v6only(conn->fd, 1);
592
593 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
594 /* Other OS may have this administratively disabled for general use. Same deal. */
595 if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.IsIPv6() )
596 comm_set_v6only(conn->fd, 0);
597
598 comm_init_opened(conn, tos, nfmark, note, AI);
599 new_socket = comm_apply_flags(conn->fd, addr, flags, AI);
600
601 addr.FreeAddrInfo(AI);
602
603 PROF_stop(comm_open);
604
605 // XXX transition only. prevent conn from closing the new FD on function exit.
606 conn->fd = -1;
607 return new_socket;
608}
609
610/// update FD tables after a local or remote (IPC) comm_openex();
611void
612comm_init_opened(const Comm::ConnectionPointer &conn,
613 tos_t tos,
614 nfmark_t nfmark,
615 const char *note,
616 struct addrinfo *AI)
617{
618 assert(Comm::IsConnOpen(conn));
619 assert(AI);
620
621 /* update fdstat */
622 debugs(5, 5, HERE << conn << " is a new socket");
623
624 assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
625 fd_open(conn->fd, FD_SOCKET, note);
626
627 fdd_table[conn->fd].close_file = NULL;
628 fdd_table[conn->fd].close_line = 0;
629
630 fde *F = &fd_table[conn->fd];
631 F->local_addr = conn->local;
632 F->tosToServer = tos;
633
634 F->nfmarkToServer = nfmark;
635
636 F->sock_family = AI->ai_family;
637}
638
639/// apply flags after a local comm_open*() call;
640/// returns new_socket or -1 on error
641static int
642comm_apply_flags(int new_socket,
643 Ip::Address &addr,
644 int flags,
645 struct addrinfo *AI)
646{
647 assert(new_socket >= 0);
648 assert(AI);
649 const int sock_type = AI->ai_socktype;
650
651 if (!(flags & COMM_NOCLOEXEC))
652 commSetCloseOnExec(new_socket);
653
654 if ((flags & COMM_REUSEADDR))
655 commSetReuseAddr(new_socket);
656
657 if (addr.GetPort() > (unsigned short) 0) {
658#if _SQUID_MSWIN_
659 if (sock_type != SOCK_DGRAM)
660#endif
661 commSetNoLinger(new_socket);
662
663 if (opt_reuseaddr)
664 commSetReuseAddr(new_socket);
665 }
666
667 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
668 if ((flags & COMM_TRANSPARENT)) {
669 comm_set_transparent(new_socket);
670 }
671
672 if ( (flags & COMM_DOBIND) || addr.GetPort() > 0 || !addr.IsAnyAddr() ) {
673 if ( !(flags & COMM_DOBIND) && addr.IsAnyAddr() )
674 debugs(5, DBG_IMPORTANT,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
675 if ( addr.IsNoAddr() )
676 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");
677
678 if (commBind(new_socket, *AI) != COMM_OK) {
679 comm_close(new_socket);
680 return -1;
681 }
682 }
683
684 if (flags & COMM_NONBLOCKING)
685 if (commSetNonBlocking(new_socket) == COMM_ERROR) {
686 comm_close(new_socket);
687 return -1;
688 }
689
690#ifdef TCP_NODELAY
691 if (sock_type == SOCK_STREAM)
692 commSetTcpNoDelay(new_socket);
693
694#endif
695
696 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
697 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
698
699 return new_socket;
700}
701
702void
703comm_import_opened(const Comm::ConnectionPointer &conn,
704 const char *note,
705 struct addrinfo *AI)
706{
707 debugs(5, 2, HERE << conn);
708 assert(Comm::IsConnOpen(conn));
709 assert(AI);
710
711 comm_init_opened(conn, 0, 0, note, AI);
712
713 if (!(conn->flags & COMM_NOCLOEXEC))
714 fd_table[conn->fd].flags.close_on_exec = 1;
715
716 if (conn->local.GetPort() > (unsigned short) 0) {
717#if _SQUID_MSWIN_
718 if (AI->ai_socktype != SOCK_DGRAM)
719#endif
720 fd_table[conn->fd].flags.nolinger = 1;
721 }
722
723 if ((conn->flags & COMM_TRANSPARENT))
724 fd_table[conn->fd].flags.transparent = 1;
725
726 if (conn->flags & COMM_NONBLOCKING)
727 fd_table[conn->fd].flags.nonblocking = 1;
728
729#ifdef TCP_NODELAY
730 if (AI->ai_socktype == SOCK_STREAM)
731 fd_table[conn->fd].flags.nodelay = 1;
732#endif
733
734 /* no fd_table[fd].flags. updates needed for these conditions:
735 * if ((flags & COMM_REUSEADDR)) ...
736 * if ((flags & COMM_DOBIND) ...) ...
737 */
738}
739
740// XXX: now that raw-FD timeouts are only unset for pipes and files, this SHOULD be a no-op
741// with the handler already unset. Leaving this present until that can be verified for all code paths.
742void
743commUnsetFdTimeout(int fd)
744{
745 debugs(5, 3, HERE << "Remove timeout for FD " << fd);
746 assert(fd >= 0);
747 assert(fd < Squid_MaxFD);
748 fde *F = &fd_table[fd];
749 assert(F->flags.open);
750
751 F->timeoutHandler = NULL;
752 F->timeout = 0;
753}
754
755int
756commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
757{
758 debugs(5, 3, HERE << conn << " timeout " << timeout);
759 assert(Comm::IsConnOpen(conn));
760 assert(conn->fd < Squid_MaxFD);
761 fde *F = &fd_table[conn->fd];
762 assert(F->flags.open);
763
764 if (timeout < 0) {
765 F->timeoutHandler = NULL;
766 F->timeout = 0;
767 } else {
768 if (callback != NULL) {
769 typedef CommTimeoutCbParams Params;
770 Params &params = GetCommParams<Params>(callback);
771 params.conn = conn;
772 F->timeoutHandler = callback;
773 }
774
775 F->timeout = squid_curtime + (time_t) timeout;
776 }
777
778 return F->timeout;
779}
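/*
 * Editor's sketch (illustrative only): arming a 30-second timeout on an open
 * connection, assuming the CommTimeoutCbPtrFun dialer from CommCalls.h.
 * myTimeoutHandler and myData are made-up names.
 *
 *   static void
 *   myTimeoutHandler(const CommTimeoutCbParams &params)
 *   {
 *       if (Comm::IsConnOpen(params.conn))
 *           params.conn->close();    // give up on the idle connection
 *   }
 *
 *   AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "myTimeoutHandler",
 *                                               CommTimeoutCbPtrFun(&myTimeoutHandler, myData));
 *   commSetConnTimeout(conn, 30, timeoutCall);
 */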
780
781int
782commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
783{
784 debugs(5, 3, HERE << "Remove timeout for " << conn);
785 AsyncCall::Pointer nil;
786 return commSetConnTimeout(conn, -1, nil);
787}
788
789int
790comm_connect_addr(int sock, const Ip::Address &address)
791{
792 comm_err_t status = COMM_OK;
793 fde *F = &fd_table[sock];
794 int x = 0;
795 int err = 0;
796 socklen_t errlen;
797 struct addrinfo *AI = NULL;
798 PROF_start(comm_connect_addr);
799
800 assert(address.GetPort() != 0);
801
802 debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");
803
804 /* Handle IPv6 over IPv4-only socket case.
805 * this case must presently be handled here since the GetAddrInfo asserts on bad mappings.
806 * NP: because commResetFD is private to ConnStateData we have to return an error and
807 * trust it is handled properly.
808 */
809 if (F->sock_family == AF_INET && !address.IsIPv4()) {
810 errno = ENETUNREACH;
811 return COMM_ERR_PROTOCOL;
812 }
813
814 /* Handle IPv4 over IPv6-only socket case.
815 * This case is presently handled here as it's both a known case and it's
816 * uncertain what error will be returned by the IPv6 stack in such a case. It's
817 * possible this will also be handled by the errno checks below after connect()
818 * but needs careful cross-platform verification, and verifying the address
819 * condition here is simple.
820 */
821 if (!F->local_addr.IsIPv4() && address.IsIPv4()) {
822 errno = ENETUNREACH;
823 return COMM_ERR_PROTOCOL;
824 }
825
826 address.GetAddrInfo(AI, F->sock_family);
827
828 /* Establish connection. */
829 errno = 0;
830
831 if (!F->flags.called_connect) {
832 F->flags.called_connect = 1;
833 ++ statCounter.syscalls.sock.connects;
834
835 x = connect(sock, AI->ai_addr, AI->ai_addrlen);
836
837 // XXX: ICAP code refuses callbacks during a pending comm_ call
838 // Async calls development will fix this.
839 if (x == 0) {
840 x = -1;
841 errno = EINPROGRESS;
842 }
843
844 if (x < 0) {
845 debugs(5,5, "comm_connect_addr: sock=" << sock << ", addrinfo( " <<
846 " flags=" << AI->ai_flags <<
847 ", family=" << AI->ai_family <<
848 ", socktype=" << AI->ai_socktype <<
849 ", protocol=" << AI->ai_protocol <<
850 ", &addr=" << AI->ai_addr <<
851 ", addrlen=" << AI->ai_addrlen <<
852 " )" );
853 debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerror());
854 debugs(14,9, "connecting to: " << address );
855 }
856 } else {
857#if _SQUID_NEWSOS6_
858 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
859
860 connect(sock, AI->ai_addr, AI->ai_addrlen);
861
862 if (errno == EINVAL) {
863 errlen = sizeof(err);
864 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
865
866 if (x >= 0)
867 errno = x;
868 }
869
870#else
871 errlen = sizeof(err);
872
873 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
874
875 if (x == 0)
876 errno = err;
877
878#if _SQUID_SOLARIS_
879 /*
880 * Solaris 2.4's socket emulation doesn't allow you
881 * to determine the error from a failed non-blocking
882 * connect and just returns EPIPE. Create a fake
883 * error message for connect. -- fenner@parc.xerox.com
884 */
885 if (x < 0 && errno == EPIPE)
886 errno = ENOTCONN;
887
888#endif
889#endif
890
891 }
892
893 /* Squid seems to be working fine without this code. With this code,
894 * we leak memory on many connect requests because of EINPROGRESS.
895 * If you find that this code is needed, please file a bug report. */
896#if 0
897#if _SQUID_LINUX_
898 /* 2007-11-27:
899 * Linux Debian replaces our allocated AI pointer with garbage when
900 * connect() fails. This leads to segmentation faults deallocating
901 * the system-allocated memory when we go to clean up our pointer.
902 * HACK: leak the memory returned since we cannot safely deallocate it.
903 */
904 if (errno != 0) {
905 AI = NULL;
906 }
907#endif
908#endif
909
910 address.FreeAddrInfo(AI);
911
912 PROF_stop(comm_connect_addr);
913
914 if (errno == 0 || errno == EISCONN)
915 status = COMM_OK;
916 else if (ignoreErrno(errno))
917 status = COMM_INPROGRESS;
918 else if (errno == EAFNOSUPPORT || errno == EINVAL)
919 return COMM_ERR_PROTOCOL;
920 else
921 return COMM_ERROR;
922
923 address.NtoA(F->ipaddr, MAX_IPSTRLEN);
924
925 F->remote_port = address.GetPort(); /* remote_port is HS */
926
927 if (status == COMM_OK) {
928 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connected to " << address);
929 } else if (status == COMM_INPROGRESS) {
930 debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connection pending");
931 }
932
933 return status;
934}
935
936void
937commCallCloseHandlers(int fd)
938{
939 fde *F = &fd_table[fd];
940 debugs(5, 5, "commCallCloseHandlers: FD " << fd);
941
942 while (F->closeHandler != NULL) {
943 AsyncCall::Pointer call = F->closeHandler;
944 F->closeHandler = call->Next();
945 call->setNext(NULL);
946 // If the call is not canceled, schedule it for execution; otherwise ignore it.
947 if (!call->canceled()) {
948 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
949 ScheduleCallHere(call);
950 }
951 }
952}
953
954#if LINGERING_CLOSE
955static void
956commLingerClose(int fd, void *unused)
957{
958 LOCAL_ARRAY(char, buf, 1024);
959 int n;
960 n = FD_READ_METHOD(fd, buf, 1024);
961
962 if (n < 0)
963 debugs(5, 3, "commLingerClose: FD " << fd << " read: " << xstrerror());
964
965 comm_close(fd);
966}
967
968static void
969commLingerTimeout(const FdeCbParams &params)
970{
971 debugs(5, 3, "commLingerTimeout: FD " << params.fd);
972 comm_close(params.fd);
973}
974
975/*
976 * Inspired by apache
977 */
978void
979comm_lingering_close(int fd)
980{
981#if USE_SSL
982 if (fd_table[fd].ssl)
983 ssl_shutdown_method(fd_table[fd].ssl);
984#endif
985
986 if (shutdown(fd, 1) < 0) {
987 comm_close(fd);
988 return;
989 }
990
991 fd_note(fd, "lingering close");
992 AsyncCall::Pointer call = commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout, NULL));
993
994 debugs(5, 3, HERE << "FD " << fd << " timeout 10");
995 assert(fd_table[fd].flags.open);
996 if (call != NULL) {
997 typedef FdeCbParams Params;
998 Params &params = GetCommParams<Params>(call);
999 params.fd = fd;
1000 fd_table[fd].timeoutHandler = call;
1001 fd_table[fd].timeout = squid_curtime + static_cast<time_t>(10);
1002 }
1003
1004 Comm::SetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
1005}
1006
1007#endif
1008
1009/**
1010 * enable linger with time of 0 so that when the socket is
1011 * closed, TCP generates a RESET
1012 */
1013void
1014comm_reset_close(const Comm::ConnectionPointer &conn)
1015{
1016 struct linger L;
1017 L.l_onoff = 1;
1018 L.l_linger = 0;
1019
1020 if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1021 debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerror());
1022
1023 conn->close();
1024}
1025
1026// Legacy close function.
1027void
1028old_comm_reset_close(int fd)
1029{
1030 struct linger L;
1031 L.l_onoff = 1;
1032 L.l_linger = 0;
1033
1034 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1035 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerror());
1036
1037 comm_close(fd);
1038}
1039
1040#if USE_SSL
1041void
1042commStartSslClose(const FdeCbParams &params)
1043{
1044 assert(&fd_table[params.fd].ssl);
1045 ssl_shutdown_method(fd_table[params.fd].ssl);
1046}
1047#endif
1048
1049void
1050comm_close_complete(const FdeCbParams &params)
1051{
1052#if USE_SSL
1053 fde *F = &fd_table[params.fd];
1054
1055 if (F->ssl) {
1056 SSL_free(F->ssl);
1057 F->ssl = NULL;
1058 }
1059
1060 if (F->dynamicSslContext) {
1061 SSL_CTX_free(F->dynamicSslContext);
1062 F->dynamicSslContext = NULL;
1063 }
1064#endif
1065 fd_close(params.fd); /* update fdstat */
1066 close(params.fd);
1067
1068 ++ statCounter.syscalls.sock.closes;
1069
1070 /* When one connection closes, give accept() a chance, if need be */
1071 Comm::AcceptLimiter::Instance().kick();
1072}
1073
1074/*
1075 * Close the socket fd.
1076 *
1077 * + call write handlers with ERR_CLOSING
1078 * + call read handlers with ERR_CLOSING
1079 * + call closing handlers
1080 *
1081 * NOTE: COMM_ERR_CLOSING will NOT be called for CommReads sitting in a
1082 * DeferredReadManager.
1083 */
1084void
1085_comm_close(int fd, char const *file, int line)
1086{
1087 debugs(5, 3, "comm_close: start closing FD " << fd);
1088 assert(fd >= 0);
1089 assert(fd < Squid_MaxFD);
1090
1091 fde *F = &fd_table[fd];
1092 fdd_table[fd].close_file = file;
1093 fdd_table[fd].close_line = line;
1094
1095 if (F->closing())
1096 return;
1097
1098 /* XXX: is this obsolete behind F->closing() ? */
1099 if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
1100 return;
1101
1102 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
1103 if (!isOpen(fd)) {
1104 debugs(50, DBG_IMPORTANT, HERE << "BUG 3556: FD " << fd << " is not an open socket.");
1105 // XXX: do we need to run close(fd) or fd_close(fd) here?
1106 return;
1107 }
1108
1109 assert(F->type != FD_FILE);
1110
1111 PROF_start(comm_close);
1112
1113 F->flags.close_request = 1;
1114
1115#if USE_SSL
1116 if (F->ssl) {
1117 AsyncCall::Pointer startCall=commCbCall(5,4, "commStartSslClose",
1118 FdeCbPtrFun(commStartSslClose, NULL));
1119 FdeCbParams &startParams = GetCommParams<FdeCbParams>(startCall);
1120 startParams.fd = fd;
1121 ScheduleCallHere(startCall);
1122 }
1123#endif
1124
1125 // a half-closed fd may lack a reader, so we stop monitoring explicitly
1126 if (commHasHalfClosedMonitor(fd))
1127 commStopHalfClosedMonitor(fd);
1128 commUnsetFdTimeout(fd);
1129
1130 // notify read/write handlers after canceling select reservations, if any
1131 if (COMMIO_FD_WRITECB(fd)->active()) {
1132 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1133 COMMIO_FD_WRITECB(fd)->finish(COMM_ERR_CLOSING, errno);
1134 }
1135 if (COMMIO_FD_READCB(fd)->active()) {
1136 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
1137 COMMIO_FD_READCB(fd)->finish(COMM_ERR_CLOSING, errno);
1138 }
1139
1140#if USE_DELAY_POOLS
1141 if (ClientInfo *clientInfo = F->clientInfo) {
1142 if (clientInfo->selectWaiting) {
1143 clientInfo->selectWaiting = false;
1144 // kick queue or it will get stuck as commWriteHandle is not called
1145 clientInfo->kickQuotaQueue();
1146 }
1147 }
1148#endif
1149
1150 commCallCloseHandlers(fd);
1151
1152 if (F->pconn.uses && F->pconn.pool)
1153 F->pconn.pool->noteUses(F->pconn.uses);
1154
1155 comm_empty_os_read_buffers(fd);
1156
1157 AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
1158 FdeCbPtrFun(comm_close_complete, NULL));
1159 FdeCbParams &completeParams = GetCommParams<FdeCbParams>(completeCall);
1160 completeParams.fd = fd;
1161 // must use async call to wait for all callbacks
1162 // scheduled before comm_close() to finish
1163 ScheduleCallHere(completeCall);
1164
1165 PROF_stop(comm_close);
1166}
1167
1168/* Send a udp datagram to specified TO_ADDR. */
1169int
1170comm_udp_sendto(int fd,
1171 const Ip::Address &to_addr,
1172 const void *buf,
1173 int len)
1174{
1175 int x = 0;
1176 struct addrinfo *AI = NULL;
1177
1178 PROF_start(comm_udp_sendto);
1179 ++ statCounter.syscalls.sock.sendtos;
1180
1181 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
1182 " using FD " << fd << " using Port " << comm_local_port(fd) );
1183
1184 /* BUG: something in the above macro appears to occasionally be setting AI to garbage. */
1185 /* AYJ: 2007-08-27 : or was it because I wasn't then setting 'fd_table[fd].sock_family' to fill properly. */
1186 assert( NULL == AI );
1187
1188 to_addr.GetAddrInfo(AI, fd_table[fd].sock_family);
1189
1190 x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
1191
1192 to_addr.FreeAddrInfo(AI);
1193
1194 PROF_stop(comm_udp_sendto);
1195
1196 if (x >= 0)
1197 return x;
1198
1199#if _SQUID_LINUX_
1200
1201 if (ECONNREFUSED != errno)
1202#endif
1203
1204 debugs(50, DBG_IMPORTANT, "comm_udp_sendto: FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerror());
1205
1206 return COMM_ERROR;
1207}
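/*
 * Editor's sketch (illustrative only): sending a datagram on an already
 * opened UDP socket. "peer", "queryBuf" and "queryLen" are made-up names.
 *
 *   Ip::Address peer;    // destination address and port, filled in by the caller
 *   if (comm_udp_sendto(fd, peer, queryBuf, queryLen) < 0)
 *       debugs(5, DBG_IMPORTANT, "failed to send datagram to " << peer);
 */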
1208
1209void
1210comm_add_close_handler(int fd, CLCB * handler, void *data)
1211{
1212 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
1213 handler << ", data=" << data);
1214
1215 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
1216 CommCloseCbPtrFun(handler, data));
1217 comm_add_close_handler(fd, call);
1218}
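/*
 * Editor's sketch (illustrative only): registering and later removing a
 * close handler. The handler signature matches DeferredReadManager::CloseHandler()
 * further below; myServerClosed and myState are made-up names.
 *
 *   static void
 *   myServerClosed(const CommCloseCbParams &params)
 *   {
 *       // params.data is the pointer registered below; forget the closed FD
 *   }
 *
 *   comm_add_close_handler(fd, myServerClosed, myState);
 *   // ... later, if the descriptor is handed off or closed by us instead:
 *   comm_remove_close_handler(fd, myServerClosed, myState);
 */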
1219
1220void
1221comm_add_close_handler(int fd, AsyncCall::Pointer &call)
1222{
1223 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
1224
1225 /*TODO:Check for a similar scheduled AsyncCall*/
1226// for (c = fd_table[fd].closeHandler; c; c = c->next)
1227// assert(c->handler != handler || c->data != data);
1228
1229 call->setNext(fd_table[fd].closeHandler);
1230
1231 fd_table[fd].closeHandler = call;
1232}
1233
1234// remove function-based close handler
1235void
1236comm_remove_close_handler(int fd, CLCB * handler, void *data)
1237{
1238 assert (isOpen(fd));
1239 /* Find handler in list */
1240 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
1241 handler << ", data=" << data);
1242
1243 AsyncCall::Pointer p, prev = NULL;
1244 for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
1245 typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
1246 const Call *call = dynamic_cast<const Call*>(p.getRaw());
1247 if (!call) // method callbacks have their own comm_remove_close_handler
1248 continue;
1249
1250 typedef CommCloseCbParams Params;
1251 const Params &params = GetCommParams<Params>(p);
1252 if (call->dialer.handler == handler && params.data == data)
1253 break; /* This is our handler */
1254 }
1255
1256 // comm_close removes all close handlers so our handler may be gone
1257 if (p != NULL) {
1258 p->dequeue(fd_table[fd].closeHandler, prev);
1259 p->cancel("comm_remove_close_handler");
1260 }
1261}
1262
1263// remove method-based close handler
1264void
1265comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1266{
1267 assert (isOpen(fd));
1268 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1269
1270 // comm_close removes all close handlers so our handler may be gone
1271 AsyncCall::Pointer p, prev = NULL;
1272 for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());
1273
1274 if (p != NULL)
1275 p->dequeue(fd_table[fd].closeHandler, prev);
1276 call->cancel("comm_remove_close_handler");
1277}
1278
1279static void
1280commSetNoLinger(int fd)
1281{
1282
1283 struct linger L;
1284 L.l_onoff = 0; /* off */
1285 L.l_linger = 0;
1286
1287 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1288 debugs(50, 0, "commSetNoLinger: FD " << fd << ": " << xstrerror());
1289
1290 fd_table[fd].flags.nolinger = 1;
1291}
1292
1293static void
1294commSetReuseAddr(int fd)
1295{
1296 int on = 1;
1297
1298 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0)
1299 debugs(50, DBG_IMPORTANT, "commSetReuseAddr: FD " << fd << ": " << xstrerror());
1300}
1301
1302static void
1303commSetTcpRcvbuf(int fd, int size)
1304{
1305 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0)
1306 debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1307 if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0)
1308 debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1309#ifdef TCP_WINDOW_CLAMP
1310 if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0)
1311 debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1312#endif
1313}
1314
1315int
1316commSetNonBlocking(int fd)
1317{
1318#if !_SQUID_MSWIN_
1319 int flags;
1320 int dummy = 0;
1321#endif
1322#if _SQUID_WINDOWS_
1323 int nonblocking = TRUE;
1324
1325#if _SQUID_CYGWIN_
1326 if (fd_table[fd].type != FD_PIPE) {
1327#endif
1328
1329 if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
1330 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror() << " " << fd_table[fd].type);
1331 return COMM_ERROR;
1332 }
1333
1334#if _SQUID_CYGWIN_
1335 } else {
1336#endif
1337#endif
1338#if !_SQUID_MSWIN_
1339
1340 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1341 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1342 return COMM_ERROR;
1343 }
1344
1345 if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
1346 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror());
1347 return COMM_ERROR;
1348 }
1349
1350#endif
1351#if _SQUID_CYGWIN_
1352 }
1353#endif
1354 fd_table[fd].flags.nonblocking = 1;
1355
1356 return 0;
1357}
1358
1359int
1360commUnsetNonBlocking(int fd)
1361{
1362#if _SQUID_MSWIN_
1363 int nonblocking = FALSE;
1364
1365 if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
1366#else
1367 int flags;
1368 int dummy = 0;
1369
1370 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1371 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1372 return COMM_ERROR;
1373 }
1374
1375 if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
1376#endif
1377 debugs(50, 0, "commUnsetNonBlocking: FD " << fd << ": " << xstrerror());
1378 return COMM_ERROR;
1379 }
1380
1381 fd_table[fd].flags.nonblocking = 0;
1382 return 0;
1383}
1384
1385void
1386commSetCloseOnExec(int fd)
1387{
1388#ifdef FD_CLOEXEC
1389 int flags;
1390 int dummy = 0;
1391
1392 if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1393 debugs(50, 0, "FD " << fd << ": fcntl F_GETFD: " << xstrerror());
1394 return;
1395 }
1396
1397 if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
1398 debugs(50, 0, "FD " << fd << ": set close-on-exec failed: " << xstrerror());
1399
1400 fd_table[fd].flags.close_on_exec = 1;
1401
1402#endif
1403}
1404
1405#ifdef TCP_NODELAY
1406static void
1407commSetTcpNoDelay(int fd)
1408{
1409 int on = 1;
1410
1411 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0)
1412 debugs(50, DBG_IMPORTANT, "commSetTcpNoDelay: FD " << fd << ": " << xstrerror());
1413
1414 fd_table[fd].flags.nodelay = 1;
1415}
1416
1417#endif
1418
1419void
1420commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1421{
1422 int on = 1;
1423#ifdef TCP_KEEPCNT
1424 if (timeout && interval) {
1425 int count = (timeout + interval - 1) / interval;
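 // editor's note: this is ceil(timeout/interval); e.g. timeout=30s and
 // interval=10s give (30+10-1)/10 = 3 keep-alive probes before the
 // connection is declared dead.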
1426 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(on)) < 0)
1427 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1428 }
1429#endif
1430#ifdef TCP_KEEPIDLE
1431 if (idle) {
1432 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(on)) < 0)
1433 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1434 }
1435#endif
1436#ifdef TCP_KEEPINTVL
1437 if (interval) {
1438 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(on)) < 0)
1439 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1440 }
1441#endif
1442 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0)
1443 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1444}
1445
1446void
1447comm_init(void)
1448{
1449 fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
1450 fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));
1451
1452 /* make sure the accept() socket FIFO delay queue exists */
1453 Comm::AcceptLimiter::Instance();
1454
1455 // make sure the IO pending callback table exists
1456 Comm::CallbackTableInit();
1457
1458 /* XXX account fd_table */
1459 /* Keep a few file descriptors free so that we don't run out of FD's
1460 * after accepting a client but before it opens a socket or a file.
1461 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1462 RESERVED_FD = min(100, Squid_MaxFD / 4);
1463
1464 conn_close_pool = memPoolCreate("close_handler", sizeof(close_handler));
1465
1466 TheHalfClosed = new DescriptorSet;
1467
1468 /* setup the select loop module */
1469 Comm::SelectLoopInit();
1470}
1471
1472void
1473comm_exit(void)
1474{
1475 delete TheHalfClosed;
1476 TheHalfClosed = NULL;
1477
1478 safe_free(fd_table);
1479 safe_free(fdd_table);
1480 Comm::CallbackTableDestruct();
1481}
1482
1483#if USE_DELAY_POOLS
1484// called when the queue is done waiting for the client bucket to fill
1485void
1486commHandleWriteHelper(void * data)
1487{
1488 CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
1489 assert(queue);
1490
1491 ClientInfo *clientInfo = queue->clientInfo;
1492 // ClientInfo invalidates the queue if freed, so if we got here through
1493 // eventAdd's cbdata protections, everything should be valid and consistent
1494 assert(clientInfo);
1495 assert(clientInfo->hasQueue());
1496 assert(clientInfo->hasQueue(queue));
1497 assert(!clientInfo->selectWaiting);
1498 assert(clientInfo->eventWaiting);
1499 clientInfo->eventWaiting = false;
1500
1501 do {
1502 // check that the head descriptor is still relevant
1503 const int head = clientInfo->quotaPeekFd();
1504 Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);
1505
1506 if (fd_table[head].clientInfo == clientInfo &&
1507 clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
1508 !fd_table[head].closing()) {
1509
1510 // wait for the head descriptor to become ready for writing
1511 Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
1512 clientInfo->selectWaiting = true;
1513 return;
1514 }
1515
1516 clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
1517 // and continue looking for a relevant one
1518 } while (clientInfo->hasQueue());
1519
1520 debugs(77,3, HERE << "emptied queue");
1521}
1522
1523bool
1524ClientInfo::hasQueue() const
1525{
1526 assert(quotaQueue);
1527 return !quotaQueue->empty();
1528}
1529
1530bool
1531ClientInfo::hasQueue(const CommQuotaQueue *q) const
1532{
1533 assert(quotaQueue);
1534 return quotaQueue == q;
1535}
1536
1537/// returns the first descriptor to be dequeued
1538int
1539ClientInfo::quotaPeekFd() const
1540{
1541 assert(quotaQueue);
1542 return quotaQueue->front();
1543}
1544
1545/// returns the reservation ID of the first descriptor to be dequeued
1546unsigned int
1547ClientInfo::quotaPeekReserv() const
1548{
1549 assert(quotaQueue);
1550 return quotaQueue->outs + 1;
1551}
1552
1553/// queues a given fd, creating the queue if necessary; returns reservation ID
1554unsigned int
1555ClientInfo::quotaEnqueue(int fd)
1556{
1557 assert(quotaQueue);
1558 return quotaQueue->enqueue(fd);
1559}
1560
1561/// removes queue head
1562void
1563ClientInfo::quotaDequeue()
1564{
1565 assert(quotaQueue);
1566 quotaQueue->dequeue();
1567}
1568
1569void
1570ClientInfo::kickQuotaQueue()
1571{
1572 if (!eventWaiting && !selectWaiting && hasQueue()) {
1573 // wait at least a second if the bucket is empty
1574 const double delay = (bucketSize < 1.0) ? 1.0 : 0.0;
1575 eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1576 quotaQueue, delay, 0, true);
1577 eventWaiting = true;
1578 }
1579}
1580
1581/// calculates how much to write for a single dequeued client
1582int
1583ClientInfo::quotaForDequed()
1584{
1585 /* If we have multiple clients and give full bucketSize to each client then
1586 * clt1 may often get a lot more because clt1->clt2 time distance in the
1587 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1588 * We divide quota evenly to be more fair. */
1589
1590 if (!rationedCount) {
1591 rationedCount = quotaQueue->size() + 1;
1592
1593 // The delay in ration recalculation _temporarily_ deprives clients of
1594 // bytes that should have trickled in while rationedCount was positive.
1595 refillBucket();
1596
1597 // Rounding errors do not accumulate here, but we round down to avoid
1598 // negative bucket sizes after write with rationedCount=1.
1599 rationedQuota = static_cast<int>(floor(bucketSize/rationedCount));
1600 debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
1601 '*' << rationedCount);
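 // editor's note: e.g. a 1200-byte bucket with 3 queued descriptors gives
 // rationedCount = 4 and rationedQuota = floor(1200/4) = 300 bytes per write.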
1602 }
1603
1604 --rationedCount;
1605 debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
1606 " rations remaining: " << rationedCount);
1607
1608 // update 'last seen' time to prevent clientdb GC from dropping us
1609 last_seen = squid_curtime;
1610 return rationedQuota;
1611}
1612
1613/// adds bytes to the quota bucket based on the rate and passed time
1614void
1615ClientInfo::refillBucket()
1616{
1617 // all these times are in seconds, with double precision
1618 const double currTime = current_dtime;
1619 const double timePassed = currTime - prevTime;
1620
1621 // Calculate allowance for the time passed. Use double to avoid
1622 // accumulating rounding errors for small intervals. For example, always
1623 // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
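 // editor's note: e.g. writeSpeedLimit = 4000 bytes/s over timePassed = 0.25s
 // yields a gain of 1000 bytes; gains below one byte return early below
 // (before prevTime is updated) so the fraction is not lost.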
1624 const double gain = timePassed * writeSpeedLimit;
1625
1626 debugs(77,5, HERE << currTime << " clt" << (const char*)hash.key << ": " <<
1627 bucketSize << " + (" << timePassed << " * " << writeSpeedLimit <<
1628 " = " << gain << ')');
1629
1630 // to further combat error accumulation during micro updates,
1631 // quit before updating time if we cannot add at least one byte
1632 if (gain < 1.0)
1633 return;
1634
1635 prevTime = currTime;
1636
1637 // for "first" connections, drain initial fat before refilling but keep
1638 // updating prevTime to avoid bursts after the fat is gone
1639 if (bucketSize > bucketSizeLimit) {
1640 debugs(77,4, HERE << "not refilling while draining initial fat");
1641 return;
1642 }
1643
1644 bucketSize += gain;
1645
1646 // obey quota limits
1647 if (bucketSize > bucketSizeLimit)
1648 bucketSize = bucketSizeLimit;
1649}
1650
1651void
1652ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
1653{
1654 debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
1655 " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
1656 " highwatermark=" << aHighWatermark);
1657
1658 // set or possibly update traffic shaping parameters
1659 writeLimitingActive = true;
1660 writeSpeedLimit = aWriteSpeedLimit;
1661 bucketSizeLimit = aHighWatermark;
1662
1663 // but some members should only be set once for a newly activated bucket
1664 if (firstTimeConnection) {
1665 firstTimeConnection = false;
1666
1667 assert(!selectWaiting);
1668 assert(!quotaQueue);
1669 quotaQueue = new CommQuotaQueue(this);
1670
1671 bucketSize = anInitialBurst;
1672 prevTime = current_dtime;
1673 }
1674}
1675
1676CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1677 ins(0), outs(0)
1678{
1679 assert(clientInfo);
1680}
1681
1682CommQuotaQueue::~CommQuotaQueue()
1683{
1684 assert(!clientInfo); // ClientInfo should clear this before destroying us
1685}
1686
1687/// places the given fd at the end of the queue; returns reservation ID
1688unsigned int
1689CommQuotaQueue::enqueue(int fd)
1690{
1691 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1692 ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1693 fds.push_back(fd);
1694 return ++ins;
1695}
1696
1697/// removes queue head
1698void
1699CommQuotaQueue::dequeue()
1700{
1701 assert(!fds.empty());
1702 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1703 ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1704 fds.size());
1705 fds.pop_front();
1706 ++outs;
1707}
1708#endif
1709
1710/*
1711 * hm, this might be too general-purpose for all the places we'd
1712 * like to use it.
1713 */
1714int
1715ignoreErrno(int ierrno)
1716{
1717 switch (ierrno) {
1718
1719 case EINPROGRESS:
1720
1721 case EWOULDBLOCK:
1722#if EAGAIN != EWOULDBLOCK
1723
1724 case EAGAIN:
1725#endif
1726
1727 case EALREADY:
1728
1729 case EINTR:
1730#ifdef ERESTART
1731
1732 case ERESTART:
1733#endif
1734
1735 return 1;
1736
1737 default:
1738 return 0;
1739 }
1740
1741 /* NOTREACHED */
1742}
1743
1744void
1745commCloseAllSockets(void)
1746{
1747 int fd;
1748 fde *F = NULL;
1749
1750 for (fd = 0; fd <= Biggest_FD; ++fd) {
1751 F = &fd_table[fd];
1752
1753 if (!F->flags.open)
1754 continue;
1755
1756 if (F->type != FD_SOCKET)
1757 continue;
1758
1759 if (F->flags.ipc) /* don't close inter-process sockets */
1760 continue;
1761
1762 if (F->timeoutHandler != NULL) {
1763 AsyncCall::Pointer callback = F->timeoutHandler;
1764 F->timeoutHandler = NULL;
1765 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1766 ScheduleCallHere(callback);
1767 } else {
1768 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1769 old_comm_reset_close(fd);
1770 }
1771 }
1772}
1773
1774static bool
1775AlreadyTimedOut(fde *F)
1776{
1777 if (!F->flags.open)
1778 return true;
1779
1780 if (F->timeout == 0)
1781 return true;
1782
1783 if (F->timeout > squid_curtime)
1784 return true;
1785
1786 return false;
1787}
1788
1789static bool
1790writeTimedOut(int fd)
1791{
1792 if (!COMMIO_FD_WRITECB(fd)->active())
1793 return false;
1794
1795 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1796 return false;
1797
1798 return true;
1799}
1800
1801void
1802checkTimeouts(void)
1803{
1804 int fd;
1805 fde *F = NULL;
1806 AsyncCall::Pointer callback;
1807
1808 for (fd = 0; fd <= Biggest_FD; ++fd) {
1809 F = &fd_table[fd];
1810
1811 if (writeTimedOut(fd)) {
1812 // We have an active write callback and we are timed out
1813 debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
1814 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1815 COMMIO_FD_WRITECB(fd)->finish(COMM_ERROR, ETIMEDOUT);
1816 } else if (AlreadyTimedOut(F))
1817 continue;
1818
1819 debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");
1820
1821 if (F->timeoutHandler != NULL) {
1822 debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
1823 callback = F->timeoutHandler;
1824 F->timeoutHandler = NULL;
1825 ScheduleCallHere(callback);
1826 } else {
1827 debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
1828 comm_close(fd);
1829 }
1830 }
1831}
1832
1833/// Start waiting for a possibly half-closed connection to close
1834// by scheduling a read callback to a monitoring handler that
1835// will close the connection on read errors.
1836void
1837commStartHalfClosedMonitor(int fd)
1838{
1839 debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
1840 assert(isOpen(fd));
1841 assert(!commHasHalfClosedMonitor(fd));
1842 (void)TheHalfClosed->add(fd); // could also assert the result
1843 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1844}
1845
1846static
1847void
1848commPlanHalfClosedCheck()
1849{
1850 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1851 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1852 WillCheckHalfClosed = true;
1853 }
1854}
1855
1856/// iterates over all descriptors that may need half-closed tests and
1857/// calls comm_read for those that do; re-schedules the check if needed
1858static
1859void
1860commHalfClosedCheck(void *)
1861{
1862 debugs(5, 5, HERE << "checking " << *TheHalfClosed);
1863
1864 typedef DescriptorSet::const_iterator DSCI;
1865 const DSCI end = TheHalfClosed->end();
1866 for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
1867 Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
1868 c->fd = *i;
1869 if (!fd_table[c->fd].halfClosedReader) { // not reading already
1870 AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
1871 CommIoCbPtrFun(&commHalfClosedReader, NULL));
1872 comm_read(c, NULL, 0, call);
1873 fd_table[c->fd].halfClosedReader = call;
1874 } else
1875 c->fd = -1; // XXX: temporary. prevent c replacement erase closing listed FD
1876 }
1877
1878 WillCheckHalfClosed = false; // as far as we know
1879 commPlanHalfClosedCheck(); // may need to check again
1880}
1881
1882/// checks whether we are waiting for a possibly half-closed connection to close
1883// We are monitoring if the read handler for the fd is the monitoring handler.
1884bool
1885commHasHalfClosedMonitor(int fd)
1886{
1887 return TheHalfClosed->has(fd);
1888}
1889
1890/// stop waiting for a possibly half-closed connection to close
1891static void
1892commStopHalfClosedMonitor(int const fd)
1893{
1894 debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
1895
1896 // cancel the read if one was scheduled
1897 AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1898 if (reader != NULL)
1899 comm_read_cancel(fd, reader);
1900 fd_table[fd].halfClosedReader = NULL;
1901
1902 TheHalfClosed->del(fd);
1903}
1904
1905/// I/O handler for the possibly half-closed connection monitoring code
1906static void
1907commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, comm_err_t flag, int, void *)
1908{
1909 // there cannot be more data coming in on half-closed connections
1910 assert(size == 0);
1911 assert(conn != NULL);
1912 assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read
1913
1914 fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now
1915
1916 // nothing to do if fd is being closed
1917 if (flag == COMM_ERR_CLOSING)
1918 return;
1919
1920 // if read failed, close the connection
1921 if (flag != COMM_OK) {
1922 debugs(5, 3, HERE << "closing " << conn);
1923 conn->close();
1924 return;
1925 }
1926
1927 // continue waiting for close or error
1928 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1929}
1930
1931CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}
1932
1933CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
1934 : conn(c), buf(buf_), len(len_), callback(callback_) {}
1935
1936DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}
1937
1938DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
1939
1940DeferredReadManager::~DeferredReadManager()
1941{
1942 flushReads();
1943 assert (deferredReads.empty());
1944}
1945
1946/* explicit instantiation required for some systems */
1947
1948/// \cond AUTODOCS-IGNORE
1949template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
1950/// \endcond
1951
1952void
1953DeferredReadManager::delayRead(DeferredRead const &aRead)
1954{
1955 debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
1956 CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);
1957
1958 // We have to use a global function as a closer and point to temp
1959 // instead of "this" because DeferredReadManager is not a job and
1960 // is not even cbdata protected
1961 // XXX: and yet we use cbdata protection functions on it??
1962 AsyncCall::Pointer closer = commCbCall(5,4,
1963 "DeferredReadManager::CloseHandler",
1964 CommCloseCbPtrFun(&CloseHandler, temp));
1965 comm_add_close_handler(aRead.theRead.conn->fd, closer);
1966 temp->element.closer = closer; // remember it so that we can cancel
1967}
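
/*
 * Usage sketch (illustrative only; it assumes the caller owns a
 * DeferredReadManager named deferredReads, and SomeReader with its static
 * TryRead() callback are hypothetical names -- TryRead() just has to match
 * the DeferrableRead signature). A reader that cannot proceed right now,
 * e.g. for lack of delay-pool quota, could park itself like this:
 *
 *     CommRead read(conn, buf, len, callback);
 *     deferredReads.delayRead(DeferredRead(&SomeReader::TryRead, this, read));
 *
 * The read is resumed later by kickReads() or flushReads(), or marked
 * cancelled by CloseHandler() if the connection closes first.
 */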
1968
1969void
1970DeferredReadManager::CloseHandler(const CommCloseCbParams &params)
1971{
1972 if (!cbdataReferenceValid(params.data))
1973 return;
1974
1975 CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)params.data;
1976
1977 temp->element.closer = NULL;
1978 temp->element.markCancelled();
1979}
1980
1981DeferredRead
1982DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
1983{
1984 assert (!deferredReads.empty());
1985
1986 DeferredRead &read = deferredReads.head->element;
1987
1988 // NOTE: at this point the connection has been paused/stalled for an unknown
1989 // amount of time. We must re-validate that it is active and usable.
1990
1991 // If the connection has already been closed, cancel this read.
1992 if (!Comm::IsConnOpen(read.theRead.conn)) {
1993 if (read.closer != NULL) {
1994 read.closer->cancel("Connection closed before.");
1995 read.closer = NULL;
1996 }
1997 read.markCancelled();
1998 }
1999
2000 if (!read.cancelled) {
2001 comm_remove_close_handler(read.theRead.conn->fd, read.closer);
2002 read.closer = NULL;
2003 }
2004
2005 DeferredRead result = deferredReads.pop_front();
2006
2007 return result;
2008}
2009
2010void
2011DeferredReadManager::kickReads(int const count)
2012{
2013 /* if we had CbDataList::size() we could consolidate this and flushReads */
2014
2015 if (count < 1) {
2016 flushReads();
2017 return;
2018 }
2019
2020 size_t remaining = count;
2021
2022 while (!deferredReads.empty() && remaining) {
2023 DeferredRead aRead = popHead(deferredReads);
2024 kickARead(aRead);
2025
2026 if (!aRead.cancelled)
2027 --remaining;
2028 }
2029}
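
/*
 * Callers typically resume a bounded number of parked reads once capacity
 * becomes available again; for example (quota is a hypothetical value
 * computed by the caller):
 *
 *     deferredReads.kickReads(quota); // a count below 1 flushes them all
 */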
2030
2031void
2032DeferredReadManager::flushReads()
2033{
2034 CbDataListContainer<DeferredRead> reads;
2035 reads = deferredReads;
2036 deferredReads = CbDataListContainer<DeferredRead>();
2037
2038 // XXX: For fairness this SHOULD randomize the order
2039 while (!reads.empty()) {
2040 DeferredRead aRead = popHead(reads);
2041 kickARead(aRead);
2042 }
2043}
2044
2045void
2046DeferredReadManager::kickARead(DeferredRead const &aRead)
2047{
2048 if (aRead.cancelled)
2049 return;
2050
2051 if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
2052 return;
2053
2054 debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);
2055
2056 aRead.theReader(aRead.theContext, aRead.theRead);
2057}
2058
2059void
2060DeferredRead::markCancelled()
2061{
2062 cancelled = true;
2063}
2064
2065int
2066CommSelectEngine::checkEvents(int timeout)
2067{
2068 static time_t last_timeout = 0;
2069
2070 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
2071 if (squid_curtime > last_timeout) {
2072 last_timeout = squid_curtime;
2073 checkTimeouts();
2074 }
2075
2076 switch (Comm::DoSelect(timeout)) {
2077
2078 case COMM_OK:
2079
2080 case COMM_TIMEOUT:
2081 return 0;
2082
2083 case COMM_IDLE:
2084
2085 case COMM_SHUTDOWN:
2086 return EVENT_IDLE;
2087
2088 case COMM_ERROR:
2089 return EVENT_ERROR;
2090
2091 default:
2092 fatal_dump("comm.cc: Internal error -- this should never happen.");
2093 return EVENT_ERROR;
2094 }
2095}
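
/*
 * This engine is normally driven by the main event loop rather than called
 * directly; a minimal sketch, assuming an EventLoop instance named mainLoop:
 *
 *     CommSelectEngine commEngine;
 *     mainLoop.registerEngine(&commEngine); // the loop calls checkEvents()
 *
 * A zero return means I/O was serviced (or the wait simply timed out),
 * EVENT_IDLE means there was nothing to do, and EVENT_ERROR reports a
 * select failure.
 */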
2096
2097/// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
2098int
2099comm_open_uds(int sock_type,
2100 int proto,
2101 struct sockaddr_un* addr,
2102 int flags)
2103{
2104 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
2105
2106 int new_socket;
2107
2108 PROF_start(comm_open);
2109 /* Create the UNIX-domain socket. */
2110 ++ statCounter.syscalls.sock.sockets;
2111
2112 /* Setup the socket addrinfo details for use */
2113 struct addrinfo AI;
2114 AI.ai_flags = 0;
2115 AI.ai_family = PF_UNIX;
2116 AI.ai_socktype = sock_type;
2117 AI.ai_protocol = proto;
2118 AI.ai_addrlen = SUN_LEN(addr);
2119 AI.ai_addr = (sockaddr*)addr;
2120 AI.ai_canonname = NULL;
2121 AI.ai_next = NULL;
2122
2123 debugs(50, 3, HERE << "Attempting to open socket for: " << addr->sun_path);
2124
2125 if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
2126 /* Increase the number of reserved FDs if calls to socket()
2127 * are failing because the open file table is full. This
2128 * limits the number of simultaneous clients. */
2129
2130 if (limitError(errno)) {
2131 debugs(50, DBG_IMPORTANT, HERE << "socket failure: " << xstrerror());
2132 fdAdjustReserved();
2133 } else {
2134 debugs(50, DBG_CRITICAL, HERE << "socket failure: " << xstrerror());
2135 }
2136
2137 PROF_stop(comm_open);
2138 return -1;
2139 }
2140
2141 debugs(50, 3, HERE << "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);
2142
2143 /* update fdstat */
2144 debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");
2145
2146 assert(!isOpen(new_socket));
2147 fd_open(new_socket, FD_MSGHDR, NULL);
2148
2149 fdd_table[new_socket].close_file = NULL;
2150
2151 fdd_table[new_socket].close_line = 0;
2152
2153 fd_table[new_socket].sock_family = AI.ai_family;
2154
2155 if (!(flags & COMM_NOCLOEXEC))
2156 commSetCloseOnExec(new_socket);
2157
2158 if (flags & COMM_REUSEADDR)
2159 commSetReuseAddr(new_socket);
2160
2161 if (flags & COMM_NONBLOCKING) {
2162 if (commSetNonBlocking(new_socket) != COMM_OK) {
2163 comm_close(new_socket);
2164 PROF_stop(comm_open);
2165 return -1;
2166 }
2167 }
2168
2169 if (flags & COMM_DOBIND) {
2170 if (commBind(new_socket, AI) != COMM_OK) {
2171 comm_close(new_socket);
2172 PROF_stop(comm_open);
2173 return -1;
2174 }
2175 }
2176
2177#ifdef TCP_NODELAY
2178 if (sock_type == SOCK_STREAM)
2179 commSetTcpNoDelay(new_socket);
2180
2181#endif
2182
2183 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
2184 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
2185
2186 PROF_stop(comm_open);
2187
2188 return new_socket;
2189}
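
/*
 * Usage sketch (illustrative only; the socket path is hypothetical): open a
 * non-blocking datagram UDS and bind it so that a peer process can send us
 * FD_MSGHDR messages:
 *
 *     struct sockaddr_un addr;
 *     memset(&addr, 0, sizeof(addr));
 *     addr.sun_family = AF_UNIX;
 *     xstrncpy(addr.sun_path, "/var/run/squid/example.ipc", sizeof(addr.sun_path));
 *
 *     const int fd = comm_open_uds(SOCK_DGRAM, 0, &addr, COMM_NONBLOCKING | COMM_DOBIND);
 *     if (fd < 0)
 *         debugs(54, DBG_IMPORTANT, "could not open UDS socket: " << xstrerror());
 */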