]> git.ipfire.org Git - thirdparty/squid.git/blob - src/comm.cc
Merged from trunk
[thirdparty/squid.git] / src / comm.cc
1 /*
2 * DEBUG: section 05 Socket Functions
3 * AUTHOR: Harvest Derived
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 *
32 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
33 */
34
35 #include "squid.h"
36 #include "ClientInfo.h"
37 #include "comm/AcceptLimiter.h"
38 #include "comm/comm_internal.h"
39 #include "comm/Connection.h"
40 #include "comm/IoCallback.h"
41 #include "comm/Loops.h"
42 #include "comm/TcpAcceptor.h"
43 #include "comm/Write.h"
44 #include "CommRead.h"
45 #include "compat/cmsg.h"
46 #include "DescriptorSet.h"
47 #include "event.h"
48 #include "fd.h"
49 #include "fde.h"
50 #include "globals.h"
51 #include "icmp/net_db.h"
52 #include "ip/Intercept.h"
53 #include "ip/QosConfig.h"
54 #include "ip/tools.h"
55 #include "pconn.h"
56 #include "profiler/Profiler.h"
57 #include "SBuf.h"
58 #include "SquidConfig.h"
59 #include "StatCounters.h"
60 #include "StoreIOBuffer.h"
61 #include "tools.h"
62
63 #if USE_OPENSSL
64 #include "ssl/support.h"
65 #endif
66
67 #include <cerrno>
68 #include <cmath>
69 #if _SQUID_CYGWIN_
70 #include <sys/ioctl.h>
71 #endif
72 #ifdef HAVE_NETINET_TCP_H
73 #include <netinet/tcp.h>
74 #endif
75 #if HAVE_SYS_UN_H
76 #include <sys/un.h>
77 #endif
78
79 /*
80 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
81 */
82
83 static void commStopHalfClosedMonitor(int fd);
84 static IOCB commHalfClosedReader;
85 static void comm_init_opened(const Comm::ConnectionPointer &conn, tos_t tos, nfmark_t nfmark, const char *note, struct addrinfo *AI);
86 static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
87
88 #if USE_DELAY_POOLS
89 CBDATA_CLASS_INIT(CommQuotaQueue);
90
91 static void commHandleWriteHelper(void * data);
92 #endif
93
94 /* STATIC */
95
96 static DescriptorSet *TheHalfClosed = NULL; /// the set of half-closed FDs
97 static bool WillCheckHalfClosed = false; /// true if check is scheduled
98 static EVH commHalfClosedCheck;
99 static void commPlanHalfClosedCheck();
100
101 static comm_err_t commBind(int s, struct addrinfo &);
102 static void commSetReuseAddr(int);
103 static void commSetNoLinger(int);
104 #ifdef TCP_NODELAY
105 static void commSetTcpNoDelay(int);
106 #endif
107 static void commSetTcpRcvbuf(int, int);
108
109 fd_debug_t *fdd_table = NULL;
110
111 bool
112 isOpen(const int fd)
113 {
114 return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
115 }
116
/**
 * Attempt a read
 *
 * If the read attempt succeeds or fails, call the callback.
 * Else, wait for another IO notification.
 */
void
commHandleRead(int fd, void *data)
{
    Comm::IoCallback *ccb = (Comm::IoCallback *) data;

    // data must be the registered read callback for this FD, still armed
    assert(data == COMMIO_FD_READCB(fd));
    assert(ccb->active());
    /* Attempt a read */
    ++ statCounter.syscalls.sock.reads;
    errno = 0;
    int retval;
    if (ccb->buf) {
        // caller supplied a raw char buffer: read straight into it
        retval = FD_READ_METHOD(fd, ccb->buf, ccb->size);
        debugs(5, 3, "char FD " << fd << ", size " << ccb->size << ", retval " << retval << ", errno " << errno);
    } else {
        // SBuf-based read: read into the buffer's raw space, then append
        assert(ccb->buf2 != NULL);
        SBuf::size_type sz = ccb->buf2->spaceSize();
        char *buf = ccb->buf2->rawSpace(sz);
        retval = FD_READ_METHOD(fd, buf, sz-1); // blocking synchronous read(2)
        if (retval > 0) {
            ccb->buf2->append(buf, retval);
        }
        debugs(5, 3, "SBuf FD " << fd << ", size " << sz << ", retval " << retval << ", errno " << errno);
    }

    // hard error (not EAGAIN/EINTR-style): report COMM_ERROR to the caller
    if (retval < 0 && !ignoreErrno(errno)) {
        debugs(5, 3, "comm_read_try: scheduling COMM_ERROR");
        ccb->offset = 0;
        ccb->finish(COMM_ERROR, errno);
        return;
    };

    /* See if we read anything */
    /* Note - read 0 == socket EOF, which is a valid read */
    if (retval >= 0) {
        fd_bytes(fd, retval, FD_READ);
        ccb->offset = retval;
        ccb->finish(COMM_OK, errno);
        return;
    }

    /* Nope, register for some more IO */
    Comm::SetSelect(fd, COMM_SELECT_READ, commHandleRead, data, 0);
}
167
/**
 * Queue a read. handler/handler_data are called when the read
 * completes, on error, or on file descriptor close.
 */
void
comm_read(const Comm::ConnectionPointer &conn, char *buf, int size, AsyncCall::Pointer &callback)
{
    debugs(5, 5, "comm_read, queueing read for " << conn << "; asynCall " << callback);

    /* Make sure we are open and not closing */
    assert(Comm::IsConnOpen(conn));
    assert(!fd_table[conn->fd].closing());
    Comm::IoCallback *ccb = COMMIO_FD_READCB(conn->fd);

    // Make sure we are either not reading or just passively monitoring.
    // Active/passive conflicts are OK and simply cancel passive monitoring.
    if (ccb->active()) {
        // if the assertion below fails, we have an active comm_read conflict
        assert(fd_table[conn->fd].halfClosedReader != NULL);
        commStopHalfClosedMonitor(conn->fd);
        assert(!ccb->active());
    }
    ccb->conn = conn;

    /* Queue the read */
    // register the callback and arm select() for readiness notification
    ccb->setCallback(Comm::IOCB_READ, callback, (char *)buf, NULL, size);
    Comm::SetSelect(conn->fd, COMM_SELECT_READ, commHandleRead, ccb, 0);
}
196
/**
 * Queue a read. handler/handler_data are called when the read
 * completes, on error, or on file descriptor close.
 *
 * SBuf overload: read results are appended to \a buf by commHandleRead().
 */
void
comm_read(const Comm::ConnectionPointer &conn, SBuf &buf, AsyncCall::Pointer &callback)
{
    debugs(5, 5, "comm_read, queueing read for " << conn << "; asynCall " << callback);

    /* Make sure we are open and not closing */
    assert(Comm::IsConnOpen(conn));
    assert(!fd_table[conn->fd].closing());
    Comm::IoCallback *ccb = COMMIO_FD_READCB(conn->fd);

    // Make sure we are either not reading or just passively monitoring.
    // Active/passive conflicts are OK and simply cancel passive monitoring.
    if (ccb->active()) {
        // if the assertion below fails, we have an active comm_read conflict
        assert(fd_table[conn->fd].halfClosedReader != NULL);
        commStopHalfClosedMonitor(conn->fd);
        assert(!ccb->active());
    }
    ccb->conn = conn;
    // remember the SBuf; commHandleRead() reads into it when buf2 is set
    ccb->buf2 = &buf;

    /* Queue the read */
    ccb->setCallback(Comm::IOCB_READ, callback, NULL, NULL, buf.spaceSize());
    Comm::SetSelect(conn->fd, COMM_SELECT_READ, commHandleRead, ccb, 0);
}
226
/**
 * Empty the read buffers
 *
 * This is a magical routine that empties the read buffers.
 * Under some platforms (Linux) if a buffer has data in it before
 * you call close(), the socket will hang and take quite a while
 * to timeout.
 */
static void
comm_empty_os_read_buffers(int fd)
{
#if _SQUID_LINUX_
    /* prevent those nasty RST packets */
    char buf[SQUID_TCP_SO_RCVBUF];

    // only safe on nonblocking sockets; the loop stops at EAGAIN/EOF
    if (fd_table[fd].flags.nonblocking) {
        while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
    }
#endif
}
247
/**
 * Return whether the FD has a pending completed callback.
 * NP: does not work.
 *
 * Always returns false (see XXX below); kept for interface compatibility.
 */
int
comm_has_pending_read_callback(int fd)
{
    assert(isOpen(fd));
    // XXX: We do not know whether there is a read callback scheduled.
    // This is used for pconn management that should probably be more
    // tightly integrated into comm to minimize the chance that a
    // closing pconn socket will be used for a new transaction.
    return false;
}
262
// Does comm check this fd for read readiness?
// Note that when comm is not monitoring, there can be a pending callback
// call, which may resume comm monitoring once fired.
bool
comm_monitors_read(int fd)
{
    assert(isOpen(fd) && COMMIO_FD_READCB(fd));
    // Being active is usually the same as monitoring because we always
    // start monitoring the FD when we configure Comm::IoCallback for I/O
    // and we usually configure Comm::IoCallback for I/O when we starting
    // monitoring a FD for reading.
    return COMMIO_FD_READCB(fd)->active();
}
276
/**
 * Cancel a pending read. Assert that we have the right parameters,
 * and that there are no pending read events!
 *
 * XXX: We do not assert that there are no pending read events and
 * with async calls it becomes even more difficult.
 * The whole interface should be reworked to do callback->cancel()
 * instead of searching for places where the callback may be stored and
 * updating the state of those places.
 *
 * AHC Don't call the comm handlers?
 */
void
comm_read_cancel(int fd, IOCB *callback, void *data)
{
    // closed FDs have no read state to cancel
    if (!isOpen(fd)) {
        debugs(5, 4, "comm_read_cancel fails: FD " << fd << " closed");
        return;
    }

    Comm::IoCallback *cb = COMMIO_FD_READCB(fd);
    // TODO: is "active" == "monitors FD"?
    if (!cb->active()) {
        debugs(5, 4, "comm_read_cancel fails: FD " << fd << " inactive");
        return;
    }

    // this legacy entry point only handles function-pointer style callbacks
    typedef CommCbFunPtrCallT<CommIoCbPtrFun> Call;
    Call *call = dynamic_cast<Call*>(cb->callback.getRaw());
    if (!call) {
        debugs(5, 4, "comm_read_cancel fails: FD " << fd << " lacks callback");
        return;
    }

    call->cancel("old comm_read_cancel");

    typedef CommIoCbParams Params;
    const Params &params = GetCommParams<Params>(cb->callback);

    /* Ok, we can be reasonably sure we won't lose any data here! */
    // sanity: the stored callback must match what the caller thinks is queued
    assert(call->dialer.handler == callback);
    assert(params.data == data);

    /* Delete the callback */
    cb->cancel("old comm_read_cancel");

    /* And the IO event */
    Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
}
326
/// AsyncCall flavour of comm_read_cancel(): cancels \a callback and, if it is
/// the read callback registered on \a fd, also clears the FD's read state.
void
comm_read_cancel(int fd, AsyncCall::Pointer &callback)
{
    // cancel the call first so it never fires, even if FD state is gone
    callback->cancel("comm_read_cancel");

    if (!isOpen(fd)) {
        debugs(5, 4, "comm_read_cancel fails: FD " << fd << " closed");
        return;
    }

    Comm::IoCallback *cb = COMMIO_FD_READCB(fd);

    if (!cb->active()) {
        debugs(5, 4, "comm_read_cancel fails: FD " << fd << " inactive");
        return;
    }

    AsyncCall::Pointer call = cb->callback;
    assert(call != NULL); // XXX: should never fail (active() checks for callback==NULL)

    /* Ok, we can be reasonably sure we won't lose any data here! */
    assert(call == callback);

    /* Delete the callback */
    cb->cancel("comm_read_cancel");

    /* And the IO event */
    Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
}
356
/**
 * synchronous wrapper around udp socket functions
 *
 * Receives up to \a len bytes into \a buf and stores the sender
 * address in \a from. Returns the recvfrom(2) result.
 */
int
comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
{
    ++ statCounter.syscalls.sock.recvfroms;
    debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
    struct addrinfo *AI = NULL;
    Ip::Address::InitAddrInfo(AI);
    int x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
    from = *AI;
    Ip::Address::FreeAddrInfo(AI);
    return x;
}
372
373 int
374 comm_udp_recv(int fd, void *buf, size_t len, int flags)
375 {
376 Ip::Address nul;
377 return comm_udp_recvfrom(fd, buf, len, flags, nul);
378 }
379
/// thin wrapper around send(2); returns the byte count or -1 with errno set
ssize_t
comm_udp_send(int s, const void *buf, size_t len, int flags)
{
    const ssize_t wrote = send(s, buf, len, flags);
    return wrote;
}
385
/// whether a write callback is still pending (unfinished) on this FD
bool
comm_has_incomplete_write(int fd)
{
    assert(isOpen(fd) && COMMIO_FD_WRITECB(fd));
    return COMMIO_FD_WRITECB(fd)->active();
}
392
393 /**
394 * Queue a write. handler/handler_data are called when the write fully
395 * completes, on error, or on file descriptor close.
396 */
397
/* Return the local port associated with fd. */
/// Looks up (and caches in fd_table) the local port of the socket;
/// returns 0 if the FD is already closed or getsockname() fails.
unsigned short
comm_local_port(int fd)
{
    Ip::Address temp;
    struct addrinfo *addr = NULL;
    fde *F = &fd_table[fd];

    /* If the fd is closed already, just return */

    if (!F->flags.open) {
        debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
        return 0;
    }

    // fast path: port already cached from a previous lookup or bind
    if (F->local_addr.port())
        return F->local_addr.port();

    // match the socket's address family before querying the OS
    if (F->sock_family == AF_INET)
        temp.setIPv4();

    Ip::Address::InitAddrInfo(addr);

    if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
        debugs(50, DBG_IMPORTANT, "comm_local_port: Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerror());
        Ip::Address::FreeAddrInfo(addr);
        return 0;
    }
    temp = *addr;

    Ip::Address::FreeAddrInfo(addr);

    if (F->local_addr.isAnyAddr()) {
        /* save the whole local address, not just the port. */
        F->local_addr = temp;
    } else {
        F->local_addr.port(temp.port());
    }

    debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.port() << "(family=" << F->sock_family << ")");
    return F->local_addr.port();
}
440
/// bind(2) wrapper: binds socket \a s to the address in \a inaddr and
/// returns COMM_OK on success, COMM_ERROR (with a log entry) on failure
static comm_err_t
commBind(int s, struct addrinfo &inaddr)
{
    ++ statCounter.syscalls.sock.binds;

    if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
        debugs(50, 6, "commBind: bind socket FD " << s << " to " << fd_table[s].local_addr);
        return COMM_OK;
    }

    debugs(50, 0, "commBind: Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerror());

    return COMM_ERROR;
}
455
456 /**
457 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
458 * is OR of flags specified in comm.h. Defaults TOS
459 */
460 int
461 comm_open(int sock_type,
462 int proto,
463 Ip::Address &addr,
464 int flags,
465 const char *note)
466 {
467 return comm_openex(sock_type, proto, addr, flags, 0, 0, note);
468 }
469
470 void
471 comm_open_listener(int sock_type,
472 int proto,
473 Comm::ConnectionPointer &conn,
474 const char *note)
475 {
476 /* all listener sockets require bind() */
477 conn->flags |= COMM_DOBIND;
478
479 /* attempt native enabled port. */
480 conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, 0, 0, note);
481 }
482
483 int
484 comm_open_listener(int sock_type,
485 int proto,
486 Ip::Address &addr,
487 int flags,
488 const char *note)
489 {
490 int sock = -1;
491
492 /* all listener sockets require bind() */
493 flags |= COMM_DOBIND;
494
495 /* attempt native enabled port. */
496 sock = comm_openex(sock_type, proto, addr, flags, 0, 0, note);
497
498 return sock;
499 }
500
/// true when anErrno indicates file-descriptor table exhaustion
static bool
limitError(int const anErrno)
{
    switch (anErrno) {
    case ENFILE:
    case EMFILE:
        return true;
    default:
        return false;
    }
}
506
/// toggle the IPV6_V6ONLY socket option on \a fd;
/// \a tos is the on/off value (the name is historic, not a TOS byte)
void
comm_set_v6only(int fd, int tos)
{
#ifdef IPV6_V6ONLY
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
        debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerror());
    }
#else
    debugs(50, 0, "WARNING: comm_open: setsockopt(IPV6_V6ONLY) not supported on this platform");
#endif /* sockopt */
}
518
/**
 * Set the socket option required for TPROXY spoofing for:
 * - Linux TPROXY v4 support,
 * - OpenBSD divert-to support,
 * - FreeBSD IPFW TPROXY v4 support.
 */
void
comm_set_transparent(int fd)
{
#if _SQUID_LINUX_ && defined(IP_TRANSPARENT) // Linux
# define soLevel SOL_IP
# define soFlag IP_TRANSPARENT
    // Linux does not need privilege elevation for IP_TRANSPARENT here
    bool doneSuid = false;

#elif defined(SO_BINDANY) // OpenBSD 4.7+ and NetBSD with PF
# define soLevel SOL_SOCKET
# define soFlag SO_BINDANY
    enter_suid();
    bool doneSuid = true;

#elif defined(IP_BINDANY) // FreeBSD with IPFW
# define soLevel IPPROTO_IP
# define soFlag IP_BINDANY
    enter_suid();
    bool doneSuid = true;

#else
    debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(TPROXY) not supported on this platform");
#endif /* sockopt */

#if defined(soLevel) && defined(soFlag)
    // NOTE: "tos" here is just the boolean option value, not a TOS byte
    int tos = 1;
    if (setsockopt(fd, soLevel, soFlag, (char *) &tos, sizeof(int)) < 0) {
        debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(TPROXY) on FD " << fd << ": " << xstrerror());
    } else {
        /* mark the socket as having transparent options */
        fd_table[fd].flags.transparent = true;
    }
    // drop elevated privileges if we raised them above
    if (doneSuid)
        leave_suid();
#endif
}
561
/**
 * Create a socket. Default is blocking, stream (TCP) socket. IO_TYPE
 * is OR of flags specified in defines.h:COMM_*
 *
 * Returns the new socket FD, or -1 on failure. On success the FD has
 * been registered with the fd tables and had TOS/nfmark/v6only/flags
 * applied as requested.
 */
int
comm_openex(int sock_type,
            int proto,
            Ip::Address &addr,
            int flags,
            tos_t tos,
            nfmark_t nfmark,
            const char *note)
{
    int new_socket;
    struct addrinfo *AI = NULL;

    PROF_start(comm_open);
    /* Create socket for accepting new connections. */
    ++ statCounter.syscalls.sock.sockets;

    /* Setup the socket addrinfo details for use */
    addr.getAddrInfo(AI);
    AI->ai_socktype = sock_type;
    AI->ai_protocol = proto;

    debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );

    new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);

    /* under IPv6 there is the possibility IPv6 is present but disabled. */
    /* try again as IPv4-native if possible */
    if ( new_socket < 0 && Ip::EnableIpv6 && addr.isIPv6() && addr.setIPv4() ) {
        /* attempt to open this IPv4-only. */
        Ip::Address::FreeAddrInfo(AI);
        /* Setup the socket addrinfo details for use */
        addr.getAddrInfo(AI);
        AI->ai_socktype = sock_type;
        AI->ai_protocol = proto;
        debugs(50, 3, "comm_openex: Attempt fallback open socket for: " << addr );
        new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
        debugs(50, 2, HERE << "attempt open " << note << " socket on: " << addr);
    }

    if (new_socket < 0) {
        /* Increase the number of reserved fd's if calls to socket()
         * are failing because the open file table is full. This
         * limits the number of simultaneous clients */

        if (limitError(errno)) {
            debugs(50, DBG_IMPORTANT, "comm_open: socket failure: " << xstrerror());
            fdAdjustReserved();
        } else {
            debugs(50, DBG_CRITICAL, "comm_open: socket failure: " << xstrerror());
        }

        Ip::Address::FreeAddrInfo(AI);

        PROF_stop(comm_open);
        return -1;
    }

    // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
    Comm::ConnectionPointer conn = new Comm::Connection;
    conn->local = addr;
    conn->fd = new_socket;

    debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );

    /* set TOS if needed */
    if (tos)
        Ip::Qos::setSockTos(conn, tos);

    /* set netfilter mark if needed */
    if (nfmark)
        Ip::Qos::setSockNfmark(conn, nfmark);

    // split-stack builds: keep v6 sockets v6-only
    if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.isIPv6() )
        comm_set_v6only(conn->fd, 1);

    /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
    /* Other OS may have this administratively disabled for general use. Same deal. */
    if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.isIPv6() )
        comm_set_v6only(conn->fd, 0);

    comm_init_opened(conn, tos, nfmark, note, AI);
    new_socket = comm_apply_flags(conn->fd, addr, flags, AI);

    Ip::Address::FreeAddrInfo(AI);

    PROF_stop(comm_open);

    // XXX transition only. prevent conn from closing the new FD on function exit.
    conn->fd = -1;
    return new_socket;
}
657
/// update FD tables after a local or remote (IPC) comm_openex();
/// records the new socket in fd_open()/fdd_table and stores local address,
/// TOS, nfmark, and socket family in the fde entry
void
comm_init_opened(const Comm::ConnectionPointer &conn,
                 tos_t tos,
                 nfmark_t nfmark,
                 const char *note,
                 struct addrinfo *AI)
{
    assert(Comm::IsConnOpen(conn));
    assert(AI);

    /* update fdstat */
    debugs(5, 5, HERE << conn << " is a new socket");

    assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
    fd_open(conn->fd, FD_SOCKET, note);

    // reset close-tracking debug info for the reused table slot
    fdd_table[conn->fd].close_file = NULL;
    fdd_table[conn->fd].close_line = 0;

    fde *F = &fd_table[conn->fd];
    F->local_addr = conn->local;
    F->tosToServer = tos;

    F->nfmarkToServer = nfmark;

    F->sock_family = AI->ai_family;
}
686
/// apply flags after a local comm_open*() call;
/// returns new_socket or -1 on error
static int
comm_apply_flags(int new_socket,
                 Ip::Address &addr,
                 int flags,
                 struct addrinfo *AI)
{
    assert(new_socket >= 0);
    assert(AI);
    const int sock_type = AI->ai_socktype;

    // close-on-exec is the default unless the caller opted out
    if (!(flags & COMM_NOCLOEXEC))
        commSetCloseOnExec(new_socket);

    if ((flags & COMM_REUSEADDR))
        commSetReuseAddr(new_socket);

    if (addr.port() > (unsigned short) 0) {
#if _SQUID_WINDOWS_
        if (sock_type != SOCK_DGRAM)
#endif
            commSetNoLinger(new_socket);

        if (opt_reuseaddr)
            commSetReuseAddr(new_socket);
    }

    /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
    if ((flags & COMM_TRANSPARENT)) {
        comm_set_transparent(new_socket);
    }

    if ( (flags & COMM_DOBIND) || addr.port() > 0 || !addr.isAnyAddr() ) {
        if ( !(flags & COMM_DOBIND) && addr.isAnyAddr() )
            debugs(5, DBG_IMPORTANT,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
        if ( addr.isNoAddr() )
            debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");

        // on bind failure the new socket is closed and -1 returned
        if (commBind(new_socket, *AI) != COMM_OK) {
            comm_close(new_socket);
            return -1;
        }
    }

    if (flags & COMM_NONBLOCKING)
        if (commSetNonBlocking(new_socket) == COMM_ERROR) {
            comm_close(new_socket);
            return -1;
        }

#ifdef TCP_NODELAY
    if (sock_type == SOCK_STREAM)
        commSetTcpNoDelay(new_socket);

#endif

    if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
        commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);

    return new_socket;
}
749
/// import a socket opened by another process (IPC): register it in the fd
/// tables and mirror the fde flags that comm_apply_flags() would have set
/// locally -- without re-issuing the setsockopt/fcntl calls themselves
void
comm_import_opened(const Comm::ConnectionPointer &conn,
                   const char *note,
                   struct addrinfo *AI)
{
    debugs(5, 2, HERE << conn);
    assert(Comm::IsConnOpen(conn));
    assert(AI);

    comm_init_opened(conn, 0, 0, note, AI);

    if (!(conn->flags & COMM_NOCLOEXEC))
        fd_table[conn->fd].flags.close_on_exec = true;

    if (conn->local.port() > (unsigned short) 0) {
#if _SQUID_WINDOWS_
        if (AI->ai_socktype != SOCK_DGRAM)
#endif
            fd_table[conn->fd].flags.nolinger = true;
    }

    if ((conn->flags & COMM_TRANSPARENT))
        fd_table[conn->fd].flags.transparent = true;

    if (conn->flags & COMM_NONBLOCKING)
        fd_table[conn->fd].flags.nonblocking = true;

#ifdef TCP_NODELAY
    if (AI->ai_socktype == SOCK_STREAM)
        fd_table[conn->fd].flags.nodelay = true;
#endif

    /* no fd_table[fd].flags. updates needed for these conditions:
     * if ((flags & COMM_REUSEADDR)) ...
     * if ((flags & COMM_DOBIND) ...) ...
     */
}
787
788 // XXX: now that raw-FD timeouts are only unset for pipes and files this SHOULD be a no-op.
789 // With handler already unset. Leaving this present until that can be verified for all code paths.
790 void
791 commUnsetFdTimeout(int fd)
792 {
793 debugs(5, 3, HERE << "Remove timeout for FD " << fd);
794 assert(fd >= 0);
795 assert(fd < Squid_MaxFD);
796 fde *F = &fd_table[fd];
797 assert(F->flags.open);
798
799 F->timeoutHandler = NULL;
800 F->timeout = 0;
801 }
802
/// (Re)set the inactivity timeout on an open connection.
/// A negative \a timeout clears both the deadline and its handler;
/// otherwise the deadline becomes now+timeout and, when given,
/// \a callback replaces the current timeout handler.
/// Returns the resulting absolute deadline (0 when cleared).
int
commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
{
    debugs(5, 3, HERE << conn << " timeout " << timeout);
    assert(Comm::IsConnOpen(conn));
    assert(conn->fd < Squid_MaxFD);
    fde *F = &fd_table[conn->fd];
    assert(F->flags.open);

    if (timeout < 0) {
        F->timeoutHandler = NULL;
        F->timeout = 0;
    } else {
        if (callback != NULL) {
            // attach the connection to the callback parameters before storing
            typedef CommTimeoutCbParams Params;
            Params &params = GetCommParams<Params>(callback);
            params.conn = conn;
            F->timeoutHandler = callback;
        }

        F->timeout = squid_curtime + (time_t) timeout;
    }

    return F->timeout;
}
828
829 int
830 commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
831 {
832 debugs(5, 3, HERE << "Remove timeout for " << conn);
833 AsyncCall::Pointer nil;
834 return commSetConnTimeout(conn, -1, nil);
835 }
836
/// Initiate or finish a (typically non-blocking) connect(2) on \a sock.
/// First call issues connect(); later calls poll SO_ERROR for completion.
/// Returns COMM_OK, COMM_INPROGRESS, COMM_ERR_PROTOCOL, or COMM_ERROR.
int
comm_connect_addr(int sock, const Ip::Address &address)
{
    comm_err_t status = COMM_OK;
    fde *F = &fd_table[sock];
    int x = 0;
    int err = 0;
    socklen_t errlen;
    struct addrinfo *AI = NULL;
    PROF_start(comm_connect_addr);

    assert(address.port() != 0);

    debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");

    /* Handle IPv6 over IPv4-only socket case.
     * this case must presently be handled here since the getAddrInfo asserts on bad mappings.
     * NP: because commResetFD is private to ConnStateData we have to return an error and
     * trust its handled properly.
     */
    if (F->sock_family == AF_INET && !address.isIPv4()) {
        errno = ENETUNREACH;
        return COMM_ERR_PROTOCOL;
    }

    /* Handle IPv4 over IPv6-only socket case.
     * This case is presently handled here as it's both a known case and it's
     * uncertain what error will be returned by the IPv6 stack in such case. It's
     * possible this will also be handled by the errno checks below after connect()
     * but needs carefull cross-platform verification, and verifying the address
     * condition here is simple.
     */
    if (!F->local_addr.isIPv4() && address.isIPv4()) {
        errno = ENETUNREACH;
        return COMM_ERR_PROTOCOL;
    }

    address.getAddrInfo(AI, F->sock_family);

    /* Establish connection. */
    errno = 0;

    if (!F->flags.called_connect) {
        // first attempt on this FD: issue the actual connect(2)
        F->flags.called_connect = true;
        ++ statCounter.syscalls.sock.connects;

        x = connect(sock, AI->ai_addr, AI->ai_addrlen);

        // XXX: ICAP code refuses callbacks during a pending comm_ call
        // Async calls development will fix this.
        if (x == 0) {
            // force the in-progress path even on immediate success (see XXX)
            x = -1;
            errno = EINPROGRESS;
        }

        if (x < 0) {
            debugs(5,5, "comm_connect_addr: sock=" << sock << ", addrinfo( " <<
                   " flags=" << AI->ai_flags <<
                   ", family=" << AI->ai_family <<
                   ", socktype=" << AI->ai_socktype <<
                   ", protocol=" << AI->ai_protocol <<
                   ", &addr=" << AI->ai_addr <<
                   ", addrlen=" << AI->ai_addrlen <<
                   " )" );
            debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerror());
            debugs(14,9, "connecting to: " << address );
        }
    } else {
        // subsequent attempt: check whether the pending connect completed
#if _SQUID_NEWSOS6_
        /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */

        connect(sock, AI->ai_addr, AI->ai_addrlen);

        if (errno == EINVAL) {
            errlen = sizeof(err);
            x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);

            if (x >= 0)
                errno = x;
        }

#else
        errlen = sizeof(err);

        x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);

        if (x == 0)
            errno = err;

#if _SQUID_SOLARIS_
        /*
         * Solaris 2.4's socket emulation doesn't allow you
         * to determine the error from a failed non-blocking
         * connect and just returns EPIPE. Create a fake
         * error message for connect. -- fenner@parc.xerox.com
         */
        if (x < 0 && errno == EPIPE)
            errno = ENOTCONN;

#endif
#endif

    }

    Ip::Address::FreeAddrInfo(AI);

    PROF_stop(comm_connect_addr);

    // map errno to a comm status code
    if (errno == 0 || errno == EISCONN)
        status = COMM_OK;
    else if (ignoreErrno(errno))
        status = COMM_INPROGRESS;
    else if (errno == EAFNOSUPPORT || errno == EINVAL)
        return COMM_ERR_PROTOCOL;
    else
        return COMM_ERROR;

    // record the peer's text address and port in the fde entry
    address.toStr(F->ipaddr, MAX_IPSTRLEN);

    F->remote_port = address.port(); /* remote_port is HS */

    if (status == COMM_OK) {
        debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connected to " << address);
    } else if (status == COMM_INPROGRESS) {
        debugs(5, DBG_DATA, "comm_connect_addr: FD " << sock << " connection pending");
    }

    return status;
}
966
/// schedule every registered (non-canceled) close handler for \a fd,
/// unlinking each from the fde's handler list as it goes
void
commCallCloseHandlers(int fd)
{
    fde *F = &fd_table[fd];
    debugs(5, 5, "commCallCloseHandlers: FD " << fd);

    while (F->closeHandler != NULL) {
        // pop the head of the close-handler list
        AsyncCall::Pointer call = F->closeHandler;
        F->closeHandler = call->Next();
        call->setNext(NULL);
        // If call is not canceled schedule it for execution else ignore it
        if (!call->canceled()) {
            debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
            ScheduleCallHere(call);
        }
    }
}
984
985 #if LINGERING_CLOSE
/// read handler used during lingering close: drain (and discard) any
/// remaining input, then finally close the descriptor
static void
commLingerClose(int fd, void *unused)
{
    LOCAL_ARRAY(char, buf, 1024);
    int n;
    n = FD_READ_METHOD(fd, buf, 1024);

    if (n < 0)
        debugs(5, 3, "commLingerClose: FD " << fd << " read: " << xstrerror());

    comm_close(fd);
}
998
/// timeout handler for a lingering close: stop waiting and close now
static void
commLingerTimeout(const FdeCbParams &params)
{
    debugs(5, 3, "commLingerTimeout: FD " << params.fd);
    comm_close(params.fd);
}
1005
1006 /*
1007 * Inspired by apache
1008 */
1009 void
1010 comm_lingering_close(int fd)
1011 {
1012 #if USE_OPENSSL
1013 if (fd_table[fd].ssl)
1014 ssl_shutdown_method(fd_table[fd].ssl);
1015 #endif
1016
1017 if (shutdown(fd, 1) < 0) {
1018 comm_close(fd);
1019 return;
1020 }
1021
1022 fd_note(fd, "lingering close");
1023 AsyncCall::Pointer call = commCbCall(5,4, "commLingerTimeout", FdeCbPtrFun(commLingerTimeout, NULL));
1024
1025 debugs(5, 3, HERE << "FD " << fd << " timeout " << timeout);
1026 assert(fd_table[fd].flags.open);
1027 if (callback != NULL) {
1028 typedef FdeCbParams Params;
1029 Params &params = GetCommParams<Params>(callback);
1030 params.fd = fd;
1031 fd_table[fd].timeoutHandler = callback;
1032 fd_table[fd].timeout = squid_curtime + static_cast<time_t>(10);
1033 }
1034
1035 Comm::SetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
1036 }
1037
1038 #endif
1039
/**
 * enable linger with time of 0 so that when the socket is
 * closed, TCP generates a RESET
 */
void
comm_reset_close(const Comm::ConnectionPointer &conn)
{
    struct linger L;
    L.l_onoff = 1;
    L.l_linger = 0;

    // SO_LINGER with zero timeout makes close() send RST instead of FIN
    if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
        debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerror());

    conn->close();
}
1056
// Legacy close function.
/// raw-FD variant of comm_reset_close(): arm zero-timeout SO_LINGER so the
/// subsequent comm_close() produces a TCP RST
void
old_comm_reset_close(int fd)
{
    struct linger L;
    L.l_onoff = 1;
    L.l_linger = 0;

    if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
        debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerror());

    comm_close(fd);
}
1070
#if USE_OPENSSL
/// begin the TLS shutdown sequence for a descriptor that is being closed
void
commStartSslClose(const FdeCbParams &params)
{
    // BUG FIX: was assert(&fd_table[params.fd].ssl) -- the address of the
    // member is always non-null, making the check a no-op. Assert the
    // ssl pointer itself, as ssl_shutdown_method() requires it to be set.
    assert(fd_table[params.fd].ssl);
    ssl_shutdown_method(fd_table[params.fd].ssl);
}
#endif
1079
void
comm_close_complete(const FdeCbParams &params)
{
    // Final, asynchronous stage of comm_close(): frees SSL state,
    // updates descriptor bookkeeping, and actually close()s the FD.
#if USE_OPENSSL
    fde *F = &fd_table[params.fd];

    // release the per-connection SSL state, if any
    if (F->ssl) {
        SSL_free(F->ssl);
        F->ssl = NULL;
    }

    // release a dynamically generated SSL context, if any
    if (F->dynamicSslContext) {
        SSL_CTX_free(F->dynamicSslContext);
        F->dynamicSslContext = NULL;
    }
#endif
    fd_close(params.fd); /* update fdstat */
    close(params.fd);

    ++ statCounter.syscalls.sock.closes;

    /* When one connection closes, give accept() a chance, if need be */
    Comm::AcceptLimiter::Instance().kick();
}
1104
1105 /*
1106 * Close the socket fd.
1107 *
1108 * + call write handlers with ERR_CLOSING
1109 * + call read handlers with ERR_CLOSING
1110 * + call closing handlers
1111 *
 * NOTE: COMM_ERR_CLOSING will NOT be called for CommReads sitting in a
 * DeferredReadManager.
1114 */
void
_comm_close(int fd, char const *file, int line)
{
    debugs(5, 3, "comm_close: start closing FD " << fd);
    assert(fd >= 0);
    assert(fd < Squid_MaxFD);

    fde *F = &fd_table[fd];
    // record who requested this close, to help diagnose double closes
    fdd_table[fd].close_file = file;
    fdd_table[fd].close_line = line;

    // a close is already in progress; do not start a second one
    if (F->closing())
        return;

    /* XXX: is this obsolete behind F->closing() ? */
    if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
        return;

    /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
    if (!isOpen(fd)) {
        debugs(50, DBG_IMPORTANT, HERE << "BUG 3556: FD " << fd << " is not an open socket.");
        // XXX: do we need to run close(fd) or fd_close(fd) here?
        return;
    }

    assert(F->type != FD_FILE);

    PROF_start(comm_close);

    // from now on F->closing() is true, making re-entrant calls bail above
    F->flags.close_request = true;

#if USE_OPENSSL
    if (F->ssl) {
        // start the SSL shutdown asynchronously, before comm_close_complete
        // (scheduled below) frees the SSL state
        AsyncCall::Pointer startCall=commCbCall(5,4, "commStartSslClose",
                                                FdeCbPtrFun(commStartSslClose, NULL));
        FdeCbParams &startParams = GetCommParams<FdeCbParams>(startCall);
        startParams.fd = fd;
        ScheduleCallHere(startCall);
    }
#endif

    // a half-closed fd may lack a reader, so we stop monitoring explicitly
    if (commHasHalfClosedMonitor(fd))
        commStopHalfClosedMonitor(fd);
    commUnsetFdTimeout(fd);

    // notify read/write handlers after canceling select reservations, if any
    if (COMMIO_FD_WRITECB(fd)->active()) {
        Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
        COMMIO_FD_WRITECB(fd)->finish(COMM_ERR_CLOSING, errno);
    }
    if (COMMIO_FD_READCB(fd)->active()) {
        Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
        COMMIO_FD_READCB(fd)->finish(COMM_ERR_CLOSING, errno);
    }

#if USE_DELAY_POOLS
    if (ClientInfo *clientInfo = F->clientInfo) {
        if (clientInfo->selectWaiting) {
            clientInfo->selectWaiting = false;
            // kick queue or it will get stuck as commWriteHandle is not called
            clientInfo->kickQuotaQueue();
        }
    }
#endif

    commCallCloseHandlers(fd);

    comm_empty_os_read_buffers(fd);

    AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
                                    FdeCbPtrFun(comm_close_complete, NULL));
    FdeCbParams &completeParams = GetCommParams<FdeCbParams>(completeCall);
    completeParams.fd = fd;
    // must use async call to wait for all callbacks
    // scheduled before comm_close() to finish
    ScheduleCallHere(completeCall);

    PROF_stop(comm_close);
}
1195
1196 /* Send a udp datagram to specified TO_ADDR. */
int
comm_udp_sendto(int fd,
                const Ip::Address &to_addr,
                const void *buf,
                int len)
{
    // Send one UDP datagram of len bytes to to_addr via fd.
    // Returns the number of bytes sent, or COMM_ERROR on failure.
    PROF_start(comm_udp_sendto);
    ++ statCounter.syscalls.sock.sendtos;

    debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
           " using FD " << fd << " using Port " << comm_local_port(fd) );

    // convert the destination address into a sockaddr for sendto()
    struct addrinfo *AI = NULL;
    to_addr.getAddrInfo(AI, fd_table[fd].sock_family);
    int x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
    Ip::Address::FreeAddrInfo(AI);

    PROF_stop(comm_udp_sendto);

    if (x >= 0)
        return x;

    // On Linux, suppress logging of ECONNREFUSED only; the unbraced `if`
    // below guards the debugs() statement across the #endif.
#if _SQUID_LINUX_

    if (ECONNREFUSED != errno)
#endif

        debugs(50, DBG_IMPORTANT, "comm_udp_sendto: FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerror());

    return COMM_ERROR;
}
1228
1229 void
1230 comm_add_close_handler(int fd, CLCB * handler, void *data)
1231 {
1232 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
1233 handler << ", data=" << data);
1234
1235 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
1236 CommCloseCbPtrFun(handler, data));
1237 comm_add_close_handler(fd, call);
1238 }
1239
1240 void
1241 comm_add_close_handler(int fd, AsyncCall::Pointer &call)
1242 {
1243 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
1244
1245 /*TODO:Check for a similar scheduled AsyncCall*/
1246 // for (c = fd_table[fd].closeHandler; c; c = c->next)
1247 // assert(c->handler != handler || c->data != data);
1248
1249 call->setNext(fd_table[fd].closeHandler);
1250
1251 fd_table[fd].closeHandler = call;
1252 }
1253
1254 // remove function-based close handler
void
comm_remove_close_handler(int fd, CLCB * handler, void *data)
{
    // Remove (and cancel) the first registered function-based close
    // handler matching both the handler pointer and its callback data.
    assert(isOpen(fd));
    /* Find handler in list */
    debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
           handler << ", data=" << data);

    AsyncCall::Pointer p, prev = NULL;
    for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
        typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
        const Call *call = dynamic_cast<const Call*>(p.getRaw());
        if (!call) // method callbacks have their own comm_remove_close_handler
            continue;

        typedef CommCloseCbParams Params;
        const Params &params = GetCommParams<Params>(p);
        // match on both the function pointer and its callback data
        if (call->dialer.handler == handler && params.data == data)
            break; /* This is our handler */
    }

    // comm_close removes all close handlers so our handler may be gone
    if (p != NULL) {
        p->dequeue(fd_table[fd].closeHandler, prev);
        p->cancel("comm_remove_close_handler");
    }
}
1282
1283 // remove method-based close handler
1284 void
1285 comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1286 {
1287 assert(isOpen(fd));
1288 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1289
1290 // comm_close removes all close handlers so our handler may be gone
1291 AsyncCall::Pointer p, prev = NULL;
1292 for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());
1293
1294 if (p != NULL)
1295 p->dequeue(fd_table[fd].closeHandler, prev);
1296 call->cancel("comm_remove_close_handler");
1297 }
1298
1299 static void
1300 commSetNoLinger(int fd)
1301 {
1302
1303 struct linger L;
1304 L.l_onoff = 0; /* off */
1305 L.l_linger = 0;
1306
1307 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1308 debugs(50, 0, "commSetNoLinger: FD " << fd << ": " << xstrerror());
1309
1310 fd_table[fd].flags.nolinger = true;
1311 }
1312
1313 static void
1314 commSetReuseAddr(int fd)
1315 {
1316 int on = 1;
1317
1318 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0)
1319 debugs(50, DBG_IMPORTANT, "commSetReuseAddr: FD " << fd << ": " << xstrerror());
1320 }
1321
static void
commSetTcpRcvbuf(int fd, int size)
{
    // Set both the receive and send kernel socket buffers to `size`.
    // (Despite the name, SO_SNDBUF is adjusted too.)
    if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0)
        debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
    if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0)
        debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
#ifdef TCP_WINDOW_CLAMP
    // where supported, also clamp the advertised TCP window to this size
    if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0)
        debugs(50, DBG_IMPORTANT, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
#endif
}
1334
int
commSetNonBlocking(int fd)
{
    // Put fd into non-blocking mode. Returns 0 on success, COMM_ERROR on
    // failure. Three platform paths share this body via preprocessor
    // conditionals: Windows (ioctl FIONBIO), Cygwin (FIONBIO for sockets
    // but fcntl for pipes), and POSIX (fcntl O_NONBLOCK/SQUID_NONBLOCK).
#if !_SQUID_WINDOWS_
    int flags;
    int dummy = 0;
#endif
#if _SQUID_WINDOWS_
    int nonblocking = TRUE;

#if _SQUID_CYGWIN_
    // on Cygwin, only real sockets take FIONBIO; pipes fall through to fcntl
    if (fd_table[fd].type != FD_PIPE) {
#endif

        if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
            debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror() << " " << fd_table[fd].type);
            return COMM_ERROR;
        }

#if _SQUID_CYGWIN_
    } else {
#endif
#endif
#if !_SQUID_WINDOWS_

        if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
            debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
            return COMM_ERROR;
        }

        if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
            debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror());
            return COMM_ERROR;
        }

#endif
#if _SQUID_CYGWIN_
    }
#endif
    fd_table[fd].flags.nonblocking = true;

    return 0;
}
1378
int
commUnsetNonBlocking(int fd)
{
    // Return fd to blocking mode. Returns 0 on success, COMM_ERROR on
    // failure. NOTE: the failure branch below is *shared* by both the
    // Windows (ioctlsocket) and POSIX (fcntl) paths via the #else split.
#if _SQUID_WINDOWS_
    int nonblocking = FALSE;

    if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
#else
    int flags;
    int dummy = 0;

    if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
        debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
        return COMM_ERROR;
    }

    // clear the non-blocking flag while preserving all others
    if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
#endif
        debugs(50, 0, "commUnsetNonBlocking: FD " << fd << ": " << xstrerror());
        return COMM_ERROR;
    }

    fd_table[fd].flags.nonblocking = false;
    return 0;
}
1404
1405 void
1406 commSetCloseOnExec(int fd)
1407 {
1408 #ifdef FD_CLOEXEC
1409 int flags;
1410 int dummy = 0;
1411
1412 if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1413 debugs(50, 0, "FD " << fd << ": fcntl F_GETFD: " << xstrerror());
1414 return;
1415 }
1416
1417 if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
1418 debugs(50, 0, "FD " << fd << ": set close-on-exec failed: " << xstrerror());
1419
1420 fd_table[fd].flags.close_on_exec = true;
1421
1422 #endif
1423 }
1424
1425 #ifdef TCP_NODELAY
1426 static void
1427 commSetTcpNoDelay(int fd)
1428 {
1429 int on = 1;
1430
1431 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0)
1432 debugs(50, DBG_IMPORTANT, "commSetTcpNoDelay: FD " << fd << ": " << xstrerror());
1433
1434 fd_table[fd].flags.nodelay = true;
1435 }
1436
1437 #endif
1438
1439 void
1440 commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1441 {
1442 int on = 1;
1443 #ifdef TCP_KEEPCNT
1444 if (timeout && interval) {
1445 int count = (timeout + interval - 1) / interval;
1446 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(on)) < 0)
1447 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1448 }
1449 #endif
1450 #ifdef TCP_KEEPIDLE
1451 if (idle) {
1452 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(on)) < 0)
1453 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1454 }
1455 #endif
1456 #ifdef TCP_KEEPINTVL
1457 if (interval) {
1458 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(on)) < 0)
1459 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1460 }
1461 #endif
1462 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0)
1463 debugs(5, DBG_IMPORTANT, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1464 }
1465
void
comm_init(void)
{
    // One-time initialization of the comm layer at startup.
    // Allocates the descriptor state table and the close-debugging table.
    fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
    fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));

    /* make sure the accept() socket FIFO delay queue exists */
    Comm::AcceptLimiter::Instance();

    // make sure the IO pending callback table exists
    Comm::CallbackTableInit();

    /* XXX account fd_table */
    /* Keep a few file descriptors free so that we don't run out of FD's
     * after accepting a client but before it opens a socket or a file.
     * Since Squid_MaxFD can be as high as several thousand, don't waste them */
    RESERVED_FD = min(100, Squid_MaxFD / 4);

    // set of FDs being monitored for half-closed-connection cleanup
    TheHalfClosed = new DescriptorSet;

    /* setup the select loop module */
    Comm::SelectLoopInit();
}
1489
1490 void
1491 comm_exit(void)
1492 {
1493 delete TheHalfClosed;
1494 TheHalfClosed = NULL;
1495
1496 safe_free(fd_table);
1497 safe_free(fdd_table);
1498 Comm::CallbackTableDestruct();
1499 }
1500
1501 #if USE_DELAY_POOLS
1502 // called when the queue is done waiting for the client bucket to fill
void
commHandleWriteHelper(void * data)
{
    // Scan the quota queue for the first still-relevant descriptor and
    // arm a write select() on it; drop stale queue entries along the way.
    CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
    assert(queue);

    ClientInfo *clientInfo = queue->clientInfo;
    // ClientInfo invalidates queue if freed, so if we got here through
    // eventAdd cbdata protections, everything should be valid and consistent
    assert(clientInfo);
    assert(clientInfo->hasQueue());
    assert(clientInfo->hasQueue(queue));
    assert(!clientInfo->selectWaiting);
    assert(clientInfo->eventWaiting);
    clientInfo->eventWaiting = false;

    do {
        // check that the head descriptor is still relevant
        const int head = clientInfo->quotaPeekFd();
        Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);

        // relevant = same client, same reservation, and not being closed
        if (fd_table[head].clientInfo == clientInfo &&
                clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
                !fd_table[head].closing()) {

            // wait for the head descriptor to become ready for writing
            Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
            clientInfo->selectWaiting = true;
            return;
        }

        clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
        // and continue looking for a relevant one
    } while (clientInfo->hasQueue());

    debugs(77,3, HERE << "emptied queue");
}
1540
1541 bool
1542 ClientInfo::hasQueue() const
1543 {
1544 assert(quotaQueue);
1545 return !quotaQueue->empty();
1546 }
1547
1548 bool
1549 ClientInfo::hasQueue(const CommQuotaQueue *q) const
1550 {
1551 assert(quotaQueue);
1552 return quotaQueue == q;
1553 }
1554
1555 /// returns the first descriptor to be dequeued
1556 int
1557 ClientInfo::quotaPeekFd() const
1558 {
1559 assert(quotaQueue);
1560 return quotaQueue->front();
1561 }
1562
1563 /// returns the reservation ID of the first descriptor to be dequeued
1564 unsigned int
1565 ClientInfo::quotaPeekReserv() const
1566 {
1567 assert(quotaQueue);
1568 return quotaQueue->outs + 1;
1569 }
1570
1571 /// queues a given fd, creating the queue if necessary; returns reservation ID
1572 unsigned int
1573 ClientInfo::quotaEnqueue(int fd)
1574 {
1575 assert(quotaQueue);
1576 return quotaQueue->enqueue(fd);
1577 }
1578
1579 /// removes queue head
1580 void
1581 ClientInfo::quotaDequeue()
1582 {
1583 assert(quotaQueue);
1584 quotaQueue->dequeue();
1585 }
1586
1587 void
1588 ClientInfo::kickQuotaQueue()
1589 {
1590 if (!eventWaiting && !selectWaiting && hasQueue()) {
1591 // wait at least a second if the bucket is empty
1592 const double delay = (bucketSize < 1.0) ? 1.0 : 0.0;
1593 eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1594 quotaQueue, delay, 0, true);
1595 eventWaiting = true;
1596 }
1597 }
1598
1599 /// calculates how much to write for a single dequeued client
int
ClientInfo::quotaForDequed()
{
    /* If we have multiple clients and give full bucketSize to each client then
     * clt1 may often get a lot more because clt1->clt2 time distance in the
     * select(2) callback order may be a lot smaller than cltN->clt1 distance.
     * We divide quota evenly to be more fair. */

    // start a new rationing round once the previous one is exhausted
    if (!rationedCount) {
        rationedCount = quotaQueue->size() + 1;

        // The delay in ration recalculation _temporary_ deprives clients from
        // bytes that should have trickled in while rationedCount was positive.
        refillBucket();

        // Rounding errors do not accumulate here, but we round down to avoid
        // negative bucket sizes after write with rationedCount=1.
        rationedQuota = static_cast<int>(floor(bucketSize/rationedCount));
        debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
               '*' << rationedCount);
    }

    --rationedCount;
    debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
           " rations remaining: " << rationedCount);

    // update 'last seen' time to prevent clientdb GC from dropping us
    last_seen = squid_curtime;
    return rationedQuota;
}
1630
1631 ///< adds bytes to the quota bucket based on the rate and passed time
void
ClientInfo::refillBucket()
{
    // Grow bucketSize by writeSpeedLimit bytes per second of elapsed
    // time since the last refill, capped at bucketSizeLimit.
    // all these times are in seconds, with double precision
    const double currTime = current_dtime;
    const double timePassed = currTime - prevTime;

    // Calculate allowance for the time passed. Use double to avoid
    // accumulating rounding errors for small intervals. For example, always
    // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
    const double gain = timePassed * writeSpeedLimit;

    debugs(77,5, HERE << currTime << " clt" << (const char*)hash.key << ": " <<
           bucketSize << " + (" << timePassed << " * " << writeSpeedLimit <<
           " = " << gain << ')');

    // to further combat error accumulation during micro updates,
    // quit before updating time if we cannot add at least one byte
    if (gain < 1.0)
        return;

    prevTime = currTime;

    // for "first" connections, drain initial fat before refilling but keep
    // updating prevTime to avoid bursts after the fat is gone
    if (bucketSize > bucketSizeLimit) {
        debugs(77,4, HERE << "not refilling while draining initial fat");
        return;
    }

    bucketSize += gain;

    // obey quota limits
    if (bucketSize > bucketSizeLimit)
        bucketSize = bucketSizeLimit;
}
1668
void
ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
{
    // Configure (or re-configure) traffic shaping for this client:
    // aWriteSpeedLimit is the sustained rate, anInitialBurst the starting
    // bucket fill, and aHighWatermark the maximum bucket size.
    debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
           " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
           " highwatermark=" << aHighWatermark);

    // set or possibly update traffic shaping parameters
    writeLimitingActive = true;
    writeSpeedLimit = aWriteSpeedLimit;
    bucketSizeLimit = aHighWatermark;

    // but some members should only be set once for a newly activated bucket
    if (firstTimeConnection) {
        firstTimeConnection = false;

        assert(!selectWaiting);
        assert(!quotaQueue);
        quotaQueue = new CommQuotaQueue(this);

        // seed the bucket with the initial burst allowance
        bucketSize = anInitialBurst;
        prevTime = current_dtime;
    }
}
1693
CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
        ins(0), outs(0)
{
    // a quota queue is meaningless without an owning client
    assert(clientInfo);
}
1699
CommQuotaQueue::~CommQuotaQueue()
{
    // only a detached queue may be destroyed
    assert(!clientInfo); // ClientInfo should clear this before destroying us
}
1704
1705 /// places the given fd at the end of the queue; returns reservation ID
1706 unsigned int
1707 CommQuotaQueue::enqueue(int fd)
1708 {
1709 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1710 ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1711 fds.push_back(fd);
1712 return ++ins;
1713 }
1714
1715 /// removes queue head
1716 void
1717 CommQuotaQueue::dequeue()
1718 {
1719 assert(!fds.empty());
1720 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1721 ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1722 fds.size());
1723 fds.pop_front();
1724 ++outs;
1725 }
1726 #endif
1727
1728 /*
1729 * hm, this might be too general-purpose for all the places we'd
1730 * like to use it.
1731 */
int
ignoreErrno(int ierrno)
{
    /* Returns 1 for transient conditions the caller should retry on,
     * 0 for real errors. */
    if (ierrno == EINPROGRESS || ierrno == EALREADY || ierrno == EINTR)
        return 1;

    if (ierrno == EWOULDBLOCK)
        return 1;

#if EAGAIN != EWOULDBLOCK
    if (ierrno == EAGAIN)
        return 1;
#endif

#ifdef ERESTART
    if (ierrno == ERESTART)
        return 1;
#endif

    return 0;
}
1761
1762 void
1763 commCloseAllSockets(void)
1764 {
1765 int fd;
1766 fde *F = NULL;
1767
1768 for (fd = 0; fd <= Biggest_FD; ++fd) {
1769 F = &fd_table[fd];
1770
1771 if (!F->flags.open)
1772 continue;
1773
1774 if (F->type != FD_SOCKET)
1775 continue;
1776
1777 if (F->flags.ipc) /* don't close inter-process sockets */
1778 continue;
1779
1780 if (F->timeoutHandler != NULL) {
1781 AsyncCall::Pointer callback = F->timeoutHandler;
1782 F->timeoutHandler = NULL;
1783 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1784 ScheduleCallHere(callback);
1785 } else {
1786 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1787 old_comm_reset_close(fd);
1788 }
1789 }
1790 }
1791
1792 static bool
1793 AlreadyTimedOut(fde *F)
1794 {
1795 if (!F->flags.open)
1796 return true;
1797
1798 if (F->timeout == 0)
1799 return true;
1800
1801 if (F->timeout > squid_curtime)
1802 return true;
1803
1804 return false;
1805 }
1806
1807 static bool
1808 writeTimedOut(int fd)
1809 {
1810 if (!COMMIO_FD_WRITECB(fd)->active())
1811 return false;
1812
1813 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1814 return false;
1815
1816 return true;
1817 }
1818
void
checkTimeouts(void)
{
    // Walk all descriptors, aborting stalled writes and firing (or
    // forcing) per-descriptor timeout handling on expired FDs.
    int fd;
    fde *F = NULL;
    AsyncCall::Pointer callback;

    for (fd = 0; fd <= Biggest_FD; ++fd) {
        F = &fd_table[fd];

        if (writeTimedOut(fd)) {
            // We have an active write callback and we are timed out
            debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
            Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
            COMMIO_FD_WRITECB(fd)->finish(COMM_ERROR, ETIMEDOUT);
            // NOTE: control falls through to the timeout handling below
            // even when F->timeout itself has not expired
        } else if (AlreadyTimedOut(F))
            continue;

        debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");

        if (F->timeoutHandler != NULL) {
            debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
            callback = F->timeoutHandler;
            F->timeoutHandler = NULL;
            ScheduleCallHere(callback);
        } else {
            // no handler registered: close the descriptor outright
            debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
            comm_close(fd);
        }
    }
}
1850
1851 /// Start waiting for a possibly half-closed connection to close
1852 // by scheduling a read callback to a monitoring handler that
1853 // will close the connection on read errors.
1854 void
1855 commStartHalfClosedMonitor(int fd)
1856 {
1857 debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
1858 assert(isOpen(fd) && !commHasHalfClosedMonitor(fd));
1859 (void)TheHalfClosed->add(fd); // could also assert the result
1860 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1861 }
1862
1863 static
1864 void
1865 commPlanHalfClosedCheck()
1866 {
1867 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1868 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1869 WillCheckHalfClosed = true;
1870 }
1871 }
1872
1873 /// iterates over all descriptors that may need half-closed tests and
1874 /// calls comm_read for those that do; re-schedules the check if needed
static
void
commHalfClosedCheck(void *)
{
    debugs(5, 5, HERE << "checking " << *TheHalfClosed);

    typedef DescriptorSet::const_iterator DSCI;
    const DSCI end = TheHalfClosed->end();
    for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
        Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
        c->fd = *i;
        if (!fd_table[c->fd].halfClosedReader) { // not reading already
            AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
                                                 CommIoCbPtrFun(&commHalfClosedReader, NULL));
            // a zero-length read that commHalfClosedReader uses to detect
            // close or error on the monitored connection
            comm_read(c, NULL, 0, call);
            fd_table[c->fd].halfClosedReader = call;
        } else
            c->fd = -1; // XXX: temporary. prevent c replacement erase closing listed FD
    }

    WillCheckHalfClosed = false; // as far as we know
    commPlanHalfClosedCheck(); // may need to check again
}
1898
1899 /// checks whether we are waiting for possibly half-closed connection to close
1900 // We are monitoring if the read handler for the fd is the monitoring handler.
1901 bool
1902 commHasHalfClosedMonitor(int fd)
1903 {
1904 return TheHalfClosed->has(fd);
1905 }
1906
1907 /// stop waiting for possibly half-closed connection to close
1908 static void
1909 commStopHalfClosedMonitor(int const fd)
1910 {
1911 debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
1912
1913 // cancel the read if one was scheduled
1914 AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1915 if (reader != NULL)
1916 comm_read_cancel(fd, reader);
1917 fd_table[fd].halfClosedReader = NULL;
1918
1919 TheHalfClosed->del(fd);
1920 }
1921
1922 /// I/O handler for the possibly half-closed connection monitoring code
static void
commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, comm_err_t flag, int, void *)
{
    // there cannot be more data coming in on half-closed connections
    assert(size == 0);
    assert(conn != NULL);
    assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read

    fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now

    // nothing to do if fd is being closed
    if (flag == COMM_ERR_CLOSING)
        return;

    // if read failed, close the connection
    if (flag != COMM_OK) {
        debugs(5, 3, HERE << "closing " << conn);
        conn->close();
        return;
    }

    // a successful zero-length read: the peer has not fully closed yet
    // continue waiting for close or error
    commPlanHalfClosedCheck(); // make sure this fd will be checked again
}
1947
// default: no connection, no buffer, no callback
CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}

// a fully specified read request on connection c
CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
    : conn(c), buf(buf_), len(len_), callback(callback_) {}

// default: an empty record; cancelled starts false
DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}

// a deferred read bound to its reader callback and opaque context data
DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
1956
DeferredReadManager::~DeferredReadManager()
{
    // deliver or cancel anything still queued before we disappear
    flushReads();
    assert (deferredReads.empty());
}
1962
1963 /* explicit instantiation required for some systems */
1964
1965 /// \cond AUTODOCS_IGNORE
1966 template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
1967 /// \endcond
1968
void
DeferredReadManager::delayRead(DeferredRead const &aRead)
{
    // Queue aRead for later and register a close handler that cancels it
    // if the connection closes first.
    debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
    CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);

    // We have to use a global function as a closer and point to temp
    // instead of "this" because DeferredReadManager is not a job and
    // is not even cbdata protected
    // XXX: and yet we use cbdata protection functions on it??
    AsyncCall::Pointer closer = commCbCall(5,4,
                                           "DeferredReadManager::CloseHandler",
                                           CommCloseCbPtrFun(&CloseHandler, temp));
    comm_add_close_handler(aRead.theRead.conn->fd, closer);
    temp->element.closer = closer; // remember so that we can cancel
}
1985
1986 void
1987 DeferredReadManager::CloseHandler(const CommCloseCbParams &params)
1988 {
1989 if (!cbdataReferenceValid(params.data))
1990 return;
1991
1992 CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)params.data;
1993
1994 temp->element.closer = NULL;
1995 temp->element.markCancelled();
1996 }
1997
DeferredRead
DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
{
    // Remove and return the queue head, re-validating the connection and
    // detaching our close handler before handing the read out.
    assert (!deferredReads.empty());

    DeferredRead &read = deferredReads.head->element;

    // NOTE: at this point the connection has been paused/stalled for an unknown
    // amount of time. We must re-validate that it is active and usable.

    // If the connection has been closed already. Cancel this read.
    if (!Comm::IsConnOpen(read.theRead.conn)) {
        if (read.closer != NULL) {
            // the close handler is moot on a dead connection; cancel it
            read.closer->cancel("Connection closed before.");
            read.closer = NULL;
        }
        read.markCancelled();
    }

    if (!read.cancelled) {
        // still live: detach our close handler before dispatching the read
        comm_remove_close_handler(read.theRead.conn->fd, read.closer);
        read.closer = NULL;
    }

    DeferredRead result = deferredReads.pop_front();

    return result;
}
2026
2027 void
2028 DeferredReadManager::kickReads(int const count)
2029 {
2030 /* if we had CbDataList::size() we could consolidate this and flushReads */
2031
2032 if (count < 1) {
2033 flushReads();
2034 return;
2035 }
2036
2037 size_t remaining = count;
2038
2039 while (!deferredReads.empty() && remaining) {
2040 DeferredRead aRead = popHead(deferredReads);
2041 kickARead(aRead);
2042
2043 if (!aRead.cancelled)
2044 --remaining;
2045 }
2046 }
2047
2048 void
2049 DeferredReadManager::flushReads()
2050 {
2051 CbDataListContainer<DeferredRead> reads;
2052 reads = deferredReads;
2053 deferredReads = CbDataListContainer<DeferredRead>();
2054
2055 // XXX: For fairness this SHOULD randomize the order
2056 while (!reads.empty()) {
2057 DeferredRead aRead = popHead(reads);
2058 kickARead(aRead);
2059 }
2060 }
2061
2062 void
2063 DeferredReadManager::kickARead(DeferredRead const &aRead)
2064 {
2065 if (aRead.cancelled)
2066 return;
2067
2068 if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
2069 return;
2070
2071 debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);
2072
2073 aRead.theReader(aRead.theContext, aRead.theRead);
2074 }
2075
void
DeferredRead::markCancelled()
{
    // once set, popHead() and kickARead() treat this read as void
    cancelled = true;
}
2081
int
CommSelectEngine::checkEvents(int timeout)
{
    // Run one iteration of the comm select loop and translate the result
    // into an engine event code. Descriptor timeouts are checked at most
    // once per second, from here.
    static time_t last_timeout = 0;

    /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
    if (squid_curtime > last_timeout) {
        last_timeout = squid_curtime;
        checkTimeouts();
    }

    switch (Comm::DoSelect(timeout)) {

    case COMM_OK:

    case COMM_TIMEOUT:
        return 0;

    case COMM_IDLE:

    case COMM_SHUTDOWN:
        return EVENT_IDLE;

    case COMM_ERROR:
        return EVENT_ERROR;

    default:
        // DoSelect() must return one of the values above
        fatal_dump("comm.cc: Internal error -- this should never happen.");
        return EVENT_ERROR;
    };
}
2113
2114 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
int
comm_open_uds(int sock_type,
              int proto,
              struct sockaddr_un* addr,
              int flags)
{
    // Create a unix-domain socket registered as FD_MSGHDR, applying the
    // COMM_* option flags. Returns the new FD or -1 on failure.
    // TODO: merge with comm_openex() when Ip::Address becomes NetAddress

    int new_socket;

    PROF_start(comm_open);
    /* Create socket for accepting new connections. */
    ++ statCounter.syscalls.sock.sockets;

    /* Setup the socket addrinfo details for use */
    struct addrinfo AI;
    AI.ai_flags = 0;
    AI.ai_family = PF_UNIX;
    AI.ai_socktype = sock_type;
    AI.ai_protocol = proto;
    AI.ai_addrlen = SUN_LEN(addr);
    AI.ai_addr = (sockaddr*)addr;
    AI.ai_canonname = NULL;
    AI.ai_next = NULL;

    debugs(50, 3, HERE << "Attempt open socket for: " << addr->sun_path);

    if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
        /* Increase the number of reserved fd's if calls to socket()
         * are failing because the open file table is full.  This
         * limits the number of simultaneous clients */

        if (limitError(errno)) {
            debugs(50, DBG_IMPORTANT, HERE << "socket failure: " << xstrerror());
            fdAdjustReserved();
        } else {
            debugs(50, DBG_CRITICAL, HERE << "socket failure: " << xstrerror());
        }

        PROF_stop(comm_open);
        return -1;
    }

    debugs(50, 3, "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);

    /* update fdstat */
    debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");

    // register the descriptor as a message-based (FD_MSGHDR) socket
    assert(!isOpen(new_socket));
    fd_open(new_socket, FD_MSGHDR, NULL);

    fdd_table[new_socket].close_file = NULL;

    fdd_table[new_socket].close_line = 0;

    fd_table[new_socket].sock_family = AI.ai_family;

    // apply the standard socket options requested via flags
    if (!(flags & COMM_NOCLOEXEC))
        commSetCloseOnExec(new_socket);

    if (flags & COMM_REUSEADDR)
        commSetReuseAddr(new_socket);

    if (flags & COMM_NONBLOCKING) {
        if (commSetNonBlocking(new_socket) != COMM_OK) {
            comm_close(new_socket);
            PROF_stop(comm_open);
            return -1;
        }
    }

    if (flags & COMM_DOBIND) {
        if (commBind(new_socket, AI) != COMM_OK) {
            comm_close(new_socket);
            PROF_stop(comm_open);
            return -1;
        }
    }

#ifdef TCP_NODELAY
    if (sock_type == SOCK_STREAM)
        commSetTcpNoDelay(new_socket);

#endif

    if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
        commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);

    PROF_stop(comm_open);

    return new_socket;
}