1 /*
2 * DEBUG: section 05 Socket Functions
3 * AUTHOR: Harvest Derived
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 *
32 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
33 */
34
35 #include "squid.h"
36 #include "base/AsyncCall.h"
37 #include "StoreIOBuffer.h"
38 #include "comm.h"
39 #include "event.h"
40 #include "fde.h"
41 #include "comm/AcceptLimiter.h"
42 #include "comm/comm_internal.h"
43 #include "comm/Connection.h"
44 #include "comm/IoCallback.h"
45 #include "comm/Loops.h"
46 #include "comm/Write.h"
47 #include "comm/TcpAcceptor.h"
48 #include "CommIO.h"
49 #include "CommRead.h"
50 #include "MemBuf.h"
51 #include "pconn.h"
52 #include "SquidTime.h"
53 #include "CommCalls.h"
54 #include "DescriptorSet.h"
55 #include "icmp/net_db.h"
56 #include "ip/Address.h"
57 #include "ip/Intercept.h"
58 #include "ip/QosConfig.h"
59 #include "ip/tools.h"
60 #include "ClientInfo.h"
61 #if USE_SSL
62 #include "ssl/support.h"
63 #endif
64
65 #include "cbdata.h"
66 #if _SQUID_CYGWIN_
67 #include <sys/ioctl.h>
68 #endif
69 #ifdef HAVE_NETINET_TCP_H
70 #include <netinet/tcp.h>
71 #endif
72
73 /*
74 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
75 */
76
77 static void commStopHalfClosedMonitor(int fd);
78 static IOCB commHalfClosedReader;
79 static void comm_init_opened(const Comm::ConnectionPointer &conn, tos_t tos, nfmark_t nfmark, const char *note, struct addrinfo *AI);
80 static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
81
82 #if USE_DELAY_POOLS
83 CBDATA_CLASS_INIT(CommQuotaQueue);
84
85 static void commHandleWriteHelper(void * data);
86 #endif
87
88 /* STATIC */
89
90 static DescriptorSet *TheHalfClosed = NULL; ///< the set of half-closed FDs
91 static bool WillCheckHalfClosed = false; ///< true if check is scheduled
92 static EVH commHalfClosedCheck;
93 static void commPlanHalfClosedCheck();
94
95 static comm_err_t commBind(int s, struct addrinfo &);
96 static void commSetReuseAddr(int);
97 static void commSetNoLinger(int);
98 #ifdef TCP_NODELAY
99 static void commSetTcpNoDelay(int);
100 #endif
101 static void commSetTcpRcvbuf(int, int);
102
103 static MemAllocator *conn_close_pool = NULL;
104 fd_debug_t *fdd_table = NULL;
105
106 bool
107 isOpen(const int fd)
108 {
109 return fd >= 0 && fd_table && fd_table[fd].flags.open != 0;
110 }
111
112 /**
113 * Attempt a read
114 *
115 * If the read attempt succeeds or fails, call the callback.
116 * Else, wait for another IO notification.
117 */
118 void
119 commHandleRead(int fd, void *data)
120 {
121 Comm::IoCallback *ccb = (Comm::IoCallback *) data;
122
123 assert(data == COMMIO_FD_READCB(fd));
124 assert(ccb->active());
125 /* Attempt a read */
126 statCounter.syscalls.sock.reads++;
127 errno = 0;
128 int retval;
129 retval = FD_READ_METHOD(fd, ccb->buf, ccb->size);
130 debugs(5, 3, "comm_read_try: FD " << fd << ", size " << ccb->size << ", retval " << retval << ", errno " << errno);
131
132 if (retval < 0 && !ignoreErrno(errno)) {
133 debugs(5, 3, "comm_read_try: scheduling COMM_ERROR");
134 ccb->offset = 0;
135 ccb->finish(COMM_ERROR, errno);
136 return;
137 }
138
139 /* See if we read anything */
140 /* Note - read 0 == socket EOF, which is a valid read */
141 if (retval >= 0) {
142 fd_bytes(fd, retval, FD_READ);
143 ccb->offset = retval;
144 ccb->finish(COMM_OK, errno);
145 return;
146 }
147
148 /* Nope, register for some more IO */
149 Comm::SetSelect(fd, COMM_SELECT_READ, commHandleRead, data, 0);
150 }
151
152 /**
153 * Queue a read. handler/handler_data are called when the read
154 * completes, on error, or on file descriptor close.
155 */
156 void
157 comm_read(const Comm::ConnectionPointer &conn, char *buf, int size, AsyncCall::Pointer &callback)
158 {
159 debugs(5, 5, "comm_read, queueing read for " << conn << "; asyncCall " << callback);
160
161 /* Make sure we are open and not closing */
162 assert(Comm::IsConnOpen(conn));
163 assert(!fd_table[conn->fd].closing());
164 Comm::IoCallback *ccb = COMMIO_FD_READCB(conn->fd);
165
166 // Make sure we are either not reading or just passively monitoring.
167 // Active/passive conflicts are OK and simply cancel passive monitoring.
168 if (ccb->active()) {
169 // if the assertion below fails, we have an active comm_read conflict
170 assert(fd_table[conn->fd].halfClosedReader != NULL);
171 commStopHalfClosedMonitor(conn->fd);
172 assert(!ccb->active());
173 }
174 ccb->conn = conn;
175
176 /* Queue the read */
177 ccb->setCallback(Comm::IOCB_READ, callback, (char *)buf, NULL, size);
178 Comm::SetSelect(conn->fd, COMM_SELECT_READ, commHandleRead, ccb, 0);
179 }
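/*
 * A minimal usage sketch (illustrative only; myReader, myData, myBuf and
 * myBufSize are hypothetical names, not part of Squid). It mirrors the
 * pattern used by commHalfClosedCheck() below: wrap an IOCB handler in an
 * AsyncCall, then queue the read on an open connection.
 *
 *   static IOCB myReader; // matches the IOCB handler signature
 *   AsyncCall::Pointer call = commCbCall(5, 4, "myReader",
 *                                        CommIoCbPtrFun(&myReader, myData));
 *   comm_read(conn, myBuf, myBufSize, call);
 */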
180
181 /**
182 * Empty the read buffers
183 *
184 * This is a magical routine that empties the read buffers.
185 * On some platforms (Linux), if a buffer has data in it before
186 * you call close(), the socket will hang and take quite a while
187 * to time out.
188 */
189 static void
190 comm_empty_os_read_buffers(int fd)
191 {
192 #if _SQUID_LINUX_
193 /* prevent those nasty RST packets */
194 char buf[SQUID_TCP_SO_RCVBUF];
195
196 if (fd_table[fd].flags.nonblocking == 1) {
197 while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
198 }
199 #endif
200 }
201
202
203 /**
204 * Return whether the FD has a pending completed callback.
205 * NP: does not work.
206 */
207 int
208 comm_has_pending_read_callback(int fd)
209 {
210 assert(isOpen(fd));
211 // XXX: We do not know whether there is a read callback scheduled.
212 // This is used for pconn management that should probably be more
213 // tightly integrated into comm to minimize the chance that a
214 // closing pconn socket will be used for a new transaction.
215 return false;
216 }
217
218 // Does comm check this fd for read readiness?
219 // Note that when comm is not monitoring, there can be a pending callback
220 // call, which may resume comm monitoring once fired.
221 bool
222 comm_monitors_read(int fd)
223 {
224 assert(isOpen(fd));
225 // Being active is usually the same as monitoring because we always
226 // start monitoring the FD when we configure Comm::IoCallback for I/O
227 // and we usually configure Comm::IoCallback for I/O when we start
228 // monitoring an FD for reading.
229 return COMMIO_FD_READCB(fd)->active();
230 }
231
232 /**
233 * Cancel a pending read. Assert that we have the right parameters,
234 * and that there are no pending read events!
235 *
236 * XXX: We do not assert that there are no pending read events and
237 * with async calls it becomes even more difficult.
238 * The whole interface should be reworked to do callback->cancel()
239 * instead of searching for places where the callback may be stored and
240 * updating the state of those places.
241 *
242 * AHC Don't call the comm handlers?
243 */
244 void
245 comm_read_cancel(int fd, IOCB *callback, void *data)
246 {
247 if (!isOpen(fd)) {
248 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " closed");
249 return;
250 }
251
252 Comm::IoCallback *cb = COMMIO_FD_READCB(fd);
253 // TODO: is "active" == "monitors FD"?
254 if (!cb->active()) {
255 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " inactive");
256 return;
257 }
258
259 typedef CommCbFunPtrCallT<CommIoCbPtrFun> Call;
260 Call *call = dynamic_cast<Call*>(cb->callback.getRaw());
261 if (!call) {
262 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " lacks callback");
263 return;
264 }
265
266 call->cancel("old comm_read_cancel");
267
268 typedef CommIoCbParams Params;
269 const Params &params = GetCommParams<Params>(cb->callback);
270
271 /* Ok, we can be reasonably sure we won't lose any data here! */
272 assert(call->dialer.handler == callback);
273 assert(params.data == data);
274
275 /* Delete the callback */
276 cb->cancel("old comm_read_cancel");
277
278 /* And the IO event */
279 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
280 }
281
282 void
283 comm_read_cancel(int fd, AsyncCall::Pointer &callback)
284 {
285 callback->cancel("comm_read_cancel");
286
287 if (!isOpen(fd)) {
288 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " closed");
289 return;
290 }
291
292 Comm::IoCallback *cb = COMMIO_FD_READCB(fd);
293
294 if (!cb->active()) {
295 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " inactive");
296 return;
297 }
298
299 AsyncCall::Pointer call = cb->callback;
300 assert(call != NULL); // XXX: should never fail (active() checks for callback==NULL)
301
302 /* Ok, we can be reasonably sure we won't lose any data here! */
303 assert(call == callback);
304
305 /* Delete the callback */
306 cb->cancel("comm_read_cancel");
307
308 /* And the IO event */
309 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
310 }
311
312
313 /**
314 * Synchronous wrapper around UDP socket functions.
315 */
316 int
317 comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
318 {
319 statCounter.syscalls.sock.recvfroms++;
320 int x = 0;
321 struct addrinfo *AI = NULL;
322
323 debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
324
325 assert( NULL == AI );
326
327 from.InitAddrInfo(AI);
328
329 x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
330
331 from = *AI;
332
333 from.FreeAddrInfo(AI);
334
335 return x;
336 }
337
338 int
339 comm_udp_recv(int fd, void *buf, size_t len, int flags)
340 {
341 Ip::Address nul;
342 return comm_udp_recvfrom(fd, buf, len, flags, nul);
343 }
344
345 ssize_t
346 comm_udp_send(int s, const void *buf, size_t len, int flags)
347 {
348 return send(s, buf, len, flags);
349 }
350
351
352 bool
353 comm_has_incomplete_write(int fd)
354 {
355 assert(isOpen(fd));
356 return COMMIO_FD_WRITECB(fd)->active();
357 }
358
359 /**
360 * Queue a write. handler/handler_data are called when the write fully
361 * completes, on error, or on file descriptor close.
362 */
363
364 /* Return the local port associated with fd. */
365 unsigned short
366 comm_local_port(int fd)
367 {
368 Ip::Address temp;
369 struct addrinfo *addr = NULL;
370 fde *F = &fd_table[fd];
371
372 /* If the fd is closed already, just return */
373
374 if (!F->flags.open) {
375 debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
376 return 0;
377 }
378
379 if (F->local_addr.GetPort())
380 return F->local_addr.GetPort();
381
382 if (F->sock_family == AF_INET)
383 temp.SetIPv4();
384
385 temp.InitAddrInfo(addr);
386
387 if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
388 debugs(50, 1, "comm_local_port: Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerror());
389 temp.FreeAddrInfo(addr);
390 return 0;
391 }
392 temp = *addr;
393
394 temp.FreeAddrInfo(addr);
395
396 if (F->local_addr.IsAnyAddr()) {
397 /* save the whole local address, not just the port. */
398 F->local_addr = temp;
399 } else {
400 F->local_addr.SetPort(temp.GetPort());
401 }
402
403 debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.GetPort() << "(family=" << F->sock_family << ")");
404 return F->local_addr.GetPort();
405 }
406
407 static comm_err_t
408 commBind(int s, struct addrinfo &inaddr)
409 {
410 statCounter.syscalls.sock.binds++;
411
412 if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
413 debugs(50, 6, "commBind: bind socket FD " << s << " to " << fd_table[s].local_addr);
414 return COMM_OK;
415 }
416
417 debugs(50, 0, "commBind: Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerror());
418
419 return COMM_ERROR;
420 }
421
422 /**
423 * Create a socket. Default is a blocking, stream (TCP) socket. IO_TYPE
424 * is an OR of the COMM_* flags specified in comm.h. Uses the default TOS.
425 */
426 int
427 comm_open(int sock_type,
428 int proto,
429 Ip::Address &addr,
430 int flags,
431 const char *note)
432 {
433 return comm_openex(sock_type, proto, addr, flags, 0, 0, note);
434 }
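/*
 * A caller sketch (illustrative; the address setup and note string are
 * placeholders, not taken from Squid configuration code). The flags are the
 * COMM_* values later applied by comm_apply_flags():
 *
 *   Ip::Address addr;                      // filled in by the caller
 *   const int fd = comm_open(SOCK_DGRAM, IPPROTO_UDP, addr,
 *                            COMM_NONBLOCKING, "example UDP socket");
 *   if (fd < 0)
 *       debugs(5, DBG_IMPORTANT, "example: comm_open failed");
 */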
435
436 void
437 comm_open_listener(int sock_type,
438 int proto,
439 Comm::ConnectionPointer &conn,
440 const char *note)
441 {
442 /* all listener sockets require bind() */
443 conn->flags |= COMM_DOBIND;
444
445 /* attempt native enabled port. */
446 conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, 0, 0, note);
447 }
448
449 int
450 comm_open_listener(int sock_type,
451 int proto,
452 Ip::Address &addr,
453 int flags,
454 const char *note)
455 {
456 int sock = -1;
457
458 /* all listener sockets require bind() */
459 flags |= COMM_DOBIND;
460
461 /* attempt native enabled port. */
462 sock = comm_openex(sock_type, proto, addr, flags, 0, 0, note);
463
464 return sock;
465 }
466
467 static bool
468 limitError(int const anErrno)
469 {
470 return anErrno == ENFILE || anErrno == EMFILE;
471 }
472
473 void
474 comm_set_v6only(int fd, int tos)
475 {
476 #ifdef IPV6_V6ONLY
477 if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
478 debugs(50, 1, "comm_open: setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerror());
479 }
480 #else
481 debugs(50, 0, "WARNING: comm_open: setsockopt(IPV6_V6ONLY) not supported on this platform");
482 #endif /* sockopt */
483 }
484
485 /**
486 * Set the socket IP_TRANSPARENT option for Linux TPROXY v4 support.
487 */
488 void
489 comm_set_transparent(int fd)
490 {
491 #if defined(IP_TRANSPARENT)
492 int tos = 1;
493 if (setsockopt(fd, SOL_IP, IP_TRANSPARENT, (char *) &tos, sizeof(int)) < 0) {
494 debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(IP_TRANSPARENT) on FD " << fd << ": " << xstrerror());
495 } else {
496 /* mark the socket as having transparent options */
497 fd_table[fd].flags.transparent = 1;
498 }
499 #else
500 debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(IP_TRANSPARENT) not supported on this platform");
501 #endif /* sockopt */
502 }
503
504 /**
505 * Create a socket. Default is a blocking, stream (TCP) socket. IO_TYPE
506 * is an OR of the COMM_* flags specified in defines.h.
507 */
508 int
509 comm_openex(int sock_type,
510 int proto,
511 Ip::Address &addr,
512 int flags,
513 tos_t tos,
514 nfmark_t nfmark,
515 const char *note)
516 {
517 int new_socket;
518 struct addrinfo *AI = NULL;
519
520 PROF_start(comm_open);
521 /* Create socket for accepting new connections. */
522 statCounter.syscalls.sock.sockets++;
523
524 /* Setup the socket addrinfo details for use */
525 addr.GetAddrInfo(AI);
526 AI->ai_socktype = sock_type;
527 AI->ai_protocol = proto;
528
529 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );
530
531 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
532
533 /* under IPv6 there is the possibility IPv6 is present but disabled. */
534 /* try again as IPv4-native if possible */
535 if ( new_socket < 0 && Ip::EnableIpv6 && addr.IsIPv6() && addr.SetIPv4() ) {
536 /* attempt to open this IPv4-only. */
537 addr.FreeAddrInfo(AI);
538 /* Setup the socket addrinfo details for use */
539 addr.GetAddrInfo(AI);
540 AI->ai_socktype = sock_type;
541 AI->ai_protocol = proto;
542 debugs(50, 3, "comm_openex: Attempt fallback open socket for: " << addr );
543 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
544 debugs(50, 2, HERE << "attempt open " << note << " socket on: " << addr);
545 }
546
547 if (new_socket < 0) {
548 /* Increase the number of reserved fd's if calls to socket()
549 * are failing because the open file table is full. This
550 * limits the number of simultaneous clients */
551
552 if (limitError(errno)) {
553 debugs(50, DBG_IMPORTANT, "comm_open: socket failure: " << xstrerror());
554 fdAdjustReserved();
555 } else {
556 debugs(50, DBG_CRITICAL, "comm_open: socket failure: " << xstrerror());
557 }
558
559 addr.FreeAddrInfo(AI);
560
561 PROF_stop(comm_open);
562 return -1;
563 }
564
565 // XXX: temporary for the transition. comm_openex will eventually have a conn to play with.
566 Comm::ConnectionPointer conn = new Comm::Connection;
567 conn->local = addr;
568 conn->fd = new_socket;
569
570 debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );
571
572 /* set TOS if needed */
573 if (tos)
574 Ip::Qos::setSockTos(conn, tos);
575
576 /* set netfilter mark if needed */
577 if (nfmark)
578 Ip::Qos::setSockNfmark(conn, nfmark);
579
580 if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.IsIPv6() )
581 comm_set_v6only(conn->fd, 1);
582
583 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
584 /* Other OS may have this administratively disabled for general use. Same deal. */
585 if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.IsIPv6() )
586 comm_set_v6only(conn->fd, 0);
587
588 comm_init_opened(conn, tos, nfmark, note, AI);
589 new_socket = comm_apply_flags(conn->fd, addr, flags, AI);
590
591 addr.FreeAddrInfo(AI);
592
593 PROF_stop(comm_open);
594
595 // XXX transition only. prevent conn from closing the new FD on function exit.
596 conn->fd = -1;
597 return new_socket;
598 }
599
600 /// update FD tables after a local or remote (IPC) comm_openex();
601 void
602 comm_init_opened(const Comm::ConnectionPointer &conn,
603 tos_t tos,
604 nfmark_t nfmark,
605 const char *note,
606 struct addrinfo *AI)
607 {
608 assert(Comm::IsConnOpen(conn));
609 assert(AI);
610
611 /* update fdstat */
612 debugs(5, 5, HERE << conn << " is a new socket");
613
614 assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
615 fd_open(conn->fd, FD_SOCKET, note);
616
617 fdd_table[conn->fd].close_file = NULL;
618 fdd_table[conn->fd].close_line = 0;
619
620 fde *F = &fd_table[conn->fd];
621 F->local_addr = conn->local;
622 F->tosToServer = tos;
623
624 F->nfmarkToServer = nfmark;
625
626 F->sock_family = AI->ai_family;
627 }
628
629 /// apply flags after a local comm_open*() call;
630 /// returns new_socket or -1 on error
631 static int
632 comm_apply_flags(int new_socket,
633 Ip::Address &addr,
634 int flags,
635 struct addrinfo *AI)
636 {
637 assert(new_socket >= 0);
638 assert(AI);
639 const int sock_type = AI->ai_socktype;
640
641 if (!(flags & COMM_NOCLOEXEC))
642 commSetCloseOnExec(new_socket);
643
644 if ((flags & COMM_REUSEADDR))
645 commSetReuseAddr(new_socket);
646
647 if (addr.GetPort() > (unsigned short) 0) {
648 #if _SQUID_MSWIN_
649 if (sock_type != SOCK_DGRAM)
650 #endif
651 commSetNoLinger(new_socket);
652
653 if (opt_reuseaddr)
654 commSetReuseAddr(new_socket);
655 }
656
657 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
658 if ((flags & COMM_TRANSPARENT)) {
659 comm_set_transparent(new_socket);
660 }
661
662 if ( (flags & COMM_DOBIND) || addr.GetPort() > 0 || !addr.IsAnyAddr() ) {
663 if ( !(flags & COMM_DOBIND) && addr.IsAnyAddr() )
664 debugs(5,1,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
665 if ( addr.IsNoAddr() )
666 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");
667
668 if (commBind(new_socket, *AI) != COMM_OK) {
669 comm_close(new_socket);
670 return -1;
671 }
672 }
673
674 if (flags & COMM_NONBLOCKING)
675 if (commSetNonBlocking(new_socket) == COMM_ERROR) {
676 comm_close(new_socket);
677 return -1;
678 }
679
680 #ifdef TCP_NODELAY
681 if (sock_type == SOCK_STREAM)
682 commSetTcpNoDelay(new_socket);
683
684 #endif
685
686 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
687 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
688
689 return new_socket;
690 }
691
692 void
693 comm_import_opened(const Comm::ConnectionPointer &conn,
694 const char *note,
695 struct addrinfo *AI)
696 {
697 debugs(5, 2, HERE << conn);
698 assert(Comm::IsConnOpen(conn));
699 assert(AI);
700
701 comm_init_opened(conn, 0, 0, note, AI);
702
703 if (!(conn->flags & COMM_NOCLOEXEC))
704 fd_table[conn->fd].flags.close_on_exec = 1;
705
706 if (conn->local.GetPort() > (unsigned short) 0) {
707 #if _SQUID_MSWIN_
708 if (AI->ai_socktype != SOCK_DGRAM)
709 #endif
710 fd_table[conn->fd].flags.nolinger = 1;
711 }
712
713 if ((conn->flags & COMM_TRANSPARENT))
714 fd_table[conn->fd].flags.transparent = 1;
715
716 if (conn->flags & COMM_NONBLOCKING)
717 fd_table[conn->fd].flags.nonblocking = 1;
718
719 #ifdef TCP_NODELAY
720 if (AI->ai_socktype == SOCK_STREAM)
721 fd_table[conn->fd].flags.nodelay = 1;
722 #endif
723
724 /* no fd_table[fd].flags. updates needed for these conditions:
725 * if ((flags & COMM_REUSEADDR)) ...
726 * if ((flags & COMM_DOBIND) ...) ...
727 */
728 }
729
730 // Legacy pre-AsyncCalls API for FD timeouts.
731 int
732 commSetTimeout(int fd, int timeout, CTCB * handler, void *data)
733 {
734 AsyncCall::Pointer call;
735 debugs(5, 3, HERE << "FD " << fd << " timeout " << timeout);
736 if (handler != NULL)
737 call=commCbCall(5,4, "SomeTimeoutHandler", CommTimeoutCbPtrFun(handler, data));
738 else
739 call = NULL;
740 return commSetTimeout(fd, timeout, call);
741 }
742
743 // Legacy pre-Comm::Connection API for FD timeouts
744 // still used by non-socket FD code dealing with pipes and IPC sockets.
745 int
746 commSetTimeout(int fd, int timeout, AsyncCall::Pointer &callback)
747 {
748 debugs(5, 3, HERE << "FD " << fd << " timeout " << timeout);
749 assert(fd >= 0);
750 assert(fd < Squid_MaxFD);
751 fde *F = &fd_table[fd];
752 assert(F->flags.open);
753
754 if (timeout < 0) {
755 F->timeoutHandler = NULL;
756 F->timeout = 0;
757 } else {
758 if (callback != NULL) {
759 typedef CommTimeoutCbParams Params;
760 Params &params = GetCommParams<Params>(callback);
761 params.fd = fd;
762 F->timeoutHandler = callback;
763 }
764
765 F->timeout = squid_curtime + (time_t) timeout;
766 }
767
768 return F->timeout;
769 }
770
771 int
772 commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
773 {
774 debugs(5, 3, HERE << conn << " timeout " << timeout);
775 assert(Comm::IsConnOpen(conn));
776 assert(conn->fd < Squid_MaxFD);
777 fde *F = &fd_table[conn->fd];
778 assert(F->flags.open);
779
780 if (timeout < 0) {
781 F->timeoutHandler = NULL;
782 F->timeout = 0;
783 } else {
784 if (callback != NULL) {
785 typedef CommTimeoutCbParams Params;
786 Params &params = GetCommParams<Params>(callback);
787 params.conn = conn;
788 F->timeoutHandler = callback;
789 }
790
791 F->timeout = squid_curtime + (time_t) timeout;
792 }
793
794 return F->timeout;
795 }
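/*
 * A usage sketch (illustrative; myTimeoutHandler and myData are hypothetical).
 * It reuses the commCbCall()/CommTimeoutCbPtrFun pattern from the legacy
 * commSetTimeout() wrapper above:
 *
 *   AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "myTimeoutHandler",
 *       CommTimeoutCbPtrFun(&myTimeoutHandler, myData));
 *   commSetConnTimeout(conn, 30, timeoutCall); // 30 seconds; value arbitrary
 *   ...
 *   commUnsetConnTimeout(conn); // cancel once the transaction completes
 */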
796
797 int
798 commUnsetConnTimeout(const Comm::ConnectionPointer &conn)
799 {
800 debugs(5, 3, HERE << "Remove timeout for " << conn);
801 AsyncCall::Pointer nil;
802 return commSetConnTimeout(conn, -1, nil);
803 }
804
805 int
806 comm_connect_addr(int sock, const Ip::Address &address)
807 {
808 comm_err_t status = COMM_OK;
809 fde *F = &fd_table[sock];
810 int x = 0;
811 int err = 0;
812 socklen_t errlen;
813 struct addrinfo *AI = NULL;
814 PROF_start(comm_connect_addr);
815
816 assert(address.GetPort() != 0);
817
818 debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");
819
820 /* Handle IPv6 over IPv4-only socket case.
821 * This case must presently be handled here since GetAddrInfo asserts on bad mappings.
822 * NP: because commResetFD is private to ConnStateData we have to return an error and
823 * trust it's handled properly.
824 */
825 if (F->sock_family == AF_INET && !address.IsIPv4()) {
826 errno = ENETUNREACH;
827 return COMM_ERR_PROTOCOL;
828 }
829
830 /* Handle IPv4 over IPv6-only socket case.
831 * This case is presently handled here as it's both a known case and it's
832 * uncertain what error will be returned by the IPv6 stack in that case. It's
833 * possible this will also be handled by the errno checks below after connect(),
834 * but that needs careful cross-platform verification, and verifying the address
835 * condition here is simple.
836 */
837 if (!F->local_addr.IsIPv4() && address.IsIPv4()) {
838 errno = ENETUNREACH;
839 return COMM_ERR_PROTOCOL;
840 }
841
842 address.GetAddrInfo(AI, F->sock_family);
843
844 /* Establish connection. */
845 errno = 0;
846
847 if (!F->flags.called_connect) {
848 F->flags.called_connect = 1;
849 statCounter.syscalls.sock.connects++;
850
851 x = connect(sock, AI->ai_addr, AI->ai_addrlen);
852
853 // XXX: ICAP code refuses callbacks during a pending comm_ call
854 // Async calls development will fix this.
855 if (x == 0) {
856 x = -1;
857 errno = EINPROGRESS;
858 }
859
860 if (x < 0) {
861 debugs(5,5, "comm_connect_addr: sock=" << sock << ", addrinfo( " <<
862 " flags=" << AI->ai_flags <<
863 ", family=" << AI->ai_family <<
864 ", socktype=" << AI->ai_socktype <<
865 ", protocol=" << AI->ai_protocol <<
866 ", &addr=" << AI->ai_addr <<
867 ", addrlen=" << AI->ai_addrlen <<
868 " )" );
869 debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerror());
870 debugs(14,9, "connecting to: " << address );
871 }
872 } else {
873 #if _SQUID_NEWSOS6_
874 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
875
876 connect(sock, AI->ai_addr, AI->ai_addrlen);
877
878 if (errno == EINVAL) {
879 errlen = sizeof(err);
880 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
881
882 if (x >= 0)
883 errno = x;
884 }
885
886 #else
887 errlen = sizeof(err);
888
889 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
890
891 if (x == 0)
892 errno = err;
893
894 #if _SQUID_SOLARIS_
895 /*
896 * Solaris 2.4's socket emulation doesn't allow you
897 * to determine the error from a failed non-blocking
898 * connect and just returns EPIPE. Create a fake
899 * error message for connect. -- fenner@parc.xerox.com
900 */
901 if (x < 0 && errno == EPIPE)
902 errno = ENOTCONN;
903
904 #endif
905 #endif
906
907 }
908
909 /* Squid seems to be working fine without this code. With this code,
910 * we leak memory on many connect requests because of EINPROGRESS.
911 * If you find that this code is needed, please file a bug report. */
912 #if 0
913 #if _SQUID_LINUX_
914 /* 2007-11-27:
915 * Linux Debian replaces our allocated AI pointer with garbage when
916 * connect() fails. This leads to segmentation faults deallocating
917 * the system-allocated memory when we go to clean up our pointer.
918 * HACK: leak the memory returned, since we can't deallocate it.
919 */
920 if (errno != 0) {
921 AI = NULL;
922 }
923 #endif
924 #endif
925
926 address.FreeAddrInfo(AI);
927
928 PROF_stop(comm_connect_addr);
929
930 if (errno == 0 || errno == EISCONN)
931 status = COMM_OK;
932 else if (ignoreErrno(errno))
933 status = COMM_INPROGRESS;
934 else if (errno == EAFNOSUPPORT || errno == EINVAL)
935 return COMM_ERR_PROTOCOL;
936 else
937 return COMM_ERROR;
938
939 address.NtoA(F->ipaddr, MAX_IPSTRLEN);
940
941 F->remote_port = address.GetPort(); /* remote_port is HS */
942
943 if (status == COMM_OK) {
944 debugs(5, 10, "comm_connect_addr: FD " << sock << " connected to " << address);
945 } else if (status == COMM_INPROGRESS) {
946 debugs(5, 10, "comm_connect_addr: FD " << sock << " connection pending");
947 }
948
949 return status;
950 }
951
952 void
953 commCallCloseHandlers(int fd)
954 {
955 fde *F = &fd_table[fd];
956 debugs(5, 5, "commCallCloseHandlers: FD " << fd);
957
958 while (F->closeHandler != NULL) {
959 AsyncCall::Pointer call = F->closeHandler;
960 F->closeHandler = call->Next();
961 call->setNext(NULL);
962 // If the call is not canceled, schedule it for execution; otherwise ignore it
963 if (!call->canceled()) {
964 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
965 // XXX: this should not be needed. Params can be set by the call creator
966 typedef CommCloseCbParams Params;
967 Params &params = GetCommParams<Params>(call);
968 params.fd = fd;
969 ScheduleCallHere(call);
970 }
971 }
972 }
973
974 #if LINGERING_CLOSE
975 static void
976 commLingerClose(int fd, void *unused)
977 {
978 LOCAL_ARRAY(char, buf, 1024);
979 int n;
980 n = FD_READ_METHOD(fd, buf, 1024);
981
982 if (n < 0)
983 debugs(5, 3, "commLingerClose: FD " << fd << " read: " << xstrerror());
984
985 comm_close(fd);
986 }
987
988 static void
989 commLingerTimeout(int fd, void *unused)
990 {
991 debugs(5, 3, "commLingerTimeout: FD " << fd);
992 comm_close(fd);
993 }
994
995 /*
996 * Inspired by apache
997 */
998 void
999 comm_lingering_close(int fd)
1000 {
1001 #if USE_SSL
1002 if (fd_table[fd].ssl)
1003 ssl_shutdown_method(fd_table[fd].ssl);
1004 #endif
1005
1006 if (shutdown(fd, 1) < 0) {
1007 comm_close(fd);
1008 return;
1009 }
1010
1011 fd_note(fd, "lingering close");
1012 commSetTimeout(fd, 10, commLingerTimeout, NULL);
1013 Comm::SetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
1014 }
1015
1016 #endif
1017
1018 /**
1019 * enable linger with time of 0 so that when the socket is
1020 * closed, TCP generates a RESET
1021 */
1022 void
1023 comm_reset_close(const Comm::ConnectionPointer &conn)
1024 {
1025 struct linger L;
1026 L.l_onoff = 1;
1027 L.l_linger = 0;
1028
1029 if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1030 debugs(50, DBG_CRITICAL, "ERROR: Closing " << conn << " with TCP RST: " << xstrerror());
1031
1032 conn->close();
1033 }
1034
1035 // Legacy close function.
1036 void
1037 old_comm_reset_close(int fd)
1038 {
1039 struct linger L;
1040 L.l_onoff = 1;
1041 L.l_linger = 0;
1042
1043 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1044 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerror());
1045
1046 comm_close(fd);
1047 }
1048
1049 #if USE_SSL
1050 void
1051 commStartSslClose(const CommCloseCbParams &params)
1052 {
1053 assert(&fd_table[params.fd].ssl);
1054 ssl_shutdown_method(fd_table[params.fd].ssl);
1055 }
1056 #endif
1057
1058 void
1059 comm_close_complete(const CommCloseCbParams &params)
1060 {
1061 #if USE_SSL
1062 fde *F = &fd_table[params.fd];
1063
1064 if (F->ssl) {
1065 SSL_free(F->ssl);
1066 F->ssl = NULL;
1067 }
1068
1069 if (F->dynamicSslContext) {
1070 SSL_CTX_free(F->dynamicSslContext);
1071 F->dynamicSslContext = NULL;
1072 }
1073 #endif
1074 fd_close(params.fd); /* update fdstat */
1075 close(params.fd);
1076
1077 statCounter.syscalls.sock.closes++;
1078
1079 /* When one connection closes, give accept() a chance, if need be */
1080 Comm::AcceptLimiter::Instance().kick();
1081 }
1082
1083 /*
1084 * Close the socket fd.
1085 *
1086 * + call write handlers with ERR_CLOSING
1087 * + call read handlers with ERR_CLOSING
1088 * + call closing handlers
1089 *
1090 * NOTE: COMM_ERR_CLOSING will NOT be called for CommReads sitting in a
1091 * DeferredReadManager.
1092 */
1093 void
1094 _comm_close(int fd, char const *file, int line)
1095 {
1096 debugs(5, 3, "comm_close: start closing FD " << fd);
1097 assert(fd >= 0);
1098 assert(fd < Squid_MaxFD);
1099
1100 fde *F = &fd_table[fd];
1101 fdd_table[fd].close_file = file;
1102 fdd_table[fd].close_line = line;
1103
1104 if (F->closing())
1105 return;
1106
1107 /* XXX: is this obsolete behind F->closing() ? */
1108 if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
1109 return;
1110
1111 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
1112 assert(isOpen(fd));
1113
1114 assert(F->type != FD_FILE);
1115
1116 PROF_start(comm_close);
1117
1118 F->flags.close_request = 1;
1119
1120 #if USE_SSL
1121 if (F->ssl) {
1122 // XXX: make this a generic async call passing one FD parameter. No need to use CommCloseCbParams
1123 AsyncCall::Pointer startCall=commCbCall(5,4, "commStartSslClose",
1124 CommCloseCbPtrFun(commStartSslClose, NULL));
1125 CommCloseCbParams &startParams = GetCommParams<CommCloseCbParams>(startCall);
1126 startParams.fd = fd;
1127 ScheduleCallHere(startCall);
1128 }
1129 #endif
1130
1131 // a half-closed fd may lack a reader, so we stop monitoring explicitly
1132 if (commHasHalfClosedMonitor(fd))
1133 commStopHalfClosedMonitor(fd);
1134 commSetTimeout(fd, -1, NULL, NULL);
1135
1136 // notify read/write handlers after canceling select reservations, if any
1137 if (COMMIO_FD_WRITECB(fd)->active()) {
1138 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1139 COMMIO_FD_WRITECB(fd)->finish(COMM_ERR_CLOSING, errno);
1140 }
1141 if (COMMIO_FD_READCB(fd)->active()) {
1142 Comm::SetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
1143 COMMIO_FD_READCB(fd)->finish(COMM_ERR_CLOSING, errno);
1144 }
1145
1146 #if USE_DELAY_POOLS
1147 if (ClientInfo *clientInfo = F->clientInfo) {
1148 if (clientInfo->selectWaiting) {
1149 clientInfo->selectWaiting = false;
1150 // kick queue or it will get stuck as commWriteHandle is not called
1151 clientInfo->kickQuotaQueue();
1152 }
1153 }
1154 #endif
1155
1156 commCallCloseHandlers(fd);
1157
1158 if (F->pconn.uses && F->pconn.pool)
1159 F->pconn.pool->noteUses(F->pconn.uses);
1160
1161 comm_empty_os_read_buffers(fd);
1162
1163
1164 AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
1165 CommCloseCbPtrFun(comm_close_complete, NULL));
1166 CommCloseCbParams &completeParams = GetCommParams<CommCloseCbParams>(completeCall);
1167 completeParams.fd = fd;
1168 // must use async call to wait for all callbacks
1169 // scheduled before comm_close() to finish
1170 ScheduleCallHere(completeCall);
1171
1172 PROF_stop(comm_close);
1173 }
1174
1175 /* Send a UDP datagram to the specified TO_ADDR. */
1176 int
1177 comm_udp_sendto(int fd,
1178 const Ip::Address &to_addr,
1179 const void *buf,
1180 int len)
1181 {
1182 int x = 0;
1183 struct addrinfo *AI = NULL;
1184
1185 PROF_start(comm_udp_sendto);
1186 statCounter.syscalls.sock.sendtos++;
1187
1188 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
1189 " using FD " << fd << " using Port " << comm_local_port(fd) );
1190
1191 /* BUG: something in the above macro appears to occasionally be setting AI to garbage. */
1192 /* AYJ: 2007-08-27 : or was it because I wasn't then setting 'fd_table[fd].sock_family' to fill properly. */
1193 assert( NULL == AI );
1194
1195 to_addr.GetAddrInfo(AI, fd_table[fd].sock_family);
1196
1197 x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
1198
1199 to_addr.FreeAddrInfo(AI);
1200
1201 PROF_stop(comm_udp_sendto);
1202
1203 if (x >= 0)
1204 return x;
1205
1206 #if _SQUID_LINUX_
1207
1208 if (ECONNREFUSED != errno)
1209 #endif
1210
1211 debugs(50, 1, "comm_udp_sendto: FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerror());
1212
1213 return COMM_ERROR;
1214 }
1215
1216 void
1217 comm_add_close_handler(int fd, CLCB * handler, void *data)
1218 {
1219 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
1220 handler << ", data=" << data);
1221
1222 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
1223 CommCloseCbPtrFun(handler, data));
1224 comm_add_close_handler(fd, call);
1225 }
1226
1227 void
1228 comm_add_close_handler(int fd, AsyncCall::Pointer &call)
1229 {
1230 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
1231
1232 /* TODO: Check for a similar scheduled AsyncCall */
1233 // for (c = fd_table[fd].closeHandler; c; c = c->next)
1234 // assert(c->handler != handler || c->data != data);
1235
1236 call->setNext(fd_table[fd].closeHandler);
1237
1238 fd_table[fd].closeHandler = call;
1239 }
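/*
 * A usage sketch (illustrative; myCloseHandler and myData are hypothetical).
 * Handlers registered here are scheduled by commCallCloseHandlers() during
 * comm_close(); owners that go away earlier must unregister with the matching
 * comm_remove_close_handler() overload:
 *
 *   comm_add_close_handler(fd, &myCloseHandler, myData);    // CLCB overload
 *   ...
 *   comm_remove_close_handler(fd, &myCloseHandler, myData); // no longer needed
 */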
1240
1241
1242 // remove function-based close handler
1243 void
1244 comm_remove_close_handler(int fd, CLCB * handler, void *data)
1245 {
1246 assert (isOpen(fd));
1247 /* Find handler in list */
1248 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
1249 handler << ", data=" << data);
1250
1251 AsyncCall::Pointer p, prev = NULL;
1252 for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
1253 typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
1254 const Call *call = dynamic_cast<const Call*>(p.getRaw());
1255 if (!call) // method callbacks have their own comm_remove_close_handler
1256 continue;
1257
1258 typedef CommCloseCbParams Params;
1259 const Params &params = GetCommParams<Params>(p);
1260 if (call->dialer.handler == handler && params.data == data)
1261 break; /* This is our handler */
1262 }
1263
1264 // comm_close removes all close handlers so our handler may be gone
1265 if (p != NULL) {
1266 p->dequeue(fd_table[fd].closeHandler, prev);
1267 p->cancel("comm_remove_close_handler");
1268 }
1269 }
1270
1271 // remove method-based close handler
1272 void
1273 comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1274 {
1275 assert (isOpen(fd));
1276 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1277
1278 // comm_close removes all close handlers so our handler may be gone
1279 AsyncCall::Pointer p, prev = NULL;
1280 for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());
1281
1282 if (p != NULL)
1283 p->dequeue(fd_table[fd].closeHandler, prev);
1284 call->cancel("comm_remove_close_handler");
1285 }
1286
1287 static void
1288 commSetNoLinger(int fd)
1289 {
1290
1291 struct linger L;
1292 L.l_onoff = 0; /* off */
1293 L.l_linger = 0;
1294
1295 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1296 debugs(50, 0, "commSetNoLinger: FD " << fd << ": " << xstrerror());
1297
1298 fd_table[fd].flags.nolinger = 1;
1299 }
1300
1301 static void
1302 commSetReuseAddr(int fd)
1303 {
1304 int on = 1;
1305
1306 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0)
1307 debugs(50, 1, "commSetReuseAddr: FD " << fd << ": " << xstrerror());
1308 }
1309
1310 static void
1311 commSetTcpRcvbuf(int fd, int size)
1312 {
1313 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0)
1314 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1315 if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0)
1316 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1317 #ifdef TCP_WINDOW_CLAMP
1318 if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0)
1319 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1320 #endif
1321 }
1322
1323 int
1324 commSetNonBlocking(int fd)
1325 {
1326 #if !_SQUID_MSWIN_
1327 int flags;
1328 int dummy = 0;
1329 #endif
1330 #if _SQUID_WINDOWS_
1331 int nonblocking = TRUE;
1332
1333 #if _SQUID_CYGWIN_
1334 if (fd_table[fd].type != FD_PIPE) {
1335 #endif
1336
1337 if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
1338 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror() << " " << fd_table[fd].type);
1339 return COMM_ERROR;
1340 }
1341
1342 #if _SQUID_CYGWIN_
1343 } else {
1344 #endif
1345 #endif
1346 #if !_SQUID_MSWIN_
1347
1348 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1349 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1350 return COMM_ERROR;
1351 }
1352
1353 if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
1354 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror());
1355 return COMM_ERROR;
1356 }
1357
1358 #endif
1359 #if _SQUID_CYGWIN_
1360 }
1361 #endif
1362 fd_table[fd].flags.nonblocking = 1;
1363
1364 return 0;
1365 }
1366
1367 int
1368 commUnsetNonBlocking(int fd)
1369 {
1370 #if _SQUID_MSWIN_
1371 int nonblocking = FALSE;
1372
1373 if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
1374 #else
1375 int flags;
1376 int dummy = 0;
1377
1378 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1379 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1380 return COMM_ERROR;
1381 }
1382
1383 if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
1384 #endif
1385 debugs(50, 0, "commUnsetNonBlocking: FD " << fd << ": " << xstrerror());
1386 return COMM_ERROR;
1387 }
1388
1389 fd_table[fd].flags.nonblocking = 0;
1390 return 0;
1391 }
1392
1393 void
1394 commSetCloseOnExec(int fd)
1395 {
1396 #ifdef FD_CLOEXEC
1397 int flags;
1398 int dummy = 0;
1399
1400 if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1401 debugs(50, 0, "FD " << fd << ": fcntl F_GETFD: " << xstrerror());
1402 return;
1403 }
1404
1405 if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
1406 debugs(50, 0, "FD " << fd << ": set close-on-exec failed: " << xstrerror());
1407
1408 fd_table[fd].flags.close_on_exec = 1;
1409
1410 #endif
1411 }
1412
1413 #ifdef TCP_NODELAY
1414 static void
1415 commSetTcpNoDelay(int fd)
1416 {
1417 int on = 1;
1418
1419 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0)
1420 debugs(50, 1, "commSetTcpNoDelay: FD " << fd << ": " << xstrerror());
1421
1422 fd_table[fd].flags.nodelay = 1;
1423 }
1424
1425 #endif
1426
1427 void
1428 commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1429 {
1430 int on = 1;
1431 #ifdef TCP_KEEPCNT
1432 if (timeout && interval) {
1433 int count = (timeout + interval - 1) / interval;
1434 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(on)) < 0)
1435 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1436 }
1437 #endif
1438 #ifdef TCP_KEEPIDLE
1439 if (idle) {
1440 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(on)) < 0)
1441 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1442 }
1443 #endif
1444 #ifdef TCP_KEEPINTVL
1445 if (interval) {
1446 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(on)) < 0)
1447 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1448 }
1449 #endif
1450 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0)
1451 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1452 }
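/*
 * Worked example of the probe-count rounding above (values are arbitrary):
 * with timeout=30s and interval=10s, count = (30 + 10 - 1) / 10 = 3, so the
 * peer is probed three times, roughly 10 seconds apart, before the
 * connection is declared dead.
 */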
1453
1454 void
1455 comm_init(void)
1456 {
1457 fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
1458 fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));
1459
1460 /* make sure the accept() socket FIFO delay queue exists */
1461 Comm::AcceptLimiter::Instance();
1462
1463 // make sure the IO pending callback table exists
1464 Comm::CallbackTableInit();
1465
1466 /* XXX account fd_table */
1467 /* Keep a few file descriptors free so that we don't run out of FD's
1468 * after accepting a client but before it opens a socket or a file.
1469 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1470 RESERVED_FD = min(100, Squid_MaxFD / 4);
1471
1472 conn_close_pool = memPoolCreate("close_handler", sizeof(close_handler));
1473
1474 TheHalfClosed = new DescriptorSet;
1475
1476 /* setup the select loop module */
1477 Comm::SelectLoopInit();
1478 }
1479
1480 void
1481 comm_exit(void)
1482 {
1483 delete TheHalfClosed;
1484 TheHalfClosed = NULL;
1485
1486 safe_free(fd_table);
1487 safe_free(fdd_table);
1488 Comm::CallbackTableDestruct();
1489 }
1490
1491 #if USE_DELAY_POOLS
1492 // called when the queue is done waiting for the client bucket to fill
1493 void
1494 commHandleWriteHelper(void * data)
1495 {
1496 CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
1497 assert(queue);
1498
1499 ClientInfo *clientInfo = queue->clientInfo;
1500 // ClientInfo invalidates the queue if freed, so if we got here through
1501 // eventAdd's cbdata protections, everything should be valid and consistent
1502 assert(clientInfo);
1503 assert(clientInfo->hasQueue());
1504 assert(clientInfo->hasQueue(queue));
1505 assert(!clientInfo->selectWaiting);
1506 assert(clientInfo->eventWaiting);
1507 clientInfo->eventWaiting = false;
1508
1509 do {
1510 // check that the head descriptor is still relevant
1511 const int head = clientInfo->quotaPeekFd();
1512 Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);
1513
1514 if (fd_table[head].clientInfo == clientInfo &&
1515 clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
1516 !fd_table[head].closing()) {
1517
1518 // wait for the head descriptor to become ready for writing
1519 Comm::SetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
1520 clientInfo->selectWaiting = true;
1521 return;
1522 }
1523
1524 clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
1525 // and continue looking for a relevant one
1526 } while (clientInfo->hasQueue());
1527
1528 debugs(77,3, HERE << "emptied queue");
1529 }
1530
1531 bool
1532 ClientInfo::hasQueue() const
1533 {
1534 assert(quotaQueue);
1535 return !quotaQueue->empty();
1536 }
1537
1538 bool
1539 ClientInfo::hasQueue(const CommQuotaQueue *q) const
1540 {
1541 assert(quotaQueue);
1542 return quotaQueue == q;
1543 }
1544
1545 /// returns the first descriptor to be dequeued
1546 int
1547 ClientInfo::quotaPeekFd() const
1548 {
1549 assert(quotaQueue);
1550 return quotaQueue->front();
1551 }
1552
1553 /// returns the reservation ID of the first descriptor to be dequeued
1554 unsigned int
1555 ClientInfo::quotaPeekReserv() const
1556 {
1557 assert(quotaQueue);
1558 return quotaQueue->outs + 1;
1559 }
1560
1561 /// queues a given fd, creating the queue if necessary; returns reservation ID
1562 unsigned int
1563 ClientInfo::quotaEnqueue(int fd)
1564 {
1565 assert(quotaQueue);
1566 return quotaQueue->enqueue(fd);
1567 }
1568
1569 /// removes queue head
1570 void
1571 ClientInfo::quotaDequeue()
1572 {
1573 assert(quotaQueue);
1574 quotaQueue->dequeue();
1575 }
1576
1577 void
1578 ClientInfo::kickQuotaQueue()
1579 {
1580 if (!eventWaiting && !selectWaiting && hasQueue()) {
1581 // wait at least a second if the bucket is empty
1582 const double delay = (bucketSize < 1.0) ? 1.0 : 0.0;
1583 eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1584 quotaQueue, delay, 0, true);
1585 eventWaiting = true;
1586 }
1587 }
1588
1589 /// calculates how much to write for a single dequeued client
1590 int
1591 ClientInfo::quotaForDequed()
1592 {
1593 /* If we have multiple clients and give full bucketSize to each client then
1594 * clt1 may often get a lot more because clt1->clt2 time distance in the
1595 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1596 * We divide quota evenly to be more fair. */
1597
1598 if (!rationedCount) {
1599 rationedCount = quotaQueue->size() + 1;
1600
1601 // The delay in ration recalculation _temporarily_ deprives clients of
1602 // bytes that should have trickled in while rationedCount was positive.
1603 refillBucket();
1604
1605 // Rounding errors do not accumulate here, but we round down to avoid
1606 // negative bucket sizes after write with rationedCount=1.
1607 rationedQuota = static_cast<int>(floor(bucketSize/rationedCount));
1608 debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
1609 '*' << rationedCount);
1610 }
1611
1612 --rationedCount;
1613 debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
1614 " rations remaining: " << rationedCount);
1615
1616 // update 'last seen' time to prevent clientdb GC from dropping us
1617 last_seen = squid_curtime;
1618 return rationedQuota;
1619 }
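/*
 * Worked example of the rationing above (numbers are arbitrary): with
 * quotaQueue->size() == 3 and bucketSize == 100 bytes, rationedCount becomes
 * 4 and rationedQuota = floor(100 / 4) = 25, so each of the next four
 * dequeued writes gets a 25-byte allowance before the ration is recomputed.
 */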
1620
1621 /// adds bytes to the quota bucket based on the rate and the time passed
1622 void
1623 ClientInfo::refillBucket()
1624 {
1625 // all these times are in seconds, with double precision
1626 const double currTime = current_dtime;
1627 const double timePassed = currTime - prevTime;
1628
1629 // Calculate allowance for the time passed. Use double to avoid
1630 // accumulating rounding errors for small intervals. For example, always
1631 // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
1632 const double gain = timePassed * writeSpeedLimit;
1633
1634 debugs(77,5, HERE << currTime << " clt" << (const char*)hash.key << ": " <<
1635 bucketSize << " + (" << timePassed << " * " << writeSpeedLimit <<
1636 " = " << gain << ')');
1637
1638 // to further combat error accumulation during micro updates,
1639 // quit before updating time if we cannot add at least one byte
1640 if (gain < 1.0)
1641 return;
1642
1643 prevTime = currTime;
1644
1645 // for "first" connections, drain initial fat before refilling but keep
1646 // updating prevTime to avoid bursts after the fat is gone
1647 if (bucketSize > bucketSizeLimit) {
1648 debugs(77,4, HERE << "not refilling while draining initial fat");
1649 return;
1650 }
1651
1652 bucketSize += gain;
1653
1654 // obey quota limits
1655 if (bucketSize > bucketSizeLimit)
1656 bucketSize = bucketSizeLimit;
1657 }
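/*
 * Worked example of the refill above (numbers are arbitrary): with
 * writeSpeedLimit == 1000 bytes/s and timePassed == 0.25s, gain == 250 bytes
 * and the bucket grows by 250, capped at bucketSizeLimit. With
 * timePassed == 0.0005s the gain (0.5) is below one byte, so prevTime is left
 * unchanged and the fractional allowance is carried into the next refill.
 */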
1658
1659 void
1660 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
1661 {
1662 debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
1663 " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
1664 " highwatermark=" << aHighWatermark);
1665
1666 // set or possibly update traffic shaping parameters
1667 writeLimitingActive = true;
1668 writeSpeedLimit = aWriteSpeedLimit;
1669 bucketSizeLimit = aHighWatermark;
1670
1671 // but some members should only be set once for a newly activated bucket
1672 if (firstTimeConnection) {
1673 firstTimeConnection = false;
1674
1675 assert(!selectWaiting);
1676 assert(!quotaQueue);
1677 quotaQueue = new CommQuotaQueue(this);
1678
1679 bucketSize = anInitialBurst;
1680 prevTime = current_dtime;
1681 }
1682 }
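/*
 * Parameter sketch (illustrative values): setWriteLimiter(500, 1000, 2000)
 * lets the client write up to 1000 bytes immediately (the initial burst),
 * then refills the bucket at 500 bytes/s, never accumulating more than the
 * 2000-byte high watermark of unused quota.
 */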
1683
1684 CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1685 ins(0), outs(0)
1686 {
1687 assert(clientInfo);
1688 }
1689
1690 CommQuotaQueue::~CommQuotaQueue()
1691 {
1692 assert(!clientInfo); // ClientInfo should clear this before destroying us
1693 }
1694
1695 /// places the given fd at the end of the queue; returns reservation ID
1696 unsigned int
1697 CommQuotaQueue::enqueue(int fd)
1698 {
1699 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1700 ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1701 fds.push_back(fd);
1702 return ++ins;
1703 }
1704
1705 /// removes queue head
1706 void
1707 CommQuotaQueue::dequeue()
1708 {
1709 assert(!fds.empty());
1710 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1711 ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1712 fds.size());
1713 fds.pop_front();
1714 ++outs;
1715 }
1716 #endif
1717
1718 /*
1719 * hm, this might be too general-purpose for all the places we'd
1720 * like to use it.
1721 */
1722 int
1723 ignoreErrno(int ierrno)
1724 {
1725 switch (ierrno) {
1726
1727 case EINPROGRESS:
1728
1729 case EWOULDBLOCK:
1730 #if EAGAIN != EWOULDBLOCK
1731
1732 case EAGAIN:
1733 #endif
1734
1735 case EALREADY:
1736
1737 case EINTR:
1738 #ifdef ERESTART
1739
1740 case ERESTART:
1741 #endif
1742
1743 return 1;
1744
1745 default:
1746 return 0;
1747 }
1748
1749 /* NOTREACHED */
1750 }
1751
1752 void
1753 commCloseAllSockets(void)
1754 {
1755 int fd;
1756 fde *F = NULL;
1757
1758 for (fd = 0; fd <= Biggest_FD; fd++) {
1759 F = &fd_table[fd];
1760
1761 if (!F->flags.open)
1762 continue;
1763
1764 if (F->type != FD_SOCKET)
1765 continue;
1766
1767 if (F->flags.ipc) /* don't close inter-process sockets */
1768 continue;
1769
1770 if (F->timeoutHandler != NULL) {
1771 AsyncCall::Pointer callback = F->timeoutHandler;
1772 F->timeoutHandler = NULL;
1773 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1774 ScheduleCallHere(callback);
1775 } else {
1776 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1777 old_comm_reset_close(fd);
1778 }
1779 }
1780 }
1781
1782 static bool
1783 AlreadyTimedOut(fde *F)
1784 {
1785 if (!F->flags.open)
1786 return true;
1787
1788 if (F->timeout == 0)
1789 return true;
1790
1791 if (F->timeout > squid_curtime)
1792 return true;
1793
1794 return false;
1795 }
1796
1797 static bool
1798 writeTimedOut(int fd)
1799 {
1800 if (!COMMIO_FD_WRITECB(fd)->active())
1801 return false;
1802
1803 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1804 return false;
1805
1806 return true;
1807 }
1808
1809 void
1810 checkTimeouts(void)
1811 {
1812 int fd;
1813 fde *F = NULL;
1814 AsyncCall::Pointer callback;
1815
1816 for (fd = 0; fd <= Biggest_FD; fd++) {
1817 F = &fd_table[fd];
1818
1819 if (writeTimedOut(fd)) {
1820 // We have an active write callback and we are timed out
1821 debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
1822 Comm::SetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1823 COMMIO_FD_WRITECB(fd)->finish(COMM_ERROR, ETIMEDOUT);
1824 } else if (AlreadyTimedOut(F))
1825 continue;
1826
1827 debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");
1828
1829 if (F->timeoutHandler != NULL) {
1830 debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
1831 callback = F->timeoutHandler;
1832 F->timeoutHandler = NULL;
1833 ScheduleCallHere(callback);
1834 } else {
1835 debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
1836 comm_close(fd);
1837 }
1838 }
1839 }
1840
1841 void CommIO::Initialise()
1842 {
1843 /* Initialize done pipe signal */
1844 int DonePipe[2];
1845 if (pipe(DonePipe)) {}
1846 DoneFD = DonePipe[1];
1847 DoneReadFD = DonePipe[0];
1848 fd_open(DoneReadFD, FD_PIPE, "async-io completion event: main");
1849 fd_open(DoneFD, FD_PIPE, "async-io completion event: threads");
1850 commSetNonBlocking(DoneReadFD);
1851 commSetNonBlocking(DoneFD);
1852 Comm::SetSelect(DoneReadFD, COMM_SELECT_READ, NULLFDHandler, NULL, 0);
1853 Initialised = true;
1854 }
1855
1856 void CommIO::NotifyIOClose()
1857 {
1858 /* Close done pipe signal */
1859 FlushPipe();
1860 close(DoneFD);
1861 close(DoneReadFD);
1862 fd_close(DoneFD);
1863 fd_close(DoneReadFD);
1864 Initialised = false;
1865 }
1866
1867 bool CommIO::Initialised = false;
1868 bool CommIO::DoneSignalled = false;
1869 int CommIO::DoneFD = -1;
1870 int CommIO::DoneReadFD = -1;
1871
1872 void
1873 CommIO::FlushPipe()
1874 {
1875 char buf[256];
1876 FD_READ_METHOD(DoneReadFD, buf, sizeof(buf));
1877 }
1878
1879 void
1880 CommIO::NULLFDHandler(int fd, void *data)
1881 {
1882 FlushPipe();
1883 Comm::SetSelect(fd, COMM_SELECT_READ, NULLFDHandler, NULL, 0);
1884 }
1885
1886 void
1887 CommIO::ResetNotifications()
1888 {
1889 if (DoneSignalled) {
1890 FlushPipe();
1891 DoneSignalled = false;
1892 }
1893 }
1894
1895 /// Start waiting for a possibly half-closed connection to close
1896 // by scheduling a read callback to a monitoring handler that
1897 // will close the connection on read errors.
1898 void
1899 commStartHalfClosedMonitor(int fd)
1900 {
1901 debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
1902 assert(isOpen(fd));
1903 assert(!commHasHalfClosedMonitor(fd));
1904 (void)TheHalfClosed->add(fd); // could also assert the result
1905 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1906 }
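/*
 * A usage sketch (illustrative): an idle connection owner hands the FD to the
 * monitor; a later comm_read() on the same FD implicitly cancels monitoring
 * (see the active/passive handling in comm_read() above):
 *
 *   if (!commHasHalfClosedMonitor(conn->fd))
 *       commStartHalfClosedMonitor(conn->fd);
 */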
1907
1908 static
1909 void
1910 commPlanHalfClosedCheck()
1911 {
1912 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1913 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1914 WillCheckHalfClosed = true;
1915 }
1916 }
1917
1918 /// iterates over all descriptors that may need half-closed tests and
1919 /// calls comm_read for those that do; re-schedules the check if needed
1920 static
1921 void
1922 commHalfClosedCheck(void *)
1923 {
1924 debugs(5, 5, HERE << "checking " << *TheHalfClosed);
1925
1926 typedef DescriptorSet::const_iterator DSCI;
1927 const DSCI end = TheHalfClosed->end();
1928 for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
1929 Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
1930 c->fd = *i;
1931 if (!fd_table[c->fd].halfClosedReader) { // not reading already
1932 AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
1933 CommIoCbPtrFun(&commHalfClosedReader, NULL));
1934 comm_read(c, NULL, 0, call);
1935 fd_table[c->fd].halfClosedReader = call;
1936 } else
1937 c->fd = -1; // XXX: temporary. prevent c replacement erase closing listed FD
1938 }
1939
1940 WillCheckHalfClosed = false; // as far as we know
1941 commPlanHalfClosedCheck(); // may need to check again
1942 }
1943
/// checks whether we are waiting for a possibly half-closed connection to close
/// (i.e., whether the FD is still listed in TheHalfClosed for monitoring)
1946 bool
1947 commHasHalfClosedMonitor(int fd)
1948 {
1949 return TheHalfClosed->has(fd);
1950 }
1951
/// stop waiting for a possibly half-closed connection to close
1953 static void
1954 commStopHalfClosedMonitor(int const fd)
1955 {
1956 debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
1957
1958 // cancel the read if one was scheduled
1959 AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1960 if (reader != NULL)
1961 comm_read_cancel(fd, reader);
1962 fd_table[fd].halfClosedReader = NULL;
1963
1964 TheHalfClosed->del(fd);
1965 }
1966
1967 /// I/O handler for the possibly half-closed connection monitoring code
1968 static void
1969 commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, comm_err_t flag, int, void *)
1970 {
1971 // there cannot be more data coming in on half-closed connections
1972 assert(size == 0);
1973 assert(conn != NULL);
1974 assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read
1975
1976 fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now
1977
1978 // nothing to do if fd is being closed
1979 if (flag == COMM_ERR_CLOSING)
1980 return;
1981
1982 // if read failed, close the connection
1983 if (flag != COMM_OK) {
1984 debugs(5, 3, HERE << "closing " << conn);
1985 conn->close();
1986 return;
1987 }
1988
1989 // continue waiting for close or error
1990 commPlanHalfClosedCheck(); // make sure this fd will be checked again
1991 }
1992
1993
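/* CommRead bundles the parameters of a comm_read() request so the request can
 * be stored and performed later; DeferredRead pairs such a request with the
 * reader callback and context that will eventually carry it out. */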
1994 CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}
1995
1996 CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
1997 : conn(c), buf(buf_), len(len_), callback(callback_) {}
1998
1999 DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}
2000
2001 DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
2002
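/// The destructor flushes (kicks) any reads still queued so their callbacks
/// are not silently dropped; the queue must be empty afterwards.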
2003 DeferredReadManager::~DeferredReadManager()
2004 {
2005 flushReads();
2006 assert (deferredReads.empty());
2007 }
2008
2009 /* explicit instantiation required for some systems */
2010
2011 /// \cond AUTODOCS-IGNORE
2012 template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
2013 /// \endcond
2014
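/// Queue a read to be performed later and register a close handler on its
/// connection so the queued entry is marked cancelled if the connection
/// closes before the read is kicked.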
2015 void
2016 DeferredReadManager::delayRead(DeferredRead const &aRead)
2017 {
2018 debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
2019 CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);
2020
2021 // We have to use a global function as a closer and point to temp
2022 // instead of "this" because DeferredReadManager is not a job and
2023 // is not even cbdata protected
2024 // XXX: and yet we use cbdata protection functions on it??
2025 AsyncCall::Pointer closer = commCbCall(5,4,
2026 "DeferredReadManager::CloseHandler",
2027 CommCloseCbPtrFun(&CloseHandler, temp));
2028 comm_add_close_handler(aRead.theRead.conn->fd, closer);
temp->element.closer = closer; // remember so that we can cancel it later
2030 }
2031
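/// Close handler for a deferred read's connection: detaches the now-spent
/// closer callback and marks the queued read as cancelled.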
2032 void
2033 DeferredReadManager::CloseHandler(const CommCloseCbParams &params)
2034 {
2035 if (!cbdataReferenceValid(params.data))
2036 return;
2037
2038 CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)params.data;
2039
2040 temp->element.closer = NULL;
2041 temp->element.markCancelled();
2042 }
2043
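/// Remove and return the oldest queued read, first re-validating its
/// connection: a closed connection cancels the read, an open one gets its
/// close handler removed.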
2044 DeferredRead
2045 DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
2046 {
2047 assert (!deferredReads.empty());
2048
2049 DeferredRead &read = deferredReads.head->element;
2050
2051 // NOTE: at this point the connection has been paused/stalled for an unknown
2052 // amount of time. We must re-validate that it is active and usable.
2053
// If the connection has been closed already, cancel this read.
2055 if (!Comm::IsConnOpen(read.theRead.conn)) {
2056 if (read.closer != NULL) {
2057 read.closer->cancel("Connection closed before.");
2058 read.closer = NULL;
2059 }
2060 read.markCancelled();
2061 }
2062
2063 if (!read.cancelled) {
2064 comm_remove_close_handler(read.theRead.conn->fd, read.closer);
2065 read.closer = NULL;
2066 }
2067
2068 DeferredRead result = deferredReads.pop_front();
2069
2070 return result;
2071 }
2072
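/// Resume up to 'count' queued reads (all of them when count < 1); cancelled
/// entries are skipped and do not count against the limit.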
2073 void
2074 DeferredReadManager::kickReads(int const count)
2075 {
2076 /* if we had CbDataList::size() we could consolidate this and flushReads */
2077
2078 if (count < 1) {
2079 flushReads();
2080 return;
2081 }
2082
2083 size_t remaining = count;
2084
2085 while (!deferredReads.empty() && remaining) {
2086 DeferredRead aRead = popHead(deferredReads);
2087 kickARead(aRead);
2088
2089 if (!aRead.cancelled)
2090 --remaining;
2091 }
2092 }
2093
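/// Resume all queued reads. The queue is detached first, so reads deferred
/// again while kicking land in a fresh queue and are not processed twice
/// in this pass.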
2094 void
2095 DeferredReadManager::flushReads()
2096 {
2097 CbDataListContainer<DeferredRead> reads;
2098 reads = deferredReads;
2099 deferredReads = CbDataListContainer<DeferredRead>();
2100
2101 // XXX: For fairness this SHOULD randomize the order
2102 while (!reads.empty()) {
2103 DeferredRead aRead = popHead(reads);
2104 kickARead(aRead);
2105 }
2106 }
2107
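/// Resume a single deferred read unless it has been cancelled or its
/// connection is already being closed.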
2108 void
2109 DeferredReadManager::kickARead(DeferredRead const &aRead)
2110 {
2111 if (aRead.cancelled)
2112 return;
2113
2114 if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
2115 return;
2116
2117 debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);
2118
2119 aRead.theReader(aRead.theContext, aRead.theRead);
2120 }
2121
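/// Flag this deferred read so that kickARead() will skip it.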
2122 void
2123 DeferredRead::markCancelled()
2124 {
2125 cancelled = true;
2126 }
2127
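/// Main-loop engine hook: runs the once-per-second descriptor timeout checks
/// and one I/O select pass, mapping the comm result onto an engine status.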
2128 int
2129 CommSelectEngine::checkEvents(int timeout)
2130 {
2131 static time_t last_timeout = 0;
2132
2133 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
2134 if (squid_curtime > last_timeout) {
2135 last_timeout = squid_curtime;
2136 checkTimeouts();
2137 }
2138
2139 switch (Comm::DoSelect(timeout)) {
2140
2141 case COMM_OK:
2142
2143 case COMM_TIMEOUT:
2144 return 0;
2145
2146 case COMM_IDLE:
2147
2148 case COMM_SHUTDOWN:
2149 return EVENT_IDLE;
2150
2151 case COMM_ERROR:
2152 return EVENT_ERROR;
2153
2154 default:
2155 fatal_dump("comm.cc: Internal error -- this should never happen.");
2156 return EVENT_ERROR;
}
2158 }
2159
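/*
 * Usage sketch (illustrative only; the socket path, debug section, and flag
 * combination below are hypothetical, not taken from existing callers):
 *
 *   struct sockaddr_un addr;
 *   memset(&addr, 0, sizeof(addr));
 *   addr.sun_family = AF_UNIX;
 *   xstrncpy(addr.sun_path, "/var/run/squid/example.sock", sizeof(addr.sun_path));
 *   int fd = comm_open_uds(SOCK_DGRAM, 0, &addr, COMM_NONBLOCKING | COMM_DOBIND);
 *   if (fd < 0)
 *       debugs(54, DBG_IMPORTANT, "could not open UDS socket");
 */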
2160 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
2161 int
2162 comm_open_uds(int sock_type,
2163 int proto,
2164 struct sockaddr_un* addr,
2165 int flags)
2166 {
2167 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
2168
2169 int new_socket;
2170
2171 PROF_start(comm_open);
2172 /* Create socket for accepting new connections. */
2173 statCounter.syscalls.sock.sockets++;
2174
2175 /* Setup the socket addrinfo details for use */
2176 struct addrinfo AI;
2177 AI.ai_flags = 0;
2178 AI.ai_family = PF_UNIX;
2179 AI.ai_socktype = sock_type;
2180 AI.ai_protocol = proto;
2181 AI.ai_addrlen = SUN_LEN(addr);
2182 AI.ai_addr = (sockaddr*)addr;
2183 AI.ai_canonname = NULL;
2184 AI.ai_next = NULL;
2185
2186 debugs(50, 3, HERE << "Attempt open socket for: " << addr->sun_path);
2187
2188 if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
2189 /* Increase the number of reserved fd's if calls to socket()
2190 * are failing because the open file table is full. This
2191 * limits the number of simultaneous clients */
2192
2193 if (limitError(errno)) {
2194 debugs(50, DBG_IMPORTANT, HERE << "socket failure: " << xstrerror());
2195 fdAdjustReserved();
2196 } else {
2197 debugs(50, DBG_CRITICAL, HERE << "socket failure: " << xstrerror());
2198 }
2199
2200 PROF_stop(comm_open);
2201 return -1;
2202 }
2203
debugs(50, 3, HERE << "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);
2205
2206 /* update fdstat */
2207 debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");
2208
2209 assert(!isOpen(new_socket));
2210 fd_open(new_socket, FD_MSGHDR, NULL);
2211
2212 fdd_table[new_socket].close_file = NULL;
2213
2214 fdd_table[new_socket].close_line = 0;
2215
2216 fd_table[new_socket].sock_family = AI.ai_family;
2217
2218 if (!(flags & COMM_NOCLOEXEC))
2219 commSetCloseOnExec(new_socket);
2220
2221 if (flags & COMM_REUSEADDR)
2222 commSetReuseAddr(new_socket);
2223
2224 if (flags & COMM_NONBLOCKING) {
2225 if (commSetNonBlocking(new_socket) != COMM_OK) {
2226 comm_close(new_socket);
2227 PROF_stop(comm_open);
2228 return -1;
2229 }
2230 }
2231
2232 if (flags & COMM_DOBIND) {
2233 if (commBind(new_socket, AI) != COMM_OK) {
2234 comm_close(new_socket);
2235 PROF_stop(comm_open);
2236 return -1;
2237 }
2238 }
2239
2240 #ifdef TCP_NODELAY
2241 if (sock_type == SOCK_STREAM)
2242 commSetTcpNoDelay(new_socket);
2243
2244 #endif
2245
2246 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
2247 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
2248
2249 PROF_stop(comm_open);
2250
2251 return new_socket;
2252 }