1 /*
2 * DEBUG: section 05 Socket Functions
3 * AUTHOR: Harvest Derived
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 *
32 * Copyright (c) 2003, Robert Collins <robertc@squid-cache.org>
33 */
34
35 #include "squid.h"
36 #include "base/AsyncCall.h"
37 #include "StoreIOBuffer.h"
38 #include "comm.h"
39 #include "event.h"
40 #include "fde.h"
41 #include "comm/AcceptLimiter.h"
42 #include "comm/comm_internal.h"
43 #include "comm/Connection.h"
44 #include "comm/IoCallback.h"
45 #include "comm/Write.h"
46 #include "CommIO.h"
47 #include "CommRead.h"
48 #include "MemBuf.h"
49 #include "pconn.h"
50 #include "SquidTime.h"
51 #include "CommCalls.h"
52 #include "DescriptorSet.h"
53 #include "icmp/net_db.h"
54 #include "ip/Address.h"
55 #include "ip/Intercept.h"
56 #include "ip/QosConfig.h"
57 #include "ip/tools.h"
58 #include "ClientInfo.h"
59
60 #include "cbdata.h"
61 #if defined(_SQUID_CYGWIN_)
62 #include <sys/ioctl.h>
63 #endif
64 #ifdef HAVE_NETINET_TCP_H
65 #include <netinet/tcp.h>
66 #endif
67
68 /*
69 * New C-like simple comm code. This stuff is a mess and doesn't really buy us anything.
70 */
71
72 static void commStopHalfClosedMonitor(int fd);
73 static IOCB commHalfClosedReader;
74 static void comm_init_opened(const Comm::ConnectionPointer &conn, tos_t tos, nfmark_t nfmark, const char *note, struct addrinfo *AI);
75 static int comm_apply_flags(int new_socket, Ip::Address &addr, int flags, struct addrinfo *AI);
76
77 #if USE_DELAY_POOLS
78 CBDATA_CLASS_INIT(CommQuotaQueue);
79
80 static void commHandleWriteHelper(void * data);
81 #endif
82
83 /* STATIC */
84
85 static DescriptorSet *TheHalfClosed = NULL; ///< the set of half-closed FDs
86 static bool WillCheckHalfClosed = false; ///< true if check is scheduled
87 static EVH commHalfClosedCheck;
88 static void commPlanHalfClosedCheck();
89
90 static comm_err_t commBind(int s, struct addrinfo &);
91 static void commSetReuseAddr(int);
92 static void commSetNoLinger(int);
93 #ifdef TCP_NODELAY
94 static void commSetTcpNoDelay(int);
95 #endif
96 static void commSetTcpRcvbuf(int, int);
97
98 /*
99 typedef enum {
100 COMM_CB_READ = 1,
101 COMM_CB_DERIVED
102 } comm_callback_t;
103 */
104
105 static MemAllocator *conn_close_pool = NULL;
106 fd_debug_t *fdd_table = NULL;
107
108 bool
109 isOpen(const int fd)
110 {
111 return fd_table[fd].flags.open != 0;
112 }
113
114 /**
115 * Attempt a read
116 *
117 * If the read attempt succeeds or fails, call the callback.
118 * Else, wait for another IO notification.
119 */
120 void
121 commHandleRead(int fd, void *data)
122 {
123 Comm::IoCallback *ccb = (Comm::IoCallback *) data;
124
125 assert(data == COMMIO_FD_READCB(fd));
126 assert(ccb->active());
127 /* Attempt a read */
128 statCounter.syscalls.sock.reads++;
129 errno = 0;
130 int retval;
131 retval = FD_READ_METHOD(fd, ccb->buf, ccb->size);
132 debugs(5, 3, "comm_read_try: FD " << fd << ", size " << ccb->size << ", retval " << retval << ", errno " << errno);
133
134 if (retval < 0 && !ignoreErrno(errno)) {
135 debugs(5, 3, "comm_read_try: scheduling COMM_ERROR");
136 ccb->offset = 0;
137 ccb->finish(COMM_ERROR, errno);
138 return;
139 };
140
141 /* See if we read anything */
142 /* Note - read 0 == socket EOF, which is a valid read */
143 if (retval >= 0) {
144 fd_bytes(fd, retval, FD_READ);
145 ccb->offset = retval;
146 ccb->finish(COMM_OK, errno);
147 return;
148 }
149
150 /* Nope, register for some more IO */
151 commSetSelect(fd, COMM_SELECT_READ, commHandleRead, data, 0);
152 }
153
154 #if 0 // obsolete wrapper.
155 void
156 comm_read(const Comm::ConnectionPointer &conn, char *buf, int size, IOCB *handler, void *handler_data)
157 {
158 AsyncCall::Pointer call = commCbCall(5,4, "SomeCommReadHandler",
159 CommIoCbPtrFun(handler, handler_data));
160 comm_read(conn, buf, size, call);
161 }
162 #endif
163
164 /**
165 * Queue a read. handler/handler_data are called when the read
166 * completes, on error, or on file descriptor close.
167 */
168 void
169 comm_read(const Comm::ConnectionPointer &conn, char *buf, int size, AsyncCall::Pointer &callback)
170 {
171 debugs(5, 5, "comm_read, queueing read for " << conn << "; asyncCall " << callback);
172
173 /* Make sure we are open and not closing */
174 assert(Comm::IsConnOpen(conn));
175 assert(!fd_table[conn->fd].closing());
176 Comm::IoCallback *ccb = COMMIO_FD_READCB(conn->fd);
177
178 // Make sure we are either not reading or just passively monitoring.
179 // Active/passive conflicts are OK and simply cancel passive monitoring.
180 if (ccb->active()) {
181 // if the assertion below fails, we have an active comm_read conflict
182 assert(fd_table[conn->fd].halfClosedReader != NULL);
183 commStopHalfClosedMonitor(conn->fd);
184 assert(!ccb->active());
185 }
186 ccb->conn = conn;
187
188 /* Queue the read */
189 ccb->setCallback(Comm::IOCB_READ, callback, (char *)buf, NULL, size);
190 commSetSelect(conn->fd, COMM_SELECT_READ, commHandleRead, ccb, 0);
191 }
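/* Illustrative usage sketch (comment only, not compiled): a typical caller
 * builds an AsyncCall from a C-style handler and queues the read, as the
 * obsolete wrapper above used to do. The handler name, buffer, and context
 * pointer below are hypothetical.
 *
 *   AsyncCall::Pointer call = commCbCall(5, 4, "exampleReadDone",
 *                                        CommIoCbPtrFun(exampleReadDone, context));
 *   comm_read(conn, buffer, bufferSize, call);
 */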
192
193 /**
194 * Empty the read buffers
195 *
196 * This is a magical routine that empties the read buffers.
197 * On some platforms (Linux), if a buffer has data in it before
198 * you call close(), the socket will hang and take quite a while
199 * to time out.
200 */
201 static void
202 comm_empty_os_read_buffers(int fd)
203 {
204 #ifdef _SQUID_LINUX_
205 /* prevent those nasty RST packets */
206 char buf[SQUID_TCP_SO_RCVBUF];
207
208 if (fd_table[fd].flags.nonblocking == 1) {
209 while (FD_READ_METHOD(fd, buf, SQUID_TCP_SO_RCVBUF) > 0) {};
210 }
211 #endif
212 }
213
214
215 /**
216 * Return whether the FD has a pending completed callback.
217 * NP: does not work.
218 */
219 int
220 comm_has_pending_read_callback(int fd)
221 {
222 assert(isOpen(fd));
223 // XXX: We do not know whether there is a read callback scheduled.
224 // This is used for pconn management that should probably be more
225 // tightly integrated into comm to minimize the chance that a
226 // closing pconn socket will be used for a new transaction.
227 return false;
228 }
229
230 // Does comm check this fd for read readiness?
231 // Note that when comm is not monitoring, there can be a pending callback
232 // call, which may resume comm monitoring once fired.
233 bool
234 comm_monitors_read(int fd)
235 {
236 assert(isOpen(fd));
237 // Being active is usually the same as monitoring because we always
238 // start monitoring the FD when we configure Comm::IoCallback for I/O
239 // and we usually configure Comm::IoCallback for I/O when we start
240 // monitoring an FD for reading.
241 return COMMIO_FD_READCB(fd)->active();
242 }
243
244 /**
245 * Cancel a pending read. Assert that we have the right parameters,
246 * and that there are no pending read events!
247 *
248 * XXX: We do not assert that there are no pending read events and
249 * with async calls it becomes even more difficult.
250 * The whole interface should be reworked to do callback->cancel()
251 * instead of searching for places where the callback may be stored and
252 * updating the state of those places.
253 *
254 * AHC Don't call the comm handlers?
255 */
256 void
257 comm_read_cancel(int fd, IOCB *callback, void *data)
258 {
259 if (!isOpen(fd)) {
260 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " closed");
261 return;
262 }
263
264 Comm::IoCallback *cb = COMMIO_FD_READCB(fd);
265 // TODO: is "active" == "monitors FD"?
266 if (!cb->active()) {
267 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " inactive");
268 return;
269 }
270
271 typedef CommCbFunPtrCallT<CommIoCbPtrFun> Call;
272 Call *call = dynamic_cast<Call*>(cb->callback.getRaw());
273 if (!call) {
274 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " lacks callback");
275 return;
276 }
277
278 call->cancel("old comm_read_cancel");
279
280 typedef CommIoCbParams Params;
281 const Params &params = GetCommParams<Params>(cb->callback);
282
283 /* Ok, we can be reasonably sure we won't lose any data here! */
284 assert(call->dialer.handler == callback);
285 assert(params.data == data);
286
287 /* Delete the callback */
288 cb->cancel("old comm_read_cancel");
289
290 /* And the IO event */
291 commSetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
292 }
293
294 void
295 comm_read_cancel(int fd, AsyncCall::Pointer &callback)
296 {
297 callback->cancel("comm_read_cancel");
298
299 if (!isOpen(fd)) {
300 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " closed");
301 return;
302 }
303
304 Comm::IoCallback *cb = COMMIO_FD_READCB(fd);
305
306 if (!cb->active()) {
307 debugs(5, 4, "comm_read_cancel fails: FD " << fd << " inactive");
308 return;
309 }
310
311 AsyncCall::Pointer call = cb->callback;
312 assert(call != NULL); // XXX: should never fail (active() checks for callback==NULL)
313
314 /* Ok, we can be reasonably sure we won't lose any data here! */
315 assert(call == callback);
316
317 /* Delete the callback */
318 cb->cancel("comm_read_cancel");
319
320 /* And the IO event */
321 commSetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
322 }
323
324
325 /**
326 * synchronous wrapper around udp socket functions
327 */
328 int
329 comm_udp_recvfrom(int fd, void *buf, size_t len, int flags, Ip::Address &from)
330 {
331 statCounter.syscalls.sock.recvfroms++;
332 int x = 0;
333 struct addrinfo *AI = NULL;
334
335 debugs(5,8, "comm_udp_recvfrom: FD " << fd << " from " << from);
336
337 assert( NULL == AI );
338
339 from.InitAddrInfo(AI);
340
341 x = recvfrom(fd, buf, len, flags, AI->ai_addr, &AI->ai_addrlen);
342
343 from = *AI;
344
345 from.FreeAddrInfo(AI);
346
347 return x;
348 }
349
350 int
351 comm_udp_recv(int fd, void *buf, size_t len, int flags)
352 {
353 Ip::Address nul;
354 return comm_udp_recvfrom(fd, buf, len, flags, nul);
355 }
356
357 ssize_t
358 comm_udp_send(int s, const void *buf, size_t len, int flags)
359 {
360 return send(s, buf, len, flags);
361 }
362
363
364 bool
365 comm_has_incomplete_write(int fd)
366 {
367 assert(isOpen(fd));
368 return COMMIO_FD_WRITECB(fd)->active();
369 }
370
371 /**
372 * Queue a write. handler/handler_data are called when the write fully
373 * completes, on error, or on file descriptor close.
374 */
375
376 /* Return the local port associated with fd. */
377 u_short
378 comm_local_port(int fd)
379 {
380 Ip::Address temp;
381 struct addrinfo *addr = NULL;
382 fde *F = &fd_table[fd];
383
384 /* If the fd is closed already, just return */
385
386 if (!F->flags.open) {
387 debugs(5, 0, "comm_local_port: FD " << fd << " has been closed.");
388 return 0;
389 }
390
391 if (F->local_addr.GetPort())
392 return F->local_addr.GetPort();
393
394 if (F->sock_family == AF_INET)
395 temp.SetIPv4();
396
397 temp.InitAddrInfo(addr);
398
399 if (getsockname(fd, addr->ai_addr, &(addr->ai_addrlen)) ) {
400 debugs(50, 1, "comm_local_port: Failed to retrieve TCP/UDP port number for socket: FD " << fd << ": " << xstrerror());
401 temp.FreeAddrInfo(addr);
402 return 0;
403 }
404 temp = *addr;
405
406 temp.FreeAddrInfo(addr);
407
408 if (F->local_addr.IsAnyAddr()) {
409 /* save the whole local address, not just the port. */
410 F->local_addr = temp;
411 } else {
412 F->local_addr.SetPort(temp.GetPort());
413 }
414
415 debugs(5, 6, "comm_local_port: FD " << fd << ": port " << F->local_addr.GetPort() << "(family=" << F->sock_family << ")");
416 return F->local_addr.GetPort();
417 }
418
419 static comm_err_t
420 commBind(int s, struct addrinfo &inaddr)
421 {
422 statCounter.syscalls.sock.binds++;
423
424 if (bind(s, inaddr.ai_addr, inaddr.ai_addrlen) == 0) {
425 debugs(50, 6, "commBind: bind socket FD " << s << " to " << fd_table[s].local_addr);
426 return COMM_OK;
427 }
428
429 debugs(50, 0, "commBind: Cannot bind socket FD " << s << " to " << fd_table[s].local_addr << ": " << xstrerror());
430
431 return COMM_ERROR;
432 }
433
434 /**
435 * Create a socket. Default is a blocking, stream (TCP) socket. IO_TYPE
436 * is an OR of the flags specified in comm.h. TOS defaults to 0.
437 */
438 int
439 comm_open(int sock_type,
440 int proto,
441 Ip::Address &addr,
442 int flags,
443 const char *note)
444 {
445 return comm_openex(sock_type, proto, addr, flags, 0, 0, note);
446 }
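/* Illustrative sketch (comment only, not compiled): opening a non-blocking
 * TCP socket on an arbitrary local address. The address value and note
 * string are hypothetical.
 *
 *   Ip::Address local;   // unset = any address, port 0
 *   const int fd = comm_open(SOCK_STREAM, IPPROTO_TCP, local,
 *                            COMM_NONBLOCKING, "example TCP socket");
 *   if (fd < 0)
 *       debugs(5, DBG_IMPORTANT, "example: comm_open failed");
 */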
447
448 void
449 comm_open_listener(int sock_type,
450 int proto,
451 Comm::ConnectionPointer &conn,
452 const char *note)
453 {
454 /* all listener sockets require bind() */
455 conn->flags |= COMM_DOBIND;
456
457 /* attempt native enabled port. */
458 conn->fd = comm_openex(sock_type, proto, conn->local, conn->flags, 0, 0, note);
459 }
460
461 int
462 comm_open_listener(int sock_type,
463 int proto,
464 Ip::Address &addr,
465 int flags,
466 const char *note)
467 {
468 int sock = -1;
469
470 /* all listener sockets require bind() */
471 flags |= COMM_DOBIND;
472
473 /* attempt native enabled port. */
474 sock = comm_openex(sock_type, proto, addr, flags, 0, 0, note);
475
476 return sock;
477 }
478
479 static bool
480 limitError(int const anErrno)
481 {
482 return anErrno == ENFILE || anErrno == EMFILE;
483 }
484
485 void
486 comm_set_v6only(int fd, int tos)
487 {
488 #ifdef IPV6_V6ONLY
489 if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &tos, sizeof(int)) < 0) {
490 debugs(50, 1, "comm_open: setsockopt(IPV6_V6ONLY) " << (tos?"ON":"OFF") << " for FD " << fd << ": " << xstrerror());
491 }
492 #else
493 debugs(50, 0, "WARNING: comm_open: setsockopt(IPV6_V6ONLY) not supported on this platform");
494 #endif /* sockopt */
495 }
496
497 /**
498 * Set the socket IP_TRANSPARENT option for Linux TPROXY v4 support.
499 */
500 void
501 comm_set_transparent(int fd)
502 {
503 #if defined(IP_TRANSPARENT)
504 int tos = 1;
505 if (setsockopt(fd, SOL_IP, IP_TRANSPARENT, (char *) &tos, sizeof(int)) < 0) {
506 debugs(50, DBG_IMPORTANT, "comm_open: setsockopt(IP_TRANSPARENT) on FD " << fd << ": " << xstrerror());
507 } else {
508 /* mark the socket as having transparent options */
509 fd_table[fd].flags.transparent = 1;
510 }
511 #else
512 debugs(50, DBG_CRITICAL, "WARNING: comm_open: setsockopt(IP_TRANSPARENT) not supported on this platform");
513 #endif /* sockopt */
514 }
515
516 /**
517 * Create a socket. Default is a blocking, stream (TCP) socket. IO_TYPE
518 * is an OR of the COMM_* flags specified in defines.h.
519 */
520 int
521 comm_openex(int sock_type,
522 int proto,
523 Ip::Address &addr,
524 int flags,
525 tos_t tos,
526 nfmark_t nfmark,
527 const char *note)
528 {
529 int new_socket;
530 struct addrinfo *AI = NULL;
531
532 PROF_start(comm_open);
533 /* Create socket for accepting new connections. */
534 statCounter.syscalls.sock.sockets++;
535
536 /* Setup the socket addrinfo details for use */
537 addr.GetAddrInfo(AI);
538 AI->ai_socktype = sock_type;
539 AI->ai_protocol = proto;
540
541 debugs(50, 3, "comm_openex: Attempt open socket for: " << addr );
542 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
543
544 /* under IPv6 there is the possibility IPv6 is present but disabled. */
545 /* try again as IPv4-native if possible */
546 if ( new_socket < 0 && Ip::EnableIpv6 && addr.IsIPv6() && addr.SetIPv4() ) {
547 /* attempt to open this IPv4-only. */
548 addr.FreeAddrInfo(AI);
549 /* Setup the socket addrinfo details for use */
550 addr.GetAddrInfo(AI);
551 AI->ai_socktype = sock_type;
552 AI->ai_protocol = proto;
553 debugs(50, 3, "comm_openex: Attempt fallback open socket for: " << addr );
554 new_socket = socket(AI->ai_family, AI->ai_socktype, AI->ai_protocol);
555 debugs(50, 2, HERE << "attempt open " << note << " socket on: " << addr);
556 }
557
558 if (new_socket < 0) {
559 /* Increase the number of reserved fd's if calls to socket()
560 * are failing because the open file table is full. This
561 * limits the number of simultaneous clients */
562
563 if (limitError(errno)) {
564 debugs(50, DBG_IMPORTANT, "comm_open: socket failure: " << xstrerror());
565 fdAdjustReserved();
566 } else {
567 debugs(50, DBG_CRITICAL, "comm_open: socket failure: " << xstrerror());
568 }
569
570 addr.FreeAddrInfo(AI);
571
572 PROF_stop(comm_open);
573 return -1;
574 }
575
576 // temporary for the transition. comm_openex will eventually have a conn to play with.
577 Comm::ConnectionPointer conn = new Comm::Connection;
578 conn->local = addr;
579 conn->fd = new_socket;
580
581 debugs(50, 3, "comm_openex: Opened socket " << conn << " : family=" << AI->ai_family << ", type=" << AI->ai_socktype << ", protocol=" << AI->ai_protocol );
582
583 /* set TOS if needed */
584 if (tos)
585 Ip::Qos::setSockTos(conn, tos);
586
587 /* set netfilter mark if needed */
588 if (nfmark)
589 Ip::Qos::setSockNfmark(conn, nfmark);
590
591 if ( Ip::EnableIpv6&IPV6_SPECIAL_SPLITSTACK && addr.IsIPv6() )
592 comm_set_v6only(conn->fd, 1);
593
594 /* Windows Vista supports Dual-Sockets. BUT defaults them to V6ONLY. Turn it OFF. */
595 /* Other OS may have this administratively disabled for general use. Same deal. */
596 if ( Ip::EnableIpv6&IPV6_SPECIAL_V4MAPPING && addr.IsIPv6() )
597 comm_set_v6only(conn->fd, 0);
598
599 comm_init_opened(conn, tos, nfmark, note, AI);
600 new_socket = comm_apply_flags(conn->fd, addr, flags, AI);
601
602 addr.FreeAddrInfo(AI);
603
604 PROF_stop(comm_open);
605
606 // XXX transition only. prevent conn from closing the new FD on function exit.
607 conn->fd = -1;
608 return new_socket;
609 }
610
611 /// update FD tables after a local or remote (IPC) comm_openex();
612 void
613 comm_init_opened(const Comm::ConnectionPointer &conn,
614 tos_t tos,
615 nfmark_t nfmark,
616 const char *note,
617 struct addrinfo *AI)
618 {
619 assert(Comm::IsConnOpen(conn));
620 assert(AI);
621
622 /* update fdstat */
623 debugs(5, 5, HERE << conn << " is a new socket");
624
625 assert(!isOpen(conn->fd)); // NP: global isOpen checks the fde entry for openness not the Comm::Connection
626 fd_open(conn->fd, FD_SOCKET, note);
627
628 fdd_table[conn->fd].close_file = NULL;
629 fdd_table[conn->fd].close_line = 0;
630
631 fde *F = &fd_table[conn->fd];
632 F->local_addr = conn->local;
633 F->tosToServer = tos;
634 F->nfmarkToServer = nfmark;
635 F->sock_family = AI->ai_family;
636 }
637
638 /// apply flags after a local comm_open*() call;
639 /// returns new_socket or -1 on error
640 static int
641 comm_apply_flags(int new_socket,
642 Ip::Address &addr,
643 int flags,
644 struct addrinfo *AI)
645 {
646 assert(new_socket >= 0);
647 assert(AI);
648 const int sock_type = AI->ai_socktype;
649
650 if (!(flags & COMM_NOCLOEXEC))
651 commSetCloseOnExec(new_socket);
652
653 if ((flags & COMM_REUSEADDR))
654 commSetReuseAddr(new_socket);
655
656 if (addr.GetPort() > (u_short) 0) {
657 #ifdef _SQUID_MSWIN_
658 if (sock_type != SOCK_DGRAM)
659 #endif
660 commSetNoLinger(new_socket);
661
662 if (opt_reuseaddr)
663 commSetReuseAddr(new_socket);
664 }
665
666 /* MUST be done before binding or face OS Error: "(99) Cannot assign requested address"... */
667 if ((flags & COMM_TRANSPARENT)) {
668 comm_set_transparent(new_socket);
669 }
670
671 if ( (flags & COMM_DOBIND) || addr.GetPort() > 0 || !addr.IsAnyAddr() ) {
672 if ( !(flags & COMM_DOBIND) && addr.IsAnyAddr() )
673 debugs(5,1,"WARNING: Squid is attempting to bind() port " << addr << " without being a listener.");
674 if ( addr.IsNoAddr() )
675 debugs(5,0,"CRITICAL: Squid is attempting to bind() port " << addr << "!!");
676
677 if (commBind(new_socket, *AI) != COMM_OK) {
678 comm_close(new_socket);
679 return -1;
680 }
681 }
682
683 if (flags & COMM_NONBLOCKING)
684 if (commSetNonBlocking(new_socket) == COMM_ERROR) {
685 comm_close(new_socket);
686 return -1;
687 }
688
689 #ifdef TCP_NODELAY
690 if (sock_type == SOCK_STREAM)
691 commSetTcpNoDelay(new_socket);
692
693 #endif
694
695 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
696 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
697
698 return new_socket;
699 }
700
701 void
702 comm_import_opened(const Comm::ConnectionPointer &conn,
703 const char *note,
704 struct addrinfo *AI)
705 {
706 debugs(5, 2, HERE << conn);
707 assert(Comm::IsConnOpen(conn));
708 assert(AI);
709
710 comm_init_opened(conn, 0, 0, note, AI);
711
712 if (!(conn->flags & COMM_NOCLOEXEC))
713 fd_table[conn->fd].flags.close_on_exec = 1;
714
715 if (conn->local.GetPort() > (u_short) 0) {
716 #ifdef _SQUID_MSWIN_
717 if (AI->ai_socktype != SOCK_DGRAM)
718 #endif
719 fd_table[conn->fd].flags.nolinger = 1;
720 }
721
722 if ((conn->flags & COMM_TRANSPARENT))
723 fd_table[conn->fd].flags.transparent = 1;
724
725 if (conn->flags & COMM_NONBLOCKING)
726 fd_table[conn->fd].flags.nonblocking = 1;
727
728 #ifdef TCP_NODELAY
729 if (AI->ai_socktype == SOCK_STREAM)
730 fd_table[conn->fd].flags.nodelay = 1;
731 #endif
732
733 /* no fd_table[fd].flags. updates needed for these conditions:
734 * if ((flags & COMM_REUSEADDR)) ...
735 * if ((flags & COMM_DOBIND) ...) ...
736 */
737 }
738
739 #if 0
740 int
741 commSetTimeout_old(int fd, int timeout, PF * handler, void *data)
742 {
743 debugs(5, 3, HERE << "FD " << fd << " timeout " << timeout);
744 assert(fd >= 0);
745 assert(fd < Squid_MaxFD);
746 fde *F = &fd_table[fd];
747 assert(F->flags.open);
748
749 if (timeout < 0) {
750 cbdataReferenceDone(F->timeout_data);
751 F->timeout_handler = NULL;
752 F->timeout = 0;
753 } else {
754 if (handler) {
755 cbdataReferenceDone(F->timeout_data);
756 F->timeout_handler = handler;
757 F->timeout_data = cbdataReference(data);
758 }
759
760 F->timeout = squid_curtime + (time_t) timeout;
761 }
762
763 return F->timeout;
764 }
765 #endif
766
767 // Legacy pre-AsyncCalls API for FD timeouts.
768 int
769 commSetTimeout(int fd, int timeout, PF * handler, void *data)
770 {
771 AsyncCall::Pointer call;
772 debugs(5, 3, HERE << "FD " << fd << " timeout " << timeout);
773 if (handler != NULL)
774 call=commCbCall(5,4, "SomeTimeoutHandler", CommTimeoutCbPtrFun(handler, data));
775 else
776 call = NULL;
777 return commSetTimeout(fd, timeout, call);
778 }
779
780 // Legacy pre-Comm::Connection API for FD timeouts
781 // still used by non-socket FD code dealing with pipes and IPC sockets.
782 int
783 commSetTimeout(int fd, int timeout, AsyncCall::Pointer &callback)
784 {
785 debugs(5, 3, HERE << "FD " << fd << " timeout " << timeout);
786 assert(fd >= 0);
787 assert(fd < Squid_MaxFD);
788 fde *F = &fd_table[fd];
789 assert(F->flags.open);
790
791 if (timeout < 0) {
792 F->timeoutHandler = NULL;
793 F->timeout = 0;
794 } else {
795 if (callback != NULL) {
796 typedef CommTimeoutCbParams Params;
797 Params &params = GetCommParams<Params>(callback);
798 params.fd = fd;
799 F->timeoutHandler = callback;
800 }
801
802 F->timeout = squid_curtime + (time_t) timeout;
803 }
804
805 return F->timeout;
806 }
807
808 int
809 commSetConnTimeout(const Comm::ConnectionPointer &conn, int timeout, AsyncCall::Pointer &callback)
810 {
811 debugs(5, 3, HERE << conn << " timeout " << timeout);
812 assert(Comm::IsConnOpen(conn));
813 assert(conn->fd < Squid_MaxFD);
814 fde *F = &fd_table[conn->fd];
815 assert(F->flags.open);
816
817 if (timeout < 0) {
818 F->timeoutHandler = NULL;
819 F->timeout = 0;
820 } else {
821 if (callback != NULL) {
822 typedef CommTimeoutCbParams Params;
823 Params &params = GetCommParams<Params>(callback);
824 params.conn = conn;
825 F->timeoutHandler = callback;
826 }
827
828 F->timeout = squid_curtime + (time_t) timeout;
829 }
830
831 return F->timeout;
832 }
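/* Illustrative sketch (comment only, not compiled): arming a timeout on an
 * open connection. The handler and context names are hypothetical.
 *
 *   AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "exampleTimeout",
 *                                               CommTimeoutCbPtrFun(exampleTimeoutHandler, context));
 *   commSetConnTimeout(conn, 30, timeoutCall);   // fire 30 seconds from now
 */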
833
834 int
835 comm_connect_addr(int sock, const Ip::Address &address)
836 {
837 comm_err_t status = COMM_OK;
838 fde *F = &fd_table[sock];
839 int x = 0;
840 int err = 0;
841 socklen_t errlen;
842 struct addrinfo *AI = NULL;
843 PROF_start(comm_connect_addr);
844
845 assert(address.GetPort() != 0);
846
847 debugs(5, 9, HERE << "connecting socket FD " << sock << " to " << address << " (want family: " << F->sock_family << ")");
848
849 /* Handle IPv6 over IPv4-only socket case.
850 * This case must presently be handled here since GetAddrInfo asserts on bad mappings.
851 * NP: because commResetFD is private to ConnStateData we have to return an error and
852 * trust that it is handled properly.
853 */
854 if (F->sock_family == AF_INET && !address.IsIPv4()) {
855 errno = ENETUNREACH;
856 return COMM_ERR_PROTOCOL;
857 }
858
859 /* Handle IPv4 over IPv6-only socket case.
860 * This case is presently handled here as it's both a known case and it's
861 * uncertain what error the IPv6 stack will return in such a case. It's
862 * possible this will also be handled by the errno checks below after connect(),
863 * but that needs careful cross-platform verification, and verifying the address
864 * condition here is simple.
865 */
866 if (!F->local_addr.IsIPv4() && address.IsIPv4()) {
867 errno = ENETUNREACH;
868 return COMM_ERR_PROTOCOL;
869 }
870
871 address.GetAddrInfo(AI, F->sock_family);
872
873 /* Establish connection. */
874 errno = 0;
875
876 if (!F->flags.called_connect) {
877 F->flags.called_connect = 1;
878 statCounter.syscalls.sock.connects++;
879
880 x = connect(sock, AI->ai_addr, AI->ai_addrlen);
881
882 // XXX: ICAP code refuses callbacks during a pending comm_ call
883 // Async calls development will fix this.
884 if (x == 0) {
885 x = -1;
886 errno = EINPROGRESS;
887 }
888
889 if (x < 0) {
890 debugs(5,5, "comm_connect_addr: sock=" << sock << ", addrinfo( " <<
891 " flags=" << AI->ai_flags <<
892 ", family=" << AI->ai_family <<
893 ", socktype=" << AI->ai_socktype <<
894 ", protocol=" << AI->ai_protocol <<
895 ", &addr=" << AI->ai_addr <<
896 ", addrlen=" << AI->ai_addrlen <<
897 " )" );
898 debugs(5, 9, "connect FD " << sock << ": (" << x << ") " << xstrerror());
899 debugs(14,9, "connecting to: " << address );
900 }
901 } else {
902 #if defined(_SQUID_NEWSOS6_)
903 /* Makoto MATSUSHITA <matusita@ics.es.osaka-u.ac.jp> */
904
905 connect(sock, AI->ai_addr, AI->ai_addrlen);
906
907 if (errno == EINVAL) {
908 errlen = sizeof(err);
909 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
910
911 if (x >= 0)
912 errno = x;
913 }
914
915 #else
916 errlen = sizeof(err);
917
918 x = getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen);
919
920 if (x == 0)
921 errno = err;
922
923 #if defined(_SQUID_SOLARIS_)
924 /*
925 * Solaris 2.4's socket emulation doesn't allow you
926 * to determine the error from a failed non-blocking
927 * connect and just returns EPIPE. Create a fake
928 * error message for connect. -- fenner@parc.xerox.com
929 */
930 if (x < 0 && errno == EPIPE)
931 errno = ENOTCONN;
932
933 #endif
934 #endif
935
936 }
937
938 /* Squid seems to be working fine without this code. With this code,
939 * we leak memory on many connect requests because of EINPROGRESS.
940 * If you find that this code is needed, please file a bug report. */
941 #if 0
942 #ifdef _SQUID_LINUX_
943 /* 2007-11-27:
944 * Linux Debian replaces our allocated AI pointer with garbage when
945 * connect() fails. This leads to segmentation faults deallocating
946 * the system-allocated memory when we go to clean up our pointer.
947 * HACK: is to leak the memory returned since we can't deallocate.
948 */
949 if (errno != 0) {
950 AI = NULL;
951 }
952 #endif
953 #endif
954
955 address.FreeAddrInfo(AI);
956
957 PROF_stop(comm_connect_addr);
958
959 if (errno == 0 || errno == EISCONN)
960 status = COMM_OK;
961 else if (ignoreErrno(errno))
962 status = COMM_INPROGRESS;
963 else if (errno == EAFNOSUPPORT || errno == EINVAL)
964 return COMM_ERR_PROTOCOL;
965 else
966 return COMM_ERROR;
967
968 address.NtoA(F->ipaddr, MAX_IPSTRLEN);
969
970 F->remote_port = address.GetPort(); /* remote_port is HS */
971
972 if (status == COMM_OK) {
973 debugs(5, 10, "comm_connect_addr: FD " << sock << " connected to " << address);
974 } else if (status == COMM_INPROGRESS) {
975 debugs(5, 10, "comm_connect_addr: FD " << sock << " connection pending");
976 }
977
978 return status;
979 }
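/* Illustrative call pattern (comment only, not compiled): connection-opening
 * code typically retries comm_connect_addr() from a write-readiness callback
 * until the pending connect resolves one way or the other.
 *
 *   switch (comm_connect_addr(conn->fd, conn->remote)) {
 *   case COMM_INPROGRESS:
 *       // still pending: wait for write readiness, then call again
 *       break;
 *   case COMM_OK:
 *       // connected: start using the socket
 *       break;
 *   default:
 *       // COMM_ERR_PROTOCOL or COMM_ERROR: give up on this address
 *       break;
 *   }
 */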
980
981 void
982 commCallCloseHandlers(int fd)
983 {
984 fde *F = &fd_table[fd];
985 debugs(5, 5, "commCallCloseHandlers: FD " << fd);
986
987 while (F->closeHandler != NULL) {
988 AsyncCall::Pointer call = F->closeHandler;
989 F->closeHandler = call->Next();
990 call->setNext(NULL);
991 // If the call is not canceled, schedule it for execution; otherwise ignore it
992 if (!call->canceled()) {
993 debugs(5, 5, "commCallCloseHandlers: ch->handler=" << call);
994 typedef CommCloseCbParams Params;
995 Params &params = GetCommParams<Params>(call);
996 params.fd = fd;
997 ScheduleCallHere(call);
998 }
999 }
1000 }
1001
1002 #if LINGERING_CLOSE
1003 static void
1004 commLingerClose(int fd, void *unused)
1005 {
1006 LOCAL_ARRAY(char, buf, 1024);
1007 int n;
1008 n = FD_READ_METHOD(fd, buf, 1024);
1009
1010 if (n < 0)
1011 debugs(5, 3, "commLingerClose: FD " << fd << " read: " << xstrerror());
1012
1013 comm_close(fd);
1014 }
1015
1016 static void
1017 commLingerTimeout(int fd, void *unused)
1018 {
1019 debugs(5, 3, "commLingerTimeout: FD " << fd);
1020 comm_close(fd);
1021 }
1022
1023 /*
1024 * Inspired by apache
1025 */
1026 void
1027 comm_lingering_close(int fd)
1028 {
1029 #if USE_SSL
1030
1031 if (fd_table[fd].ssl)
1032 ssl_shutdown_method(fd);
1033
1034 #endif
1035
1036 if (shutdown(fd, 1) < 0) {
1037 comm_close(fd);
1038 return;
1039 }
1040
1041 fd_note(fd, "lingering close");
1042 commSetTimeout(fd, 10, commLingerTimeout, NULL);
1043 commSetSelect(fd, COMM_SELECT_READ, commLingerClose, NULL, 0);
1044 }
1045
1046 #endif
1047
1048 /**
1049 * enable linger with time of 0 so that when the socket is
1050 * closed, TCP generates a RESET
1051 */
1052 void
1053 comm_reset_close(Comm::ConnectionPointer &conn)
1054 {
1055 struct linger L;
1056 L.l_onoff = 1;
1057 L.l_linger = 0;
1058
1059 if (setsockopt(conn->fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1060 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << conn->fd << " with TCP RST: " << xstrerror());
1061
1062 conn->close();
1063 }
1064
1065 // Legacy close function.
1066 void
1067 old_comm_reset_close(int fd)
1068 {
1069 struct linger L;
1070 L.l_onoff = 1;
1071 L.l_linger = 0;
1072
1073 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1074 debugs(50, DBG_CRITICAL, "ERROR: Closing FD " << fd << " with TCP RST: " << xstrerror());
1075
1076 comm_close(fd);
1077 }
1078
1079 void
1080 comm_close_start(int fd, void *data)
1081 {
1082 #if USE_SSL
1083 fde *F = &fd_table[fd];
1084 if (F->ssl)
1085 ssl_shutdown_method(fd);
1086
1087 #endif
1088
1089 }
1090
1091 void
1092 comm_close_complete(int fd, void *data)
1093 {
1094 #if USE_SSL
1095 fde *F = &fd_table[fd];
1096
1097 if (F->ssl) {
1098 SSL_free(F->ssl);
1099 F->ssl = NULL;
1100 }
1101
1102 if (F->dynamicSslContext) {
1103 SSL_CTX_free(F->dynamicSslContext);
1104 F->dynamicSslContext = NULL;
1105 }
1106 #endif
1107 fd_close(fd); /* update fdstat */
1108
1109 close(fd);
1110
1111 statCounter.syscalls.sock.closes++;
1112
1113 /* When an fd closes, give accept() a chance, if need be */
1114 Comm::AcceptLimiter::Instance().kick();
1115 }
1116
1117 /*
1118 * Close the socket fd.
1119 *
1120 * + call write handlers with ERR_CLOSING
1121 * + call read handlers with ERR_CLOSING
1122 * + call closing handlers
1123 *
1124 * NOTE: COMM_ERR_CLOSING will NOT be delivered for CommReads sitting in a
1125 * DeferredReadManager.
1126 */
1127 void
1128 _comm_close(int fd, char const *file, int line)
1129 {
1130 debugs(5, 3, "comm_close: start closing FD " << fd);
1131 assert(fd >= 0);
1132 assert(fd < Squid_MaxFD);
1133
1134 fde *F = &fd_table[fd];
1135 fdd_table[fd].close_file = file;
1136 fdd_table[fd].close_line = line;
1137
1138 if (F->closing())
1139 return;
1140
1141 /* XXX: is this obsolete behind F->closing() ? */
1142 if ( (shutting_down || reconfiguring) && (!F->flags.open || F->type == FD_FILE))
1143 return;
1144
1145 /* The following fails because ipc.c is doing calls to pipe() to create sockets! */
1146 assert(isOpen(fd));
1147
1148 assert(F->type != FD_FILE);
1149
1150 PROF_start(comm_close);
1151
1152 F->flags.close_request = 1;
1153
1154 AsyncCall::Pointer startCall=commCbCall(5,4, "comm_close_start",
1155 CommCloseCbPtrFun(comm_close_start, NULL));
1156 typedef CommCloseCbParams Params;
1157 Params &startParams = GetCommParams<Params>(startCall);
1158 startParams.fd = fd;
1159 ScheduleCallHere(startCall);
1160
1161 // a half-closed fd may lack a reader, so we stop monitoring explicitly
1162 if (commHasHalfClosedMonitor(fd))
1163 commStopHalfClosedMonitor(fd);
1164 commSetTimeout(fd, -1, NULL, NULL);
1165
1166 // notify read/write handlers after canceling select reservations, if any
1167 if (COMMIO_FD_WRITECB(fd)->active()) {
1168 commSetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1169 COMMIO_FD_WRITECB(fd)->finish(COMM_ERR_CLOSING, errno);
1170 }
1171 if (COMMIO_FD_READCB(fd)->active()) {
1172 commSetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0);
1173 COMMIO_FD_READCB(fd)->finish(COMM_ERR_CLOSING, errno);
1174 }
1175
1176 #if USE_DELAY_POOLS
1177 if (ClientInfo *clientInfo = F->clientInfo) {
1178 if (clientInfo->selectWaiting) {
1179 clientInfo->selectWaiting = false;
1180 // kick queue or it will get stuck as commWriteHandle is not called
1181 clientInfo->kickQuotaQueue();
1182 }
1183 }
1184 #endif
1185
1186 commCallCloseHandlers(fd);
1187
1188 if (F->pconn.uses)
1189 F->pconn.pool->count(F->pconn.uses);
1190
1191 comm_empty_os_read_buffers(fd);
1192
1193
1194 AsyncCall::Pointer completeCall=commCbCall(5,4, "comm_close_complete",
1195 CommCloseCbPtrFun(comm_close_complete, NULL));
1196 Params &completeParams = GetCommParams<Params>(completeCall);
1197 completeParams.fd = fd;
1198 // must use async call to wait for all callbacks
1199 // scheduled before comm_close() to finish
1200 ScheduleCallHere(completeCall);
1201
1202 PROF_stop(comm_close);
1203 }
1204
1205 /* Send a UDP datagram to the specified TO_ADDR. */
1206 int
1207 comm_udp_sendto(int fd,
1208 const Ip::Address &to_addr,
1209 const void *buf,
1210 int len)
1211 {
1212 int x = 0;
1213 struct addrinfo *AI = NULL;
1214
1215 PROF_start(comm_udp_sendto);
1216 statCounter.syscalls.sock.sendtos++;
1217
1218 debugs(50, 3, "comm_udp_sendto: Attempt to send UDP packet to " << to_addr <<
1219 " using FD " << fd << " using Port " << comm_local_port(fd) );
1220
1221 /* BUG: something in the above macro appears to occasionally be setting AI to garbage. */
1222 /* AYJ: 2007-08-27 : or was it because I wasn't then setting 'fd_table[fd].sock_family' to fill properly. */
1223 assert( NULL == AI );
1224
1225 to_addr.GetAddrInfo(AI, fd_table[fd].sock_family);
1226
1227 x = sendto(fd, buf, len, 0, AI->ai_addr, AI->ai_addrlen);
1228
1229 to_addr.FreeAddrInfo(AI);
1230
1231 PROF_stop(comm_udp_sendto);
1232
1233 if (x >= 0)
1234 return x;
1235
1236 #ifdef _SQUID_LINUX_
1237
1238 if (ECONNREFUSED != errno)
1239 #endif
1240
1241 debugs(50, 1, "comm_udp_sendto: FD " << fd << ", (family=" << fd_table[fd].sock_family << ") " << to_addr << ": " << xstrerror());
1242
1243 return COMM_ERROR;
1244 }
1245
1246 void
1247 comm_add_close_handler(int fd, PF * handler, void *data)
1248 {
1249 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", handler=" <<
1250 handler << ", data=" << data);
1251
1252 AsyncCall::Pointer call=commCbCall(5,4, "SomeCloseHandler",
1253 CommCloseCbPtrFun(handler, data));
1254 comm_add_close_handler(fd, call);
1255 }
1256
1257 void
1258 comm_add_close_handler(int fd, AsyncCall::Pointer &call)
1259 {
1260 debugs(5, 5, "comm_add_close_handler: FD " << fd << ", AsyncCall=" << call);
1261
1262 /*TODO:Check for a similar scheduled AsyncCall*/
1263 // for (c = fd_table[fd].closeHandler; c; c = c->next)
1264 // assert(c->handler != handler || c->data != data);
1265
1266 call->setNext(fd_table[fd].closeHandler);
1267
1268 fd_table[fd].closeHandler = call;
1269 }
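/* Illustrative sketch (comment only, not compiled): registering a
 * function-based close handler and removing it later when it is no longer
 * wanted. The handler and context names are hypothetical.
 *
 *   comm_add_close_handler(fd, exampleClosed, context);
 *   ...
 *   comm_remove_close_handler(fd, exampleClosed, context);
 */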
1270
1271
1272 // remove function-based close handler
1273 void
1274 comm_remove_close_handler(int fd, PF * handler, void *data)
1275 {
1276 assert (isOpen(fd));
1277 /* Find handler in list */
1278 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", handler=" <<
1279 handler << ", data=" << data);
1280
1281 AsyncCall::Pointer p, prev = NULL;
1282 for (p = fd_table[fd].closeHandler; p != NULL; prev = p, p = p->Next()) {
1283 typedef CommCbFunPtrCallT<CommCloseCbPtrFun> Call;
1284 const Call *call = dynamic_cast<const Call*>(p.getRaw());
1285 if (!call) // method callbacks have their own comm_remove_close_handler
1286 continue;
1287
1288 typedef CommCloseCbParams Params;
1289 const Params &params = GetCommParams<Params>(p);
1290 if (call->dialer.handler == handler && params.data == data)
1291 break; /* This is our handler */
1292 }
1293
1294 // comm_close removes all close handlers so our handler may be gone
1295 if (p != NULL) {
1296 p->dequeue(fd_table[fd].closeHandler, prev);
1297 p->cancel("comm_remove_close_handler");
1298 }
1299 }
1300
1301 // remove method-based close handler
1302 void
1303 comm_remove_close_handler(int fd, AsyncCall::Pointer &call)
1304 {
1305 assert (isOpen(fd));
1306 debugs(5, 5, "comm_remove_close_handler: FD " << fd << ", AsyncCall=" << call);
1307
1308 // comm_close removes all close handlers so our handler may be gone
1309 AsyncCall::Pointer p, prev = NULL;
1310 for (p = fd_table[fd].closeHandler; p != NULL && p != call; prev = p, p = p->Next());
1311
1312 if (p != NULL)
1313 p->dequeue(fd_table[fd].closeHandler, prev);
1314 call->cancel("comm_remove_close_handler");
1315 }
1316
1317 static void
1318 commSetNoLinger(int fd)
1319 {
1320
1321 struct linger L;
1322 L.l_onoff = 0; /* off */
1323 L.l_linger = 0;
1324
1325 if (setsockopt(fd, SOL_SOCKET, SO_LINGER, (char *) &L, sizeof(L)) < 0)
1326 debugs(50, 0, "commSetNoLinger: FD " << fd << ": " << xstrerror());
1327
1328 fd_table[fd].flags.nolinger = 1;
1329 }
1330
1331 static void
1332 commSetReuseAddr(int fd)
1333 {
1334 int on = 1;
1335
1336 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof(on)) < 0)
1337 debugs(50, 1, "commSetReuseAddr: FD " << fd << ": " << xstrerror());
1338 }
1339
1340 static void
1341 commSetTcpRcvbuf(int fd, int size)
1342 {
1343 if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char *) &size, sizeof(size)) < 0)
1344 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1345 if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (char *) &size, sizeof(size)) < 0)
1346 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1347 #ifdef TCP_WINDOW_CLAMP
1348 if (setsockopt(fd, SOL_TCP, TCP_WINDOW_CLAMP, (char *) &size, sizeof(size)) < 0)
1349 debugs(50, 1, "commSetTcpRcvbuf: FD " << fd << ", SIZE " << size << ": " << xstrerror());
1350 #endif
1351 }
1352
1353 int
1354 commSetNonBlocking(int fd)
1355 {
1356 #ifndef _SQUID_MSWIN_
1357 int flags;
1358 int dummy = 0;
1359 #endif
1360 #ifdef _SQUID_WIN32_
1361
1362 int nonblocking = TRUE;
1363
1364 #ifdef _SQUID_CYGWIN_
1365
1366 if (fd_table[fd].type != FD_PIPE) {
1367 #endif
1368
1369 if (ioctl(fd, FIONBIO, &nonblocking) < 0) {
1370 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror() << " " << fd_table[fd].type);
1371 return COMM_ERROR;
1372 }
1373
1374 #ifdef _SQUID_CYGWIN_
1375
1376 } else {
1377 #endif
1378 #endif
1379 #ifndef _SQUID_MSWIN_
1380
1381 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1382 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1383 return COMM_ERROR;
1384 }
1385
1386 if (fcntl(fd, F_SETFL, flags | SQUID_NONBLOCK) < 0) {
1387 debugs(50, 0, "commSetNonBlocking: FD " << fd << ": " << xstrerror());
1388 return COMM_ERROR;
1389 }
1390
1391 #endif
1392 #ifdef _SQUID_CYGWIN_
1393
1394 }
1395
1396 #endif
1397 fd_table[fd].flags.nonblocking = 1;
1398
1399 return 0;
1400 }
1401
1402 int
1403 commUnsetNonBlocking(int fd)
1404 {
1405 #ifdef _SQUID_MSWIN_
1406 int nonblocking = FALSE;
1407
1408 if (ioctlsocket(fd, FIONBIO, (unsigned long *) &nonblocking) < 0) {
1409 #else
1410 int flags;
1411 int dummy = 0;
1412
1413 if ((flags = fcntl(fd, F_GETFL, dummy)) < 0) {
1414 debugs(50, 0, "FD " << fd << ": fcntl F_GETFL: " << xstrerror());
1415 return COMM_ERROR;
1416 }
1417
1418 if (fcntl(fd, F_SETFL, flags & (~SQUID_NONBLOCK)) < 0) {
1419 #endif
1420 debugs(50, 0, "commUnsetNonBlocking: FD " << fd << ": " << xstrerror());
1421 return COMM_ERROR;
1422 }
1423
1424 fd_table[fd].flags.nonblocking = 0;
1425 return 0;
1426 }
1427
1428 void
1429 commSetCloseOnExec(int fd)
1430 {
1431 #ifdef FD_CLOEXEC
1432 int flags;
1433 int dummy = 0;
1434
1435 if ((flags = fcntl(fd, F_GETFD, dummy)) < 0) {
1436 debugs(50, 0, "FD " << fd << ": fcntl F_GETFD: " << xstrerror());
1437 return;
1438 }
1439
1440 if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
1441 debugs(50, 0, "FD " << fd << ": set close-on-exec failed: " << xstrerror());
1442
1443 fd_table[fd].flags.close_on_exec = 1;
1444
1445 #endif
1446 }
1447
1448 #ifdef TCP_NODELAY
1449 static void
1450 commSetTcpNoDelay(int fd)
1451 {
1452 int on = 1;
1453
1454 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof(on)) < 0)
1455 debugs(50, 1, "commSetTcpNoDelay: FD " << fd << ": " << xstrerror());
1456
1457 fd_table[fd].flags.nodelay = 1;
1458 }
1459
1460 #endif
1461
1462 void
1463 commSetTcpKeepalive(int fd, int idle, int interval, int timeout)
1464 {
1465 int on = 1;
1466 #ifdef TCP_KEEPCNT
1467 if (timeout && interval) {
1468 int count = (timeout + interval - 1) / interval;
1469 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(on)) < 0)
1470 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1471 }
1472 #endif
1473 #ifdef TCP_KEEPIDLE
1474 if (idle) {
1475 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(on)) < 0)
1476 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1477 }
1478 #endif
1479 #ifdef TCP_KEEPINTVL
1480 if (interval) {
1481 if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(on)) < 0)
1482 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1483 }
1484 #endif
1485 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on)) < 0)
1486 debugs(5, 1, "commSetKeepalive: FD " << fd << ": " << xstrerror());
1487 }
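/* Worked example for the TCP_KEEPCNT math above: with timeout=30s and
 * interval=5s, count = (30 + 5 - 1) / 5 = 6 probes, i.e. roughly
 * ceil(timeout/interval), so an unresponsive peer is dropped about
 * idle + 6*5 seconds after the last data.
 */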
1488
1489 void
1490 comm_init(void)
1491 {
1492 fd_table =(fde *) xcalloc(Squid_MaxFD, sizeof(fde));
1493 fdd_table = (fd_debug_t *)xcalloc(Squid_MaxFD, sizeof(fd_debug_t));
1494
1495 /* make sure the accept() socket FIFO delay queue exists */
1496 Comm::AcceptLimiter::Instance();
1497
1498 // make sure the IO pending callback table exists
1499 Comm::CallbackTableInit();
1500
1501 /* XXX account fd_table */
1502 /* Keep a few file descriptors free so that we don't run out of FD's
1503 * after accepting a client but before it opens a socket or a file.
1504 * Since Squid_MaxFD can be as high as several thousand, don't waste them */
1505 RESERVED_FD = min(100, Squid_MaxFD / 4);
1506
1507 conn_close_pool = memPoolCreate("close_handler", sizeof(close_handler));
1508
1509 TheHalfClosed = new DescriptorSet;
1510 }
1511
1512 void
1513 comm_exit(void)
1514 {
1515 delete TheHalfClosed;
1516 TheHalfClosed = NULL;
1517
1518 safe_free(fd_table);
1519 safe_free(fdd_table);
1520 Comm::CallbackTableDestruct();
1521 }
1522
1523 #if USE_DELAY_POOLS
1524 // called when the queue is done waiting for the client bucket to fill
1525 void
1526 commHandleWriteHelper(void * data)
1527 {
1528 CommQuotaQueue *queue = static_cast<CommQuotaQueue*>(data);
1529 assert(queue);
1530
1531 ClientInfo *clientInfo = queue->clientInfo;
1532 // ClientInfo invalidates the queue if freed, so if we got here through
1533 // eventAdd's cbdata protections, everything should be valid and consistent
1534 assert(clientInfo);
1535 assert(clientInfo->hasQueue());
1536 assert(clientInfo->hasQueue(queue));
1537 assert(!clientInfo->selectWaiting);
1538 assert(clientInfo->eventWaiting);
1539 clientInfo->eventWaiting = false;
1540
1541 do {
1542 // check that the head descriptor is still relevant
1543 const int head = clientInfo->quotaPeekFd();
1544 Comm::IoCallback *ccb = COMMIO_FD_WRITECB(head);
1545
1546 if (fd_table[head].clientInfo == clientInfo &&
1547 clientInfo->quotaPeekReserv() == ccb->quotaQueueReserv &&
1548 !fd_table[head].closing()) {
1549
1550 // wait for the head descriptor to become ready for writing
1551 commSetSelect(head, COMM_SELECT_WRITE, Comm::HandleWrite, ccb, 0);
1552 clientInfo->selectWaiting = true;
1553 return;
1554 }
1555
1556 clientInfo->quotaDequeue(); // remove the no longer relevant descriptor
1557 // and continue looking for a relevant one
1558 } while (clientInfo->hasQueue());
1559
1560 debugs(77,3, HERE << "emptied queue");
1561 }
1562
1563 bool
1564 ClientInfo::hasQueue() const
1565 {
1566 assert(quotaQueue);
1567 return !quotaQueue->empty();
1568 }
1569
1570 bool
1571 ClientInfo::hasQueue(const CommQuotaQueue *q) const
1572 {
1573 assert(quotaQueue);
1574 return quotaQueue == q;
1575 }
1576
1577 /// returns the first descriptor to be dequeued
1578 int
1579 ClientInfo::quotaPeekFd() const
1580 {
1581 assert(quotaQueue);
1582 return quotaQueue->front();
1583 }
1584
1585 /// returns the reservation ID of the first descriptor to be dequeued
1586 unsigned int
1587 ClientInfo::quotaPeekReserv() const
1588 {
1589 assert(quotaQueue);
1590 return quotaQueue->outs + 1;
1591 }
1592
1593 /// queues a given fd, creating the queue if necessary; returns reservation ID
1594 unsigned int
1595 ClientInfo::quotaEnqueue(int fd)
1596 {
1597 assert(quotaQueue);
1598 return quotaQueue->enqueue(fd);
1599 }
1600
1601 /// removes queue head
1602 void
1603 ClientInfo::quotaDequeue()
1604 {
1605 assert(quotaQueue);
1606 quotaQueue->dequeue();
1607 }
1608
1609 void
1610 ClientInfo::kickQuotaQueue()
1611 {
1612 if (!eventWaiting && !selectWaiting && hasQueue()) {
1613 // wait at least a second if the bucket is empty
1614 const double delay = (bucketSize < 1.0) ? 1.0 : 0.0;
1615 eventAdd("commHandleWriteHelper", &commHandleWriteHelper,
1616 quotaQueue, delay, 0, true);
1617 eventWaiting = true;
1618 }
1619 }
1620
1621 /// calculates how much to write for a single dequeued client
1622 int
1623 ClientInfo::quotaForDequed()
1624 {
1625 /* If we have multiple clients and give full bucketSize to each client then
1626 * clt1 may often get a lot more because clt1->clt2 time distance in the
1627 * select(2) callback order may be a lot smaller than cltN->clt1 distance.
1628 * We divide quota evenly to be more fair. */
1629
1630 if (!rationedCount) {
1631 rationedCount = quotaQueue->size() + 1;
1632
1633 // The delay in ration recalculation _temporarily_ deprives clients of
1634 // bytes that should have trickled in while rationedCount was positive.
1635 refillBucket();
1636
1637 // Rounding errors do not accumulate here, but we round down to avoid
1638 // negative bucket sizes after write with rationedCount=1.
1639 rationedQuota = static_cast<int>(floor(bucketSize/rationedCount));
1640 debugs(77,5, HERE << "new rationedQuota: " << rationedQuota <<
1641 '*' << rationedCount);
1642 }
1643
1644 --rationedCount;
1645 debugs(77,7, HERE << "rationedQuota: " << rationedQuota <<
1646 " rations remaining: " << rationedCount);
1647
1648 // update 'last seen' time to prevent clientdb GC from dropping us
1649 last_seen = squid_curtime;
1650 return rationedQuota;
1651 }
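/* Worked example for the rationing above: with bucketSize = 1000 bytes and 3
 * descriptors still queued, rationedCount becomes 3 + 1 = 4 and each of the
 * next 4 dequeued writers gets rationedQuota = floor(1000 / 4) = 250 bytes.
 */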
1652
1653 /// adds bytes to the quota bucket based on the rate and the time passed
1654 void
1655 ClientInfo::refillBucket()
1656 {
1657 // all these times are in seconds, with double precision
1658 const double currTime = current_dtime;
1659 const double timePassed = currTime - prevTime;
1660
1661 // Calculate allowance for the time passed. Use double to avoid
1662 // accumulating rounding errors for small intervals. For example, always
1663 // adding 1 byte instead of 1.4 results in 29% bandwidth allocation error.
1664 const double gain = timePassed * writeSpeedLimit;
1665
1666 debugs(77,5, HERE << currTime << " clt" << (const char*)hash.key << ": " <<
1667 bucketSize << " + (" << timePassed << " * " << writeSpeedLimit <<
1668 " = " << gain << ')');
1669
1670 // to further combat error accumulation during micro updates,
1671 // quit before updating time if we cannot add at least one byte
1672 if (gain < 1.0)
1673 return;
1674
1675 prevTime = currTime;
1676
1677 // for "first" connections, drain initial fat before refilling but keep
1678 // updating prevTime to avoid bursts after the fat is gone
1679 if (bucketSize > bucketSizeLimit) {
1680 debugs(77,4, HERE << "not refilling while draining initial fat");
1681 return;
1682 }
1683
1684 bucketSize += gain;
1685
1686 // obey quota limits
1687 if (bucketSize > bucketSizeLimit)
1688 bucketSize = bucketSizeLimit;
1689 }
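/* Worked example for the refill above: with writeSpeedLimit = 100 bytes/s and
 * 0.25 s since the last refill, gain = 0.25 * 100 = 25 bytes is added, capped
 * at bucketSizeLimit. With only 0.005 s passed, gain = 0.5 < 1.0, so prevTime
 * is not advanced and that fraction is credited by a later, larger refill.
 */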
1690
1691 void
1692 ClientInfo::setWriteLimiter(const int aWriteSpeedLimit, const double anInitialBurst, const double aHighWatermark)
1693 {
1694 debugs(77,5, HERE << "Write limits for " << (const char*)hash.key <<
1695 " speed=" << aWriteSpeedLimit << " burst=" << anInitialBurst <<
1696 " highwatermark=" << aHighWatermark);
1697
1698 // set or possibly update traffic shaping parameters
1699 writeLimitingActive = true;
1700 writeSpeedLimit = aWriteSpeedLimit;
1701 bucketSizeLimit = aHighWatermark;
1702
1703 // but some members should only be set once for a newly activated bucket
1704 if (firstTimeConnection) {
1705 firstTimeConnection = false;
1706
1707 assert(!selectWaiting);
1708 assert(!quotaQueue);
1709 quotaQueue = new CommQuotaQueue(this);
1710
1711 bucketSize = anInitialBurst;
1712 prevTime = current_dtime;
1713 }
1714 }
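/* Illustrative sketch (comment only, not compiled): activating shaping for a
 * client, with arbitrary example numbers: about 64000 bytes/s sustained, a
 * 128000-byte initial burst, and the same 128000-byte bucket ceiling.
 *
 *   clientInfo->setWriteLimiter(64000, 128000.0, 128000.0);
 */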
1715
1716 CommQuotaQueue::CommQuotaQueue(ClientInfo *info): clientInfo(info),
1717 ins(0), outs(0)
1718 {
1719 assert(clientInfo);
1720 }
1721
1722 CommQuotaQueue::~CommQuotaQueue()
1723 {
1724 assert(!clientInfo); // ClientInfo should clear this before destroying us
1725 }
1726
1727 /// places the given fd at the end of the queue; returns reservation ID
1728 unsigned int
1729 CommQuotaQueue::enqueue(int fd)
1730 {
1731 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1732 ": FD " << fd << " with qqid" << (ins+1) << ' ' << fds.size());
1733 fds.push_back(fd);
1734 return ++ins;
1735 }
1736
1737 /// removes queue head
1738 void
1739 CommQuotaQueue::dequeue()
1740 {
1741 assert(!fds.empty());
1742 debugs(77,5, HERE << "clt" << (const char*)clientInfo->hash.key <<
1743 ": FD " << fds.front() << " with qqid" << (outs+1) << ' ' <<
1744 fds.size());
1745 fds.pop_front();
1746 ++outs;
1747 }
1748 #endif
1749
1750 /*
1751 * hm, this might be too general-purpose for all the places we'd
1752 * like to use it.
1753 */
1754 int
1755 ignoreErrno(int ierrno)
1756 {
1757 switch (ierrno) {
1758
1759 case EINPROGRESS:
1760
1761 case EWOULDBLOCK:
1762 #if EAGAIN != EWOULDBLOCK
1763
1764 case EAGAIN:
1765 #endif
1766
1767 case EALREADY:
1768
1769 case EINTR:
1770 #ifdef ERESTART
1771
1772 case ERESTART:
1773 #endif
1774
1775 return 1;
1776
1777 default:
1778 return 0;
1779 }
1780
1781 /* NOTREACHED */
1782 }
1783
1784 void
1785 commCloseAllSockets(void)
1786 {
1787 int fd;
1788 fde *F = NULL;
1789
1790 for (fd = 0; fd <= Biggest_FD; fd++) {
1791 F = &fd_table[fd];
1792
1793 if (!F->flags.open)
1794 continue;
1795
1796 if (F->type != FD_SOCKET)
1797 continue;
1798
1799 if (F->flags.ipc) /* don't close inter-process sockets */
1800 continue;
1801
1802 if (F->timeoutHandler != NULL) {
1803 AsyncCall::Pointer callback = F->timeoutHandler;
1804 F->timeoutHandler = NULL;
1805 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": Calling timeout handler");
1806 ScheduleCallHere(callback);
1807 } else {
1808 debugs(5, 5, "commCloseAllSockets: FD " << fd << ": calling comm_reset_close()");
1809 old_comm_reset_close(fd);
1810 }
1811 }
1812 }
1813
1814 static bool
1815 AlreadyTimedOut(fde *F)
1816 {
1817 if (!F->flags.open)
1818 return true;
1819
1820 if (F->timeout == 0)
1821 return true;
1822
1823 if (F->timeout > squid_curtime)
1824 return true;
1825
1826 return false;
1827 }
1828
1829 static bool
1830 writeTimedOut(int fd)
1831 {
1832 if (!COMMIO_FD_WRITECB(fd)->active())
1833 return false;
1834
1835 if ((squid_curtime - fd_table[fd].writeStart) < Config.Timeout.write)
1836 return false;
1837
1838 return true;
1839 }
1840
1841 void
1842 checkTimeouts(void)
1843 {
1844 int fd;
1845 fde *F = NULL;
1846 AsyncCall::Pointer callback;
1847
1848 for (fd = 0; fd <= Biggest_FD; fd++) {
1849 F = &fd_table[fd];
1850
1851 if (writeTimedOut(fd)) {
1852 // We have an active write callback and we are timed out
1853 debugs(5, 5, "checkTimeouts: FD " << fd << " auto write timeout");
1854 commSetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0);
1855 COMMIO_FD_WRITECB(fd)->finish(COMM_ERROR, ETIMEDOUT);
1856 } else if (AlreadyTimedOut(F))
1857 continue;
1858
1859 debugs(5, 5, "checkTimeouts: FD " << fd << " Expired");
1860
1861 if (F->timeoutHandler != NULL) {
1862 debugs(5, 5, "checkTimeouts: FD " << fd << ": Call timeout handler");
1863 callback = F->timeoutHandler;
1864 F->timeoutHandler = NULL;
1865 ScheduleCallHere(callback);
1866 } else {
1867 debugs(5, 5, "checkTimeouts: FD " << fd << ": Forcing comm_close()");
1868 comm_close(fd);
1869 }
1870 }
1871 }
1872
1873 void CommIO::Initialise()
1874 {
1875 /* Initialize done pipe signal */
1876 int DonePipe[2];
1877 if (pipe(DonePipe)) {}
1878 DoneFD = DonePipe[1];
1879 DoneReadFD = DonePipe[0];
1880 fd_open(DoneReadFD, FD_PIPE, "async-io completion event: main");
1881 fd_open(DoneFD, FD_PIPE, "async-io completion event: threads");
1882 commSetNonBlocking(DoneReadFD);
1883 commSetNonBlocking(DoneFD);
1884 commSetSelect(DoneReadFD, COMM_SELECT_READ, NULLFDHandler, NULL, 0);
1885 Initialised = true;
1886 }
1887
1888 void CommIO::NotifyIOClose()
1889 {
1890 /* Close done pipe signal */
1891 FlushPipe();
1892 close(DoneFD);
1893 close(DoneReadFD);
1894 fd_close(DoneFD);
1895 fd_close(DoneReadFD);
1896 Initialised = false;
1897 }
1898
1899 bool CommIO::Initialised = false;
1900 bool CommIO::DoneSignalled = false;
1901 int CommIO::DoneFD = -1;
1902 int CommIO::DoneReadFD = -1;
1903
1904 void
1905 CommIO::FlushPipe()
1906 {
1907 char buf[256];
1908 FD_READ_METHOD(DoneReadFD, buf, sizeof(buf));
1909 }
1910
1911 void
1912 CommIO::NULLFDHandler(int fd, void *data)
1913 {
1914 FlushPipe();
1915 commSetSelect(fd, COMM_SELECT_READ, NULLFDHandler, NULL, 0);
1916 }
1917
1918 void
1919 CommIO::ResetNotifications()
1920 {
1921 if (DoneSignalled) {
1922 FlushPipe();
1923 DoneSignalled = false;
1924 }
1925 }
1926
1927 /// Start waiting for a possibly half-closed connection to close
1928 // by scheduling a read callback to a monitoring handler that
1929 // will close the connection on read errors.
1930 void
1931 commStartHalfClosedMonitor(int fd)
1932 {
1933 debugs(5, 5, HERE << "adding FD " << fd << " to " << *TheHalfClosed);
1934 assert(isOpen(fd));
1935 assert(!commHasHalfClosedMonitor(fd));
1936 (void)TheHalfClosed->add(fd); // could also assert the result
1937 commPlanHalfClosedCheck(); // may schedule check if we added the first FD
1938 }
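/* Illustrative sketch (comment only, not compiled): how idle-connection code
 * typically uses the monitor.
 *
 *   commStartHalfClosedMonitor(fd);      // watch an idle FD for remote close
 *   ...
 *   if (commHasHalfClosedMonitor(fd))
 *       ...still watched; a later comm_read() on the FD cancels the monitor
 *          automatically (see comm_read above)...
 */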
1939
1940 static
1941 void
1942 commPlanHalfClosedCheck()
1943 {
1944 if (!WillCheckHalfClosed && !TheHalfClosed->empty()) {
1945 eventAdd("commHalfClosedCheck", &commHalfClosedCheck, NULL, 1.0, 1);
1946 WillCheckHalfClosed = true;
1947 }
1948 }
1949
1950 /// iterates over all descriptors that may need half-closed tests and
1951 /// calls comm_read for those that do; re-schedules the check if needed
1952 static
1953 void
1954 commHalfClosedCheck(void *)
1955 {
1956 debugs(5, 5, HERE << "checking " << *TheHalfClosed);
1957
1958 typedef DescriptorSet::const_iterator DSCI;
1959 const DSCI end = TheHalfClosed->end();
1960 for (DSCI i = TheHalfClosed->begin(); i != end; ++i) {
1961 Comm::ConnectionPointer c = new Comm::Connection; // XXX: temporary. make HalfClosed a list of these.
1962 c->fd = *i;
1963 if (!fd_table[c->fd].halfClosedReader) { // not reading already
1964 AsyncCall::Pointer call = commCbCall(5,4, "commHalfClosedReader",
1965 CommIoCbPtrFun(&commHalfClosedReader, NULL));
1966 comm_read(c, NULL, 0, call);
1967 fd_table[c->fd].halfClosedReader = call;
1968 } else
1969 c->fd = -1; // XXX: temporary. prevents this placeholder Connection from closing the listed FD when it is destroyed
1970 }
1971
1972 WillCheckHalfClosed = false; // as far as we know
1973 commPlanHalfClosedCheck(); // may need to check again
1974 }
1975
1976 /// checks whether we are waiting for a possibly half-closed connection to close.
1977 /// A connection is considered monitored while its FD remains in TheHalfClosed.
1978 bool
1979 commHasHalfClosedMonitor(int fd)
1980 {
1981 return TheHalfClosed->has(fd);
1982 }
1983
1984 /// stop waiting for possibly half-closed connection to close
1985 static void
1986 commStopHalfClosedMonitor(int const fd)
1987 {
1988 debugs(5, 5, HERE << "removing FD " << fd << " from " << *TheHalfClosed);
1989
1990 // cancel the read if one was scheduled
1991 AsyncCall::Pointer reader = fd_table[fd].halfClosedReader;
1992 if (reader != NULL)
1993 comm_read_cancel(fd, reader);
1994 fd_table[fd].halfClosedReader = NULL;
1995
1996 TheHalfClosed->del(fd);
1997 }
1998
1999 /// I/O handler for the possibly half-closed connection monitoring code
2000 static void
2001 commHalfClosedReader(const Comm::ConnectionPointer &conn, char *, size_t size, comm_err_t flag, int, void *)
2002 {
2003 // there cannot be more data coming in on half-closed connections
2004 assert(size == 0);
2005 assert(conn != NULL);
2006 assert(commHasHalfClosedMonitor(conn->fd)); // or we would have canceled the read
2007
2008 fd_table[conn->fd].halfClosedReader = NULL; // done reading, for now
2009
2010 // nothing to do if fd is being closed
2011 if (flag == COMM_ERR_CLOSING)
2012 return;
2013
2014 // if read failed, close the connection
2015 if (flag != COMM_OK) {
2016 debugs(5, 3, HERE << "closing " << conn);
2017 conn->close();
2018 return;
2019 }
2020
2021 // continue waiting for close or error
2022 commPlanHalfClosedCheck(); // make sure this fd will be checked again
2023 }
2024
2025
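/* CommRead bundles the parameters of a single comm read (connection, buffer,
 * length, and completion callback); DeferredRead pairs one such read with the
 * DeferrableRead function and context that will eventually issue it. */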
2026 CommRead::CommRead() : conn(NULL), buf(NULL), len(0), callback(NULL) {}
2027
2028 CommRead::CommRead(const Comm::ConnectionPointer &c, char *buf_, int len_, AsyncCall::Pointer &callback_)
2029 : conn(c), buf(buf_), len(len_), callback(callback_) {}
2030
2031 DeferredRead::DeferredRead () : theReader(NULL), theContext(NULL), theRead(), cancelled(false) {}
2032
2033 DeferredRead::DeferredRead (DeferrableRead *aReader, void *data, CommRead const &aRead) : theReader(aReader), theContext (data), theRead(aRead), cancelled(false) {}
2034
2035 DeferredReadManager::~DeferredReadManager()
2036 {
2037 flushReads();
2038 assert (deferredReads.empty());
2039 }
2040
2041 /* explicit instantiation required for some systems */
2042
2043 /// \cond AUTODOCS-IGNORE
2044 template cbdata_type CbDataList<DeferredRead>::CBDATA_CbDataList;
2045 /// \endcond
2046
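/// postpones aRead until kickReads() or flushReads() releases it; a close
/// handler is registered so the entry is cancelled if its connection closes
/// first.
///
/// A rough, hypothetical caller sketch (the names below are illustrative
/// only, not real Squid identifiers):
///   CommRead read(conn, buf, len, callback);
///   deferredReads.delayRead(DeferredRead(&SomeCaller_DoRead, this, read));
/// where SomeCaller_DoRead is a function matching DeferrableRead that the
/// manager later invokes with the saved context and CommRead.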
2047 void
2048 DeferredReadManager::delayRead(DeferredRead const &aRead)
2049 {
2050 debugs(5, 3, "Adding deferred read on " << aRead.theRead.conn);
2051 CbDataList<DeferredRead> *temp = deferredReads.push_back(aRead);
2052
2053 // We have to use a global function as a closer and point to temp
2054 // instead of "this" because DeferredReadManager is not a job and
2055 // is not even cbdata protected
2056 AsyncCall::Pointer closer = commCbCall(5,4,
2057 "DeferredReadManager::CloseHandler",
2058 CommCloseCbPtrFun(&CloseHandler, temp));
2059 comm_add_close_handler(aRead.theRead.conn->fd, closer);
2060 temp->element.closer = closer; // remember so that we can cancel
2061 }
2062
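/// connection close handler for a deferred read: marks the list entry as
/// cancelled so it will be skipped when kicked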
2063 void
2064 DeferredReadManager::CloseHandler(int fd, void *thecbdata)
2065 {
2066 if (!cbdataReferenceValid (thecbdata))
2067 return;
2068
2069 CbDataList<DeferredRead> *temp = (CbDataList<DeferredRead> *)thecbdata;
2070
2071 temp->element.closer = NULL;
2072 temp->element.markCancelled();
2073 }
2074
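/// removes and returns the oldest deferred read; unless the entry was already
/// cancelled, its connection close handler is unregistered first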
2075 DeferredRead
2076 DeferredReadManager::popHead(CbDataListContainer<DeferredRead> &deferredReads)
2077 {
2078 assert (!deferredReads.empty());
2079
2080 DeferredRead &read = deferredReads.head->element;
2081 if (!read.cancelled) {
2082 comm_remove_close_handler(read.theRead.conn->fd, read.closer);
2083 read.closer = NULL;
2084 }
2085
2086 DeferredRead result = deferredReads.pop_front();
2087
2088 return result;
2089 }
2090
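/// restarts up to count deferred reads (all of them when count < 1);
/// cancelled entries are popped but do not count towards the limit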
2091 void
2092 DeferredReadManager::kickReads(int const count)
2093 {
2094 /* if we had CbDataList::size() we could consolidate this and flushReads */
2095
2096 if (count < 1) {
2097 flushReads();
2098 return;
2099 }
2100
2101 size_t remaining = count;
2102
2103 while (!deferredReads.empty() && remaining) {
2104 DeferredRead aRead = popHead(deferredReads);
2105 kickARead(aRead);
2106
2107 if (!aRead.cancelled)
2108 --remaining;
2109 }
2110 }
2111
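/// restarts every deferred read, in the order the reads were deferred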
2112 void
2113 DeferredReadManager::flushReads()
2114 {
2115 CbDataListContainer<DeferredRead> reads;
2116 reads = deferredReads;
2117 deferredReads = CbDataListContainer<DeferredRead>();
2118
2119 // XXX: For fairness this SHOULD randomize the order
2120 while (!reads.empty()) {
2121 DeferredRead aRead = popHead(reads);
2122 kickARead(aRead);
2123 }
2124 }
2125
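/// resumes one deferred read by calling its reader function, unless the read
/// was cancelled or its descriptor is already closing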
2126 void
2127 DeferredReadManager::kickARead(DeferredRead const &aRead)
2128 {
2129 if (aRead.cancelled)
2130 return;
2131
2132 if (Comm::IsConnOpen(aRead.theRead.conn) && fd_table[aRead.theRead.conn->fd].closing())
2133 return;
2134
2135 debugs(5, 3, "Kicking deferred read on " << aRead.theRead.conn);
2136
2137 aRead.theReader(aRead.theContext, aRead.theRead);
2138 }
2139
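/// flags this deferred read so DeferredReadManager will not resume it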
2140 void
2141 DeferredRead::markCancelled()
2142 {
2143 cancelled = true;
2144 }
2145
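/// runs one comm_select() pass for the main loop, checking descriptor
/// timeouts at most once per second, and maps the poll result onto the
/// engine status codes (0, EVENT_IDLE, or EVENT_ERROR)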
2146 int
2147 CommSelectEngine::checkEvents(int timeout)
2148 {
2149 static time_t last_timeout = 0;
2150
2151 /* No, this shouldn't be here. But it shouldn't be in each comm handler. -adrian */
2152 if (squid_curtime > last_timeout) {
2153 last_timeout = squid_curtime;
2154 checkTimeouts();
2155 }
2156
2157 switch (comm_select(timeout)) {
2158
2159 case COMM_OK:
2160
2161 case COMM_TIMEOUT:
2162 return 0;
2163
2164 case COMM_IDLE:
2165
2166 case COMM_SHUTDOWN:
2167 return EVENT_IDLE;
2168
2169 case COMM_ERROR:
2170 return EVENT_ERROR;
2171
2172 default:
2173 fatal_dump("comm.cc: Internal error -- this should never happen.");
2174 return EVENT_ERROR;
2175 }
2176 }
2177
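/* Illustrative (hypothetical) call, e.g. for a datagram IPC channel; the
 * socket path below is made up:
 *
 *   struct sockaddr_un addr;
 *   memset(&addr, 0, sizeof(addr));
 *   addr.sun_family = AF_UNIX;
 *   xstrncpy(addr.sun_path, "/var/run/squid/example.sock", sizeof(addr.sun_path));
 *   int fd = comm_open_uds(SOCK_DGRAM, 0, &addr, COMM_NONBLOCKING | COMM_DOBIND);
 *   if (fd < 0)
 *       debugs(50, DBG_IMPORTANT, "could not open UDS socket");
 */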
2178 /// Create a unix-domain socket (UDS) that only supports FD_MSGHDR I/O.
2179 int
2180 comm_open_uds(int sock_type,
2181 int proto,
2182 struct sockaddr_un* addr,
2183 int flags)
2184 {
2185 // TODO: merge with comm_openex() when Ip::Address becomes NetAddress
2186
2187 int new_socket;
2188
2189 PROF_start(comm_open);
2190 /* Create socket for accepting new connections. */
2191 statCounter.syscalls.sock.sockets++;
2192
2193 /* Setup the socket addrinfo details for use */
2194 struct addrinfo AI;
2195 AI.ai_flags = 0;
2196 AI.ai_family = PF_UNIX;
2197 AI.ai_socktype = sock_type;
2198 AI.ai_protocol = proto;
2199 AI.ai_addrlen = SUN_LEN(addr);
2200 AI.ai_addr = (sockaddr*)addr;
2201 AI.ai_canonname = NULL;
2202 AI.ai_next = NULL;
2203
2204 debugs(50, 3, HERE << "Attempting to open socket for: " << addr->sun_path);
2205
2206 if ((new_socket = socket(AI.ai_family, AI.ai_socktype, AI.ai_protocol)) < 0) {
2207 /* Increase the number of reserved fd's if calls to socket()
2208 * are failing because the open file table is full. This
2209 * limits the number of simultaneous clients */
2210
2211 if (limitError(errno)) {
2212 debugs(50, DBG_IMPORTANT, HERE << "socket failure: " << xstrerror());
2213 fdAdjustReserved();
2214 } else {
2215 debugs(50, DBG_CRITICAL, HERE << "socket failure: " << xstrerror());
2216 }
2217
2218 PROF_stop(comm_open);
2219 return -1;
2220 }
2221
2222 debugs(50, 3, HERE "Opened UDS FD " << new_socket << " : family=" << AI.ai_family << ", type=" << AI.ai_socktype << ", protocol=" << AI.ai_protocol);
2223
2224 /* update fdstat */
2225 debugs(50, 5, HERE << "FD " << new_socket << " is a new socket");
2226
2227 assert(!isOpen(new_socket));
2228 fd_open(new_socket, FD_MSGHDR, NULL);
2229
2230 fdd_table[new_socket].close_file = NULL;
2231
2232 fdd_table[new_socket].close_line = 0;
2233
2234 fd_table[new_socket].sock_family = AI.ai_family;
2235
2236 if (!(flags & COMM_NOCLOEXEC))
2237 commSetCloseOnExec(new_socket);
2238
2239 if (flags & COMM_REUSEADDR)
2240 commSetReuseAddr(new_socket);
2241
2242 if (flags & COMM_NONBLOCKING) {
2243 if (commSetNonBlocking(new_socket) != COMM_OK) {
2244 comm_close(new_socket);
2245 PROF_stop(comm_open);
2246 return -1;
2247 }
2248 }
2249
2250 if (flags & COMM_DOBIND) {
2251 if (commBind(new_socket, AI) != COMM_OK) {
2252 comm_close(new_socket);
2253 PROF_stop(comm_open);
2254 return -1;
2255 }
2256 }
2257
2258 #ifdef TCP_NODELAY
2259 if (sock_type == SOCK_STREAM)
2260 commSetTcpNoDelay(new_socket);
2261
2262 #endif
2263
2264 if (Config.tcpRcvBufsz > 0 && sock_type == SOCK_STREAM)
2265 commSetTcpRcvbuf(new_socket, Config.tcpRcvBufsz);
2266
2267 PROF_stop(comm_open);
2268
2269 return new_socket;
2270 }