4 * DEBUG: section 33 Client-side Routines
5 * AUTHOR: Duane Wessels
7 * SQUID Web Proxy Cache http://www.squid-cache.org/
8 * ----------------------------------------------------------
10 * Squid is the result of efforts by numerous individuals from
11 * the Internet community; see the CONTRIBUTORS file for full
12 * details. Many organizations have provided support for Squid's
13 * development; see the SPONSORS file for full details. Squid is
14 * Copyrighted (C) 2001 by the Regents of the University of
15 * California; see the COPYRIGHT file for full details. Squid
16 * incorporates software developed and/or copyrighted by other
17 * sources; see the CREDITS file for full details.
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
36 \defgroup ClientSide Client-Side Logics
38 \section cserrors Errors and client side
40 \par Problem the first:
41 * the store entry is no longer authoritative on the
42 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
43 * of client_side_reply.c.
44 * Problem the second: resources are wasted if we delay in cleaning up.
45 * Problem the third: we can't depend on a connection close to clean up.
47 \par Nice thing the first:
48 * Any step in the stream can callback with data
49 * representing an error.
50 * Nice thing the second: once you stop requesting reads from upstream,
51 * upstream can be stopped too.
54 * Error has a callback mechanism to hand over a membuf
55 * with the error content. The failing node pushes that back as the
56 * reply. Can this be generalised to reduce duplicate efforts?
57 * A: Possibly. For now, only one location uses this.
58 * How to deal with pre-stream errors?
59 * Tell client_side_reply that we *want* an error page before any
60 * stream calls occur. Then we simply read as normal.
63 \section pconn_logic Persistent connection logic:
66 * requests (httpClientRequest structs) get added to the connection
67 * list, with the current one being chr
70 * The request is *immediately* kicked off, and data flows through
71 * to clientSocketRecipient.
74 * If the data that arrives at clientSocketRecipient is not for the current
75 * request, clientSocketRecipient simply returns, without requesting more
76 * data, or sending it.
79 * ClientKeepAliveNextRequest will then detect the presence of data in
80 * the next ClientHttpRequest, and will send it, reestablishing the
85 #include "client_side.h"
86 #include "clientStream.h"
87 #include "ProtoPort.h"
88 #include "auth/UserRequest.h"
91 #include "HttpHdrContRange.h"
92 #include "HttpReply.h"
93 #include "HttpRequest.h"
94 #include "ident/Config.h"
95 #include "ident/Ident.h"
96 #include "ip/IpIntercept.h"
97 #include "MemObject.h"
99 #include "client_side_request.h"
100 #include "acl/FilledChecklist.h"
101 #include "ConnectionDetail.h"
102 #include "client_side_reply.h"
103 #include "ClientRequestContext.h"
105 #include "SquidTime.h"
106 #include "ChunkedCodingParser.h"
109 #define comm_close comm_lingering_close
112 /* our socket-related context */
115 CBDATA_CLASS_INIT(ClientSocketContext
);
118 ClientSocketContext::operator new (size_t byteCount
)
120 /* derived classes with different sizes must implement their own new */
121 assert (byteCount
== sizeof (ClientSocketContext
));
122 CBDATA_INIT_TYPE(ClientSocketContext
);
123 return cbdataAlloc(ClientSocketContext
);
127 ClientSocketContext::operator delete (void *address
)
129 cbdataFree (address
);
132 /* Local functions */
133 /* ClientSocketContext */
134 static ClientSocketContext
*ClientSocketContextNew(ClientHttpRequest
*);
136 static IOCB clientWriteComplete
;
137 static IOCB clientWriteBodyComplete
;
138 static bool clientParseRequest(ConnStateData
* conn
, bool &do_next_read
);
139 static PF clientLifetimeTimeout
;
140 static ClientSocketContext
*parseHttpRequestAbort(ConnStateData
* conn
,
142 static ClientSocketContext
*parseHttpRequest(ConnStateData
*, HttpParser
*, HttpRequestMethod
*, HttpVersion
*);
144 static IDCB clientIdentDone
;
146 static CSCB clientSocketRecipient
;
147 static CSD clientSocketDetach
;
148 static void clientSetKeepaliveFlag(ClientHttpRequest
*);
149 static int clientIsContentLengthValid(HttpRequest
* r
);
150 static bool okToAccept();
151 static int clientIsRequestBodyValid(int64_t bodyLength
);
152 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength
);
154 static void clientUpdateStatHistCounters(log_type logType
, int svc_time
);
155 static void clientUpdateStatCounters(log_type logType
);
156 static void clientUpdateHierCounters(HierarchyLogEntry
*);
157 static bool clientPingHasFinished(ping_data
const *aPing
);
158 void prepareLogWithRequestDetails(HttpRequest
*, AccessLogEntry
*);
160 static int connIsUsable(ConnStateData
* conn
);
162 static int responseFinishedOrFailed(HttpReply
* rep
, StoreIOBuffer
const &receivedData
);
163 static void ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest
, ConnStateData
* conn
);
164 static void clientUpdateSocketStats(log_type logType
, size_t size
);
166 char *skipLeadingSpace(char *aString
);
167 static void connNoteUseOfBuffer(ConnStateData
* conn
, size_t byteCount
);
168 static int connKeepReadingIncompleteRequest(ConnStateData
* conn
);
169 static void connCancelIncompleteRequests(ConnStateData
* conn
);
171 static ConnStateData
*connStateCreate(const IpAddress
&peer
, const IpAddress
&me
, int fd
, http_port_list
*port
);
175 ClientSocketContext::fd() const
178 assert (http
->getConn() != NULL
);
179 return http
->getConn()->fd
;
183 ClientSocketContext::getTail() const
185 if (http
->client_stream
.tail
)
186 return (clientStreamNode
*)http
->client_stream
.tail
->data
;
192 ClientSocketContext::getClientReplyContext() const
194 return (clientStreamNode
*)http
->client_stream
.tail
->prev
->data
;
198 * This routine should be called to grow the inbuf and then
202 ConnStateData::readSomeData()
209 debugs(33, 4, "clientReadSomeData: FD " << fd
<< ": reading request...");
211 makeSpaceAvailable();
213 typedef CommCbMemFunT
<ConnStateData
, CommIoCbParams
> Dialer
;
214 AsyncCall::Pointer call
= asyncCall(33, 5, "ConnStateData::clientReadRequest",
215 Dialer(this, &ConnStateData::clientReadRequest
));
216 comm_read(fd
, in
.addressToReadInto(), getAvailableBufferLength(), call
);
221 ClientSocketContext::removeFromConnectionList(ConnStateData
* conn
)
223 ClientSocketContext::Pointer
*tempContextPointer
;
224 assert(conn
!= NULL
&& cbdataReferenceValid(conn
));
225 assert(conn
->getCurrentContext() != NULL
);
226 /* Unlink us from the connection request list */
227 tempContextPointer
= & conn
->currentobject
;
229 while (tempContextPointer
->getRaw()) {
230 if (*tempContextPointer
== this)
233 tempContextPointer
= &(*tempContextPointer
)->next
;
236 assert(tempContextPointer
->getRaw() != NULL
);
237 *tempContextPointer
= next
;
241 ClientSocketContext::~ClientSocketContext()
243 clientStreamNode
*node
= getTail();
246 ClientSocketContext
*streamContext
= dynamic_cast<ClientSocketContext
*> (node
->data
.getRaw());
249 /* We are *always* the tail - prevent recursive free */
250 assert(this == streamContext
);
256 deRegisterWithConn();
258 httpRequestFree(http
);
260 /* clean up connection links to us */
261 assert(this != next
.getRaw());
265 ClientSocketContext::registerWithConn()
267 assert (!connRegistered_
);
269 assert (http
->getConn() != NULL
);
270 connRegistered_
= true;
271 http
->getConn()->addContextToQueue(this);
275 ClientSocketContext::deRegisterWithConn()
277 assert (connRegistered_
);
278 removeFromConnectionList(http
->getConn());
279 connRegistered_
= false;
283 ClientSocketContext::connIsFinished()
286 assert (http
->getConn() != NULL
);
287 deRegisterWithConn();
288 /* we can't handle any more stream data - detach */
289 clientStreamDetach(getTail(), http
);
292 ClientSocketContext::ClientSocketContext() : http(NULL
), reply(NULL
), next(NULL
),
294 mayUseConnection_ (false),
295 connRegistered_ (false)
297 memset (reqbuf
, '\0', sizeof (reqbuf
));
300 deferredparams
.node
= NULL
;
301 deferredparams
.rep
= NULL
;
304 ClientSocketContext
*
305 ClientSocketContextNew(ClientHttpRequest
* http
)
307 ClientSocketContext
*newContext
;
308 assert(http
!= NULL
);
309 newContext
= new ClientSocketContext
;
310 newContext
->http
= http
;
316 clientIdentDone(const char *ident
, void *data
)
318 ConnStateData
*conn
= (ConnStateData
*)data
;
319 xstrncpy(conn
->rfc931
, ident
? ident
: dash_str
, USER_IDENT_SZ
);
324 clientUpdateStatCounters(log_type logType
)
326 statCounter
.client_http
.requests
++;
328 if (logTypeIsATcpHit(logType
))
329 statCounter
.client_http
.hits
++;
331 if (logType
== LOG_TCP_HIT
)
332 statCounter
.client_http
.disk_hits
++;
333 else if (logType
== LOG_TCP_MEM_HIT
)
334 statCounter
.client_http
.mem_hits
++;
338 clientUpdateStatHistCounters(log_type logType
, int svc_time
)
340 statHistCount(&statCounter
.client_http
.all_svc_time
, svc_time
);
342 * The idea here is not to be complete, but to get service times
343 * for only well-defined types. For example, we don't include
344 * LOG_TCP_REFRESH_FAIL because it's not really a cache hit
345 * (we *tried* to validate it, but failed).
350 case LOG_TCP_REFRESH_UNMODIFIED
:
351 statHistCount(&statCounter
.client_http
.nh_svc_time
, svc_time
);
354 case LOG_TCP_IMS_HIT
:
355 statHistCount(&statCounter
.client_http
.nm_svc_time
, svc_time
);
360 case LOG_TCP_MEM_HIT
:
362 case LOG_TCP_OFFLINE_HIT
:
363 statHistCount(&statCounter
.client_http
.hit_svc_time
, svc_time
);
368 case LOG_TCP_CLIENT_REFRESH_MISS
:
369 statHistCount(&statCounter
.client_http
.miss_svc_time
, svc_time
);
373 /* make compiler warnings go away */
379 clientPingHasFinished(ping_data
const *aPing
)
381 if (0 != aPing
->stop
.tv_sec
&& 0 != aPing
->start
.tv_sec
)
388 clientUpdateHierCounters(HierarchyLogEntry
* someEntry
)
392 switch (someEntry
->code
) {
393 #if USE_CACHE_DIGESTS
398 statCounter
.cd
.times_used
++;
406 case FIRST_PARENT_MISS
:
408 case CLOSEST_PARENT_MISS
:
409 statCounter
.icp
.times_used
++;
410 i
= &someEntry
->ping
;
412 if (clientPingHasFinished(i
))
413 statHistCount(&statCounter
.icp
.query_svc_time
,
414 tvSubUsec(i
->start
, i
->stop
));
417 statCounter
.icp
.query_timeouts
++;
424 statCounter
.netdb
.times_used
++;
434 ClientHttpRequest::updateCounters()
436 clientUpdateStatCounters(logType
);
438 if (request
->errType
!= ERR_NONE
)
439 statCounter
.client_http
.errors
++;
441 clientUpdateStatHistCounters(logType
,
442 tvSubMsec(start_time
, current_time
));
444 clientUpdateHierCounters(&request
->hier
);
448 prepareLogWithRequestDetails(HttpRequest
* request
, AccessLogEntry
* aLogEntry
)
454 Adaptation::Icap::History::Pointer ih
= request
->icapHistory();
456 if (Config
.onoff
.log_mime_hdrs
) {
460 packerToMemInit(&p
, &mb
);
461 request
->header
.packInto(&p
);
462 aLogEntry
->headers
.request
= xstrdup(mb
.buf
);
467 packerToMemInit(&p
, &mb
);
470 ih
->lastIcapHeader
.packInto(&p
);
471 aLogEntry
->headers
.icap
= xstrdup(mb
.buf
);
480 aLogEntry
->icap
.processingTime
= ih
->processingTime();
483 aLogEntry
->http
.method
= request
->method
;
484 aLogEntry
->http
.version
= request
->http_ver
;
485 aLogEntry
->hier
= request
->hier
;
486 if (request
->content_length
> 0) // negative when no body or unknown length
487 aLogEntry
->cache
.requestSize
+= request
->content_length
;
488 aLogEntry
->cache
.extuser
= request
->extacl_user
.termedBuf();
490 if (request
->auth_user_request
) {
492 if (request
->auth_user_request
->username())
493 aLogEntry
->cache
.authuser
=
494 xstrdup(request
->auth_user_request
->username());
496 AUTHUSERREQUESTUNLOCK(request
->auth_user_request
, "request via clientPrepareLogWithRequestDetails");
501 ClientHttpRequest::logRequest()
503 if (out
.size
|| logType
) {
504 al
.icp
.opcode
= ICP_INVALID
;
506 debugs(33, 9, "clientLogRequest: al.url='" << al
.url
<< "'");
509 al
.http
.code
= al
.reply
->sline
.status
;
510 al
.http
.content_type
= al
.reply
->content_type
.termedBuf();
511 } else if (loggingEntry() && loggingEntry()->mem_obj
) {
512 al
.http
.code
= loggingEntry()->mem_obj
->getReply()->sline
.status
;
513 al
.http
.content_type
= loggingEntry()->mem_obj
->getReply()->content_type
.termedBuf();
516 debugs(33, 9, "clientLogRequest: http.code='" << al
.http
.code
<< "'");
518 if (loggingEntry() && loggingEntry()->mem_obj
)
519 al
.cache
.objectSize
= loggingEntry()->contentLen();
521 al
.cache
.caddr
.SetNoAddr();
523 if (getConn() != NULL
) al
.cache
.caddr
= getConn()->log_addr
;
525 al
.cache
.requestSize
= req_sz
;
526 al
.cache
.requestHeadersSize
= req_sz
;
528 al
.cache
.replySize
= out
.size
;
529 al
.cache
.replyHeadersSize
= out
.headers_sz
;
531 al
.cache
.highOffset
= out
.offset
;
533 al
.cache
.code
= logType
;
535 al
.cache
.msec
= tvSubMsec(start_time
, current_time
);
538 prepareLogWithRequestDetails(request
, &al
);
540 if (getConn() != NULL
&& getConn()->rfc931
[0])
541 al
.cache
.rfc931
= getConn()->rfc931
;
545 /* This is broken. Fails if the connection has been closed. Needs
546 * to snarf the ssl details some place earlier..
548 if (getConn() != NULL
)
549 al
.cache
.ssluser
= sslGetUserEmail(fd_table
[getConn()->fd
].ssl
);
553 ACLFilledChecklist
*checklist
= clientAclChecklistCreate(Config
.accessList
.log
, this);
556 checklist
->reply
= HTTPMSGLOCK(al
.reply
);
558 if (!Config
.accessList
.log
|| checklist
->fastCheck()) {
560 al
.request
= HTTPMSGLOCK(request
);
561 accessLogLog(&al
, checklist
);
564 if (getConn() != NULL
)
565 clientdbUpdate(getConn()->peer
, logType
, PROTO_HTTP
, out
.size
);
570 accessLogFreeMemory(&al
);
575 ClientHttpRequest::freeResources()
579 safe_free(redirect
.location
);
580 range_iter
.boundary
.clean();
581 HTTPMSGUNLOCK(request
);
583 if (client_stream
.tail
)
584 clientStreamAbort((clientStreamNode
*)client_stream
.tail
->data
, this);
588 httpRequestFree(void *data
)
590 ClientHttpRequest
*http
= (ClientHttpRequest
*)data
;
591 assert(http
!= NULL
);
596 ConnStateData::areAllContextsForThisConnection() const
598 assert(this != NULL
);
599 ClientSocketContext::Pointer context
= getCurrentContext();
601 while (context
.getRaw()) {
602 if (context
->http
->getConn() != this)
605 context
= context
->next
;
612 ConnStateData::freeAllContexts()
614 ClientSocketContext::Pointer context
;
616 while ((context
= getCurrentContext()).getRaw() != NULL
) {
617 assert(getCurrentContext() !=
618 getCurrentContext()->next
);
619 context
->connIsFinished();
620 assert (context
!= currentobject
);
624 /* This is a handler normally called by comm_close() */
625 void ConnStateData::connStateClosed(const CommCloseCbParams
&io
)
627 assert (fd
== io
.fd
);
628 deleteThis("ConnStateData::connStateClosed");
631 // cleans up before destructor is called
633 ConnStateData::swanSong()
635 debugs(33, 2, "ConnStateData::swanSong: FD " << fd
);
637 flags
.readMoreRequests
= false;
638 clientdbEstablished(peer
, -1); /* decrement */
639 assert(areAllContextsForThisConnection());
642 if (auth_user_request
!= NULL
) {
643 debugs(33, 4, "ConnStateData::swanSong: freeing auth_user_request '" << auth_user_request
<< "' (this is '" << this << "')");
644 auth_user_request
->onConnectionClose(this);
648 comm_close(pinning
.fd
);
650 BodyProducer::swanSong();
651 flags
.swanSang
= true;
655 ConnStateData::isOpen() const
657 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
659 !fd_table
[fd
].closing();
662 ConnStateData::~ConnStateData()
664 assert(this != NULL
);
665 debugs(33, 3, "ConnStateData::~ConnStateData: FD " << fd
);
668 debugs(33, 1, "BUG: ConnStateData did not close FD " << fd
);
671 debugs(33, 1, "BUG: ConnStateData was not destroyed properly; FD " << fd
);
673 AUTHUSERREQUESTUNLOCK(auth_user_request
, "~conn");
675 cbdataReferenceDone(port
);
677 if (bodyPipe
!= NULL
)
678 stopProducingFor(bodyPipe
, false);
682 * clientSetKeepaliveFlag() sets request->flags.proxy_keepalive.
683 * This is the client-side persistent connection flag. We need
684 * to set this relatively early in the request processing
685 * to handle hacks for broken servers and clients.
688 clientSetKeepaliveFlag(ClientHttpRequest
* http
)
690 HttpRequest
*request
= http
->request
;
691 const HttpHeader
*req_hdr
= &request
->header
;
693 debugs(33, 3, "clientSetKeepaliveFlag: http_ver = " <<
694 request
->http_ver
.major
<< "." << request
->http_ver
.minor
);
695 debugs(33, 3, "clientSetKeepaliveFlag: method = " <<
696 RequestMethodStr(request
->method
));
698 HttpVersion
http_ver(1,0);
699 /* we are HTTP/1.0, no matter what the client requests... */
701 if (httpMsgIsPersistent(http_ver
, req_hdr
))
702 request
->flags
.proxy_keepalive
= 1;
706 clientIsContentLengthValid(HttpRequest
* r
)
708 switch (r
->method
.id()) {
713 /* PUT/POST requires a request entity */
714 return (r
->content_length
>= 0);
719 /* We do not want to see a request entity on GET/HEAD requests */
720 return (r
->content_length
<= 0 || Config
.onoff
.request_entities
);
723 /* For other types of requests we don't care */
731 clientIsRequestBodyValid(int64_t bodyLength
)
740 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength
)
742 if (Config
.maxRequestBodySize
&&
743 bodyLength
> Config
.maxRequestBodySize
)
744 return 1; /* too large */
751 connIsUsable(ConnStateData
* conn
)
753 if (conn
== NULL
|| !cbdataReferenceValid(conn
) || conn
->fd
== -1)
761 ClientSocketContext::Pointer
762 ConnStateData::getCurrentContext() const
765 return currentobject
;
769 ClientSocketContext::deferRecipientForLater(clientStreamNode
* node
, HttpReply
* rep
, StoreIOBuffer receivedData
)
771 debugs(33, 2, "clientSocketRecipient: Deferring request " << http
->uri
);
772 assert(flags
.deferred
== 0);
774 deferredparams
.node
= node
;
775 deferredparams
.rep
= rep
;
776 deferredparams
.queuedBuffer
= receivedData
;
781 responseFinishedOrFailed(HttpReply
* rep
, StoreIOBuffer
const & receivedData
)
783 if (rep
== NULL
&& receivedData
.data
== NULL
&& receivedData
.length
== 0)
790 ClientSocketContext::startOfOutput() const
792 return http
->out
.size
== 0;
796 ClientSocketContext::lengthToSend(Range
<int64_t> const &available
)
798 /*the size of available range can always fit in a size_t type*/
799 size_t maximum
= (size_t)available
.size();
801 if (!http
->request
->range
)
804 assert (canPackMoreRanges());
806 if (http
->range_iter
.debt() == -1)
809 assert (http
->range_iter
.debt() > 0);
811 /* TODO this + the last line could be a range intersection calculation */
812 if (available
.start
< http
->range_iter
.currentSpec()->offset
)
815 return min(http
->range_iter
.debt(), (int64_t)maximum
);
819 ClientSocketContext::noteSentBodyBytes(size_t bytes
)
821 http
->out
.offset
+= bytes
;
823 if (!http
->request
->range
)
826 if (http
->range_iter
.debt() != -1) {
827 http
->range_iter
.debt(http
->range_iter
.debt() - bytes
);
828 assert (http
->range_iter
.debt() >= 0);
831 /* debt() always stops at -1, below that is a bug */
832 assert (http
->range_iter
.debt() >= -1);
836 ClientHttpRequest::multipartRangeRequest() const
838 return request
->multipartRangeRequest();
842 ClientSocketContext::multipartRangeRequest() const
844 return http
->multipartRangeRequest();
848 ClientSocketContext::sendBody(HttpReply
* rep
, StoreIOBuffer bodyData
)
852 if (!multipartRangeRequest()) {
853 size_t length
= lengthToSend(bodyData
.range());
854 noteSentBodyBytes (length
);
855 AsyncCall::Pointer call
= commCbCall(33, 5, "clientWriteBodyComplete",
856 CommIoCbPtrFun(clientWriteBodyComplete
, this));
857 comm_write(fd(), bodyData
.data
, length
, call
);
863 packRange(bodyData
, &mb
);
865 if (mb
.contentSize()) {
867 AsyncCall::Pointer call
= commCbCall(33, 5, "clientWriteComplete",
868 CommIoCbPtrFun(clientWriteComplete
, this));
869 comm_write_mbuf(fd(), &mb
, call
);
871 writeComplete(fd(), NULL
, 0, COMM_OK
);
874 /** put terminating boundary for multiparts */
876 clientPackTermBound(String boundary
, MemBuf
* mb
)
878 mb
->Printf("\r\n--" SQUIDSTRINGPH
"--\r\n", SQUIDSTRINGPRINT(boundary
));
879 debugs(33, 6, "clientPackTermBound: buf offset: " << mb
->size
);
882 /** appends a "part" HTTP header (as in a multi-part/range reply) to the buffer */
884 clientPackRangeHdr(const HttpReply
* rep
, const HttpHdrRangeSpec
* spec
, String boundary
, MemBuf
* mb
)
886 HttpHeader
hdr(hoReply
);
892 debugs(33, 5, "clientPackRangeHdr: appending boundary: " << boundary
);
893 /* rfc2046 requires to _prepend_ boundary with <crlf>! */
894 mb
->Printf("\r\n--" SQUIDSTRINGPH
"\r\n", SQUIDSTRINGPRINT(boundary
));
896 /* stuff the header with required entries and pack it */
898 if (rep
->header
.has(HDR_CONTENT_TYPE
))
899 hdr
.putStr(HDR_CONTENT_TYPE
, rep
->header
.getStr(HDR_CONTENT_TYPE
));
901 httpHeaderAddContRange(&hdr
, *spec
, rep
->content_length
);
903 packerToMemInit(&p
, mb
);
911 /* append <crlf> (we packed a header, not a reply) */
916 * extracts a "range" from *buf and appends them to mb, updating
917 * all offsets and such.
920 ClientSocketContext::packRange(StoreIOBuffer
const &source
, MemBuf
* mb
)
922 HttpHdrRangeIter
* i
= &http
->range_iter
;
923 Range
<int64_t> available (source
.range());
924 char const *buf
= source
.data
;
926 while (i
->currentSpec() && available
.size()) {
927 const size_t copy_sz
= lengthToSend(available
);
931 * intersection of "have" and "need" ranges must not be empty
933 assert(http
->out
.offset
< i
->currentSpec()->offset
+ i
->currentSpec()->length
);
934 assert(http
->out
.offset
+ available
.size() > i
->currentSpec()->offset
);
937 * put boundary and headers at the beginning of a range in a
941 if (http
->multipartRangeRequest() && i
->debt() == i
->currentSpec()->length
) {
942 assert(http
->memObject());
944 http
->memObject()->getReply(), /* original reply */
945 i
->currentSpec(), /* current range */
946 i
->boundary
, /* boundary, the same for all */
953 debugs(33, 3, "clientPackRange: appending " << copy_sz
<< " bytes");
955 noteSentBodyBytes (copy_sz
);
957 mb
->append(buf
, copy_sz
);
962 available
.start
+= copy_sz
;
971 assert((available
.size() >= 0 && i
->debt() >= 0) || i
->debt() == -1);
973 if (!canPackMoreRanges()) {
974 debugs(33, 3, "clientPackRange: Returning because !canPackMoreRanges.");
977 /* put terminating boundary for multiparts */
978 clientPackTermBound(i
->boundary
, mb
);
983 int64_t next
= getNextRangeOffset();
985 assert (next
>= http
->out
.offset
);
987 int64_t skip
= next
- http
->out
.offset
;
989 /* adjust for not to be transmitted bytes */
990 http
->out
.offset
= next
;
992 if (available
.size() <= skip
)
995 available
.start
+= skip
;
1004 /** returns expected content length for multi-range replies
1005 * note: assumes that httpHdrRangeCanonize has already been called
1006 * warning: assumes that HTTP headers for individual ranges at the
1007 * time of the actual assembly will be exactly the same as
1008 * the headers when clientMRangeCLen() is called */
1010 ClientHttpRequest::mRangeCLen()
1015 assert(memObject());
1018 HttpHdrRange::iterator pos
= request
->range
->begin();
1020 while (pos
!= request
->range
->end()) {
1021 /* account for headers for this range */
1023 clientPackRangeHdr(memObject()->getReply(),
1024 *pos
, range_iter
.boundary
, &mb
);
1027 /* account for range content */
1028 clen
+= (*pos
)->length
;
1030 debugs(33, 6, "clientMRangeCLen: (clen += " << mb
.size
<< " + " << (*pos
)->length
<< ") == " << clen
);
1034 /* account for the terminating boundary */
1037 clientPackTermBound(range_iter
.boundary
, &mb
);
1047 * returns true if If-Range specs match reply, false otherwise
1050 clientIfRangeMatch(ClientHttpRequest
* http
, HttpReply
* rep
)
1052 const TimeOrTag spec
= http
->request
->header
.getTimeOrTag(HDR_IF_RANGE
);
1053 /* check for parsing failure */
1060 ETag rep_tag
= rep
->header
.getETag(HDR_ETAG
);
1061 debugs(33, 3, "clientIfRangeMatch: ETags: " << spec
.tag
.str
<< " and " <<
1062 (rep_tag
.str
? rep_tag
.str
: "<none>"));
1065 return 0; /* entity has no etag to compare with! */
1067 if (spec
.tag
.weak
|| rep_tag
.weak
) {
1068 debugs(33, 1, "clientIfRangeMatch: Weak ETags are not allowed in If-Range: " << spec
.tag
.str
<< " ? " << rep_tag
.str
);
1069 return 0; /* must use strong validator for sub-range requests */
1072 return etagIsEqual(&rep_tag
, &spec
.tag
);
1075 /* got modification time? */
1076 if (spec
.time
>= 0) {
1077 return http
->storeEntry()->lastmod
<= spec
.time
;
1080 assert(0); /* should not happen */
1085 * generates a "unique" boundary string for multipart responses
1086 * the caller is responsible for cleaning the string */
1088 ClientHttpRequest::rangeBoundaryStr() const
1092 String
b(APP_FULLNAME
);
1094 key
= storeEntry()->getMD5Text();
1095 b
.append(key
, strlen(key
));
1099 /** adds appropriate Range headers if needed */
1101 ClientSocketContext::buildRangeHeader(HttpReply
* rep
)
1103 HttpHeader
*hdr
= rep
? &rep
->header
: 0;
1104 const char *range_err
= NULL
;
1105 HttpRequest
*request
= http
->request
;
1106 assert(request
->range
);
1107 /* check if we still want to do ranges */
1110 range_err
= "no [parse-able] reply";
1111 else if ((rep
->sline
.status
!= HTTP_OK
) && (rep
->sline
.status
!= HTTP_PARTIAL_CONTENT
))
1112 range_err
= "wrong status code";
1113 else if (hdr
->has(HDR_CONTENT_RANGE
))
1114 range_err
= "origin server does ranges";
1115 else if (rep
->content_length
< 0)
1116 range_err
= "unknown length";
1117 else if (rep
->content_length
!= http
->memObject()->getReply()->content_length
)
1118 range_err
= "INCONSISTENT length"; /* a bug? */
1120 /* hits only - upstream peer determines correct behaviour on misses, and client_side_reply determines
1123 else if (logTypeIsATcpHit(http
->logType
) && http
->request
->header
.has(HDR_IF_RANGE
) && !clientIfRangeMatch(http
, rep
))
1124 range_err
= "If-Range match failed";
1125 else if (!http
->request
->range
->canonize(rep
))
1126 range_err
= "canonization failed";
1127 else if (http
->request
->range
->isComplex())
1128 range_err
= "too complex range header";
1129 else if (!logTypeIsATcpHit(http
->logType
) && http
->request
->range
->offsetLimitExceeded())
1130 range_err
= "range outside range_offset_limit";
1132 /* get rid of our range specs on error */
1134 /* XXX We do this here because we need canonisation etc. However, this current
1135 * code will lead to incorrect store offset requests - the store will have the
1136 * offset data, but we won't be requesting it.
1137 * So, we can either re-request, or generate an error
1139 debugs(33, 3, "clientBuildRangeHeader: will not do ranges: " << range_err
<< ".");
1140 delete http
->request
->range
;
1141 http
->request
->range
= NULL
;
1143 /* XXX: TODO: Review, this unconditional set may be wrong. - TODO: review. */
1144 httpStatusLineSet(&rep
->sline
, rep
->sline
.version
,
1145 HTTP_PARTIAL_CONTENT
, NULL
);
1146 // web server responded with a valid, but unexpected range.
1147 // will (try-to) forward as-is.
1148 //TODO: we should cope with multirange request/responses
1149 bool replyMatchRequest
= rep
->content_range
!= NULL
?
1150 request
->range
->contains(rep
->content_range
->spec
) :
1152 const int spec_count
= http
->request
->range
->specs
.count
;
1153 int64_t actual_clen
= -1;
1155 debugs(33, 3, "clientBuildRangeHeader: range spec count: " <<
1156 spec_count
<< " virgin clen: " << rep
->content_length
);
1157 assert(spec_count
> 0);
1158 /* ETags should not be returned with Partial Content replies? */
1159 hdr
->delById(HDR_ETAG
);
1160 /* append appropriate header(s) */
1162 if (spec_count
== 1) {
1163 if (!replyMatchRequest
) {
1164 hdr
->delById(HDR_CONTENT_RANGE
);
1165 hdr
->putContRange(rep
->content_range
);
1166 actual_clen
= rep
->content_length
;
1167 //http->range_iter.pos = rep->content_range->spec.begin();
1168 (*http
->range_iter
.pos
)->offset
= rep
->content_range
->spec
.offset
;
1169 (*http
->range_iter
.pos
)->length
= rep
->content_range
->spec
.length
;
1172 HttpHdrRange::iterator pos
= http
->request
->range
->begin();
1174 /* append Content-Range */
1176 if (!hdr
->has(HDR_CONTENT_RANGE
)) {
1177 /* No content range, so this was a full object we are
1180 httpHeaderAddContRange(hdr
, **pos
, rep
->content_length
);
1183 /* set new Content-Length to the actual number of bytes
1184 * transmitted in the message-body */
1185 actual_clen
= (*pos
)->length
;
1189 /* generate boundary string */
1190 http
->range_iter
.boundary
= http
->rangeBoundaryStr();
1191 /* delete old Content-Type, add ours */
1192 hdr
->delById(HDR_CONTENT_TYPE
);
1193 httpHeaderPutStrf(hdr
, HDR_CONTENT_TYPE
,
1194 "multipart/byteranges; boundary=\"" SQUIDSTRINGPH
"\"",
1195 SQUIDSTRINGPRINT(http
->range_iter
.boundary
));
1196 /* Content-Length is not required in multipart responses
1197 * but it is always nice to have one */
1198 actual_clen
= http
->mRangeCLen();
1199 /* http->out needs to start where we want data at */
1200 http
->out
.offset
= http
->range_iter
.currentSpec()->offset
;
1203 /* replace Content-Length header */
1204 assert(actual_clen
>= 0);
1206 hdr
->delById(HDR_CONTENT_LENGTH
);
1208 hdr
->putInt64(HDR_CONTENT_LENGTH
, actual_clen
);
1210 debugs(33, 3, "clientBuildRangeHeader: actual content length: " << actual_clen
);
1212 /* And start the range iter off */
1213 http
->range_iter
.updateSpec();
1218 ClientSocketContext::prepareReply(HttpReply
* rep
)
1222 if (http
->request
->range
)
1223 buildRangeHeader(rep
);
1227 ClientSocketContext::sendStartOfMessage(HttpReply
* rep
, StoreIOBuffer bodyData
)
1231 MemBuf
*mb
= rep
->pack();
1232 /* Save length of headers for persistent conn checks */
1233 http
->out
.headers_sz
= mb
->contentSize();
1236 headersLog(0, 0, http
->request
->method
, rep
);
1239 if (bodyData
.data
&& bodyData
.length
) {
1240 if (!multipartRangeRequest()) {
1241 size_t length
= lengthToSend(bodyData
.range());
1242 noteSentBodyBytes (length
);
1244 mb
->append(bodyData
.data
, length
);
1246 packRange(bodyData
, mb
);
1251 debugs(33,7, HERE
<< "sendStartOfMessage schedules clientWriteComplete");
1252 AsyncCall::Pointer call
= commCbCall(33, 5, "clientWriteComplete",
1253 CommIoCbPtrFun(clientWriteComplete
, this));
1254 comm_write_mbuf(fd(), mb
, call
);
1260 * Write a chunk of data to a client socket. If the reply is present,
1261 * send the reply headers down the wire too, and clean them up when
1264 * The request is one backed by a connection, not an internal request.
1265 * data context is not NULL
1266 * There are no more entries in the stream chain.
1269 clientSocketRecipient(clientStreamNode
* node
, ClientHttpRequest
* http
,
1270 HttpReply
* rep
, StoreIOBuffer receivedData
)
1273 /* Test preconditions */
1274 assert(node
!= NULL
);
1275 PROF_start(clientSocketRecipient
);
1276 /* TODO: handle this rather than asserting
1277 * - it should only ever happen if we cause an abort and
1278 * the callback chain loops back to here, so we can simply return.
1279 * However, that itself shouldn't happen, so it stays as an assert for now.
1281 assert(cbdataReferenceValid(node
));
/* we are the terminal node in the stream chain */
1282 assert(node
->node
.next
== NULL
);
1283 ClientSocketContext::Pointer context
= dynamic_cast<ClientSocketContext
*>(node
->data
.getRaw());
1284 assert(context
!= NULL
);
1285 assert(connIsUsable(http
->getConn()));
1286 fd
= http
->getConn()->fd
;
1287 /* TODO: check offset is what we asked for */
/* not the currently-writing context: queue this data for later delivery */
1289 if (context
!= http
->getConn()->getCurrentContext()) {
1290 context
->deferRecipientForLater(node
, rep
, receivedData
);
1291 PROF_stop(clientSocketRecipient
);
1295 if (responseFinishedOrFailed(rep
, receivedData
)) {
1296 context
->writeComplete(fd
, NULL
, 0, COMM_OK
);
1297 PROF_stop(clientSocketRecipient
);
/* headers already went out earlier: this delivery is body-only */
1301 if (!context
->startOfOutput())
1302 context
->sendBody(rep
, receivedData
);
1305 http
->al
.reply
= HTTPMSGLOCK(rep
);
1306 context
->sendStartOfMessage(rep
, receivedData
);
1309 PROF_stop(clientSocketRecipient
);
1313 * Called when a downstream node is no longer interested in
1314 * our data. As we are a terminal node, this means on aborts
1318 clientSocketDetach(clientStreamNode
* node
, ClientHttpRequest
* http
)
1320 /* Test preconditions */
1321 assert(node
!= NULL
);
1322 /* TODO: handle this rather than asserting
1323 * - it should only ever happen if we cause an abort and
1324 * the callback chain loops back to here, so we can simply return.
1325 * However, that itself shouldn't happen, so it stays as an assert for now.
1327 assert(cbdataReferenceValid(node
));
1328 /* Set null by ContextFree */
1329 assert(node
->node
.next
== NULL
);
1330 /* this is the assert discussed above */
1331 assert(NULL
== dynamic_cast<ClientSocketContext
*>(node
->data
.getRaw()));
1332 /* We are only called when the client socket shuts down.
1333 * Tell the prev pipeline member we're finished
1335 clientStreamDetach(node
, http
);
/* Body-write completion callback: the buffer is not freed here (hence the
 * NULL buf forwarded), everything else is delegated to clientWriteComplete. */
1339 clientWriteBodyComplete(int fd
, char *buf
, size_t size
, comm_err_t errflag
, int xerrno
, void *data
)
1341 debugs(33,7, HERE
<< "clientWriteBodyComplete schedules clientWriteComplete");
1342 clientWriteComplete(fd
, NULL
, size
, errflag
, xerrno
, data
);
/* Prepare a persistent connection for the next request: label the FD,
 * (re)arm the persistent-request timeout, then resume reading. */
1346 ConnStateData::readNextRequest()
1348 debugs(33, 5, "ConnStateData::readNextRequest: FD " << fd
<< " reading next req");
1350 fd_note(fd
, "Waiting for next request");
1352 * Set the timeout BEFORE calling clientReadRequest().
1354 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
1355 AsyncCall::Pointer timeoutCall
= asyncCall(33, 5, "ConnStateData::requestTimeout",
1356 TimeoutDialer(this, &ConnStateData::requestTimeout
));
1357 commSetTimeout(fd
, Config
.Timeout
.persistent_request
, timeoutCall
);
1360 /** Please don't do anything with the FD past here! */
/* Re-deliver stream data that was queued while another pipelined request was
 * still writing: replays the saved clientSocketRecipient parameters. */
1364 ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest
, ConnStateData
* conn
)
1366 debugs(33, 2, "ClientSocketContextPushDeferredIfNeeded: FD " << conn
->fd
<< " Sending next");
1368 /** If the client stream is waiting on a socket write to occur, then */
1370 if (deferredRequest
->flags
.deferred
) {
1371 /** NO data is allowed to have been sent. */
1372 assert(deferredRequest
->http
->out
.size
== 0);
1374 clientSocketRecipient(deferredRequest
->deferredparams
.node
,
1375 deferredRequest
->http
,
1376 deferredRequest
->deferredparams
.rep
,
1377 deferredRequest
->deferredparams
.queuedBuffer
);
1380 /** otherwise, the request is still active in a callback somewhere,
/* Drive the keep-alive transition after a completed response: close dead
 * pinned connections, try to parse an already-buffered pipelined request,
 * close half-closed idle connections, replay a deferred request, or start
 * reading the next one. */
1386 ClientSocketContext::keepaliveNextRequest()
1388 ConnStateData
* conn
= http
->getConn();
1389 bool do_next_read
= false;
1391 debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: FD " << conn
->fd
);
/* a pinned server-side connection that has gone away cannot be reused */
1394 if (conn
->pinning
.pinned
&& conn
->pinning
.fd
== -1) {
1395 debugs(33, 2, "clientKeepaliveNextRequest: FD " << conn
->fd
<< " Connection was pinned but server side gone. Terminating client connection");
1396 comm_close(conn
->fd
);
1401 * Attempt to parse a request from the request buffer.
1402 * If we've been fed a pipelined request it may already
1403 * be in our read buffer.
1406 * This needs to fall through - if we're unlucky and parse the _last_ request
1407 * from our read buffer we may never re-register for another client read.
1410 if (clientParseRequest(conn
, do_next_read
)) {
1411 debugs(33, 3, "clientSocketContext::keepaliveNextRequest: FD " << conn
->fd
<< ": parsed next request from buffer");
1415 * Either we need to kick-start another read or, if we have
1416 * a half-closed connection, kill it after the last request.
1417 * This saves waiting for half-closed connections to finish being
1418 * half-closed _AND_ then, sometimes, spending "Timeout" time in
1419 * the keepalive "Waiting for next request" state.
1421 if (commIsHalfClosed(conn
->fd
) && (conn
->getConcurrentRequestCount() == 0)) {
1422 debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: half-closed client with no pending requests, closing");
1423 comm_close(conn
->fd
);
1427 ClientSocketContext::Pointer deferredRequest
;
1430 * At this point we either have a parsed request (which we've
1431 * kicked off the processing for) or not. If we have a deferred
1432 * request (parsed but deferred for pipelining processing reasons)
1433 * then look at processing it. If not, simply kickstart
1437 if ((deferredRequest
= conn
->getCurrentContext()).getRaw()) {
1438 debugs(33, 3, "ClientSocketContext:: FD " << conn
->fd
<< ": calling PushDeferredIfNeeded");
1439 ClientSocketContextPushDeferredIfNeeded(deferredRequest
, conn
);
1441 debugs(33, 3, "ClientSocketContext:: FD " << conn
->fd
<< ": calling conn->readNextRequest()");
1442 conn
->readNextRequest();
/* Account bytes written to the client in the global statistics counters;
 * TCP hits are additionally accumulated in the hit counter. */
1447 clientUpdateSocketStats(log_type logType
, size_t size
)
1452 kb_incr(&statCounter
.client_http
.kbytes_out
, size
);
1454 if (logTypeIsATcpHit(logType
))
1455 kb_incr(&statCounter
.client_http
.hit_kbytes_out
, size
);
1459 * increments iterator "i"
1460 * used by clientPackMoreRanges
1462 \retval true there is still data available to pack more ranges
1466 ClientSocketContext::canPackMoreRanges() const
1468 /** first update iterator "i" if needed */
1470 if (!http
->range_iter
.debt()) {
1471 debugs(33, 5, "ClientSocketContext::canPackMoreRanges: At end of current range spec for FD " << fd());
/* current spec exhausted: advance to the next range spec, if any */
1473 if (http
->range_iter
.pos
.incrementable())
1474 ++http
->range_iter
.pos
;
1476 http
->range_iter
.updateSpec();
/* invariant: zero debt iff no current spec */
1479 assert(!http
->range_iter
.debt() == !http
->range_iter
.currentSpec());
1481 /* paranoid sync condition */
1482 /* continue condition: need_more_data */
1483 debugs(33, 5, "ClientSocketContext::canPackMoreRanges: returning " << (http
->range_iter
.currentSpec() ? true : false));
1484 return http
->range_iter
.currentSpec() ? true : false;
/* Compute the store offset of the next byte to request from upstream.
 * Three cases: client sent a Range (use the range iterator), the reply
 * carries Content-Range while the request did not, or plain sequential. */
1488 ClientSocketContext::getNextRangeOffset() const
1490 if (http
->request
->range
) {
1491 /* offset in range specs does not count the prefix of an http msg */
1492 debugs (33, 5, "ClientSocketContext::getNextRangeOffset: http offset " << http
->out
.offset
);
1493 /* check: reply was parsed and range iterator was initialized */
1494 assert(http
->range_iter
.valid
);
1495 /* filter out data according to range specs */
1496 assert (canPackMoreRanges());
1498 int64_t start
; /* offset of still missing data */
1499 assert(http
->range_iter
.currentSpec());
/* start = end of current spec minus the bytes still owed (debt) */
1500 start
= http
->range_iter
.currentSpec()->offset
+ http
->range_iter
.currentSpec()->length
- http
->range_iter
.debt();
1501 debugs(33, 3, "clientPackMoreRanges: in: offset: " << http
->out
.offset
);
1502 debugs(33, 3, "clientPackMoreRanges: out:"
1503 " start: " << start
<<
1504 " spec[" << http
->range_iter
.pos
- http
->request
->range
->begin() << "]:" <<
1505 " [" << http
->range_iter
.currentSpec()->offset
<<
1506 ", " << http
->range_iter
.currentSpec()->offset
+ http
->range_iter
.currentSpec()->length
<< "),"
1507 " len: " << http
->range_iter
.currentSpec()->length
<<
1508 " debt: " << http
->range_iter
.debt());
/* -1 length means "to end of entity"; only bounded specs can be checked */
1509 if (http
->range_iter
.currentSpec()->length
!= -1)
1510 assert(http
->out
.offset
<= start
); /* we did not miss it */
1515 } else if (reply
&& reply
->content_range
) {
1516 /* request does not have ranges, but reply does */
1517 /** \todo FIXME: should use range_iter_pos on reply, as soon as reply->content_range
1518 * becomes HttpHdrRange rather than HttpHdrRangeSpec.
1520 return http
->out
.offset
+ reply
->content_range
->spec
.offset
;
1523 return http
->out
.offset
;
/* Request the next chunk of response data from the upstream stream node,
 * targeting this context's reqbuf at the next needed offset. */
1527 ClientSocketContext::pullData()
1529 debugs(33, 5, "ClientSocketContext::pullData: FD " << fd() <<
1530 " attempting to pull upstream data");
1532 /* More data will be coming from the stream. */
1533 StoreIOBuffer readBuffer
;
1534 /* XXX: Next requested byte in the range sequence */
1535 /* XXX: length = getmaximumrangelength */
1536 readBuffer
.offset
= getNextRangeOffset();
1537 readBuffer
.length
= HTTP_REQBUF_SZ
;
1538 readBuffer
.data
= reqbuf
;
1539 /* we may note we have reached the end of the wanted ranges */
1540 clientStreamRead(getTail(), http
, readBuffer
);
/* Translate the upstream stream status into this socket's state, with extra
 * end-of-response detection for range requests and Content-Range replies.
 * STREAM_COMPLETE keeps the connection alive; STREAM_UNPLANNED_COMPLETE
 * forces a close. */
1543 clientStream_status_t
1544 ClientSocketContext::socketState()
1546 switch (clientStreamStatus(getTail(), http
)) {
1549 /* check for range support ending */
1551 if (http
->request
->range
) {
1552 /* check: reply was parsed and range iterator was initialized */
1553 assert(http
->range_iter
.valid
);
1554 /* filter out data according to range specs */
1556 if (!canPackMoreRanges()) {
1557 debugs(33, 5, HERE
<< "Range request at end of returnable " <<
1558 "range sequence on FD " << fd());
/* keep-alive only when the client asked for it */
1560 if (http
->request
->flags
.proxy_keepalive
)
1561 return STREAM_COMPLETE
;
1563 return STREAM_UNPLANNED_COMPLETE
;
1565 } else if (reply
&& reply
->content_range
) {
1566 /* reply has content-range, but Squid is not managing ranges */
1567 const int64_t &bytesSent
= http
->out
.offset
;
1568 const int64_t &bytesExpected
= reply
->content_range
->spec
.length
;
1570 debugs(33, 7, HERE
<< "body bytes sent vs. expected: " <<
1571 bytesSent
<< " ? " << bytesExpected
<< " (+" <<
1572 reply
->content_range
->spec
.offset
<< ")");
1574 // did we get at least what we expected, based on range specs?
1576 if (bytesSent
== bytesExpected
) { // got everything
1577 if (http
->request
->flags
.proxy_keepalive
)
1578 return STREAM_COMPLETE
;
1580 return STREAM_UNPLANNED_COMPLETE
;
1583 // The logic below is not clear: If we got more than we
1584 // expected why would persistency matter? Should not this
1585 // always be an error?
1586 if (bytesSent
> bytesExpected
) { // got extra
1587 if (http
->request
->flags
.proxy_keepalive
)
1588 return STREAM_COMPLETE
;
1590 return STREAM_UNPLANNED_COMPLETE
;
1593 // did not get enough yet, expecting more
1598 case STREAM_COMPLETE
:
1599 return STREAM_COMPLETE
;
1601 case STREAM_UNPLANNED_COMPLETE
:
1602 return STREAM_UNPLANNED_COMPLETE
;
1605 return STREAM_FAILED
;
1608 fatal ("unreachable code\n");
1613 * A write has just completed to the client, or we have just realised there is
1614 * no more data to send.
/* C-linkage trampoline: recover the ClientSocketContext from the callback
 * cookie and forward to the member function (xerrno is not needed there). */
1617 clientWriteComplete(int fd
, char *bufnotused
, size_t size
, comm_err_t errflag
, int xerrno
, void *data
)
1619 ClientSocketContext
*context
= (ClientSocketContext
*)data
;
1620 context
->writeComplete (fd
, bufnotused
, size
, errflag
);
/* Close this context; body not visible in this extraction chunk. */
1624 ClientSocketContext::doClose()
1629 /** Called to initiate (and possibly complete) closing of the context.
1630 * The underlying socket may be already closed */
1632 ClientSocketContext::initiateClose(const char *reason
)
1634 debugs(33, 5, HERE
<< "initiateClose: closing for " << reason
);
1637 ConnStateData
* conn
= http
->getConn();
/* a request body is still in flight: drain it before closing so the
 * client sees the reply rather than a connection reset */
1640 if (const int64_t expecting
= conn
->bodySizeLeft()) {
1641 debugs(33, 5, HERE
<< "ClientSocketContext::initiateClose: " <<
1642 "closing, but first " << conn
<< " needs to read " <<
1643 expecting
<< " request body bytes with " <<
1644 conn
->in
.notYetUsed
<< " notYetUsed");
1646 if (conn
->closing()) {
1647 debugs(33, 2, HERE
<< "avoiding double-closing " << conn
);
1652 * XXX We assume the reply fits in the TCP transmit
1653 * window. If not the connection may stall while sending
1654 * the reply (before reaching here) if the client does not
1655 * try to read the response while sending the request body.
1656 * As of yet we have not received any complaints indicating
1657 * this may be an issue.
1659 conn
->startClosing(reason
);
1670 ClientSocketContext::writeComplete(int fd
, char *bufnotused
, size_t size
, comm_err_t errflag
)
1672 StoreEntry
*entry
= http
->storeEntry();
1673 http
->out
.size
+= size
;
1675 debugs(33, 5, "clientWriteComplete: FD " << fd
<< ", sz " << size
<<
1676 ", err " << errflag
<< ", off " << http
->out
.size
<< ", len " <<
1677 entry
? entry
->objectLen() : 0);
1678 clientUpdateSocketStats(http
->logType
, size
);
1679 assert (this->fd() == fd
);
1681 /* Bail out quickly on COMM_ERR_CLOSING - close handlers will tidy up */
1683 if (errflag
== COMM_ERR_CLOSING
)
1686 if (errflag
|| clientHttpRequestStatus(fd
, http
)) {
1687 initiateClose("failure or true request status");
1688 /* Do we leak here ? */
1692 switch (socketState()) {
1698 case STREAM_COMPLETE
:
1699 debugs(33, 5, "clientWriteComplete: FD " << fd
<< " Keeping Alive");
1700 keepaliveNextRequest();
1703 case STREAM_UNPLANNED_COMPLETE
:
1704 initiateClose("STREAM_UNPLANNED_COMPLETE");
1708 initiateClose("STREAM_FAILED");
1712 fatal("Hit unreachable code in clientWriteComplete\n");
1716 extern "C" CSR clientGetMoreData
;
1717 extern "C" CSS clientReplyStatus
;
1718 extern "C" CSD clientReplyDetach
;
/* Build an error context for an unparseable/oversized request: creates a
 * ClientHttpRequest carrying the given pseudo-URI (e.g. "error:...") and
 * wires up a client stream so an error reply can be generated. */
1720 static ClientSocketContext
*
1721 parseHttpRequestAbort(ConnStateData
* conn
, const char *uri
)
1723 ClientHttpRequest
*http
;
1724 ClientSocketContext
*context
;
1725 StoreIOBuffer tempBuffer
;
1726 http
= new ClientHttpRequest(conn
);
1727 http
->req_sz
= conn
->in
.notYetUsed
;
1728 http
->uri
= xstrdup(uri
);
1729 setLogUri (http
, uri
);
1730 context
= ClientSocketContextNew(http
);
1731 tempBuffer
.data
= context
->reqbuf
;
1732 tempBuffer
.length
= HTTP_REQBUF_SZ
;
1733 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
1734 clientReplyStatus
, new clientReplyContext(http
), clientSocketRecipient
,
1735 clientSocketDetach
, context
, tempBuffer
);
/* Skip leading whitespace in aString.
 * NOTE(review): 'result' is captured before the skip loop and the return
 * statement is not visible in this extraction — confirm against the full
 * source whether the advanced pointer or the original one is returned. */
1740 skipLeadingSpace(char *aString
)
1742 char *result
= aString
;
1744 while (xisspace(*aString
))
1751 * 'end' defaults to NULL for backwards compatibility
1752 * remove default value if we ever get rid of NULL-terminated
/* Scan backwards from the end of the request line looking for the
 * whitespace that precedes a trailing "HTTP/" version token. */
1756 findTrailingHTTPVersion(const char *uriAndHTTPVersion
, const char *end
)
1759 end
= uriAndHTTPVersion
+ strcspn(uriAndHTTPVersion
, "\r\n");
1763 for (; end
> uriAndHTTPVersion
; end
--) {
1764 if (*end
== '\n' || *end
== '\r')
1767 if (xisspace(*end
)) {
1768 if (strncasecmp(end
+ 1, "HTTP/", 5) == 0)
/* Record the URI used for access logging; control characters are escaped
 * so the log line cannot be corrupted by a crafted URL. */
1779 setLogUri(ClientHttpRequest
* http
, char const *uri
)
1781 safe_free(http
->log_uri
);
1783 if (!stringHasCntl(uri
))
1784 http
->log_uri
= xstrndup(uri
, MAX_URL
);
1786 http
->log_uri
= xstrndup(rfc1738_escape_unescaped(uri
), MAX_URL
);
/* Rewrite a reverse-proxy (accelerator) request-URI into an absolute URL.
 * Source of the authority part, in priority order: the Host header (when
 * vhost / switched-to-https), the port's defaultsite, or the local socket
 * address (vport == -1 uses the local port, vport > 0 a static port). */
1790 prepareAcceleratedURL(ConnStateData
* conn
, ClientHttpRequest
*http
, char *url
, const char *req_hdr
)
1792 int vhost
= conn
->port
->vhost
;
1793 int vport
= conn
->port
->vport
;
1795 char ntoabuf
[MAX_IPSTRLEN
];
1797 http
->flags
.accel
= 1;
1799 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1801 if (strncasecmp(url
, "cache_object://", 15) == 0)
1802 return; /* already in good shape */
1805 if (conn
->port
->vhost
)
1806 return; /* already in good shape */
1808 /* else we need to ignore the host name */
1809 url
= strstr(url
, "//");
1811 #if SHOULD_REJECT_UNKNOWN_URLS
1814 return parseHttpRequestAbort(conn
, "error:invalid-request");
1819 url
= strchr(url
+ 2, '/');
1825 if (internalCheck(url
)) {
1826 /* prepend our name & port */
1827 http
->uri
= xstrdup(internalLocalUri(NULL
, url
));
1831 const bool switchedToHttps
= conn
->switchedToHttps();
1832 const bool tryHostHeader
= vhost
|| switchedToHttps
;
1833 if (tryHostHeader
&& (host
= mime_get_header(req_hdr
, "Host")) != NULL
) {
1834 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
+
1836 http
->uri
= (char *)xcalloc(url_sz
, 1);
/* after an SSL bump the scheme must be https regardless of port protocol */
1837 const char *protocol
= switchedToHttps
?
1838 "https" : conn
->port
->protocol
;
1839 snprintf(http
->uri
, url_sz
, "%s://%s%s", protocol
, host
, url
);
1840 debugs(33, 5, "ACCEL VHOST REWRITE: '" << http
->uri
<< "'");
1841 } else if (conn
->port
->defaultsite
) {
1842 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
+
1843 strlen(conn
->port
->defaultsite
);
1844 http
->uri
= (char *)xcalloc(url_sz
, 1);
1845 snprintf(http
->uri
, url_sz
, "%s://%s%s",
1846 conn
->port
->protocol
, conn
->port
->defaultsite
, url
);
1847 debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: '" << http
->uri
<<"'");
1848 } else if (vport
== -1) {
1849 /* Put the local socket IP address as the hostname. */
1850 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
;
1851 http
->uri
= (char *)xcalloc(url_sz
, 1);
1852 snprintf(http
->uri
, url_sz
, "%s://%s:%d%s",
1853 http
->getConn()->port
->protocol
,
1854 http
->getConn()->me
.NtoA(ntoabuf
,MAX_IPSTRLEN
),
1855 http
->getConn()->me
.GetPort(), url
);
1856 debugs(33, 5, "ACCEL VPORT REWRITE: '" << http
->uri
<< "'");
1857 } else if (vport
> 0) {
1858 /* Put the local socket IP address as the hostname, but static port */
1859 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
;
1860 http
->uri
= (char *)xcalloc(url_sz
, 1);
1861 snprintf(http
->uri
, url_sz
, "%s://%s:%d%s",
1862 http
->getConn()->port
->protocol
,
1863 http
->getConn()->me
.NtoA(ntoabuf
,MAX_IPSTRLEN
),
1865 debugs(33, 5, "ACCEL VPORT REWRITE: '" << http
->uri
<< "'");
/* Rewrite an intercepted (transparent/NAT) request-URI into an absolute URL:
 * prefer the Host header; fall back to the local socket address and port. */
1870 prepareTransparentURL(ConnStateData
* conn
, ClientHttpRequest
*http
, char *url
, const char *req_hdr
)
1873 char ntoabuf
[MAX_IPSTRLEN
];
1876 return; /* already in good shape */
1878 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1880 if ((host
= mime_get_header(req_hdr
, "Host")) != NULL
) {
1881 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
+
1883 http
->uri
= (char *)xcalloc(url_sz
, 1);
1884 snprintf(http
->uri
, url_sz
, "%s://%s%s",
1885 conn
->port
->protocol
, host
, url
);
1886 debugs(33, 5, "TRANSPARENT HOST REWRITE: '" << http
->uri
<<"'");
1888 /* Put the local socket IP address as the hostname. */
1889 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
;
1890 http
->uri
= (char *)xcalloc(url_sz
, 1);
1891 snprintf(http
->uri
, url_sz
, "%s://%s:%d%s",
1892 http
->getConn()->port
->protocol
,
1893 http
->getConn()->me
.NtoA(ntoabuf
,MAX_IPSTRLEN
),
1894 http
->getConn()->me
.GetPort(), url
);
1895 debugs(33, 5, "TRANSPARENT REWRITE: '" << http
->uri
<< "'");
1899 // Temporary hack helper: determine whether the request is chunked, expensive
/* Parses the headers into a throwaway HttpRequest and checks for
 * "Transfer-Encoding: chunked" as a list member. */
1901 isChunkedRequest(const HttpParser
*hp
)
1903 HttpRequest request
;
1904 if (!request
.parseHeader(HttpParserHdrBuf(hp
), HttpParserHdrSz(hp
)))
1907 return request
.header
.has(HDR_TRANSFER_ENCODING
) &&
1908 request
.header
.hasListMember(HDR_TRANSFER_ENCODING
, "chunked", ',');
1913 * parseHttpRequest()
1916 * NULL on incomplete requests
1917 * a ClientSocketContext structure on success or failure.
1918 * Sets result->flags.parsed_ok to 0 if failed to parse the request.
1919 * Sets result->flags.parsed_ok to 1 if we have a good request.
1921 static ClientSocketContext
*
1922 parseHttpRequest(ConnStateData
*conn
, HttpParser
*hp
, HttpRequestMethod
* method_p
, HttpVersion
*http_ver
)
1924 char *req_hdr
= NULL
;
1927 ClientHttpRequest
*http
;
1928 ClientSocketContext
*result
;
1929 StoreIOBuffer tempBuffer
;
1932 /* pre-set these values to make aborting simpler */
1933 *method_p
= METHOD_NONE
;
1935 /* NP: don't be tempted to move this down or remove again.
1936 * It's the only DDoS protection old-String has against long URL */
1937 if ( hp
->bufsiz
<= 0) {
1938 debugs(33, 5, "Incomplete request, waiting for end of request line");
1940 } else if ( (size_t)hp
->bufsiz
>= Config
.maxRequestHeaderSize
&& headersEnd(hp
->buf
, Config
.maxRequestHeaderSize
) == 0) {
1941 debugs(33, 5, "parseHttpRequest: Too large request");
1942 return parseHttpRequestAbort(conn
, "error:request-too-large");
1945 /* Attempt to parse the first line; this'll define the method, url, version and header begin */
1946 r
= HttpParserParseReqLine(hp
);
1949 debugs(33, 5, "Incomplete request, waiting for end of request line");
1954 return parseHttpRequestAbort(conn
, "error:invalid-request");
1957 /* Request line is valid here .. */
1958 *http_ver
= HttpVersion(hp
->v_maj
, hp
->v_min
);
1960 /* This call scans the entire request, not just the headers */
1961 if (hp
->v_maj
> 0) {
1962 if ((req_sz
= headersEnd(hp
->buf
, hp
->bufsiz
)) == 0) {
1963 debugs(33, 5, "Incomplete request, waiting for end of headers");
/* HTTP/0.9: no headers follow the request line */
1967 debugs(33, 3, "parseHttpRequest: Missing HTTP identifier");
1968 req_sz
= HttpParserReqSz(hp
);
1971 /* We know the whole request is in hp->buf now */
1973 assert(req_sz
<= (size_t) hp
->bufsiz
);
1975 /* Will the following be true with HTTP/0.9 requests? probably not .. */
1976 /* So the rest of the code will need to deal with '0'-byte headers (ie, none, so don't try parsing em) */
1979 hp
->hdr_end
= req_sz
- 1;
1981 hp
->hdr_start
= hp
->req_end
+ 1;
1983 /* Enforce max_request_size */
1984 if (req_sz
>= Config
.maxRequestHeaderSize
) {
1985 debugs(33, 5, "parseHttpRequest: Too large request");
1986 return parseHttpRequestAbort(conn
, "error:request-too-large");
/* decode the method token from the parsed request line */
1990 *method_p
= HttpRequestMethod(&hp
->buf
[hp
->m_start
], &hp
->buf
[hp
->m_end
]+1);
1992 /* deny CONNECT via accelerated ports */
1993 if (*method_p
== METHOD_CONNECT
&& conn
&& conn
->port
&& conn
->port
->accel
) {
1994 debugs(33, DBG_IMPORTANT
, "WARNING: CONNECT method received on " << conn
->port
->protocol
<< " Accelerator port " << conn
->port
->s
.GetPort() );
1995 /* XXX need a way to say "this many character length string" */
1996 debugs(33, DBG_IMPORTANT
, "WARNING: for request: " << hp
->buf
);
1997 /* XXX need some way to set 405 status on the error reply */
1998 return parseHttpRequestAbort(conn
, "error:method-not-allowed");
2001 if (*method_p
== METHOD_NONE
) {
2002 /* XXX need a way to say "this many character length string" */
2003 debugs(33, 1, "clientParseRequestMethod: Unsupported method in request '" << hp
->buf
<< "'");
2005 /* XXX where's the method set for this error? */
2006 return parseHttpRequestAbort(conn
, "error:unsupported-request-method");
2010 * Process headers after request line
2011 * TODO: Use httpRequestParse here.
2013 /* XXX this code should be modified to take a const char * later! */
2014 req_hdr
= (char *) hp
->buf
+ hp
->req_end
+ 1;
2016 debugs(33, 3, "parseHttpRequest: req_hdr = {" << req_hdr
<< "}");
2018 end
= (char *) hp
->buf
+ hp
->hdr_end
;
2020 debugs(33, 3, "parseHttpRequest: end = {" << end
<< "}");
2023 * Check that the headers don't have double-CR.
2024 * NP: strnstr is required so we don't search any possible binary body blobs.
2026 if ( squid_strnstr(req_hdr
, "\r\r\n", req_sz
) ) {
2027 debugs(33, 1, "WARNING: suspicious HTTP request contains double CR");
2028 return parseHttpRequestAbort(conn
, "error:double-CR");
2031 debugs(33, 3, "parseHttpRequest: prefix_sz = " <<
2032 (int) HttpParserRequestLen(hp
) << ", req_line_sz = " <<
2033 HttpParserReqSz(hp
));
2035 // Temporary hack: We might receive a chunked body from a broken HTTP/1.1
2036 // client that sends chunked requests to HTTP/1.0 Squid. If the request
2037 // might have a chunked body, parse the headers early to look for the
2038 // "Transfer-Encoding: chunked" header. If we find it, wait until the
2039 // entire body is available so that we can set the content length and
2040 // forward the request without chunks. The primary reason for this is
2041 // to avoid forwarding a chunked request because the server side lacks
2042 // logic to determine when it is valid to do so.
2043 // FUTURE_CODE_TO_SUPPORT_CHUNKED_REQUESTS below will replace this hack.
2044 if (hp
->v_min
== 1 && hp
->v_maj
== 1 && // broken client, may send chunks
2045 Config
.maxChunkedRequestBodySize
> 0 && // configured to dechunk
2046 (*method_p
== METHOD_PUT
|| *method_p
== METHOD_POST
)) {
2048 // check only once per request because isChunkedRequest is expensive
2049 if (conn
->in
.dechunkingState
== ConnStateData::chunkUnknown
) {
2050 if (isChunkedRequest(hp
))
2051 conn
->startDechunkingRequest(hp
);
2053 conn
->in
.dechunkingState
= ConnStateData::chunkNone
;
2056 if (conn
->in
.dechunkingState
== ConnStateData::chunkParsing
) {
2057 if (conn
->parseRequestChunks(hp
)) // parses newly read chunks
2058 return NULL
; // wait for more data
2059 debugs(33, 5, HERE
<< "Got complete chunked request or err.");
2060 assert(conn
->in
.dechunkingState
!= ConnStateData::chunkParsing
);
2064 /* Ok, all headers are received */
2065 http
= new ClientHttpRequest(conn
);
2067 http
->req_sz
= HttpParserRequestLen(hp
);
2068 result
= ClientSocketContextNew(http
);
2069 tempBuffer
.data
= result
->reqbuf
;
2070 tempBuffer
.length
= HTTP_REQBUF_SZ
;
/* wire the client stream: reply generator upstream, this socket downstream */
2072 ClientStreamData newServer
= new clientReplyContext(http
);
2073 ClientStreamData newClient
= result
;
2074 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
2075 clientReplyStatus
, newServer
, clientSocketRecipient
,
2076 clientSocketDetach
, newClient
, tempBuffer
);
2078 debugs(33, 5, "parseHttpRequest: Request Header is\n" <<(hp
->buf
) + hp
->hdr_start
);
2082 * XXX this should eventually not use a malloc'ed buffer; the transformation code
2083 * below needs to be modified to not expect a mutable nul-terminated string.
2085 char *url
= (char *)xmalloc(hp
->u_end
- hp
->u_start
+ 16);
2087 memcpy(url
, hp
->buf
+ hp
->u_start
, hp
->u_end
- hp
->u_start
+ 1);
2089 url
[hp
->u_end
- hp
->u_start
+ 1] = '\0';
2091 #if THIS_VIOLATES_HTTP_SPECS_ON_URL_TRANSFORMATION
2093 if ((t
= strchr(url
, '#'))) /* remove HTML anchors */
2098 /* Rewrite the URL in transparent or accelerator mode */
2099 /* NP: there are several cases to traverse here:
2100 * - standard mode (forward proxy)
2101 * - transparent mode (TPROXY)
2102 * - transparent mode with failures
2103 * - intercept mode (NAT)
2104 * - intercept mode with failures
2105 * - accelerator mode (reverse proxy)
2107 * - mixed combos of the above with internal URL
2109 if (conn
->transparent()) {
2110 /* intercept or transparent mode, properly working with no failures */
2111 http
->flags
.intercepted
= conn
->port
->intercepted
;
2112 http
->flags
.spoof_client_ip
= conn
->port
->spoof_client_ip
;
2113 prepareTransparentURL(conn
, http
, url
, req_hdr
);
2115 } else if (conn
->port
->intercepted
|| conn
->port
->spoof_client_ip
) {
2116 /* transparent or intercept mode with failures */
2117 prepareTransparentURL(conn
, http
, url
, req_hdr
);
2119 } else if (conn
->port
->accel
|| conn
->switchedToHttps()) {
2120 /* accelerator mode */
2121 prepareAcceleratedURL(conn
, http
, url
, req_hdr
);
2123 } else if (internalCheck(url
)) {
2124 /* internal URL mode */
2125 /* prepend our name & port */
2126 http
->uri
= xstrdup(internalLocalUri(NULL
, url
));
2127 http
->flags
.accel
= 1;
2131 /* No special rewrites have been applied above, use the
2132 * requested url. may be rewritten later, so make extra room */
2133 int url_sz
= strlen(url
) + Config
.appendDomainLen
+ 5;
2134 http
->uri
= (char *)xcalloc(url_sz
, 1);
2135 strcpy(http
->uri
, url
);
2138 setLogUri(http
, http
->uri
);
2139 debugs(33, 5, "parseHttpRequest: Complete request received");
2140 result
->flags
.parsed_ok
= 1;
/* Bytes of free space left in the inbound read buffer
 * (one byte is reserved — presumably for a terminating NUL; confirm). */
2146 ConnStateData::getAvailableBufferLength() const
2148 int result
= in
.allocatedSize
- in
.notYetUsed
- 1;
2149 assert (result
>= 0);
/* Ensure the inbound buffer can accept more data: double its allocation
 * when fewer than 2 bytes remain free. */
2154 ConnStateData::makeSpaceAvailable()
2156 if (getAvailableBufferLength() < 2) {
2157 in
.buf
= (char *)memReallocBuf(in
.buf
, in
.allocatedSize
* 2, &in
.allocatedSize
);
2158 debugs(33, 2, "growing request buffer: notYetUsed=" << in
.notYetUsed
<< " size=" << in
.allocatedSize
);
/* Append a context to the tail of this connection's pipeline queue by
 * walking the intrusive 'next' chain starting at currentobject. */
2163 ConnStateData::addContextToQueue(ClientSocketContext
* context
)
2165 ClientSocketContext::Pointer
*S
;
2167 for (S
= (ClientSocketContext::Pointer
*) & currentobject
; S
->getRaw();
2175 ConnStateData::getConcurrentRequestCount() const
2178 ClientSocketContext::Pointer
*T
;
2180 for (T
= (ClientSocketContext::Pointer
*) ¤tobject
;
2181 T
->getRaw(); T
= &(*T
)->next
, ++result
);
/* Classify the outcome of a read on the client socket: comm-level failure,
 * a non-ignorable errno, or EOF-with-empty-buffer all count as errors. */
2186 ConnStateData::connReadWasError(comm_err_t flag
, int size
, int xerrno
)
2188 if (flag
!= COMM_OK
) {
2189 debugs(33, 2, "connReadWasError: FD " << fd
<< ": got flag " << flag
);
2194 if (!ignoreErrno(xerrno
)) {
2195 debugs(33, 2, "connReadWasError: FD " << fd
<< ": " << xstrerr(xerrno
));
2197 } else if (in
.notYetUsed
== 0) {
2198 debugs(33, 2, "connReadWasError: FD " << fd
<< ": no data to process (" << xstrerr(xerrno
) << ")");
/* Decide what to do after the client stopped sending: close immediately if
 * nothing is pending, or if half-closed clients are administratively
 * disabled; otherwise the connection lingers half-closed. */
2206 ConnStateData::connFinishedWithConn(int size
)
2209 if (getConcurrentRequestCount() == 0 && in
.notYetUsed
== 0) {
2210 /* no current or pending requests */
2211 debugs(33, 4, "connFinishedWithConn: FD " << fd
<< " closed");
2213 } else if (!Config
.onoff
.half_closed_clients
) {
2214 /* admin doesn't want to support half-closed client sockets */
2215 debugs(33, 3, "connFinishedWithConn: FD " << fd
<< " aborted (half_closed_clients disabled)");
/* Consume byteCount bytes from the front of the connection's inbound buffer,
 * compacting any remaining unparsed data to the buffer start. */
2224 connNoteUseOfBuffer(ConnStateData
* conn
, size_t byteCount
)
2226 assert(byteCount
> 0 && byteCount
<= conn
->in
.notYetUsed
);
2227 conn
->in
.notYetUsed
-= byteCount
;
2228 debugs(33, 5, HERE
<< "conn->in.notYetUsed = " << conn
->in
.notYetUsed
);
2230 * If there is still data that will be used,
2231 * move it to the beginning.
2234 if (conn
->in
.notYetUsed
> 0)
2235 xmemmove(conn
->in
.buf
, conn
->in
.buf
+ byteCount
,
2236 conn
->in
.notYetUsed
);
/* Should we keep reading an incomplete request? Chunked bodies are bounded
 * by maxChunkedRequestBodySize; plain requests by maxRequestHeaderSize. */
2240 connKeepReadingIncompleteRequest(ConnStateData
* conn
)
2242 // when we read chunked requests, the entire body is buffered
2243 // XXX: this check ignores header size and its limits.
2244 if (conn
->in
.dechunkingState
== ConnStateData::chunkParsing
)
2245 return ((int64_t)conn
->in
.notYetUsed
) < Config
.maxChunkedRequestBodySize
;
2247 return conn
->in
.notYetUsed
>= Config
.maxRequestHeaderSize
? 0 : 1;
/* Abort a request that exceeded its size limit: build an error context and
 * push an HTTP 413 (Request Entity Too Large) reply to the client. */
2251 connCancelIncompleteRequests(ConnStateData
* conn
)
2253 ClientSocketContext
*context
= parseHttpRequestAbort(conn
, "error:request-too-large");
2254 clientStreamNode
*node
= context
->getClientReplyContext();
2255 assert(!connKeepReadingIncompleteRequest(conn
));
2256 if (conn
->in
.dechunkingState
== ConnStateData::chunkParsing
) {
2257 debugs(33, 1, "Chunked request is too large (" << conn
->in
.notYetUsed
<< " bytes)");
2258 debugs(33, 1, "Config 'chunked_request_body_max_size'= " << Config
.maxChunkedRequestBodySize
<< " bytes.");
2260 debugs(33, 1, "Request header is too large (" << conn
->in
.notYetUsed
<< " bytes)");
2261 debugs(33, 1, "Config 'request_header_max_size'= " << Config
.maxRequestHeaderSize
<< " bytes.");
2263 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2264 assert (repContext
);
2265 repContext
->setReplyToError(ERR_TOO_BIG
,
2266 HTTP_REQUEST_ENTITY_TOO_LARGE
, METHOD_NONE
, NULL
,
2267 conn
->peer
, NULL
, NULL
, NULL
);
2268 context
->registerWithConn();
2269 context
->pullData();
/* Flag that more requests may be read from this connection
 * (only the flag assignment is visible in this extraction). */
2273 ConnStateData::clientMaybeReadData(int do_next_read
)
2276 flags
.readMoreRequests
= true;
/* Post-read housekeeping: abort a half-closed connection whose request body
 * cannot complete; otherwise consider scheduling another read. */
2282 ConnStateData::clientAfterReadingRequests(int do_next_read
)
2285 * If (1) we are reading a message body, (2) and the connection
2286 * is half-closed, and (3) we didn't get the entire HTTP request
2287 * yet, then close this connection.
2290 if (fd_table
[fd
].flags
.socket_eof
) {
2291 if ((int64_t)in
.notYetUsed
< bodySizeLeft()) {
2292 /* Partial request received. Abort client connection! */
2293 debugs(33, 3, "clientAfterReadingRequests: FD " << fd
<< " aborted, partial request");
2299 clientMaybeReadData (do_next_read
);
2303 clientProcessRequest(ConnStateData
*conn
, HttpParser
*hp
, ClientSocketContext
*context
, const HttpRequestMethod
& method
, HttpVersion http_ver
)
2305 ClientHttpRequest
*http
= context
->http
;
2306 HttpRequest
*request
= NULL
;
2307 bool notedUseOfBuffer
= false;
2308 bool tePresent
= false;
2309 bool deChunked
= false;
2310 bool unsupportedTe
= false;
2312 /* We have an initial client stream in place should it be needed */
2313 /* setup our private context */
2314 context
->registerWithConn();
2316 if (context
->flags
.parsed_ok
== 0) {
2317 clientStreamNode
*node
= context
->getClientReplyContext();
2318 debugs(33, 1, "clientProcessRequest: Invalid Request");
2319 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2320 assert (repContext
);
2321 repContext
->setReplyToError(ERR_INVALID_REQ
, HTTP_BAD_REQUEST
, method
, NULL
, conn
->peer
, NULL
, conn
->in
.buf
, NULL
);
2322 assert(context
->http
->out
.offset
== 0);
2323 context
->pullData();
2324 conn
->flags
.readMoreRequests
= false;
2328 if ((request
= HttpRequest::CreateFromUrlAndMethod(http
->uri
, method
)) == NULL
) {
2329 clientStreamNode
*node
= context
->getClientReplyContext();
2330 debugs(33, 5, "Invalid URL: " << http
->uri
);
2331 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2332 assert (repContext
);
2333 repContext
->setReplyToError(ERR_INVALID_URL
, HTTP_BAD_REQUEST
, method
, http
->uri
, conn
->peer
, NULL
, NULL
, NULL
);
2334 assert(context
->http
->out
.offset
== 0);
2335 context
->pullData();
2336 conn
->flags
.readMoreRequests
= false;
2340 /* RFC 2616 section 10.5.6 : handle unsupported HTTP versions cleanly. */
2341 /* We currently only accept 0.9, 1.0, 1.1 */
2342 if ( (http_ver
.major
== 0 && http_ver
.minor
!= 9) ||
2343 (http_ver
.major
== 1 && http_ver
.minor
> 1 ) ||
2344 (http_ver
.major
> 1) ) {
2346 clientStreamNode
*node
= context
->getClientReplyContext();
2347 debugs(33, 5, "Unsupported HTTP version discovered. :\n" << HttpParserHdrBuf(hp
));
2348 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2349 assert (repContext
);
2350 repContext
->setReplyToError(ERR_UNSUP_HTTPVERSION
, HTTP_HTTP_VERSION_NOT_SUPPORTED
, method
, http
->uri
, conn
->peer
, NULL
, HttpParserHdrBuf(hp
), NULL
);
2351 assert(context
->http
->out
.offset
== 0);
2352 context
->pullData();
2353 conn
->flags
.readMoreRequests
= false;
2357 /* compile headers */
2358 /* we should skip request line! */
2359 /* XXX should actually know the damned buffer size here */
2360 if (http_ver
.major
>= 1 && !request
->parseHeader(HttpParserHdrBuf(hp
), HttpParserHdrSz(hp
))) {
2361 clientStreamNode
*node
= context
->getClientReplyContext();
2362 debugs(33, 5, "Failed to parse request headers:\n" << HttpParserHdrBuf(hp
));
2363 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2364 assert (repContext
);
2365 repContext
->setReplyToError(ERR_INVALID_REQ
, HTTP_BAD_REQUEST
, method
, http
->uri
, conn
->peer
, NULL
, NULL
, NULL
);
2366 assert(context
->http
->out
.offset
== 0);
2367 context
->pullData();
2368 conn
->flags
.readMoreRequests
= false;
2372 request
->flags
.accelerated
= http
->flags
.accel
;
2373 request
->flags
.no_direct
= request
->flags
.accelerated
? !conn
->port
->allow_direct
: 0;
2376 * If transparent or interception mode is working clone the transparent and interception flags
2377 * from the port settings to the request.
2379 if (IpInterceptor
.InterceptActive()) {
2380 request
->flags
.intercepted
= http
->flags
.intercepted
;
2382 if (IpInterceptor
.TransparentActive()) {
2383 request
->flags
.spoof_client_ip
= conn
->port
->spoof_client_ip
;
2386 if (internalCheck(request
->urlpath
.termedBuf())) {
2387 if (internalHostnameIs(request
->GetHost()) &&
2388 request
->port
== getMyPort()) {
2389 http
->flags
.internal
= 1;
2390 } else if (Config
.onoff
.global_internal_static
&& internalStaticCheck(request
->urlpath
.termedBuf())) {
2391 request
->SetHost(internalHostname());
2392 request
->port
= getMyPort();
2393 http
->flags
.internal
= 1;
2397 if (http
->flags
.internal
) {
2398 request
->protocol
= PROTO_HTTP
;
2399 request
->login
[0] = '\0';
2402 request
->flags
.internal
= http
->flags
.internal
;
2403 setLogUri (http
, urlCanonicalClean(request
));
2404 request
->client_addr
= conn
->peer
;
2405 #if FOLLOW_X_FORWARDED_FOR
2406 request
->indirect_client_addr
= conn
->peer
;
2407 #endif /* FOLLOW_X_FORWARDED_FOR */
2408 request
->my_addr
= conn
->me
;
2409 request
->http_ver
= http_ver
;
2411 tePresent
= request
->header
.has(HDR_TRANSFER_ENCODING
);
2412 deChunked
= conn
->in
.dechunkingState
== ConnStateData::chunkReady
;
2415 request
->setContentLength(conn
->in
.dechunked
.contentSize());
2416 request
->header
.delById(HDR_TRANSFER_ENCODING
);
2417 conn
->finishDechunkingRequest(hp
);
2420 unsupportedTe
= tePresent
&& !deChunked
;
2421 if (!urlCheckRequest(request
) || unsupportedTe
) {
2422 clientStreamNode
*node
= context
->getClientReplyContext();
2423 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2424 assert (repContext
);
2425 repContext
->setReplyToError(ERR_UNSUP_REQ
,
2426 HTTP_NOT_IMPLEMENTED
, request
->method
, NULL
,
2427 conn
->peer
, request
, NULL
, NULL
);
2428 assert(context
->http
->out
.offset
== 0);
2429 context
->pullData();
2430 conn
->flags
.readMoreRequests
= false;
2435 if (!clientIsContentLengthValid(request
)) {
2436 clientStreamNode
*node
= context
->getClientReplyContext();
2437 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2438 assert (repContext
);
2439 repContext
->setReplyToError(ERR_INVALID_REQ
,
2440 HTTP_LENGTH_REQUIRED
, request
->method
, NULL
,
2441 conn
->peer
, request
, NULL
, NULL
);
2442 assert(context
->http
->out
.offset
== 0);
2443 context
->pullData();
2444 conn
->flags
.readMoreRequests
= false;
2448 http
->request
= HTTPMSGLOCK(request
);
2449 clientSetKeepaliveFlag(http
);
2451 /* If this is a CONNECT, don't schedule a read - ssl.c will handle it */
2452 if (http
->request
->method
== METHOD_CONNECT
)
2453 context
->mayUseConnection(true);
2455 /* Do we expect a request-body? */
2456 if (!context
->mayUseConnection() && request
->content_length
> 0) {
2457 request
->body_pipe
= conn
->expectRequestBody(request
->content_length
);
2459 // consume header early so that body pipe gets just the body
2460 connNoteUseOfBuffer(conn
, http
->req_sz
);
2461 notedUseOfBuffer
= true;
2463 conn
->handleRequestBodyData(); // may comm_close and stop producing
2465 /* Is it too large? */
2467 if (!clientIsRequestBodyValid(request
->content_length
) ||
2468 clientIsRequestBodyTooLargeForPolicy(request
->content_length
)) {
2469 clientStreamNode
*node
= context
->getClientReplyContext();
2470 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2471 assert (repContext
);
2472 repContext
->setReplyToError(ERR_TOO_BIG
,
2473 HTTP_REQUEST_ENTITY_TOO_LARGE
, METHOD_NONE
, NULL
,
2474 conn
->peer
, http
->request
, NULL
, NULL
);
2475 assert(context
->http
->out
.offset
== 0);
2476 context
->pullData();
2480 if (!request
->body_pipe
->productionEnded())
2481 conn
->readSomeData();
2483 context
->mayUseConnection(!request
->body_pipe
->productionEnded());
2486 http
->calloutContext
= new ClientRequestContext(http
);
2491 if (!notedUseOfBuffer
)
2492 connNoteUseOfBuffer(conn
, http
->req_sz
);
2496 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
2497 * to here because calling comm_reset_close() causes http to
2498 * be freed and the above connNoteUseOfBuffer() would hit an
2499 * assertion, not to mention that we were accessing freed memory.
2501 if (http
->request
->flags
.resetTCP() && conn
->fd
> -1) {
2502 debugs(33, 3, HERE
<< "Sending TCP RST on FD " << conn
->fd
);
2503 conn
->flags
.readMoreRequests
= false;
2504 comm_reset_close(conn
->fd
);
2510 connStripBufferWhitespace (ConnStateData
* conn
)
2512 while (conn
->in
.notYetUsed
> 0 && xisspace(conn
->in
.buf
[0])) {
2513 xmemmove(conn
->in
.buf
, conn
->in
.buf
+ 1, conn
->in
.notYetUsed
- 1);
2514 --conn
->in
.notYetUsed
;
2519 connOkToAddRequest(ConnStateData
* conn
)
2521 int result
= conn
->getConcurrentRequestCount() < (Config
.onoff
.pipeline_prefetch
? 2 : 1);
2524 debugs(33, 3, "connOkToAddRequest: FD " << conn
->fd
<<
2525 " max concurrent requests reached");
2526 debugs(33, 5, "connOkToAddRequest: FD " << conn
->fd
<<
2527 " defering new request until one is done");
2536 * Report on the number of bytes of body content that we
2537 * know are yet to be read on this connection.
2540 ConnStateData::bodySizeLeft()
2542 // XXX: this logic will not work for chunked requests with unknown sizes
2544 if (bodyPipe
!= NULL
)
2545 return bodyPipe
->unproducedSize();
2551 * Attempt to parse one or more requests from the input buffer.
2552 * If a request is successfully parsed, even if the next request
2553 * is only partially parsed, it will return TRUE.
2554 * do_next_read is updated to indicate whether a read should be
2558 clientParseRequest(ConnStateData
* conn
, bool &do_next_read
)
2560 HttpRequestMethod method
;
2561 ClientSocketContext
*context
;
2562 bool parsed_req
= false;
2563 HttpVersion http_ver
;
2566 debugs(33, 5, "clientParseRequest: FD " << conn
->fd
<< ": attempting to parse");
2568 while (conn
->in
.notYetUsed
> 0 && conn
->bodySizeLeft() == 0) {
2569 connStripBufferWhitespace (conn
);
2571 /* Don't try to parse if the buffer is empty */
2573 if (conn
->in
.notYetUsed
== 0)
2576 /* Limit the number of concurrent requests to 2 */
2578 if (!connOkToAddRequest(conn
)) {
2582 /* Should not be needed anymore */
2583 /* Terminate the string */
2584 conn
->in
.buf
[conn
->in
.notYetUsed
] = '\0';
2586 /* Begin the parsing */
2587 HttpParserInit(&hp
, conn
->in
.buf
, conn
->in
.notYetUsed
);
2589 /* Process request */
2590 PROF_start(parseHttpRequest
);
2592 context
= parseHttpRequest(conn
, &hp
, &method
, &http_ver
);
2594 PROF_stop(parseHttpRequest
);
2596 /* partial or incomplete request */
2599 if (!connKeepReadingIncompleteRequest(conn
))
2600 connCancelIncompleteRequests(conn
);
2605 /* status -1 or 1 */
2607 debugs(33, 5, "clientParseRequest: FD " << conn
->fd
<< ": parsed a request");
2608 commSetTimeout(conn
->fd
, Config
.Timeout
.lifetime
, clientLifetimeTimeout
,
2611 clientProcessRequest(conn
, &hp
, context
, method
, http_ver
);
2615 if (context
->mayUseConnection()) {
2616 debugs(33, 3, "clientParseRequest: Not reading, as this request may need the connection");
2621 if (!conn
->flags
.readMoreRequests
) {
2622 conn
->flags
.readMoreRequests
= true;
2626 continue; /* while offset > 0 && conn->bodySizeLeft() == 0 */
2628 } /* while offset > 0 && conn->bodySizeLeft() == 0 */
2630 /* XXX where to 'finish' the parsing pass? */
2636 ConnStateData::clientReadRequest(const CommIoCbParams
&io
)
2638 debugs(33,5,HERE
<< "clientReadRequest FD " << io
.fd
<< " size " << io
.size
);
2640 bool do_next_read
= 1; /* the default _is_ to read data! - adrian */
2642 assert (io
.fd
== fd
);
2644 /* Bail out quickly on COMM_ERR_CLOSING - close handlers will tidy up */
2646 if (io
.flag
== COMM_ERR_CLOSING
) {
2647 debugs(33,5, HERE
<< " FD " << fd
<< " closing Bailout.");
2652 * Don't reset the timeout value here. The timeout value will be
2653 * set to Config.Timeout.request by httpAccept() and
2654 * clientWriteComplete(), and should apply to the request as a
2655 * whole, not individual read() calls. Plus, it breaks our
2656 * lame half-close detection
2658 if (connReadWasError(io
.flag
, io
.size
, io
.xerrno
)) {
2663 if (io
.flag
== COMM_OK
) {
2665 kb_incr(&statCounter
.client_http
.kbytes_in
, io
.size
);
2667 handleReadData(io
.buf
, io
.size
);
2669 /* The above may close the connection under our feets */
2673 } else if (io
.size
== 0) {
2674 debugs(33, 5, "clientReadRequest: FD " << fd
<< " closed?");
2676 if (connFinishedWithConn(io
.size
)) {
2681 /* It might be half-closed, we can't tell */
2682 fd_table
[fd
].flags
.socket_eof
= 1;
2684 commMarkHalfClosed(fd
);
2688 fd_note(fd
, "half-closed");
2690 /* There is one more close check at the end, to detect aborted
2691 * (partial) requests. At this point we can't tell if the request
2694 /* Continue to process previously read data */
2698 /* Process next request */
2699 if (getConcurrentRequestCount() == 0)
2700 fd_note(fd
, "Reading next request");
2702 if (! clientParseRequest(this, do_next_read
)) {
2706 * If the client here is half closed and we failed
2707 * to parse a request, close the connection.
2708 * The above check with connFinishedWithConn() only
2709 * succeeds _if_ the buffer is empty which it won't
2710 * be if we have an incomplete request.
2711 * XXX: This duplicates ClientSocketContext::keepaliveNextRequest
2713 if (getConcurrentRequestCount() == 0 && commIsHalfClosed(fd
)) {
2714 debugs(33, 5, "clientReadRequest: FD " << fd
<< ": half-closed connection, no completed request parsed, connection closing.");
2723 clientAfterReadingRequests(do_next_read
);
2727 * called when new request data has been read from the socket
2730 ConnStateData::handleReadData(char *buf
, size_t size
)
2732 char *current_buf
= in
.addressToReadInto();
2734 if (buf
!= current_buf
)
2735 xmemmove(current_buf
, buf
, size
);
2737 in
.notYetUsed
+= size
;
2739 in
.buf
[in
.notYetUsed
] = '\0'; /* Terminate the string */
2741 // if we are reading a body, stuff data into the body pipe
2742 if (bodyPipe
!= NULL
)
2743 handleRequestBodyData();
2747 * called when new request body data has been buffered in in.buf
2748 * may close the connection if we were closing and piped everything out
2751 ConnStateData::handleRequestBodyData()
2753 assert(bodyPipe
!= NULL
);
2757 #if FUTURE_CODE_TO_SUPPORT_CHUNKED_REQUESTS
2758 // The code below works, in principle, but we cannot do dechunking
2759 // on-the-fly because that would mean sending chunked requests to
2760 // the next hop. Squid lacks logic to determine which servers can
2761 // receive chunk requests. Squid v3.0 code cannot even handle chunked
2762 // responses which we may encourage by sending chunked requests.
2763 // The error generation code probably needs more work.
2764 if (in
.bodyParser
) { // chunked body
2765 debugs(33,5, HERE
<< "handling chunked request body for FD " << fd
);
2766 bool malformedChunks
= false;
2768 MemBuf raw
; // ChunkedCodingParser only works with MemBufs
2769 raw
.init(in
.notYetUsed
, in
.notYetUsed
);
2770 raw
.append(in
.buf
, in
.notYetUsed
);
2771 try { // the parser will throw on errors
2772 const mb_size_t wasContentSize
= raw
.contentSize();
2773 BodyPipeCheckout
bpc(*bodyPipe
);
2774 const bool parsed
= in
.bodyParser
->parse(&raw
, &bpc
.buf
);
2776 putSize
= wasContentSize
- raw
.contentSize();
2779 stopProducingFor(bodyPipe
, true); // this makes bodySize known
2781 // parser needy state must imply body pipe needy state
2782 if (in
.bodyParser
->needsMoreData() &&
2783 !bodyPipe
->mayNeedMoreData())
2784 malformedChunks
= true;
2785 // XXX: if bodyParser->needsMoreSpace, how can we guarantee it?
2787 } catch (...) { // XXX: be more specific
2788 malformedChunks
= true;
2791 if (malformedChunks
) {
2792 if (bodyPipe
!= NULL
)
2793 stopProducingFor(bodyPipe
, false);
2795 ClientSocketContext::Pointer context
= getCurrentContext();
2796 if (!context
->http
->out
.offset
) {
2797 clientStreamNode
*node
= context
->getClientReplyContext();
2798 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2799 assert (repContext
);
2800 repContext
->setReplyToError(ERR_INVALID_REQ
, HTTP_BAD_REQUEST
,
2801 METHOD_NONE
, NULL
, &peer
.sin_addr
,
2803 context
->pullData();
2805 flags
.readMoreRequests
= false;
2806 return; // XXX: is that sufficient to generate an error?
2808 } else // identity encoding
2811 debugs(33,5, HERE
<< "handling plain request body for FD " << fd
);
2812 putSize
= bodyPipe
->putMoreData(in
.buf
, in
.notYetUsed
);
2813 if (!bodyPipe
->mayNeedMoreData()) {
2814 // BodyPipe will clear us automagically when we produced everything
2820 connNoteUseOfBuffer(this, putSize
);
2823 debugs(33,5, HERE
<< "produced entire request body for FD " << fd
);
2826 /* we've finished reading like good clients,
2827 * now do the close that initiateClose initiated.
2829 * XXX: do we have to close? why not check keepalive et.
2831 * XXX: To support chunked requests safely, we need to handle
2832 * the case of an endless request. This if-statement does not,
2833 * because mayNeedMoreData is true if request size is not known.
2841 ConnStateData::noteMoreBodySpaceAvailable(BodyPipe::Pointer
)
2843 handleRequestBodyData();
2847 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer
)
2850 startClosing("body consumer aborted");
2853 /** general lifetime handler for HTTP requests */
2855 ConnStateData::requestTimeout(const CommTimeoutCbParams
&io
)
2857 #if THIS_CONFUSES_PERSISTENT_CONNECTION_AWARE_BROWSERS_AND_USERS
2858 debugs(33, 3, "requestTimeout: FD " << io
.fd
<< ": lifetime is expired.");
2860 if (COMMIO_FD_WRITECB(io
.fd
)->active
) {
2861 /* FIXME: If this code is reinstated, check the conn counters,
2862 * not the fd table state
2865 * Some data has been sent to the client, just close the FD
2868 } else if (nrequests
) {
2870 * assume its a persistent connection; just close it
2877 ClientHttpRequest
**H
;
2878 clientStreamNode
*node
;
2879 ClientHttpRequest
*http
=
2880 parseHttpRequestAbort(this, "error:Connection%20lifetime%20expired");
2881 node
= http
->client_stream
.tail
->prev
->data
;
2882 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2883 assert (repContext
);
2884 repContext
->setReplyToError(ERR_LIFETIME_EXP
,
2885 HTTP_REQUEST_TIMEOUT
, METHOD_NONE
, "N/A", &peer
.sin_addr
,
2887 /* No requests can be outstanded */
2888 assert(chr
== NULL
);
2889 /* add to the client request queue */
2891 for (H
= &chr
; *H
; H
= &(*H
)->next
);
2894 clientStreamRead(http
->client_stream
.tail
->data
, http
, 0,
2895 HTTP_REQBUF_SZ
, context
->reqbuf
);
2898 * if we don't close() here, we still need a timeout handler!
2900 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
2901 AsyncCall::Pointer timeoutCall
= asyncCall(33, 5, "ConnStateData::requestTimeout",
2902 TimeoutDialer(this,&ConnStateData::requestTimeout
));
2903 commSetTimeout(io
.fd
, 30, timeoutCall
);
2906 * Aha, but we don't want a read handler!
2908 commSetSelect(io
.fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
2913 * Just close the connection to not confuse browsers
2914 * using persistent connections. Some browsers opens
2915 * an connection and then does not use it until much
2916 * later (presumeably because the request triggering
2917 * the open has already been completed on another
2920 debugs(33, 3, "requestTimeout: FD " << io
.fd
<< ": lifetime is expired.");
2930 clientLifetimeTimeout(int fd
, void *data
)
2932 ClientHttpRequest
*http
= (ClientHttpRequest
*)data
;
2933 debugs(33, 1, "WARNING: Closing client " << http
->getConn()->peer
<< " connection due to lifetime timeout");
2934 debugs(33, 1, "\t" << http
->uri
);
2941 static time_t last_warn
= 0;
2943 if (fdNFree() >= RESERVED_FD
)
2946 if (last_warn
+ 15 < squid_curtime
) {
2947 debugs(33, 0, HERE
<< "WARNING! Your cache is running out of filedescriptors");
2948 last_warn
= squid_curtime
;
2955 connStateCreate(const IpAddress
&peer
, const IpAddress
&me
, int fd
, http_port_list
*port
)
2957 ConnStateData
*result
= new ConnStateData
;
2959 result
->peer
= peer
;
2960 result
->log_addr
= peer
;
2961 result
->log_addr
.ApplyMask(Config
.Addrs
.client_netmask
.GetCIDR());
2964 result
->in
.buf
= (char *)memAllocBuf(CLIENT_REQ_BUF_SZ
, &result
->in
.allocatedSize
);
2965 result
->port
= cbdataReference(port
);
2967 if (port
->intercepted
|| port
->spoof_client_ip
) {
2968 IpAddress client
, dst
;
2970 if (IpInterceptor
.NatLookup(fd
, me
, peer
, client
, dst
) == 0) {
2971 result
->me
= client
;
2973 result
->transparent(true);
2977 if (port
->disable_pmtu_discovery
!= DISABLE_PMTU_OFF
&&
2978 (result
->transparent() || port
->disable_pmtu_discovery
== DISABLE_PMTU_ALWAYS
)) {
2979 #if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
2980 int i
= IP_PMTUDISC_DONT
;
2981 setsockopt(fd
, SOL_IP
, IP_MTU_DISCOVER
, &i
, sizeof i
);
2985 static int reported
= 0;
2988 debugs(33, 1, "Notice: httpd_accel_no_pmtu_disc not supported on your platform");
2996 result
->flags
.readMoreRequests
= true;
3000 /** Handle a new connection on HTTP socket. */
3002 httpAccept(int sock
, int newfd
, ConnectionDetail
*details
,
3003 comm_err_t flag
, int xerrno
, void *data
)
3005 http_port_list
*s
= (http_port_list
*)data
;
3006 ConnStateData
*connState
= NULL
;
3008 if (flag
== COMM_ERR_CLOSING
) {
3013 AcceptLimiter::Instance().defer (sock
, httpAccept
, data
);
3015 /* kick off another one for later */
3016 comm_accept(sock
, httpAccept
, data
);
3018 if (flag
!= COMM_OK
) {
3019 debugs(33, 1, "httpAccept: FD " << sock
<< ": accept failure: " << xstrerr(xerrno
));
3023 debugs(33, 4, "httpAccept: FD " << newfd
<< ": accepted");
3024 fd_note(newfd
, "client http connect");
3025 connState
= connStateCreate(&details
->peer
, &details
->me
, newfd
, s
);
3027 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
3028 AsyncCall::Pointer call
= asyncCall(33, 5, "ConnStateData::connStateClosed",
3029 Dialer(connState
, &ConnStateData::connStateClosed
));
3030 comm_add_close_handler(newfd
, call
);
3032 if (Config
.onoff
.log_fqdn
)
3033 fqdncache_gethostbyaddr(details
->peer
, FQDN_LOOKUP_IF_MISS
);
3035 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
3036 AsyncCall::Pointer timeoutCall
= asyncCall(33, 5, "ConnStateData::requestTimeout",
3037 TimeoutDialer(connState
,&ConnStateData::requestTimeout
));
3038 commSetTimeout(newfd
, Config
.Timeout
.read
, timeoutCall
);
3041 if (Ident::TheConfig
.identLookup
) {
3042 ACLFilledChecklist
identChecklist(Ident::TheConfig
.identLookup
, NULL
, NULL
);
3043 identChecklist
.src_addr
= details
->peer
;
3044 identChecklist
.my_addr
= details
->me
;
3045 if (identChecklist
.fastCheck())
3046 Ident::Start(details
->me
, details
->peer
, clientIdentDone
, connState
);
3050 if (s
->tcp_keepalive
.enabled
) {
3051 commSetTcpKeepalive(newfd
, s
->tcp_keepalive
.idle
, s
->tcp_keepalive
.interval
, s
->tcp_keepalive
.timeout
);
3054 connState
->readSomeData();
3056 clientdbEstablished(details
->peer
, 1);
3058 incoming_sockets_accepted
++;
3063 /** Create SSL connection structure and update fd_table */
3065 httpsCreate(int newfd
, ConnectionDetail
*details
, SSL_CTX
*sslContext
)
3067 SSL
*ssl
= SSL_new(sslContext
);
3070 const int ssl_error
= ERR_get_error();
3071 debugs(83, 1, "httpsAccept: Error allocating handle: " << ERR_error_string(ssl_error
, NULL
) );
3076 SSL_set_fd(ssl
, newfd
);
3077 fd_table
[newfd
].ssl
= ssl
;
3078 fd_table
[newfd
].read_method
= &ssl_read_method
;
3079 fd_table
[newfd
].write_method
= &ssl_write_method
;
3081 debugs(33, 5, "httpsCreate: will negotate SSL on FD " << newfd
);
3082 fd_note(newfd
, "client https start");
3087 /** negotiate an SSL connection */
3089 clientNegotiateSSL(int fd
, void *data
)
3091 ConnStateData
*conn
= (ConnStateData
*)data
;
3093 SSL
*ssl
= fd_table
[fd
].ssl
;
3096 if ((ret
= SSL_accept(ssl
)) <= 0) {
3097 int ssl_error
= SSL_get_error(ssl
, ret
);
3099 switch (ssl_error
) {
3101 case SSL_ERROR_WANT_READ
:
3102 commSetSelect(fd
, COMM_SELECT_READ
, clientNegotiateSSL
, conn
, 0);
3105 case SSL_ERROR_WANT_WRITE
:
3106 commSetSelect(fd
, COMM_SELECT_WRITE
, clientNegotiateSSL
, conn
, 0);
3109 case SSL_ERROR_SYSCALL
:
3112 debugs(83, 2, "clientNegotiateSSL: Error negotiating SSL connection on FD " << fd
<< ": Aborted by client");
3118 if (errno
== ECONNRESET
)
3121 debugs(83, hard
? 1 : 2, "clientNegotiateSSL: Error negotiating SSL connection on FD " <<
3122 fd
<< ": " << strerror(errno
) << " (" << errno
<< ")");
3129 case SSL_ERROR_ZERO_RETURN
:
3130 debugs(83, 1, "clientNegotiateSSL: Error negotiating SSL connection on FD " << fd
<< ": Closed by client");
3135 debugs(83, 1, "clientNegotiateSSL: Error negotiating SSL connection on FD " <<
3136 fd
<< ": " << ERR_error_string(ERR_get_error(), NULL
) <<
3137 " (" << ssl_error
<< "/" << ret
<< ")");
3145 if (SSL_session_reused(ssl
)) {
3146 debugs(83, 2, "clientNegotiateSSL: Session " << SSL_get_session(ssl
) <<
3147 " reused on FD " << fd
<< " (" << fd_table
[fd
].ipaddr
<< ":" << (int)fd_table
[fd
].remote_port
<< ")");
3149 if (do_debug(83, 4)) {
3150 /* Write out the SSL session details.. actually the call below, but
3151 * OpenSSL headers do strange typecasts confusing GCC.. */
3152 /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
3153 #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
3154 PEM_ASN1_write((i2d_of_void
*)i2d_SSL_SESSION
, PEM_STRING_SSL_SESSION
, debug_log
, (char *)SSL_get_session(ssl
), NULL
,NULL
,0,NULL
,NULL
);
3156 #elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)
3158 /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
3159 * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
3160 * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
3161 * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
3162 * Because there are two possible usable cast, if you get an error here, try the other
3163 * commented line. */
3165 PEM_ASN1_write((int(*)())i2d_SSL_SESSION
, PEM_STRING_SSL_SESSION
, debug_log
, (char *)SSL_get_session(ssl
), NULL
,NULL
,0,NULL
,NULL
);
3166 /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION, debug_log, (char *)SSL_get_session(ssl), NULL,NULL,0,NULL,NULL); */
3170 debugs(83, 4, "With " OPENSSL_VERSION_TEXT
", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source." );
3173 /* Note: This does not automatically fflush the log file.. */
3176 debugs(83, 2, "clientNegotiateSSL: New session " <<
3177 SSL_get_session(ssl
) << " on FD " << fd
<< " (" <<
3178 fd_table
[fd
].ipaddr
<< ":" << (int)fd_table
[fd
].remote_port
<<
3182 debugs(83, 3, "clientNegotiateSSL: FD " << fd
<< " negotiated cipher " <<
3183 SSL_get_cipher(ssl
));
3185 client_cert
= SSL_get_peer_certificate(ssl
);
3187 if (client_cert
!= NULL
) {
3188 debugs(83, 3, "clientNegotiateSSL: FD " << fd
<<
3189 " client certificate: subject: " <<
3190 X509_NAME_oneline(X509_get_subject_name(client_cert
), 0, 0));
3192 debugs(83, 3, "clientNegotiateSSL: FD " << fd
<<
3193 " client certificate: issuer: " <<
3194 X509_NAME_oneline(X509_get_issuer_name(client_cert
), 0, 0));
3197 X509_free(client_cert
);
3199 debugs(83, 5, "clientNegotiateSSL: FD " << fd
<<
3200 " has no certificate.");
3203 conn
->readSomeData();
3206 /** handle a new HTTPS connection */
3208 httpsAccept(int sock
, int newfd
, ConnectionDetail
*details
,
3209 comm_err_t flag
, int xerrno
, void *data
)
3211 https_port_list
*s
= (https_port_list
*)data
;
3212 SSL_CTX
*sslContext
= s
->sslContext
;
3214 if (flag
== COMM_ERR_CLOSING
) {
3219 AcceptLimiter::Instance().defer (sock
, httpsAccept
, data
);
3221 /* kick off another one for later */
3222 comm_accept(sock
, httpsAccept
, data
);
3224 if (flag
!= COMM_OK
) {
3226 debugs(33, 1, "httpsAccept: FD " << sock
<< ": accept failure: " << xstrerr(xerrno
));
3231 if (!(ssl
= httpsCreate(newfd
, details
, sslContext
)))
3234 debugs(33, 5, "httpsAccept: FD " << newfd
<< " accepted, starting SSL negotiation.");
3235 fd_note(newfd
, "client https connect");
3236 ConnStateData
*connState
= connStateCreate(details
->peer
, details
->me
,
3238 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
3239 AsyncCall::Pointer call
= asyncCall(33, 5, "ConnStateData::connStateClosed",
3240 Dialer(connState
, &ConnStateData::connStateClosed
));
3241 comm_add_close_handler(newfd
, call
);
3243 if (Config
.onoff
.log_fqdn
)
3244 fqdncache_gethostbyaddr(details
->peer
, FQDN_LOOKUP_IF_MISS
);
3246 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
3247 AsyncCall::Pointer timeoutCall
= asyncCall(33, 5, "ConnStateData::requestTimeout",
3248 TimeoutDialer(connState
,&ConnStateData::requestTimeout
));
3249 commSetTimeout(newfd
, Config
.Timeout
.request
, timeoutCall
);
3252 if (Ident::TheConfig
.identLookup
) {
3253 ACLFilledChecklist
identChecklist(Ident::TheConfig
.identLookup
, NULL
, NULL
);
3254 identChecklist
.src_addr
= details
->peer
;
3255 identChecklist
.my_addr
= details
->me
;
3256 if (identChecklist
.fastCheck())
3257 Ident::Start(details
->me
, details
->peer
, clientIdentDone
, connState
);
3262 if (s
->http
.tcp_keepalive
.enabled
) {
3263 commSetTcpKeepalive(newfd
, s
->http
.tcp_keepalive
.idle
, s
->http
.tcp_keepalive
.interval
, s
->http
.tcp_keepalive
.timeout
);
3266 commSetSelect(newfd
, COMM_SELECT_READ
, clientNegotiateSSL
, connState
, 0);
3268 clientdbEstablished(details
->peer
, 1);
3270 incoming_sockets_accepted
++;
3274 ConnStateData::switchToHttps()
3276 assert(!switchedToHttps_
);
3278 //HTTPMSGLOCK(currentobject->http->request);
3279 assert(areAllContextsForThisConnection());
3281 //currentobject->connIsFinished();
3283 debugs(33, 5, HERE
<< "converting FD " << fd
<< " to SSL");
3285 // fake a ConnectionDetail object; XXX: make ConnState a ConnectionDetail?
3286 ConnectionDetail detail
;
3290 SSL_CTX
*sslContext
= port
->sslContext
;
3292 if (!(ssl
= httpsCreate(fd
, &detail
, sslContext
)))
3295 // commSetTimeout() was called for this request before we switched.
3297 // Disable the client read handler until peer selection is complete
3298 commSetSelect(fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
3300 commSetSelect(fd
, COMM_SELECT_READ
, clientNegotiateSSL
, this, 0);
3302 switchedToHttps_
= true;
3306 #endif /* USE_SSL */
3310 clientHttpConnectionsOpen(void)
3312 http_port_list
*s
= NULL
;
3315 int bumpCount
= 0; // counts http_ports with sslBump option
3318 for (s
= Config
.Sockaddr
.http
; s
; s
= s
->next
) {
3319 if (MAXHTTPPORTS
== NHttpSockets
) {
3320 debugs(1, 1, "WARNING: You have too many 'http_port' lines.");
3321 debugs(1, 1, " The limit is " << MAXHTTPPORTS
);
3326 if (s
->sslBump
&& s
->sslContext
== NULL
) {
3327 debugs(1, 1, "Will not bump SSL at http_port " <<
3328 s
->http
.s
<< " due to SSL initialization failure.");
3337 if (s
->spoof_client_ip
) {
3338 fd
= comm_open_listener(SOCK_STREAM
, IPPROTO_TCP
, s
->s
, (COMM_NONBLOCKING
|COMM_TRANSPARENT
), "HTTP Socket");
3340 fd
= comm_open_listener(SOCK_STREAM
, IPPROTO_TCP
, s
->s
, COMM_NONBLOCKING
, "HTTP Socket");
3350 comm_accept(fd
, httpAccept
, s
);
3352 debugs(1, 1, "Accepting " <<
3353 (s
->intercepted
? " intercepted" : "") <<
3354 (s
->spoof_client_ip
? " spoofing" : "") <<
3355 (s
->sslBump
? " bumpy" : "") <<
3356 (s
->accel
? " accelerated" : "")
3357 << " HTTP connections at " << s
->s
3358 << ", FD " << fd
<< "." );
3360 HttpSockets
[NHttpSockets
++] = fd
;
3364 if (bumpCount
&& !Config
.accessList
.ssl_bump
)
3365 debugs(33, 1, "WARNING: http_port(s) with SslBump found, but no " <<
3366 std::endl
<< "\tssl_bump ACL configured. No requests will be " <<
3373 clientHttpsConnectionsOpen(void)
3378 for (s
= Config
.Sockaddr
.https
; s
; s
= (https_port_list
*)s
->http
.next
) {
3379 if (MAXHTTPPORTS
== NHttpSockets
) {
3380 debugs(1, 1, "Ignoring 'https_port' lines exceeding the limit.");
3381 debugs(1, 1, "The limit is " << MAXHTTPPORTS
<< " HTTPS ports.");
3385 if (s
->sslContext
== NULL
) {
3386 debugs(1, 1, "Ignoring https_port " << s
->http
.s
<<
3387 " due to SSL initialization failure.");
3392 fd
= comm_open_listener(SOCK_STREAM
,
3395 COMM_NONBLOCKING
, "HTTPS Socket");
3403 comm_accept(fd
, httpsAccept
, s
);
3405 debugs(1, 1, "Accepting HTTPS connections at " << s
->http
.s
<< ", FD " << fd
<< ".");
3407 HttpSockets
[NHttpSockets
++] = fd
;
3414 clientOpenListenSockets(void)
3416 clientHttpConnectionsOpen();
3419 clientHttpsConnectionsOpen();
3422 if (NHttpSockets
< 1)
3423 fatal("Cannot open HTTP Port");
3427 clientHttpConnectionsClose(void)
3431 for (i
= 0; i
< NHttpSockets
; i
++) {
3432 if (HttpSockets
[i
] >= 0) {
3433 debugs(1, 1, "FD " << HttpSockets
[i
] <<
3434 " Closing HTTP connection");
3435 comm_close(HttpSockets
[i
]);
3436 HttpSockets
[i
] = -1;
/*
 * varyEvaluateMatch: decide whether a cached StoreEntry may satisfy this
 * request given the reply's Vary (and X-Accelerator-Vary) headers, by
 * computing the request's vary key and comparing it with the key stored in
 * the entry's mem_obj.
 * NOTE(review): garbled extraction -- the function's return statements (the
 * VARY_* result codes), the #endif, and several control-flow lines are
 * missing from this view; verify behavior against the pristine source.
 */
3444 varyEvaluateMatch(StoreEntry
* entry
, HttpRequest
* request
)
3446 const char *vary
= request
->vary_headers
;
3447 int has_vary
= entry
->getReply()->header
.has(HDR_VARY
);
/* When enabled, X-Accelerator-Vary participates in the has_vary decision
 * (the combining operator is on a line missing from this extraction). */
3448 #if X_ACCELERATOR_VARY
3451 entry
->getReply()->header
.has(HDR_X_ACCELERATOR_VARY
);
/* Second-attempt sanity check: the entry stopped being a Vary object, so
 * drop the stale vary key computed for the earlier attempt. */
3454 if (!has_vary
|| !entry
->mem_obj
->vary_headers
) {
3456 /* Oops... something odd is going on here.. */
3457 debugs(33, 1, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
3458 entry
->mem_obj
->url
<< "' '" << vary
<< "'");
3459 safe_free(request
->vary_headers
);
3464 /* This is not a varying object */
/* First pass: compute the request's vary key and stash it on the request so
 * the follow-up lookup can compare against the variant entries. */
3468 /* virtual "vary" object found. Calculate the vary key and
3469 * continue the search
3471 vary
= httpMakeVaryMark(request
, entry
->getReply());
3474 request
->vary_headers
= xstrdup(vary
);
3477 /* Ouch.. we cannot handle this kind of variance */
3478 /* XXX This cannot really happen, but just to be complete */
3483 vary
= httpMakeVaryMark(request
, entry
->getReply());
3486 request
->vary_headers
= xstrdup(vary
);
3490 /* Ouch.. we cannot handle this kind of variance */
3491 /* XXX This cannot really happen, but just to be complete */
/* Same key on a second attempt means the variant search is looping: bail. */
3493 } else if (strcmp(vary
, entry
->mem_obj
->vary_headers
) == 0) {
3496 /* Oops.. we have already been here and still haven't
3497 * found the requested variant. Bail out
3499 debugs(33, 1, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
3500 entry
->mem_obj
->url
<< "' '" << vary
<< "'");
/*
 * clientAclChecklistCreate: build an ACLFilledChecklist for a client request,
 * seeding it with the ACL chain, the request, and the connection's ident
 * (rfc931) string when a valid connection is attached (dash_str otherwise).
 * The connection itself is stored on the checklist for ident and
 * connection-oriented-auth ACLs.
 * NOTE(review): garbled extraction -- the lines copying source/destination
 * addresses into the checklist and the final "return ch;" are not visible
 * in this view.
 */
3506 ACLFilledChecklist
*
3507 clientAclChecklistCreate(const acl_access
* acl
, ClientHttpRequest
* http
)
3509 ConnStateData
* conn
= http
->getConn();
3510 ACLFilledChecklist
*ch
= new ACLFilledChecklist(acl
, http
->request
,
3511 cbdataReferenceValid(conn
) && conn
!= NULL
? conn
->rfc931
: dash_str
);
3514 * hack for ident ACL. It needs to get full addresses, and a place to store
3515 * the ident result on persistent connections...
3517 /* connection oriented auth also needs these two lines for it's operation. */
3519 * Internal requests do not have a connection reference, because: A) their
3520 * byte count may be transformed before being applied to an outbound
3521 * connection B) they are internal - any limiting on them should be done on
3526 ch
->conn(conn
); /* unreferenced in FilledCheckList.cc */
3531 CBDATA_CLASS_INIT(ConnStateData
);
3533 ConnStateData::ConnStateData() :AsyncJob("ConnStateData"), transparent_ (false), reading_ (false), closing_ (false)
3536 pinning
.pinned
= false;
3537 pinning
.auth
= false;
3541 ConnStateData::transparent() const
3543 return transparent_
;
3547 ConnStateData::transparent(bool const anInt
)
3549 transparent_
= anInt
;
/*
 * reading() accessors: query and set the "a comm read is scheduled" flag.
 * The setter asserts the flag actually toggles (no redundant transitions).
 * NOTE(review): garbled extraction -- the getter's "return reading_;" and
 * the setter's "reading_ = newBool;" lines are missing from this view.
 */
3553 ConnStateData::reading() const
3559 ConnStateData::reading(bool const newBool
)
3561 assert (reading() != newBool
);
3567 ConnStateData::expectRequestBody(int64_t size
)
3569 bodyPipe
= new BodyPipe(this);
3570 bodyPipe
->setBodySize(size
);
/*
 * closing() accessor plus startClosing(): arrange to close the client socket
 * only after the remaining request body has been read.
 * NOTE(review): garbled extraction -- the getter's "return closing_;" body
 * and the "closing_ = true;" assignment in startClosing() are missing from
 * this view.
 */
3575 ConnStateData::closing() const
3581 * Called by ClientSocketContext to give the connection a chance to read
3582 * the entire body before closing the socket.
3585 ConnStateData::startClosing(const char *reason
)
3587 debugs(33, 5, HERE
<< "startClosing " << this << " for " << reason
);
/* Only meaningful while a request body is still outstanding. */
3591 assert(bodyPipe
!= NULL
);
3592 assert(bodySizeLeft() > 0);
3594 // We do not have to abort the body pipeline because we are going to
3595 // read the entire body anyway.
3596 // Perhaps an ICAP server wants to log the complete request.
3598 // If a consumer abort have caused this closing, we may get stuck
3599 // as nobody is consuming our data. Allow auto-consumption.
3600 bodyPipe
->enableAutoConsumption();
3603 // initialize dechunking state
3605 ConnStateData::startDechunkingRequest(HttpParser
*hp
)
3607 debugs(33, 5, HERE
<< "start dechunking at " << HttpParserRequestLen(hp
));
3608 assert(in
.dechunkingState
== chunkUnknown
);
3609 assert(!in
.bodyParser
);
3610 in
.bodyParser
= new ChunkedCodingParser
;
3611 in
.chunkedSeen
= HttpParserRequestLen(hp
); // skip headers when dechunking
3612 in
.chunked
.init(); // TODO: should we have a smaller-than-default limit?
3613 in
.dechunked
.init();
3614 in
.dechunkingState
= chunkParsing
;
/*
 * finishDechunkingRequest: move the fully dechunked request body back into
 * the connection's input buffer, directly after the parsed request headers,
 * append any post-chunk leftovers (pipelined next request), fix up
 * in.notYetUsed, and reset the dechunking state machine. The statement order
 * here is load-bearing: the asserts prove the in-place memmove cannot
 * overflow before any bytes are shuffled.
 */
3617 // put parsed content into input buffer and clean up
3619 ConnStateData::finishDechunkingRequest(HttpParser
*hp
)
3621 debugs(33, 5, HERE
<< "finish dechunking; content: " << in
.dechunked
.contentSize());
/* Must only run once the parser reported a complete body. */
3623 assert(in
.dechunkingState
== chunkReady
);
3624 assert(in
.bodyParser
);
3625 delete in
.bodyParser
;
3626 in
.bodyParser
= NULL
;
3628 const mb_size_t headerSize
= HttpParserRequestLen(hp
);
3630 // dechunking cannot make data bigger
3631 assert(headerSize
+ in
.dechunked
.contentSize() + in
.chunked
.contentSize()
3632 <= static_cast<mb_size_t
>(in
.notYetUsed
));
3633 assert(in
.notYetUsed
<= in
.allocatedSize
);
3635 // copy dechunked content
3636 char *end
= in
.buf
+ headerSize
;
3637 xmemmove(end
, in
.dechunked
.content(), in
.dechunked
.contentSize());
3638 end
+= in
.dechunked
.contentSize();
3640 // copy post-chunks leftovers, if any, caused by request pipelining?
3641 if (in
.chunked
.contentSize()) {
3642 xmemmove(end
, in
.chunked
.content(), in
.chunked
.contentSize());
3643 end
+= in
.chunked
.contentSize();
/* Buffer now holds headers + decoded body + leftovers, contiguously. */
3646 in
.notYetUsed
= end
- in
.buf
;
3649 in
.dechunked
.clean();
3650 in
.dechunkingState
= chunkUnknown
;
/*
 * parseRequestChunks: feed newly read bytes of a chunked request body into
 * the chunked-coding parser, enforcing chunked_request_body_max_size and the
 * staging buffer's capacity. Returns true iff more data is needed; false on
 * completion or error (dechunkingState is set to chunkReady or chunkError).
 * NOTE(review): garbled extraction -- the "return false;" lines after each
 * chunkError assignment and the "catch (...)" line of the try block are
 * missing from this view; verify against the pristine source.
 */
3653 // parse newly read request chunks and buffer them for finishDechunkingRequest
3654 // returns true iff needs more data
3656 ConnStateData::parseRequestChunks(HttpParser
*)
3658 debugs(33,5, HERE
<< "parsing chunked request body at " <<
3659 in
.chunkedSeen
<< " < " << in
.notYetUsed
);
3660 assert(in
.bodyParser
);
3661 assert(in
.dechunkingState
== chunkParsing
);
/* "fresh" = bytes read since the last parse pass. */
3663 assert(in
.chunkedSeen
<= in
.notYetUsed
);
3664 const mb_size_t fresh
= in
.notYetUsed
- in
.chunkedSeen
;
3666 // be safe: count some chunked coding metadata towards the total body size
3667 if (fresh
+ in
.dechunked
.contentSize() > Config
.maxChunkedRequestBodySize
) {
3668 debugs(33,3, HERE
<< "chunked body (" << fresh
<< " + " <<
3669 in
.dechunked
.contentSize() << " may exceed " <<
3670 "chunked_request_body_max_size=" <<
3671 Config
.maxChunkedRequestBodySize
);
3672 in
.dechunkingState
= chunkError
;
/* Capacity guard for the raw staging buffer. */
3676 if (fresh
> in
.chunked
.potentialSpaceSize()) {
3677 // should not happen if Config.maxChunkedRequestBodySize is reasonable
3678 debugs(33,1, HERE
<< "request_body_max_size exceeds chunked buffer " <<
3679 "size: " << fresh
<< " + " << in
.chunked
.contentSize() << " > " <<
3680 in
.chunked
.potentialSpaceSize() << " with " <<
3681 "chunked_request_body_max_size=" <<
3682 Config
.maxChunkedRequestBodySize
);
3683 in
.dechunkingState
= chunkError
;
/* Stage the fresh bytes and advance the high-water mark. */
3686 in
.chunked
.append(in
.buf
+ in
.chunkedSeen
, fresh
);
3687 in
.chunkedSeen
+= fresh
;
3689 try { // the parser will throw on errors
3690 if (in
.bodyParser
->parse(&in
.chunked
, &in
.dechunked
))
3691 in
.dechunkingState
= chunkReady
; // successfully parsed all chunks
3693 return true; // need more, keep the same state
3695 debugs(33,3, HERE
<< "chunk parsing error");
3696 in
.dechunkingState
= chunkError
;
3698 return false; // error, unsupported, or done
3702 ConnStateData::In::addressToReadInto() const
3704 return buf
+ notYetUsed
;
3707 ConnStateData::In::In() : bodyParser(NULL
),
3708 buf (NULL
), notYetUsed (0), allocatedSize (0),
3709 dechunkingState(ConnStateData::chunkUnknown
)
/*
 * In::~In(): return the input buffer to the memory pools and destroy any
 * in-progress chunked-body parser.
 * NOTE(review): the original guards (checking allocatedSize / bodyParser
 * before freeing) are on lines missing from this extraction -- confirm
 * against the pristine source.
 */
3712 ConnStateData::In::~In()
3715 memFreeBuf(allocatedSize
, buf
);
3717 delete bodyParser
; // TODO: pool
/*
 * Close-callback for the pinned server-side descriptor: drop the cbdata
 * reference to the peer and free the pinned host name. pinning.pinned is
 * intentionally kept (see the NOTE below) so "pinned but fd gone" remains
 * detectable at end of request.
 * NOTE(review): garbled extraction -- the lines resetting pinning.fd and
 * clearing the close handler are missing from this view.
 */
3720 /* This is a comm call normally scheduled by comm_close() */
3722 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams
&io
)
3726 cbdataReferenceDone(pinning
.peer
);
3728 safe_free(pinning
.host
);
3729 /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
3730 * connection has gone away */
/*
 * pinConnection: pin the server-side descriptor pinning_fd to this client
 * connection (used for NTLM/connection-oriented auth and certain peers).
 * Records host:port, the peer (via a counted cbdata reference), and the auth
 * mode; labels the FD for cachemgr display; and registers a close handler so
 * the pinning state is cleaned up when the descriptor closes. Re-pinning a
 * different FD first closes the previously pinned one.
 * NOTE(review): garbled extraction -- the declarations of "f" and "fd" used
 * in the snprintf() call are on lines missing from this view.
 */
3733 void ConnStateData::pinConnection(int pinning_fd
, HttpRequest
*request
, struct peer
*peer
, bool auth
)
3736 char desc
[FD_DESC_SZ
];
/* Already pinned to this very FD: nothing to redo; a different live FD must
 * be closed before it is replaced. */
3738 if (pinning
.fd
== pinning_fd
)
3740 else if (pinning
.fd
!= -1)
3741 comm_close(pinning
.fd
);
3744 safe_free(pinning
.host
);
3746 pinning
.fd
= pinning_fd
;
3747 pinning
.host
= xstrdup(request
->GetHost());
3748 pinning
.port
= request
->port
;
3749 pinning
.pinned
= true;
/* Release any stale peer reference before taking the new one. */
3751 cbdataReferenceDone(pinning
.peer
);
3753 pinning
.peer
= cbdataReference(peer
);
3754 pinning
.auth
= auth
;
3756 snprintf(desc
, FD_DESC_SZ
, "%s pinned connection for %s:%d (%d)",
3757 (auth
|| !peer
) ? request
->GetHost() : peer
->name
, f
->ipaddr
, (int) f
->remote_port
, fd
);
3758 fd_note(pinning_fd
, desc
);
/* Async close handler keeps the pinning state consistent if the server-side
 * descriptor closes behind our back. */
3760 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
3761 pinning
.closeHandler
= asyncCall(33, 5, "ConnStateData::clientPinnedConnectionClosed",
3762 Dialer(this, &ConnStateData::clientPinnedConnectionClosed
));
3763 comm_add_close_handler(pinning_fd
, pinning
.closeHandler
);
/*
 * validatePinnedConnection: verify the pinned server connection may be used
 * for this request/peer: same host when auth-pinned, same port, pinned peer
 * still alive (cbdata-valid), and identical to the requested peer. On any
 * mismatch the pinning info is discarded and the server-side socket closed.
 * NOTE(review): garbled extraction -- the unpinConnection() call, the
 * success/failure return statements, and several braces are missing from
 * this view; verify against the pristine source.
 */
3767 int ConnStateData::validatePinnedConnection(HttpRequest
*request
, const struct peer
*peer
)
3773 if (pinning
.auth
&& request
&& strcasecmp(pinning
.host
, request
->GetHost()) != 0) {
3776 if (request
&& pinning
.port
!= request
->port
) {
3779 if (pinning
.peer
&& !cbdataReferenceValid(pinning
.peer
)) {
3782 if (peer
!= pinning
.peer
) {
/* Failure path: remember the FD before the pinning info is wiped, then
 * close the now-untrusted server-side socket. */
3787 int pinning_fd
=pinning
.fd
;
3788 /* The pinning info is not safe, remove any pinning info*/
3791 /* also close the server side socket, we should not use it for invalid/unauthenticated
3794 comm_close(pinning_fd
);
3801 void ConnStateData::unpinConnection()
3804 cbdataReferenceDone(pinning
.peer
);
3806 if (pinning
.closeHandler
!= NULL
) {
3807 comm_remove_close_handler(pinning
.fd
, pinning
.closeHandler
);
3808 pinning
.closeHandler
= NULL
;
3811 safe_free(pinning
.host
);