4 * DEBUG: section 33 Client-side Routines
5 * AUTHOR: Duane Wessels
7 * SQUID Web Proxy Cache http://www.squid-cache.org/
8 * ----------------------------------------------------------
10 * Squid is the result of efforts by numerous individuals from
11 * the Internet community; see the CONTRIBUTORS file for full
12 * details. Many organizations have provided support for Squid's
13 * development; see the SPONSORS file for full details. Squid is
14 * Copyrighted (C) 2001 by the Regents of the University of
15 * California; see the COPYRIGHT file for full details. Squid
16 * incorporates software developed and/or copyrighted by other
17 * sources; see the CREDITS file for full details.
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
36 \defgroup ClientSide Client-Side Logics
38 \section cserrors Errors and client side
40 \par Problem the first:
41 * the store entry is no longer authoritative on the
42 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
43 * of client_side_reply.c.
44 * Problem the second: resources are wasted if we delay in cleaning up.
45 * Problem the third: we can't depend on a connection close to clean up.
47 \par Nice thing the first:
48 * Any step in the stream can callback with data
49 * representing an error.
50 * Nice thing the second: once you stop requesting reads from upstream,
51 * upstream can be stopped too.
54 * Error has a callback mechanism to hand over a membuf
55 * with the error content. The failing node pushes that back as the
56 * reply. Can this be generalised to reduce duplicate efforts?
57 * A: Possibly. For now, only one location uses this.
58 * How to deal with pre-stream errors?
59 * Tell client_side_reply that we *want* an error page before any
60 * stream calls occur. Then we simply read as normal.
63 \section pconn_logic Persistent connection logic:
66 * requests (httpClientRequest structs) get added to the connection
67 * list, with the current one being chr
70 * The request is *immediately* kicked off, and data flows through
71 * to clientSocketRecipient.
74 * If the data that arrives at clientSocketRecipient is not for the current
75 * request, clientSocketRecipient simply returns, without requesting more
76 * data, or sending it.
79 * ClientKeepAliveNextRequest will then detect the presence of data in
80 * the next ClientHttpRequest, and will send it, re-establishing the
86 #include "acl/FilledChecklist.h"
87 #include "auth/UserRequest.h"
88 #include "ChunkedCodingParser.h"
89 #include "client_side.h"
90 #include "client_side_reply.h"
91 #include "client_side_request.h"
92 #include "ClientRequestContext.h"
93 #include "clientStream.h"
95 #include "comm/ListenStateData.h"
96 #include "ConnectionDetail.h"
97 #include "eui/Config.h"
99 #include "HttpHdrContRange.h"
100 #include "HttpReply.h"
101 #include "HttpRequest.h"
102 #include "ident/Config.h"
103 #include "ident/Ident.h"
104 #include "ip/IpIntercept.h"
106 #include "MemObject.h"
107 #include "ProtoPort.h"
109 #include "SquidTime.h"
113 #define comm_close comm_lingering_close
116 /* our socket-related context */
119 CBDATA_CLASS_INIT(ClientSocketContext
);
122 ClientSocketContext::operator new (size_t byteCount
)
124 /* derived classes with different sizes must implement their own new */
125 assert (byteCount
== sizeof (ClientSocketContext
));
126 CBDATA_INIT_TYPE(ClientSocketContext
);
127 return cbdataAlloc(ClientSocketContext
);
131 ClientSocketContext::operator delete (void *address
)
133 cbdataFree (address
);
136 /* Local functions */
137 /* ClientSocketContext */
138 static ClientSocketContext
*ClientSocketContextNew(ClientHttpRequest
*);
140 static IOCB clientWriteComplete
;
141 static IOCB clientWriteBodyComplete
;
142 static bool clientParseRequest(ConnStateData
* conn
, bool &do_next_read
);
143 static PF clientLifetimeTimeout
;
144 static ClientSocketContext
*parseHttpRequestAbort(ConnStateData
* conn
, const char *uri
);
145 static ClientSocketContext
*parseHttpRequest(ConnStateData
*, HttpParser
*, HttpRequestMethod
*, HttpVersion
*);
147 static IDCB clientIdentDone
;
149 static CSCB clientSocketRecipient
;
150 static CSD clientSocketDetach
;
151 static void clientSetKeepaliveFlag(ClientHttpRequest
*);
152 static int clientIsContentLengthValid(HttpRequest
* r
);
153 static int clientIsRequestBodyValid(int64_t bodyLength
);
154 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength
);
156 static void clientUpdateStatHistCounters(log_type logType
, int svc_time
);
157 static void clientUpdateStatCounters(log_type logType
);
158 static void clientUpdateHierCounters(HierarchyLogEntry
*);
159 static bool clientPingHasFinished(ping_data
const *aPing
);
160 void prepareLogWithRequestDetails(HttpRequest
*, AccessLogEntry
*);
162 static int connIsUsable(ConnStateData
* conn
);
164 static int responseFinishedOrFailed(HttpReply
* rep
, StoreIOBuffer
const &receivedData
);
165 static void ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest
, ConnStateData
* conn
);
166 static void clientUpdateSocketStats(log_type logType
, size_t size
);
168 char *skipLeadingSpace(char *aString
);
169 static void connNoteUseOfBuffer(ConnStateData
* conn
, size_t byteCount
);
170 static int connKeepReadingIncompleteRequest(ConnStateData
* conn
);
171 static void connCancelIncompleteRequests(ConnStateData
* conn
);
173 static ConnStateData
*connStateCreate(const Ip::Address
&peer
, const Ip::Address
&me
, int fd
, http_port_list
*port
);
177 ClientSocketContext::fd() const
180 assert (http
->getConn() != NULL
);
181 return http
->getConn()->fd
;
185 ClientSocketContext::getTail() const
187 if (http
->client_stream
.tail
)
188 return (clientStreamNode
*)http
->client_stream
.tail
->data
;
194 ClientSocketContext::getClientReplyContext() const
196 return (clientStreamNode
*)http
->client_stream
.tail
->prev
->data
;
200 * This routine should be called to grow the inbuf and then
204 ConnStateData::readSomeData()
211 debugs(33, 4, "clientReadSomeData: FD " << fd
<< ": reading request...");
213 makeSpaceAvailable();
215 typedef CommCbMemFunT
<ConnStateData
, CommIoCbParams
> Dialer
;
216 AsyncCall::Pointer call
= asyncCall(33, 5, "ConnStateData::clientReadRequest",
217 Dialer(this, &ConnStateData::clientReadRequest
));
218 comm_read(fd
, in
.addressToReadInto(), getAvailableBufferLength(), call
);
223 ClientSocketContext::removeFromConnectionList(ConnStateData
* conn
)
225 ClientSocketContext::Pointer
*tempContextPointer
;
226 assert(conn
!= NULL
&& cbdataReferenceValid(conn
));
227 assert(conn
->getCurrentContext() != NULL
);
228 /* Unlink us from the connection request list */
229 tempContextPointer
= & conn
->currentobject
;
231 while (tempContextPointer
->getRaw()) {
232 if (*tempContextPointer
== this)
235 tempContextPointer
= &(*tempContextPointer
)->next
;
238 assert(tempContextPointer
->getRaw() != NULL
);
239 *tempContextPointer
= next
;
243 ClientSocketContext::~ClientSocketContext()
245 clientStreamNode
*node
= getTail();
248 ClientSocketContext
*streamContext
= dynamic_cast<ClientSocketContext
*> (node
->data
.getRaw());
251 /* We are *always* the tail - prevent recursive free */
252 assert(this == streamContext
);
258 deRegisterWithConn();
260 httpRequestFree(http
);
262 /* clean up connection links to us */
263 assert(this != next
.getRaw());
267 ClientSocketContext::registerWithConn()
269 assert (!connRegistered_
);
271 assert (http
->getConn() != NULL
);
272 connRegistered_
= true;
273 http
->getConn()->addContextToQueue(this);
277 ClientSocketContext::deRegisterWithConn()
279 assert (connRegistered_
);
280 removeFromConnectionList(http
->getConn());
281 connRegistered_
= false;
285 ClientSocketContext::connIsFinished()
288 assert (http
->getConn() != NULL
);
289 deRegisterWithConn();
290 /* we can't handle any more stream data - detach */
291 clientStreamDetach(getTail(), http
);
294 ClientSocketContext::ClientSocketContext() : http(NULL
), reply(NULL
), next(NULL
),
296 mayUseConnection_ (false),
297 connRegistered_ (false)
299 memset (reqbuf
, '\0', sizeof (reqbuf
));
302 deferredparams
.node
= NULL
;
303 deferredparams
.rep
= NULL
;
306 ClientSocketContext
*
307 ClientSocketContextNew(ClientHttpRequest
* http
)
309 ClientSocketContext
*newContext
;
310 assert(http
!= NULL
);
311 newContext
= new ClientSocketContext
;
312 newContext
->http
= http
;
318 clientIdentDone(const char *ident
, void *data
)
320 ConnStateData
*conn
= (ConnStateData
*)data
;
321 xstrncpy(conn
->rfc931
, ident
? ident
: dash_str
, USER_IDENT_SZ
);
326 clientUpdateStatCounters(log_type logType
)
328 statCounter
.client_http
.requests
++;
330 if (logTypeIsATcpHit(logType
))
331 statCounter
.client_http
.hits
++;
333 if (logType
== LOG_TCP_HIT
)
334 statCounter
.client_http
.disk_hits
++;
335 else if (logType
== LOG_TCP_MEM_HIT
)
336 statCounter
.client_http
.mem_hits
++;
340 clientUpdateStatHistCounters(log_type logType
, int svc_time
)
342 statHistCount(&statCounter
.client_http
.all_svc_time
, svc_time
);
344 * The idea here is not to be complete, but to get service times
345 * for only well-defined types. For example, we don't include
346 * LOG_TCP_REFRESH_FAIL because its not really a cache hit
347 * (we *tried* to validate it, but failed).
352 case LOG_TCP_REFRESH_UNMODIFIED
:
353 statHistCount(&statCounter
.client_http
.nh_svc_time
, svc_time
);
356 case LOG_TCP_IMS_HIT
:
357 statHistCount(&statCounter
.client_http
.nm_svc_time
, svc_time
);
362 case LOG_TCP_MEM_HIT
:
364 case LOG_TCP_OFFLINE_HIT
:
365 statHistCount(&statCounter
.client_http
.hit_svc_time
, svc_time
);
370 case LOG_TCP_CLIENT_REFRESH_MISS
:
371 statHistCount(&statCounter
.client_http
.miss_svc_time
, svc_time
);
375 /* make compiler warnings go away */
381 clientPingHasFinished(ping_data
const *aPing
)
383 if (0 != aPing
->stop
.tv_sec
&& 0 != aPing
->start
.tv_sec
)
390 clientUpdateHierCounters(HierarchyLogEntry
* someEntry
)
394 switch (someEntry
->code
) {
395 #if USE_CACHE_DIGESTS
400 statCounter
.cd
.times_used
++;
408 case FIRST_PARENT_MISS
:
410 case CLOSEST_PARENT_MISS
:
411 statCounter
.icp
.times_used
++;
412 i
= &someEntry
->ping
;
414 if (clientPingHasFinished(i
))
415 statHistCount(&statCounter
.icp
.query_svc_time
,
416 tvSubUsec(i
->start
, i
->stop
));
419 statCounter
.icp
.query_timeouts
++;
426 statCounter
.netdb
.times_used
++;
436 ClientHttpRequest::updateCounters()
438 clientUpdateStatCounters(logType
);
440 if (request
->errType
!= ERR_NONE
)
441 statCounter
.client_http
.errors
++;
443 clientUpdateStatHistCounters(logType
,
444 tvSubMsec(start_time
, current_time
));
446 clientUpdateHierCounters(&request
->hier
);
450 prepareLogWithRequestDetails(HttpRequest
* request
, AccessLogEntry
* aLogEntry
)
456 Adaptation::Icap::History::Pointer ih
= request
->icapHistory();
458 if (Config
.onoff
.log_mime_hdrs
) {
462 packerToMemInit(&p
, &mb
);
463 request
->header
.packInto(&p
);
464 //This is the request after adaptation or redirection
465 aLogEntry
->headers
.adapted_request
= xstrdup(mb
.buf
);
467 // the virgin request is saved to aLogEntry->request
468 if (aLogEntry
->request
) {
471 packerToMemInit(&p
, &mb
);
472 aLogEntry
->request
->header
.packInto(&p
);
473 aLogEntry
->headers
.request
= xstrdup(mb
.buf
);
479 packerToMemInit(&p
, &mb
);
482 ih
->lastIcapHeader
.packInto(&p
);
483 aLogEntry
->headers
.icap
= xstrdup(mb
.buf
);
492 aLogEntry
->icap
.processingTime
= ih
->processingTime();
495 aLogEntry
->http
.method
= request
->method
;
496 aLogEntry
->http
.version
= request
->http_ver
;
497 aLogEntry
->hier
= request
->hier
;
498 if (request
->content_length
> 0) // negative when no body or unknown length
499 aLogEntry
->cache
.requestSize
+= request
->content_length
;
500 aLogEntry
->cache
.extuser
= request
->extacl_user
.termedBuf();
502 if (request
->auth_user_request
) {
504 if (request
->auth_user_request
->username())
505 aLogEntry
->cache
.authuser
=
506 xstrdup(request
->auth_user_request
->username());
508 AUTHUSERREQUESTUNLOCK(request
->auth_user_request
, "request via clientPrepareLogWithRequestDetails");
513 ClientHttpRequest::logRequest()
515 if (out
.size
|| logType
) {
516 al
.icp
.opcode
= ICP_INVALID
;
518 debugs(33, 9, "clientLogRequest: al.url='" << al
.url
<< "'");
521 al
.http
.code
= al
.reply
->sline
.status
;
522 al
.http
.content_type
= al
.reply
->content_type
.termedBuf();
523 } else if (loggingEntry() && loggingEntry()->mem_obj
) {
524 al
.http
.code
= loggingEntry()->mem_obj
->getReply()->sline
.status
;
525 al
.http
.content_type
= loggingEntry()->mem_obj
->getReply()->content_type
.termedBuf();
528 debugs(33, 9, "clientLogRequest: http.code='" << al
.http
.code
<< "'");
530 if (loggingEntry() && loggingEntry()->mem_obj
)
531 al
.cache
.objectSize
= loggingEntry()->contentLen();
533 al
.cache
.caddr
.SetNoAddr();
535 if (getConn() != NULL
) al
.cache
.caddr
= getConn()->log_addr
;
537 al
.cache
.requestSize
= req_sz
;
538 al
.cache
.requestHeadersSize
= req_sz
;
540 al
.cache
.replySize
= out
.size
;
541 al
.cache
.replyHeadersSize
= out
.headers_sz
;
543 al
.cache
.highOffset
= out
.offset
;
545 al
.cache
.code
= logType
;
547 al
.cache
.msec
= tvSubMsec(start_time
, current_time
);
550 prepareLogWithRequestDetails(request
, &al
);
552 if (getConn() != NULL
&& getConn()->rfc931
[0])
553 al
.cache
.rfc931
= getConn()->rfc931
;
557 /* This is broken. Fails if the connection has been closed. Needs
558 * to snarf the ssl details some place earlier..
560 if (getConn() != NULL
)
561 al
.cache
.ssluser
= sslGetUserEmail(fd_table
[getConn()->fd
].ssl
);
565 ACLFilledChecklist
*checklist
= clientAclChecklistCreate(Config
.accessList
.log
, this);
568 checklist
->reply
= HTTPMSGLOCK(al
.reply
);
570 if (!Config
.accessList
.log
|| checklist
->fastCheck()) {
572 al
.adapted_request
= HTTPMSGLOCK(request
);
573 accessLogLog(&al
, checklist
);
576 if (getConn() != NULL
)
577 clientdbUpdate(getConn()->peer
, logType
, PROTO_HTTP
, out
.size
);
582 accessLogFreeMemory(&al
);
587 ClientHttpRequest::freeResources()
591 safe_free(redirect
.location
);
592 range_iter
.boundary
.clean();
593 HTTPMSGUNLOCK(request
);
595 if (client_stream
.tail
)
596 clientStreamAbort((clientStreamNode
*)client_stream
.tail
->data
, this);
600 httpRequestFree(void *data
)
602 ClientHttpRequest
*http
= (ClientHttpRequest
*)data
;
603 assert(http
!= NULL
);
608 ConnStateData::areAllContextsForThisConnection() const
610 assert(this != NULL
);
611 ClientSocketContext::Pointer context
= getCurrentContext();
613 while (context
.getRaw()) {
614 if (context
->http
->getConn() != this)
617 context
= context
->next
;
624 ConnStateData::freeAllContexts()
626 ClientSocketContext::Pointer context
;
628 while ((context
= getCurrentContext()).getRaw() != NULL
) {
629 assert(getCurrentContext() !=
630 getCurrentContext()->next
);
631 context
->connIsFinished();
632 assert (context
!= currentobject
);
636 /// propagates abort event to all contexts
638 ConnStateData::notifyAllContexts(int xerrno
)
640 typedef ClientSocketContext::Pointer CSCP
;
641 for (CSCP c
= getCurrentContext(); c
.getRaw(); c
= c
->next
)
642 c
->noteIoError(xerrno
);
645 /* This is a handler normally called by comm_close() */
646 void ConnStateData::connStateClosed(const CommCloseCbParams
&io
)
648 assert (fd
== io
.fd
);
649 deleteThis("ConnStateData::connStateClosed");
652 // cleans up before destructor is called
654 ConnStateData::swanSong()
656 debugs(33, 2, "ConnStateData::swanSong: FD " << fd
);
658 flags
.readMoreRequests
= false;
659 clientdbEstablished(peer
, -1); /* decrement */
660 assert(areAllContextsForThisConnection());
663 if (auth_user_request
!= NULL
) {
664 debugs(33, 4, "ConnStateData::swanSong: freeing auth_user_request '" << auth_user_request
<< "' (this is '" << this << "')");
665 auth_user_request
->onConnectionClose(this);
669 comm_close(pinning
.fd
);
671 BodyProducer::swanSong();
672 flags
.swanSang
= true;
676 ConnStateData::isOpen() const
678 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
680 !fd_table
[fd
].closing();
683 ConnStateData::~ConnStateData()
685 assert(this != NULL
);
686 debugs(33, 3, "ConnStateData::~ConnStateData: FD " << fd
);
689 debugs(33, 1, "BUG: ConnStateData did not close FD " << fd
);
692 debugs(33, 1, "BUG: ConnStateData was not destroyed properly; FD " << fd
);
694 AUTHUSERREQUESTUNLOCK(auth_user_request
, "~conn");
696 cbdataReferenceDone(port
);
698 if (bodyPipe
!= NULL
)
699 stopProducingFor(bodyPipe
, false);
703 * clientSetKeepaliveFlag() sets request->flags.proxy_keepalive.
704 * This is the client-side persistent connection flag. We need
705 * to set this relatively early in the request processing
706 * to handle hacks for broken servers and clients.
709 clientSetKeepaliveFlag(ClientHttpRequest
* http
)
711 HttpRequest
*request
= http
->request
;
712 const HttpHeader
*req_hdr
= &request
->header
;
714 debugs(33, 3, "clientSetKeepaliveFlag: http_ver = " <<
715 request
->http_ver
.major
<< "." << request
->http_ver
.minor
);
716 debugs(33, 3, "clientSetKeepaliveFlag: method = " <<
717 RequestMethodStr(request
->method
));
719 /* We are HTTP/1.1 facing clients now*/
720 HttpVersion
http_ver(1,1);
722 if (httpMsgIsPersistent(http_ver
, req_hdr
))
723 request
->flags
.proxy_keepalive
= 1;
727 clientIsContentLengthValid(HttpRequest
* r
)
729 switch (r
->method
.id()) {
734 /* PUT/POST requires a request entity */
735 return (r
->content_length
>= 0);
740 /* We do not want to see a request entity on GET/HEAD requests */
741 return (r
->content_length
<= 0 || Config
.onoff
.request_entities
);
744 /* For other types of requests we don't care */
752 clientIsRequestBodyValid(int64_t bodyLength
)
761 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength
)
763 if (Config
.maxRequestBodySize
&&
764 bodyLength
> Config
.maxRequestBodySize
)
765 return 1; /* too large */
772 connIsUsable(ConnStateData
* conn
)
774 if (conn
== NULL
|| !cbdataReferenceValid(conn
) || conn
->fd
== -1)
782 ClientSocketContext::Pointer
783 ConnStateData::getCurrentContext() const
786 return currentobject
;
790 ClientSocketContext::deferRecipientForLater(clientStreamNode
* node
, HttpReply
* rep
, StoreIOBuffer receivedData
)
792 debugs(33, 2, "clientSocketRecipient: Deferring request " << http
->uri
);
793 assert(flags
.deferred
== 0);
795 deferredparams
.node
= node
;
796 deferredparams
.rep
= rep
;
797 deferredparams
.queuedBuffer
= receivedData
;
802 responseFinishedOrFailed(HttpReply
* rep
, StoreIOBuffer
const & receivedData
)
804 if (rep
== NULL
&& receivedData
.data
== NULL
&& receivedData
.length
== 0)
811 ClientSocketContext::startOfOutput() const
813 return http
->out
.size
== 0;
817 ClientSocketContext::lengthToSend(Range
<int64_t> const &available
)
819 /*the size of available range can always fit in a size_t type*/
820 size_t maximum
= (size_t)available
.size();
822 if (!http
->request
->range
)
825 assert (canPackMoreRanges());
827 if (http
->range_iter
.debt() == -1)
830 assert (http
->range_iter
.debt() > 0);
832 /* TODO this + the last line could be a range intersection calculation */
833 if (available
.start
< http
->range_iter
.currentSpec()->offset
)
836 return min(http
->range_iter
.debt(), (int64_t)maximum
);
840 ClientSocketContext::noteSentBodyBytes(size_t bytes
)
842 http
->out
.offset
+= bytes
;
844 if (!http
->request
->range
)
847 if (http
->range_iter
.debt() != -1) {
848 http
->range_iter
.debt(http
->range_iter
.debt() - bytes
);
849 assert (http
->range_iter
.debt() >= 0);
852 /* debt() always stops at -1, below that is a bug */
853 assert (http
->range_iter
.debt() >= -1);
857 ClientHttpRequest::multipartRangeRequest() const
859 return request
->multipartRangeRequest();
863 ClientSocketContext::multipartRangeRequest() const
865 return http
->multipartRangeRequest();
869 ClientSocketContext::sendBody(HttpReply
* rep
, StoreIOBuffer bodyData
)
873 if (!multipartRangeRequest()) {
874 size_t length
= lengthToSend(bodyData
.range());
875 noteSentBodyBytes (length
);
876 AsyncCall::Pointer call
= commCbCall(33, 5, "clientWriteBodyComplete",
877 CommIoCbPtrFun(clientWriteBodyComplete
, this));
878 comm_write(fd(), bodyData
.data
, length
, call
);
884 packRange(bodyData
, &mb
);
886 if (mb
.contentSize()) {
888 AsyncCall::Pointer call
= commCbCall(33, 5, "clientWriteComplete",
889 CommIoCbPtrFun(clientWriteComplete
, this));
890 comm_write_mbuf(fd(), &mb
, call
);
892 writeComplete(fd(), NULL
, 0, COMM_OK
);
895 /** put terminating boundary for multiparts */
897 clientPackTermBound(String boundary
, MemBuf
* mb
)
899 mb
->Printf("\r\n--" SQUIDSTRINGPH
"--\r\n", SQUIDSTRINGPRINT(boundary
));
900 debugs(33, 6, "clientPackTermBound: buf offset: " << mb
->size
);
903 /** appends a "part" HTTP header (as in a multi-part/range reply) to the buffer */
905 clientPackRangeHdr(const HttpReply
* rep
, const HttpHdrRangeSpec
* spec
, String boundary
, MemBuf
* mb
)
907 HttpHeader
hdr(hoReply
);
913 debugs(33, 5, "clientPackRangeHdr: appending boundary: " << boundary
);
914 /* rfc2046 requires to _prepend_ boundary with <crlf>! */
915 mb
->Printf("\r\n--" SQUIDSTRINGPH
"\r\n", SQUIDSTRINGPRINT(boundary
));
917 /* stuff the header with required entries and pack it */
919 if (rep
->header
.has(HDR_CONTENT_TYPE
))
920 hdr
.putStr(HDR_CONTENT_TYPE
, rep
->header
.getStr(HDR_CONTENT_TYPE
));
922 httpHeaderAddContRange(&hdr
, *spec
, rep
->content_length
);
924 packerToMemInit(&p
, mb
);
932 /* append <crlf> (we packed a header, not a reply) */
937 * extracts a "range" from *buf and appends them to mb, updating
938 * all offsets and such.
941 ClientSocketContext::packRange(StoreIOBuffer
const &source
, MemBuf
* mb
)
943 HttpHdrRangeIter
* i
= &http
->range_iter
;
944 Range
<int64_t> available (source
.range());
945 char const *buf
= source
.data
;
947 while (i
->currentSpec() && available
.size()) {
948 const size_t copy_sz
= lengthToSend(available
);
952 * intersection of "have" and "need" ranges must not be empty
954 assert(http
->out
.offset
< i
->currentSpec()->offset
+ i
->currentSpec()->length
);
955 assert(http
->out
.offset
+ available
.size() > i
->currentSpec()->offset
);
958 * put boundary and headers at the beginning of a range in a
962 if (http
->multipartRangeRequest() && i
->debt() == i
->currentSpec()->length
) {
963 assert(http
->memObject());
965 http
->memObject()->getReply(), /* original reply */
966 i
->currentSpec(), /* current range */
967 i
->boundary
, /* boundary, the same for all */
974 debugs(33, 3, "clientPackRange: appending " << copy_sz
<< " bytes");
976 noteSentBodyBytes (copy_sz
);
978 mb
->append(buf
, copy_sz
);
983 available
.start
+= copy_sz
;
992 assert((available
.size() >= 0 && i
->debt() >= 0) || i
->debt() == -1);
994 if (!canPackMoreRanges()) {
995 debugs(33, 3, "clientPackRange: Returning because !canPackMoreRanges.");
998 /* put terminating boundary for multiparts */
999 clientPackTermBound(i
->boundary
, mb
);
1004 int64_t nextOffset
= getNextRangeOffset();
1006 assert (nextOffset
>= http
->out
.offset
);
1008 int64_t skip
= nextOffset
- http
->out
.offset
;
1010 /* adjust for not to be transmitted bytes */
1011 http
->out
.offset
= nextOffset
;
1013 if (available
.size() <= skip
)
1016 available
.start
+= skip
;
1025 /** returns expected content length for multi-range replies
1026 * note: assumes that httpHdrRangeCanonize has already been called
1027 * warning: assumes that HTTP headers for individual ranges at the
1028 * time of the actuall assembly will be exactly the same as
1029 * the headers when clientMRangeCLen() is called */
1031 ClientHttpRequest::mRangeCLen()
1036 assert(memObject());
1039 HttpHdrRange::iterator pos
= request
->range
->begin();
1041 while (pos
!= request
->range
->end()) {
1042 /* account for headers for this range */
1044 clientPackRangeHdr(memObject()->getReply(),
1045 *pos
, range_iter
.boundary
, &mb
);
1048 /* account for range content */
1049 clen
+= (*pos
)->length
;
1051 debugs(33, 6, "clientMRangeCLen: (clen += " << mb
.size
<< " + " << (*pos
)->length
<< ") == " << clen
);
1055 /* account for the terminating boundary */
1058 clientPackTermBound(range_iter
.boundary
, &mb
);
1068 * returns true if If-Range specs match reply, false otherwise
1071 clientIfRangeMatch(ClientHttpRequest
* http
, HttpReply
* rep
)
1073 const TimeOrTag spec
= http
->request
->header
.getTimeOrTag(HDR_IF_RANGE
);
1074 /* check for parsing falure */
1081 ETag rep_tag
= rep
->header
.getETag(HDR_ETAG
);
1082 debugs(33, 3, "clientIfRangeMatch: ETags: " << spec
.tag
.str
<< " and " <<
1083 (rep_tag
.str
? rep_tag
.str
: "<none>"));
1086 return 0; /* entity has no etag to compare with! */
1088 if (spec
.tag
.weak
|| rep_tag
.weak
) {
1089 debugs(33, 1, "clientIfRangeMatch: Weak ETags are not allowed in If-Range: " << spec
.tag
.str
<< " ? " << rep_tag
.str
);
1090 return 0; /* must use strong validator for sub-range requests */
1093 return etagIsEqual(&rep_tag
, &spec
.tag
);
1096 /* got modification time? */
1097 if (spec
.time
>= 0) {
1098 return http
->storeEntry()->lastmod
<= spec
.time
;
1101 assert(0); /* should not happen */
1106 * generates a "unique" boundary string for multipart responses
1107 * the caller is responsible for cleaning the string */
1109 ClientHttpRequest::rangeBoundaryStr() const
1113 String
b(APP_FULLNAME
);
1115 key
= storeEntry()->getMD5Text();
1116 b
.append(key
, strlen(key
));
1120 /** adds appropriate Range headers if needed */
1122 ClientSocketContext::buildRangeHeader(HttpReply
* rep
)
1124 HttpHeader
*hdr
= rep
? &rep
->header
: 0;
1125 const char *range_err
= NULL
;
1126 HttpRequest
*request
= http
->request
;
1127 assert(request
->range
);
1128 /* check if we still want to do ranges */
1130 int64_t roffLimit
= request
->getRangeOffsetLimit();
1133 range_err
= "no [parse-able] reply";
1134 else if ((rep
->sline
.status
!= HTTP_OK
) && (rep
->sline
.status
!= HTTP_PARTIAL_CONTENT
))
1135 range_err
= "wrong status code";
1136 else if (hdr
->has(HDR_CONTENT_RANGE
))
1137 range_err
= "origin server does ranges";
1138 else if (rep
->content_length
< 0)
1139 range_err
= "unknown length";
1140 else if (rep
->content_length
!= http
->memObject()->getReply()->content_length
)
1141 range_err
= "INCONSISTENT length"; /* a bug? */
1143 /* hits only - upstream peer determines correct behaviour on misses, and client_side_reply determines
1146 else if (logTypeIsATcpHit(http
->logType
) && http
->request
->header
.has(HDR_IF_RANGE
) && !clientIfRangeMatch(http
, rep
))
1147 range_err
= "If-Range match failed";
1148 else if (!http
->request
->range
->canonize(rep
))
1149 range_err
= "canonization failed";
1150 else if (http
->request
->range
->isComplex())
1151 range_err
= "too complex range header";
1152 else if (!logTypeIsATcpHit(http
->logType
) && http
->request
->range
->offsetLimitExceeded(roffLimit
))
1153 range_err
= "range outside range_offset_limit";
1155 /* get rid of our range specs on error */
1157 /* XXX We do this here because we need canonisation etc. However, this current
1158 * code will lead to incorrect store offset requests - the store will have the
1159 * offset data, but we won't be requesting it.
1160 * So, we can either re-request, or generate an error
1162 debugs(33, 3, "clientBuildRangeHeader: will not do ranges: " << range_err
<< ".");
1163 delete http
->request
->range
;
1164 http
->request
->range
= NULL
;
1166 /* XXX: TODO: Review, this unconditional set may be wrong. - TODO: review. */
1167 httpStatusLineSet(&rep
->sline
, rep
->sline
.version
,
1168 HTTP_PARTIAL_CONTENT
, NULL
);
1169 // web server responded with a valid, but unexpected range.
1170 // will (try-to) forward as-is.
1171 //TODO: we should cope with multirange request/responses
1172 bool replyMatchRequest
= rep
->content_range
!= NULL
?
1173 request
->range
->contains(rep
->content_range
->spec
) :
1175 const int spec_count
= http
->request
->range
->specs
.count
;
1176 int64_t actual_clen
= -1;
1178 debugs(33, 3, "clientBuildRangeHeader: range spec count: " <<
1179 spec_count
<< " virgin clen: " << rep
->content_length
);
1180 assert(spec_count
> 0);
1181 /* ETags should not be returned with Partial Content replies? */
1182 hdr
->delById(HDR_ETAG
);
1183 /* append appropriate header(s) */
1185 if (spec_count
== 1) {
1186 if (!replyMatchRequest
) {
1187 hdr
->delById(HDR_CONTENT_RANGE
);
1188 hdr
->putContRange(rep
->content_range
);
1189 actual_clen
= rep
->content_length
;
1190 //http->range_iter.pos = rep->content_range->spec.begin();
1191 (*http
->range_iter
.pos
)->offset
= rep
->content_range
->spec
.offset
;
1192 (*http
->range_iter
.pos
)->length
= rep
->content_range
->spec
.length
;
1195 HttpHdrRange::iterator pos
= http
->request
->range
->begin();
1197 /* append Content-Range */
1199 if (!hdr
->has(HDR_CONTENT_RANGE
)) {
1200 /* No content range, so this was a full object we are
1203 httpHeaderAddContRange(hdr
, **pos
, rep
->content_length
);
1206 /* set new Content-Length to the actual number of bytes
1207 * transmitted in the message-body */
1208 actual_clen
= (*pos
)->length
;
1212 /* generate boundary string */
1213 http
->range_iter
.boundary
= http
->rangeBoundaryStr();
1214 /* delete old Content-Type, add ours */
1215 hdr
->delById(HDR_CONTENT_TYPE
);
1216 httpHeaderPutStrf(hdr
, HDR_CONTENT_TYPE
,
1217 "multipart/byteranges; boundary=\"" SQUIDSTRINGPH
"\"",
1218 SQUIDSTRINGPRINT(http
->range_iter
.boundary
));
1219 /* Content-Length is not required in multipart responses
1220 * but it is always nice to have one */
1221 actual_clen
= http
->mRangeCLen();
1222 /* http->out needs to start where we want data at */
1223 http
->out
.offset
= http
->range_iter
.currentSpec()->offset
;
1226 /* replace Content-Length header */
1227 assert(actual_clen
>= 0);
1229 hdr
->delById(HDR_CONTENT_LENGTH
);
1231 hdr
->putInt64(HDR_CONTENT_LENGTH
, actual_clen
);
1233 debugs(33, 3, "clientBuildRangeHeader: actual content length: " << actual_clen
);
1235 /* And start the range iter off */
1236 http
->range_iter
.updateSpec();
1241 ClientSocketContext::prepareReply(HttpReply
* rep
)
1245 if (http
->request
->range
)
1246 buildRangeHeader(rep
);
1250 ClientSocketContext::sendStartOfMessage(HttpReply
* rep
, StoreIOBuffer bodyData
)
1254 MemBuf
*mb
= rep
->pack();
1255 /* Save length of headers for persistent conn checks */
1256 http
->out
.headers_sz
= mb
->contentSize();
1259 headersLog(0, 0, http
->request
->method
, rep
);
1262 if (bodyData
.data
&& bodyData
.length
) {
1263 if (!multipartRangeRequest()) {
1264 size_t length
= lengthToSend(bodyData
.range());
1265 noteSentBodyBytes (length
);
1267 mb
->append(bodyData
.data
, length
);
1269 packRange(bodyData
, mb
);
1274 debugs(33,7, HERE
<< "sendStartOfMessage schedules clientWriteComplete");
1275 AsyncCall::Pointer call
= commCbCall(33, 5, "clientWriteComplete",
1276 CommIoCbPtrFun(clientWriteComplete
, this));
1277 comm_write_mbuf(fd(), mb
, call
);
1283 * Write a chunk of data to a client socket. If the reply is present,
1284 * send the reply headers down the wire too, and clean them up when
1287 * The request is one backed by a connection, not an internal request.
1288 * data context is not NULL
1289 * There are no more entries in the stream chain.
1292 clientSocketRecipient(clientStreamNode
* node
, ClientHttpRequest
* http
,
1293 HttpReply
* rep
, StoreIOBuffer receivedData
)
1296 /* Test preconditions */
1297 assert(node
!= NULL
);
1298 PROF_start(clientSocketRecipient
);
1299 /* TODO: handle this rather than asserting
1300 * - it should only ever happen if we cause an abort and
1301 * the callback chain loops back to here, so we can simply return.
1302 * However, that itself shouldn't happen, so it stays as an assert for now.
1304 assert(cbdataReferenceValid(node
));
1305 assert(node
->node
.next
== NULL
);
1306 ClientSocketContext::Pointer context
= dynamic_cast<ClientSocketContext
*>(node
->data
.getRaw());
1307 assert(context
!= NULL
);
1308 assert(connIsUsable(http
->getConn()));
1309 fd
= http
->getConn()->fd
;
1310 /* TODO: check offset is what we asked for */
1312 if (context
!= http
->getConn()->getCurrentContext()) {
1313 context
->deferRecipientForLater(node
, rep
, receivedData
);
1314 PROF_stop(clientSocketRecipient
);
1318 if (responseFinishedOrFailed(rep
, receivedData
)) {
1319 context
->writeComplete(fd
, NULL
, 0, COMM_OK
);
1320 PROF_stop(clientSocketRecipient
);
1324 if (!context
->startOfOutput())
1325 context
->sendBody(rep
, receivedData
);
1328 http
->al
.reply
= HTTPMSGLOCK(rep
);
1329 context
->sendStartOfMessage(rep
, receivedData
);
1332 PROF_stop(clientSocketRecipient
);
1336 * Called when a downstream node is no longer interested in
1337 * our data. As we are a terminal node, this means on aborts
1341 clientSocketDetach(clientStreamNode
* node
, ClientHttpRequest
* http
)
1343 /* Test preconditions */
1344 assert(node
!= NULL
);
1345 /* TODO: handle this rather than asserting
1346 * - it should only ever happen if we cause an abort and
1347 * the callback chain loops back to here, so we can simply return.
1348 * However, that itself shouldn't happen, so it stays as an assert for now.
1350 assert(cbdataReferenceValid(node
));
1351 /* Set null by ContextFree */
1352 assert(node
->node
.next
== NULL
);
1353 /* this is the assert discussed above */
1354 assert(NULL
== dynamic_cast<ClientSocketContext
*>(node
->data
.getRaw()));
1355 /* We are only called when the client socket shutsdown.
1356 * Tell the prev pipeline member we're finished
1358 clientStreamDetach(node
, http
);
1362 clientWriteBodyComplete(int fd
, char *buf
, size_t size
, comm_err_t errflag
, int xerrno
, void *data
)
1364 debugs(33,7, HERE
<< "clientWriteBodyComplete schedules clientWriteComplete");
1365 clientWriteComplete(fd
, NULL
, size
, errflag
, xerrno
, data
);
1369 ConnStateData::readNextRequest()
1371 debugs(33, 5, "ConnStateData::readNextRequest: FD " << fd
<< " reading next req");
1373 fd_note(fd
, "Waiting for next request");
1375 * Set the timeout BEFORE calling clientReadRequest().
1377 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
1378 AsyncCall::Pointer timeoutCall
= asyncCall(33, 5, "ConnStateData::requestTimeout",
1379 TimeoutDialer(this, &ConnStateData::requestTimeout
));
1380 commSetTimeout(fd
, Config
.Timeout
.persistent_request
, timeoutCall
);
1383 /** Please don't do anything with the FD past here! */
1387 ClientSocketContextPushDeferredIfNeeded(ClientSocketContext::Pointer deferredRequest
, ConnStateData
* conn
)
1389 debugs(33, 2, "ClientSocketContextPushDeferredIfNeeded: FD " << conn
->fd
<< " Sending next");
1391 /** If the client stream is waiting on a socket write to occur, then */
1393 if (deferredRequest
->flags
.deferred
) {
1394 /** NO data is allowed to have been sent. */
1395 assert(deferredRequest
->http
->out
.size
== 0);
1397 clientSocketRecipient(deferredRequest
->deferredparams
.node
,
1398 deferredRequest
->http
,
1399 deferredRequest
->deferredparams
.rep
,
1400 deferredRequest
->deferredparams
.queuedBuffer
);
1403 /** otherwise, the request is still active in a callbacksomewhere,
1409 ClientSocketContext::keepaliveNextRequest()
1411 ConnStateData
* conn
= http
->getConn();
1412 bool do_next_read
= false;
1414 debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: FD " << conn
->fd
);
1417 if (conn
->pinning
.pinned
&& conn
->pinning
.fd
== -1) {
1418 debugs(33, 2, "clientKeepaliveNextRequest: FD " << conn
->fd
<< " Connection was pinned but server side gone. Terminating client connection");
1419 comm_close(conn
->fd
);
1424 * Attempt to parse a request from the request buffer.
1425 * If we've been fed a pipelined request it may already
1426 * be in our read buffer.
1429 * This needs to fall through - if we're unlucky and parse the _last_ request
1430 * from our read buffer we may never re-register for another client read.
1433 if (clientParseRequest(conn
, do_next_read
)) {
1434 debugs(33, 3, "clientSocketContext::keepaliveNextRequest: FD " << conn
->fd
<< ": parsed next request from buffer");
1438 * Either we need to kick-start another read or, if we have
1439 * a half-closed connection, kill it after the last request.
1440 * This saves waiting for half-closed connections to finished being
1441 * half-closed _AND_ then, sometimes, spending "Timeout" time in
1442 * the keepalive "Waiting for next request" state.
1444 if (commIsHalfClosed(conn
->fd
) && (conn
->getConcurrentRequestCount() == 0)) {
1445 debugs(33, 3, "ClientSocketContext::keepaliveNextRequest: half-closed client with no pending requests, closing");
1446 comm_close(conn
->fd
);
1450 ClientSocketContext::Pointer deferredRequest
;
1453 * At this point we either have a parsed request (which we've
1454 * kicked off the processing for) or not. If we have a deferred
1455 * request (parsed but deferred for pipeling processing reasons)
1456 * then look at processing it. If not, simply kickstart
1460 if ((deferredRequest
= conn
->getCurrentContext()).getRaw()) {
1461 debugs(33, 3, "ClientSocketContext:: FD " << conn
->fd
<< ": calling PushDeferredIfNeeded");
1462 ClientSocketContextPushDeferredIfNeeded(deferredRequest
, conn
);
1464 debugs(33, 3, "ClientSocketContext:: FD " << conn
->fd
<< ": calling conn->readNextRequest()");
1465 conn
->readNextRequest();
1470 clientUpdateSocketStats(log_type logType
, size_t size
)
1475 kb_incr(&statCounter
.client_http
.kbytes_out
, size
);
1477 if (logTypeIsATcpHit(logType
))
1478 kb_incr(&statCounter
.client_http
.hit_kbytes_out
, size
);
1482 * increments iterator "i"
1483 * used by clientPackMoreRanges
1485 \retval true there is still data available to pack more ranges
1489 ClientSocketContext::canPackMoreRanges() const
1491 /** first update iterator "i" if needed */
1493 if (!http
->range_iter
.debt()) {
1494 debugs(33, 5, "ClientSocketContext::canPackMoreRanges: At end of current range spec for FD " << fd());
1496 if (http
->range_iter
.pos
.incrementable())
1497 ++http
->range_iter
.pos
;
1499 http
->range_iter
.updateSpec();
1502 assert(!http
->range_iter
.debt() == !http
->range_iter
.currentSpec());
1504 /* paranoid sync condition */
1505 /* continue condition: need_more_data */
1506 debugs(33, 5, "ClientSocketContext::canPackMoreRanges: returning " << (http
->range_iter
.currentSpec() ? true : false));
1507 return http
->range_iter
.currentSpec() ? true : false;
1511 ClientSocketContext::getNextRangeOffset() const
1513 if (http
->request
->range
) {
1514 /* offset in range specs does not count the prefix of an http msg */
1515 debugs (33, 5, "ClientSocketContext::getNextRangeOffset: http offset " << http
->out
.offset
);
1516 /* check: reply was parsed and range iterator was initialized */
1517 assert(http
->range_iter
.valid
);
1518 /* filter out data according to range specs */
1519 assert (canPackMoreRanges());
1521 int64_t start
; /* offset of still missing data */
1522 assert(http
->range_iter
.currentSpec());
1523 start
= http
->range_iter
.currentSpec()->offset
+ http
->range_iter
.currentSpec()->length
- http
->range_iter
.debt();
1524 debugs(33, 3, "clientPackMoreRanges: in: offset: " << http
->out
.offset
);
1525 debugs(33, 3, "clientPackMoreRanges: out:"
1526 " start: " << start
<<
1527 " spec[" << http
->range_iter
.pos
- http
->request
->range
->begin() << "]:" <<
1528 " [" << http
->range_iter
.currentSpec()->offset
<<
1529 ", " << http
->range_iter
.currentSpec()->offset
+ http
->range_iter
.currentSpec()->length
<< "),"
1530 " len: " << http
->range_iter
.currentSpec()->length
<<
1531 " debt: " << http
->range_iter
.debt());
1532 if (http
->range_iter
.currentSpec()->length
!= -1)
1533 assert(http
->out
.offset
<= start
); /* we did not miss it */
1538 } else if (reply
&& reply
->content_range
) {
1539 /* request does not have ranges, but reply does */
1540 /** \todo FIXME: should use range_iter_pos on reply, as soon as reply->content_range
1541 * becomes HttpHdrRange rather than HttpHdrRangeSpec.
1543 return http
->out
.offset
+ reply
->content_range
->spec
.offset
;
1546 return http
->out
.offset
;
1550 ClientSocketContext::pullData()
1552 debugs(33, 5, "ClientSocketContext::pullData: FD " << fd() <<
1553 " attempting to pull upstream data");
1555 /* More data will be coming from the stream. */
1556 StoreIOBuffer readBuffer
;
1557 /* XXX: Next requested byte in the range sequence */
1558 /* XXX: length = getmaximumrangelenfgth */
1559 readBuffer
.offset
= getNextRangeOffset();
1560 readBuffer
.length
= HTTP_REQBUF_SZ
;
1561 readBuffer
.data
= reqbuf
;
1562 /* we may note we have reached the end of the wanted ranges */
1563 clientStreamRead(getTail(), http
, readBuffer
);
1566 clientStream_status_t
1567 ClientSocketContext::socketState()
1569 switch (clientStreamStatus(getTail(), http
)) {
1572 /* check for range support ending */
1574 if (http
->request
->range
) {
1575 /* check: reply was parsed and range iterator was initialized */
1576 assert(http
->range_iter
.valid
);
1577 /* filter out data according to range specs */
1579 if (!canPackMoreRanges()) {
1580 debugs(33, 5, HERE
<< "Range request at end of returnable " <<
1581 "range sequence on FD " << fd());
1583 if (http
->request
->flags
.proxy_keepalive
)
1584 return STREAM_COMPLETE
;
1586 return STREAM_UNPLANNED_COMPLETE
;
1588 } else if (reply
&& reply
->content_range
) {
1589 /* reply has content-range, but Squid is not managing ranges */
1590 const int64_t &bytesSent
= http
->out
.offset
;
1591 const int64_t &bytesExpected
= reply
->content_range
->spec
.length
;
1593 debugs(33, 7, HERE
<< "body bytes sent vs. expected: " <<
1594 bytesSent
<< " ? " << bytesExpected
<< " (+" <<
1595 reply
->content_range
->spec
.offset
<< ")");
1597 // did we get at least what we expected, based on range specs?
1599 if (bytesSent
== bytesExpected
) { // got everything
1600 if (http
->request
->flags
.proxy_keepalive
)
1601 return STREAM_COMPLETE
;
1603 return STREAM_UNPLANNED_COMPLETE
;
1606 // The logic below is not clear: If we got more than we
1607 // expected why would persistency matter? Should not this
1608 // always be an error?
1609 if (bytesSent
> bytesExpected
) { // got extra
1610 if (http
->request
->flags
.proxy_keepalive
)
1611 return STREAM_COMPLETE
;
1613 return STREAM_UNPLANNED_COMPLETE
;
1616 // did not get enough yet, expecting more
1621 case STREAM_COMPLETE
:
1622 return STREAM_COMPLETE
;
1624 case STREAM_UNPLANNED_COMPLETE
:
1625 return STREAM_UNPLANNED_COMPLETE
;
1628 return STREAM_FAILED
;
1631 fatal ("unreachable code\n");
1636 * A write has just completed to the client, or we have just realised there is
1637 * no more data to send.
1640 clientWriteComplete(int fd
, char *bufnotused
, size_t size
, comm_err_t errflag
, int xerrno
, void *data
)
1642 ClientSocketContext
*context
= (ClientSocketContext
*)data
;
1643 context
->writeComplete (fd
, bufnotused
, size
, errflag
);
1646 /// remembers the abnormal connection termination for logging purposes
1648 ClientSocketContext::noteIoError(const int xerrno
)
1651 if (xerrno
== ETIMEDOUT
)
1652 http
->al
.http
.timedout
= true;
1653 else // even if xerrno is zero (which means read abort/eof)
1654 http
->al
.http
.aborted
= true;
1660 ClientSocketContext::doClose()
1665 /** Called to initiate (and possibly complete) closing of the context.
1666 * The underlying socket may be already closed */
1668 ClientSocketContext::initiateClose(const char *reason
)
1670 debugs(33, 5, HERE
<< "initiateClose: closing for " << reason
);
1673 ConnStateData
* conn
= http
->getConn();
1676 if (const int64_t expecting
= conn
->bodySizeLeft()) {
1677 debugs(33, 5, HERE
<< "ClientSocketContext::initiateClose: " <<
1678 "closing, but first " << conn
<< " needs to read " <<
1679 expecting
<< " request body bytes with " <<
1680 conn
->in
.notYetUsed
<< " notYetUsed");
1682 if (conn
->closing()) {
1683 debugs(33, 2, HERE
<< "avoiding double-closing " << conn
);
1688 * XXX We assume the reply fits in the TCP transmit
1689 * window. If not the connection may stall while sending
1690 * the reply (before reaching here) if the client does not
1691 * try to read the response while sending the request body.
1692 * As of yet we have not received any complaints indicating
1693 * this may be an issue.
1695 conn
->startClosing(reason
);
1706 ClientSocketContext::writeComplete(int aFileDescriptor
, char *bufnotused
, size_t size
, comm_err_t errflag
)
1708 StoreEntry
*entry
= http
->storeEntry();
1709 http
->out
.size
+= size
;
1710 assert(aFileDescriptor
> -1);
1711 debugs(33, 5, "clientWriteComplete: FD " << aFileDescriptor
<< ", sz " << size
<<
1712 ", err " << errflag
<< ", off " << http
->out
.size
<< ", len " <<
1713 entry
? entry
->objectLen() : 0);
1714 clientUpdateSocketStats(http
->logType
, size
);
1715 assert (this->fd() == aFileDescriptor
);
1717 /* Bail out quickly on COMM_ERR_CLOSING - close handlers will tidy up */
1719 if (errflag
== COMM_ERR_CLOSING
)
1722 if (errflag
|| clientHttpRequestStatus(aFileDescriptor
, http
)) {
1723 initiateClose("failure or true request status");
1724 /* Do we leak here ? */
1728 switch (socketState()) {
1734 case STREAM_COMPLETE
:
1735 debugs(33, 5, "clientWriteComplete: FD " << aFileDescriptor
<< " Keeping Alive");
1736 keepaliveNextRequest();
1739 case STREAM_UNPLANNED_COMPLETE
:
1740 initiateClose("STREAM_UNPLANNED_COMPLETE");
1744 initiateClose("STREAM_FAILED");
1748 fatal("Hit unreachable code in clientWriteComplete\n");
/* C-linkage clientStream callbacks implemented in client_side_reply.cc:
 * data producer, status probe, and detach handler for the reply side. */
extern "C" CSR clientGetMoreData;
extern "C" CSS clientReplyStatus;
extern "C" CSD clientReplyDetach;
1756 static ClientSocketContext
*
1757 parseHttpRequestAbort(ConnStateData
* conn
, const char *uri
)
1759 ClientHttpRequest
*http
;
1760 ClientSocketContext
*context
;
1761 StoreIOBuffer tempBuffer
;
1762 http
= new ClientHttpRequest(conn
);
1763 http
->req_sz
= conn
->in
.notYetUsed
;
1764 http
->uri
= xstrdup(uri
);
1765 setLogUri (http
, uri
);
1766 context
= ClientSocketContextNew(http
);
1767 tempBuffer
.data
= context
->reqbuf
;
1768 tempBuffer
.length
= HTTP_REQBUF_SZ
;
1769 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
1770 clientReplyStatus
, new clientReplyContext(http
), clientSocketRecipient
,
1771 clientSocketDetach
, context
, tempBuffer
);
1776 skipLeadingSpace(char *aString
)
1778 char *result
= aString
;
1780 while (xisspace(*aString
))
1787 * 'end' defaults to NULL for backwards compatibility
1788 * remove default value if we ever get rid of NULL-terminated
1792 findTrailingHTTPVersion(const char *uriAndHTTPVersion
, const char *end
)
1795 end
= uriAndHTTPVersion
+ strcspn(uriAndHTTPVersion
, "\r\n");
1799 for (; end
> uriAndHTTPVersion
; end
--) {
1800 if (*end
== '\n' || *end
== '\r')
1803 if (xisspace(*end
)) {
1804 if (strncasecmp(end
+ 1, "HTTP/", 5) == 0)
1815 setLogUri(ClientHttpRequest
* http
, char const *uri
)
1817 safe_free(http
->log_uri
);
1819 if (!stringHasCntl(uri
))
1820 http
->log_uri
= xstrndup(uri
, MAX_URL
);
1822 http
->log_uri
= xstrndup(rfc1738_escape_unescaped(uri
), MAX_URL
);
1826 prepareAcceleratedURL(ConnStateData
* conn
, ClientHttpRequest
*http
, char *url
, const char *req_hdr
)
1828 int vhost
= conn
->port
->vhost
;
1829 int vport
= conn
->port
->vport
;
1831 char ntoabuf
[MAX_IPSTRLEN
];
1833 http
->flags
.accel
= 1;
1835 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1837 if (strncasecmp(url
, "cache_object://", 15) == 0)
1838 return; /* already in good shape */
1841 if (conn
->port
->vhost
)
1842 return; /* already in good shape */
1844 /* else we need to ignore the host name */
1845 url
= strstr(url
, "//");
1847 #if SHOULD_REJECT_UNKNOWN_URLS
1850 hp
->request_parse_status
= HTTP_BAD_REQUEST
;
1851 return parseHttpRequestAbort(conn
, "error:invalid-request");
1856 url
= strchr(url
+ 2, '/');
1862 if (internalCheck(url
)) {
1863 /* prepend our name & port */
1864 http
->uri
= xstrdup(internalLocalUri(NULL
, url
));
1868 const bool switchedToHttps
= conn
->switchedToHttps();
1869 const bool tryHostHeader
= vhost
|| switchedToHttps
;
1870 if (tryHostHeader
&& (host
= mime_get_header(req_hdr
, "Host")) != NULL
) {
1871 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
+
1873 http
->uri
= (char *)xcalloc(url_sz
, 1);
1874 const char *protocol
= switchedToHttps
?
1875 "https" : conn
->port
->protocol
;
1876 snprintf(http
->uri
, url_sz
, "%s://%s%s", protocol
, host
, url
);
1877 debugs(33, 5, "ACCEL VHOST REWRITE: '" << http
->uri
<< "'");
1878 } else if (conn
->port
->defaultsite
) {
1879 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
+
1880 strlen(conn
->port
->defaultsite
);
1881 http
->uri
= (char *)xcalloc(url_sz
, 1);
1882 snprintf(http
->uri
, url_sz
, "%s://%s%s",
1883 conn
->port
->protocol
, conn
->port
->defaultsite
, url
);
1884 debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: '" << http
->uri
<<"'");
1885 } else if (vport
== -1) {
1886 /* Put the local socket IP address as the hostname. */
1887 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
;
1888 http
->uri
= (char *)xcalloc(url_sz
, 1);
1889 snprintf(http
->uri
, url_sz
, "%s://%s:%d%s",
1890 http
->getConn()->port
->protocol
,
1891 http
->getConn()->me
.NtoA(ntoabuf
,MAX_IPSTRLEN
),
1892 http
->getConn()->me
.GetPort(), url
);
1893 debugs(33, 5, "ACCEL VPORT REWRITE: '" << http
->uri
<< "'");
1894 } else if (vport
> 0) {
1895 /* Put the local socket IP address as the hostname, but static port */
1896 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
;
1897 http
->uri
= (char *)xcalloc(url_sz
, 1);
1898 snprintf(http
->uri
, url_sz
, "%s://%s:%d%s",
1899 http
->getConn()->port
->protocol
,
1900 http
->getConn()->me
.NtoA(ntoabuf
,MAX_IPSTRLEN
),
1902 debugs(33, 5, "ACCEL VPORT REWRITE: '" << http
->uri
<< "'");
1907 prepareTransparentURL(ConnStateData
* conn
, ClientHttpRequest
*http
, char *url
, const char *req_hdr
)
1910 char ntoabuf
[MAX_IPSTRLEN
];
1913 return; /* already in good shape */
1915 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1917 if ((host
= mime_get_header(req_hdr
, "Host")) != NULL
) {
1918 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
+
1920 http
->uri
= (char *)xcalloc(url_sz
, 1);
1921 snprintf(http
->uri
, url_sz
, "%s://%s%s",
1922 conn
->port
->protocol
, host
, url
);
1923 debugs(33, 5, "TRANSPARENT HOST REWRITE: '" << http
->uri
<<"'");
1925 /* Put the local socket IP address as the hostname. */
1926 int url_sz
= strlen(url
) + 32 + Config
.appendDomainLen
;
1927 http
->uri
= (char *)xcalloc(url_sz
, 1);
1928 snprintf(http
->uri
, url_sz
, "%s://%s:%d%s",
1929 http
->getConn()->port
->protocol
,
1930 http
->getConn()->me
.NtoA(ntoabuf
,MAX_IPSTRLEN
),
1931 http
->getConn()->me
.GetPort(), url
);
1932 debugs(33, 5, "TRANSPARENT REWRITE: '" << http
->uri
<< "'");
1936 // Temporary hack helper: determine whether the request is chunked, expensive
1938 isChunkedRequest(const HttpParser
*hp
)
1940 HttpRequest request
;
1941 if (!request
.parseHeader(HttpParserHdrBuf(hp
), HttpParserHdrSz(hp
)))
1944 return request
.header
.has(HDR_TRANSFER_ENCODING
) &&
1945 request
.header
.hasListMember(HDR_TRANSFER_ENCODING
, "chunked", ',');
1950 * parseHttpRequest()
1953 * NULL on incomplete requests
1954 * a ClientSocketContext structure on success or failure.
1955 * Sets result->flags.parsed_ok to 0 if failed to parse the request.
1956 * Sets result->flags.parsed_ok to 1 if we have a good request.
1958 static ClientSocketContext
*
1959 parseHttpRequest(ConnStateData
*conn
, HttpParser
*hp
, HttpRequestMethod
* method_p
, HttpVersion
*http_ver
)
1961 char *req_hdr
= NULL
;
1964 ClientHttpRequest
*http
;
1965 ClientSocketContext
*result
;
1966 StoreIOBuffer tempBuffer
;
1969 /* pre-set these values to make aborting simpler */
1970 *method_p
= METHOD_NONE
;
1972 /* NP: don't be tempted to move this down or remove again.
1973 * It's the only DDoS protection old-String has against long URL */
1974 if ( hp
->bufsiz
<= 0) {
1975 debugs(33, 5, "Incomplete request, waiting for end of request line");
1977 } else if ( (size_t)hp
->bufsiz
>= Config
.maxRequestHeaderSize
&& headersEnd(hp
->buf
, Config
.maxRequestHeaderSize
) == 0) {
1978 debugs(33, 5, "parseHttpRequest: Too large request");
1979 hp
->request_parse_status
= HTTP_HEADER_TOO_LARGE
;
1980 return parseHttpRequestAbort(conn
, "error:request-too-large");
1983 /* Attempt to parse the first line; this'll define the method, url, version and header begin */
1984 r
= HttpParserParseReqLine(hp
);
1987 debugs(33, 5, "Incomplete request, waiting for end of request line");
1992 return parseHttpRequestAbort(conn
, "error:invalid-request");
1995 /* Request line is valid here .. */
1996 *http_ver
= HttpVersion(hp
->v_maj
, hp
->v_min
);
1998 /* This call scans the entire request, not just the headers */
1999 if (hp
->v_maj
> 0) {
2000 if ((req_sz
= headersEnd(hp
->buf
, hp
->bufsiz
)) == 0) {
2001 debugs(33, 5, "Incomplete request, waiting for end of headers");
2005 debugs(33, 3, "parseHttpRequest: Missing HTTP identifier");
2006 req_sz
= HttpParserReqSz(hp
);
2009 /* We know the whole request is in hp->buf now */
2011 assert(req_sz
<= (size_t) hp
->bufsiz
);
2013 /* Will the following be true with HTTP/0.9 requests? probably not .. */
2014 /* So the rest of the code will need to deal with '0'-byte headers (ie, none, so don't try parsing em) */
2017 hp
->hdr_end
= req_sz
- 1;
2019 hp
->hdr_start
= hp
->req_end
+ 1;
2021 /* Enforce max_request_size */
2022 if (req_sz
>= Config
.maxRequestHeaderSize
) {
2023 debugs(33, 5, "parseHttpRequest: Too large request");
2024 hp
->request_parse_status
= HTTP_HEADER_TOO_LARGE
;
2025 return parseHttpRequestAbort(conn
, "error:request-too-large");
2029 *method_p
= HttpRequestMethod(&hp
->buf
[hp
->m_start
], &hp
->buf
[hp
->m_end
]+1);
2031 /* deny CONNECT via accelerated ports */
2032 if (*method_p
== METHOD_CONNECT
&& conn
&& conn
->port
&& conn
->port
->accel
) {
2033 debugs(33, DBG_IMPORTANT
, "WARNING: CONNECT method received on " << conn
->port
->protocol
<< " Accelerator port " << conn
->port
->s
.GetPort() );
2034 /* XXX need a way to say "this many character length string" */
2035 debugs(33, DBG_IMPORTANT
, "WARNING: for request: " << hp
->buf
);
2036 hp
->request_parse_status
= HTTP_METHOD_NOT_ALLOWED
;
2037 return parseHttpRequestAbort(conn
, "error:method-not-allowed");
2040 if (*method_p
== METHOD_NONE
) {
2041 /* XXX need a way to say "this many character length string" */
2042 debugs(33, 1, "clientParseRequestMethod: Unsupported method in request '" << hp
->buf
<< "'");
2043 hp
->request_parse_status
= HTTP_METHOD_NOT_ALLOWED
;
2044 return parseHttpRequestAbort(conn
, "error:unsupported-request-method");
2048 * Process headers after request line
2049 * TODO: Use httpRequestParse here.
2051 /* XXX this code should be modified to take a const char * later! */
2052 req_hdr
= (char *) hp
->buf
+ hp
->req_end
+ 1;
2054 debugs(33, 3, "parseHttpRequest: req_hdr = {" << req_hdr
<< "}");
2056 end
= (char *) hp
->buf
+ hp
->hdr_end
;
2058 debugs(33, 3, "parseHttpRequest: end = {" << end
<< "}");
2061 * Check that the headers don't have double-CR.
2062 * NP: strnstr is required so we don't search any possible binary body blobs.
2064 if ( squid_strnstr(req_hdr
, "\r\r\n", req_sz
) ) {
2065 debugs(33, 1, "WARNING: suspicious HTTP request contains double CR");
2066 hp
->request_parse_status
= HTTP_BAD_REQUEST
;
2067 return parseHttpRequestAbort(conn
, "error:double-CR");
2070 debugs(33, 3, "parseHttpRequest: prefix_sz = " <<
2071 (int) HttpParserRequestLen(hp
) << ", req_line_sz = " <<
2072 HttpParserReqSz(hp
));
2074 // Temporary hack: We might receive a chunked body from a broken HTTP/1.1
2075 // client that sends chunked requests to HTTP/1.0 Squid. If the request
2076 // might have a chunked body, parse the headers early to look for the
2077 // "Transfer-Encoding: chunked" header. If we find it, wait until the
2078 // entire body is available so that we can set the content length and
2079 // forward the request without chunks. The primary reason for this is
2080 // to avoid forwarding a chunked request because the server side lacks
2081 // logic to determine when it is valid to do so.
2082 // FUTURE_CODE_TO_SUPPORT_CHUNKED_REQUESTS below will replace this hack.
2083 if (hp
->v_min
== 1 && hp
->v_maj
== 1 && // broken client, may send chunks
2084 Config
.maxChunkedRequestBodySize
> 0 && // configured to dechunk
2085 (*method_p
== METHOD_PUT
|| *method_p
== METHOD_POST
)) {
2087 // check only once per request because isChunkedRequest is expensive
2088 if (conn
->in
.dechunkingState
== ConnStateData::chunkUnknown
) {
2089 if (isChunkedRequest(hp
))
2090 conn
->startDechunkingRequest(hp
);
2092 conn
->in
.dechunkingState
= ConnStateData::chunkNone
;
2095 if (conn
->in
.dechunkingState
== ConnStateData::chunkParsing
) {
2096 if (conn
->parseRequestChunks(hp
)) // parses newly read chunks
2097 return NULL
; // wait for more data
2098 debugs(33, 5, HERE
<< "Got complete chunked request or err.");
2099 assert(conn
->in
.dechunkingState
!= ConnStateData::chunkParsing
);
2103 /* Ok, all headers are received */
2104 http
= new ClientHttpRequest(conn
);
2106 http
->req_sz
= HttpParserRequestLen(hp
);
2107 result
= ClientSocketContextNew(http
);
2108 tempBuffer
.data
= result
->reqbuf
;
2109 tempBuffer
.length
= HTTP_REQBUF_SZ
;
2111 ClientStreamData newServer
= new clientReplyContext(http
);
2112 ClientStreamData newClient
= result
;
2113 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
2114 clientReplyStatus
, newServer
, clientSocketRecipient
,
2115 clientSocketDetach
, newClient
, tempBuffer
);
2117 debugs(33, 5, "parseHttpRequest: Request Header is\n" <<(hp
->buf
) + hp
->hdr_start
);
2121 * XXX this should eventually not use a malloc'ed buffer; the transformation code
2122 * below needs to be modified to not expect a mutable nul-terminated string.
2124 char *url
= (char *)xmalloc(hp
->u_end
- hp
->u_start
+ 16);
2126 memcpy(url
, hp
->buf
+ hp
->u_start
, hp
->u_end
- hp
->u_start
+ 1);
2128 url
[hp
->u_end
- hp
->u_start
+ 1] = '\0';
2130 #if THIS_VIOLATES_HTTP_SPECS_ON_URL_TRANSFORMATION
2132 if ((t
= strchr(url
, '#'))) /* remove HTML anchors */
2137 /* Rewrite the URL in transparent or accelerator mode */
2138 /* NP: there are several cases to traverse here:
2139 * - standard mode (forward proxy)
2140 * - transparent mode (TPROXY)
2141 * - transparent mode with failures
2142 * - intercept mode (NAT)
2143 * - intercept mode with failures
2144 * - accelerator mode (reverse proxy)
2146 * - mixed combos of the above with internal URL
2148 if (conn
->transparent()) {
2149 /* intercept or transparent mode, properly working with no failures */
2150 http
->flags
.intercepted
= conn
->port
->intercepted
;
2151 http
->flags
.spoof_client_ip
= conn
->port
->spoof_client_ip
;
2152 prepareTransparentURL(conn
, http
, url
, req_hdr
);
2154 } else if (conn
->port
->intercepted
|| conn
->port
->spoof_client_ip
) {
2155 /* transparent or intercept mode with failures */
2156 prepareTransparentURL(conn
, http
, url
, req_hdr
);
2158 } else if (conn
->port
->accel
|| conn
->switchedToHttps()) {
2159 /* accelerator mode */
2160 prepareAcceleratedURL(conn
, http
, url
, req_hdr
);
2162 } else if (internalCheck(url
)) {
2163 /* internal URL mode */
2164 /* prepend our name & port */
2165 http
->uri
= xstrdup(internalLocalUri(NULL
, url
));
2166 http
->flags
.accel
= 1;
2170 /* No special rewrites have been applied above, use the
2171 * requested url. may be rewritten later, so make extra room */
2172 int url_sz
= strlen(url
) + Config
.appendDomainLen
+ 5;
2173 http
->uri
= (char *)xcalloc(url_sz
, 1);
2174 strcpy(http
->uri
, url
);
2177 setLogUri(http
, http
->uri
);
2178 debugs(33, 5, "parseHttpRequest: Complete request received");
2179 result
->flags
.parsed_ok
= 1;
2185 ConnStateData::getAvailableBufferLength() const
2187 int result
= in
.allocatedSize
- in
.notYetUsed
- 1;
2188 assert (result
>= 0);
2193 ConnStateData::makeSpaceAvailable()
2195 if (getAvailableBufferLength() < 2) {
2196 in
.buf
= (char *)memReallocBuf(in
.buf
, in
.allocatedSize
* 2, &in
.allocatedSize
);
2197 debugs(33, 2, "growing request buffer: notYetUsed=" << in
.notYetUsed
<< " size=" << in
.allocatedSize
);
2202 ConnStateData::addContextToQueue(ClientSocketContext
* context
)
2204 ClientSocketContext::Pointer
*S
;
2206 for (S
= (ClientSocketContext::Pointer
*) & currentobject
; S
->getRaw();
2214 ConnStateData::getConcurrentRequestCount() const
2217 ClientSocketContext::Pointer
*T
;
2219 for (T
= (ClientSocketContext::Pointer
*) ¤tobject
;
2220 T
->getRaw(); T
= &(*T
)->next
, ++result
);
2225 ConnStateData::connReadWasError(comm_err_t flag
, int size
, int xerrno
)
2227 if (flag
!= COMM_OK
) {
2228 debugs(33, 2, "connReadWasError: FD " << fd
<< ": got flag " << flag
);
2233 if (!ignoreErrno(xerrno
)) {
2234 debugs(33, 2, "connReadWasError: FD " << fd
<< ": " << xstrerr(xerrno
));
2236 } else if (in
.notYetUsed
== 0) {
2237 debugs(33, 2, "connReadWasError: FD " << fd
<< ": no data to process (" << xstrerr(xerrno
) << ")");
2245 ConnStateData::connFinishedWithConn(int size
)
2248 if (getConcurrentRequestCount() == 0 && in
.notYetUsed
== 0) {
2249 /* no current or pending requests */
2250 debugs(33, 4, "connFinishedWithConn: FD " << fd
<< " closed");
2252 } else if (!Config
.onoff
.half_closed_clients
) {
2253 /* admin doesn't want to support half-closed client sockets */
2254 debugs(33, 3, "connFinishedWithConn: FD " << fd
<< " aborted (half_closed_clients disabled)");
2255 notifyAllContexts(0); // no specific error implies abort
2264 connNoteUseOfBuffer(ConnStateData
* conn
, size_t byteCount
)
2266 assert(byteCount
> 0 && byteCount
<= conn
->in
.notYetUsed
);
2267 conn
->in
.notYetUsed
-= byteCount
;
2268 debugs(33, 5, HERE
<< "conn->in.notYetUsed = " << conn
->in
.notYetUsed
);
2270 * If there is still data that will be used,
2271 * move it to the beginning.
2274 if (conn
->in
.notYetUsed
> 0)
2275 xmemmove(conn
->in
.buf
, conn
->in
.buf
+ byteCount
,
2276 conn
->in
.notYetUsed
);
2280 connKeepReadingIncompleteRequest(ConnStateData
* conn
)
2282 // when we read chunked requests, the entire body is buffered
2283 // XXX: this check ignores header size and its limits.
2284 if (conn
->in
.dechunkingState
== ConnStateData::chunkParsing
)
2285 return ((int64_t)conn
->in
.notYetUsed
) < Config
.maxChunkedRequestBodySize
;
2287 return conn
->in
.notYetUsed
>= Config
.maxRequestHeaderSize
? 0 : 1;
2291 connCancelIncompleteRequests(ConnStateData
* conn
)
2293 ClientSocketContext
*context
= parseHttpRequestAbort(conn
, "error:request-too-large");
2294 clientStreamNode
*node
= context
->getClientReplyContext();
2295 assert(!connKeepReadingIncompleteRequest(conn
));
2296 if (conn
->in
.dechunkingState
== ConnStateData::chunkParsing
) {
2297 debugs(33, 1, "Chunked request is too large (" << conn
->in
.notYetUsed
<< " bytes)");
2298 debugs(33, 1, "Config 'chunked_request_body_max_size'= " << Config
.maxChunkedRequestBodySize
<< " bytes.");
2300 debugs(33, 1, "Request header is too large (" << conn
->in
.notYetUsed
<< " bytes)");
2301 debugs(33, 1, "Config 'request_header_max_size'= " << Config
.maxRequestHeaderSize
<< " bytes.");
2303 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2304 assert (repContext
);
2305 repContext
->setReplyToError(ERR_TOO_BIG
,
2306 HTTP_REQUEST_ENTITY_TOO_LARGE
, METHOD_NONE
, NULL
,
2307 conn
->peer
, NULL
, NULL
, NULL
);
2308 context
->registerWithConn();
2309 context
->pullData();
2313 ConnStateData::clientMaybeReadData(int do_next_read
)
2316 flags
.readMoreRequests
= true;
2322 ConnStateData::clientAfterReadingRequests(int do_next_read
)
2325 * If (1) we are reading a message body, (2) and the connection
2326 * is half-closed, and (3) we didn't get the entire HTTP request
2327 * yet, then close this connection.
2330 if (fd_table
[fd
].flags
.socket_eof
) {
2331 if ((int64_t)in
.notYetUsed
< bodySizeLeft()) {
2332 /* Partial request received. Abort client connection! */
2333 debugs(33, 3, "clientAfterReadingRequests: FD " << fd
<< " aborted, partial request");
2339 clientMaybeReadData (do_next_read
);
2343 clientProcessRequest(ConnStateData
*conn
, HttpParser
*hp
, ClientSocketContext
*context
, const HttpRequestMethod
& method
, HttpVersion http_ver
)
2345 ClientHttpRequest
*http
= context
->http
;
2346 HttpRequest
*request
= NULL
;
2347 bool notedUseOfBuffer
= false;
2348 bool tePresent
= false;
2349 bool deChunked
= false;
2350 bool unsupportedTe
= false;
2352 /* We have an initial client stream in place should it be needed */
2353 /* setup our private context */
2354 context
->registerWithConn();
2356 if (context
->flags
.parsed_ok
== 0) {
2357 clientStreamNode
*node
= context
->getClientReplyContext();
2358 debugs(33, 1, "clientProcessRequest: Invalid Request");
2359 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2360 assert (repContext
);
2361 switch (hp
->request_parse_status
) {
2362 case HTTP_HEADER_TOO_LARGE
:
2363 repContext
->setReplyToError(ERR_TOO_BIG
, HTTP_HEADER_TOO_LARGE
, method
, http
->uri
, conn
->peer
, NULL
, conn
->in
.buf
, NULL
);
2365 case HTTP_METHOD_NOT_ALLOWED
:
2366 repContext
->setReplyToError(ERR_UNSUP_REQ
, HTTP_METHOD_NOT_ALLOWED
, method
, http
->uri
, conn
->peer
, NULL
, conn
->in
.buf
, NULL
);
2369 repContext
->setReplyToError(ERR_INVALID_REQ
, HTTP_BAD_REQUEST
, method
, http
->uri
, conn
->peer
, NULL
, conn
->in
.buf
, NULL
);
2371 assert(context
->http
->out
.offset
== 0);
2372 context
->pullData();
2373 conn
->flags
.readMoreRequests
= false;
2377 if ((request
= HttpRequest::CreateFromUrlAndMethod(http
->uri
, method
)) == NULL
) {
2378 clientStreamNode
*node
= context
->getClientReplyContext();
2379 debugs(33, 5, "Invalid URL: " << http
->uri
);
2380 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2381 assert (repContext
);
2382 repContext
->setReplyToError(ERR_INVALID_URL
, HTTP_BAD_REQUEST
, method
, http
->uri
, conn
->peer
, NULL
, NULL
, NULL
);
2383 assert(context
->http
->out
.offset
== 0);
2384 context
->pullData();
2385 conn
->flags
.readMoreRequests
= false;
2389 /* RFC 2616 section 10.5.6 : handle unsupported HTTP versions cleanly. */
2390 /* We currently only accept 0.9, 1.0, 1.1 */
2391 if ( (http_ver
.major
== 0 && http_ver
.minor
!= 9) ||
2392 (http_ver
.major
== 1 && http_ver
.minor
> 1 ) ||
2393 (http_ver
.major
> 1) ) {
2395 clientStreamNode
*node
= context
->getClientReplyContext();
2396 debugs(33, 5, "Unsupported HTTP version discovered. :\n" << HttpParserHdrBuf(hp
));
2397 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2398 assert (repContext
);
2399 repContext
->setReplyToError(ERR_UNSUP_HTTPVERSION
, HTTP_HTTP_VERSION_NOT_SUPPORTED
, method
, http
->uri
, conn
->peer
, NULL
, HttpParserHdrBuf(hp
), NULL
);
2400 assert(context
->http
->out
.offset
== 0);
2401 context
->pullData();
2402 conn
->flags
.readMoreRequests
= false;
2406 /* compile headers */
2407 /* we should skip request line! */
2408 /* XXX should actually know the damned buffer size here */
2409 if (http_ver
.major
>= 1 && !request
->parseHeader(HttpParserHdrBuf(hp
), HttpParserHdrSz(hp
))) {
2410 clientStreamNode
*node
= context
->getClientReplyContext();
2411 debugs(33, 5, "Failed to parse request headers:\n" << HttpParserHdrBuf(hp
));
2412 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2413 assert (repContext
);
2414 repContext
->setReplyToError(ERR_INVALID_REQ
, HTTP_BAD_REQUEST
, method
, http
->uri
, conn
->peer
, NULL
, NULL
, NULL
);
2415 assert(context
->http
->out
.offset
== 0);
2416 context
->pullData();
2417 conn
->flags
.readMoreRequests
= false;
2421 request
->flags
.accelerated
= http
->flags
.accel
;
2422 request
->flags
.ignore_cc
= conn
->port
->ignore_cc
;
2423 request
->flags
.no_direct
= request
->flags
.accelerated
? !conn
->port
->allow_direct
: 0;
2426 * If transparent or interception mode is working clone the transparent and interception flags
2427 * from the port settings to the request.
2429 if (Ip::Interceptor
.InterceptActive()) {
2430 request
->flags
.intercepted
= http
->flags
.intercepted
;
2432 if (Ip::Interceptor
.TransparentActive()) {
2433 request
->flags
.spoof_client_ip
= conn
->port
->spoof_client_ip
;
2436 if (internalCheck(request
->urlpath
.termedBuf())) {
2437 if (internalHostnameIs(request
->GetHost()) &&
2438 request
->port
== getMyPort()) {
2439 http
->flags
.internal
= 1;
2440 } else if (Config
.onoff
.global_internal_static
&& internalStaticCheck(request
->urlpath
.termedBuf())) {
2441 request
->SetHost(internalHostname());
2442 request
->port
= getMyPort();
2443 http
->flags
.internal
= 1;
2447 if (http
->flags
.internal
) {
2448 request
->protocol
= PROTO_HTTP
;
2449 request
->login
[0] = '\0';
2452 request
->flags
.internal
= http
->flags
.internal
;
2453 setLogUri (http
, urlCanonicalClean(request
));
2454 request
->client_addr
= conn
->peer
;
2456 request
->client_eui48
= conn
->peer_eui48
;
2457 request
->client_eui64
= conn
->peer_eui64
;
2459 #if FOLLOW_X_FORWARDED_FOR
2460 request
->indirect_client_addr
= conn
->peer
;
2461 #endif /* FOLLOW_X_FORWARDED_FOR */
2462 request
->my_addr
= conn
->me
;
2463 request
->http_ver
= http_ver
;
2465 tePresent
= request
->header
.has(HDR_TRANSFER_ENCODING
);
2466 deChunked
= conn
->in
.dechunkingState
== ConnStateData::chunkReady
;
2469 request
->setContentLength(conn
->in
.dechunked
.contentSize());
2470 request
->header
.delById(HDR_TRANSFER_ENCODING
);
2471 conn
->finishDechunkingRequest(hp
);
2474 unsupportedTe
= tePresent
&& !deChunked
;
2475 if (!urlCheckRequest(request
) || unsupportedTe
) {
2476 clientStreamNode
*node
= context
->getClientReplyContext();
2477 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2478 assert (repContext
);
2479 repContext
->setReplyToError(ERR_UNSUP_REQ
,
2480 HTTP_NOT_IMPLEMENTED
, request
->method
, NULL
,
2481 conn
->peer
, request
, NULL
, NULL
);
2482 assert(context
->http
->out
.offset
== 0);
2483 context
->pullData();
2484 conn
->flags
.readMoreRequests
= false;
2489 if (!clientIsContentLengthValid(request
)) {
2490 clientStreamNode
*node
= context
->getClientReplyContext();
2491 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2492 assert (repContext
);
2493 repContext
->setReplyToError(ERR_INVALID_REQ
,
2494 HTTP_LENGTH_REQUIRED
, request
->method
, NULL
,
2495 conn
->peer
, request
, NULL
, NULL
);
2496 assert(context
->http
->out
.offset
== 0);
2497 context
->pullData();
2498 conn
->flags
.readMoreRequests
= false;
2502 if (request
->header
.has(HDR_EXPECT
)) {
2505 if (Config
.onoff
.ignore_expect_100
) {
2506 String expect
= request
->header
.getList(HDR_EXPECT
);
2507 if (expect
.caseCmp("100-continue") == 0)
2513 clientStreamNode
*node
= context
->getClientReplyContext();
2514 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2515 assert (repContext
);
2516 repContext
->setReplyToError(ERR_INVALID_REQ
, HTTP_EXPECTATION_FAILED
, request
->method
, http
->uri
, conn
->peer
, request
, NULL
, NULL
);
2517 assert(context
->http
->out
.offset
== 0);
2518 context
->pullData();
2523 http
->request
= HTTPMSGLOCK(request
);
2524 clientSetKeepaliveFlag(http
);
2526 /* If this is a CONNECT, don't schedule a read - ssl.c will handle it */
2527 if (http
->request
->method
== METHOD_CONNECT
)
2528 context
->mayUseConnection(true);
2530 /* Do we expect a request-body? */
2531 if (!context
->mayUseConnection() && request
->content_length
> 0) {
2532 request
->body_pipe
= conn
->expectRequestBody(request
->content_length
);
2534 // consume header early so that body pipe gets just the body
2535 connNoteUseOfBuffer(conn
, http
->req_sz
);
2536 notedUseOfBuffer
= true;
2538 conn
->handleRequestBodyData(); // may comm_close and stop producing
2540 /* Is it too large? */
2542 if (!clientIsRequestBodyValid(request
->content_length
) ||
2543 clientIsRequestBodyTooLargeForPolicy(request
->content_length
)) {
2544 clientStreamNode
*node
= context
->getClientReplyContext();
2545 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2546 assert (repContext
);
2547 repContext
->setReplyToError(ERR_TOO_BIG
,
2548 HTTP_REQUEST_ENTITY_TOO_LARGE
, METHOD_NONE
, NULL
,
2549 conn
->peer
, http
->request
, NULL
, NULL
);
2550 assert(context
->http
->out
.offset
== 0);
2551 context
->pullData();
2555 if (!request
->body_pipe
->productionEnded())
2556 conn
->readSomeData();
2558 context
->mayUseConnection(!request
->body_pipe
->productionEnded());
2561 http
->calloutContext
= new ClientRequestContext(http
);
2566 if (!notedUseOfBuffer
)
2567 connNoteUseOfBuffer(conn
, http
->req_sz
);
2571 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
2572 * to here because calling comm_reset_close() causes http to
2573 * be freed and the above connNoteUseOfBuffer() would hit an
2574 * assertion, not to mention that we were accessing freed memory.
2576 if (http
->request
->flags
.resetTCP() && conn
->fd
> -1) {
2577 debugs(33, 3, HERE
<< "Sending TCP RST on FD " << conn
->fd
);
2578 conn
->flags
.readMoreRequests
= false;
2579 comm_reset_close(conn
->fd
);
2585 connStripBufferWhitespace (ConnStateData
* conn
)
2587 while (conn
->in
.notYetUsed
> 0 && xisspace(conn
->in
.buf
[0])) {
2588 xmemmove(conn
->in
.buf
, conn
->in
.buf
+ 1, conn
->in
.notYetUsed
- 1);
2589 --conn
->in
.notYetUsed
;
2594 connOkToAddRequest(ConnStateData
* conn
)
2596 int result
= conn
->getConcurrentRequestCount() < (Config
.onoff
.pipeline_prefetch
? 2 : 1);
2599 debugs(33, 3, "connOkToAddRequest: FD " << conn
->fd
<<
2600 " max concurrent requests reached");
2601 debugs(33, 5, "connOkToAddRequest: FD " << conn
->fd
<<
2602 " defering new request until one is done");
2611 * Report on the number of bytes of body content that we
2612 * know are yet to be read on this connection.
2615 ConnStateData::bodySizeLeft()
2617 // XXX: this logic will not work for chunked requests with unknown sizes
2619 if (bodyPipe
!= NULL
)
2620 return bodyPipe
->unproducedSize();
2626 * Attempt to parse one or more requests from the input buffer.
2627 * If a request is successfully parsed, even if the next request
2628 * is only partially parsed, it will return TRUE.
2629 * do_next_read is updated to indicate whether a read should be
2633 clientParseRequest(ConnStateData
* conn
, bool &do_next_read
)
2635 HttpRequestMethod method
;
2636 ClientSocketContext
*context
;
2637 bool parsed_req
= false;
2638 HttpVersion http_ver
;
2641 debugs(33, 5, "clientParseRequest: FD " << conn
->fd
<< ": attempting to parse");
2643 while (conn
->in
.notYetUsed
> 0 && conn
->bodySizeLeft() == 0) {
2644 connStripBufferWhitespace (conn
);
2646 /* Don't try to parse if the buffer is empty */
2648 if (conn
->in
.notYetUsed
== 0)
2651 /* Limit the number of concurrent requests to 2 */
2653 if (!connOkToAddRequest(conn
)) {
2657 /* Should not be needed anymore */
2658 /* Terminate the string */
2659 conn
->in
.buf
[conn
->in
.notYetUsed
] = '\0';
2661 /* Begin the parsing */
2662 HttpParserInit(&hp
, conn
->in
.buf
, conn
->in
.notYetUsed
);
2664 /* Process request */
2665 PROF_start(parseHttpRequest
);
2667 context
= parseHttpRequest(conn
, &hp
, &method
, &http_ver
);
2669 PROF_stop(parseHttpRequest
);
2671 /* partial or incomplete request */
2674 if (!connKeepReadingIncompleteRequest(conn
))
2675 connCancelIncompleteRequests(conn
);
2680 /* status -1 or 1 */
2682 debugs(33, 5, "clientParseRequest: FD " << conn
->fd
<< ": parsed a request");
2683 commSetTimeout(conn
->fd
, Config
.Timeout
.lifetime
, clientLifetimeTimeout
,
2686 clientProcessRequest(conn
, &hp
, context
, method
, http_ver
);
2690 if (context
->mayUseConnection()) {
2691 debugs(33, 3, "clientParseRequest: Not reading, as this request may need the connection");
2696 if (!conn
->flags
.readMoreRequests
) {
2697 conn
->flags
.readMoreRequests
= true;
2701 continue; /* while offset > 0 && conn->bodySizeLeft() == 0 */
2703 } /* while offset > 0 && conn->bodySizeLeft() == 0 */
2705 /* XXX where to 'finish' the parsing pass? */
2711 ConnStateData::clientReadRequest(const CommIoCbParams
&io
)
2713 debugs(33,5,HERE
<< "clientReadRequest FD " << io
.fd
<< " size " << io
.size
);
2715 bool do_next_read
= 1; /* the default _is_ to read data! - adrian */
2717 assert (io
.fd
== fd
);
2719 /* Bail out quickly on COMM_ERR_CLOSING - close handlers will tidy up */
2721 if (io
.flag
== COMM_ERR_CLOSING
) {
2722 debugs(33,5, HERE
<< " FD " << fd
<< " closing Bailout.");
2727 * Don't reset the timeout value here. The timeout value will be
2728 * set to Config.Timeout.request by httpAccept() and
2729 * clientWriteComplete(), and should apply to the request as a
2730 * whole, not individual read() calls. Plus, it breaks our
2731 * lame half-close detection
2733 if (connReadWasError(io
.flag
, io
.size
, io
.xerrno
)) {
2734 notifyAllContexts(io
.xerrno
);
2739 if (io
.flag
== COMM_OK
) {
2741 kb_incr(&statCounter
.client_http
.kbytes_in
, io
.size
);
2743 handleReadData(io
.buf
, io
.size
);
2745 /* The above may close the connection under our feets */
2749 } else if (io
.size
== 0) {
2750 debugs(33, 5, "clientReadRequest: FD " << fd
<< " closed?");
2752 if (connFinishedWithConn(io
.size
)) {
2757 /* It might be half-closed, we can't tell */
2758 fd_table
[fd
].flags
.socket_eof
= 1;
2760 commMarkHalfClosed(fd
);
2764 fd_note(fd
, "half-closed");
2766 /* There is one more close check at the end, to detect aborted
2767 * (partial) requests. At this point we can't tell if the request
2770 /* Continue to process previously read data */
2774 /* Process next request */
2775 if (getConcurrentRequestCount() == 0)
2776 fd_note(fd
, "Reading next request");
2778 if (! clientParseRequest(this, do_next_read
)) {
2782 * If the client here is half closed and we failed
2783 * to parse a request, close the connection.
2784 * The above check with connFinishedWithConn() only
2785 * succeeds _if_ the buffer is empty which it won't
2786 * be if we have an incomplete request.
2787 * XXX: This duplicates ClientSocketContext::keepaliveNextRequest
2789 if (getConcurrentRequestCount() == 0 && commIsHalfClosed(fd
)) {
2790 debugs(33, 5, "clientReadRequest: FD " << fd
<< ": half-closed connection, no completed request parsed, connection closing.");
2799 clientAfterReadingRequests(do_next_read
);
2803 * called when new request data has been read from the socket
2806 ConnStateData::handleReadData(char *buf
, size_t size
)
2808 char *current_buf
= in
.addressToReadInto();
2810 if (buf
!= current_buf
)
2811 xmemmove(current_buf
, buf
, size
);
2813 in
.notYetUsed
+= size
;
2815 in
.buf
[in
.notYetUsed
] = '\0'; /* Terminate the string */
2817 // if we are reading a body, stuff data into the body pipe
2818 if (bodyPipe
!= NULL
)
2819 handleRequestBodyData();
2823 * called when new request body data has been buffered in in.buf
2824 * may close the connection if we were closing and piped everything out
2827 ConnStateData::handleRequestBodyData()
2829 assert(bodyPipe
!= NULL
);
2833 #if FUTURE_CODE_TO_SUPPORT_CHUNKED_REQUESTS
2834 // The code below works, in principle, but we cannot do dechunking
2835 // on-the-fly because that would mean sending chunked requests to
2836 // the next hop. Squid lacks logic to determine which servers can
2837 // receive chunk requests. Squid v3.0 code cannot even handle chunked
2838 // responses which we may encourage by sending chunked requests.
2839 // The error generation code probably needs more work.
2840 if (in
.bodyParser
) { // chunked body
2841 debugs(33,5, HERE
<< "handling chunked request body for FD " << fd
);
2842 bool malformedChunks
= false;
2844 MemBuf raw
; // ChunkedCodingParser only works with MemBufs
2845 raw
.init(in
.notYetUsed
, in
.notYetUsed
);
2846 raw
.append(in
.buf
, in
.notYetUsed
);
2847 try { // the parser will throw on errors
2848 const mb_size_t wasContentSize
= raw
.contentSize();
2849 BodyPipeCheckout
bpc(*bodyPipe
);
2850 const bool parsed
= in
.bodyParser
->parse(&raw
, &bpc
.buf
);
2852 putSize
= wasContentSize
- raw
.contentSize();
2855 stopProducingFor(bodyPipe
, true); // this makes bodySize known
2857 // parser needy state must imply body pipe needy state
2858 if (in
.bodyParser
->needsMoreData() &&
2859 !bodyPipe
->mayNeedMoreData())
2860 malformedChunks
= true;
2861 // XXX: if bodyParser->needsMoreSpace, how can we guarantee it?
2863 } catch (...) { // XXX: be more specific
2864 malformedChunks
= true;
2867 if (malformedChunks
) {
2868 if (bodyPipe
!= NULL
)
2869 stopProducingFor(bodyPipe
, false);
2871 ClientSocketContext::Pointer context
= getCurrentContext();
2872 if (!context
->http
->out
.offset
) {
2873 clientStreamNode
*node
= context
->getClientReplyContext();
2874 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2875 assert (repContext
);
2876 repContext
->setReplyToError(ERR_INVALID_REQ
, HTTP_BAD_REQUEST
,
2877 METHOD_NONE
, NULL
, &peer
.sin_addr
,
2879 context
->pullData();
2881 flags
.readMoreRequests
= false;
2882 return; // XXX: is that sufficient to generate an error?
2884 } else // identity encoding
2887 debugs(33,5, HERE
<< "handling plain request body for FD " << fd
);
2888 putSize
= bodyPipe
->putMoreData(in
.buf
, in
.notYetUsed
);
2889 if (!bodyPipe
->mayNeedMoreData()) {
2890 // BodyPipe will clear us automagically when we produced everything
2896 connNoteUseOfBuffer(this, putSize
);
2899 debugs(33,5, HERE
<< "produced entire request body for FD " << fd
);
2902 /* we've finished reading like good clients,
2903 * now do the close that initiateClose initiated.
2905 * XXX: do we have to close? why not check keepalive et.
2907 * XXX: To support chunked requests safely, we need to handle
2908 * the case of an endless request. This if-statement does not,
2909 * because mayNeedMoreData is true if request size is not known.
2917 ConnStateData::noteMoreBodySpaceAvailable(BodyPipe::Pointer
)
2919 handleRequestBodyData();
2923 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer
)
2926 startClosing("body consumer aborted");
2929 /** general lifetime handler for HTTP requests */
2931 ConnStateData::requestTimeout(const CommTimeoutCbParams
&io
)
2933 #if THIS_CONFUSES_PERSISTENT_CONNECTION_AWARE_BROWSERS_AND_USERS
2934 debugs(33, 3, "requestTimeout: FD " << io
.fd
<< ": lifetime is expired.");
2936 if (COMMIO_FD_WRITECB(io
.fd
)->active
) {
2937 /* FIXME: If this code is reinstated, check the conn counters,
2938 * not the fd table state
2941 * Some data has been sent to the client, just close the FD
2944 } else if (nrequests
) {
2946 * assume its a persistent connection; just close it
2953 ClientHttpRequest
**H
;
2954 clientStreamNode
*node
;
2955 ClientHttpRequest
*http
= parseHttpRequestAbort(this, "error:Connection%20lifetime%20expired");
2956 node
= http
->client_stream
.tail
->prev
->data
;
2957 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2958 assert (repContext
);
2959 repContext
->setReplyToError(ERR_LIFETIME_EXP
,
2960 HTTP_REQUEST_TIMEOUT
, METHOD_NONE
, "N/A", &peer
.sin_addr
,
2962 /* No requests can be outstanded */
2963 assert(chr
== NULL
);
2964 /* add to the client request queue */
2966 for (H
= &chr
; *H
; H
= &(*H
)->next
);
2969 clientStreamRead(http
->client_stream
.tail
->data
, http
, 0,
2970 HTTP_REQBUF_SZ
, context
->reqbuf
);
2973 * if we don't close() here, we still need a timeout handler!
2975 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
2976 AsyncCall::Pointer timeoutCall
= asyncCall(33, 5, "ConnStateData::requestTimeout",
2977 TimeoutDialer(this,&ConnStateData::requestTimeout
));
2978 commSetTimeout(io
.fd
, 30, timeoutCall
);
2981 * Aha, but we don't want a read handler!
2983 commSetSelect(io
.fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
2988 * Just close the connection to not confuse browsers
2989 * using persistent connections. Some browsers opens
2990 * an connection and then does not use it until much
2991 * later (presumeably because the request triggering
2992 * the open has already been completed on another
2995 debugs(33, 3, "requestTimeout: FD " << io
.fd
<< ": lifetime is expired.");
3003 clientLifetimeTimeout(int fd
, void *data
)
3005 ClientHttpRequest
*http
= (ClientHttpRequest
*)data
;
3006 debugs(33, 1, "WARNING: Closing client " << " connection due to lifetime timeout");
3007 debugs(33, 1, "\t" << http
->uri
);
3008 http
->al
.http
.timedout
= true;
3013 connStateCreate(const Ip::Address
&peer
, const Ip::Address
&me
, int fd
, http_port_list
*port
)
3015 ConnStateData
*result
= new ConnStateData
;
3017 result
->peer
= peer
;
3018 result
->log_addr
= peer
;
3019 result
->log_addr
.ApplyMask(Config
.Addrs
.client_netmask
.GetCIDR());
3022 result
->in
.buf
= (char *)memAllocBuf(CLIENT_REQ_BUF_SZ
, &result
->in
.allocatedSize
);
3023 result
->port
= cbdataReference(port
);
3025 if (port
->intercepted
|| port
->spoof_client_ip
) {
3026 Ip::Address client
, dst
;
3028 if (Ip::Interceptor
.NatLookup(fd
, me
, peer
, client
, dst
) == 0) {
3029 result
->me
= client
;
3031 result
->transparent(true);
3035 if (port
->disable_pmtu_discovery
!= DISABLE_PMTU_OFF
&&
3036 (result
->transparent() || port
->disable_pmtu_discovery
== DISABLE_PMTU_ALWAYS
)) {
3037 #if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
3038 int i
= IP_PMTUDISC_DONT
;
3039 setsockopt(fd
, SOL_IP
, IP_MTU_DISCOVER
, &i
, sizeof i
);
3043 static int reported
= 0;
3046 debugs(33, 1, "Notice: httpd_accel_no_pmtu_disc not supported on your platform");
3054 result
->flags
.readMoreRequests
= true;
3058 /** Handle a new connection on HTTP socket. */
3060 httpAccept(int sock
, int newfd
, ConnectionDetail
*details
,
3061 comm_err_t flag
, int xerrno
, void *data
)
3063 http_port_list
*s
= (http_port_list
*)data
;
3064 ConnStateData
*connState
= NULL
;
3066 if (flag
!= COMM_OK
) {
3067 debugs(33, 1, "httpAccept: FD " << sock
<< ": accept failure: " << xstrerr(xerrno
));
3071 debugs(33, 4, "httpAccept: FD " << newfd
<< ": accepted");
3072 fd_note(newfd
, "client http connect");
3073 connState
= connStateCreate(&details
->peer
, &details
->me
, newfd
, s
);
3075 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
3076 AsyncCall::Pointer call
= asyncCall(33, 5, "ConnStateData::connStateClosed",
3077 Dialer(connState
, &ConnStateData::connStateClosed
));
3078 comm_add_close_handler(newfd
, call
);
3080 if (Config
.onoff
.log_fqdn
)
3081 fqdncache_gethostbyaddr(details
->peer
, FQDN_LOOKUP_IF_MISS
);
3083 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
3084 AsyncCall::Pointer timeoutCall
= asyncCall(33, 5, "ConnStateData::requestTimeout",
3085 TimeoutDialer(connState
,&ConnStateData::requestTimeout
));
3086 commSetTimeout(newfd
, Config
.Timeout
.read
, timeoutCall
);
3089 if (Ident::TheConfig
.identLookup
) {
3090 ACLFilledChecklist
identChecklist(Ident::TheConfig
.identLookup
, NULL
, NULL
);
3091 identChecklist
.src_addr
= details
->peer
;
3092 identChecklist
.my_addr
= details
->me
;
3093 if (identChecklist
.fastCheck())
3094 Ident::Start(details
->me
, details
->peer
, clientIdentDone
, connState
);
3099 if (Eui::TheConfig
.euiLookup
) {
3100 if (details
->peer
.IsIPv4()) {
3101 connState
->peer_eui48
.lookup(details
->peer
);
3102 } else if (details
->peer
.IsIPv6()) {
3103 connState
->peer_eui64
.lookup(details
->peer
);
3108 if (s
->tcp_keepalive
.enabled
) {
3109 commSetTcpKeepalive(newfd
, s
->tcp_keepalive
.idle
, s
->tcp_keepalive
.interval
, s
->tcp_keepalive
.timeout
);
3112 connState
->readSomeData();
3114 clientdbEstablished(details
->peer
, 1);
3116 incoming_sockets_accepted
++;
3121 /** Create SSL connection structure and update fd_table */
3123 httpsCreate(int newfd
, ConnectionDetail
*details
, SSL_CTX
*sslContext
)
3125 SSL
*ssl
= SSL_new(sslContext
);
3128 const int ssl_error
= ERR_get_error();
3129 debugs(83, 1, "httpsAccept: Error allocating handle: " << ERR_error_string(ssl_error
, NULL
) );
3134 SSL_set_fd(ssl
, newfd
);
3135 fd_table
[newfd
].ssl
= ssl
;
3136 fd_table
[newfd
].read_method
= &ssl_read_method
;
3137 fd_table
[newfd
].write_method
= &ssl_write_method
;
3139 debugs(33, 5, "httpsCreate: will negotate SSL on FD " << newfd
);
3140 fd_note(newfd
, "client https start");
3145 /** negotiate an SSL connection */
3147 clientNegotiateSSL(int fd
, void *data
)
3149 ConnStateData
*conn
= (ConnStateData
*)data
;
3151 SSL
*ssl
= fd_table
[fd
].ssl
;
3154 if ((ret
= SSL_accept(ssl
)) <= 0) {
3155 int ssl_error
= SSL_get_error(ssl
, ret
);
3157 switch (ssl_error
) {
3159 case SSL_ERROR_WANT_READ
:
3160 commSetSelect(fd
, COMM_SELECT_READ
, clientNegotiateSSL
, conn
, 0);
3163 case SSL_ERROR_WANT_WRITE
:
3164 commSetSelect(fd
, COMM_SELECT_WRITE
, clientNegotiateSSL
, conn
, 0);
3167 case SSL_ERROR_SYSCALL
:
3170 debugs(83, 2, "clientNegotiateSSL: Error negotiating SSL connection on FD " << fd
<< ": Aborted by client");
3176 if (errno
== ECONNRESET
)
3179 debugs(83, hard
? 1 : 2, "clientNegotiateSSL: Error negotiating SSL connection on FD " <<
3180 fd
<< ": " << strerror(errno
) << " (" << errno
<< ")");
3187 case SSL_ERROR_ZERO_RETURN
:
3188 debugs(83, 1, "clientNegotiateSSL: Error negotiating SSL connection on FD " << fd
<< ": Closed by client");
3193 debugs(83, 1, "clientNegotiateSSL: Error negotiating SSL connection on FD " <<
3194 fd
<< ": " << ERR_error_string(ERR_get_error(), NULL
) <<
3195 " (" << ssl_error
<< "/" << ret
<< ")");
3203 if (SSL_session_reused(ssl
)) {
3204 debugs(83, 2, "clientNegotiateSSL: Session " << SSL_get_session(ssl
) <<
3205 " reused on FD " << fd
<< " (" << fd_table
[fd
].ipaddr
<< ":" << (int)fd_table
[fd
].remote_port
<< ")");
3207 if (do_debug(83, 4)) {
3208 /* Write out the SSL session details.. actually the call below, but
3209 * OpenSSL headers do strange typecasts confusing GCC.. */
3210 /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
3211 #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
3212 PEM_ASN1_write((i2d_of_void
*)i2d_SSL_SESSION
, PEM_STRING_SSL_SESSION
, debug_log
, (char *)SSL_get_session(ssl
), NULL
,NULL
,0,NULL
,NULL
);
3214 #elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)
3216 /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
3217 * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
3218 * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
3219 * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
3220 * Because there are two possible usable cast, if you get an error here, try the other
3221 * commented line. */
3223 PEM_ASN1_write((int(*)())i2d_SSL_SESSION
, PEM_STRING_SSL_SESSION
, debug_log
, (char *)SSL_get_session(ssl
), NULL
,NULL
,0,NULL
,NULL
);
3224 /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION, debug_log, (char *)SSL_get_session(ssl), NULL,NULL,0,NULL,NULL); */
3228 debugs(83, 4, "With " OPENSSL_VERSION_TEXT
", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source." );
3231 /* Note: This does not automatically fflush the log file.. */
3234 debugs(83, 2, "clientNegotiateSSL: New session " <<
3235 SSL_get_session(ssl
) << " on FD " << fd
<< " (" <<
3236 fd_table
[fd
].ipaddr
<< ":" << (int)fd_table
[fd
].remote_port
<<
3240 debugs(83, 3, "clientNegotiateSSL: FD " << fd
<< " negotiated cipher " <<
3241 SSL_get_cipher(ssl
));
3243 client_cert
= SSL_get_peer_certificate(ssl
);
3245 if (client_cert
!= NULL
) {
3246 debugs(83, 3, "clientNegotiateSSL: FD " << fd
<<
3247 " client certificate: subject: " <<
3248 X509_NAME_oneline(X509_get_subject_name(client_cert
), 0, 0));
3250 debugs(83, 3, "clientNegotiateSSL: FD " << fd
<<
3251 " client certificate: issuer: " <<
3252 X509_NAME_oneline(X509_get_issuer_name(client_cert
), 0, 0));
3255 X509_free(client_cert
);
3257 debugs(83, 5, "clientNegotiateSSL: FD " << fd
<<
3258 " has no certificate.");
3261 conn
->readSomeData();
3264 /** handle a new HTTPS connection */
3266 httpsAccept(int sock
, int newfd
, ConnectionDetail
*details
,
3267 comm_err_t flag
, int xerrno
, void *data
)
3269 https_port_list
*s
= (https_port_list
*)data
;
3270 SSL_CTX
*sslContext
= s
->sslContext
;
3272 if (flag
!= COMM_OK
) {
3274 debugs(33, 1, "httpsAccept: FD " << sock
<< ": accept failure: " << xstrerr(xerrno
));
3279 if (!(ssl
= httpsCreate(newfd
, details
, sslContext
)))
3282 debugs(33, 5, "httpsAccept: FD " << newfd
<< " accepted, starting SSL negotiation.");
3283 fd_note(newfd
, "client https connect");
3284 ConnStateData
*connState
= connStateCreate(details
->peer
, details
->me
,
3286 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
3287 AsyncCall::Pointer call
= asyncCall(33, 5, "ConnStateData::connStateClosed",
3288 Dialer(connState
, &ConnStateData::connStateClosed
));
3289 comm_add_close_handler(newfd
, call
);
3291 if (Config
.onoff
.log_fqdn
)
3292 fqdncache_gethostbyaddr(details
->peer
, FQDN_LOOKUP_IF_MISS
);
3294 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
3295 AsyncCall::Pointer timeoutCall
= asyncCall(33, 5, "ConnStateData::requestTimeout",
3296 TimeoutDialer(connState
,&ConnStateData::requestTimeout
));
3297 commSetTimeout(newfd
, Config
.Timeout
.request
, timeoutCall
);
3300 if (Ident::TheConfig
.identLookup
) {
3301 ACLFilledChecklist
identChecklist(Ident::TheConfig
.identLookup
, NULL
, NULL
);
3302 identChecklist
.src_addr
= details
->peer
;
3303 identChecklist
.my_addr
= details
->me
;
3304 if (identChecklist
.fastCheck())
3305 Ident::Start(details
->me
, details
->peer
, clientIdentDone
, connState
);
3309 if (s
->http
.tcp_keepalive
.enabled
) {
3310 commSetTcpKeepalive(newfd
, s
->http
.tcp_keepalive
.idle
, s
->http
.tcp_keepalive
.interval
, s
->http
.tcp_keepalive
.timeout
);
3313 commSetSelect(newfd
, COMM_SELECT_READ
, clientNegotiateSSL
, connState
, 0);
3315 clientdbEstablished(details
->peer
, 1);
3317 incoming_sockets_accepted
++;
3321 ConnStateData::switchToHttps()
3323 assert(!switchedToHttps_
);
3325 //HTTPMSGLOCK(currentobject->http->request);
3326 assert(areAllContextsForThisConnection());
3328 //currentobject->connIsFinished();
3330 debugs(33, 5, HERE
<< "converting FD " << fd
<< " to SSL");
3332 // fake a ConnectionDetail object; XXX: make ConnState a ConnectionDetail?
3333 ConnectionDetail detail
;
3337 SSL_CTX
*sslContext
= port
->sslContext
;
3339 if (!(ssl
= httpsCreate(fd
, &detail
, sslContext
)))
3342 // commSetTimeout() was called for this request before we switched.
3344 // Disable the client read handler until peer selection is complete
3345 commSetSelect(fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
3347 commSetSelect(fd
, COMM_SELECT_READ
, clientNegotiateSSL
, this, 0);
3349 switchedToHttps_
= true;
3353 #endif /* USE_SSL */
// Opens one listening socket per configured http_port line and registers
// httpAccept() as the accept callback for each. Also warns about common
// misconfigurations (too many ports, SslBump without a working SSL context).
// NOTE(review): fragmented excerpt — embedded integers are original line
// numbers and gaps mark elided lines (opening braces, `continue`s, the
// fd-validity check after comm_open_listener, bumpCount++, etc.).
3357 clientHttpConnectionsOpen(void)
3359 http_port_list
*s
= NULL
;
3362 int bumpCount
= 0; // counts http_ports with sslBump option
// Walk the singly-linked list of configured http_port entries.
3365 for (s
= Config
.Sockaddr
.http
; s
; s
= s
->next
) {
// Hard cap on the number of listening sockets Squid tracks.
3366 if (MAXHTTPPORTS
== NHttpSockets
) {
3367 debugs(1, 1, "WARNING: You have too many 'http_port' lines.");
3368 debugs(1, 1, "         The limit is " << MAXHTTPPORTS
);
// sslBump requested but SSL init failed for this port: warn and
// (in an elided line) presumably skip bumping — TODO confirm.
3373 if (s
->sslBump
&& s
->sslContext
== NULL
) {
3374 debugs(1, 1, "Will not bump SSL at http_port " <<
3375 s
->http
.s
<< " due to SSL initialization failure.");
3382 /* AYJ: 2009-12-27: bit bumpy. new ListenStateData(...) should be doing all the Comm:: stuff ... */
// TPROXY-style spoofing ports need the COMM_TRANSPARENT flag on the
// listening socket; plain ports do not.
3386 if (s
->spoof_client_ip
) {
3387 fd
= comm_open_listener(SOCK_STREAM
, IPPROTO_TCP
, s
->s
, (COMM_NONBLOCKING
|COMM_TRANSPARENT
), "HTTP Socket");
// (elided `else` branch follows — plain non-transparent listener)
3389 fd
= comm_open_listener(SOCK_STREAM
, IPPROTO_TCP
, s
->s
, COMM_NONBLOCKING
, "HTTP Socket");
// Register the async accept handler for this listening socket.
3397 AsyncCall::Pointer call
= commCbCall(5,5, "SomeCommAcceptHandler(httpAccept)",
3398 CommAcceptCbPtrFun(httpAccept
, s
));
3400 s
->listener
= new Comm::ListenStateData(fd
, call
, true);
3402 debugs(1, 1, "Accepting " <<
3403 (s
->intercepted
? " intercepted" : "") <<
3404 (s
->spoof_client_ip
? " spoofing" : "") <<
3405 (s
->sslBump
? " bumpy" : "") <<
3406 (s
->accel
? " accelerated" : "")
3407 << " HTTP connections at " << s
->s
3408 << ", FD " << fd
<< "." );
3410 HttpSockets
[NHttpSockets
++] = fd
;
// SslBump ports without an ssl_bump ACL will never bump anything.
3414 if (bumpCount
&& !Config
.accessList
.ssl_bump
)
3415 debugs(33, 1, "WARNING: http_port(s) with SslBump found, but no " <<
3416 std::endl
<< "\tssl_bump ACL configured. No requests will be " <<
// Opens one listening socket per configured https_port line and registers
// httpsAccept() as the accept callback. Mirrors clientHttpConnectionsOpen()
// but skips ports whose SSL context failed to initialize.
// NOTE(review): fragmented excerpt — integer prefixes are original line
// numbers; gaps mark elided lines (braces, `continue`s, fd checks, the
// extra comm_open_listener arguments at 3443-3444).
3423 clientHttpsConnectionsOpen(void)
// https_port entries chain through the embedded http list node.
3428 for (s
= Config
.Sockaddr
.https
; s
; s
= (https_port_list
*)s
->http
.next
) {
3429 if (MAXHTTPPORTS
== NHttpSockets
) {
3430 debugs(1, 1, "Ignoring 'https_port' lines exceeding the limit.");
3431 debugs(1, 1, "The limit is " << MAXHTTPPORTS
<< " HTTPS ports.");
// Unlike http_port+SslBump, an https_port without a working SSL
// context is unusable and is skipped entirely.
3435 if (s
->sslContext
== NULL
) {
3436 debugs(1, 1, "Ignoring https_port " << s
->http
.s
<<
3437 " due to SSL initialization failure.");
// (arguments on elided lines 3443-3444: protocol and address)
3442 fd
= comm_open_listener(SOCK_STREAM
,
3445 COMM_NONBLOCKING
, "HTTPS Socket");
3451 AsyncCall::Pointer call
= commCbCall(5,5, "SomeCommAcceptHandler(httpsAccept)",
3452 CommAcceptCbPtrFun(httpsAccept
, s
));
3454 s
->listener
= new Comm::ListenStateData(fd
, call
, true);
3456 debugs(1, 1, "Accepting HTTPS connections at " << s
->http
.s
<< ", FD " << fd
<< ".");
3458 HttpSockets
[NHttpSockets
++] = fd
;
// Entry point called at (re)configuration time: opens all HTTP and HTTPS
// listening sockets. Fatal if not a single port could be opened.
// NOTE(review): the elided lines around 3468/3470 presumably guard the
// HTTPS call with #if USE_SSL (see the #endif at original 3353) — confirm
// against the full source.
3465 clientOpenListenSockets(void)
3467 clientHttpConnectionsOpen();
3469 clientHttpsConnectionsOpen();
// A proxy with no listening ports cannot function; abort startup.
3472 if (NHttpSockets
< 1)
3473 fatal("Cannot open HTTP Port");
// Closes every HTTP and HTTPS listening socket (the per-port listener
// objects) and invalidates the global HttpSockets bookkeeping array.
// NOTE(review): fragmented excerpt — the actual close/delete of
// s->listener and the NHttpSockets reset are on elided lines.
3477 clientHttpConnectionsClose(void)
// First pass: http_port listeners.
3479 for (http_port_list
*s
= Config
.Sockaddr
.http
; s
; s
= s
->next
) {
3481 debugs(1, 1, "FD " << s
->listener
->fd
<< " Closing HTTP connection");
// Second pass: https_port listeners.
3488 for (http_port_list
*s
= Config
.Sockaddr
.https
; s
; s
= s
->next
) {
3490 debugs(1, 1, "FD " << s
->listener
->fd
<< " Closing HTTPS connection");
3497 // TODO see if we can drop HttpSockets array entirely */
// Mark every tracked socket slot as closed.
3498 for (int i
= 0; i
< NHttpSockets
; i
++) {
3499 HttpSockets
[i
] = -1;
// Decides whether a cached StoreEntry with a Vary header matches this
// request: computes the request's vary key (httpMakeVaryMark) and compares
// it with the key stored on the entry's MemObject. Used during cache hit
// evaluation to pick VARY_MATCH / VARY_OTHER / VARY_CANCEL outcomes.
// NOTE(review): fragmented excerpt — the return statements and most
// if/else scaffolding (original lines 3511-3515, 3522-3529, 3534-3554,
// 3560-3565) are elided; the visible branches are NOT consecutive code.
3506 varyEvaluateMatch(StoreEntry
* entry
, HttpRequest
* request
)
// vary: the request's previously computed vary key, if any.
3508 const char *vary
= request
->vary_headers
;
3509 int has_vary
= entry
->getReply()->header
.has(HDR_VARY
);
// Squid's private X-Accelerator-Vary extension also marks variance.
3510 #if X_ACCELERATOR_VARY
3513 entry
->getReply()->header
.has(HDR_X_ACCELERATOR_VARY
);
// Second-attempt lookup found an entry that is not actually a Vary
// object: inconsistent state, drop the stale vary key.
3516 if (!has_vary
|| !entry
->mem_obj
->vary_headers
) {
3518 /* Oops... something odd is going on here.. */
3519 debugs(33, 1, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
3520 entry
->mem_obj
->url
<< "' '" << vary
<< "'");
3521 safe_free(request
->vary_headers
);
3526 /* This is not a varying object */
3530 /* virtual "vary" object found. Calculate the vary key and
3531 * continue the search
// Compute and remember the vary key for this request.
3533 vary
= httpMakeVaryMark(request
, entry
->getReply());
3536 request
->vary_headers
= xstrdup(vary
);
3539 /* Ouch.. we cannot handle this kind of variance */
3540 /* XXX This cannot really happen, but just to be complete */
// (parallel branch: request had no stored key yet — recompute)
3545 vary
= httpMakeVaryMark(request
, entry
->getReply());
3548 request
->vary_headers
= xstrdup(vary
);
3552 /* Ouch.. we cannot handle this kind of variance */
3553 /* XXX This cannot really happen, but just to be complete */
// Keys identical on a second attempt: the search is looping, bail.
3555 } else if (strcmp(vary
, entry
->mem_obj
->vary_headers
) == 0) {
3558 /* Oops.. we have already been here and still haven't
3559 * found the requested variant. Bail out
3561 debugs(33, 1, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
3562 entry
->mem_obj
->url
<< "' '" << vary
<< "'");
// Factory for a heap-allocated ACLFilledChecklist bound to a client
// request: seeds it with the ACL, the HttpRequest, and the client's
// ident string (dash_str when no valid connection is available).
// Caller owns the returned checklist.
3568 ACLFilledChecklist
*
3569 clientAclChecklistCreate(const acl_access
* acl
, ClientHttpRequest
* http
)
3571 ConnStateData
* conn
= http
->getConn();
// Use the connection's rfc931 (ident) result only when the cbdata
// reference is still valid; otherwise fall back to "-".
3572 ACLFilledChecklist
*ch
= new ACLFilledChecklist(acl
, http
->request
,
3573 cbdataReferenceValid(conn
) && conn
!= NULL
? conn
->rfc931
: dash_str
);
3576 * hack for ident ACL. It needs to get full addresses, and a place to store
3577 * the ident result on persistent connections...
3579 /* connection oriented auth also needs these two lines for it's operation. */
3581 * Internal requests do not have a connection reference, because: A) their
3582 * byte count may be transformed before being applied to an outbound
3583 * connection B) they are internal - any limiting on them should be done on
// Attach the connection so ident/auth ACLs can reach addresses and
// per-connection state.
3588 ch
->conn(conn
); /* unreferenced in FilledCheckList.cc */
// Registers ConnStateData with Squid's cbdata (callback-data) lifetime
// tracking system; required for cbdataReference*/asyncCall usage below.
3593 CBDATA_CLASS_INIT(ConnStateData
);
// Constructor: starts as a non-transparent, non-reading, non-closing
// AsyncJob and clears the server-connection pinning state.
3595 ConnStateData::ConnStateData() :AsyncJob("ConnStateData"), transparent_ (false), reading_ (false), closing_ (false)
// No server connection is pinned to this client connection yet.
3598 pinning
.pinned
= false;
3599 pinning
.auth
= false;
// Accessor: whether this client connection was transparently intercepted.
3603 ConnStateData::transparent() const
3605 return transparent_
;
// Mutator for the transparent-interception flag.
// (Parameter is oddly named "anInt" although it is a bool.)
3609 ConnStateData::transparent(bool const anInt
)
3611 transparent_
= anInt
;
// Accessor for the "a comm read is in progress" flag (body elided in
// this excerpt; presumably `return reading_;` — confirm in full source).
3615 ConnStateData::reading() const
// Mutator: toggles the reading flag; asserting inequality catches
// redundant set/clear calls, which would indicate a state bug.
3621 ConnStateData::reading(bool const newBool
)
3623 assert (reading() != newBool
);
// Creates the BodyPipe that will carry the expected request body of the
// given size from this connection to its consumer (e.g. the server side).
// NOTE(review): a trailing statement (likely returning the pipe) appears
// to be elided — confirm against the full source.
3629 ConnStateData::expectRequestBody(int64_t size
)
3631 bodyPipe
= new BodyPipe(this);
3632 bodyPipe
->setBodySize(size
);
// closing(): accessor for the deferred-close flag (body elided here).
3637 ConnStateData::closing() const
3643 * Called by ClientSocketContext to give the connection a chance to read
3644 * the entire body before closing the socket.
// startClosing(): enter "drain the request body, then close" mode.
// Requires an active bodyPipe with bytes still outstanding.
3647 ConnStateData::startClosing(const char *reason
)
3649 debugs(33, 5, HERE
<< "startClosing " << this << " for " << reason
);
3653 assert(bodyPipe
!= NULL
);
3654 assert(bodySizeLeft() > 0);
3656 // We do not have to abort the body pipeline because we are going to
3657 // read the entire body anyway.
3658 // Perhaps an ICAP server wants to log the complete request.
3660 // If a consumer abort have caused this closing, we may get stuck
3661 // as nobody is consuming our data. Allow auto-consumption.
3662 bodyPipe
->enableAutoConsumption();
3665 // initialize dechunking state
// Begins Transfer-Encoding: chunked decoding of the request body:
// allocates the chunk parser and the chunked/dechunked staging buffers,
// and positions chunkedSeen just past the already-parsed headers.
3667 ConnStateData::startDechunkingRequest(HttpParser
*hp
)
3669 debugs(33, 5, HERE
<< "start dechunking at " << HttpParserRequestLen(hp
));
// Must not already be dechunking (state machine starts at chunkUnknown).
3670 assert(in
.dechunkingState
== chunkUnknown
);
3671 assert(!in
.bodyParser
);
3672 in
.bodyParser
= new ChunkedCodingParser
;
3673 in
.chunkedSeen
= HttpParserRequestLen(hp
); // skip headers when dechunking
3674 in
.chunked
.init(); // TODO: should we have a smaller-than-default limit?
3675 in
.dechunked
.init();
// Enter the active-parsing state consumed by parseRequestChunks().
3676 in
.dechunkingState
= chunkParsing
;
3679 // put parsed content into input buffer and clean up
// Splices the fully dechunked body (and any pipelined leftovers) back
// into the connection's input buffer, directly after the request headers,
// then tears down the dechunking state. Only valid once parseRequestChunks
// has reached chunkReady.
3681 ConnStateData::finishDechunkingRequest(HttpParser
*hp
)
3683 debugs(33, 5, HERE
<< "finish dechunking; content: " << in
.dechunked
.contentSize());
3685 assert(in
.dechunkingState
== chunkReady
);
3686 assert(in
.bodyParser
);
// The parser is per-body; free it now that all chunks are decoded.
3687 delete in
.bodyParser
;
3688 in
.bodyParser
= NULL
;
3690 const mb_size_t headerSize
= HttpParserRequestLen(hp
);
3692 // dechunking cannot make data bigger
3693 assert(headerSize
+ in
.dechunked
.contentSize() + in
.chunked
.contentSize()
3694 <= static_cast<mb_size_t
>(in
.notYetUsed
));
3695 assert(in
.notYetUsed
<= in
.allocatedSize
);
3697 // copy dechunked content
// Overwrite the raw chunked bytes in-place; safe because the decoded
// form is never larger than the encoded form (asserted above).
3698 char *end
= in
.buf
+ headerSize
;
3699 xmemmove(end
, in
.dechunked
.content(), in
.dechunked
.contentSize());
3700 end
+= in
.dechunked
.contentSize();
3702 // copy post-chunks leftovers, if any, caused by request pipelining?
3703 if (in
.chunked
.contentSize()) {
3704 xmemmove(end
, in
.chunked
.content(), in
.chunked
.contentSize());
3705 end
+= in
.chunked
.contentSize();
// Input buffer now holds: headers + decoded body + leftovers.
3708 in
.notYetUsed
= end
- in
.buf
;
3711 in
.dechunked
.clean();
// Reset so a later request on this connection can dechunk again.
3712 in
.dechunkingState
= chunkUnknown
;
3715 // parse newly read request chunks and buffer them for finishDechunkingRequest
3716 // returns true iff needs more data
// Incremental step of the chunked-body state machine: feeds the bytes
// received since the last call (in.buf[chunkedSeen..notYetUsed)) into the
// ChunkedCodingParser, enforcing chunked_request_body_max_size. Moves
// dechunkingState to chunkReady on completion or chunkError on any
// failure/overflow. Elided lines (3735-3737, 3746-3747, 3756, 3759)
// carry the early exits between the visible branches.
3718 ConnStateData::parseRequestChunks(HttpParser
*)
3720 debugs(33,5, HERE
<< "parsing chunked request body at " <<
3721 in
.chunkedSeen
<< " < " << in
.notYetUsed
);
3722 assert(in
.bodyParser
);
3723 assert(in
.dechunkingState
== chunkParsing
);
3725 assert(in
.chunkedSeen
<= in
.notYetUsed
);
// fresh: bytes newly read and not yet offered to the chunk parser.
3726 const mb_size_t fresh
= in
.notYetUsed
- in
.chunkedSeen
;
3728 // be safe: count some chunked coding metadata towards the total body size
// Overflow guard: reject bodies exceeding the configured limit.
3729 if (fresh
+ in
.dechunked
.contentSize() > Config
.maxChunkedRequestBodySize
) {
3730 debugs(33,3, HERE
<< "chunked body (" << fresh
<< " + " <<
3731 in
.dechunked
.contentSize() << " may exceed " <<
3732 "chunked_request_body_max_size=" <<
3733 Config
.maxChunkedRequestBodySize
);
3734 in
.dechunkingState
= chunkError
;
// Sanity guard: the staging buffer must be able to hold the fresh bytes.
3738 if (fresh
> in
.chunked
.potentialSpaceSize()) {
3739 // should not happen if Config.maxChunkedRequestBodySize is reasonable
3740 debugs(33,1, HERE
<< "request_body_max_size exceeds chunked buffer " <<
3741 "size: " << fresh
<< " + " << in
.chunked
.contentSize() << " > " <<
3742 in
.chunked
.potentialSpaceSize() << " with " <<
3743 "chunked_request_body_max_size=" <<
3744 Config
.maxChunkedRequestBodySize
);
3745 in
.dechunkingState
= chunkError
;
// Stage the fresh bytes and advance the high-water mark.
3748 in
.chunked
.append(in
.buf
+ in
.chunkedSeen
, fresh
);
3749 in
.chunkedSeen
+= fresh
;
3751 try { // the parser will throw on errors
3752 if (in
.bodyParser
->parse(&in
.chunked
, &in
.dechunked
))
3753 in
.dechunkingState
= chunkReady
; // successfully parsed all chunks
3755 return true; // need more, keep the same state
// (elided catch clause begins here)
3757 debugs(33,3, HERE
<< "chunk parsing error");
3758 in
.dechunkingState
= chunkError
;
3760 return false; // error, unsupported, or done
// Returns the write cursor for the next comm read: first unused byte of
// the input buffer.
3764 ConnStateData::In::addressToReadInto() const
3766 return buf
+ notYetUsed
;
// Default-constructs the input-buffer state: no buffer allocated yet,
// no chunk parser, dechunking machine in its initial chunkUnknown state.
3769 ConnStateData::In::In() : bodyParser(NULL
),
3770 buf (NULL
), notYetUsed (0), allocatedSize (0),
3771 dechunkingState(ConnStateData::chunkUnknown
)
// Releases the input buffer (memFreeBuf pairs with Squid's memAllocBuf
// pool) and any leftover chunk parser.
// NOTE(review): the elided lines 3776/3778 are presumably the null/zero
// guards (`if (allocatedSize)` / `if (bodyParser)`) — confirm in source.
3774 ConnStateData::In::~In()
3777 memFreeBuf(allocatedSize
, buf
);
3779 delete bodyParser
; // TODO: pool
3782 /* This is a comm call normally scheduled by comm_close() */
// Close-handler for the pinned server-side connection: drops the peer
// cbdata reference and the saved hostname, but deliberately keeps
// pinning.pinned set (see the NOTE below) so callers can tell the pinned
// server went away.
3784 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams
&io
)
3788 cbdataReferenceDone(pinning
.peer
);
3790 safe_free(pinning
.host
);
3791 /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
3792 * connection has gone away */
// Pins the server-side socket pinning_fd to this client connection so
// subsequent requests (e.g. NTLM connection-oriented auth, CONNECT) reuse
// exactly this server connection. Records host/port/peer for later
// validation and installs a close handler to detect server-side closure.
// NOTE(review): fragmented excerpt — elided lines include the body of the
// `if (pinning.fd == pinning_fd)` fast path and the unpin/cleanup calls.
3795 void ConnStateData::pinConnection(int pinning_fd
, HttpRequest
*request
, struct peer
*aPeer
, bool auth
)
3798 char desc
[FD_DESC_SZ
];
// Already pinned to this very fd: nothing to do (elided branch body).
3800 if (pinning
.fd
== pinning_fd
)
// Pinned to a different server connection: close the old one first.
3802 else if (pinning
.fd
!= -1)
3803 comm_close(pinning
.fd
);
3806 safe_free(pinning
.host
);
3808 pinning
.fd
= pinning_fd
;
3809 pinning
.host
= xstrdup(request
->GetHost());
3810 pinning
.port
= request
->port
;
3811 pinning
.pinned
= true;
// Replace any previous peer reference (cbdata refcounted).
3813 cbdataReferenceDone(pinning
.peer
);
3815 pinning
.peer
= cbdataReference(aPeer
);
3816 pinning
.auth
= auth
;
// Human-readable fd description for cachemgr/debugging output.
// (`f` here appears to be the client fde/connection record — the line
// declaring it is elided; confirm in full source.)
3818 snprintf(desc
, FD_DESC_SZ
, "%s pinned connection for %s:%d (%d)",
3819 (auth
|| !aPeer
) ? request
->GetHost() : aPeer
->name
, f
->ipaddr
, (int) f
->remote_port
, fd
);
3820 fd_note(pinning_fd
, desc
);
// Get notified if the pinned server connection closes underneath us.
3822 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
3823 pinning
.closeHandler
= asyncCall(33, 5, "ConnStateData::clientPinnedConnectionClosed",
3824 Dialer(this, &ConnStateData::clientPinnedConnectionClosed
));
3825 comm_add_close_handler(pinning_fd
, pinning
.closeHandler
);
// Checks that the pinned server connection is still appropriate for this
// request/peer: host (when auth-pinned), port, peer cbdata validity, and
// peer identity must all match. On any mismatch the pinning info is
// cleared and the server socket closed; the elided lines carry the
// success/failure returns (returns the pinned fd or -1 per Squid
// convention — confirm in full source).
3829 int ConnStateData::validatePinnedConnection(HttpRequest
*request
, const struct peer
*aPeer
)
// Auth-pinned connections are bound to one origin host.
3835 if (pinning
.auth
&& request
&& strcasecmp(pinning
.host
, request
->GetHost()) != 0) {
3838 if (request
&& pinning
.port
!= request
->port
) {
// The pinned peer object may have been reconfigured away.
3841 if (pinning
.peer
&& !cbdataReferenceValid(pinning
.peer
)) {
3844 if (aPeer
!= pinning
.peer
) {
// Failure path: remember the fd, wipe pinning state, then close the
// now-untrusted server-side socket.
3849 int pinning_fd
=pinning
.fd
;
3850 /* The pinning info is not safe, remove any pinning info*/
3853 /* also close the server side socket, we should not use it for invalid/unauthenticated
3856 comm_close(pinning_fd
);
3863 void ConnStateData::unpinConnection()
3866 cbdataReferenceDone(pinning
.peer
);
3868 if (pinning
.closeHandler
!= NULL
) {
3869 comm_remove_close_handler(pinning
.fd
, pinning
.closeHandler
);
3870 pinning
.closeHandler
= NULL
;
3873 safe_free(pinning
.host
);