/*
 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 11    Hypertext Transfer Protocol (HTTP) */

/*
 * Anonymizing patch by lutz@as-node.jena.thur.de
 * have a look into http-anon.c to get more information.
 */
17 #include "acl/FilledChecklist.h"
18 #include "base/AsyncJobCalls.h"
19 #include "base/TextException.h"
21 #include "CachePeer.h"
22 #include "ChunkedCodingParser.h"
23 #include "client_side.h"
24 #include "comm/Connection.h"
25 #include "comm/Write.h"
26 #include "err_detail_type.h"
27 #include "errorpage.h"
32 #include "HttpControlMsg.h"
33 #include "HttpHdrCc.h"
34 #include "HttpHdrContRange.h"
35 #include "HttpHdrSc.h"
36 #include "HttpHdrScTarget.h"
37 #include "HttpHeaderTools.h"
38 #include "HttpReply.h"
39 #include "HttpRequest.h"
40 #include "HttpStateFlags.h"
41 #include "log/access_log.h"
43 #include "MemObject.h"
44 #include "mime_header.h"
45 #include "neighbors.h"
46 #include "peer_proxy_negotiate_auth.h"
47 #include "profiler/Profiler.h"
49 #include "RefreshPattern.h"
51 #include "SquidConfig.h"
52 #include "SquidTime.h"
53 #include "StatCounters.h"
61 #include "auth/UserRequest.h"
64 #include "DelayPools.h"
/// Opens a try block; pair with SQUID_EXIT_THROWING_CODE(status).
#define SQUID_ENTER_THROWING_CODE() try {
/// Closes a SQUID_ENTER_THROWING_CODE() block: logs any std::exception
/// and records the failure in the caller-supplied boolean status lvalue.
/// NOTE(review): extraction dropped the continuation lines; restored here.
#define SQUID_EXIT_THROWING_CODE(status) \
    } \
    catch (const std::exception &e) { \
        debugs (11, 1, "Exception error:" << e.what()); \
        status = false; \
    }
// Enables cbdata reference counting for HttpStateData (used by cbdataReference*).
CBDATA_CLASS_INIT(HttpStateData);

// Canonical HTTP line terminator used when composing requests.
static const char *const crlf = "\r\n";

// Evicts previously cached public entries invalidated by an incoming response.
static void httpMaybeRemovePublic(StoreEntry *, Http::StatusCode);

// Copies (and possibly filters/transforms) one client request header entry
// into the outgoing upstream request header.
static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request,
        HttpHeader * hdr_out, const int we_do_ranges, const HttpStateFlags &);

//Declared in HttpHeaderTools.cc
void httpHdrAdd(HttpHeader *heads, HttpRequest *request, const AccessLogEntryPointer &al, HeaderWithAclList &headers_add);
86 HttpStateData::HttpStateData(FwdState
*theFwdState
) : AsyncJob("HttpStateData"), Client(theFwdState
),
87 lastChunk(0), header_bytes_read(0), reply_bytes_read(0),
88 body_bytes_truncated(0), httpChunkDecoder(NULL
)
90 debugs(11,5,HERE
<< "HttpStateData " << this << " created");
91 ignoreCacheControl
= false;
92 surrogateNoStore
= false;
93 serverConnection
= fwd
->serverConnection();
95 readBuf
->init(16*1024, 256*1024);
97 // reset peer response time stats for %<pt
98 request
->hier
.peer_http_request_sent
.tv_sec
= 0;
99 request
->hier
.peer_http_request_sent
.tv_usec
= 0;
101 if (fwd
->serverConnection() != NULL
)
102 _peer
= cbdataReference(fwd
->serverConnection()->getPeer()); /* might be NULL */
105 request
->flags
.proxying
= true;
107 * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
108 * We might end up getting the object from somewhere else if,
109 * for example, the request to this neighbor fails.
111 if (_peer
->options
.proxy_only
)
112 entry
->releaseRequest();
115 entry
->setNoDelay(_peer
->options
.no_delay
);
120 * register the handler to free HTTP state data when the FD closes
122 typedef CommCbMemFunT
<HttpStateData
, CommCloseCbParams
> Dialer
;
123 closeHandler
= JobCallback(9, 5, Dialer
, this, HttpStateData::httpStateConnClosed
);
124 comm_add_close_handler(serverConnection
->fd
, closeHandler
);
127 HttpStateData::~HttpStateData()
130 * don't forget that ~Client() gets called automatically
133 if (!readBuf
->isNull())
138 if (httpChunkDecoder
)
139 delete httpChunkDecoder
;
141 cbdataReferenceDone(_peer
);
143 debugs(11,5, HERE
<< "HttpStateData " << this << " destroyed; " << serverConnection
);
146 const Comm::ConnectionPointer
&
147 HttpStateData::dataConnection() const
149 return serverConnection
;
153 HttpStateData::httpStateConnClosed(const CommCloseCbParams
¶ms
)
155 debugs(11, 5, "httpStateFree: FD " << params
.fd
<< ", httpState=" << params
.data
);
156 mustStop("HttpStateData::httpStateConnClosed");
160 HttpStateData::httpTimeout(const CommTimeoutCbParams
¶ms
)
162 debugs(11, 4, HERE
<< serverConnection
<< ": '" << entry
->url() << "'" );
164 if (entry
->store_status
== STORE_PENDING
) {
165 fwd
->fail(new ErrorState(ERR_READ_TIMEOUT
, Http::scGatewayTimeout
, fwd
->request
));
168 serverConnection
->close();
171 /// Remove an existing public store entry if the incoming response (to be
172 /// stored in a currently private entry) is going to invalidate it.
174 httpMaybeRemovePublic(StoreEntry
* e
, Http::StatusCode status
)
180 // If the incoming response already goes into a public entry, then there is
181 // nothing to remove. This protects ready-for-collapsing entries as well.
182 if (!EBIT_TEST(e
->flags
, KEY_PRIVATE
))
189 case Http::scNonAuthoritativeInformation
:
191 case Http::scMultipleChoices
:
193 case Http::scMovedPermanently
:
199 case Http::scNotFound
:
204 case Http::scForbidden
:
206 case Http::scMethodNotAllowed
:
213 case Http::scUnauthorized
:
223 * Any 2xx response should eject previously cached entities...
226 if (status
>= 200 && status
< 300)
234 if (!remove
&& !forbidden
)
239 if (e
->mem_obj
->request
)
240 pe
= storeGetPublicByRequest(e
->mem_obj
->request
);
242 pe
= storeGetPublic(e
->mem_obj
->storeId(), e
->mem_obj
->method
);
247 neighborsHtcpClear(e
, NULL
, e
->mem_obj
->request
, e
->mem_obj
->method
, HTCP_CLR_INVALIDATION
);
253 * Also remove any cached HEAD response in case the object has
256 if (e
->mem_obj
->request
)
257 pe
= storeGetPublicByRequestMethod(e
->mem_obj
->request
, Http::METHOD_HEAD
);
259 pe
= storeGetPublic(e
->mem_obj
->storeId(), Http::METHOD_HEAD
);
264 neighborsHtcpClear(e
, NULL
, e
->mem_obj
->request
, HttpRequestMethod(Http::METHOD_HEAD
), HTCP_CLR_INVALIDATION
);
271 HttpStateData::processSurrogateControl(HttpReply
*reply
)
273 if (request
->flags
.accelerated
&& reply
->surrogate_control
) {
274 HttpHdrScTarget
*sctusable
= reply
->surrogate_control
->getMergedTarget(Config
.Accel
.surrogate_id
);
277 if (sctusable
->noStore() ||
278 (Config
.onoff
.surrogate_is_remote
279 && sctusable
->noStoreRemote())) {
280 surrogateNoStore
= true;
281 entry
->makePrivate();
284 /* The HttpHeader logic cannot tell if the header it's parsing is a reply to an
285 * accelerated request or not...
286 * Still, this is an abstraction breach. - RC
288 if (sctusable
->hasMaxAge()) {
289 if (sctusable
->maxAge() < sctusable
->maxStale())
290 reply
->expires
= reply
->date
+ sctusable
->maxAge();
292 reply
->expires
= reply
->date
+ sctusable
->maxStale();
294 /* And update the timestamps */
295 entry
->timestampsSet();
298 /* We ignore cache-control directives as per the Surrogate specification */
299 ignoreCacheControl
= true;
/// Decides whether the parsed response may be stored in the cache, checking
/// (in order): release flag, Surrogate-Control, request/response
/// Cache-Control, authenticated-transaction rules, content type, and finally
/// the response status code.
/// NOTE(review): this listing lost structural lines during extraction
/// (the return type, braces, return statements, #else/#endif pairs and some
/// case labels); recover the exact body from upstream before building.
HttpStateData::cacheableReply()
    HttpReply const *rep = finalReply();
    HttpHeader const *hdr = &rep->header;

#if USE_HTTP_VIOLATIONS

    const RefreshPattern *R = NULL;

    /* This strange looking define first looks up the refresh pattern
     * and then checks if the specified flag is set. The main purpose
     * of this is to simplify the refresh pattern lookup and USE_HTTP_VIOLATIONS
     * testing.
     */
#define REFRESH_OVERRIDE(flag) \
    ((R = (R ? R : refreshLimits(entry->mem_obj->storeId()))) , \
    (R && R->flags.flag))
    // non-violations builds: refresh_pattern can never override caching rules
#define REFRESH_OVERRIDE(flag) 0

    // already marked for release: never cacheable
    if (EBIT_TEST(entry->flags, RELEASE_REQUEST)) {
        debugs(22, 3, "NO because " << *entry << " has been released.");

    // Check for Surrogate/1.0 protocol conditions
    // NP: reverse-proxy traffic our parent server has instructed us never to cache
    if (surrogateNoStore) {
        debugs(22, 3, HERE << "NO because Surrogate-Control:no-store");

    // RFC 2616: HTTP/1.1 Cache-Control conditions
    if (!ignoreCacheControl) {
        // XXX: check to see if the request headers alone were enough to prevent caching earlier
        // (ie no-store request header) no need to check those all again here if so.
        // for now we are not reliably doing that so we waste CPU re-checking request CC

        // RFC 2616 section 14.9.2 - MUST NOT cache any response with request CC:no-store
        if (request && request->cache_control && request->cache_control->noStore() &&
                !REFRESH_OVERRIDE(ignore_no_store)) {
            debugs(22, 3, HERE << "NO because client request Cache-Control:no-store");

        // NP: request CC:no-cache only means cache READ is forbidden. STORE is permitted.
        if (rep->cache_control && rep->cache_control->hasNoCache() && rep->cache_control->noCache().size() > 0) {
            /* TODO: we are allowed to cache when no-cache= has parameters.
             * Provided we strip away any of the listed headers unless they are revalidated
             * successfully (ie, must revalidate AND these headers are prohibited on stale replies).
             * That is a bit tricky for squid right now so we avoid caching entirely.
             */
            debugs(22, 3, HERE << "NO because server reply Cache-Control:no-cache has parameters");

        // NP: request CC:private is undefined. We ignore.
        // NP: other request CC flags are limiters on HIT/MISS. We don't care about here.

        // RFC 2616 section 14.9.2 - MUST NOT cache any response with CC:no-store
        if (rep->cache_control && rep->cache_control->noStore() &&
                !REFRESH_OVERRIDE(ignore_no_store)) {
            debugs(22, 3, HERE << "NO because server reply Cache-Control:no-store");

        // RFC 2616 section 14.9.1 - MUST NOT cache any response with CC:private in a shared cache like Squid.
        // CC:private overrides CC:public when both are present in a response.
        // TODO: add a shared/private cache configuration possibility.
        if (rep->cache_control &&
                rep->cache_control->hasPrivate() &&
                !REFRESH_OVERRIDE(ignore_private)) {
            /* TODO: we are allowed to cache when private= has parameters.
             * Provided we strip away any of the listed headers unless they are revalidated
             * successfully (ie, must revalidate AND these headers are prohibited on stale replies).
             * That is a bit tricky for squid right now so we avoid caching entirely.
             */
            debugs(22, 3, HERE << "NO because server reply Cache-Control:private");

    // RFC 2068, sec 14.9.4 - MUST NOT cache any response with Authentication UNLESS certain CC controls are present
    // allow HTTP violations to IGNORE those controls (ie re-block caching Auth)
    if (request && (request->flags.auth || request->flags.authSent) && !REFRESH_OVERRIDE(ignore_auth)) {
        if (!rep->cache_control) {
            debugs(22, 3, HERE << "NO because Authenticated and server reply missing Cache-Control");

        if (ignoreCacheControl) {
            debugs(22, 3, HERE << "NO because Authenticated and ignoring Cache-Control");

        bool mayStore = false;
        // HTTPbis pt6 section 3.2: a response CC:public is present
        if (rep->cache_control->Public()) {
            debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:public");

            // HTTPbis pt6 section 3.2: a response CC:must-revalidate is present
        } else if (rep->cache_control->mustRevalidate() && !REFRESH_OVERRIDE(ignore_must_revalidate)) {
            // NOTE(review): message says "public" but this branch tests
            // must-revalidate — looks like a copy/paste slip; confirm upstream.
            debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:public");

#if USE_HTTP_VIOLATIONS
            // NP: given the must-revalidate exception we should also be able to exempt no-cache.
            // HTTPbis WG verdict on this is that it is omitted from the spec due to being 'unexpected' by
            // some. The caching+revalidate is not exactly unsafe though with Squids interpretation of no-cache
            // (without parameters) as equivalent to must-revalidate in the reply.
        } else if (rep->cache_control->hasNoCache() && rep->cache_control->noCache().size() == 0 && !REFRESH_OVERRIDE(ignore_must_revalidate)) {
            debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:no-cache (equivalent to must-revalidate)");

            // HTTPbis pt6 section 3.2: a response CC:s-maxage is present
        } else if (rep->cache_control->sMaxAge()) {
            debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:s-maxage");

            debugs(22, 3, HERE << "NO because Authenticated transaction");

    // NP: response CC:no-cache is equivalent to CC:must-revalidate,max-age=0. We MAY cache, and do so.
    // NP: other request CC flags are limiters on HIT/MISS/REFRESH. We don't care about here.

    /* HACK: The "multipart/x-mixed-replace" content type is used for
     * continuous push replies. These are generally dynamic and
     * probably should not be cachable
     */
    if ((v = hdr->getStr(HDR_CONTENT_TYPE)))
        if (!strncasecmp(v, "multipart/x-mixed-replace", 25)) {
            debugs(22, 3, HERE << "NO because Content-Type:multipart/x-mixed-replace");

    switch (rep->sline.status()) {
        /* Responses that are cacheable */

    case Http::scNonAuthoritativeInformation:

    case Http::scMultipleChoices:

    case Http::scMovedPermanently:
    case Http::scPermanentRedirect:

        /*
         * Don't cache objects that need to be refreshed on next request,
         * unless we know how to refresh it.
         */
        if (!refreshIsCachable(entry) && !REFRESH_OVERRIDE(store_stale)) {
            debugs(22, 3, "NO because refreshIsCachable() returned non-cacheable..");

        debugs(22, 3, HERE << "YES because HTTP status " << rep->sline.status());

        /* Responses that only are cacheable if the server says so */

    case Http::scTemporaryRedirect:
        if (rep->date <= 0) {
            debugs(22, 3, HERE << "NO because HTTP status " << rep->sline.status() << " and Date missing/invalid");

        if (rep->expires > rep->date) {
            debugs(22, 3, HERE << "YES because HTTP status " << rep->sline.status() << " and Expires > Date");

            debugs(22, 3, HERE << "NO because HTTP status " << rep->sline.status() << " and Expires <= Date");

        /* Errors can be negatively cached */

    case Http::scNoContent:

    case Http::scUseProxy:

    case Http::scBadRequest:

    case Http::scForbidden:

    case Http::scNotFound:

    case Http::scMethodNotAllowed:

    case Http::scUriTooLong:

    case Http::scInternalServerError:

    case Http::scNotImplemented:

    case Http::scBadGateway:

    case Http::scServiceUnavailable:

    case Http::scGatewayTimeout:
    case Http::scMisdirectedRequest:

        debugs(22, 3, "MAYBE because HTTP status " << rep->sline.status());

        /* Some responses can never be cached */

    case Http::scPartialContent: /* Not yet supported */

    case Http::scSeeOther:

    case Http::scNotModified:

    case Http::scUnauthorized:

    case Http::scProxyAuthenticationRequired:

    case Http::scInvalidHeader: /* Squid header parsing error */

    case Http::scHeaderTooLarge:

    case Http::scPaymentRequired:
    case Http::scNotAcceptable:
    case Http::scRequestTimeout:
    case Http::scConflict:
    case Http::scLengthRequired:
    case Http::scPreconditionFailed:
    case Http::scPayloadTooLarge:
    case Http::scUnsupportedMediaType:
    case Http::scUnprocessableEntity:

    case Http::scFailedDependency:
    case Http::scInsufficientStorage:
    case Http::scRequestedRangeNotSatisfied:
    case Http::scExpectationFailed:

        debugs(22, 3, HERE << "NO because HTTP status " << rep->sline.status());

        /* RFC 2616 section 6.1.1: an unrecognized response MUST NOT be cached. */
        debugs (11, 3, HERE << "NO because unknown HTTP status code " << rep->sline.status());
/*
 * For Vary, store the relevant request headers as
 * virtual headers in the reply
 * Returns false if the variance cannot be stored
 */
/// Builds the "vary mark": a serialized list of the request header values
/// named by the reply's Vary (and X-Accelerator-Vary) headers, used as part
/// of the cache key for variant objects.
/// NOTE(review): extraction dropped the declarations of vary/vstr/item/ilen/
/// hdr/value and several braces; recover the exact body from upstream.
httpMakeVaryMark(HttpRequest * request, HttpReply const * reply)
    const char *pos = NULL;

    vary = reply->header.getList(HDR_VARY);

    // one Vary member per iteration; pos tracks the scan position
    while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
        char *name = (char *)xmalloc(ilen + 1);
        xstrncpy(name, item, ilen + 1);

        if (strcmp(name, "*") == 0) {
            /* Can not handle "Vary: *" without ETag support */

        strListAdd(&vstr, name, ',');
        hdr = request->header.getByName(name);
        value = hdr.termedBuf();
        // URL-escape the value so it can be embedded safely in the mark
        value = rfc1738_escape_part(value);
        vstr.append("=\"", 2);
        vstr.append("\"", 1);

#if X_ACCELERATOR_VARY
    // Squid extension: X-Accelerator-Vary works like Vary for accelerated traffic
    vary = reply->header.getList(HDR_X_ACCELERATOR_VARY);

    while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
        char *name = (char *)xmalloc(ilen + 1);
        xstrncpy(name, item, ilen + 1);
        strListAdd(&vstr, name, ',');
        hdr = request->header.getByName(name);
        value = hdr.termedBuf();
        value = rfc1738_escape_part(value);
        vstr.append("=\"", 2);
        vstr.append("\"", 1);

    debugs(11, 3, "httpMakeVaryMark: " << vstr);
    return vstr.termedBuf();
652 HttpStateData::keepaliveAccounting(HttpReply
*reply
)
656 ++ _peer
->stats
.n_keepalives_sent
;
658 if (reply
->keep_alive
) {
660 ++ _peer
->stats
.n_keepalives_recv
;
662 if (Config
.onoff
.detect_broken_server_pconns
663 && reply
->bodySize(request
->method
) == -1 && !flags
.chunked
) {
664 debugs(11, DBG_IMPORTANT
, "keepaliveAccounting: Impossible keep-alive header from '" << entry
->url() << "'" );
665 // debugs(11, 2, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------" );
666 flags
.keepalive_broken
= true;
672 HttpStateData::checkDateSkew(HttpReply
*reply
)
674 if (reply
->date
> -1 && !_peer
) {
675 int skew
= abs((int)(reply
->date
- squid_curtime
));
678 debugs(11, 3, "" << request
->GetHost() << "'s clock is skewed by " << skew
<< " seconds!");
/*
 * This creates the error page itself.. its likely
 * that the forward ported reply header max size patch
 * generates non http conformant error pages - in which
 * case the errors where should be 'BAD_GATEWAY' etc
 */
/// Parses the server response headers accumulated in readBuf: handles
/// HTTP/0.9 upgrades, unrecoverable parse errors, 1xx control messages,
/// chunked transfer coding setup, and connection-pinning eligibility.
/// NOTE(review): extraction dropped structural lines (the return type,
/// braces, return/ctx_exit() exits, the MemBuf used for the HTTP/0.9 repack);
/// recover the exact body from upstream before building.
HttpStateData::processReplyHeader()
    /** Creates a blank header. If this routine is made incremental, this will not do */

    /* NP: all exit points to this function MUST call ctx_exit(ctx) */
    Ctx ctx = ctx_enter(entry->mem_obj->urlXXX());

    debugs(11, 3, "processReplyHeader: key '" << entry->getMD5Text() << "'");

    assert(!flags.headers_parsed);

    // nothing buffered yet: wait for more data
    if (!readBuf->hasContent()) {

    Http::StatusCode error = Http::scNone;

    HttpReply *newrep = new HttpReply;
    const bool parsed = newrep->parse(readBuf, eof, &error);

    // unparsed and clearly not HTTP/1.x or ICY: treat as an HTTP/0.9 reply
    // by wrapping it in a synthetic 200 "Gatewaying" header
    if (!parsed && readBuf->contentSize() > 5 && strncmp(readBuf->content(), "HTTP/", 5) != 0 && strncmp(readBuf->content(), "ICY", 3) != 0) {
        HttpReply *tmprep = new HttpReply;
        tmprep->setHeaders(Http::scOkay, "Gatewaying", NULL, -1, -1, -1);
        tmprep->header.putExt("X-Transformed-From", "HTTP/0.9");
        newrep->parse(mb, eof, &error);

    if (!parsed && error > 0) { // unrecoverable parsing error
        debugs(11, 3, "processReplyHeader: Non-HTTP-compliant header: '" << readBuf->content() << "'");
        flags.headers_parsed = true;
        // XXX: when sanityCheck is gone and Http::StatusLine is used to parse,
        // the sline should be already set the appropriate values during that parser stage
        newrep->sline.set(Http::ProtocolVersion(), error);
        HttpReply *vrep = setVirginReply(newrep);
        entry->replaceHttpReply(vrep);

    if (!parsed) { // need more data

    debugs(11, 2, "HTTP Server " << serverConnection);
    debugs(11, 2, "HTTP Server REPLY:\n---------\n" << readBuf->content() << "\n----------");

    // drop the parsed header bytes from the buffer; only body bytes remain
    header_bytes_read = headersEnd(readBuf->content(), readBuf->contentSize());
    readBuf->consume(header_bytes_read);

    newrep->removeStaleWarnings();

    // 1xx responses are control messages, handled separately from the final reply
    if (newrep->sline.protocol == AnyP::PROTO_HTTP && newrep->sline.status() >= 100 && newrep->sline.status() < 200) {

    flags.chunked = false;
    if (newrep->sline.protocol == AnyP::PROTO_HTTP && newrep->header.chunked()) {
        flags.chunked = true;
        httpChunkDecoder = new ChunkedCodingParser;

    if (!peerSupportsConnectionPinning())
        request->flags.connectionAuthDisabled = true;

    HttpReply *vrep = setVirginReply(newrep);
    flags.headers_parsed = true;

    keepaliveAccounting(vrep);

    processSurrogateControl (vrep);

    request->hier.peer_reply_status = newrep->sline.status();
/// ignore or start forwarding the 1xx response (a.k.a., control message)
/// NOTE(review): extraction dropped structural lines (the return type,
/// braces, proceedAfter1xx()/return exits, and the ch.reply assignment);
/// recover the exact body from upstream before building.
HttpStateData::handle1xx(HttpReply *reply)
    HttpReply::Pointer msg(reply); // will destroy reply if unused

    // one 1xx at a time: we must not be called while waiting for previous 1xx
    Must(!flags.handling1xx);
    flags.handling1xx = true;

    // skip forwarding when the client cannot handle 1xx, or when we already
    // sent a forced 100 Continue on its behalf
    if (!request->canHandle1xx() || request->forcedBodyContinuation) {
        debugs(11, 2, "ignoring 1xx because it is " << (request->forcedBodyContinuation ? "already sent" : "not supported by client"));

#if USE_HTTP_VIOLATIONS
    // check whether the 1xx response forwarding is allowed by squid.conf
    if (Config.accessList.reply) {
        ACLFilledChecklist ch(Config.accessList.reply, originalRequest(), NULL);
        HTTPMSGLOCK(ch.reply);
        if (ch.fastCheck() != ACCESS_ALLOWED) { // TODO: support slow lookups?
            debugs(11, 3, HERE << "ignoring denied 1xx");
#endif // USE_HTTP_VIOLATIONS

    debugs(11, 2, HERE << "forwarding 1xx to client");

    // the Sink will use this to call us back after writing 1xx to the client
    typedef NullaryMemFunT<HttpStateData> CbDialer;
    const AsyncCall::Pointer cb = JobCallback(11, 3, CbDialer, this,
                                  HttpStateData::proceedAfter1xx);
    CallJobHere1(11, 4, request->clientConnectionManager, ConnStateData,
                 ConnStateData::sendControlMsg, HttpControlMsg(msg, cb));
    // If the call is not fired, then the Sink is gone, and HttpStateData
    // will terminate due to an aborted store entry or another similar error.
    // If we get stuck, it is not handle1xx fault if we could get stuck
    // for similar reasons without a 1xx response.
822 /// restores state and resumes processing after 1xx is ignored or forwarded
824 HttpStateData::proceedAfter1xx()
826 Must(flags
.handling1xx
);
828 debugs(11, 2, HERE
<< "consuming " << header_bytes_read
<<
829 " header and " << reply_bytes_read
<< " body bytes read after 1xx");
830 header_bytes_read
= 0;
831 reply_bytes_read
= 0;
833 CallJobHere(11, 3, this, HttpStateData
, HttpStateData::processReply
);
/*
 * returns true if the peer can support connection pinning
 */
/// NOTE(review): extraction dropped the early guards (no _peer / no reply),
/// every return true/false statement, local declarations (header, rc) and
/// braces; recover the exact body from upstream before building.
bool HttpStateData::peerSupportsConnectionPinning() const
    const HttpReply *rep = entry->mem_obj->getReply();
    const HttpHeader *hdr = &rep->header;

    /*If this peer does not support connection pinning (authenticated
      connections) return false
     */
    if (!_peer->connection_auth)

    /*The peer supports connection pinning and the http reply status
      is not unauthorized, so the related connection can be pinned
     */
    if (rep->sline.status() != Http::scUnauthorized)

    /*The server respond with Http::scUnauthorized and the peer configured
      with "connection-auth=on" we know that the peer supports pinned
      connections
     */
    if (_peer->connection_auth == 1)

    /*At this point peer has configured with "connection-auth=auto"
      parameter so we need some extra checks to decide if we are going
      to allow pinned connections or not
     */

    /*if the peer configured with originserver just allow connection
      pinning (squid 2.6 behaviour)
     */
    if (_peer->options.originserver)

    /*if the connections it is already pinned it is OK*/
    if (request->flags.pinned)

    /*Allow pinned connections only if the Proxy-support header exists in
      reply and has in its list the "Session-Based-Authentication"
      which means that the peer supports connection pinning.
     */
    if (!hdr->has(HDR_PROXY_SUPPORT))

    header = hdr->getStrOrList(HDR_PROXY_SUPPORT);
    /* XXX This ought to be done in a case-insensitive manner */
    rc = (strstr(header.termedBuf(), "Session-Based-Authentication") != NULL);
// Called when we parsed (and possibly adapted) the headers but
// had not starting storing (a.k.a., sending) the body yet.
/// Applies cache policy to the final (possibly adapted) reply: public-entry
/// eviction, Vary handling, cacheableReply() dispatch, and revalidation flags.
/// NOTE(review): extraction dropped structural lines (the return type, braces,
/// the varyFailure branch, the switch case labels, and ctx_exit()); recover
/// the exact body from upstream before building.
HttpStateData::haveParsedReplyHeaders()
    Client::haveParsedReplyHeaders();

    Ctx ctx = ctx_enter(entry->mem_obj->urlXXX());
    HttpReply *rep = finalReply();

    entry->timestampsSet();

    /* Check if object is cacheable or not based on reply code */
    debugs(11, 3, "HTTP CODE: " << rep->sline.status());

    if (neighbors_do_private_keys)
        httpMaybeRemovePublic(entry, rep->sline.status());

    bool varyFailure = false;
    if (rep->header.has(HDR_VARY)
#if X_ACCELERATOR_VARY
        || rep->header.has(HDR_X_ACCELERATOR_VARY)

        const char *vary = httpMakeVaryMark(request, rep);

        // variance could not be computed: store privately and stop waiting
        entry->makePrivate();
        if (!fwd->reforwardableStatus(rep->sline.status()))
            EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);

        entry->mem_obj->vary_headers = xstrdup(vary);

    /*
     * If its not a reply that we will re-forward, then
     * allow the client to get it.
     */
    if (!fwd->reforwardableStatus(rep->sline.status()))
        EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);

    switch (cacheableReply()) {
        entry->makePrivate();

#if USE_HTTP_VIOLATIONS
        if (Config.negativeTtl > 0)
            entry->cacheNegatively();
        entry->makePrivate();

    if (!ignoreCacheControl) {
        if (rep->cache_control) {
            // We are required to revalidate on many conditions.
            // For security reasons we do so even if storage was caused by refresh_pattern ignore-* option

            // CC:must-revalidate or CC:proxy-revalidate
            const bool ccMustRevalidate = (rep->cache_control->proxyRevalidate() || rep->cache_control->mustRevalidate());

            // CC:no-cache (only if there are no parameters)
            const bool ccNoCacheNoParams = (rep->cache_control->hasNoCache() && rep->cache_control->noCache().size()==0);

            const bool ccSMaxAge = rep->cache_control->hasSMaxAge();

            // CC:private (yes, these can sometimes be stored)
            const bool ccPrivate = rep->cache_control->hasPrivate();

            if (ccMustRevalidate || ccNoCacheNoParams || ccSMaxAge || ccPrivate)
                EBIT_SET(entry->flags, ENTRY_REVALIDATE);

#if USE_HTTP_VIOLATIONS // response header Pragma::no-cache is undefined in HTTP
            // Expensive calculation. So only do it IF the CC: header is not present.

            /* HACK: Pragma: no-cache in _replies_ is not documented in HTTP,
             * but servers like "Active Imaging Webcast/2.0" sure do use it */
            if (rep->header.has(HDR_PRAGMA) &&
                    rep->header.hasListMember(HDR_PRAGMA,"no-cache",','))
                EBIT_SET(entry->flags, ENTRY_REVALIDATE);

    headersLog(1, 0, request->method, rep);
1008 HttpStateData::ConnectionStatus
1009 HttpStateData::statusIfComplete() const
1011 const HttpReply
*rep
= virginReply();
1013 * If the reply wants to close the connection, it takes precedence */
1015 if (httpHeaderHasConnDir(&rep
->header
, "close"))
1016 return COMPLETE_NONPERSISTENT_MSG
;
1019 * If we didn't send a keep-alive request header, then this
1020 * can not be a persistent connection.
1022 if (!flags
.keepalive
)
1023 return COMPLETE_NONPERSISTENT_MSG
;
1026 * If we haven't sent the whole request then this can not be a persistent
1029 if (!flags
.request_sent
) {
1030 debugs(11, 2, "Request not yet fully sent " << request
->method
<< ' ' << entry
->url());
1031 return COMPLETE_NONPERSISTENT_MSG
;
1035 * What does the reply have to say about keep-alive?
1039 * If the origin server (HTTP/1.0) does not send a keep-alive
1040 * header, but keeps the connection open anyway, what happens?
1041 * We'll return here and http.c waits for an EOF before changing
1042 * store_status to STORE_OK. Combine this with ENTRY_FWD_HDR_WAIT
1043 * and an error status code, and we might have to wait until
1044 * the server times out the socket.
1046 if (!rep
->keep_alive
)
1047 return COMPLETE_NONPERSISTENT_MSG
;
1049 return COMPLETE_PERSISTENT_MSG
;
1052 HttpStateData::ConnectionStatus
1053 HttpStateData::persistentConnStatus() const
1055 debugs(11, 3, HERE
<< serverConnection
<< " eof=" << eof
);
1056 if (eof
) // already reached EOF
1057 return COMPLETE_NONPERSISTENT_MSG
;
1059 /* If server fd is closing (but we have not been notified yet), stop Comm
1060 I/O to avoid assertions. TODO: Change Comm API to handle callers that
1061 want more I/O after async closing (usually initiated by others). */
1062 // XXX: add canReceive or s/canSend/canTalkToServer/
1063 if (!Comm::IsConnOpen(serverConnection
))
1064 return COMPLETE_NONPERSISTENT_MSG
;
1067 * In chunked response we do not know the content length but we are absolutely
1068 * sure about the end of response, so we are calling the statusIfComplete to
1069 * decide if we can be persistant
1071 if (lastChunk
&& flags
.chunked
)
1072 return statusIfComplete();
1074 const HttpReply
*vrep
= virginReply();
1075 debugs(11, 5, "persistentConnStatus: content_length=" << vrep
->content_length
);
1077 const int64_t clen
= vrep
->bodySize(request
->method
);
1079 debugs(11, 5, "persistentConnStatus: clen=" << clen
);
1081 /* If the body size is unknown we must wait for EOF */
1083 return INCOMPLETE_MSG
;
1086 * If the body size is known, we must wait until we've gotten all of it. */
1089 // if (entry->mem_obj->endOffset() < vrep->content_length + vrep->hdr_sz)
1090 const int64_t body_bytes_read
= reply_bytes_read
- header_bytes_read
;
1091 debugs(11,5, "persistentConnStatus: body_bytes_read=" <<
1092 body_bytes_read
<< " content_length=" << vrep
->content_length
);
1094 if (body_bytes_read
< vrep
->content_length
)
1095 return INCOMPLETE_MSG
;
1097 if (body_bytes_truncated
> 0) // already read more than needed
1098 return COMPLETE_NONPERSISTENT_MSG
; // disable pconns
1102 * If there is no message body or we got it all, we can be persistent */
1103 return statusIfComplete();
/* XXX this function is too long! */
/// Comm read callback for the server socket: handles closing/aborted/error
/// cases, accounts received bytes (delay pools, kbyte counters, read-size
/// histogram, peer response time), detects EOF, and hands the buffered data
/// to processReply().
/// NOTE(review): extraction dropped structural lines (the return type, the
/// len/clen/bin locals, braces, fwd->fail(err), eof assignment, and the
/// trailing processReply() call); recover the exact body from upstream.
HttpStateData::readReply(const CommIoCbParams &io)
    flags.do_next_read = false;

    debugs(11, 5, HERE << io.conn << ": len " << len << ".");

    // Bail out early on Comm::ERR_CLOSING - close handlers will tidy up for us
    if (io.flag == Comm::ERR_CLOSING) {
        debugs(11, 3, "http socket closing");

    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        abortTransaction("store entry aborted while reading reply");

    // handle I/O errors
    if (io.flag != Comm::OK || len < 0) {
        debugs(11, 2, HERE << io.conn << ": read failure: " << xstrerror() << ".");

        // transient errno: retry the read instead of failing the transaction
        if (ignoreErrno(io.xerrno)) {
            flags.do_next_read = true;

            ErrorState *err = new ErrorState(ERR_READ_ERROR, Http::scBadGateway, fwd->request);
            err->xerrno = io.xerrno;
            flags.do_next_read = false;
            serverConnection->close();

    // account the freshly received bytes
    readBuf->appended(len);
    reply_bytes_read += len;

    // delay-pool accounting for bandwidth shaping
    DelayId delayId = entry->mem_obj->mostBytesAllowed();
    delayId.bytesIn(len);

    kb_incr(&(statCounter.server.all.kbytes_in), len);
    kb_incr(&(statCounter.server.http.kbytes_in), len);
    ++ IOStats.Http.reads;

    // log2 bucket of the read size for the read-size histogram
    for (clen = len - 1, bin = 0; clen; ++bin)

    ++ IOStats.Http.read_hist[bin];

    // update peer response time stats (%<pt)
    const timeval &sent = request->hier.peer_http_request_sent;
    tvSub(request->hier.peer_response_time, sent, current_time);
    request->hier.peer_response_time.tv_sec = -1;

    /* Here the RFC says we should ignore whitespace between replies, but we can't as
     * doing so breaks HTTP/0.9 replies beginning with witespace, and in addition
     * the response splitting countermeasures is extremely likely to trigger on this,
     * not allowing connection reuse in the first place.
     *
     * 2012-02-10: which RFC? not 2068 or 2616,
     *     tolerance there is all about whitespace between requests and header tokens.
     */

    if (len == 0) { // reached EOF?
        flags.do_next_read = false;

        /* Bug 2879: Replies may terminate with \r\n then EOF instead of \r\n\r\n
         * Ensure here that we have at minimum two \r\n when EOF is seen.
         * TODO: Add eof parameter to headersEnd() and move this hack there.
         */
        if (readBuf->contentSize() && !flags.headers_parsed) {
            /*
             * Yes Henrik, there is a point to doing this. When we
             * called httpProcessReplyHeader() before, we didn't find
             * the end of headers, but now we are definately at EOF, so
             * we want to process the reply headers.
             */
            /* Fake an "end-of-headers" to work around such broken servers */
            readBuf->append("\r\n", 2);
1205 /// processes the already read and buffered response data, possibly after
1206 /// waiting for asynchronous 1xx control message processing
1208 HttpStateData::processReply()
1211 if (flags
.handling1xx
) { // we came back after handling a 1xx response
1212 debugs(11, 5, HERE
<< "done with 1xx handling");
1213 flags
.handling1xx
= false;
1214 Must(!flags
.headers_parsed
);
1217 if (!flags
.headers_parsed
) { // have not parsed headers yet?
1218 PROF_start(HttpStateData_processReplyHeader
);
1219 processReplyHeader();
1220 PROF_stop(HttpStateData_processReplyHeader
);
1222 if (!continueAfterParsingHeader()) // parsing error or need more data
1223 return; // TODO: send errors to ICAP
1225 adaptOrFinalizeReply(); // may write to, abort, or "close" the entry
1228 // kick more reads if needed and/or process the response body, if any
1229 PROF_start(HttpStateData_processReplyBody
);
1230 processReplyBody(); // may call serverComplete()
1231 PROF_stop(HttpStateData_processReplyBody
);
1235 \retval true if we can continue with processing the body or doing ICAP.
1238 HttpStateData::continueAfterParsingHeader()
1240 if (flags
.handling1xx
) {
1241 debugs(11, 5, HERE
<< "wait for 1xx handling");
1242 Must(!flags
.headers_parsed
);
1246 if (!flags
.headers_parsed
&& !eof
) {
1247 debugs(11, 9, HERE
<< "needs more at " << readBuf
->contentSize());
1248 flags
.do_next_read
= true;
1249 /** \retval false If we have not finished parsing the headers and may get more data.
1250 * Schedules more reads to retrieve the missing data.
1252 maybeReadVirginBody(); // schedules all kinds of reads; TODO: rename
1256 /** If we are done with parsing, check for errors */
1258 err_type error
= ERR_NONE
;
1260 if (flags
.headers_parsed
) { // parsed headers, possibly with errors
1261 // check for header parsing errors
1262 if (HttpReply
*vrep
= virginReply()) {
1263 const Http::StatusCode s
= vrep
->sline
.status();
1264 const AnyP::ProtocolVersion
&v
= vrep
->sline
.version
;
1265 if (s
== Http::scInvalidHeader
&& v
!= Http::ProtocolVersion(0,9)) {
1266 debugs(11, DBG_IMPORTANT
, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry
->url() << " AKA " << request
->GetHost() << request
->urlpath
.termedBuf() );
1267 error
= ERR_INVALID_RESP
;
1268 } else if (s
== Http::scHeaderTooLarge
) {
1269 fwd
->dontRetry(true);
1270 error
= ERR_TOO_BIG
;
1272 return true; // done parsing, got reply, and no error
1275 // parsed headers but got no reply
1276 debugs(11, DBG_IMPORTANT
, "WARNING: HTTP: Invalid Response: No reply at all for " << entry
->url() << " AKA " << request
->GetHost() << request
->urlpath
.termedBuf() );
1277 error
= ERR_INVALID_RESP
;
1281 if (readBuf
->hasContent()) {
1282 error
= ERR_INVALID_RESP
;
1283 debugs(11, DBG_IMPORTANT
, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry
->url() << " AKA " << request
->GetHost() << request
->urlpath
.termedBuf() );
1285 error
= ERR_ZERO_SIZE_OBJECT
;
1286 debugs(11, (request
->flags
.accelerated
?DBG_IMPORTANT
:2), "WARNING: HTTP: Invalid Response: No object data received for " <<
1287 entry
->url() << " AKA " << request
->GetHost() << request
->urlpath
.termedBuf() );
1291 assert(error
!= ERR_NONE
);
1293 fwd
->fail(new ErrorState(error
, Http::scBadGateway
, fwd
->request
));
1294 flags
.do_next_read
= false;
1295 serverConnection
->close();
1296 return false; // quit on error
1299 /** truncate what we read if we read too much so that writeReplyBody()
1300 writes no more than what we should have read */
1302 HttpStateData::truncateVirginBody()
1304 assert(flags
.headers_parsed
);
1306 HttpReply
*vrep
= virginReply();
1308 if (!vrep
->expectingBody(request
->method
, clen
) || clen
< 0)
1309 return; // no body or a body of unknown size, including chunked
1311 const int64_t body_bytes_read
= reply_bytes_read
- header_bytes_read
;
1312 if (body_bytes_read
- body_bytes_truncated
<= clen
)
1313 return; // we did not read too much or already took care of the extras
1315 if (const int64_t extras
= body_bytes_read
- body_bytes_truncated
- clen
) {
1316 // server sent more that the advertised content length
1317 debugs(11,5, HERE
<< "body_bytes_read=" << body_bytes_read
<<
1318 " clen=" << clen
<< '/' << vrep
->content_length
<<
1319 " body_bytes_truncated=" << body_bytes_truncated
<< '+' << extras
);
1321 readBuf
->truncate(extras
);
1322 body_bytes_truncated
+= extras
;
1327 * Call this when there is data from the origin server
1328 * which should be sent to either StoreEntry, or to ICAP...
1331 HttpStateData::writeReplyBody()
1333 truncateVirginBody(); // if needed
1334 const char *data
= readBuf
->content();
1335 int len
= readBuf
->contentSize();
1336 addVirginReplyBody(data
, len
);
1337 readBuf
->consume(len
);
1341 HttpStateData::decodeAndWriteReplyBody()
1343 const char *data
= NULL
;
1345 bool wasThereAnException
= false;
1346 assert(flags
.chunked
);
1347 assert(httpChunkDecoder
);
1348 SQUID_ENTER_THROWING_CODE();
1351 const bool doneParsing
= httpChunkDecoder
->parse(readBuf
,&decodedData
);
1352 len
= decodedData
.contentSize();
1353 data
=decodedData
.content();
1354 addVirginReplyBody(data
, len
);
1357 flags
.do_next_read
= false;
1359 SQUID_EXIT_THROWING_CODE(wasThereAnException
);
1360 return wasThereAnException
;
1364 * processReplyBody has two purposes:
1365 * 1 - take the reply body data, if any, and put it into either
1366 * the StoreEntry, or give it over to ICAP.
1367 * 2 - see if we made it to the end of the response (persistent
1368 * connections and such)
1371 HttpStateData::processReplyBody()
1373 Ip::Address client_addr
;
1374 bool ispinned
= false;
1376 if (!flags
.headers_parsed
) {
1377 flags
.do_next_read
= true;
1378 maybeReadVirginBody();
1383 debugs(11,5, HERE
<< "adaptationAccessCheckPending=" << adaptationAccessCheckPending
);
1384 if (adaptationAccessCheckPending
)
1390 * At this point the reply headers have been parsed and consumed.
1391 * That means header content has been removed from readBuf and
1392 * it contains only body data.
1394 if (entry
->isAccepting()) {
1395 if (flags
.chunked
) {
1396 if (!decodeAndWriteReplyBody()) {
1397 flags
.do_next_read
= false;
1405 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
1406 // The above writeReplyBody() call may have aborted the store entry.
1407 abortTransaction("store entry aborted while storing reply");
1410 switch (persistentConnStatus()) {
1411 case INCOMPLETE_MSG
: {
1412 debugs(11, 5, "processReplyBody: INCOMPLETE_MSG from " << serverConnection
);
1413 /* Wait for more data or EOF condition */
1414 AsyncCall::Pointer nil
;
1415 if (flags
.keepalive_broken
) {
1416 commSetConnTimeout(serverConnection
, 10, nil
);
1418 commSetConnTimeout(serverConnection
, Config
.Timeout
.read
, nil
);
1421 flags
.do_next_read
= true;
1425 case COMPLETE_PERSISTENT_MSG
:
1426 debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG from " << serverConnection
);
1427 /* yes we have to clear all these! */
1428 commUnsetConnTimeout(serverConnection
);
1429 flags
.do_next_read
= false;
1431 comm_remove_close_handler(serverConnection
->fd
, closeHandler
);
1432 closeHandler
= NULL
;
1433 fwd
->unregister(serverConnection
);
1435 if (request
->flags
.spoofClientIp
)
1436 client_addr
= request
->client_addr
;
1438 if (request
->flags
.pinned
) {
1440 } else if (request
->flags
.connectionAuth
&& request
->flags
.authSent
) {
1444 if (ispinned
&& request
->clientConnectionManager
.valid()) {
1445 request
->clientConnectionManager
->pinConnection(serverConnection
, request
, _peer
,
1446 (request
->flags
.connectionAuth
));
1448 fwd
->pconnPush(serverConnection
, request
->GetHost());
1451 serverConnection
= NULL
;
1455 case COMPLETE_NONPERSISTENT_MSG
:
1456 debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG from " << serverConnection
);
1461 maybeReadVirginBody();
1465 HttpStateData::mayReadVirginReplyBody() const
1467 // TODO: Be more precise here. For example, if/when reading trailer, we may
1468 // not be doneWithServer() yet, but we should return false. Similarly, we
1469 // could still be writing the request body after receiving the whole reply.
1470 return !doneWithServer();
1474 HttpStateData::maybeReadVirginBody()
1477 if (!Comm::IsConnOpen(serverConnection
) || fd_table
[serverConnection
->fd
].closing())
1480 // we may need to grow the buffer if headers do not fit
1481 const int minRead
= flags
.headers_parsed
? 0 :1024;
1482 const int read_size
= replyBodySpace(*readBuf
, minRead
);
1484 debugs(11,9, HERE
<< (flags
.do_next_read
? "may" : "wont") <<
1485 " read up to " << read_size
<< " bytes from " << serverConnection
);
1488 * why <2? Because delayAwareRead() won't actually read if
1489 * you ask it to read 1 byte. The delayed read request
1490 * just gets re-queued until the client side drains, then
1491 * the I/O thread hangs. Better to not register any read
1492 * handler until we get a notification from someone that
1493 * its okay to read again.
1498 if (flags
.do_next_read
) {
1499 flags
.do_next_read
= false;
1500 typedef CommCbMemFunT
<HttpStateData
, CommIoCbParams
> Dialer
;
1501 entry
->delayAwareRead(serverConnection
, readBuf
->space(read_size
), read_size
,
1502 JobCallback(11, 5, Dialer
, this, HttpStateData::readReply
));
1506 /// called after writing the very last request byte (body, last-chunk, etc)
1508 HttpStateData::wroteLast(const CommIoCbParams
&io
)
1510 debugs(11, 5, HERE
<< serverConnection
<< ": size " << io
.size
<< ": errflag " << io
.flag
<< ".");
1511 #if URL_CHECKSUM_DEBUG
1513 entry
->mem_obj
->checkUrlChecksum();
1517 fd_bytes(io
.fd
, io
.size
, FD_WRITE
);
1518 kb_incr(&(statCounter
.server
.all
.kbytes_out
), io
.size
);
1519 kb_incr(&(statCounter
.server
.http
.kbytes_out
), io
.size
);
1522 if (io
.flag
== Comm::ERR_CLOSING
)
1526 ErrorState
*err
= new ErrorState(ERR_WRITE_ERROR
, Http::scBadGateway
, fwd
->request
);
1527 err
->xerrno
= io
.xerrno
;
1529 serverConnection
->close();
1536 /// successfully wrote the entire request (including body, last-chunk, etc.)
1538 HttpStateData::sendComplete()
1541 * Set the read timeout here because it hasn't been set yet.
1542 * We only set the read timeout after the request has been
1543 * fully written to the peer. If we start the timeout
1544 * after connection establishment, then we are likely to hit
1545 * the timeout for POST/PUT requests that have very large
1548 typedef CommCbMemFunT
<HttpStateData
, CommTimeoutCbParams
> TimeoutDialer
;
1549 AsyncCall::Pointer timeoutCall
= JobCallback(11, 5,
1550 TimeoutDialer
, this, HttpStateData::httpTimeout
);
1552 commSetConnTimeout(serverConnection
, Config
.Timeout
.read
, timeoutCall
);
1553 flags
.request_sent
= true;
1554 request
->hier
.peer_http_request_sent
= current_time
;
1557 // Close the HTTP server connection. Used by serverComplete().
1559 HttpStateData::closeServer()
1561 debugs(11,5, HERE
<< "closing HTTP server " << serverConnection
<< " this " << this);
1563 if (Comm::IsConnOpen(serverConnection
)) {
1564 fwd
->unregister(serverConnection
);
1565 comm_remove_close_handler(serverConnection
->fd
, closeHandler
);
1566 closeHandler
= NULL
;
1567 serverConnection
->close();
1572 HttpStateData::doneWithServer() const
1574 return !Comm::IsConnOpen(serverConnection
);
1578 * Fixup authentication request headers for special cases
1581 httpFixupAuthentication(HttpRequest
* request
, const HttpHeader
* hdr_in
, HttpHeader
* hdr_out
, const HttpStateFlags
&flags
)
1583 http_hdr_type header
= flags
.originpeer
? HDR_AUTHORIZATION
: HDR_PROXY_AUTHORIZATION
;
1585 /* Nothing to do unless we are forwarding to a peer */
1586 if (!request
->flags
.proxying
)
1589 /* Needs to be explicitly enabled */
1590 if (!request
->peer_login
)
1593 /* Maybe already dealt with? */
1594 if (hdr_out
->has(header
))
1597 /* Nothing to do here for PASSTHRU */
1598 if (strcmp(request
->peer_login
, "PASSTHRU") == 0)
1601 /* PROXYPASS is a special case, single-signon to servers with the proxy password (basic only) */
1602 if (flags
.originpeer
&& strcmp(request
->peer_login
, "PROXYPASS") == 0 && hdr_in
->has(HDR_PROXY_AUTHORIZATION
)) {
1603 const char *auth
= hdr_in
->getStr(HDR_PROXY_AUTHORIZATION
);
1605 if (auth
&& strncasecmp(auth
, "basic ", 6) == 0) {
1606 hdr_out
->putStr(header
, auth
);
1611 uint8_t loginbuf
[base64_encode_len(MAX_LOGIN_SZ
)];
1613 struct base64_encode_ctx ctx
;
1614 base64_encode_init(&ctx
);
1616 /* Special mode to pass the username to the upstream cache */
1617 if (*request
->peer_login
== '*') {
1618 const char *username
= "-";
1620 if (request
->extacl_user
.size())
1621 username
= request
->extacl_user
.termedBuf();
1623 else if (request
->auth_user_request
!= NULL
)
1624 username
= request
->auth_user_request
->username();
1627 blen
= base64_encode_update(&ctx
, loginbuf
, strlen(username
), reinterpret_cast<const uint8_t*>(username
));
1628 blen
+= base64_encode_update(&ctx
, loginbuf
+blen
, strlen(request
->peer_login
+1), reinterpret_cast<const uint8_t*>(request
->peer_login
+1));
1629 blen
+= base64_encode_final(&ctx
, loginbuf
+blen
);
1630 httpHeaderPutStrf(hdr_out
, header
, "Basic %.*s", (int)blen
, loginbuf
);
1634 /* external_acl provided credentials */
1635 if (request
->extacl_user
.size() && request
->extacl_passwd
.size() &&
1636 (strcmp(request
->peer_login
, "PASS") == 0 ||
1637 strcmp(request
->peer_login
, "PROXYPASS") == 0)) {
1639 blen
= base64_encode_update(&ctx
, loginbuf
, request
->extacl_user
.size(), reinterpret_cast<const uint8_t*>(request
->extacl_user
.rawBuf()));
1640 blen
+= base64_encode_update(&ctx
, loginbuf
+blen
, 1, reinterpret_cast<const uint8_t*>(":"));
1641 blen
+= base64_encode_update(&ctx
, loginbuf
+blen
, request
->extacl_passwd
.size(), reinterpret_cast<const uint8_t*>(request
->extacl_passwd
.rawBuf()));
1642 blen
+= base64_encode_final(&ctx
, loginbuf
+blen
);
1643 httpHeaderPutStrf(hdr_out
, header
, "Basic %.*s", (int)blen
, loginbuf
);
1646 // if no external user credentials are available to fake authentication with PASS acts like PASSTHRU
1647 if (strcmp(request
->peer_login
, "PASS") == 0)
1650 /* Kerberos login to peer */
1651 #if HAVE_AUTH_MODULE_NEGOTIATE && HAVE_KRB5 && HAVE_GSSAPI
1652 if (strncmp(request
->peer_login
, "NEGOTIATE",strlen("NEGOTIATE")) == 0) {
1654 char *PrincipalName
=NULL
,*p
;
1655 if ((p
=strchr(request
->peer_login
,':')) != NULL
) {
1658 Token
= peer_proxy_negotiate_auth(PrincipalName
, request
->peer_host
);
1660 httpHeaderPutStrf(hdr_out
, header
, "Negotiate %s",Token
);
1664 #endif /* HAVE_KRB5 && HAVE_GSSAPI */
1666 blen
= base64_encode_update(&ctx
, loginbuf
, strlen(request
->peer_login
), reinterpret_cast<const uint8_t*>(request
->peer_login
));
1667 blen
+= base64_encode_final(&ctx
, loginbuf
+blen
);
1668 httpHeaderPutStrf(hdr_out
, header
, "Basic %.*s", (int)blen
, loginbuf
);
1673 * build request headers and append them to a given MemBuf
1674 * used by buildRequestPrefix()
1675 * note: initialised the HttpHeader, the caller is responsible for Clean()-ing
1678 HttpStateData::httpBuildRequestHeader(HttpRequest
* request
,
1680 const AccessLogEntryPointer
&al
,
1681 HttpHeader
* hdr_out
,
1682 const HttpStateFlags
&flags
)
1684 /* building buffer for complex strings */
1685 #define BBUF_SZ (MAX_URL+32)
1686 LOCAL_ARRAY(char, bbuf
, BBUF_SZ
);
1687 LOCAL_ARRAY(char, ntoabuf
, MAX_IPSTRLEN
);
1688 const HttpHeader
*hdr_in
= &request
->header
;
1689 const HttpHeaderEntry
*e
= NULL
;
1690 HttpHeaderPos pos
= HttpHeaderInitPos
;
1691 assert (hdr_out
->owner
== hoRequest
);
1693 /* use our IMS header if the cached entry has Last-Modified time */
1694 if (request
->lastmod
> -1)
1695 hdr_out
->putTime(HDR_IF_MODIFIED_SINCE
, request
->lastmod
);
1697 // Add our own If-None-Match field if the cached entry has a strong ETag.
1698 // copyOneHeaderFromClientsideRequestToUpstreamRequest() adds client ones.
1699 if (request
->etag
.size() > 0) {
1700 hdr_out
->addEntry(new HttpHeaderEntry(HDR_IF_NONE_MATCH
, NULL
,
1701 request
->etag
.termedBuf()));
1704 bool we_do_ranges
= decideIfWeDoRanges (request
);
1706 String
strConnection (hdr_in
->getList(HDR_CONNECTION
));
1708 while ((e
= hdr_in
->getEntry(&pos
)))
1709 copyOneHeaderFromClientsideRequestToUpstreamRequest(e
, strConnection
, request
, hdr_out
, we_do_ranges
, flags
);
1711 /* Abstraction break: We should interpret multipart/byterange responses
1712 * into offset-length data, and this works around our inability to do so.
1714 if (!we_do_ranges
&& request
->multipartRangeRequest()) {
1715 /* don't cache the result */
1716 request
->flags
.cachable
= false;
1717 /* pretend it's not a range request */
1718 request
->ignoreRange("want to request the whole object");
1719 request
->flags
.isRanged
= false;
1723 if (Config
.onoff
.via
) {
1725 strVia
= hdr_in
->getList(HDR_VIA
);
1726 snprintf(bbuf
, BBUF_SZ
, "%d.%d %s",
1727 request
->http_ver
.major
,
1728 request
->http_ver
.minor
, ThisCache
);
1729 strListAdd(&strVia
, bbuf
, ',');
1730 hdr_out
->putStr(HDR_VIA
, strVia
.termedBuf());
1734 if (request
->flags
.accelerated
) {
1735 /* Append Surrogate-Capabilities */
1736 String
strSurrogate(hdr_in
->getList(HDR_SURROGATE_CAPABILITY
));
1738 snprintf(bbuf
, BBUF_SZ
, "%s=\"Surrogate/1.0 ESI/1.0\"", Config
.Accel
.surrogate_id
);
1740 snprintf(bbuf
, BBUF_SZ
, "%s=\"Surrogate/1.0\"", Config
.Accel
.surrogate_id
);
1742 strListAdd(&strSurrogate
, bbuf
, ',');
1743 hdr_out
->putStr(HDR_SURROGATE_CAPABILITY
, strSurrogate
.termedBuf());
1746 /** \pre Handle X-Forwarded-For */
1747 if (strcmp(opt_forwarded_for
, "delete") != 0) {
1749 String strFwd
= hdr_in
->getList(HDR_X_FORWARDED_FOR
);
1751 if (strFwd
.size() > 65536/2) {
1752 // There is probably a forwarding loop with Via detection disabled.
1753 // If we do nothing, String will assert on overflow soon.
1754 // TODO: Terminate all transactions with huge XFF?
1757 static int warnedCount
= 0;
1758 if (warnedCount
++ < 100) {
1759 const char *url
= entry
? entry
->url() : urlCanonical(request
);
1760 debugs(11, DBG_IMPORTANT
, "Warning: likely forwarding loop with " << url
);
1764 if (strcmp(opt_forwarded_for
, "on") == 0) {
1765 /** If set to ON - append client IP or 'unknown'. */
1766 if ( request
->client_addr
.isNoAddr() )
1767 strListAdd(&strFwd
, "unknown", ',');
1769 strListAdd(&strFwd
, request
->client_addr
.toStr(ntoabuf
, MAX_IPSTRLEN
), ',');
1770 } else if (strcmp(opt_forwarded_for
, "off") == 0) {
1771 /** If set to OFF - append 'unknown'. */
1772 strListAdd(&strFwd
, "unknown", ',');
1773 } else if (strcmp(opt_forwarded_for
, "transparent") == 0) {
1774 /** If set to TRANSPARENT - pass through unchanged. */
1775 } else if (strcmp(opt_forwarded_for
, "truncate") == 0) {
1776 /** If set to TRUNCATE - drop existing list and replace with client IP or 'unknown'. */
1777 if ( request
->client_addr
.isNoAddr() )
1780 strFwd
= request
->client_addr
.toStr(ntoabuf
, MAX_IPSTRLEN
);
1782 if (strFwd
.size() > 0)
1783 hdr_out
->putStr(HDR_X_FORWARDED_FOR
, strFwd
.termedBuf());
1785 /** If set to DELETE - do not copy through. */
1787 /* append Host if not there already */
1788 if (!hdr_out
->has(HDR_HOST
)) {
1789 if (request
->peer_domain
) {
1790 hdr_out
->putStr(HDR_HOST
, request
->peer_domain
);
1791 } else if (request
->port
== urlDefaultPort(request
->url
.getScheme())) {
1792 /* use port# only if not default */
1793 hdr_out
->putStr(HDR_HOST
, request
->GetHost());
1795 httpHeaderPutStrf(hdr_out
, HDR_HOST
, "%s:%d",
1797 (int) request
->port
);
1801 /* append Authorization if known in URL, not in header and going direct */
1802 if (!hdr_out
->has(HDR_AUTHORIZATION
)) {
1803 if (!request
->flags
.proxying
&& !request
->url
.userInfo().isEmpty()) {
1804 static uint8_t result
[base64_encode_len(MAX_URL
*2)]; // should be big enough for a single URI segment
1805 struct base64_encode_ctx ctx
;
1806 base64_encode_init(&ctx
);
1807 size_t blen
= base64_encode_update(&ctx
, result
, request
->url
.userInfo().length(), reinterpret_cast<const uint8_t*>(request
->url
.userInfo().rawContent()));
1808 blen
+= base64_encode_final(&ctx
, result
+blen
);
1809 result
[blen
] = '\0';
1811 httpHeaderPutStrf(hdr_out
, HDR_AUTHORIZATION
, "Basic %.*s", (int)blen
, result
);
1815 /* Fixup (Proxy-)Authorization special cases. Plain relaying dealt with above */
1816 httpFixupAuthentication(request
, hdr_in
, hdr_out
, flags
);
1818 /* append Cache-Control, add max-age if not there already */
1820 HttpHdrCc
*cc
= hdr_in
->getCc();
1823 cc
= new HttpHdrCc();
1825 #if 0 /* see bug 2330 */
1826 /* Set no-cache if determined needed but not found */
1827 if (request
->flags
.nocache
)
1828 EBIT_SET(cc
->mask
, CC_NO_CACHE
);
1831 /* Add max-age only without no-cache */
1832 if (!cc
->hasMaxAge() && !cc
->hasNoCache()) {
1834 entry
? entry
->url() : urlCanonical(request
);
1835 cc
->maxAge(getMaxAge(url
));
1839 /* Enforce sibling relations */
1840 if (flags
.only_if_cached
)
1841 cc
->onlyIfCached(true);
1848 /* maybe append Connection: keep-alive */
1849 if (flags
.keepalive
) {
1850 hdr_out
->putStr(HDR_CONNECTION
, "keep-alive");
1853 /* append Front-End-Https */
1854 if (flags
.front_end_https
) {
1855 if (flags
.front_end_https
== 1 || request
->url
.getScheme() == AnyP::PROTO_HTTPS
)
1856 hdr_out
->putStr(HDR_FRONT_END_HTTPS
, "On");
1859 if (flags
.chunked_request
) {
1860 // Do not just copy the original value so that if the client-side
1861 // starts decode other encodings, this code may remain valid.
1862 hdr_out
->putStr(HDR_TRANSFER_ENCODING
, "chunked");
1865 /* Now mangle the headers. */
1866 if (Config2
.onoff
.mangle_request_headers
)
1867 httpHdrMangleList(hdr_out
, request
, ROR_REQUEST
);
1869 if (Config
.request_header_add
&& !Config
.request_header_add
->empty())
1870 httpHdrAdd(hdr_out
, request
, al
, *Config
.request_header_add
);
1872 strConnection
.clean();
1876 * Decides whether a particular header may be cloned from the received Clients request
1877 * to our outgoing fetch request.
1880 copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry
*e
, const String strConnection
, const HttpRequest
* request
, HttpHeader
* hdr_out
, const int we_do_ranges
, const HttpStateFlags
&flags
)
1882 debugs(11, 5, "httpBuildRequestHeader: " << e
->name
<< ": " << e
->value
);
1886 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid should not pass on. */
1888 case HDR_PROXY_AUTHORIZATION
:
1889 /** \par Proxy-Authorization:
1890 * Only pass on proxy authentication to peers for which
1891 * authentication forwarding is explicitly enabled
1893 if (!flags
.originpeer
&& flags
.proxying
&& request
->peer_login
&&
1894 (strcmp(request
->peer_login
, "PASS") == 0 ||
1895 strcmp(request
->peer_login
, "PROXYPASS") == 0 ||
1896 strcmp(request
->peer_login
, "PASSTHRU") == 0)) {
1897 hdr_out
->addEntry(e
->clone());
1901 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid does not pass on. */
1903 case HDR_CONNECTION
: /** \par Connection: */
1904 case HDR_TE
: /** \par TE: */
1905 case HDR_KEEP_ALIVE
: /** \par Keep-Alive: */
1906 case HDR_PROXY_AUTHENTICATE
: /** \par Proxy-Authenticate: */
1907 case HDR_TRAILER
: /** \par Trailer: */
1908 case HDR_UPGRADE
: /** \par Upgrade: */
1909 case HDR_TRANSFER_ENCODING
: /** \par Transfer-Encoding: */
1912 /** \par OTHER headers I haven't bothered to track down yet. */
1914 case HDR_AUTHORIZATION
:
1915 /** \par WWW-Authorization:
1916 * Pass on WWW authentication */
1918 if (!flags
.originpeer
) {
1919 hdr_out
->addEntry(e
->clone());
1921 /** \note In accelerators, only forward authentication if enabled
1922 * (see also httpFixupAuthentication for special cases)
1924 if (request
->peer_login
&&
1925 (strcmp(request
->peer_login
, "PASS") == 0 ||
1926 strcmp(request
->peer_login
, "PASSTHRU") == 0 ||
1927 strcmp(request
->peer_login
, "PROXYPASS") == 0)) {
1928 hdr_out
->addEntry(e
->clone());
1936 * Normally Squid rewrites the Host: header.
1937 * However, there is one case when we don't: If the URL
1938 * went through our redirector and the admin configured
1939 * 'redir_rewrites_host' to be off.
1941 if (request
->peer_domain
)
1942 hdr_out
->putStr(HDR_HOST
, request
->peer_domain
);
1943 else if (request
->flags
.redirected
&& !Config
.onoff
.redir_rewrites_host
)
1944 hdr_out
->addEntry(e
->clone());
1946 /* use port# only if not default */
1948 if (request
->port
== urlDefaultPort(request
->url
.getScheme())) {
1949 hdr_out
->putStr(HDR_HOST
, request
->GetHost());
1951 httpHeaderPutStrf(hdr_out
, HDR_HOST
, "%s:%d",
1953 (int) request
->port
);
1959 case HDR_IF_MODIFIED_SINCE
:
1960 /** \par If-Modified-Since:
1961 * append unless we added our own,
1962 * but only if cache_miss_revalidate is enabled, or
1963 * the request is not cacheable, or
1964 * the request contains authentication credentials.
1965 * \note at most one client's If-Modified-Since header can pass through
1967 // XXX: need to check and cleanup the auth case so cacheable auth requests get cached.
1968 if (hdr_out
->has(HDR_IF_MODIFIED_SINCE
))
1970 else if (Config
.onoff
.cache_miss_revalidate
|| !request
->flags
.cachable
|| request
->flags
.auth
)
1971 hdr_out
->addEntry(e
->clone());
1974 case HDR_IF_NONE_MATCH
:
1975 /** \par If-None-Match:
1976 * append if the wildcard '*' special case value is present, or
1977 * cache_miss_revalidate is disabled, or
1978 * the request is not cacheable in this proxy, or
1979 * the request contains authentication credentials.
1980 * \note this header lists a set of responses for the server to elide sending. Squid added values are extending that set.
1982 // XXX: need to check and cleanup the auth case so cacheable auth requests get cached.
1983 if (hdr_out
->hasListMember(HDR_IF_MATCH
, "*", ',') || Config
.onoff
.cache_miss_revalidate
|| !request
->flags
.cachable
|| request
->flags
.auth
)
1984 hdr_out
->addEntry(e
->clone());
1987 case HDR_MAX_FORWARDS
:
1988 /** \par Max-Forwards:
1989 * pass only on TRACE or OPTIONS requests */
1990 if (request
->method
== Http::METHOD_TRACE
|| request
->method
== Http::METHOD_OPTIONS
) {
1991 const int64_t hops
= e
->getInt64();
1994 hdr_out
->putInt64(HDR_MAX_FORWARDS
, hops
- 1);
2001 * If Via is disabled then forward any received header as-is.
2002 * Otherwise leave for explicit updated addition later. */
2004 if (!Config
.onoff
.via
)
2005 hdr_out
->addEntry(e
->clone());
2013 case HDR_REQUEST_RANGE
:
2014 /** \par Range:, If-Range:, Request-Range:
2015 * Only pass if we accept ranges */
2017 hdr_out
->addEntry(e
->clone());
2021 case HDR_PROXY_CONNECTION
: // SHOULD ignore. But doing so breaks things.
2024 case HDR_CONTENT_LENGTH
:
2025 // pass through unless we chunk; also, keeping this away from default
2026 // prevents request smuggling via Connection: Content-Length tricks
2027 if (!flags
.chunked_request
)
2028 hdr_out
->addEntry(e
->clone());
2031 case HDR_X_FORWARDED_FOR
:
2033 case HDR_CACHE_CONTROL
:
2034 /** \par X-Forwarded-For:, Cache-Control:
2035 * handled specially by Squid, so leave off for now.
2036 * append these after the loop if needed */
2039 case HDR_FRONT_END_HTTPS
:
2040 /** \par Front-End-Https:
2041 * Pass thru only if peer is configured with front-end-https */
2042 if (!flags
.front_end_https
)
2043 hdr_out
->addEntry(e
->clone());
2049 * pass on all other header fields
2050 * which are NOT listed by the special Connection: header. */
2052 if (strConnection
.size()>0 && strListIsMember(&strConnection
, e
->name
.termedBuf(), ',')) {
2053 debugs(11, 2, "'" << e
->name
<< "' header cropped by Connection: definition");
2057 hdr_out
->addEntry(e
->clone());
2062 HttpStateData::decideIfWeDoRanges (HttpRequest
* request
)
2065 /* decide if we want to do Ranges ourselves
2066 * and fetch the whole object now)
2067 * We want to handle Ranges ourselves iff
2068 * - we can actually parse client Range specs
2069 * - the specs are expected to be simple enough (e.g. no out-of-order ranges)
2070 * - reply will be cachable
2071 * (If the reply will be uncachable we have to throw it away after
2072 * serving this request, so it is better to forward ranges to
2073 * the server and fetch only the requested content)
2076 int64_t roffLimit
= request
->getRangeOffsetLimit();
2078 if (NULL
== request
->range
|| !request
->flags
.cachable
2079 || request
->range
->offsetLimitExceeded(roffLimit
) || request
->flags
.connectionAuth
)
2082 debugs(11, 8, "decideIfWeDoRanges: range specs: " <<
2083 request
->range
<< ", cachable: " <<
2084 request
->flags
.cachable
<< "; we_do_ranges: " << result
);
2089 /* build request prefix and append it to a given MemBuf;
2090 * return the length of the prefix */
2092 HttpStateData::buildRequestPrefix(MemBuf
* mb
)
2094 const int offset
= mb
->size
;
2095 /* Uses a local httpver variable to print the HTTP label
2096 * since the HttpRequest may have an older version label.
2097 * XXX: This could create protocol bugs as the headers sent and
2098 * flow control should all be based on the HttpRequest version
2099 * not the one we are sending. Needs checking.
2101 const AnyP::ProtocolVersion httpver
= Http::ProtocolVersion();
2103 if (_peer
&& !_peer
->options
.originserver
)
2104 url
= urlCanonical(request
);
2106 url
= request
->urlpath
.termedBuf();
2107 mb
->Printf(SQUIDSBUFPH
" %s %s/%d.%d\r\n",
2108 SQUIDSBUFPRINT(request
->method
.image()),
2109 url
&& *url
? url
: "/",
2110 AnyP::ProtocolType_str
[httpver
.protocol
],
2111 httpver
.major
,httpver
.minor
);
2112 /* build and pack headers */
2114 HttpHeader
hdr(hoRequest
);
2116 httpBuildRequestHeader(request
, entry
, fwd
->al
, &hdr
, flags
);
2118 if (request
->flags
.pinned
&& request
->flags
.connectionAuth
)
2119 request
->flags
.authSent
= true;
2120 else if (hdr
.has(HDR_AUTHORIZATION
))
2121 request
->flags
.authSent
= true;
2123 packerToMemInit(&p
, mb
);
2128 /* append header terminator */
2129 mb
->append(crlf
, 2);
2130 return mb
->size
- offset
;
2133 /* This will be called when connect completes. Write request. */
2135 HttpStateData::sendRequest()
2139 debugs(11, 5, HERE
<< serverConnection
<< ", request " << request
<< ", this " << this << ".");
2141 if (!Comm::IsConnOpen(serverConnection
)) {
2142 debugs(11,3, HERE
<< "cannot send request to closing " << serverConnection
);
2143 assert(closeHandler
!= NULL
);
2147 typedef CommCbMemFunT
<HttpStateData
, CommTimeoutCbParams
> TimeoutDialer
;
2148 AsyncCall::Pointer timeoutCall
= JobCallback(11, 5,
2149 TimeoutDialer
, this, HttpStateData::httpTimeout
);
2150 commSetConnTimeout(serverConnection
, Config
.Timeout
.lifetime
, timeoutCall
);
2151 flags
.do_next_read
= true;
2152 maybeReadVirginBody();
2154 if (request
->body_pipe
!= NULL
) {
2155 if (!startRequestBodyFlow()) // register to receive body data
2157 typedef CommCbMemFunT
<HttpStateData
, CommIoCbParams
> Dialer
;
2158 requestSender
= JobCallback(11,5,
2159 Dialer
, this, HttpStateData::sentRequestBody
);
2161 Must(!flags
.chunked_request
);
2162 // use chunked encoding if we do not know the length
2163 if (request
->content_length
< 0)
2164 flags
.chunked_request
= true;
2166 assert(!requestBodySource
);
2167 typedef CommCbMemFunT
<HttpStateData
, CommIoCbParams
> Dialer
;
2168 requestSender
= JobCallback(11,5,
2169 Dialer
, this, HttpStateData::wroteLast
);
2172 flags
.originpeer
= (_peer
!= NULL
&& _peer
->options
.originserver
);
2173 flags
.proxying
= (_peer
!= NULL
&& !flags
.originpeer
);
2176 * Is keep-alive okay for all request methods?
2178 if (request
->flags
.mustKeepalive
)
2179 flags
.keepalive
= true;
2180 else if (request
->flags
.pinned
)
2181 flags
.keepalive
= request
->persistent();
2182 else if (!Config
.onoff
.server_pconns
)
2183 flags
.keepalive
= false;
2184 else if (_peer
== NULL
)
2185 flags
.keepalive
= true;
2186 else if (_peer
->stats
.n_keepalives_sent
< 10)
2187 flags
.keepalive
= true;
2188 else if ((double) _peer
->stats
.n_keepalives_recv
/
2189 (double) _peer
->stats
.n_keepalives_sent
> 0.50)
2190 flags
.keepalive
= true;
2193 /*The old code here was
2194 if (neighborType(_peer, request) == PEER_SIBLING && ...
2195 which is equivalent to:
2196 if (neighborType(_peer, NULL) == PEER_SIBLING && ...
2198 if (((_peer->type == PEER_MULTICAST && p->options.mcast_siblings) ||
2199 _peer->type == PEER_SIBLINGS ) && _peer->options.allow_miss)
2200 flags.only_if_cached = 1;
2202 But I suppose it was a bug
2204 if (neighborType(_peer
, request
) == PEER_SIBLING
&&
2205 !_peer
->options
.allow_miss
)
2206 flags
.only_if_cached
= true;
2208 flags
.front_end_https
= _peer
->front_end_https
;
2212 request
->peer_host
=_peer
?_peer
->host
:NULL
;
2213 buildRequestPrefix(&mb
);
2215 debugs(11, 2, "HTTP Server " << serverConnection
);
2216 debugs(11, 2, "HTTP Server REQUEST:\n---------\n" << mb
.buf
<< "\n----------");
2218 Comm::Write(serverConnection
, &mb
, requestSender
);
2223 HttpStateData::getMoreRequestBody(MemBuf
&buf
)
2225 // parent's implementation can handle the no-encoding case
2226 if (!flags
.chunked_request
)
2227 return Client::getMoreRequestBody(buf
);
2231 Must(requestBodySource
!= NULL
);
2232 if (!requestBodySource
->getMoreData(raw
))
2233 return false; // no request body bytes to chunk yet
2235 // optimization: pre-allocate buffer size that should be enough
2236 const mb_size_t rawDataSize
= raw
.contentSize();
2237 // we may need to send: hex-chunk-size CRLF raw-data CRLF last-chunk
2238 buf
.init(16 + 2 + rawDataSize
+ 2 + 5, raw
.max_capacity
);
2240 buf
.Printf("%x\r\n", static_cast<unsigned int>(rawDataSize
));
2241 buf
.append(raw
.content(), rawDataSize
);
2244 Must(rawDataSize
> 0); // we did not accidently created last-chunk above
2246 // Do not send last-chunk unless we successfully received everything
2247 if (receivedWholeRequestBody
) {
2248 Must(!flags
.sentLastChunk
);
2249 flags
.sentLastChunk
= true;
2250 buf
.append("0\r\n\r\n", 5);
2257 httpStart(FwdState
*fwd
)
2259 debugs(11, 3, fwd
->request
->method
<< ' ' << fwd
->entry
->url());
2260 AsyncJob::Start(new HttpStateData(fwd
));
2264 HttpStateData::start()
2266 if (!sendRequest()) {
2267 debugs(11, 3, "httpStart: aborted");
2268 mustStop("HttpStateData::start failed");
2272 ++ statCounter
.server
.all
.requests
;
2273 ++ statCounter
.server
.http
.requests
;
2276 * We used to set the read timeout here, but not any more.
2277 * Now its set in httpSendComplete() after the full request,
2278 * including request body, has been written to the server.
2282 /// if broken posts are enabled for the request, try to fix and return true
2284 HttpStateData::finishingBrokenPost()
2286 #if USE_HTTP_VIOLATIONS
2287 if (!Config
.accessList
.brokenPosts
) {
2288 debugs(11, 5, HERE
<< "No brokenPosts list");
2292 ACLFilledChecklist
ch(Config
.accessList
.brokenPosts
, originalRequest(), NULL
);
2293 if (ch
.fastCheck() != ACCESS_ALLOWED
) {
2294 debugs(11, 5, HERE
<< "didn't match brokenPosts");
2298 if (!Comm::IsConnOpen(serverConnection
)) {
2299 debugs(11, 3, HERE
<< "ignoring broken POST for closed " << serverConnection
);
2300 assert(closeHandler
!= NULL
);
2301 return true; // prevent caller from proceeding as if nothing happened
2304 debugs(11, 3, "finishingBrokenPost: fixing broken POST");
2305 typedef CommCbMemFunT
<HttpStateData
, CommIoCbParams
> Dialer
;
2306 requestSender
= JobCallback(11,5,
2307 Dialer
, this, HttpStateData::wroteLast
);
2308 Comm::Write(serverConnection
, "\r\n", 2, requestSender
, NULL
);
2312 #endif /* USE_HTTP_VIOLATIONS */
2315 /// if needed, write last-chunk to end the request body and return true
2317 HttpStateData::finishingChunkedRequest()
2319 if (flags
.sentLastChunk
) {
2320 debugs(11, 5, HERE
<< "already sent last-chunk");
2324 Must(receivedWholeRequestBody
); // or we should not be sending last-chunk
2325 flags
.sentLastChunk
= true;
2327 typedef CommCbMemFunT
<HttpStateData
, CommIoCbParams
> Dialer
;
2328 requestSender
= JobCallback(11,5, Dialer
, this, HttpStateData::wroteLast
);
2329 Comm::Write(serverConnection
, "0\r\n\r\n", 5, requestSender
, NULL
);
2334 HttpStateData::doneSendingRequestBody()
2336 Client::doneSendingRequestBody();
2337 debugs(11,5, HERE
<< serverConnection
);
2339 // do we need to write something after the last body byte?
2340 if (flags
.chunked_request
&& finishingChunkedRequest())
2342 if (!flags
.chunked_request
&& finishingBrokenPost())
2348 // more origin request body data is available
2350 HttpStateData::handleMoreRequestBodyAvailable()
2352 if (eof
|| !Comm::IsConnOpen(serverConnection
)) {
2353 // XXX: we should check this condition in other callbacks then!
2354 // TODO: Check whether this can actually happen: We should unsubscribe
2355 // as a body consumer when the above condition(s) are detected.
2356 debugs(11, DBG_IMPORTANT
, HERE
<< "Transaction aborted while reading HTTP body");
2360 assert(requestBodySource
!= NULL
);
2362 if (requestBodySource
->buf().hasContent()) {
2363 // XXX: why does not this trigger a debug message on every request?
2365 if (flags
.headers_parsed
&& !flags
.abuse_detected
) {
2366 flags
.abuse_detected
= true;
2367 debugs(11, DBG_IMPORTANT
, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << request
->client_addr
<< "' -> '" << entry
->url() << "'" );
2369 if (virginReply()->sline
.status() == Http::scInvalidHeader
) {
2370 serverConnection
->close();
2376 HttpStateData::handleMoreRequestBodyAvailable();
2379 // premature end of the request body
2381 HttpStateData::handleRequestBodyProducerAborted()
2383 Client::handleRequestBodyProducerAborted();
2384 if (entry
->isEmpty()) {
2385 debugs(11, 3, "request body aborted: " << serverConnection
);
2386 // We usually get here when ICAP REQMOD aborts during body processing.
2387 // We might also get here if client-side aborts, but then our response
2388 // should not matter because either client-side will provide its own or
2389 // there will be no response at all (e.g., if the the client has left).
2390 ErrorState
*err
= new ErrorState(ERR_ICAP_FAILURE
, Http::scInternalServerError
, fwd
->request
);
2391 err
->detailError(ERR_DETAIL_SRV_REQMOD_REQ_BODY
);
2395 abortTransaction("request body producer aborted");
2398 // called when we wrote request headers(!) or a part of the body
2400 HttpStateData::sentRequestBody(const CommIoCbParams
&io
)
2403 kb_incr(&statCounter
.server
.http
.kbytes_out
, io
.size
);
2405 Client::sentRequestBody(io
);
2408 // Quickly abort the transaction
2409 // TODO: destruction should be sufficient as the destructor should cleanup,
2410 // including canceling close handlers
2412 HttpStateData::abortTransaction(const char *reason
)
2414 debugs(11,5, HERE
<< "aborting transaction for " << reason
<<
2415 "; " << serverConnection
<< ", this " << this);
2417 if (Comm::IsConnOpen(serverConnection
)) {
2418 serverConnection
->close();
2422 fwd
->handleUnregisteredServerEnd();
2423 mustStop("HttpStateData::abortTransaction");