/*
 * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 11    Hypertext Transfer Protocol (HTTP) */

/*
 * Anonymizing patch by lutz@as-node.jena.thur.de
 * have a look into http-anon.c to get more information.
 */
17 #include "acl/FilledChecklist.h"
18 #include "base/AsyncJobCalls.h"
19 #include "base/TextException.h"
21 #include "CachePeer.h"
22 #include "client_side.h"
23 #include "comm/Connection.h"
24 #include "comm/Read.h"
25 #include "comm/Write.h"
27 #include "err_detail_type.h"
28 #include "errorpage.h"
33 #include "http/one/ResponseParser.h"
34 #include "http/one/TeChunkedParser.h"
35 #include "http/Stream.h"
36 #include "HttpControlMsg.h"
37 #include "HttpHdrCc.h"
38 #include "HttpHdrContRange.h"
39 #include "HttpHdrSc.h"
40 #include "HttpHdrScTarget.h"
41 #include "HttpHeaderTools.h"
42 #include "HttpReply.h"
43 #include "HttpRequest.h"
44 #include "log/access_log.h"
46 #include "MemObject.h"
47 #include "neighbors.h"
49 #include "peer_proxy_negotiate_auth.h"
50 #include "profiler/Profiler.h"
52 #include "RefreshPattern.h"
54 #include "SquidConfig.h"
55 #include "SquidTime.h"
56 #include "StatCounters.h"
63 #include "auth/UserRequest.h"
66 #include "DelayPools.h"
/* Wrap header-building code that may throw: ENTER opens a try block and
 * EXIT closes it, recording success/failure in the caller-supplied flag.
 * NOTE(review): the continuation lines were dropped by the paste; restored
 * so the macro pair expands to a complete try/catch — confirm against the
 * original file. */
#define SQUID_ENTER_THROWING_CODE() try {
#define SQUID_EXIT_THROWING_CODE(status) \
    status = true; \
    } \
    catch (const std::exception &e) { \
    debugs (11, 1, "Exception error:" << e.what()); \
    status = false; \
    }
78 CBDATA_CLASS_INIT(HttpStateData
);
// canonical HTTP line terminator used when (re)assembling header text
static const char *const crlf = "\r\n";
82 static void httpMaybeRemovePublic(StoreEntry
*, Http::StatusCode
);
83 static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry
*e
, const String strConnection
, const HttpRequest
* request
,
84 HttpHeader
* hdr_out
, const int we_do_ranges
, const Http::StateFlags
&);
86 HttpStateData::HttpStateData(FwdState
*theFwdState
) :
87 AsyncJob("HttpStateData"),
90 httpChunkDecoder(NULL
),
95 debugs(11,5,HERE
<< "HttpStateData " << this << " created");
96 ignoreCacheControl
= false;
97 surrogateNoStore
= false;
98 serverConnection
= fwd
->serverConnection();
100 if (fwd
->serverConnection() != NULL
)
101 _peer
= cbdataReference(fwd
->serverConnection()->getPeer()); /* might be NULL */
103 flags
.peering
= _peer
;
104 flags
.tunneling
= (_peer
&& request
->flags
.sslBumped
);
105 flags
.toOrigin
= (!_peer
|| _peer
->options
.originserver
|| request
->flags
.sslBumped
);
109 * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
110 * We might end up getting the object from somewhere else if,
111 * for example, the request to this neighbor fails.
113 if (!flags
.tunneling
&& _peer
->options
.proxy_only
)
114 entry
->releaseRequest(true);
117 entry
->setNoDelay(_peer
->options
.no_delay
);
122 * register the handler to free HTTP state data when the FD closes
124 typedef CommCbMemFunT
<HttpStateData
, CommCloseCbParams
> Dialer
;
125 closeHandler
= JobCallback(9, 5, Dialer
, this, HttpStateData::httpStateConnClosed
);
126 comm_add_close_handler(serverConnection
->fd
, closeHandler
);
129 HttpStateData::~HttpStateData()
132 * don't forget that ~Client() gets called automatically
135 if (httpChunkDecoder
)
136 delete httpChunkDecoder
;
138 cbdataReferenceDone(_peer
);
140 debugs(11,5, HERE
<< "HttpStateData " << this << " destroyed; " << serverConnection
);
143 const Comm::ConnectionPointer
&
144 HttpStateData::dataConnection() const
146 return serverConnection
;
150 HttpStateData::httpStateConnClosed(const CommCloseCbParams
¶ms
)
152 debugs(11, 5, "httpStateFree: FD " << params
.fd
<< ", httpState=" << params
.data
);
153 doneWithFwd
= "httpStateConnClosed()"; // assume FwdState is monitoring too
154 mustStop("HttpStateData::httpStateConnClosed");
158 HttpStateData::httpTimeout(const CommTimeoutCbParams
&)
160 debugs(11, 4, serverConnection
<< ": '" << entry
->url() << "'");
162 if (entry
->store_status
== STORE_PENDING
) {
163 fwd
->fail(new ErrorState(ERR_READ_TIMEOUT
, Http::scGatewayTimeout
, fwd
->request
, fwd
->al
));
167 mustStop("HttpStateData::httpTimeout");
171 findPreviouslyCachedEntry(StoreEntry
*newEntry
) {
172 assert(newEntry
->mem_obj
);
173 return newEntry
->mem_obj
->request
?
174 storeGetPublicByRequest(newEntry
->mem_obj
->request
.getRaw()) :
175 storeGetPublic(newEntry
->mem_obj
->storeId(), newEntry
->mem_obj
->method
);
178 /// Remove an existing public store entry if the incoming response (to be
179 /// stored in a currently private entry) is going to invalidate it.
181 httpMaybeRemovePublic(StoreEntry
* e
, Http::StatusCode status
)
186 // If the incoming response already goes into a public entry, then there is
187 // nothing to remove. This protects ready-for-collapsing entries as well.
188 if (!EBIT_TEST(e
->flags
, KEY_PRIVATE
))
191 // If the new/incoming response cannot be stored, then it does not
192 // compete with the old stored response for the public key, and the
193 // old stored response should be left as is.
194 if (e
->mem_obj
->request
&& !e
->mem_obj
->request
->flags
.cachable
)
201 case Http::scNonAuthoritativeInformation
:
203 case Http::scMultipleChoices
:
205 case Http::scMovedPermanently
:
209 case Http::scSeeOther
:
213 case Http::scNotFound
:
218 case Http::scForbidden
:
220 case Http::scMethodNotAllowed
:
227 case Http::scUnauthorized
:
237 * Any 2xx response should eject previously cached entities...
240 if (status
>= 200 && status
< 300)
248 if (!remove
&& !forbidden
)
251 StoreEntry
*pe
= findPreviouslyCachedEntry(e
);
256 neighborsHtcpClear(e
, nullptr, e
->mem_obj
->request
.getRaw(), e
->mem_obj
->method
, HTCP_CLR_INVALIDATION
);
262 * Also remove any cached HEAD response in case the object has
265 if (e
->mem_obj
->request
)
266 pe
= storeGetPublicByRequestMethod(e
->mem_obj
->request
.getRaw(), Http::METHOD_HEAD
);
268 pe
= storeGetPublic(e
->mem_obj
->storeId(), Http::METHOD_HEAD
);
273 neighborsHtcpClear(e
, nullptr, e
->mem_obj
->request
.getRaw(), HttpRequestMethod(Http::METHOD_HEAD
), HTCP_CLR_INVALIDATION
);
280 HttpStateData::processSurrogateControl(HttpReply
*reply
)
282 if (request
->flags
.accelerated
&& reply
->surrogate_control
) {
283 HttpHdrScTarget
*sctusable
= reply
->surrogate_control
->getMergedTarget(Config
.Accel
.surrogate_id
);
286 if (sctusable
->hasNoStore() ||
287 (Config
.onoff
.surrogate_is_remote
288 && sctusable
->noStoreRemote())) {
289 surrogateNoStore
= true;
290 // Be conservative for now and make it non-shareable because
291 // there is no enough information here to make the decision.
292 entry
->makePrivate(false);
295 /* The HttpHeader logic cannot tell if the header it's parsing is a reply to an
296 * accelerated request or not...
297 * Still, this is an abstraction breach. - RC
299 if (sctusable
->hasMaxAge()) {
300 if (sctusable
->maxAge() < sctusable
->maxStale())
301 reply
->expires
= reply
->date
+ sctusable
->maxAge();
303 reply
->expires
= reply
->date
+ sctusable
->maxStale();
305 /* And update the timestamps */
306 entry
->timestampsSet();
309 /* We ignore cache-control directives as per the Surrogate specification */
310 ignoreCacheControl
= true;
317 HttpStateData::ReuseDecision::Answers
318 HttpStateData::reusableReply(HttpStateData::ReuseDecision
&decision
)
320 HttpReply
const *rep
= finalReply();
321 HttpHeader
const *hdr
= &rep
->header
;
323 #if USE_HTTP_VIOLATIONS
325 const RefreshPattern
*R
= NULL
;
327 /* This strange looking define first looks up the refresh pattern
328 * and then checks if the specified flag is set. The main purpose
329 * of this is to simplify the refresh pattern lookup and USE_HTTP_VIOLATIONS
332 #define REFRESH_OVERRIDE(flag) \
333 ((R = (R ? R : refreshLimits(entry->mem_obj->storeId()))) , \
334 (R && R->flags.flag))
336 #define REFRESH_OVERRIDE(flag) 0
339 if (EBIT_TEST(entry
->flags
, RELEASE_REQUEST
))
340 return decision
.make(ReuseDecision::doNotCacheButShare
, "the entry has been released");
342 // RFC 7234 section 4: a cache MUST use the most recent response
343 // (as determined by the Date header field)
344 // TODO: whether such responses could be shareable?
346 return decision
.make(ReuseDecision::reuseNot
, "the response has an older date header");
348 // Check for Surrogate/1.0 protocol conditions
349 // NP: reverse-proxy traffic our parent server has instructed us never to cache
350 if (surrogateNoStore
)
351 return decision
.make(ReuseDecision::reuseNot
, "Surrogate-Control:no-store");
353 // RFC 2616: HTTP/1.1 Cache-Control conditions
354 if (!ignoreCacheControl
) {
355 // XXX: check to see if the request headers alone were enough to prevent caching earlier
356 // (ie no-store request header) no need to check those all again here if so.
357 // for now we are not reliably doing that so we waste CPU re-checking request CC
359 // RFC 2616 section 14.9.2 - MUST NOT cache any response with request CC:no-store
360 if (request
&& request
->cache_control
&& request
->cache_control
->hasNoStore() &&
361 !REFRESH_OVERRIDE(ignore_no_store
))
362 return decision
.make(ReuseDecision::reuseNot
,
363 "client request Cache-Control:no-store");
365 // NP: request CC:no-cache only means cache READ is forbidden. STORE is permitted.
366 if (rep
->cache_control
&& rep
->cache_control
->hasNoCacheWithParameters()) {
367 /* TODO: we are allowed to cache when no-cache= has parameters.
368 * Provided we strip away any of the listed headers unless they are revalidated
369 * successfully (ie, must revalidate AND these headers are prohibited on stale replies).
370 * That is a bit tricky for squid right now so we avoid caching entirely.
372 return decision
.make(ReuseDecision::reuseNot
,
373 "server reply Cache-Control:no-cache has parameters");
376 // NP: request CC:private is undefined. We ignore.
377 // NP: other request CC flags are limiters on HIT/MISS. We don't care about here.
379 // RFC 2616 section 14.9.2 - MUST NOT cache any response with CC:no-store
380 if (rep
->cache_control
&& rep
->cache_control
->hasNoStore() &&
381 !REFRESH_OVERRIDE(ignore_no_store
))
382 return decision
.make(ReuseDecision::reuseNot
,
383 "server reply Cache-Control:no-store");
385 // RFC 2616 section 14.9.1 - MUST NOT cache any response with CC:private in a shared cache like Squid.
386 // CC:private overrides CC:public when both are present in a response.
387 // TODO: add a shared/private cache configuration possibility.
388 if (rep
->cache_control
&&
389 rep
->cache_control
->hasPrivate() &&
390 !REFRESH_OVERRIDE(ignore_private
)) {
391 /* TODO: we are allowed to cache when private= has parameters.
392 * Provided we strip away any of the listed headers unless they are revalidated
393 * successfully (ie, must revalidate AND these headers are prohibited on stale replies).
394 * That is a bit tricky for squid right now so we avoid caching entirely.
396 return decision
.make(ReuseDecision::reuseNot
,
397 "server reply Cache-Control:private");
401 // RFC 2068, sec 14.9.4 - MUST NOT cache any response with Authentication UNLESS certain CC controls are present
402 // allow HTTP violations to IGNORE those controls (ie re-block caching Auth)
403 if (request
&& (request
->flags
.auth
|| request
->flags
.authSent
)) {
404 if (!rep
->cache_control
)
405 return decision
.make(ReuseDecision::reuseNot
,
406 "authenticated and server reply missing Cache-Control");
408 if (ignoreCacheControl
)
409 return decision
.make(ReuseDecision::reuseNot
,
410 "authenticated and ignoring Cache-Control");
412 bool mayStore
= false;
413 // HTTPbis pt6 section 3.2: a response CC:public is present
414 if (rep
->cache_control
->hasPublic()) {
415 debugs(22, 3, HERE
<< "Authenticated but server reply Cache-Control:public");
418 // HTTPbis pt6 section 3.2: a response CC:must-revalidate is present
419 } else if (rep
->cache_control
->hasMustRevalidate()) {
420 debugs(22, 3, HERE
<< "Authenticated but server reply Cache-Control:must-revalidate");
423 #if USE_HTTP_VIOLATIONS
424 // NP: given the must-revalidate exception we should also be able to exempt no-cache.
425 // HTTPbis WG verdict on this is that it is omitted from the spec due to being 'unexpected' by
426 // some. The caching+revalidate is not exactly unsafe though with Squids interpretation of no-cache
427 // (without parameters) as equivalent to must-revalidate in the reply.
428 } else if (rep
->cache_control
->hasNoCacheWithoutParameters()) {
429 debugs(22, 3, HERE
<< "Authenticated but server reply Cache-Control:no-cache (equivalent to must-revalidate)");
433 // HTTPbis pt6 section 3.2: a response CC:s-maxage is present
434 } else if (rep
->cache_control
->hasSMaxAge()) {
435 debugs(22, 3, HERE
<< "Authenticated but server reply Cache-Control:s-maxage");
440 return decision
.make(ReuseDecision::reuseNot
, "authenticated transaction");
442 // NP: response CC:no-cache is equivalent to CC:must-revalidate,max-age=0. We MAY cache, and do so.
443 // NP: other request CC flags are limiters on HIT/MISS/REFRESH. We don't care about here.
446 /* HACK: The "multipart/x-mixed-replace" content type is used for
447 * continuous push replies. These are generally dynamic and
448 * probably should not be cachable
450 if ((v
= hdr
->getStr(Http::HdrType::CONTENT_TYPE
)))
451 if (!strncasecmp(v
, "multipart/x-mixed-replace", 25))
452 return decision
.make(ReuseDecision::reuseNot
, "Content-Type:multipart/x-mixed-replace");
454 // TODO: if possible, provide more specific message for each status code
455 static const char *shareableError
= "shareable error status code";
456 static const char *nonShareableError
= "non-shareable error status code";
457 ReuseDecision::Answers statusAnswer
= ReuseDecision::reuseNot
;
458 const char *statusReason
= nonShareableError
;
460 switch (rep
->sline
.status()) {
462 /* There are several situations when a non-cacheable response may be
463 * still shareable (e.g., among collapsed clients). We assume that these
464 * are 3xx and 5xx responses, indicating server problems and some of
465 * 4xx responses, common for all clients with a given cache key (e.g.,
466 * 404 Not Found or 414 URI Too Long). On the other hand, we should not
467 * share non-cacheable client-specific errors, such as 400 Bad Request
468 * or 406 Not Acceptable.
471 /* Responses that are cacheable */
475 case Http::scNonAuthoritativeInformation
:
477 case Http::scMultipleChoices
:
479 case Http::scMovedPermanently
:
480 case Http::scPermanentRedirect
:
484 * Don't cache objects that need to be refreshed on next request,
485 * unless we know how to refresh it.
488 if (refreshIsCachable(entry
) || REFRESH_OVERRIDE(store_stale
))
489 decision
.make(ReuseDecision::cachePositively
, "refresh check returned cacheable");
491 decision
.make(ReuseDecision::doNotCacheButShare
, "refresh check returned non-cacheable");
494 /* Responses that only are cacheable if the server says so */
497 case Http::scTemporaryRedirect
:
499 decision
.make(ReuseDecision::doNotCacheButShare
, "Date is missing/invalid");
500 else if (rep
->expires
> rep
->date
)
501 decision
.make(ReuseDecision::cachePositively
, "Expires > Date");
503 decision
.make(ReuseDecision::doNotCacheButShare
, "Expires <= Date");
506 /* These responses can be negatively cached. Most can also be shared. */
507 case Http::scNoContent
:
508 case Http::scUseProxy
:
509 case Http::scForbidden
:
510 case Http::scNotFound
:
511 case Http::scMethodNotAllowed
:
512 case Http::scUriTooLong
:
513 case Http::scInternalServerError
:
514 case Http::scNotImplemented
:
515 case Http::scBadGateway
:
516 case Http::scServiceUnavailable
:
517 case Http::scGatewayTimeout
:
518 case Http::scMisdirectedRequest
:
519 statusAnswer
= ReuseDecision::doNotCacheButShare
;
520 statusReason
= shareableError
;
521 // fall through to the actual decision making below
523 case Http::scBadRequest
: // no sharing; perhaps the server did not like something specific to this request
524 #if USE_HTTP_VIOLATIONS
525 if (Config
.negativeTtl
> 0)
526 decision
.make(ReuseDecision::cacheNegatively
, "Config.negativeTtl > 0");
529 decision
.make(statusAnswer
, statusReason
);
532 /* these responses can never be cached, some
533 of them can be shared though */
534 case Http::scSeeOther
:
535 case Http::scNotModified
:
536 case Http::scUnauthorized
:
537 case Http::scProxyAuthenticationRequired
:
538 case Http::scPaymentRequired
:
539 case Http::scInsufficientStorage
:
540 // TODO: use more specific reason for non-error status codes
541 decision
.make(ReuseDecision::doNotCacheButShare
, shareableError
);
544 case Http::scPartialContent
: /* Not yet supported. TODO: make shareable for suitable ranges */
545 case Http::scNotAcceptable
:
546 case Http::scRequestTimeout
: // TODO: is this shareable?
547 case Http::scConflict
: // TODO: is this shareable?
548 case Http::scLengthRequired
:
549 case Http::scPreconditionFailed
:
550 case Http::scPayloadTooLarge
:
551 case Http::scUnsupportedMediaType
:
552 case Http::scUnprocessableEntity
:
553 case Http::scLocked
: // TODO: is this shareable?
554 case Http::scFailedDependency
:
555 case Http::scRequestedRangeNotSatisfied
:
556 case Http::scExpectationFailed
:
557 case Http::scInvalidHeader
: /* Squid header parsing error */
558 case Http::scHeaderTooLarge
:
559 decision
.make(ReuseDecision::reuseNot
, nonShareableError
);
563 /* RFC 2616 section 6.1.1: an unrecognized response MUST NOT be cached. */
564 decision
.make(ReuseDecision::reuseNot
, "unknown status code");
568 return decision
.answer
;
571 /// assemble a variant key (vary-mark) from the given Vary header and HTTP request
573 assembleVaryKey(String
&vary
, SBuf
&vstr
, const HttpRequest
&request
)
575 static const SBuf
asterisk("*");
576 const char *pos
= nullptr;
577 const char *item
= nullptr;
580 while (strListGetItem(&vary
, ',', &item
, &ilen
, &pos
)) {
581 SBuf
name(item
, ilen
);
582 if (name
== asterisk
) {
588 vstr
.append(", ", 2);
590 String
hdr(request
.header
.getByName(name
));
591 const char *value
= hdr
.termedBuf();
593 value
= rfc1738_escape_part(value
);
594 vstr
.append("=\"", 2);
596 vstr
.append("\"", 1);
604 * For Vary, store the relevant request headers as
605 * virtual headers in the reply
606 * Returns an empty SBuf if the variance cannot be stored
609 httpMakeVaryMark(HttpRequest
* request
, HttpReply
const * reply
)
614 vary
= reply
->header
.getList(Http::HdrType::VARY
);
615 assembleVaryKey(vary
, vstr
, *request
);
617 #if X_ACCELERATOR_VARY
619 vary
= reply
->header
.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY
);
620 assembleVaryKey(vary
, vstr
, *request
);
628 HttpStateData::keepaliveAccounting(HttpReply
*reply
)
631 if (flags
.peering
&& !flags
.tunneling
)
632 ++ _peer
->stats
.n_keepalives_sent
;
634 if (reply
->keep_alive
) {
635 if (flags
.peering
&& !flags
.tunneling
)
636 ++ _peer
->stats
.n_keepalives_recv
;
638 if (Config
.onoff
.detect_broken_server_pconns
639 && reply
->bodySize(request
->method
) == -1 && !flags
.chunked
) {
640 debugs(11, DBG_IMPORTANT
, "keepaliveAccounting: Impossible keep-alive header from '" << entry
->url() << "'" );
641 // debugs(11, 2, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------" );
642 flags
.keepalive_broken
= true;
648 HttpStateData::checkDateSkew(HttpReply
*reply
)
650 if (reply
->date
> -1 && flags
.toOrigin
) {
651 int skew
= abs((int)(reply
->date
- squid_curtime
));
654 debugs(11, 3, "" << request
->url
.host() << "'s clock is skewed by " << skew
<< " seconds!");
659 * This creates the error page itself.. its likely
660 * that the forward ported reply header max size patch
661 * generates non http conformant error pages - in which
662 * case the errors where should be 'BAD_GATEWAY' etc
665 HttpStateData::processReplyHeader()
667 /** Creates a blank header. If this routine is made incremental, this will not do */
669 /* NP: all exit points to this function MUST call ctx_exit(ctx) */
670 Ctx ctx
= ctx_enter(entry
->mem_obj
->urlXXX());
672 debugs(11, 3, "processReplyHeader: key '" << entry
->getMD5Text() << "'");
674 assert(!flags
.headers_parsed
);
676 if (!inBuf
.length()) {
681 /* Attempt to parse the first line; this will define where the protocol, status, reason-phrase and header begin */
684 hp
= new Http1::ResponseParser
;
686 bool parsedOk
= hp
->parse(inBuf
);
688 // sync the buffers after parsing.
689 inBuf
= hp
->remaining();
691 if (hp
->needsMoreData()) {
692 if (eof
) { // no more data coming
693 /* Bug 2879: Replies may terminate with \r\n then EOF instead of \r\n\r\n.
694 * We also may receive truncated responses.
695 * Ensure here that we have at minimum two \r\n when EOF is seen.
697 inBuf
.append("\r\n\r\n", 4);
699 parsedOk
= hp
->parse(inBuf
);
700 // sync the buffers after parsing.
701 inBuf
= hp
->remaining();
703 debugs(33, 5, "Incomplete response, waiting for end of response headers");
710 // unrecoverable parsing error
711 // TODO: Use Raw! XXX: inBuf no longer has the [beginning of the] malformed header.
712 debugs(11, 3, "Non-HTTP-compliant header:\n---------\n" << inBuf
<< "\n----------");
713 flags
.headers_parsed
= true;
714 HttpReply
*newrep
= new HttpReply
;
715 newrep
->sline
.set(Http::ProtocolVersion(), hp
->parseStatusCode
);
716 setVirginReply(newrep
);
722 /* We know the whole response is in parser now */
723 debugs(11, 2, "HTTP Server " << serverConnection
);
724 debugs(11, 2, "HTTP Server RESPONSE:\n---------\n" <<
725 hp
->messageProtocol() << " " << hp
->messageStatus() << " " << hp
->reasonPhrase() << "\n" <<
729 // reset payload tracking to begin after message headers
730 payloadSeen
= inBuf
.length();
732 HttpReply
*newrep
= new HttpReply
;
733 // XXX: RFC 7230 indicates we MAY ignore the reason phrase,
734 // and use an empty string on unknown status.
735 // We do that now to avoid performance regression from using SBuf::c_str()
736 newrep
->sline
.set(Http::ProtocolVersion(1,1), hp
->messageStatus() /* , hp->reasonPhrase() */);
737 newrep
->sline
.protocol
= newrep
->sline
.version
.protocol
= hp
->messageProtocol().protocol
;
738 newrep
->sline
.version
.major
= hp
->messageProtocol().major
;
739 newrep
->sline
.version
.minor
= hp
->messageProtocol().minor
;
742 if (!newrep
->parseHeader(*hp
)) {
743 // XXX: when Http::ProtocolVersion is a function, remove this hack. just set with messageProtocol()
744 newrep
->sline
.set(Http::ProtocolVersion(), Http::scInvalidHeader
);
745 newrep
->sline
.version
.protocol
= hp
->messageProtocol().protocol
;
746 newrep
->sline
.version
.major
= hp
->messageProtocol().major
;
747 newrep
->sline
.version
.minor
= hp
->messageProtocol().minor
;
748 debugs(11, 2, "error parsing response headers mime block");
751 // done with Parser, now process using the HttpReply
754 newrep
->sources
|= request
->url
.getScheme() == AnyP::PROTO_HTTPS
? Http::Message::srcHttps
: Http::Message::srcHttp
;
756 newrep
->removeStaleWarnings();
758 if (newrep
->sline
.protocol
== AnyP::PROTO_HTTP
&& Http::Is1xx(newrep
->sline
.status())) {
764 flags
.chunked
= false;
765 if (newrep
->sline
.protocol
== AnyP::PROTO_HTTP
&& newrep
->header
.chunked()) {
766 flags
.chunked
= true;
767 httpChunkDecoder
= new Http1::TeChunkedParser
;
770 if (!peerSupportsConnectionPinning())
771 request
->flags
.connectionAuthDisabled
= true;
773 HttpReply
*vrep
= setVirginReply(newrep
);
774 flags
.headers_parsed
= true;
776 keepaliveAccounting(vrep
);
780 processSurrogateControl (vrep
);
782 request
->hier
.peer_reply_status
= newrep
->sline
.status();
787 /// ignore or start forwarding the 1xx response (a.k.a., control message)
789 HttpStateData::handle1xx(HttpReply
*reply
)
792 fwd
->al
->reply
= reply
;
794 HttpReply::Pointer
msg(reply
); // will destroy reply if unused
796 // one 1xx at a time: we must not be called while waiting for previous 1xx
797 Must(!flags
.handling1xx
);
798 flags
.handling1xx
= true;
800 if (!request
->canHandle1xx() || request
->forcedBodyContinuation
) {
801 debugs(11, 2, "ignoring 1xx because it is " << (request
->forcedBodyContinuation
? "already sent" : "not supported by client"));
806 #if USE_HTTP_VIOLATIONS
807 // check whether the 1xx response forwarding is allowed by squid.conf
808 if (Config
.accessList
.reply
) {
809 ACLFilledChecklist
ch(Config
.accessList
.reply
, originalRequest().getRaw());
812 ch
.syncAle(originalRequest().getRaw(), nullptr);
813 HTTPMSGLOCK(ch
.reply
);
814 if (!ch
.fastCheck().allowed()) { // TODO: support slow lookups?
815 debugs(11, 3, HERE
<< "ignoring denied 1xx");
820 #endif // USE_HTTP_VIOLATIONS
822 debugs(11, 2, HERE
<< "forwarding 1xx to client");
824 // the Sink will use this to call us back after writing 1xx to the client
825 typedef NullaryMemFunT
<HttpStateData
> CbDialer
;
826 const AsyncCall::Pointer cb
= JobCallback(11, 3, CbDialer
, this,
827 HttpStateData::proceedAfter1xx
);
828 CallJobHere1(11, 4, request
->clientConnectionManager
, ConnStateData
,
829 ConnStateData::sendControlMsg
, HttpControlMsg(msg
, cb
));
830 // If the call is not fired, then the Sink is gone, and HttpStateData
831 // will terminate due to an aborted store entry or another similar error.
832 // If we get stuck, it is not handle1xx fault if we could get stuck
833 // for similar reasons without a 1xx response.
836 /// restores state and resumes processing after 1xx is ignored or forwarded
838 HttpStateData::proceedAfter1xx()
840 Must(flags
.handling1xx
);
841 debugs(11, 2, "continuing with " << payloadSeen
<< " bytes in buffer after 1xx");
842 CallJobHere(11, 3, this, HttpStateData
, HttpStateData::processReply
);
846 * returns true if the peer can support connection pinning
849 HttpStateData::peerSupportsConnectionPinning() const
854 // we are talking "through" rather than "to" our _peer
858 /*If this peer does not support connection pinning (authenticated
859 connections) return false
861 if (!_peer
->connection_auth
)
864 const HttpReplyPointer
rep(entry
->mem_obj
->getReply());
866 /*The peer supports connection pinning and the http reply status
867 is not unauthorized, so the related connection can be pinned
869 if (rep
->sline
.status() != Http::scUnauthorized
)
872 /*The server respond with Http::scUnauthorized and the peer configured
873 with "connection-auth=on" we know that the peer supports pinned
876 if (_peer
->connection_auth
== 1)
879 /*At this point peer has configured with "connection-auth=auto"
880 parameter so we need some extra checks to decide if we are going
881 to allow pinned connections or not
884 /*if the peer configured with originserver just allow connection
885 pinning (squid 2.6 behaviour)
887 if (_peer
->options
.originserver
)
890 /*if the connections it is already pinned it is OK*/
891 if (request
->flags
.pinned
)
894 /*Allow pinned connections only if the Proxy-support header exists in
895 reply and has in its list the "Session-Based-Authentication"
896 which means that the peer supports connection pinning.
898 if (rep
->header
.hasListMember(Http::HdrType::PROXY_SUPPORT
, "Session-Based-Authentication", ','))
904 // Called when we parsed (and possibly adapted) the headers but
905 // had not starting storing (a.k.a., sending) the body yet.
907 HttpStateData::haveParsedReplyHeaders()
909 Client::haveParsedReplyHeaders();
911 Ctx ctx
= ctx_enter(entry
->mem_obj
->urlXXX());
912 HttpReply
*rep
= finalReply();
913 const Http::StatusCode statusCode
= rep
->sline
.status();
915 entry
->timestampsSet();
917 /* Check if object is cacheable or not based on reply code */
918 debugs(11, 3, "HTTP CODE: " << statusCode
);
920 if (StoreEntry
*oldEntry
= findPreviouslyCachedEntry(entry
)) {
921 oldEntry
->lock("HttpStateData::haveParsedReplyHeaders");
922 sawDateGoBack
= rep
->olderThan(oldEntry
->getReply());
923 oldEntry
->unlock("HttpStateData::haveParsedReplyHeaders");
926 if (neighbors_do_private_keys
&& !sawDateGoBack
)
927 httpMaybeRemovePublic(entry
, rep
->sline
.status());
929 bool varyFailure
= false;
930 if (rep
->header
.has(Http::HdrType::VARY
)
931 #if X_ACCELERATOR_VARY
932 || rep
->header
.has(Http::HdrType::HDR_X_ACCELERATOR_VARY
)
935 const SBuf
vary(httpMakeVaryMark(request
.getRaw(), rep
));
937 if (vary
.isEmpty()) {
938 // TODO: check whether such responses are shareable.
939 // Do not share for now.
940 entry
->makePrivate(false);
941 if (fwd
->reforwardableStatus(rep
->sline
.status()))
942 EBIT_SET(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
945 entry
->mem_obj
->vary_headers
= vary
;
947 // RFC 7231 section 7.1.4
948 // Vary:* can be cached, but has mandatory revalidation
949 static const SBuf
asterisk("*");
950 if (vary
== asterisk
)
951 EBIT_SET(entry
->flags
, ENTRY_REVALIDATE_ALWAYS
);
957 * If its not a reply that we will re-forward, then
958 * allow the client to get it.
960 if (fwd
->reforwardableStatus(rep
->sline
.status()))
961 EBIT_SET(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
963 ReuseDecision
decision(entry
, statusCode
);
965 switch (reusableReply(decision
)) {
967 case ReuseDecision::reuseNot
:
968 entry
->makePrivate(false);
971 case ReuseDecision::cachePositively
:
972 if (!entry
->makePublic()) {
973 decision
.make(ReuseDecision::doNotCacheButShare
, "public key creation error");
974 entry
->makePrivate(true);
978 case ReuseDecision::cacheNegatively
:
979 if (!entry
->cacheNegatively()) {
980 decision
.make(ReuseDecision::doNotCacheButShare
, "public key creation error");
981 entry
->makePrivate(true);
985 case ReuseDecision::doNotCacheButShare
:
986 entry
->makePrivate(true);
993 debugs(11, 3, "decided: " << decision
);
996 if (!ignoreCacheControl
) {
997 if (rep
->cache_control
) {
998 // We are required to revalidate on many conditions.
999 // For security reasons we do so even if storage was caused by refresh_pattern ignore-* option
1001 // CC:must-revalidate or CC:proxy-revalidate
1002 const bool ccMustRevalidate
= (rep
->cache_control
->hasProxyRevalidate() || rep
->cache_control
->hasMustRevalidate());
1004 // CC:no-cache (only if there are no parameters)
1005 const bool ccNoCacheNoParams
= rep
->cache_control
->hasNoCacheWithoutParameters();
1008 const bool ccSMaxAge
= rep
->cache_control
->hasSMaxAge();
1010 // CC:private (yes, these can sometimes be stored)
1011 const bool ccPrivate
= rep
->cache_control
->hasPrivate();
1013 if (ccNoCacheNoParams
|| ccPrivate
)
1014 EBIT_SET(entry
->flags
, ENTRY_REVALIDATE_ALWAYS
);
1015 else if (ccMustRevalidate
|| ccSMaxAge
)
1016 EBIT_SET(entry
->flags
, ENTRY_REVALIDATE_STALE
);
1018 #if USE_HTTP_VIOLATIONS // response header Pragma::no-cache is undefined in HTTP
1020 // Expensive calculation. So only do it IF the CC: header is not present.
1022 /* HACK: Pragma: no-cache in _replies_ is not documented in HTTP,
1023 * but servers like "Active Imaging Webcast/2.0" sure do use it */
1024 if (rep
->header
.has(Http::HdrType::PRAGMA
) &&
1025 rep
->header
.hasListMember(Http::HdrType::PRAGMA
,"no-cache",','))
1026 EBIT_SET(entry
->flags
, ENTRY_REVALIDATE_ALWAYS
);
1032 headersLog(1, 0, request
->method
, rep
);
1039 HttpStateData::ConnectionStatus
1040 HttpStateData::statusIfComplete() const
1042 const HttpReply
*rep
= virginReply();
1044 * If the reply wants to close the connection, it takes precedence */
1046 static SBuf
close("close", 5);
1047 if (httpHeaderHasConnDir(&rep
->header
, close
))
1048 return COMPLETE_NONPERSISTENT_MSG
;
1051 * If we didn't send a keep-alive request header, then this
1052 * can not be a persistent connection.
1054 if (!flags
.keepalive
)
1055 return COMPLETE_NONPERSISTENT_MSG
;
1058 * If we haven't sent the whole request then this can not be a persistent
1061 if (!flags
.request_sent
) {
1062 debugs(11, 2, "Request not yet fully sent " << request
->method
<< ' ' << entry
->url());
1063 return COMPLETE_NONPERSISTENT_MSG
;
1067 * What does the reply have to say about keep-alive?
1071 * If the origin server (HTTP/1.0) does not send a keep-alive
1072 * header, but keeps the connection open anyway, what happens?
1073 * We'll return here and http.c waits for an EOF before changing
1074 * store_status to STORE_OK. Combine this with ENTRY_FWD_HDR_WAIT
1075 * and an error status code, and we might have to wait until
1076 * the server times out the socket.
1078 if (!rep
->keep_alive
)
1079 return COMPLETE_NONPERSISTENT_MSG
;
1081 return COMPLETE_PERSISTENT_MSG
;
1084 HttpStateData::ConnectionStatus
1085 HttpStateData::persistentConnStatus() const
1087 debugs(11, 3, HERE
<< serverConnection
<< " eof=" << eof
);
1088 if (eof
) // already reached EOF
1089 return COMPLETE_NONPERSISTENT_MSG
;
1091 /* If server fd is closing (but we have not been notified yet), stop Comm
1092 I/O to avoid assertions. TODO: Change Comm API to handle callers that
1093 want more I/O after async closing (usually initiated by others). */
1094 // XXX: add canReceive or s/canSend/canTalkToServer/
1095 if (!Comm::IsConnOpen(serverConnection
))
1096 return COMPLETE_NONPERSISTENT_MSG
;
1099 * In chunked response we do not know the content length but we are absolutely
1100 * sure about the end of response, so we are calling the statusIfComplete to
1101 * decide if we can be persistant
1103 if (lastChunk
&& flags
.chunked
)
1104 return statusIfComplete();
1106 const HttpReply
*vrep
= virginReply();
1107 debugs(11, 5, "persistentConnStatus: content_length=" << vrep
->content_length
);
1109 const int64_t clen
= vrep
->bodySize(request
->method
);
1111 debugs(11, 5, "persistentConnStatus: clen=" << clen
);
1113 /* If the body size is unknown we must wait for EOF */
1115 return INCOMPLETE_MSG
;
1118 * If the body size is known, we must wait until we've gotten all of it. */
1120 debugs(11,5, "payloadSeen=" << payloadSeen
<< " content_length=" << vrep
->content_length
);
1122 if (payloadSeen
< vrep
->content_length
)
1123 return INCOMPLETE_MSG
;
1125 if (payloadTruncated
> 0) // already read more than needed
1126 return COMPLETE_NONPERSISTENT_MSG
; // disable pconns
1130 * If there is no message body or we got it all, we can be persistent */
1131 return statusIfComplete();
1135 readDelayed(void *context
, CommRead
const &)
1137 HttpStateData
*state
= static_cast<HttpStateData
*>(context
);
1138 state
->flags
.do_next_read
= true;
1139 state
->maybeReadVirginBody();
/// Comm callback: data (or EOF/error) arrived on the server connection.
/// Reads what it can into inBuf, updates I/O statistics, and hands the
/// buffered bytes to processReply().
void
HttpStateData::readReply(const CommIoCbParams &io)
{
    Must(!flags.do_next_read); // XXX: should have been set false by mayReadVirginBody()
    flags.do_next_read = false;

    debugs(11, 5, io.conn);

    // Bail out early on Comm::ERR_CLOSING - close handlers will tidy up for us
    if (io.flag == Comm::ERR_CLOSING) {
        debugs(11, 3, "http socket closing");
        return;
    }

    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        abortTransaction("store entry aborted while reading reply");
        return;
    }

    Must(Comm::IsConnOpen(serverConnection));
    Must(io.conn->fd == serverConnection->fd);

    /*
     * Don't reset the timeout value here. The value should be
     * counting Config.Timeout.request and applies to the request
     * as a whole, not individual read() calls.
     * Plus, it breaks our lame *HalfClosed() detection
     */

    Must(maybeMakeSpaceAvailable(true));
    CommIoCbParams rd(this); // will be expanded with ReadNow results
    rd.conn = io.conn;
    // never read more than the StoreEntry is willing to accept right now
    rd.size = entry->bytesWanted(Range<size_t>(0, inBuf.spaceSize()));

    if (rd.size <= 0) {
        // delay pools refused; queue a deferred read until readDelayed() fires
        assert(entry->mem_obj);
        AsyncCall::Pointer nilCall;
        entry->mem_obj->delayRead(DeferredRead(readDelayed, this, CommRead(io.conn, NULL, 0, nilCall)));
        return;
    }

    switch (Comm::ReadNow(rd, inBuf)) {
    case Comm::INPROGRESS:
        if (inBuf.isEmpty())
            debugs(33, 2, io.conn << ": no data to process, " << xstrerr(rd.xerrno));
        flags.do_next_read = true;
        maybeReadVirginBody();
        return;

    case Comm::OK:
    {
        payloadSeen += rd.size;
#if USE_DELAY_POOLS
        DelayId delayId = entry->mem_obj->mostBytesAllowed();
        delayId.bytesIn(rd.size);
#endif

        statCounter.server.all.kbytes_in += rd.size;
        statCounter.server.http.kbytes_in += rd.size;
        ++ IOStats.Http.reads;

        // histogram bin = floor(log2(bytes read))
        int bin = 0;
        for (int clen = rd.size - 1; clen; ++bin)
            clen >>= 1;

        ++ IOStats.Http.read_hist[bin];

        request->hier.notePeerRead();
    }

        /* Continue to process previously read data */
        break;

    case Comm::ENDFILE: // close detected by 0-byte read
        eof = 1;
        flags.do_next_read = false;

        /* Continue to process previously read data */
        break;

    // case Comm::COMM_ERROR:
    default: // no other flags should ever occur
    {
        debugs(11, 2, io.conn << ": read failure: " << xstrerr(rd.xerrno));
        const auto err = new ErrorState(ERR_READ_ERROR, Http::scBadGateway, fwd->request, fwd->al);
        err->xerrno = rd.xerrno;
        fwd->fail(err);
        flags.do_next_read = false;
        closeServer();
        mustStop("HttpStateData::readReply");
    }
        return;
    }

    /* Process next response from buffer */
    processReply();
}
/// processes the already read and buffered response data, possibly after
/// waiting for asynchronous 1xx control message processing
void
HttpStateData::processReply()
{

    if (flags.handling1xx) { // we came back after handling a 1xx response
        debugs(11, 5, HERE << "done with 1xx handling");
        flags.handling1xx = false;
        Must(!flags.headers_parsed);
    }

    if (!flags.headers_parsed) { // have not parsed headers yet?
        PROF_start(HttpStateData_processReplyHeader);
        processReplyHeader();
        PROF_stop(HttpStateData_processReplyHeader);

        if (!continueAfterParsingHeader()) // parsing error or need more data
            return; // TODO: send errors to ICAP

        adaptOrFinalizeReply(); // may write to, abort, or "close" the entry
    }

    // kick more reads if needed and/or process the response body, if any
    PROF_start(HttpStateData_processReplyBody);
    processReplyBody(); // may call serverComplete()
    PROF_stop(HttpStateData_processReplyBody);
}
/**
 * Decide what to do after a response-header parsing attempt: proceed, wait
 * for more data/1xx handling, or fail the transaction with a classified error.
 *
 * \retval true if we can continue with processing the body or doing ICAP.
 */
bool
HttpStateData::continueAfterParsingHeader()
{
    if (flags.handling1xx) {
        debugs(11, 5, HERE << "wait for 1xx handling");
        Must(!flags.headers_parsed);
        return false;
    }

    if (!flags.headers_parsed && !eof) {
        debugs(11, 9, "needs more at " << inBuf.length());
        flags.do_next_read = true;
        /** \retval false If we have not finished parsing the headers and may get more data.
         *                Schedules more reads to retrieve the missing data.
         */
        maybeReadVirginBody(); // schedules all kinds of reads; TODO: rename
        return false;
    }

    /** If we are done with parsing, check for errors */

    err_type error = ERR_NONE;

    if (flags.headers_parsed) { // parsed headers, possibly with errors
        // check for header parsing errors
        if (HttpReply *vrep = virginReply()) {
            const Http::StatusCode s = vrep->sline.status();
            const AnyP::ProtocolVersion &v = vrep->sline.version;
            if (s == Http::scInvalidHeader && v != Http::ProtocolVersion(0,9)) {
                debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry->url() << " AKA " << request->url);
                error = ERR_INVALID_RESP;
            } else if (s == Http::scHeaderTooLarge) {
                fwd->dontRetry(true);
                error = ERR_TOO_BIG;
            } else if (vrep->header.conflictingContentLength()) {
                fwd->dontRetry(true);
                error = ERR_INVALID_RESP;
            } else {
                return true; // done parsing, got reply, and no error
            }
        } else {
            // parsed headers but got no reply
            debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No reply at all for " << entry->url() << " AKA " << request->url);
            error = ERR_INVALID_RESP;
        }
    } else {
        // headers not parsed and we hit EOF: classify by whether we got any bytes
        assert(eof);
        if (inBuf.length()) {
            error = ERR_INVALID_RESP;
            debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry->url() << " AKA " << request->url);
        } else {
            error = ERR_ZERO_SIZE_OBJECT;
            debugs(11, (request->flags.accelerated?DBG_IMPORTANT:2), "WARNING: HTTP: Invalid Response: No object data received for " << entry->url() << " AKA " << request->url);
        }
    }

    assert(error != ERR_NONE);
    entry->reset();
    fwd->fail(new ErrorState(error, Http::scBadGateway, fwd->request, fwd->al));
    flags.do_next_read = false;
    closeServer();
    mustStop("HttpStateData::continueAfterParsingHeader");
    return false; // quit on error
}
1335 /** truncate what we read if we read too much so that writeReplyBody()
1336 writes no more than what we should have read */
1338 HttpStateData::truncateVirginBody()
1340 assert(flags
.headers_parsed
);
1342 HttpReply
*vrep
= virginReply();
1344 if (!vrep
->expectingBody(request
->method
, clen
) || clen
< 0)
1345 return; // no body or a body of unknown size, including chunked
1347 if (payloadSeen
- payloadTruncated
<= clen
)
1348 return; // we did not read too much or already took care of the extras
1350 if (const int64_t extras
= payloadSeen
- payloadTruncated
- clen
) {
1351 // server sent more that the advertised content length
1352 debugs(11, 5, "payloadSeen=" << payloadSeen
<<
1353 " clen=" << clen
<< '/' << vrep
->content_length
<<
1354 " trucated=" << payloadTruncated
<< '+' << extras
);
1356 inBuf
.chop(0, inBuf
.length() - extras
);
1357 payloadTruncated
+= extras
;
1362 * Call this when there is data from the origin server
1363 * which should be sent to either StoreEntry, or to ICAP...
1366 HttpStateData::writeReplyBody()
1368 truncateVirginBody(); // if needed
1369 const char *data
= inBuf
.rawContent();
1370 int len
= inBuf
.length();
1371 addVirginReplyBody(data
, len
);
/// Dechunks buffered reply body bytes and forwards the decoded payload.
/// \returns false if the chunked decoder threw (i.e., the encoding is broken)
bool
HttpStateData::decodeAndWriteReplyBody()
{
    const char *data = NULL;
    int len;
    bool wasThereAnException = false;
    assert(flags.chunked);
    assert(httpChunkDecoder);
    SQUID_ENTER_THROWING_CODE();
    MemBuf decodedData;
    decodedData.init();
    httpChunkDecoder->setPayloadBuffer(&decodedData);
    const bool doneParsing = httpChunkDecoder->parse(inBuf);
    inBuf = httpChunkDecoder->remaining(); // sync buffers after parse
    len = decodedData.contentSize();
    data = decodedData.content();
    addVirginReplyBody(data, len);
    if (doneParsing) {
        // saw the last-chunk: the message is complete; stop reading
        lastChunk = 1;
        flags.do_next_read = false;
    }
    SQUID_EXIT_THROWING_CODE(wasThereAnException);
    return wasThereAnException;
}
/*
 * processReplyBody has two purposes:
 *  1 - take the reply body data, if any, and put it into either
 *      the StoreEntry, or give it over to ICAP.
 *  2 - see if we made it to the end of the response (persistent
 *      connections and such)
 */
void
HttpStateData::processReplyBody()
{
    if (!flags.headers_parsed) {
        // cannot interpret body bytes yet; keep reading headers
        flags.do_next_read = true;
        maybeReadVirginBody();
        return;
    }

#if USE_ADAPTATION
    debugs(11,5, HERE << "adaptationAccessCheckPending=" << adaptationAccessCheckPending);
    if (adaptationAccessCheckPending)
        return;
#endif

    /*
     * At this point the reply headers have been parsed and consumed.
     * That means header content has been removed from readBuf and
     * it contains only body data.
     */
    if (entry->isAccepting()) {
        if (flags.chunked) {
            if (!decodeAndWriteReplyBody()) {
                // malformed chunked encoding: stop reading and finish now
                flags.do_next_read = false;
                serverComplete();
                return;
            }
        } else
            writeReplyBody();
    }

    // storing/sending methods like earlier adaptOrFinalizeReply() or
    // above writeReplyBody() may release/abort the store entry.
    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        // TODO: In some cases (e.g., 304), we should keep persistent conn open.
        // Detect end-of-reply (and, hence, pool our idle pconn) earlier (ASAP).
        abortTransaction("store entry aborted while storing reply");
        return;
    }

    switch (persistentConnStatus()) {
    case INCOMPLETE_MSG: {
        debugs(11, 5, "processReplyBody: INCOMPLETE_MSG from " << serverConnection);
        /* Wait for more data or EOF condition */
        AsyncCall::Pointer nil;
        if (flags.keepalive_broken) {
            // use a short timeout for servers known to mishandle keep-alive
            commSetConnTimeout(serverConnection, 10, nil);
        } else {
            commSetConnTimeout(serverConnection, Config.Timeout.read, nil);
        }

        flags.do_next_read = true;
    }
    break;

    case COMPLETE_PERSISTENT_MSG: {
        debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG from " << serverConnection);

        // TODO: Remove serverConnectionSaved but preserve exception safety.

        commUnsetConnTimeout(serverConnection);
        flags.do_next_read = false;

        comm_remove_close_handler(serverConnection->fd, closeHandler);
        closeHandler = NULL;

        Ip::Address client_addr; // XXX: Remove as unused. Why was it added?
        if (request->flags.spoofClientIp)
            client_addr = request->client_addr;

        auto serverConnectionSaved = serverConnection;
        fwd->unregister(serverConnection);
        serverConnection = nullptr;

        bool ispinned = false; // TODO: Rename to isOrShouldBePinned
        if (request->flags.pinned) {
            ispinned = true;
        } else if (request->flags.connectionAuth && request->flags.authSent) {
            ispinned = true;
        }

        if (ispinned) {
            if (request->clientConnectionManager.valid()) {
                // hand the idle pinned connection back to the client side
                CallJobHere1(11, 4, request->clientConnectionManager,
                             ConnStateData,
                             notePinnedConnectionBecameIdle,
                             ConnStateData::PinnedIdleContext(serverConnectionSaved, request));
            } else {
                // must not pool/share ispinned connections, even orphaned ones
                serverConnectionSaved->close();
            }
        } else {
            // pool the idle connection for reuse by future requests
            fwdPconnPool->push(serverConnectionSaved, request->url.host());
        }

        serverComplete();
    }
    break;

    case COMPLETE_NONPERSISTENT_MSG:
        debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG from " << serverConnection);

        serverComplete();
        break;
    }

    maybeReadVirginBody();
}
1516 HttpStateData::mayReadVirginReplyBody() const
1518 // TODO: Be more precise here. For example, if/when reading trailer, we may
1519 // not be doneWithServer() yet, but we should return false. Similarly, we
1520 // could still be writing the request body after receiving the whole reply.
1521 return !doneWithServer();
/// Schedules a read of the virgin reply body if (and only if) buffer space,
/// connection state, and the do_next_read flag all allow it.
void
HttpStateData::maybeReadVirginBody()
{
    // too late to read if the connection is gone or already closing
    if (!Comm::IsConnOpen(serverConnection) || fd_table[serverConnection->fd].closing())
        return;

    if (!maybeMakeSpaceAvailable(false))
        return;

    // XXX: get rid of the do_next_read flag
    // check for the proper reasons preventing read(2)
    if (!flags.do_next_read)
        return;

    flags.do_next_read = false;

    // must not already be waiting for read(2) ...
    assert(!Comm::MonitorsRead(serverConnection->fd));

    // wait for read(2) to be possible.
    typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
    AsyncCall::Pointer call = JobCallback(11, 5, Dialer, this, HttpStateData::readReply);
    Comm::Read(serverConnection, call);
}
/// Ensures inBuf has room for more reply bytes (within the configured limit).
/// With doGrow=true only reports whether growth is possible; with doGrow=false
/// actually reserves the space.
/// \returns whether a read of at least a couple of bytes is worthwhile
bool
HttpStateData::maybeMakeSpaceAvailable(bool doGrow)
{
    // how much we are allowed to buffer
    const int limitBuffer = (flags.headers_parsed ? Config.readAheadGap : Config.maxReplyHeaderSize);

    if (limitBuffer < 0 || inBuf.length() >= (SBuf::size_type)limitBuffer) {
        // when buffer is at or over limit already
        debugs(11, 7, "will not read up to " << limitBuffer << ". buffer has (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
        debugs(11, DBG_DATA, "buffer has {" << inBuf << "}");
        // Process next response from buffer
        processReply();
        return false;
    }

    // how much we want to read
    const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), (limitBuffer - inBuf.length()));

    if (!read_size) {
        debugs(11, 7, "will not read up to " << read_size << " into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
        return false;
    }

    // just report whether we could grow or not, do not actually do it
    if (doGrow)
        return (read_size >= 2);

    // we may need to grow the buffer
    inBuf.reserveSpace(read_size);
    debugs(11, 8, (!flags.do_next_read ? "will not" : "may") <<
           " read up to " << read_size << " bytes info buf(" << inBuf.length() << "/" << inBuf.spaceSize() <<
           ") from " << serverConnection);

    return (inBuf.spaceSize() >= 2); // only read if there is 1+ bytes of space available
}
/// called after writing the very last request byte (body, last-chunk, etc)
void
HttpStateData::wroteLast(const CommIoCbParams &io)
{
    debugs(11, 5, HERE << serverConnection << ": size " << io.size << ": errflag " << io.flag << ".");
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    // XXX: Keep in sync with Client::sentRequestBody().
    // TODO: Extract common parts.

    if (io.size > 0) {
        // account the written bytes to the server-side traffic counters
        fd_bytes(io.fd, io.size, FD_WRITE);
        statCounter.server.all.kbytes_out += io.size;
        statCounter.server.http.kbytes_out += io.size;
    }

    if (io.flag == Comm::ERR_CLOSING)
        return;

    // both successful and failed writes affect response times
    request->hier.notePeerWrite();

    if (io.flag) {
        // the write failed: report it and give up on this server
        const auto err = new ErrorState(ERR_WRITE_ERROR, Http::scBadGateway, fwd->request, fwd->al);
        err->xerrno = io.xerrno;
        fwd->fail(err);
        closeServer();
        mustStop("HttpStateData::wroteLast");
        return;
    }

    sendComplete();
}
/// successfully wrote the entire request (including body, last-chunk, etc.)
void
HttpStateData::sendComplete()
{
    /*
     * Set the read timeout here because it hasn't been set yet.
     * We only set the read timeout after the request has been
     * fully written to the peer.  If we start the timeout
     * after connection establishment, then we are likely to hit
     * the timeout for POST/PUT requests that have very large
     * request bodies.
     */
    typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall =  JobCallback(11, 5,
                                      TimeoutDialer, this, HttpStateData::httpTimeout);

    commSetConnTimeout(serverConnection, Config.Timeout.read, timeoutCall);
    flags.request_sent = true;
}
1644 HttpStateData::closeServer()
1646 debugs(11,5, HERE
<< "closing HTTP server " << serverConnection
<< " this " << this);
1648 if (Comm::IsConnOpen(serverConnection
)) {
1649 fwd
->unregister(serverConnection
);
1650 comm_remove_close_handler(serverConnection
->fd
, closeHandler
);
1651 closeHandler
= NULL
;
1652 serverConnection
->close();
1657 HttpStateData::doneWithServer() const
1659 return !Comm::IsConnOpen(serverConnection
);
/*
 * Fixup authentication request headers for special cases
 */
static void
httpFixupAuthentication(HttpRequest * request, const HttpHeader * hdr_in, HttpHeader * hdr_out, const Http::StateFlags &flags)
{
    /* Nothing to do unless we are forwarding to a peer */
    if (!flags.peering)
        return;

    // This request is going "through" rather than "to" our _peer.
    if (flags.tunneling)
        return;

    /* Needs to be explicitly enabled */
    if (!request->peer_login)
        return;

    // origin servers take Authorization; intermediate proxies take Proxy-Authorization
    const auto header = flags.toOrigin ? Http::HdrType::AUTHORIZATION : Http::HdrType::PROXY_AUTHORIZATION;
    /* Maybe already dealt with? */
    if (hdr_out->has(header))
        return;

    /* Nothing to do here for PASSTHRU */
    if (strcmp(request->peer_login, "PASSTHRU") == 0)
        return;

    // Dangerous and undocumented PROXYPASS is a single-signon to servers with
    // the proxy password. Only Basic Authentication can work this way. This
    // statement forwards a "basic" Proxy-Authorization value from our client
    // to an originserver peer. Other PROXYPASS cases are handled lower.
    if (flags.toOrigin &&
            strcmp(request->peer_login, "PROXYPASS") == 0 &&
            hdr_in->has(Http::HdrType::PROXY_AUTHORIZATION)) {

        const char *auth = hdr_in->getStr(Http::HdrType::PROXY_AUTHORIZATION);

        if (auth && strncasecmp(auth, "basic ", 6) == 0) {
            hdr_out->putStr(header, auth);
            return;
        }
    }

    char loginbuf[base64_encode_len(MAX_LOGIN_SZ)];
    size_t blen;
    struct base64_encode_ctx ctx;
    base64_encode_init(&ctx);

    /* Special mode to pass the username to the upstream cache */
    if (*request->peer_login == '*') {
        const char *username = "-";

        if (request->extacl_user.size())
            username = request->extacl_user.termedBuf();
#if USE_AUTH
        else if (request->auth_user_request != NULL)
            username = request->auth_user_request->username();
#endif

        // encode "<username><rest-of-peer_login-after-*>" as Basic credentials
        blen = base64_encode_update(&ctx, loginbuf, strlen(username), reinterpret_cast<const uint8_t*>(username));
        blen += base64_encode_update(&ctx, loginbuf+blen, strlen(request->peer_login +1), reinterpret_cast<const uint8_t*>(request->peer_login +1));
        blen += base64_encode_final(&ctx, loginbuf+blen);
        httpHeaderPutStrf(hdr_out, header, "Basic %.*s", (int)blen, loginbuf);
        return;
    }

    /* external_acl provided credentials */
    if (request->extacl_user.size() && request->extacl_passwd.size() &&
            (strcmp(request->peer_login, "PASS") == 0 ||
             strcmp(request->peer_login, "PROXYPASS") == 0)) {

        // encode "user:password" as Basic credentials
        blen = base64_encode_update(&ctx, loginbuf, request->extacl_user.size(), reinterpret_cast<const uint8_t*>(request->extacl_user.rawBuf()));
        blen += base64_encode_update(&ctx, loginbuf+blen, 1, reinterpret_cast<const uint8_t*>(":"));
        blen += base64_encode_update(&ctx, loginbuf+blen, request->extacl_passwd.size(), reinterpret_cast<const uint8_t*>(request->extacl_passwd.rawBuf()));
        blen += base64_encode_final(&ctx, loginbuf+blen);
        httpHeaderPutStrf(hdr_out, header, "Basic %.*s", (int)blen, loginbuf);
        return;
    }

    // if no external user credentials are available to fake authentication with PASS acts like PASSTHRU
    if (strcmp(request->peer_login, "PASS") == 0)
        return;

    /* Kerberos login to peer */
#if HAVE_AUTH_MODULE_NEGOTIATE && HAVE_KRB5 && HAVE_GSSAPI
    if (strncmp(request->peer_login, "NEGOTIATE",strlen("NEGOTIATE")) == 0) {
        char *Token=NULL;
        char *PrincipalName=NULL,*p;
        int negotiate_flags = 0;

        // optional ":principal" suffix selects the principal name
        if ((p=strchr(request->peer_login,':')) != NULL ) {
            PrincipalName=++p;
        }
        if (request->flags.auth_no_keytab) {
            negotiate_flags |= PEER_PROXY_NEGOTIATE_NOKEYTAB;
        }
        Token = peer_proxy_negotiate_auth(PrincipalName, request->peer_host, negotiate_flags);
        if (Token) {
            httpHeaderPutStrf(hdr_out, header, "Negotiate %s",Token);
        }
        return;
    }
#endif /* HAVE_KRB5 && HAVE_GSSAPI */

    // default: treat peer_login as fixed "user:password" Basic credentials
    blen = base64_encode_update(&ctx, loginbuf, strlen(request->peer_login), reinterpret_cast<const uint8_t*>(request->peer_login));
    blen += base64_encode_final(&ctx, loginbuf+blen);
    httpHeaderPutStrf(hdr_out, header, "Basic %.*s", (int)blen, loginbuf);
}
/*
 * build request headers and append them to a given MemBuf
 * used by buildRequestPrefix()
 * note: initialised the HttpHeader, the caller is responsible for Clean()-ing
 */
void
HttpStateData::httpBuildRequestHeader(HttpRequest * request,
                                      StoreEntry * entry,
                                      const AccessLogEntryPointer &al,
                                      HttpHeader * hdr_out,
                                      const Http::StateFlags &flags)
{
    /* building buffer for complex strings */
#define BBUF_SZ (MAX_URL+32)
    LOCAL_ARRAY(char, bbuf, BBUF_SZ);
    LOCAL_ARRAY(char, ntoabuf, MAX_IPSTRLEN);
    const HttpHeader *hdr_in = &request->header;
    const HttpHeaderEntry *e = NULL;
    HttpHeaderPos pos = HttpHeaderInitPos;
    assert (hdr_out->owner == hoRequest);

    /* use our IMS header if the cached entry has Last-Modified time */
    if (request->lastmod > -1)
        hdr_out->putTime(Http::HdrType::IF_MODIFIED_SINCE, request->lastmod);

    // Add our own If-None-Match field if the cached entry has a strong ETag.
    // copyOneHeaderFromClientsideRequestToUpstreamRequest() adds client ones.
    if (request->etag.size() > 0) {
        hdr_out->addEntry(new HttpHeaderEntry(Http::HdrType::IF_NONE_MATCH, SBuf(),
                                              request->etag.termedBuf()));
    }

    bool we_do_ranges = decideIfWeDoRanges (request);

    String strConnection (hdr_in->getList(Http::HdrType::CONNECTION));

    // copy (or suppress) each client request header, one at a time
    while ((e = hdr_in->getEntry(&pos)))
        copyOneHeaderFromClientsideRequestToUpstreamRequest(e, strConnection, request, hdr_out, we_do_ranges, flags);

    /* Abstraction break: We should interpret multipart/byterange responses
     * into offset-length data, and this works around our inability to do so.
     */
    if (!we_do_ranges && request->multipartRangeRequest()) {
        /* don't cache the result */
        request->flags.cachable = false;
        /* pretend it's not a range request */
        request->ignoreRange("want to request the whole object");
        request->flags.isRanged = false;
    }

    hdr_out->addVia(request->http_ver, hdr_in);

    if (request->flags.accelerated) {
        /* Append Surrogate-Capabilities */
        String strSurrogate(hdr_in->getList(Http::HdrType::SURROGATE_CAPABILITY));
#if USE_SQUID_ESI
        snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0 ESI/1.0\"", Config.Accel.surrogate_id);
#else
        snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0\"", Config.Accel.surrogate_id);
#endif
        strListAdd(&strSurrogate, bbuf, ',');
        hdr_out->putStr(Http::HdrType::SURROGATE_CAPABILITY, strSurrogate.termedBuf());
    }

    /** \pre Handle X-Forwarded-For */
    if (strcmp(opt_forwarded_for, "delete") != 0) {

        String strFwd = hdr_in->getList(Http::HdrType::X_FORWARDED_FOR);

        // if we cannot double strFwd size, then it grew past 50% of the limit
        if (!strFwd.canGrowBy(strFwd.size())) {
            // There is probably a forwarding loop with Via detection disabled.
            // If we do nothing, String will assert on overflow soon.
            // TODO: Terminate all transactions with huge XFF?
            strFwd = "error";

            static int warnedCount = 0;
            if (warnedCount++ < 100) {
                const SBuf url(entry ? SBuf(entry->url()) : request->effectiveRequestUri());
                debugs(11, DBG_IMPORTANT, "Warning: likely forwarding loop with " << url);
            }
        }

        if (strcmp(opt_forwarded_for, "on") == 0) {
            /** If set to ON - append client IP or 'unknown'. */
            if ( request->client_addr.isNoAddr() )
                strListAdd(&strFwd, "unknown", ',');
            else
                strListAdd(&strFwd, request->client_addr.toStr(ntoabuf, MAX_IPSTRLEN), ',');
        } else if (strcmp(opt_forwarded_for, "off") == 0) {
            /** If set to OFF - append 'unknown'. */
            strListAdd(&strFwd, "unknown", ',');
        } else if (strcmp(opt_forwarded_for, "transparent") == 0) {
            /** If set to TRANSPARENT - pass through unchanged. */
        } else if (strcmp(opt_forwarded_for, "truncate") == 0) {
            /** If set to TRUNCATE - drop existing list and replace with client IP or 'unknown'. */
            if ( request->client_addr.isNoAddr() )
                strFwd = "unknown";
            else
                strFwd = request->client_addr.toStr(ntoabuf, MAX_IPSTRLEN);
        }
        if (strFwd.size() > 0)
            hdr_out->putStr(Http::HdrType::X_FORWARDED_FOR, strFwd.termedBuf());
    }
    /** If set to DELETE - do not copy through. */

    /* append Host if not there already */
    if (!hdr_out->has(Http::HdrType::HOST)) {
        if (request->peer_domain) {
            hdr_out->putStr(Http::HdrType::HOST, request->peer_domain);
        } else {
            SBuf authority = request->url.authority();
            hdr_out->putStr(Http::HdrType::HOST, authority.c_str());
        }
    }

    /* append Authorization if known in URL, not in header and going direct */
    if (!hdr_out->has(Http::HdrType::AUTHORIZATION)) {
        if (flags.toOrigin && !request->url.userInfo().isEmpty()) {
            static char result[base64_encode_len(MAX_URL*2)]; // should be big enough for a single URI segment
            struct base64_encode_ctx ctx;
            base64_encode_init(&ctx);
            size_t blen = base64_encode_update(&ctx, result, request->url.userInfo().length(), reinterpret_cast<const uint8_t*>(request->url.userInfo().rawContent()));
            blen += base64_encode_final(&ctx, result+blen);
            result[blen] = '\0';

            httpHeaderPutStrf(hdr_out, Http::HdrType::AUTHORIZATION, "Basic %.*s", (int)blen, result);
        }
    }

    /* Fixup (Proxy-)Authorization special cases. Plain relaying dealt with above */
    httpFixupAuthentication(request, hdr_in, hdr_out, flags);

    /* append Cache-Control, add max-age if not there already */
    {
        HttpHdrCc *cc = hdr_in->getCc();

        if (!cc)
            cc = new HttpHdrCc();

#if 0 /* see bug 2330 */
        /* Set no-cache if determined needed but not found */
        if (request->flags.nocache)
            EBIT_SET(cc->mask, HttpHdrCcType::CC_NO_CACHE);
#endif

        /* Add max-age only without no-cache */
        if (!cc->hasMaxAge() && !cc->hasNoCache()) {
            // XXX: performance regression. c_str() reallocates
            SBuf tmp(request->effectiveRequestUri());
            cc->maxAge(getMaxAge(entry ? entry->url() : tmp.c_str()));
        }

        /* Enforce sibling relations */
        if (flags.only_if_cached)
            cc->onlyIfCached(true);

        hdr_out->putCc(cc);

        delete cc;
    }

    // Always send Connection because HTTP/1.0 servers need explicit "keep-alive"
    // while HTTP/1.1 servers need explicit "close", and we do not always know
    // the server expectations.
    hdr_out->putStr(Http::HdrType::CONNECTION, flags.keepalive ? "keep-alive" : "close");

    /* append Front-End-Https */
    if (flags.front_end_https) {
        if (flags.front_end_https == 1 || request->url.getScheme() == AnyP::PROTO_HTTPS)
            hdr_out->putStr(Http::HdrType::FRONT_END_HTTPS, "On");
    }

    if (flags.chunked_request) {
        // Do not just copy the original value so that if the client-side
        // starts decode other encodings, this code may remain valid.
        hdr_out->putStr(Http::HdrType::TRANSFER_ENCODING, "chunked");
    }

    /* Now mangle the headers. */
    httpHdrMangleList(hdr_out, request, al, ROR_REQUEST);

    strConnection.clean();
}
/**
 * Decides whether a particular header may be cloned from the received Clients request
 * to our outgoing fetch request.
 */
static void
copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request, HttpHeader * hdr_out, const int we_do_ranges, const Http::StateFlags &flags)
{
    debugs(11, 5, "httpBuildRequestHeader: " << e->name << ": " << e->value );

    switch (e->id) {

    /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid should not pass on. */

    case Http::HdrType::PROXY_AUTHORIZATION:
        /** \par Proxy-Authorization:
         * Only pass on proxy authentication to peers for which
         * authentication forwarding is explicitly enabled
         */
        if (!flags.toOrigin && request->peer_login &&
                (strcmp(request->peer_login, "PASS") == 0 ||
                 strcmp(request->peer_login, "PROXYPASS") == 0 ||
                 strcmp(request->peer_login, "PASSTHRU") == 0)) {
            hdr_out->addEntry(e->clone());
        }
        break;

    /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid does not pass on. */

    case Http::HdrType::CONNECTION:          /** \par Connection: */
    case Http::HdrType::TE:                  /** \par TE: */
    case Http::HdrType::KEEP_ALIVE:          /** \par Keep-Alive: */
    case Http::HdrType::PROXY_AUTHENTICATE:  /** \par Proxy-Authenticate: */
    case Http::HdrType::TRAILER:             /** \par Trailer: */
    case Http::HdrType::UPGRADE:             /** \par Upgrade: */
    case Http::HdrType::TRANSFER_ENCODING:   /** \par Transfer-Encoding: */
        break;

    /** \par OTHER headers I haven't bothered to track down yet. */

    case Http::HdrType::AUTHORIZATION:
        /** \par WWW-Authorization:
         * Pass on WWW authentication */

        if (!flags.toOriginPeer()) {
            hdr_out->addEntry(e->clone());
        } else {
            /** \note Assume that talking to a cache_peer originserver makes
             * us a reverse proxy and only forward authentication if enabled
             * (see also httpFixupAuthentication for special cases)
             */
            if (request->peer_login &&
                    (strcmp(request->peer_login, "PASS") == 0 ||
                     strcmp(request->peer_login, "PASSTHRU") == 0 ||
                     strcmp(request->peer_login, "PROXYPASS") == 0)) {
                hdr_out->addEntry(e->clone());
            }
        }

        break;

    case Http::HdrType::HOST:
        /** \par Host:
         * Normally Squid rewrites the Host: header.
         * However, there is one case when we don't: If the URL
         * went through our redirector and the admin configured
         * 'redir_rewrites_host' to be off.
         */
        if (request->peer_domain)
            hdr_out->putStr(Http::HdrType::HOST, request->peer_domain);
        else if (request->flags.redirected && !Config.onoff.redir_rewrites_host)
            hdr_out->addEntry(e->clone());
        else {
            SBuf authority = request->url.authority();
            hdr_out->putStr(Http::HdrType::HOST, authority.c_str());
        }

        break;

    case Http::HdrType::IF_MODIFIED_SINCE:
        /** \par If-Modified-Since:
         * append unless we added our own,
         * but only if cache_miss_revalidate is enabled, or
         *  the request is not cacheable, or
         *  the request contains authentication credentials.
         * \note at most one client's If-Modified-Since header can pass through
         */
        // XXX: need to check and cleanup the auth case so cacheable auth requests get cached.
        if (hdr_out->has(Http::HdrType::IF_MODIFIED_SINCE))
            break;
        else if (Config.onoff.cache_miss_revalidate || !request->flags.cachable || request->flags.auth)
            hdr_out->addEntry(e->clone());
        break;

    case Http::HdrType::IF_NONE_MATCH:
        /** \par If-None-Match:
         * append if the wildcard '*' special case value is present, or
         *   cache_miss_revalidate is disabled, or
         *   the request is not cacheable in this proxy, or
         *   the request contains authentication credentials.
         * \note this header lists a set of responses for the server to elide sending. Squid added values are extending that set.
         */
        // XXX: need to check and cleanup the auth case so cacheable auth requests get cached.
        if (hdr_out->hasListMember(Http::HdrType::IF_MATCH, "*", ',') || Config.onoff.cache_miss_revalidate || !request->flags.cachable || request->flags.auth)
            hdr_out->addEntry(e->clone());
        break;

    case Http::HdrType::MAX_FORWARDS:
        /** \par Max-Forwards:
         * pass only on TRACE or OPTIONS requests */
        if (request->method == Http::METHOD_TRACE || request->method == Http::METHOD_OPTIONS) {
            const int64_t hops = e->getInt64();

            if (hops > 0)
                hdr_out->putInt64(Http::HdrType::MAX_FORWARDS, hops - 1);
        }

        break;

    case Http::HdrType::VIA:
        /** \par Via:
         * If Via is disabled then forward any received header as-is.
         * Otherwise leave for explicit updated addition later. */

        if (!Config.onoff.via)
            hdr_out->addEntry(e->clone());

        break;

    case Http::HdrType::RANGE:

    case Http::HdrType::IF_RANGE:

    case Http::HdrType::REQUEST_RANGE:
        /** \par Range:, If-Range:, Request-Range:
         * Only pass if we accept ranges */
        if (!we_do_ranges)
            hdr_out->addEntry(e->clone());

        break;

    case Http::HdrType::PROXY_CONNECTION: // SHOULD ignore. But doing so breaks things.
        break;

    case Http::HdrType::CONTENT_LENGTH:
        // pass through unless we chunk; also, keeping this away from default
        // prevents request smuggling via Connection: Content-Length tricks
        if (!flags.chunked_request)
            hdr_out->addEntry(e->clone());
        break;

    case Http::HdrType::X_FORWARDED_FOR:

    case Http::HdrType::CACHE_CONTROL:
        /** \par X-Forwarded-For:, Cache-Control:
         * handled specially by Squid, so leave off for now.
         * append these after the loop if needed */
        break;

    case Http::HdrType::FRONT_END_HTTPS:
        /** \par Front-End-Https:
         * Pass thru only if peer is configured with front-end-https */
        if (!flags.front_end_https)
            hdr_out->addEntry(e->clone());

        break;

    default:
        /** \par default.
         * pass on all other header fields
         * which are NOT listed by the special Connection: header. */
        if (strConnection.size()>0 && strListIsMember(&strConnection, e->name, ',')) {
            debugs(11, 2, "'" << e->name << "' header cropped by Connection: definition");
            return;
        }

        hdr_out->addEntry(e->clone());
    }
}
2136 HttpStateData::decideIfWeDoRanges (HttpRequest
* request
)
2139 /* decide if we want to do Ranges ourselves
2140 * and fetch the whole object now)
2141 * We want to handle Ranges ourselves iff
2142 * - we can actually parse client Range specs
2143 * - the specs are expected to be simple enough (e.g. no out-of-order ranges)
2144 * - reply will be cachable
2145 * (If the reply will be uncachable we have to throw it away after
2146 * serving this request, so it is better to forward ranges to
2147 * the server and fetch only the requested content)
2150 int64_t roffLimit
= request
->getRangeOffsetLimit();
2152 if (NULL
== request
->range
|| !request
->flags
.cachable
2153 || request
->range
->offsetLimitExceeded(roffLimit
) || request
->flags
.connectionAuth
)
2156 debugs(11, 8, "decideIfWeDoRanges: range specs: " <<
2157 request
->range
<< ", cachable: " <<
2158 request
->flags
.cachable
<< "; we_do_ranges: " << result
);
/* build request prefix and append it to a given MemBuf;
 * return the length of the prefix */
mb_size_t
HttpStateData::buildRequestPrefix(MemBuf * mb)
{
    // remember where we started so we can report how many bytes we added
    const int offset = mb->size;
    /* Uses a local httpver variable to print the HTTP label
     * since the HttpRequest may have an older version label.
     * XXX: This could create protocol bugs as the headers sent and
     * flow control should all be based on the HttpRequest version
     * not the one we are sending. Needs checking.
     */
    const AnyP::ProtocolVersion httpver = Http::ProtocolVersion();
    // origin servers get a path-only URI; peers get the absolute form
    const SBuf url(flags.toOrigin ? request->url.path() : request->effectiveRequestUri());
    // request-line: METHOD SP request-target SP HTTP/major.minor CRLF
    mb->appendf(SQUIDSBUFPH " " SQUIDSBUFPH " %s/%d.%d\r\n",
                SQUIDSBUFPRINT(request->method.image()),
                SQUIDSBUFPRINT(url),
                AnyP::ProtocolType_str[httpver.protocol],
                httpver.major,httpver.minor);
    /* build and pack headers */
    {
        HttpHeader hdr(hoRequest);
        httpBuildRequestHeader(request.getRaw(), entry, fwd->al, &hdr, flags);

        // record whether credentials were (or will be) sent with this request
        if (request->flags.pinned && request->flags.connectionAuth)
            request->flags.authSent = true;
        else if (hdr.has(Http::HdrType::AUTHORIZATION))
            request->flags.authSent = true;

        hdr.packInto(mb);
        hdr.clean();
    }
    /* append header terminator */
    mb->append(crlf, 2);
    return mb->size - offset;
}
/* This will be called when connect completes. Write request. */
/// Builds and writes the HTTP request (headers and, if any, body framing
/// setup) to the already-connected server. Returns false if the connection
/// is closing or the body flow could not be started.
bool
HttpStateData::sendRequest()
{
    MemBuf mb;

    debugs(11, 5, HERE << serverConnection << ", request " << request << ", this " << this << ".");

    if (!Comm::IsConnOpen(serverConnection)) {
        debugs(11,3, HERE << "cannot send request to closing " << serverConnection);
        assert(closeHandler != NULL);
        return false;
    }

    // arm the overall transaction-lifetime timeout for this connection
    typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(11, 5,
                                     TimeoutDialer, this, HttpStateData::httpTimeout);
    commSetConnTimeout(serverConnection, Config.Timeout.lifetime, timeoutCall);
    flags.do_next_read = true;
    maybeReadVirginBody();

    if (request->body_pipe != NULL) {
        // request has a body: subscribe as its consumer before writing headers
        if (!startRequestBodyFlow()) // register to receive body data
            return false;

        typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
        requestSender = JobCallback(11,5,
                                    Dialer, this, HttpStateData::sentRequestBody);

        Must(!flags.chunked_request);
        // use chunked encoding if we do not know the length
        if (request->content_length < 0)
            flags.chunked_request = true;
    } else {
        // bodyless request: the headers write is the last write
        assert(!requestBodySource);
        typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
        requestSender = JobCallback(11,5,
                                    Dialer, this, HttpStateData::wroteLast);
    }

    /*
     * Is keep-alive okay for all request methods?
     */
    if (request->flags.mustKeepalive)
        flags.keepalive = true;
    else if (request->flags.pinned)
        flags.keepalive = request->persistent();
    else if (!Config.onoff.server_pconns)
        flags.keepalive = false;
    else if (flags.tunneling)
        // tunneled non pinned bumped requests must not keepalive
        flags.keepalive = !request->flags.sslBumped;
    else if (_peer == NULL)
        flags.keepalive = true;
    else if (_peer->stats.n_keepalives_sent < 10)
        flags.keepalive = true;
    else if ((double) _peer->stats.n_keepalives_recv /
             (double) _peer->stats.n_keepalives_sent > 0.50)
        // keep using pconns to this peer only while it honors most of them
        flags.keepalive = true;

    if (_peer && !flags.tunneling) {
        /*The old code here was
          if (neighborType(_peer, request->url) == PEER_SIBLING && ...
          which is equivalent to:
          if (neighborType(_peer, URL()) == PEER_SIBLING && ...
          or better:
          if (((_peer->type == PEER_MULTICAST && p->options.mcast_siblings) ||
               _peer->type == PEER_SIBLINGS ) && _peer->options.allow_miss)
              flags.only_if_cached = 1;

           But I suppose it was a bug
         */
        if (neighborType(_peer, request->url) == PEER_SIBLING && !_peer->options.allow_miss)
            flags.only_if_cached = true;

        flags.front_end_https = _peer->front_end_https;
    }

    mb.init();
    request->peer_host = _peer ? _peer->host : NULL;
    buildRequestPrefix(&mb);

    debugs(11, 2, "HTTP Server " << serverConnection);
    debugs(11, 2, "HTTP Server REQUEST:\n---------\n" << mb.buf << "\n----------");

    Comm::Write(serverConnection, &mb, requestSender);
    return true;
}
/// Fills buf with the next portion of the request body, applying chunked
/// transfer coding when flags.chunked_request is set. Returns false when no
/// body bytes are available to send yet.
bool
HttpStateData::getMoreRequestBody(MemBuf &buf)
{
    // parent's implementation can handle the no-encoding case
    if (!flags.chunked_request)
        return Client::getMoreRequestBody(buf);

    MemBuf raw;

    Must(requestBodySource != NULL);
    if (!requestBodySource->getMoreData(raw))
        return false; // no request body bytes to chunk yet

    // optimization: pre-allocate buffer size that should be enough
    const mb_size_t rawDataSize = raw.contentSize();
    // we may need to send: hex-chunk-size CRLF raw-data CRLF last-chunk
    buf.init(16 + 2 + rawDataSize + 2 + 5, raw.max_capacity);

    // chunk = chunk-size(hex) CRLF chunk-data CRLF
    buf.appendf("%x\r\n", static_cast<unsigned int>(rawDataSize));
    buf.append(raw.content(), rawDataSize);
    buf.append("\r\n", 2);

    Must(rawDataSize > 0); // we did not accidentally create last-chunk above

    // Do not send last-chunk unless we successfully received everything
    if (receivedWholeRequestBody) {
        Must(!flags.sentLastChunk);
        flags.sentLastChunk = true;
        buf.append("0\r\n\r\n", 5);
    }

    return true;
}
2323 httpStart(FwdState
*fwd
)
2325 debugs(11, 3, fwd
->request
->method
<< ' ' << fwd
->entry
->url());
2326 AsyncJob::Start(new HttpStateData(fwd
));
2330 HttpStateData::start()
2332 if (!sendRequest()) {
2333 debugs(11, 3, "httpStart: aborted");
2334 mustStop("HttpStateData::start failed");
2338 ++ statCounter
.server
.all
.requests
;
2339 ++ statCounter
.server
.http
.requests
;
2342 * We used to set the read timeout here, but not any more.
2343 * Now its set in httpSendComplete() after the full request,
2344 * including request body, has been written to the server.
/// if broken posts are enabled for the request, try to fix and return true
/// (the "fix" is an extra CRLF after the body, for servers that need it;
/// returning true means the caller must not complete the request itself)
bool
HttpStateData::finishingBrokenPost()
{
#if USE_HTTP_VIOLATIONS
    if (!Config.accessList.brokenPosts) {
        debugs(11, 5, HERE << "No brokenPosts list");
        return false;
    }

    // check the configured broken_posts ACL against this request
    ACLFilledChecklist ch(Config.accessList.brokenPosts, originalRequest().getRaw());
    ch.al = fwd->al; // NOTE(review): reconstructed line — confirm against upstream
    ch.syncAle(originalRequest().getRaw(), nullptr);
    if (!ch.fastCheck().allowed()) {
        debugs(11, 5, HERE << "didn't match brokenPosts");
        return false;
    }

    if (!Comm::IsConnOpen(serverConnection)) {
        debugs(11, 3, HERE << "ignoring broken POST for closed " << serverConnection);
        assert(closeHandler != NULL);
        return true; // prevent caller from proceeding as if nothing happened
    }

    debugs(11, 3, "finishingBrokenPost: fixing broken POST");
    // write the extra CRLF; wroteLast will finish the request afterwards
    typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
    requestSender = JobCallback(11,5,
                                Dialer, this, HttpStateData::wroteLast);
    Comm::Write(serverConnection, "\r\n", 2, requestSender, NULL);
    return true;
#else
    return false;
#endif /* USE_HTTP_VIOLATIONS */
}
/// if needed, write last-chunk to end the request body and return true
/// (returning true means wroteLast will complete the request when the
/// last-chunk write finishes)
bool
HttpStateData::finishingChunkedRequest()
{
    if (flags.sentLastChunk) {
        debugs(11, 5, HERE << "already sent last-chunk");
        return false;
    }

    Must(receivedWholeRequestBody); // or we should not be sending last-chunk
    flags.sentLastChunk = true;

    // write the chunked-coding terminator: last-chunk CRLF (no trailers)
    typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
    requestSender = JobCallback(11,5, Dialer, this, HttpStateData::wroteLast);
    Comm::Write(serverConnection, "0\r\n\r\n", 5, requestSender, NULL);
    return true;
}
/// Called when the last request body byte has been written; decides whether
/// a trailer write (last-chunk or broken-POST CRLF) is still needed before
/// the request can be considered fully sent.
void
HttpStateData::doneSendingRequestBody()
{
    Client::doneSendingRequestBody();
    debugs(11,5, HERE << serverConnection);

    // do we need to write something after the last body byte?
    if (flags.chunked_request && finishingChunkedRequest())
        return;
    if (!flags.chunked_request && finishingBrokenPost())
        return;

    sendComplete();
}
2416 // more origin request body data is available
2418 HttpStateData::handleMoreRequestBodyAvailable()
2420 if (eof
|| !Comm::IsConnOpen(serverConnection
)) {
2421 // XXX: we should check this condition in other callbacks then!
2422 // TODO: Check whether this can actually happen: We should unsubscribe
2423 // as a body consumer when the above condition(s) are detected.
2424 debugs(11, DBG_IMPORTANT
, HERE
<< "Transaction aborted while reading HTTP body");
2428 assert(requestBodySource
!= NULL
);
2430 if (requestBodySource
->buf().hasContent()) {
2431 // XXX: why does not this trigger a debug message on every request?
2433 if (flags
.headers_parsed
&& !flags
.abuse_detected
) {
2434 flags
.abuse_detected
= true;
2435 debugs(11, DBG_IMPORTANT
, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << request
->client_addr
<< "' -> '" << entry
->url() << "'" );
2437 if (virginReply()->sline
.status() == Http::scInvalidHeader
) {
2439 mustStop("HttpStateData::handleMoreRequestBodyAvailable");
2445 HttpStateData::handleMoreRequestBodyAvailable();
// premature end of the request body
/// BodyConsumer callback: the request body producer quit before delivering
/// the whole body; records an ICAP-failure error (if we have not written
/// anything yet) and aborts the transaction.
void
HttpStateData::handleRequestBodyProducerAborted()
{
    Client::handleRequestBodyProducerAborted();
    if (entry->isEmpty()) {
        debugs(11, 3, "request body aborted: " << serverConnection);
        // We usually get here when ICAP REQMOD aborts during body processing.
        // We might also get here if client-side aborts, but then our response
        // should not matter because either client-side will provide its own or
        // there will be no response at all (e.g., if the client has left).
        const auto err = new ErrorState(ERR_ICAP_FAILURE, Http::scInternalServerError, fwd->request, fwd->al);
        err->detailError(ERR_DETAIL_SRV_REQMOD_REQ_BODY);
        fwd->fail(err);
    }

    abortTransaction("request body producer aborted");
}
// called when we wrote request headers(!) or a part of the body
void
HttpStateData::sentRequestBody(const CommIoCbParams &io)
{
    // account for bytes actually written to the server side
    if (io.size > 0)
        statCounter.server.http.kbytes_out += io.size;

    Client::sentRequestBody(io);
}
/// Client API: abort the whole transaction, logging the caller's reason and
/// stopping this job.
void
HttpStateData::abortAll(const char *reason)
{
    debugs(11,5, HERE << "aborting transaction for " << reason <<
           "; " << serverConnection << ", this " << this);
    mustStop(reason);
}
/// Starts with the conservative "do not reuse" answer and no reason;
/// make() records the actual verdict later.
HttpStateData::ReuseDecision::ReuseDecision(const StoreEntry *e, const Http::StatusCode code)
    : answer(HttpStateData::ReuseDecision::reuseNot), reason(nullptr), entry(e), statusCode(code) {}
/// Records the decision and its rationale, returning the answer so callers
/// can decide-and-return in a single statement.
/// NOTE(review): body reconstructed — the extraction dropped these lines;
/// confirm against the canonical source.
HttpStateData::ReuseDecision::Answers
HttpStateData::ReuseDecision::make(const HttpStateData::ReuseDecision::Answers ans, const char *why)
{
    answer = ans;
    reason = why;
    return answer;
}
2496 std::ostream
&operator <<(std::ostream
&os
, const HttpStateData::ReuseDecision
&d
)
2498 static const char *ReuseMessages
[] = {
2499 "do not cache and do not share", // reuseNot
2500 "cache positively and share", // cachePositively
2501 "cache negatively and share", // cacheNegatively
2502 "do not cache but share" // doNotCacheButShare
2505 assert(d
.answer
>= HttpStateData::ReuseDecision::reuseNot
&&
2506 d
.answer
<= HttpStateData::ReuseDecision::doNotCacheButShare
);
2507 return os
<< ReuseMessages
[d
.answer
] << " because " << d
.reason
<<
2508 "; HTTP status " << d
.statusCode
<< " " << *(d
.entry
);