/*
- * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
+ * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
*
* Squid software is distributed under GPLv2+ license and includes
* contributions from numerous individuals and organizations.
/*
* Anonymizing patch by lutz@as-node.jena.thur.de
- * have a look into http-anon.c to get more informations.
+ * have a look into http-anon.c to get more information.
*/
#include "squid.h"
#include "comm/Read.h"
#include "comm/Write.h"
#include "CommRead.h"
-#include "err_detail_type.h"
+#include "error/Detail.h"
#include "errorpage.h"
#include "fd.h"
#include "fde.h"
#include "http.h"
#include "http/one/ResponseParser.h"
#include "http/one/TeChunkedParser.h"
+#include "http/Stream.h"
#include "HttpControlMsg.h"
#include "HttpHdrCc.h"
#include "HttpHdrContRange.h"
#include "HttpHeaderTools.h"
#include "HttpReply.h"
#include "HttpRequest.h"
-#include "HttpStateFlags.h"
+#include "HttpUpgradeProtocolAccess.h"
#include "log/access_log.h"
#include "MemBuf.h"
#include "MemObject.h"
#include "neighbors.h"
+#include "pconn.h"
#include "peer_proxy_negotiate_auth.h"
#include "profiler/Profiler.h"
#include "refresh.h"
#include "Store.h"
#include "StrList.h"
#include "tools.h"
-#include "URL.h"
#include "util.h"
#if USE_AUTH
#include "DelayPools.h"
#endif
-#define SQUID_ENTER_THROWING_CODE() try {
-#define SQUID_EXIT_THROWING_CODE(status) \
- status = true; \
- } \
- catch (const std::exception &e) { \
- debugs (11, 1, "Exception error:" << e.what()); \
- status = false; \
- }
-
CBDATA_CLASS_INIT(HttpStateData);
static const char *const crlf = "\r\n";
static void httpMaybeRemovePublic(StoreEntry *, Http::StatusCode);
static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request,
- HttpHeader * hdr_out, const int we_do_ranges, const HttpStateFlags &);
-//Declared in HttpHeaderTools.cc
-void httpHdrAdd(HttpHeader *heads, HttpRequest *request, const AccessLogEntryPointer &al, HeaderWithAclList &headers_add);
+ HttpHeader * hdr_out, const int we_do_ranges, const Http::StateFlags &);
HttpStateData::HttpStateData(FwdState *theFwdState) :
AsyncJob("HttpStateData"),
lastChunk(0),
httpChunkDecoder(NULL),
payloadSeen(0),
- payloadTruncated(0)
+ payloadTruncated(0),
+ sawDateGoBack(false)
{
debugs(11,5,HERE << "HttpStateData " << this << " created");
ignoreCacheControl = false;
surrogateNoStore = false;
serverConnection = fwd->serverConnection();
- // reset peer response time stats for %<pt
- request->hier.peer_http_request_sent.tv_sec = 0;
- request->hier.peer_http_request_sent.tv_usec = 0;
-
if (fwd->serverConnection() != NULL)
_peer = cbdataReference(fwd->serverConnection()->getPeer()); /* might be NULL */
+ flags.peering = _peer;
+ flags.tunneling = (_peer && request->flags.sslBumped);
+ flags.toOrigin = (!_peer || _peer->options.originserver || request->flags.sslBumped);
+
if (_peer) {
- request->flags.proxying = true;
/*
* This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
* We might end up getting the object from somewhere else if,
* for example, the request to this neighbor fails.
*/
- if (_peer->options.proxy_only)
- entry->releaseRequest();
+ if (!flags.tunneling && _peer->options.proxy_only)
+ entry->releaseRequest(true);
#if USE_DELAY_POOLS
entry->setNoDelay(_peer->options.no_delay);
cbdataReferenceDone(_peer);
+ delete upgradeHeaderOut;
+
debugs(11,5, HERE << "HttpStateData " << this << " destroyed; " << serverConnection);
}
HttpStateData::httpStateConnClosed(const CommCloseCbParams ¶ms)
{
debugs(11, 5, "httpStateFree: FD " << params.fd << ", httpState=" << params.data);
+ doneWithFwd = "httpStateConnClosed()"; // assume FwdState is monitoring too
mustStop("HttpStateData::httpStateConnClosed");
}
debugs(11, 4, serverConnection << ": '" << entry->url() << "'");
if (entry->store_status == STORE_PENDING) {
- fwd->fail(new ErrorState(ERR_READ_TIMEOUT, Http::scGatewayTimeout, fwd->request));
+ fwd->fail(new ErrorState(ERR_READ_TIMEOUT, Http::scGatewayTimeout, fwd->request, fwd->al));
}
- serverConnection->close();
+ closeServer();
+ mustStop("HttpStateData::httpTimeout");
+}
+
+static StoreEntry *
+findPreviouslyCachedEntry(StoreEntry *newEntry) {
+ assert(newEntry->mem_obj);
+ return newEntry->mem_obj->request ?
+ storeGetPublicByRequest(newEntry->mem_obj->request.getRaw()) :
+ storeGetPublic(newEntry->mem_obj->storeId(), newEntry->mem_obj->method);
}
/// Remove an existing public store entry if the incoming response (to be
{
int remove = 0;
int forbidden = 0;
- StoreEntry *pe;
// If the incoming response already goes into a public entry, then there is
// nothing to remove. This protects ready-for-collapsing entries as well.
if (!EBIT_TEST(e->flags, KEY_PRIVATE))
return;
+ // If the new/incoming response cannot be stored, then it does not
+ // compete with the old stored response for the public key, and the
+ // old stored response should be left as is.
+ if (e->mem_obj->request && !e->mem_obj->request->flags.cachable)
+ return;
+
switch (status) {
case Http::scOkay:
case Http::scFound:
+ case Http::scSeeOther:
+
case Http::scGone:
case Http::scNotFound:
if (!remove && !forbidden)
return;
- assert(e->mem_obj);
-
- if (e->mem_obj->request)
- pe = storeGetPublicByRequest(e->mem_obj->request);
- else
- pe = storeGetPublic(e->mem_obj->storeId(), e->mem_obj->method);
+ StoreEntry *pe = findPreviouslyCachedEntry(e);
if (pe != NULL) {
assert(e != pe);
#if USE_HTCP
- neighborsHtcpClear(e, NULL, e->mem_obj->request, e->mem_obj->method, HTCP_CLR_INVALIDATION);
+ neighborsHtcpClear(e, e->mem_obj->request.getRaw(), e->mem_obj->method, HTCP_CLR_INVALIDATION);
#endif
- pe->release();
+ pe->release(true);
}
/** \par
* changed.
*/
if (e->mem_obj->request)
- pe = storeGetPublicByRequestMethod(e->mem_obj->request, Http::METHOD_HEAD);
+ pe = storeGetPublicByRequestMethod(e->mem_obj->request.getRaw(), Http::METHOD_HEAD);
else
pe = storeGetPublic(e->mem_obj->storeId(), Http::METHOD_HEAD);
if (pe != NULL) {
assert(e != pe);
#if USE_HTCP
- neighborsHtcpClear(e, NULL, e->mem_obj->request, HttpRequestMethod(Http::METHOD_HEAD), HTCP_CLR_INVALIDATION);
+ neighborsHtcpClear(e, e->mem_obj->request.getRaw(), HttpRequestMethod(Http::METHOD_HEAD), HTCP_CLR_INVALIDATION);
#endif
- pe->release();
+ pe->release(true);
}
}
HttpHdrScTarget *sctusable = reply->surrogate_control->getMergedTarget(Config.Accel.surrogate_id);
if (sctusable) {
- if (sctusable->noStore() ||
+ if (sctusable->hasNoStore() ||
(Config.onoff.surrogate_is_remote
&& sctusable->noStoreRemote())) {
surrogateNoStore = true;
- entry->makePrivate();
+ // Be conservative for now and make it non-shareable because
+ // there is not enough information here to make the decision.
+ entry->makePrivate(false);
}
/* The HttpHeader logic cannot tell if the header it's parsing is a reply to an
}
}
-int
-HttpStateData::cacheableReply()
+HttpStateData::ReuseDecision::Answers
+HttpStateData::reusableReply(HttpStateData::ReuseDecision &decision)
{
HttpReply const *rep = finalReply();
HttpHeader const *hdr = &rep->header;
#define REFRESH_OVERRIDE(flag) 0
#endif
- if (EBIT_TEST(entry->flags, RELEASE_REQUEST)) {
- debugs(22, 3, "NO because " << *entry << " has been released.");
- return 0;
- }
+ if (EBIT_TEST(entry->flags, RELEASE_REQUEST))
+ return decision.make(ReuseDecision::doNotCacheButShare, "the entry has been released");
+
+ // RFC 7234 section 4: a cache MUST use the most recent response
+ // (as determined by the Date header field)
+ // TODO: whether such responses could be shareable?
+ if (sawDateGoBack)
+ return decision.make(ReuseDecision::reuseNot, "the response has an older date header");
// Check for Surrogate/1.0 protocol conditions
// NP: reverse-proxy traffic our parent server has instructed us never to cache
- if (surrogateNoStore) {
- debugs(22, 3, HERE << "NO because Surrogate-Control:no-store");
- return 0;
- }
+ if (surrogateNoStore)
+ return decision.make(ReuseDecision::reuseNot, "Surrogate-Control:no-store");
// RFC 2616: HTTP/1.1 Cache-Control conditions
if (!ignoreCacheControl) {
// for now we are not reliably doing that so we waste CPU re-checking request CC
// RFC 2616 section 14.9.2 - MUST NOT cache any response with request CC:no-store
- if (request && request->cache_control && request->cache_control->noStore() &&
- !REFRESH_OVERRIDE(ignore_no_store)) {
- debugs(22, 3, HERE << "NO because client request Cache-Control:no-store");
- return 0;
- }
+ if (request && request->cache_control && request->cache_control->hasNoStore() &&
+ !REFRESH_OVERRIDE(ignore_no_store))
+ return decision.make(ReuseDecision::reuseNot,
+ "client request Cache-Control:no-store");
// NP: request CC:no-cache only means cache READ is forbidden. STORE is permitted.
- if (rep->cache_control && rep->cache_control->hasNoCache() && rep->cache_control->noCache().size() > 0) {
+ if (rep->cache_control && rep->cache_control->hasNoCacheWithParameters()) {
/* TODO: we are allowed to cache when no-cache= has parameters.
* Provided we strip away any of the listed headers unless they are revalidated
* successfully (ie, must revalidate AND these headers are prohibited on stale replies).
* That is a bit tricky for squid right now so we avoid caching entirely.
*/
- debugs(22, 3, HERE << "NO because server reply Cache-Control:no-cache has parameters");
- return 0;
+ return decision.make(ReuseDecision::reuseNot,
+ "server reply Cache-Control:no-cache has parameters");
}
// NP: request CC:private is undefined. We ignore.
// NP: other request CC flags are limiters on HIT/MISS. We don't care about here.
// RFC 2616 section 14.9.2 - MUST NOT cache any response with CC:no-store
- if (rep->cache_control && rep->cache_control->noStore() &&
- !REFRESH_OVERRIDE(ignore_no_store)) {
- debugs(22, 3, HERE << "NO because server reply Cache-Control:no-store");
- return 0;
- }
+ if (rep->cache_control && rep->cache_control->hasNoStore() &&
+ !REFRESH_OVERRIDE(ignore_no_store))
+ return decision.make(ReuseDecision::reuseNot,
+ "server reply Cache-Control:no-store");
// RFC 2616 section 14.9.1 - MUST NOT cache any response with CC:private in a shared cache like Squid.
// CC:private overrides CC:public when both are present in a response.
* successfully (ie, must revalidate AND these headers are prohibited on stale replies).
* That is a bit tricky for squid right now so we avoid caching entirely.
*/
- debugs(22, 3, HERE << "NO because server reply Cache-Control:private");
- return 0;
+ return decision.make(ReuseDecision::reuseNot,
+ "server reply Cache-Control:private");
}
}
// RFC 2068, sec 14.9.4 - MUST NOT cache any response with Authentication UNLESS certain CC controls are present
// allow HTTP violations to IGNORE those controls (ie re-block caching Auth)
if (request && (request->flags.auth || request->flags.authSent)) {
- if (!rep->cache_control) {
- debugs(22, 3, HERE << "NO because Authenticated and server reply missing Cache-Control");
- return 0;
- }
+ if (!rep->cache_control)
+ return decision.make(ReuseDecision::reuseNot,
+ "authenticated and server reply missing Cache-Control");
- if (ignoreCacheControl) {
- debugs(22, 3, HERE << "NO because Authenticated and ignoring Cache-Control");
- return 0;
- }
+ if (ignoreCacheControl)
+ return decision.make(ReuseDecision::reuseNot,
+ "authenticated and ignoring Cache-Control");
bool mayStore = false;
// HTTPbis pt6 section 3.2: a response CC:public is present
- if (rep->cache_control->Public()) {
+ if (rep->cache_control->hasPublic()) {
debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:public");
mayStore = true;
// HTTPbis pt6 section 3.2: a response CC:must-revalidate is present
- } else if (rep->cache_control->mustRevalidate()) {
+ } else if (rep->cache_control->hasMustRevalidate()) {
debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:must-revalidate");
mayStore = true;
// HTTPbis WG verdict on this is that it is omitted from the spec due to being 'unexpected' by
// some. The caching+revalidate is not exactly unsafe though with Squids interpretation of no-cache
// (without parameters) as equivalent to must-revalidate in the reply.
- } else if (rep->cache_control->hasNoCache() && rep->cache_control->noCache().size() == 0) {
+ } else if (rep->cache_control->hasNoCacheWithoutParameters()) {
debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:no-cache (equivalent to must-revalidate)");
mayStore = true;
#endif
// HTTPbis pt6 section 3.2: a response CC:s-maxage is present
- } else if (rep->cache_control->sMaxAge()) {
+ } else if (rep->cache_control->hasSMaxAge()) {
debugs(22, 3, HERE << "Authenticated but server reply Cache-Control:s-maxage");
mayStore = true;
}
- if (!mayStore) {
- debugs(22, 3, HERE << "NO because Authenticated transaction");
- return 0;
- }
+ if (!mayStore)
+ return decision.make(ReuseDecision::reuseNot, "authenticated transaction");
// NP: response CC:no-cache is equivalent to CC:must-revalidate,max-age=0. We MAY cache, and do so.
// NP: other request CC flags are limiters on HIT/MISS/REFRESH. We don't care about here.
* probably should not be cachable
*/
if ((v = hdr->getStr(Http::HdrType::CONTENT_TYPE)))
- if (!strncasecmp(v, "multipart/x-mixed-replace", 25)) {
- debugs(22, 3, HERE << "NO because Content-Type:multipart/x-mixed-replace");
- return 0;
- }
+ if (!strncasecmp(v, "multipart/x-mixed-replace", 25))
+ return decision.make(ReuseDecision::reuseNot, "Content-Type:multipart/x-mixed-replace");
+
+ // TODO: if possible, provide more specific message for each status code
+ static const char *shareableError = "shareable error status code";
+ static const char *nonShareableError = "non-shareable error status code";
+ ReuseDecision::Answers statusAnswer = ReuseDecision::reuseNot;
+ const char *statusReason = nonShareableError;
switch (rep->sline.status()) {
+
+ /* There are several situations when a non-cacheable response may be
+ * still shareable (e.g., among collapsed clients). We assume that these
+ * are 3xx and 5xx responses, indicating server problems and some of
+ * 4xx responses, common for all clients with a given cache key (e.g.,
+ * 404 Not Found or 414 URI Too Long). On the other hand, we should not
+ * share non-cacheable client-specific errors, such as 400 Bad Request
+ * or 406 Not Acceptable.
+ */
+
/* Responses that are cacheable */
case Http::scOkay:
* unless we know how to refresh it.
*/
- if (!refreshIsCachable(entry) && !REFRESH_OVERRIDE(store_stale)) {
- debugs(22, 3, "NO because refreshIsCachable() returned non-cacheable..");
- return 0;
- } else {
- debugs(22, 3, HERE << "YES because HTTP status " << rep->sline.status());
- return 1;
- }
- /* NOTREACHED */
+ if (refreshIsCachable(entry) || REFRESH_OVERRIDE(store_stale))
+ decision.make(ReuseDecision::cachePositively, "refresh check returned cacheable");
+ else
+ decision.make(ReuseDecision::doNotCacheButShare, "refresh check returned non-cacheable");
break;
/* Responses that only are cacheable if the server says so */
case Http::scFound:
case Http::scTemporaryRedirect:
- if (rep->date <= 0) {
- debugs(22, 3, HERE << "NO because HTTP status " << rep->sline.status() << " and Date missing/invalid");
- return 0;
- }
- if (rep->expires > rep->date) {
- debugs(22, 3, HERE << "YES because HTTP status " << rep->sline.status() << " and Expires > Date");
- return 1;
- } else {
- debugs(22, 3, HERE << "NO because HTTP status " << rep->sline.status() << " and Expires <= Date");
- return 0;
- }
- /* NOTREACHED */
+ if (rep->date <= 0)
+ decision.make(ReuseDecision::doNotCacheButShare, "Date is missing/invalid");
+ else if (rep->expires > rep->date)
+ decision.make(ReuseDecision::cachePositively, "Expires > Date");
+ else
+ decision.make(ReuseDecision::doNotCacheButShare, "Expires <= Date");
break;
- /* Errors can be negatively cached */
-
+ /* These responses can be negatively cached. Most can also be shared. */
case Http::scNoContent:
-
case Http::scUseProxy:
-
- case Http::scBadRequest:
-
case Http::scForbidden:
-
case Http::scNotFound:
-
case Http::scMethodNotAllowed:
-
case Http::scUriTooLong:
-
case Http::scInternalServerError:
-
case Http::scNotImplemented:
-
case Http::scBadGateway:
-
case Http::scServiceUnavailable:
-
case Http::scGatewayTimeout:
case Http::scMisdirectedRequest:
+ statusAnswer = ReuseDecision::doNotCacheButShare;
+ statusReason = shareableError;
+ // fall through to the actual decision making below
- debugs(22, 3, "MAYBE because HTTP status " << rep->sline.status());
- return -1;
-
- /* NOTREACHED */
+ case Http::scBadRequest: // no sharing; perhaps the server did not like something specific to this request
+#if USE_HTTP_VIOLATIONS
+ if (Config.negativeTtl > 0)
+ decision.make(ReuseDecision::cacheNegatively, "Config.negativeTtl > 0");
+ else
+#endif
+ decision.make(statusAnswer, statusReason);
break;
- /* Some responses can never be cached */
-
- case Http::scPartialContent: /* Not yet supported */
-
+ /* these responses can never be cached, some
+ of them can be shared though */
case Http::scSeeOther:
-
case Http::scNotModified:
-
case Http::scUnauthorized:
-
case Http::scProxyAuthenticationRequired:
-
- case Http::scInvalidHeader: /* Squid header parsing error */
-
- case Http::scHeaderTooLarge:
-
case Http::scPaymentRequired:
+ case Http::scInsufficientStorage:
+ // TODO: use more specific reason for non-error status codes
+ decision.make(ReuseDecision::doNotCacheButShare, shareableError);
+ break;
+
+ case Http::scPartialContent: /* Not yet supported. TODO: make shareable for suitable ranges */
case Http::scNotAcceptable:
- case Http::scRequestTimeout:
- case Http::scConflict:
+ case Http::scRequestTimeout: // TODO: is this shareable?
+ case Http::scConflict: // TODO: is this shareable?
case Http::scLengthRequired:
case Http::scPreconditionFailed:
case Http::scPayloadTooLarge:
case Http::scUnsupportedMediaType:
case Http::scUnprocessableEntity:
- case Http::scLocked:
+ case Http::scLocked: // TODO: is this shareable?
case Http::scFailedDependency:
- case Http::scInsufficientStorage:
case Http::scRequestedRangeNotSatisfied:
case Http::scExpectationFailed:
-
- debugs(22, 3, HERE << "NO because HTTP status " << rep->sline.status());
- return 0;
+ case Http::scInvalidHeader: /* Squid header parsing error */
+ case Http::scHeaderTooLarge:
+ decision.make(ReuseDecision::reuseNot, nonShareableError);
+ break;
default:
/* RFC 2616 section 6.1.1: an unrecognized response MUST NOT be cached. */
- debugs (11, 3, HERE << "NO because unknown HTTP status code " << rep->sline.status());
- return 0;
-
- /* NOTREACHED */
+ decision.make(ReuseDecision::reuseNot, "unknown status code");
break;
}
- /* NOTREACHED */
+ return decision.answer;
}
-/*
- * For Vary, store the relevant request headers as
- * virtual headers in the reply
- * Returns false if the variance cannot be stored
- */
-const char *
-httpMakeVaryMark(HttpRequest * request, HttpReply const * reply)
+/// assemble a variant key (vary-mark) from the given Vary header and HTTP request
+static void
+assembleVaryKey(String &vary, SBuf &vstr, const HttpRequest &request)
{
- String vary, hdr;
- const char *pos = NULL;
- const char *item;
- const char *value;
- int ilen;
- static String vstr;
-
- vstr.clean();
- vary = reply->header.getList(Http::HdrType::VARY);
+ static const SBuf asterisk("*");
+ const char *pos = nullptr;
+ const char *item = nullptr;
+ int ilen = 0;
while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
- static const SBuf asterisk("*");
SBuf name(item, ilen);
if (name == asterisk) {
- vstr.clean();
+ vstr = asterisk;
break;
}
name.toLower();
- strListAdd(&vstr, name.c_str(), ',');
- hdr = request->header.getByName(name);
- value = hdr.termedBuf();
+ if (!vstr.isEmpty())
+ vstr.append(", ", 2);
+ vstr.append(name);
+ String hdr(request.header.getByName(name));
+ const char *value = hdr.termedBuf();
if (value) {
value = rfc1738_escape_part(value);
vstr.append("=\"", 2);
hdr.clean();
}
+}
- vary.clean();
-#if X_ACCELERATOR_VARY
-
- pos = NULL;
- vary = reply->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);
-
- while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
- char *name = (char *)xmalloc(ilen + 1);
- xstrncpy(name, item, ilen + 1);
- Tolower(name);
- strListAdd(&vstr, name, ',');
- hdr = request->header.getByName(name);
- safe_free(name);
- value = hdr.termedBuf();
-
- if (value) {
- value = rfc1738_escape_part(value);
- vstr.append("=\"", 2);
- vstr.append(value);
- vstr.append("\"", 1);
- }
+/*
+ * For Vary, store the relevant request headers as
+ * virtual headers in the reply
+ * Returns an empty SBuf if the variance cannot be stored
+ */
+SBuf
+httpMakeVaryMark(HttpRequest * request, HttpReply const * reply)
+{
+ SBuf vstr;
+ String vary;
- hdr.clean();
- }
+ vary = reply->header.getList(Http::HdrType::VARY);
+ assembleVaryKey(vary, vstr, *request);
+#if X_ACCELERATOR_VARY
vary.clean();
+ vary = reply->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);
+ assembleVaryKey(vary, vstr, *request);
#endif
- debugs(11, 3, "httpMakeVaryMark: " << vstr);
- return vstr.termedBuf();
+ debugs(11, 3, vstr);
+ return vstr;
}
void
HttpStateData::keepaliveAccounting(HttpReply *reply)
{
if (flags.keepalive)
- if (_peer)
+ if (flags.peering && !flags.tunneling)
++ _peer->stats.n_keepalives_sent;
if (reply->keep_alive) {
- if (_peer)
+ if (flags.peering && !flags.tunneling)
++ _peer->stats.n_keepalives_recv;
if (Config.onoff.detect_broken_server_pconns
void
HttpStateData::checkDateSkew(HttpReply *reply)
{
- if (reply->date > -1 && !_peer) {
+ if (reply->date > -1 && flags.toOrigin) {
int skew = abs((int)(reply->date - squid_curtime));
if (skew > 86400)
hp = new Http1::ResponseParser;
bool parsedOk = hp->parse(inBuf);
+ // remember the actual received status-code before returning on errors,
+ // overwriting any previously stored value from earlier forwarding attempts
+ request->hier.peer_reply_status = hp->messageStatus(); // may still be scNone
// sync the buffers after parsing.
inBuf = hp->remaining();
if (!parsedOk) {
// unrecoverable parsing error
+ // TODO: Use Raw! XXX: inBuf no longer has the [beginning of the] malformed header.
debugs(11, 3, "Non-HTTP-compliant header:\n---------\n" << inBuf << "\n----------");
flags.headers_parsed = true;
HttpReply *newrep = new HttpReply;
- newrep->sline.set(Http::ProtocolVersion(), hp->messageStatus());
- HttpReply *vrep = setVirginReply(newrep);
- entry->replaceHttpReply(vrep);
- // XXX: close the server connection ?
+ newrep->sline.set(Http::ProtocolVersion(), hp->parseStatusCode);
+ setVirginReply(newrep);
ctx_exit(ctx);
return;
}
// XXX: RFC 7230 indicates we MAY ignore the reason phrase,
// and use an empty string on unknown status.
// We do that now to avoid performance regression from using SBuf::c_str()
- newrep->sline.set(Http::ProtocolVersion(1,1), hp->messageStatus() /* , hp->reasonPhrase() */);
- newrep->sline.protocol = newrep->sline.version.protocol = hp->messageProtocol().protocol;
- newrep->sline.version.major = hp->messageProtocol().major;
- newrep->sline.version.minor = hp->messageProtocol().minor;
+ newrep->sline.set(hp->messageProtocol(), hp->messageStatus() /* , hp->reasonPhrase() */);
// parse headers
if (!newrep->parseHeader(*hp)) {
- // XXX: when Http::ProtocolVersion is a function, remove this hack. just set with messageProtocol()
- newrep->sline.set(Http::ProtocolVersion(), Http::scInvalidHeader);
- newrep->sline.version.protocol = hp->messageProtocol().protocol;
- newrep->sline.version.major = hp->messageProtocol().major;
- newrep->sline.version.minor = hp->messageProtocol().minor;
+ newrep->sline.set(hp->messageProtocol(), Http::scInvalidHeader);
debugs(11, 2, "error parsing response headers mime block");
}
// done with Parser, now process using the HttpReply
hp = NULL;
+ newrep->sources |= request->url.getScheme() == AnyP::PROTO_HTTPS ? Http::Message::srcHttps : Http::Message::srcHttp;
+
newrep->removeStaleWarnings();
- if (newrep->sline.protocol == AnyP::PROTO_HTTP && newrep->sline.status() >= 100 && newrep->sline.status() < 200) {
+ if (newrep->sline.version.protocol == AnyP::PROTO_HTTP && Http::Is1xx(newrep->sline.status())) {
handle1xx(newrep);
ctx_exit(ctx);
return;
}
flags.chunked = false;
- if (newrep->sline.protocol == AnyP::PROTO_HTTP && newrep->header.chunked()) {
+ if (newrep->sline.version.protocol == AnyP::PROTO_HTTP && newrep->header.chunked()) {
flags.chunked = true;
httpChunkDecoder = new Http1::TeChunkedParser;
}
processSurrogateControl (vrep);
- request->hier.peer_reply_status = newrep->sline.status();
-
ctx_exit(ctx);
}
void
HttpStateData::handle1xx(HttpReply *reply)
{
+ if (fwd->al)
+ fwd->al->reply = reply;
+
HttpReply::Pointer msg(reply); // will destroy reply if unused
// one 1xx at a time: we must not be called while waiting for previous 1xx
Must(!flags.handling1xx);
flags.handling1xx = true;
- if (!request->canHandle1xx() || request->forcedBodyContinuation) {
- debugs(11, 2, "ignoring 1xx because it is " << (request->forcedBodyContinuation ? "already sent" : "not supported by client"));
- proceedAfter1xx();
- return;
- }
+ const auto statusCode = reply->sline.status();
+
+ // drop1xx() needs to handle HTTP 101 (Switching Protocols) responses
+ // specially because they indicate that the server has stopped speaking HTTP
+ Must(!flags.serverSwitchedProtocols);
+ flags.serverSwitchedProtocols = (statusCode == Http::scSwitchingProtocols);
+
+ if (statusCode == Http::scContinue && request->forcedBodyContinuation)
+ return drop1xx("we have sent it already");
+
+ if (!request->canHandle1xx())
+ return drop1xx("the client does not support it");
#if USE_HTTP_VIOLATIONS
// check whether the 1xx response forwarding is allowed by squid.conf
if (Config.accessList.reply) {
- ACLFilledChecklist ch(Config.accessList.reply, originalRequest(), NULL);
+ ACLFilledChecklist ch(Config.accessList.reply, originalRequest().getRaw());
+ ch.al = fwd->al;
ch.reply = reply;
+ ch.syncAle(originalRequest().getRaw(), nullptr);
HTTPMSGLOCK(ch.reply);
- if (ch.fastCheck() != ACCESS_ALLOWED) { // TODO: support slow lookups?
- debugs(11, 3, HERE << "ignoring denied 1xx");
- proceedAfter1xx();
- return;
- }
+ if (!ch.fastCheck().allowed()) // TODO: support slow lookups?
+ return drop1xx("http_reply_access blocked it");
}
#endif // USE_HTTP_VIOLATIONS
+ if (flags.serverSwitchedProtocols) {
+ if (const auto reason = blockSwitchingProtocols(*reply))
+ return drop1xx(reason);
+ }
+
debugs(11, 2, HERE << "forwarding 1xx to client");
// the Sink will use this to call us back after writing 1xx to the client
// for similar reasons without a 1xx response.
}
+/// if possible, safely ignores the received 1xx control message
+/// otherwise, terminates the server connection
+void
+HttpStateData::drop1xx(const char *reason)
+{
+ if (flags.serverSwitchedProtocols) {
+ debugs(11, 2, "bad 101 because " << reason);
+ const auto err = new ErrorState(ERR_INVALID_RESP, Http::scBadGateway, request.getRaw(), fwd->al);
+ fwd->fail(err);
+ closeServer();
+ mustStop("prohibited HTTP/101 response");
+ return;
+ }
+
+ debugs(11, 2, "ignoring 1xx because " << reason);
+ proceedAfter1xx();
+}
+
+/// \retval nil if the HTTP/101 (Switching Protocols) reply should be forwarded
+/// \retval reason why an attempt to switch protocols should be stopped
+const char *
+HttpStateData::blockSwitchingProtocols(const HttpReply &reply) const
+{
+ if (!upgradeHeaderOut)
+ return "Squid offered no Upgrade at all, but server switched to a tunnel";
+
+ // See RFC 7230 section 6.7 for the corresponding MUSTs
+
+ if (!reply.header.has(Http::HdrType::UPGRADE))
+ return "server did not send an Upgrade header field";
+
+ if (!reply.header.hasListMember(Http::HdrType::CONNECTION, "upgrade", ','))
+ return "server did not send 'Connection: upgrade'";
+
+ const auto acceptedProtos = reply.header.getList(Http::HdrType::UPGRADE);
+ const char *pos = nullptr;
+ const char *accepted = nullptr;
+ int acceptedLen = 0;
+ while (strListGetItem(&acceptedProtos, ',', &accepted, &acceptedLen, &pos)) {
+ debugs(11, 5, "server accepted at least" << Raw(nullptr, accepted, acceptedLen));
+ return nullptr; // OK: let the client validate server's selection
+ }
+
+ return "server sent an essentially empty Upgrade header field";
+}
+
/// restores state and resumes processing after 1xx is ignored or forwarded
void
HttpStateData::proceedAfter1xx()
{
Must(flags.handling1xx);
+
+ if (flags.serverSwitchedProtocols) {
+ // pass server connection ownership to request->clientConnectionManager
+ ConnStateData::ServerConnectionContext scc(serverConnection, request, inBuf);
+ typedef UnaryMemFunT<ConnStateData, ConnStateData::ServerConnectionContext> MyDialer;
+ AsyncCall::Pointer call = asyncCall(11, 3, "ConnStateData::noteTakeServerConnectionControl",
+ MyDialer(request->clientConnectionManager,
+ &ConnStateData::noteTakeServerConnectionControl, scc));
+ ScheduleCallHere(call);
+ fwd->unregister(serverConnection);
+ comm_remove_close_handler(serverConnection->fd, closeHandler);
+ closeHandler = nullptr;
+ serverConnection = nullptr;
+ doneWithFwd = "switched protocols";
+ mustStop(doneWithFwd);
+ return;
+ }
+
debugs(11, 2, "continuing with " << payloadSeen << " bytes in buffer after 1xx");
CallJobHere(11, 3, this, HttpStateData, HttpStateData::processReply);
}
/**
* returns true if the peer can support connection pinning
*/
-bool HttpStateData::peerSupportsConnectionPinning() const
+bool
+HttpStateData::peerSupportsConnectionPinning() const
{
- const HttpReply *rep = entry->mem_obj->getReply();
- const HttpHeader *hdr = &rep->header;
- bool rc;
- String header;
-
if (!_peer)
return true;
+ // we are talking "through" rather than "to" our _peer
+ if (flags.tunneling)
+ return true;
+
/*If this peer does not support connection pinning (authenticated
connections) return false
*/
if (!_peer->connection_auth)
return false;
+ const auto &rep = entry->mem().freshestReply();
+
/*The peer supports connection pinning and the http reply status
is not unauthorized, so the related connection can be pinned
*/
- if (rep->sline.status() != Http::scUnauthorized)
+ if (rep.sline.status() != Http::scUnauthorized)
return true;
/*The server respond with Http::scUnauthorized and the peer configured
reply and has in its list the "Session-Based-Authentication"
which means that the peer supports connection pinning.
*/
- if (!hdr->has(Http::HdrType::PROXY_SUPPORT))
- return false;
-
- header = hdr->getStrOrList(Http::HdrType::PROXY_SUPPORT);
- /* XXX This ought to be done in a case-insensitive manner */
- rc = (strstr(header.termedBuf(), "Session-Based-Authentication") != NULL);
+ if (rep.header.hasListMember(Http::HdrType::PROXY_SUPPORT, "Session-Based-Authentication", ','))
+ return true;
- return rc;
+ return false;
}
// Called when we parsed (and possibly adapted) the headers but
Ctx ctx = ctx_enter(entry->mem_obj->urlXXX());
HttpReply *rep = finalReply();
+ const Http::StatusCode statusCode = rep->sline.status();
entry->timestampsSet();
/* Check if object is cacheable or not based on reply code */
- debugs(11, 3, "HTTP CODE: " << rep->sline.status());
+ debugs(11, 3, "HTTP CODE: " << statusCode);
+
+ if (StoreEntry *oldEntry = findPreviouslyCachedEntry(entry)) {
+ oldEntry->lock("HttpStateData::haveParsedReplyHeaders");
+ sawDateGoBack = rep->olderThan(oldEntry->hasFreshestReply());
+ oldEntry->unlock("HttpStateData::haveParsedReplyHeaders");
+ }
- if (neighbors_do_private_keys)
+ if (neighbors_do_private_keys && !sawDateGoBack)
httpMaybeRemovePublic(entry, rep->sline.status());
bool varyFailure = false;
|| rep->header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY)
#endif
) {
- const char *vary = httpMakeVaryMark(request, rep);
-
- if (!vary) {
- entry->makePrivate();
- if (!fwd->reforwardableStatus(rep->sline.status()))
- EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+ const SBuf vary(httpMakeVaryMark(request.getRaw(), rep));
+
+ if (vary.isEmpty()) {
+ // TODO: check whether such responses are shareable.
+ // Do not share for now.
+ entry->makePrivate(false);
+ if (fwd->reforwardableStatus(rep->sline.status()))
+ EBIT_SET(entry->flags, ENTRY_FWD_HDR_WAIT);
varyFailure = true;
} else {
- entry->mem_obj->vary_headers = xstrdup(vary);
+ entry->mem_obj->vary_headers = vary;
+
+ // RFC 7231 section 7.1.4
+ // Vary:* can be cached, but has mandatory revalidation
+ static const SBuf asterisk("*");
+ if (vary == asterisk)
+ EBIT_SET(entry->flags, ENTRY_REVALIDATE_ALWAYS);
}
}
* If its not a reply that we will re-forward, then
* allow the client to get it.
*/
- if (!fwd->reforwardableStatus(rep->sline.status()))
- EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
+ if (fwd->reforwardableStatus(rep->sline.status()))
+ EBIT_SET(entry->flags, ENTRY_FWD_HDR_WAIT);
+
+ ReuseDecision decision(entry, statusCode);
- switch (cacheableReply()) {
+ switch (reusableReply(decision)) {
- case 1:
- entry->makePublic();
+ case ReuseDecision::reuseNot:
+ entry->makePrivate(false);
break;
- case 0:
- entry->makePrivate();
+ case ReuseDecision::cachePositively:
+ if (!entry->makePublic()) {
+ decision.make(ReuseDecision::doNotCacheButShare, "public key creation error");
+ entry->makePrivate(true);
+ }
break;
- case -1:
+ case ReuseDecision::cacheNegatively:
+ if (!entry->cacheNegatively()) {
+ decision.make(ReuseDecision::doNotCacheButShare, "public key creation error");
+ entry->makePrivate(true);
+ }
+ break;
-#if USE_HTTP_VIOLATIONS
- if (Config.negativeTtl > 0)
- entry->cacheNegatively();
- else
-#endif
- entry->makePrivate();
+ case ReuseDecision::doNotCacheButShare:
+ entry->makePrivate(true);
break;
default:
assert(0);
break;
}
+ debugs(11, 3, "decided: " << decision);
}
if (!ignoreCacheControl) {
// For security reasons we do so even if storage was caused by refresh_pattern ignore-* option
// CC:must-revalidate or CC:proxy-revalidate
- const bool ccMustRevalidate = (rep->cache_control->proxyRevalidate() || rep->cache_control->mustRevalidate());
+ const bool ccMustRevalidate = (rep->cache_control->hasProxyRevalidate() || rep->cache_control->hasMustRevalidate());
// CC:no-cache (only if there are no parameters)
- const bool ccNoCacheNoParams = (rep->cache_control->hasNoCache() && rep->cache_control->noCache().size()==0);
+ const bool ccNoCacheNoParams = rep->cache_control->hasNoCacheWithoutParameters();
// CC:s-maxage=N
const bool ccSMaxAge = rep->cache_control->hasSMaxAge();
// CC:private (yes, these can sometimes be stored)
const bool ccPrivate = rep->cache_control->hasPrivate();
- if (ccMustRevalidate || ccNoCacheNoParams || ccSMaxAge || ccPrivate)
- EBIT_SET(entry->flags, ENTRY_REVALIDATE);
+ if (ccNoCacheNoParams || ccPrivate)
+ EBIT_SET(entry->flags, ENTRY_REVALIDATE_ALWAYS);
+ else if (ccMustRevalidate || ccSMaxAge)
+ EBIT_SET(entry->flags, ENTRY_REVALIDATE_STALE);
}
#if USE_HTTP_VIOLATIONS // response header Pragma::no-cache is undefined in HTTP
else {
* but servers like "Active Imaging Webcast/2.0" sure do use it */
if (rep->header.has(Http::HdrType::PRAGMA) &&
rep->header.hasListMember(Http::HdrType::PRAGMA,"no-cache",','))
- EBIT_SET(entry->flags, ENTRY_REVALIDATE);
+ EBIT_SET(entry->flags, ENTRY_REVALIDATE_ALWAYS);
}
#endif
}
/** \par
* If the reply wants to close the connection, it takes precedence */
- if (httpHeaderHasConnDir(&rep->header, "close"))
+ static SBuf close("close", 5);
+ if (httpHeaderHasConnDir(&rep->header, close))
return COMPLETE_NONPERSISTENT_MSG;
/** \par
- * If we didn't send a keep-alive request header, then this
+ * If we sent a Connection:close request header, then this
* can not be a persistent connection.
*/
if (!flags.keepalive)
return COMPLETE_NONPERSISTENT_MSG;
+ /** \par
+ * If we banned reuse, then this cannot be a persistent connection.
+ */
+ if (flags.forceClose)
+ return COMPLETE_NONPERSISTENT_MSG;
+
/** \par
* If we haven't sent the whole request then this can not be a persistent
* connection.
/** \par
* In chunked response we do not know the content length but we are absolutely
* sure about the end of response, so we are calling the statusIfComplete to
- * decide if we can be persistant
+ * decide if we can be persistent
*/
if (lastChunk && flags.chunked)
return statusIfComplete();
return statusIfComplete();
}
-#if USE_DELAY_POOLS
static void
readDelayed(void *context, CommRead const &)
{
state->flags.do_next_read = true;
state->maybeReadVirginBody();
}
-#endif
void
HttpStateData::readReply(const CommIoCbParams &io)
CommIoCbParams rd(this); // will be expanded with ReadNow results
rd.conn = io.conn;
rd.size = entry->bytesWanted(Range<size_t>(0, inBuf.spaceSize()));
-#if USE_DELAY_POOLS
- if (rd.size < 1) {
- assert(entry->mem_obj);
- /* read ahead limit */
- /* Perhaps these two calls should both live in MemObject */
+ if (rd.size <= 0) {
+ assert(entry->mem_obj);
AsyncCall::Pointer nilCall;
- if (!entry->mem_obj->readAheadPolicyCanRead()) {
- entry->mem_obj->delayRead(DeferredRead(readDelayed, this, CommRead(io.conn, NULL, 0, nilCall)));
- return;
- }
-
- /* delay id limit */
- entry->mem_obj->mostBytesAllowed().delayRead(DeferredRead(readDelayed, this, CommRead(io.conn, NULL, 0, nilCall)));
+ entry->mem_obj->delayRead(DeferredRead(readDelayed, this, CommRead(io.conn, NULL, 0, nilCall)));
return;
}
-#endif
switch (Comm::ReadNow(rd, inBuf)) {
case Comm::INPROGRESS:
++ IOStats.Http.read_hist[bin];
- // update peer response time stats (%<pt)
- const timeval &sent = request->hier.peer_http_request_sent;
- if (sent.tv_sec)
- tvSub(request->hier.peer_response_time, sent, current_time);
- else
- request->hier.peer_response_time.tv_sec = -1;
+ request->hier.notePeerRead();
}
/* Continue to process previously read data */
// case Comm::COMM_ERROR:
default: // no other flags should ever occur
debugs(11, 2, io.conn << ": read failure: " << xstrerr(rd.xerrno));
- ErrorState *err = new ErrorState(ERR_READ_ERROR, Http::scBadGateway, fwd->request);
+ const auto err = new ErrorState(ERR_READ_ERROR, Http::scBadGateway, fwd->request, fwd->al);
err->xerrno = rd.xerrno;
fwd->fail(err);
flags.do_next_read = false;
- io.conn->close();
-
+ closeServer();
+ mustStop("HttpStateData::readReply");
return;
}
Must(!flags.headers_parsed);
}
+ if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
+ abortTransaction("store entry aborted while we were waiting for processReply()");
+ return;
+ }
+
if (!flags.headers_parsed) { // have not parsed headers yet?
PROF_start(HttpStateData_processReplyHeader);
processReplyHeader();
} else if (vrep->header.conflictingContentLength()) {
fwd->dontRetry(true);
error = ERR_INVALID_RESP;
+ } else if (vrep->header.unsupportedTe()) {
+ fwd->dontRetry(true);
+ error = ERR_INVALID_RESP;
} else {
return true; // done parsing, got reply, and no error
}
assert(error != ERR_NONE);
entry->reset();
- fwd->fail(new ErrorState(error, Http::scBadGateway, fwd->request));
+ fwd->fail(new ErrorState(error, Http::scBadGateway, fwd->request, fwd->al));
flags.do_next_read = false;
- serverConnection->close();
+ closeServer();
+ mustStop("HttpStateData::continueAfterParsingHeader");
return false; // quit on error
}
// server sent more that the advertised content length
debugs(11, 5, "payloadSeen=" << payloadSeen <<
" clen=" << clen << '/' << vrep->content_length <<
- " trucated=" << payloadTruncated << '+' << extras);
+ " truncated=" << payloadTruncated << '+' << extras);
inBuf.chop(0, inBuf.length() - extras);
payloadTruncated += extras;
bool
HttpStateData::decodeAndWriteReplyBody()
{
- const char *data = NULL;
- int len;
- bool wasThereAnException = false;
assert(flags.chunked);
assert(httpChunkDecoder);
- SQUID_ENTER_THROWING_CODE();
- MemBuf decodedData;
- decodedData.init();
- httpChunkDecoder->setPayloadBuffer(&decodedData);
- const bool doneParsing = httpChunkDecoder->parse(inBuf);
- inBuf = httpChunkDecoder->remaining(); // sync buffers after parse
- len = decodedData.contentSize();
- data=decodedData.content();
- addVirginReplyBody(data, len);
- if (doneParsing) {
- lastChunk = 1;
- flags.do_next_read = false;
+ try {
+ MemBuf decodedData;
+ decodedData.init();
+ httpChunkDecoder->setPayloadBuffer(&decodedData);
+ const bool doneParsing = httpChunkDecoder->parse(inBuf);
+ inBuf = httpChunkDecoder->remaining(); // sync buffers after parse
+ addVirginReplyBody(decodedData.content(), decodedData.contentSize());
+ if (doneParsing) {
+ lastChunk = 1;
+ flags.do_next_read = false;
+ }
+ return true;
+ }
+ catch (...) {
+ debugs (11, 2, "de-chunking failure: " << CurrentException);
}
- SQUID_EXIT_THROWING_CODE(wasThereAnException);
- return wasThereAnException;
+ return false;
}
/**
void
HttpStateData::processReplyBody()
{
- Ip::Address client_addr;
- bool ispinned = false;
-
if (!flags.headers_parsed) {
flags.do_next_read = true;
maybeReadVirginBody();
writeReplyBody();
}
+ // storing/sending methods like earlier adaptOrFinalizeReply() or
+ // above writeReplyBody() may release/abort the store entry.
if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
- // The above writeReplyBody() call may have aborted the store entry.
+ // TODO: In some cases (e.g., 304), we should keep persistent conn open.
+ // Detect end-of-reply (and, hence, pool our idle pconn) earlier (ASAP).
abortTransaction("store entry aborted while storing reply");
return;
} else
}
break;
- case COMPLETE_PERSISTENT_MSG:
+ case COMPLETE_PERSISTENT_MSG: {
debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG from " << serverConnection);
- /* yes we have to clear all these! */
+
+ // TODO: Remove serverConnectionSaved but preserve exception safety.
+
commUnsetConnTimeout(serverConnection);
flags.do_next_read = false;
comm_remove_close_handler(serverConnection->fd, closeHandler);
closeHandler = NULL;
- fwd->unregister(serverConnection);
+ Ip::Address client_addr; // XXX: Remove as unused. Why was it added?
if (request->flags.spoofClientIp)
client_addr = request->client_addr;
+ auto serverConnectionSaved = serverConnection;
+ fwd->unregister(serverConnection);
+ serverConnection = nullptr;
+
+ bool ispinned = false; // TODO: Rename to isOrShouldBePinned
if (request->flags.pinned) {
ispinned = true;
} else if (request->flags.connectionAuth && request->flags.authSent) {
ispinned = true;
}
- if (ispinned && request->clientConnectionManager.valid()) {
- request->clientConnectionManager->pinConnection(serverConnection, request, _peer,
- (request->flags.connectionAuth));
+ if (ispinned) {
+ if (request->clientConnectionManager.valid()) {
+ CallJobHere1(11, 4, request->clientConnectionManager,
+ ConnStateData,
+ notePinnedConnectionBecameIdle,
+ ConnStateData::PinnedIdleContext(serverConnectionSaved, request));
+ } else {
+ // must not pool/share ispinned connections, even orphaned ones
+ serverConnectionSaved->close();
+ }
} else {
- fwd->pconnPush(serverConnection, request->url.host());
+ fwdPconnPool->push(serverConnectionSaved, request->url.host());
}
- serverConnection = NULL;
serverComplete();
return;
+ }
case COMPLETE_NONPERSISTENT_MSG:
debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG from " << serverConnection);
+ if (flags.chunked && !lastChunk)
+ entry->lengthWentBad("missing last-chunk");
+
serverComplete();
return;
}
if (limitBuffer < 0 || inBuf.length() >= (SBuf::size_type)limitBuffer) {
// when buffer is at or over limit already
- debugs(11, 7, "wont read up to " << limitBuffer << ". buffer has (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
+ debugs(11, 7, "will not read up to " << limitBuffer << ". buffer has (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
debugs(11, DBG_DATA, "buffer has {" << inBuf << "}");
// Process next response from buffer
processReply();
const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), (limitBuffer - inBuf.length()));
if (!read_size) {
- debugs(11, 7, "wont read up to " << read_size << " into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
+ debugs(11, 7, "will not read up to " << read_size << " into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
return false;
}
- // just report whether we could grow or not, dont actually do it
+ // just report whether we could grow or not, do not actually do it
if (doGrow)
return (read_size >= 2);
// we may need to grow the buffer
inBuf.reserveSpace(read_size);
- debugs(11, 8, (!flags.do_next_read ? "wont" : "may") <<
+ debugs(11, 8, (!flags.do_next_read ? "will not" : "may") <<
" read up to " << read_size << " bytes info buf(" << inBuf.length() << "/" << inBuf.spaceSize() <<
") from " << serverConnection);
entry->mem_obj->checkUrlChecksum();
#endif
+ // XXX: Keep in sync with Client::sentRequestBody().
+ // TODO: Extract common parts.
+
if (io.size > 0) {
fd_bytes(io.fd, io.size, FD_WRITE);
statCounter.server.all.kbytes_out += io.size;
if (io.flag == Comm::ERR_CLOSING)
return;
+ // both successful and failed writes affect response times
+ request->hier.notePeerWrite();
+
if (io.flag) {
- ErrorState *err = new ErrorState(ERR_WRITE_ERROR, Http::scBadGateway, fwd->request);
+ const auto err = new ErrorState(ERR_WRITE_ERROR, Http::scBadGateway, fwd->request, fwd->al);
err->xerrno = io.xerrno;
fwd->fail(err);
- serverConnection->close();
+ closeServer();
+ mustStop("HttpStateData::wroteLast");
return;
}
commSetConnTimeout(serverConnection, Config.Timeout.read, timeoutCall);
flags.request_sent = true;
- request->hier.peer_http_request_sent = current_time;
}
-// Close the HTTP server connection. Used by serverComplete().
void
HttpStateData::closeServer()
{
* Fixup authentication request headers for special cases
*/
static void
-httpFixupAuthentication(HttpRequest * request, const HttpHeader * hdr_in, HttpHeader * hdr_out, const HttpStateFlags &flags)
+httpFixupAuthentication(HttpRequest * request, const HttpHeader * hdr_in, HttpHeader * hdr_out, const Http::StateFlags &flags)
{
- Http::HdrType header = flags.originpeer ? Http::HdrType::AUTHORIZATION : Http::HdrType::PROXY_AUTHORIZATION;
-
/* Nothing to do unless we are forwarding to a peer */
- if (!request->flags.proxying)
+ if (!flags.peering)
+ return;
+
+ // This request is going "through" rather than "to" our _peer.
+ if (flags.tunneling)
return;
/* Needs to be explicitly enabled */
if (!request->peer_login)
return;
+ const auto header = flags.toOrigin ? Http::HdrType::AUTHORIZATION : Http::HdrType::PROXY_AUTHORIZATION;
/* Maybe already dealt with? */
if (hdr_out->has(header))
return;
if (strcmp(request->peer_login, "PASSTHRU") == 0)
return;
- /* PROXYPASS is a special case, single-signon to servers with the proxy password (basic only) */
- if (flags.originpeer && strcmp(request->peer_login, "PROXYPASS") == 0 && hdr_in->has(Http::HdrType::PROXY_AUTHORIZATION)) {
+ // Dangerous and undocumented PROXYPASS is a single-signon to servers with
+ // the proxy password. Only Basic Authentication can work this way. This
+ // statement forwards a "basic" Proxy-Authorization value from our client
+ // to an originserver peer. Other PROXYPASS cases are handled lower.
+ if (flags.toOrigin &&
+ strcmp(request->peer_login, "PROXYPASS") == 0 &&
+ hdr_in->has(Http::HdrType::PROXY_AUTHORIZATION)) {
+
const char *auth = hdr_in->getStr(Http::HdrType::PROXY_AUTHORIZATION);
if (auth && strncasecmp(auth, "basic ", 6) == 0) {
}
}
- uint8_t loginbuf[base64_encode_len(MAX_LOGIN_SZ)];
+ char loginbuf[base64_encode_len(MAX_LOGIN_SZ)];
size_t blen;
struct base64_encode_ctx ctx;
base64_encode_init(&ctx);
if (strncmp(request->peer_login, "NEGOTIATE",strlen("NEGOTIATE")) == 0) {
char *Token=NULL;
char *PrincipalName=NULL,*p;
+ int negotiate_flags = 0;
+
if ((p=strchr(request->peer_login,':')) != NULL ) {
PrincipalName=++p;
}
- Token = peer_proxy_negotiate_auth(PrincipalName, request->peer_host);
+ if (request->flags.auth_no_keytab) {
+ negotiate_flags |= PEER_PROXY_NEGOTIATE_NOKEYTAB;
+ }
+ Token = peer_proxy_negotiate_auth(PrincipalName, request->peer_host, negotiate_flags);
if (Token) {
httpHeaderPutStrf(hdr_out, header, "Negotiate %s",Token);
}
StoreEntry * entry,
const AccessLogEntryPointer &al,
HttpHeader * hdr_out,
- const HttpStateFlags &flags)
+ const Http::StateFlags &flags)
{
/* building buffer for complex strings */
#define BBUF_SZ (MAX_URL+32)
// Add our own If-None-Match field if the cached entry has a strong ETag.
// copyOneHeaderFromClientsideRequestToUpstreamRequest() adds client ones.
if (request->etag.size() > 0) {
- hdr_out->addEntry(new HttpHeaderEntry(Http::HdrType::IF_NONE_MATCH, NULL,
+ hdr_out->addEntry(new HttpHeaderEntry(Http::HdrType::IF_NONE_MATCH, SBuf(),
request->etag.termedBuf()));
}
request->flags.isRanged = false;
}
- /* append Via */
- if (Config.onoff.via) {
- String strVia;
- strVia = hdr_in->getList(Http::HdrType::VIA);
- snprintf(bbuf, BBUF_SZ, "%d.%d %s",
- request->http_ver.major,
- request->http_ver.minor, ThisCache);
- strListAdd(&strVia, bbuf, ',');
- hdr_out->putStr(Http::HdrType::VIA, strVia.termedBuf());
- strVia.clean();
- }
+ hdr_out->addVia(request->http_ver, hdr_in);
if (request->flags.accelerated) {
/* Append Surrogate-Capabilities */
String strFwd = hdr_in->getList(Http::HdrType::X_FORWARDED_FOR);
- if (strFwd.size() > 65536/2) {
+ // if we cannot double strFwd size, then it grew past 50% of the limit
+ if (!strFwd.canGrowBy(strFwd.size())) {
// There is probably a forwarding loop with Via detection disabled.
// If we do nothing, String will assert on overflow soon.
// TODO: Terminate all transactions with huge XFF?
/* append Authorization if known in URL, not in header and going direct */
if (!hdr_out->has(Http::HdrType::AUTHORIZATION)) {
- if (!request->flags.proxying && !request->url.userInfo().isEmpty()) {
- static uint8_t result[base64_encode_len(MAX_URL*2)]; // should be big enough for a single URI segment
+ if (flags.toOrigin && !request->url.userInfo().isEmpty()) {
+ static char result[base64_encode_len(MAX_URL*2)]; // should be big enough for a single URI segment
struct base64_encode_ctx ctx;
base64_encode_init(&ctx);
size_t blen = base64_encode_update(&ctx, result, request->url.userInfo().length(), reinterpret_cast<const uint8_t*>(request->url.userInfo().rawContent()));
delete cc;
}
- /* maybe append Connection: keep-alive */
- if (flags.keepalive) {
- hdr_out->putStr(Http::HdrType::CONNECTION, "keep-alive");
- }
+ // Always send Connection because HTTP/1.0 servers need explicit
+ // "keep-alive", HTTP/1.1 servers need explicit "close", Upgrade recipients
+ // need bare "upgrade", and we do not always know the server expectations.
+ if (!hdr_out->has(Http::HdrType::CONNECTION)) // forwardUpgrade() may add it
+ hdr_out->putStr(Http::HdrType::CONNECTION, flags.keepalive ? "keep-alive" : "close");
/* append Front-End-Https */
if (flags.front_end_https) {
}
/* Now mangle the headers. */
- if (Config2.onoff.mangle_request_headers)
- httpHdrMangleList(hdr_out, request, ROR_REQUEST);
-
- if (Config.request_header_add && !Config.request_header_add->empty())
- httpHdrAdd(hdr_out, request, al, *Config.request_header_add);
+ httpHdrMangleList(hdr_out, request, al, ROR_REQUEST);
strConnection.clean();
}
+/// copies from-client Upgrade info into the given to-server header while
+/// honoring configuration filters and following HTTP requirements
+void
+HttpStateData::forwardUpgrade(HttpHeader &hdrOut)
+{
+ if (!Config.http_upgrade_request_protocols)
+ return; // forward nothing by default
+
+ /* RFC 7230 section 6.7 paragraph 10:
+ * A server MUST ignore an Upgrade header field that is received in
+ * an HTTP/1.0 request.
+ */
+ if (request->http_ver == Http::ProtocolVersion(1,0))
+ return;
+
+ const auto &hdrIn = request->header;
+ if (!hdrIn.has(Http::HdrType::UPGRADE))
+ return;
+ const auto upgradeIn = hdrIn.getList(Http::HdrType::UPGRADE);
+
+ String upgradeOut;
+
+ ACLFilledChecklist ch(nullptr, request.getRaw());
+ ch.al = fwd->al;
+ const char *pos = nullptr;
+ const char *offeredStr = nullptr;
+ int offeredStrLen = 0;
+ while (strListGetItem(&upgradeIn, ',', &offeredStr, &offeredStrLen, &pos)) {
+ const ProtocolView offeredProto(offeredStr, offeredStrLen);
+ debugs(11, 5, "checks all rules applicable to " << offeredProto);
+ Config.http_upgrade_request_protocols->forApplicable(offeredProto, [&ch, offeredStr, offeredStrLen, &upgradeOut] (const SBuf &cfgProto, const acl_access *guard) {
+ debugs(11, 5, "checks " << cfgProto << " rule(s)");
+ ch.changeAcl(guard);
+ const auto answer = ch.fastCheck();
+ if (answer.implicit)
+ return false; // keep looking for an explicit rule match
+ if (answer.allowed())
+ strListAdd(upgradeOut, offeredStr, offeredStrLen);
+ // else drop the offer (explicitly denied cases and ACL errors)
+ return true; // stop after an explicit rule match or an error
+ });
+ }
+
+ if (upgradeOut.size()) {
+ hdrOut.putStr(Http::HdrType::UPGRADE, upgradeOut.termedBuf());
+
+ /* RFC 7230 section 6.7 paragraph 10:
+ * When Upgrade is sent, the sender MUST also send a Connection header
+ * field that contains an "upgrade" connection option, in
+ * order to prevent Upgrade from being accidentally forwarded by
+ * intermediaries that might not implement the listed protocols.
+ *
+ * NP: Squid does not truly implement the protocol(s) in this Upgrade.
+ * For now we are treating an explicit blind tunnel as "implemented"
+ * regardless of the security implications.
+ */
+ hdrOut.putStr(Http::HdrType::CONNECTION, "upgrade");
+
+ // Connection:close and Connection:keepalive confuse some Upgrade
+ // recipients, so we do not send those headers. Our Upgrade request
+ // implicitly offers connection persistency per HTTP/1.1 defaults.
+ // Update the keepalive flag to reflect that offer.
+ // * If the server upgrades, then we would not be talking HTTP past the
+ // HTTP 101 control message, and HTTP persistence would be irrelevant.
+ // * Otherwise, our request will contradict onoff.server_pconns=off or
+ // other no-keepalive conditions (if any). We compensate by copying
+ // the original no-keepalive decision now and honoring it later.
+ flags.forceClose = !flags.keepalive;
+ flags.keepalive = true; // should already be true in most cases
+ }
+}
+
/**
* Decides whether a particular header may be cloned from the received Clients request
* to our outgoing fetch request.
*/
void
-copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request, HttpHeader * hdr_out, const int we_do_ranges, const HttpStateFlags &flags)
+copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request, HttpHeader * hdr_out, const int we_do_ranges, const Http::StateFlags &flags)
{
debugs(11, 5, "httpBuildRequestHeader: " << e->name << ": " << e->value );
* Only pass on proxy authentication to peers for which
* authentication forwarding is explicitly enabled
*/
- if (!flags.originpeer && flags.proxying && request->peer_login &&
+ if (!flags.toOrigin && request->peer_login &&
(strcmp(request->peer_login, "PASS") == 0 ||
strcmp(request->peer_login, "PROXYPASS") == 0 ||
strcmp(request->peer_login, "PASSTHRU") == 0)) {
case Http::HdrType::KEEP_ALIVE: /** \par Keep-Alive: */
case Http::HdrType::PROXY_AUTHENTICATE: /** \par Proxy-Authenticate: */
case Http::HdrType::TRAILER: /** \par Trailer: */
- case Http::HdrType::UPGRADE: /** \par Upgrade: */
case Http::HdrType::TRANSFER_ENCODING: /** \par Transfer-Encoding: */
break;
+ /// \par Upgrade is hop-by-hop but forwardUpgrade() may send a filtered one
+ case Http::HdrType::UPGRADE:
+ break;
+
/** \par OTHER headers I haven't bothered to track down yet. */
case Http::HdrType::AUTHORIZATION:
/** \par WWW-Authorization:
* Pass on WWW authentication */
- if (!flags.originpeer) {
+ if (!flags.toOriginPeer()) {
hdr_out->addEntry(e->clone());
} else {
- /** \note In accelerators, only forward authentication if enabled
+ /** \note Assume that talking to a cache_peer originserver makes
+ * us a reverse proxy and only forward authentication if enabled
* (see also httpFixupAuthentication for special cases)
*/
if (request->peer_login &&
/** \par default.
* pass on all other header fields
* which are NOT listed by the special Connection: header. */
-
- if (strConnection.size()>0 && strListIsMember(&strConnection, e->name.termedBuf(), ',')) {
+ if (strConnection.size()>0 && strListIsMember(&strConnection, e->name, ',')) {
debugs(11, 2, "'" << e->name << "' header cropped by Connection: definition");
return;
}
* not the one we are sending. Needs checking.
*/
const AnyP::ProtocolVersion httpver = Http::ProtocolVersion();
- const SBuf url(_peer && !_peer->options.originserver ? request->effectiveRequestUri() : request->url.path());
+ const SBuf url(flags.toOrigin ? request->url.path() : request->effectiveRequestUri());
mb->appendf(SQUIDSBUFPH " " SQUIDSBUFPH " %s/%d.%d\r\n",
SQUIDSBUFPRINT(request->method.image()),
SQUIDSBUFPRINT(url),
/* build and pack headers */
{
HttpHeader hdr(hoRequest);
- httpBuildRequestHeader(request, entry, fwd->al, &hdr, flags);
+ forwardUpgrade(hdr); // before httpBuildRequestHeader() for CONNECTION
+ httpBuildRequestHeader(request.getRaw(), entry, fwd->al, &hdr, flags);
if (request->flags.pinned && request->flags.connectionAuth)
request->flags.authSent = true;
else if (hdr.has(Http::HdrType::AUTHORIZATION))
request->flags.authSent = true;
+ // The late placement of this check supports reply_header_add mangling,
+ // but also complicates optimizing upgradeHeaderOut-like lookups.
+ if (hdr.has(Http::HdrType::UPGRADE)) {
+ assert(!upgradeHeaderOut);
+ upgradeHeaderOut = new String(hdr.getList(Http::HdrType::UPGRADE));
+ }
+
hdr.packInto(mb);
hdr.clean();
}
Dialer, this, HttpStateData::wroteLast);
}
- flags.originpeer = (_peer != NULL && _peer->options.originserver);
- flags.proxying = (_peer != NULL && !flags.originpeer);
-
/*
* Is keep-alive okay for all request methods?
*/
flags.keepalive = request->persistent();
else if (!Config.onoff.server_pconns)
flags.keepalive = false;
+ else if (flags.tunneling)
+ // tunneled non-pinned SSL-bumped requests must not be kept alive
+ flags.keepalive = !request->flags.sslBumped;
else if (_peer == NULL)
flags.keepalive = true;
else if (_peer->stats.n_keepalives_sent < 10)
(double) _peer->stats.n_keepalives_sent > 0.50)
flags.keepalive = true;
- if (_peer) {
+ if (_peer && !flags.tunneling) {
/*The old code here was
if (neighborType(_peer, request->url) == PEER_SIBLING && ...
which is equivalent to:
buf.append(raw.content(), rawDataSize);
buf.append("\r\n", 2);
- Must(rawDataSize > 0); // we did not accidently created last-chunk above
+ Must(rawDataSize > 0); // we did not accidentally create last-chunk above
// Do not send last-chunk unless we successfully received everything
if (receivedWholeRequestBody) {
return false;
}
- ACLFilledChecklist ch(Config.accessList.brokenPosts, originalRequest(), NULL);
- if (ch.fastCheck() != ACCESS_ALLOWED) {
+ ACLFilledChecklist ch(Config.accessList.brokenPosts, originalRequest().getRaw());
+ ch.al = fwd->al;
+ ch.syncAle(originalRequest().getRaw(), nullptr);
+ if (!ch.fastCheck().allowed()) {
debugs(11, 5, HERE << "didn't match brokenPosts");
return false;
}
debugs(11, DBG_IMPORTANT, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << request->client_addr << "' -> '" << entry->url() << "'" );
if (virginReply()->sline.status() == Http::scInvalidHeader) {
- serverConnection->close();
+ closeServer();
+ mustStop("HttpStateData::handleMoreRequestBodyAvailable");
return;
}
}
// We might also get here if client-side aborts, but then our response
// should not matter because either client-side will provide its own or
// there will be no response at all (e.g., if the the client has left).
- ErrorState *err = new ErrorState(ERR_ICAP_FAILURE, Http::scInternalServerError, fwd->request);
- err->detailError(ERR_DETAIL_SRV_REQMOD_REQ_BODY);
+ const auto err = new ErrorState(ERR_ICAP_FAILURE, Http::scInternalServerError, fwd->request, fwd->al);
+ static const auto d = MakeNamedErrorDetail("SRV_REQMOD_REQ_BODY");
+ err->detailError(d);
fwd->fail(err);
}
Client::sentRequestBody(io);
}
-// Quickly abort the transaction
-// TODO: destruction should be sufficient as the destructor should cleanup,
-// including canceling close handlers
void
-HttpStateData::abortTransaction(const char *reason)
+HttpStateData::abortAll(const char *reason)
{
debugs(11,5, HERE << "aborting transaction for " << reason <<
"; " << serverConnection << ", this " << this);
+ mustStop(reason);
+}
- if (Comm::IsConnOpen(serverConnection)) {
- serverConnection->close();
- return;
- }
+HttpStateData::ReuseDecision::ReuseDecision(const StoreEntry *e, const Http::StatusCode code)
+ : answer(HttpStateData::ReuseDecision::reuseNot), reason(nullptr), entry(e), statusCode(code) {}
- fwd->handleUnregisteredServerEnd();
- mustStop("HttpStateData::abortTransaction");
+HttpStateData::ReuseDecision::Answers
+HttpStateData::ReuseDecision::make(const HttpStateData::ReuseDecision::Answers ans, const char *why)
+{
+ answer = ans;
+ reason = why;
+ return answer;
+}
+
+std::ostream &operator <<(std::ostream &os, const HttpStateData::ReuseDecision &d)
+{
+ static const char *ReuseMessages[] = {
+ "do not cache and do not share", // reuseNot
+ "cache positively and share", // cachePositively
+ "cache negatively and share", // cacheNegatively
+ "do not cache but share" // doNotCacheButShare
+ };
+
+ assert(d.answer >= HttpStateData::ReuseDecision::reuseNot &&
+ d.answer <= HttpStateData::ReuseDecision::doNotCacheButShare);
+ return os << ReuseMessages[d.answer] << " because " << d.reason <<
+ "; HTTP status " << d.statusCode << " " << *(d.entry);
}