/*
- * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
+ * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
*
* Squid software is distributed under GPLv2+ license and includes
* contributions from numerous individuals and organizations.
#include "acl/FilledChecklist.h"
#include "acl/Gadgets.h"
#include "anyp/PortCfg.h"
+#include "base/AsyncCallbacks.h"
+#include "base/AsyncCbdataCalls.h"
#include "CacheManager.h"
#include "CachePeer.h"
#include "client_side.h"
#include "clients/forward.h"
+#include "clients/HttpTunneler.h"
+#include "clients/WhoisGateway.h"
#include "comm/Connection.h"
#include "comm/ConnOpener.h"
#include "comm/Loops.h"
#include "fde.h"
#include "FwdState.h"
#include "globals.h"
-#include "gopher.h"
+#include "HappyConnOpener.h"
#include "hier_code.h"
#include "http.h"
#include "http/Stream.h"
#include "icmp/net_db.h"
#include "internal.h"
#include "ip/Intercept.h"
+#include "ip/NfMarkConfig.h"
#include "ip/QosConfig.h"
#include "ip/tools.h"
#include "MemObject.h"
#include "neighbors.h"
#include "pconn.h"
#include "PeerPoolMgr.h"
+#include "ResolvedPeers.h"
#include "security/BlindPeerConnector.h"
#include "SquidConfig.h"
-#include "SquidTime.h"
#include "ssl/PeekingPeerConnector.h"
#include "Store.h"
#include "StoreClient.h"
#include "urn.h"
-#include "whois.h"
#if USE_OPENSSL
#include "ssl/cert_validate_message.h"
#include "ssl/Config.h"
-#include "ssl/ErrorDetail.h"
#include "ssl/helper.h"
#include "ssl/ServerBump.h"
#include "ssl/support.h"
#include <cerrno>
static CLCB fwdServerClosedWrapper;
-static CNCB fwdConnectDoneWrapper;
static OBJH fwdStats;
#define MAX_FWD_STATS_IDX 9
static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];
-static PconnPool *fwdPconnPool = new PconnPool("server-peers", NULL);
-CBDATA_CLASS_INIT(FwdState);
-
-class FwdStatePeerAnswerDialer: public CallDialer, public Security::PeerConnector::CbDialer
-{
-public:
- typedef void (FwdState::*Method)(Security::EncryptorAnswer &);
+PconnPool *fwdPconnPool = new PconnPool("server-peers", nullptr);
- FwdStatePeerAnswerDialer(Method method, FwdState *fwd):
- method_(method), fwd_(fwd), answer_() {}
-
- /* CallDialer API */
- virtual bool canDial(AsyncCall &call) { return fwd_.valid(); }
- void dial(AsyncCall &call) { ((&(*fwd_))->*method_)(answer_); }
- virtual void print(std::ostream &os) const {
- os << '(' << fwd_.get() << ", " << answer_ << ')';
- }
-
- /* Security::PeerConnector::CbDialer API */
- virtual Security::EncryptorAnswer &answer() { return answer_; }
-
-private:
- Method method_;
- CbcPointer<FwdState> fwd_;
- Security::EncryptorAnswer answer_;
-};
+CBDATA_CLASS_INIT(FwdState);
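+
+/// StoreEntry abort callback: closes any open connection to the server and
+/// ends this forwarding attempt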
void
-FwdState::abort(void* d)
+FwdState::HandleStoreAbort(FwdState *fwd)
{
- FwdState* fwd = (FwdState*)d;
Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
if (Comm::IsConnOpen(fwd->serverConnection())) {
fwd->closeServerConnection("store entry aborted");
} else {
- debugs(17, 7, HERE << "store entry aborted; no connection to close");
+ debugs(17, 7, "store entry aborted; no connection to close");
}
- fwd->serverDestinations.clear();
fwd->stopAndDestroy("store entry aborted");
}
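+
+/// closes a to-server connection that we were opening or preparing but had
+/// not yet committed to serverConn (hence, there is no close handler to remove)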
+void
+FwdState::closePendingConnection(const Comm::ConnectionPointer &conn, const char *reason)
+{
+ debugs(17, 3, "because " << reason << "; " << conn);
+ assert(!serverConn);
+ assert(!closeHandler);
+ if (IsConnOpen(conn)) {
+ fwdPconnPool->noteUses(fd_table[conn->fd].pconn.uses);
+ conn->close();
+ }
+}
+
void
FwdState::closeServerConnection(const char *reason)
{
debugs(17, 3, "because " << reason << "; " << serverConn);
+ assert(Comm::IsConnOpen(serverConn));
comm_remove_close_handler(serverConn->fd, closeHandler);
- closeHandler = NULL;
+ closeHandler = nullptr;
fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
serverConn->close();
}
entry(e),
request(r),
al(alp),
- err(NULL),
+ err(nullptr),
clientConn(client),
start_t(squid_curtime),
n_tries(0),
- pconnRace(raceImpossible)
+ waitingForDispatched(false),
+ destinations(new ResolvedPeers()),
+ pconnRace(raceImpossible),
+ storedWholeReply_(nullptr)
{
debugs(17, 2, "Forwarding client request " << client << ", url=" << e->url());
HTTPMSGLOCK(request);
- serverDestinations.reserve(Config.forward_max_tries);
e->lock("FwdState");
- EBIT_SET(e->flags, ENTRY_FWD_HDR_WAIT);
flags.connected_okay = false;
flags.dont_retry = false;
flags.forward_completed = false;
+ flags.destinationsFound = false;
debugs(17, 3, "FwdState constructed, this=" << this);
}
// Ftp::Relay needs to preserve control connection on data aborts
// so it registers its own abort handler that calls ours when needed.
- if (!request->flags.ftpNative)
- entry->registerAbort(FwdState::abort, this);
+ if (!request->flags.ftpNative) {
+ AsyncCall::Pointer call = asyncCall(17, 4, "FwdState::Abort", cbdataDialer(&FwdState::HandleStoreAbort, this));
+ entry->registerAbortCallback(call);
+ }
+
+ // just in case; should already be initialized to false
+ request->flags.pinned = false;
#if STRICT_ORIGINAL_DST
// Bug 3243: CVE 2009-0801
const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
if (isIntercepted && useOriginalDst) {
selectPeerForIntercepted();
- // 3.2 does not suppro re-wrapping inside CONNECT.
- // our only alternative is to fake destination "found" and continue with the forwarding.
- startConnectionOrFail();
return;
}
#endif
FwdState::stopAndDestroy(const char *reason)
{
debugs(17, 3, "for " << reason);
+
+ cancelStep(reason);
+
PeerSelectionInitiator::subscribed = false; // may already be false
self = nullptr; // we hope refcounting destroys us soon; may already be nil
/* do not place any code here as this object may be gone by now */
}
+/// Notify a pending subtask, if any, that we no longer need its help. We do not
+/// have to do this -- the subtask job will eventually end -- but ending it
+/// earlier reduces waste and may reduce DoS attack surface.
+void
+FwdState::cancelStep(const char *reason)
+{
+ transportWait.cancel(reason);
+ encryptionWait.cancel(reason);
+ peerWait.cancel(reason);
+}
+
#if STRICT_ORIGINAL_DST
/// bypasses peerSelect() when dealing with intercepted requests
void
FwdState::selectPeerForIntercepted()
{
+ // We do not support re-wrapping inside CONNECT.
+ // Our only alternative is to fake a noteDestination() call.
+
// use pinned connection if available
- Comm::ConnectionPointer p;
if (ConnStateData *client = request->pinnedConnection()) {
- p = client->validatePinnedConnection(request, NULL);
- if (Comm::IsConnOpen(p)) {
- /* duplicate peerSelectPinned() effects */
- p->peerType = PINNED;
- entry->ping_status = PING_DONE; /* Skip ICP */
-
- debugs(17, 3, "reusing a pinned conn: " << *p);
- serverDestinations.push_back(p);
- } else {
- debugs(17,2, "Pinned connection is not valid: " << p);
- ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
- fail(anErr);
- }
- // Either use the valid pinned connection or fail if it is invalid.
+ // emulate the PeerSelector::selectPinned() "Skip ICP" effect
+ entry->ping_status = PING_DONE;
+
+ usePinned();
return;
}
// use client original destination as second preferred choice
- p = new Comm::Connection();
+ const auto p = new Comm::Connection();
p->peerType = ORIGINAL_DST;
p->remote = clientConn->local;
getOutgoingAddress(request, p);
- debugs(17, 3, HERE << "using client original destination: " << *p);
- serverDestinations.push_back(p);
+ debugs(17, 3, "using client original destination: " << *p);
+ destinations->addPath(p);
+ destinations->destinationsFinalized = true;
+ PeerSelectionInitiator::subscribed = false;
+ useDestinations();
}
#endif
+/// updates ALE when we finalize the transaction error (if any)
+void
+FwdState::updateAleWithFinalError()
+{
+ if (!err || !al)
+ return;
+
+ const auto lte = LogTagsErrors::FromErrno(err->type == ERR_READ_TIMEOUT ? ETIMEDOUT : err->xerrno);
+ al->cache.code.err.update(lte);
+ if (!err->detail) {
+ static const auto d = MakeNamedErrorDetail("WITH_SERVER");
+ err->detailError(d);
+ }
+ al->updateError(Error(err->type, err->detail));
+}
+
void
FwdState::completed()
{
if (flags.forward_completed) {
- debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
+ debugs(17, DBG_IMPORTANT, "ERROR: FwdState::completed called on a completed request! Bad!");
return;
}
request->hier.stopPeerClock(false);
if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
- debugs(17, 3, HERE << "entry aborted");
+ debugs(17, 3, "entry aborted");
return ;
}
if (entry->store_status == STORE_PENDING) {
if (entry->isEmpty()) {
+ assert(!storedWholeReply_);
if (!err) // we quit (e.g., fd closed) before an error or content
- fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request));
+ fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request, al));
assert(err);
+ updateAleWithFinalError();
errorAppendEntry(entry, err);
- err = NULL;
+ err = nullptr;
#if USE_OPENSSL
if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
ConnStateData::httpsPeeked, ConnStateData::PinnedIdleContext(Comm::ConnectionPointer(nullptr), request));
+ // no flags.dont_retry: completed() is a post-reforward() act
}
#endif
} else {
- EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
- entry->complete();
- entry->releaseRequest();
+ updateAleWithFinalError(); // if any
+ if (storedWholeReply_)
+ entry->completeSuccessfully(storedWholeReply_);
+ else
+ entry->completeTruncated("FwdState default");
}
}
delete err;
- entry->unregisterAbort();
+ entry->unregisterAbortCallback("FwdState object destructed");
entry->unlock("FwdState");
- entry = NULL;
+ entry = nullptr;
- if (calls.connector != NULL) {
- calls.connector->cancel("FwdState destructed");
- calls.connector = NULL;
- }
+ cancelStep("~FwdState");
if (Comm::IsConnOpen(serverConn))
closeServerConnection("~FwdState");
- serverDestinations.clear();
-
debugs(17, 3, "FwdState destructed, this=" << this);
}
* be allowed. yuck, I know.
*/
- if ( Config.accessList.miss && !request->client_addr.isNoAddr() &&
- !request->flags.internal && request->url.getScheme() != AnyP::PROTO_CACHE_OBJECT) {
+ if ( Config.accessList.miss && !request->client_addr.isNoAddr() && !request->flags.internal) {
/**
* Check if this host is allowed to fetch MISSES from us (miss_access).
* Intentionally replace the src_addr automatically selected by the checklist code
* we do NOT want the indirect client address to be tested here.
*/
- ACLFilledChecklist ch(Config.accessList.miss, request, NULL);
+ ACLFilledChecklist ch(Config.accessList.miss, request, nullptr);
+ ch.al = al;
ch.src_addr = request->client_addr;
+ ch.syncAle(request, nullptr);
if (ch.fastCheck().denied()) {
err_type page_id;
page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName, 1);
if (page_id == ERR_NONE)
page_id = ERR_FORWARDING_DENIED;
- ErrorState *anErr = new ErrorState(page_id, Http::scForbidden, request);
+ const auto anErr = new ErrorState(page_id, Http::scForbidden, request, al);
errorAppendEntry(entry, anErr); // frees anErr
return;
}
}
- debugs(17, 3, HERE << "'" << entry->url() << "'");
+ debugs(17, 3, "'" << entry->url() << "'");
/*
* This seems like an odd place to bind mem_obj and request.
* Might want to assert that request is NULL at this point
if (shutting_down) {
/* more yuck */
- ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
+ const auto anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request, al);
errorAppendEntry(entry, anErr); // frees anErr
return;
}
if (request->flags.internal) {
debugs(17, 2, "calling internalStart() due to request flag");
- internalStart(clientConn, request, entry);
+ internalStart(clientConn, request, entry, al);
return;
}
switch (request->url.getScheme()) {
- case AnyP::PROTO_CACHE_OBJECT:
- debugs(17, 2, "calling CacheManager due to request scheme " << request->url.getScheme());
- CacheManager::GetInstance()->Start(clientConn, request, entry);
- return;
-
case AnyP::PROTO_URN:
- urnStart(request, entry);
+ urnStart(request, entry, al);
return;
default:
FwdState::fwdStart(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request)
{
// Hides AccessLogEntry.h from code that does not supply ALE anyway.
- Start(clientConn, entry, request, NULL);
+ Start(clientConn, entry, request, nullptr);
}
/// subtracts time_t values, returning zero if smaller exceeds the larger value
}
void
-FwdState::startConnectionOrFail()
+FwdState::useDestinations()
{
- debugs(17, 3, HERE << entry->url());
-
- if (serverDestinations.size() > 0) {
- // Ditch error page if it was created before.
- // A new one will be created if there's another problem
- delete err;
- err = NULL;
-
- // Update the logging information about this new server connection.
- // Done here before anything else so the errors get logged for
- // this server link regardless of what happens when connecting to it.
- // IF sucessfuly connected this top destination will become the serverConnection().
- syncHierNote(serverDestinations[0], request->url.host());
- request->clearError();
-
+ if (!destinations->empty()) {
connectStart();
} else {
if (PeerSelectionInitiator::subscribed) {
return; // expect a noteDestination*() call
}
- debugs(17, 3, HERE << "Connection failed: " << entry->url());
+ debugs(17, 3, "Connection failed: " << entry->url());
if (!err) {
- ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request);
+ const auto anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request, al);
fail(anErr);
} // else use actual error from last connection attempt
if (!errorState->request)
errorState->request = request;
- if (err->type != ERR_ZERO_SIZE_OBJECT)
- return;
+ if (err->type == ERR_ZERO_SIZE_OBJECT)
+ reactToZeroSizeObject();
+
+ destinationReceipt = nullptr; // may already be nil
+}
+
+/// ERR_ZERO_SIZE_OBJECT requires special adjustments
+void
+FwdState::reactToZeroSizeObject()
+{
+ assert(err->type == ERR_ZERO_SIZE_OBJECT);
if (pconnRace == racePossible) {
- debugs(17, 5, HERE << "pconn race happened");
+ debugs(17, 5, "pconn race happened");
pconnRace = raceHappened;
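+        // pconn race failures do not disqualify the destination itself, so
+        // put the path back to let the next connection attempt retry it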
+ if (destinationReceipt) {
+ destinations->reinstatePath(destinationReceipt);
+ destinationReceipt = nullptr;
+ }
}
if (ConnStateData *pinned_connection = request->pinnedConnection()) {
pinned_connection->pinning.zeroReply = true;
- flags.dont_retry = true; // we want to propagate failure to the client
debugs(17, 4, "zero reply on pinned connection");
}
}
void
FwdState::unregister(Comm::ConnectionPointer &conn)
{
- debugs(17, 3, HERE << entry->url() );
+    debugs(17, 3, entry->url());
assert(serverConnection() == conn);
assert(Comm::IsConnOpen(conn));
comm_remove_close_handler(conn->fd, closeHandler);
- closeHandler = NULL;
- serverConn = NULL;
+ closeHandler = nullptr;
+ serverConn = nullptr;
+ destinationReceipt = nullptr;
}
// \deprecated use unregister(Comm::ConnectionPointer &conn) instead
void
FwdState::unregister(int fd)
{
- debugs(17, 3, HERE << entry->url() );
+    debugs(17, 3, entry->url());
assert(fd == serverConnection()->fd);
unregister(serverConn);
}
void
FwdState::complete()
{
- debugs(17, 3, HERE << entry->url() << "\n\tstatus " << entry->getReply()->sline.status());
+ const auto replyStatus = entry->mem().baseReply().sline.status();
+ debugs(17, 3, *entry << " status " << replyStatus << ' ' << entry->url());
#if URL_CHECKSUM_DEBUG
entry->mem_obj->checkUrlChecksum();
#endif
- logReplyStatus(n_tries, entry->getReply()->sline.status());
+ logReplyStatus(n_tries, replyStatus);
+
+ // will already be false if complete() was called before/without dispatch()
+ waitingForDispatched = false;
if (reforward()) {
- debugs(17, 3, HERE << "re-forwarding " << entry->getReply()->sline.status() << " " << entry->url());
+ debugs(17, 3, "re-forwarding " << replyStatus << " " << entry->url());
if (Comm::IsConnOpen(serverConn))
unregister(serverConn);
+ serverConn = nullptr;
+ destinationReceipt = nullptr;
+ storedWholeReply_ = nullptr;
entry->reset();
- // drop the last path off the selection list. try the next one.
- if (!serverDestinations.empty()) // paranoid
- serverDestinations.erase(serverDestinations.begin());
- startConnectionOrFail();
+ useDestinations();
} else {
if (Comm::IsConnOpen(serverConn))
- debugs(17, 3, HERE << "server FD " << serverConnection()->fd << " not re-forwarding status " << entry->getReply()->sline.status());
+ debugs(17, 3, "server FD " << serverConnection()->fd << " not re-forwarding status " << replyStatus);
else
- debugs(17, 3, HERE << "server (FD closed) not re-forwarding status " << entry->getReply()->sline.status());
- EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
- entry->complete();
+ debugs(17, 3, "server (FD closed) not re-forwarding status " << replyStatus);
- if (!Comm::IsConnOpen(serverConn))
- completed();
+ completed();
stopAndDestroy("forwarding completed");
}
}
+/// Whether a forwarding attempt to some selected destination X is in progress
+/// (after successfully opening/reusing a transport connection to X).
+/// See also: transportWait
+bool
+FwdState::transporting() const
+{
+ return peerWait || encryptionWait || waitingForDispatched;
+}
+
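+/// Records that the entire reply has been written to the Store so that
+/// completed() can finish the entry with completeSuccessfully() rather than
+/// completeTruncated().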
+void
+FwdState::markStoredReplyAsWhole(const char * const whyWeAreSure)
+{
+ debugs(17, 5, whyWeAreSure << " for " << *entry);
+
+ // the caller wrote everything to Store, but Store may silently abort writes
+ if (EBIT_TEST(entry->flags, ENTRY_ABORTED))
+ return;
+
+ storedWholeReply_ = whyWeAreSure;
+}
+
void
FwdState::noteDestination(Comm::ConnectionPointer path)
{
- const bool wasBlocked = serverDestinations.empty();
- serverDestinations.push_back(path);
- if (wasBlocked)
- startConnectionOrFail();
- // else continue to use one of the previously noted destinations;
- // if all of them fail, we may try this path
+ flags.destinationsFound = true;
+
+ if (!path) {
+ // We can call usePinned() without fear of clashing with an earlier
+ // forwarding attempt because PINNED must be the first destination.
+ assert(destinations->empty());
+ usePinned();
+ return;
+ }
+
+ debugs(17, 3, path);
+
+ destinations->addPath(path);
+
+ if (transportWait) {
+ assert(!transporting());
+ notifyConnOpener();
+ return; // and continue to wait for FwdState::noteConnection() callback
+ }
+
+ if (transporting())
+ return; // and continue to receive destinations for backup
+
+ useDestinations();
}
void
FwdState::noteDestinationsEnd(ErrorState *selectionError)
{
PeerSelectionInitiator::subscribed = false;
- if (serverDestinations.empty()) { // was blocked, waiting for more paths
+ destinations->destinationsFinalized = true;
+ if (!flags.destinationsFound) {
if (selectionError) {
debugs(17, 3, "Will abort forwarding because path selection has failed.");
Must(!err); // if we tried to connect, then path selection succeeded
fail(selectionError);
}
- else if (err)
- debugs(17, 3, "Will abort forwarding because all found paths have failed.");
- else
- debugs(17, 3, "Will abort forwarding because path selection found no paths.");
- startConnectionOrFail(); // will choose the OrFail code path
+ stopAndDestroy("path selection found no paths");
return;
}
// else continue to use one of the previously noted destinations;
// if all of them fail, forwarding as whole will fail
Must(!selectionError); // finding at least one path means selection succeeded
+
+ if (transportWait) {
+ assert(!transporting());
+ notifyConnOpener();
+ return; // and continue to wait for FwdState::noteConnection() callback
+ }
+
+ if (transporting()) {
+ // We are already using a previously opened connection (but were also
+ // receiving more destinations in case we need to re-forward).
+ debugs(17, 7, "keep transporting");
+ return;
+ }
+
+ // destinationsFound, but none of them worked, and we were waiting for more
+ debugs(17, 7, "no more destinations to try after " << n_tries << " failed attempts");
+ if (!err) {
+ const auto finalError = new ErrorState(ERR_CANNOT_FORWARD, Http::scBadGateway, request, al);
+ static const auto d = MakeNamedErrorDetail("REFORWARD_TO_NONE");
+ finalError->detailError(d);
+ fail(finalError);
+ } // else use actual error from last forwarding attempt
+ stopAndDestroy("all found paths have failed");
+}
+
+/// makes sure connection opener knows that the destinations have changed
+void
+FwdState::notifyConnOpener()
+{
+ if (destinations->notificationPending) {
+ debugs(17, 7, "reusing pending notification about " << *destinations);
+ } else {
+ debugs(17, 7, "notifying about " << *destinations);
+ destinations->notificationPending = true;
+ CallJobHere(17, 5, transportWait.job(), HappyConnOpener, noteCandidatesChange);
+ }
}
/**** CALLBACK WRAPPERS ************************************************************/
fwdServerClosedWrapper(const CommCloseCbParams ¶ms)
{
FwdState *fwd = (FwdState *)params.data;
- fwd->serverClosed(params.fd);
-}
-
-void
-fwdConnectDoneWrapper(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno, void *data)
-{
- FwdState *fwd = (FwdState *) data;
- fwd->connectDone(conn, status, xerrno);
+ fwd->serverClosed();
}
/**** PRIVATE *****************************************************************/
return false;
if (!self) { // we have aborted before the server called us back
- debugs(17, 5, HERE << "not retrying because of earlier abort");
+ debugs(17, 5, "not retrying because of earlier abort");
// we will be destroyed when the server clears its Pointer to us
return false;
}
if (!entry->isEmpty())
return false;
- if (n_tries > Config.forward_max_tries)
+ if (exhaustedTries())
+ return false;
+
+ if (request->flags.pinned && !pinnedCanRetry())
return false;
if (!EnoughTimeToReForward(start_t))
// Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
// complicated] code required to protect the PUT request body from being
// nibbled during the first try. Thus, Squid cannot retry some PUTs today.
- if (request->body_pipe != NULL)
+ if (request->body_pipe != nullptr)
return false;
// RFC2616 9.1 Safe and Idempotent Methods
}
void
-FwdState::serverClosed(int fd)
+FwdState::serverClosed()
{
- // XXX: fd is often -1 here
- debugs(17, 2, "FD " << fd << " " << entry->url() << " after " <<
- (fd >= 0 ? fd_table[fd].pconn.uses : -1) << " requests");
- if (fd >= 0 && serverConnection()->fd == fd)
- fwdPconnPool->noteUses(fd_table[fd].pconn.uses);
+ // XXX: This method logic attempts to tolerate Connection::close() called
+ // for serverConn earlier, by one of our dispatch()ed jobs. If that happens,
+ // serverConn will already be closed here or, worse, it will already be open
+ // for the next forwarding attempt. The current code prevents us getting
+ // stuck, but the long term solution is to stop sharing serverConn.
+ debugs(17, 2, serverConn);
+ if (Comm::IsConnOpen(serverConn)) {
+ const auto uses = fd_table[serverConn->fd].pconn.uses;
+ debugs(17, 3, "prior uses: " << uses);
+ fwdPconnPool->noteUses(uses); // XXX: May not have come from fwdPconnPool
+ serverConn->noteClosure();
+ }
+ serverConn = nullptr;
+ closeHandler = nullptr;
+ destinationReceipt = nullptr;
+
+ // will already be false if this closure happened before/without dispatch()
+ waitingForDispatched = false;
+
retryOrBail();
}
FwdState::retryOrBail()
{
if (checkRetry()) {
- debugs(17, 3, HERE << "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
- // we should retry the same destination if it failed due to pconn race
- if (pconnRace == raceHappened)
- debugs(17, 4, HERE << "retrying the same destination");
- else
- serverDestinations.erase(serverDestinations.begin()); // last one failed. try another.
- startConnectionOrFail();
+ debugs(17, 3, "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
+ useDestinations();
return;
}
request->hier.stopPeerClock(false);
- if (self != NULL && !err && shutting_down && entry->isEmpty()) {
- ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
+ if (self != nullptr && !err && shutting_down && entry->isEmpty()) {
+ const auto anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request, al);
errorAppendEntry(entry, anErr);
}
void
FwdState::doneWithRetries()
{
- if (request && request->body_pipe != NULL)
+ if (request && request->body_pipe != nullptr)
request->body_pipe->expectNoConsumption();
}
void
FwdState::handleUnregisteredServerEnd()
{
- debugs(17, 2, HERE << "self=" << self << " err=" << err << ' ' << entry->url());
+ debugs(17, 2, "self=" << self << " err=" << err << ' ' << entry->url());
assert(!Comm::IsConnOpen(serverConn));
+ serverConn = nullptr;
+ destinationReceipt = nullptr;
+
+ // might already be false due to uncertainties documented in serverClosed()
+ waitingForDispatched = false;
+
retryOrBail();
}
+/// starts a preparation step for an established connection; retries on failures
+template <typename StepStart>
void
-FwdState::connectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno)
+FwdState::advanceDestination(const char *stepDescription, const Comm::ConnectionPointer &conn, const StepStart &startStep)
{
- if (status != Comm::OK) {
- ErrorState *const anErr = makeConnectingError(ERR_CONNECT_FAIL);
- anErr->xerrno = xerrno;
- fail(anErr);
+ // TODO: Extract destination-specific handling from FwdState so that all the
+ // awkward, limited-scope advanceDestination() calls can be replaced with a
+ // single simple try/catch,retry block.
+ try {
+ startStep();
+ // now wait for the step callback
+ } catch (...) {
+        debugs(17, 2, "exception while trying to " << stepDescription << ": " << CurrentException);
+ closePendingConnection(conn, "connection preparation exception");
+ if (!err)
+ fail(new ErrorState(ERR_GATEWAY_FAILURE, Http::scInternalServerError, request, al));
+ retryOrBail();
+ }
+}
- /* it might have been a timeout with a partially open link */
- if (conn != NULL) {
- if (conn->getPeer())
- peerConnectFailed(conn->getPeer());
+/// called when a to-peer connection has been successfully obtained or
+/// when all candidate destinations have been tried and all have failed
+void
+FwdState::noteConnection(HappyConnOpener::Answer &answer)
+{
+ assert(!destinationReceipt);
+
+ transportWait.finish();
- conn->close();
+ updateAttempts(answer.n_tries);
+
+ ErrorState *error = nullptr;
+ if ((error = answer.error.get())) {
+ flags.dont_retry = true; // or HappyConnOpener would not have given up
+ syncHierNote(answer.conn, request->url.host());
+ Must(!Comm::IsConnOpen(answer.conn));
+ answer.error.clear(); // preserve error for errorSendComplete()
+ } else if (!Comm::IsConnOpen(answer.conn) || fd_table[answer.conn->fd].closing()) {
+ // The socket could get closed while our callback was queued. Sync
+ // Connection. XXX: Connection::fd may already be stale/invalid here.
+ // We do not know exactly why the connection got closed, so we play it
+ // safe, allowing retries only for persistent (reused) connections
+ if (answer.reused) {
+ destinationReceipt = answer.conn;
+ assert(destinationReceipt);
}
+ syncHierNote(answer.conn, request->url.host());
+ closePendingConnection(answer.conn, "conn was closed while waiting for noteConnection");
+ error = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al);
+ } else {
+ assert(!error);
+ destinationReceipt = answer.conn;
+ assert(destinationReceipt);
+ // serverConn remains nil until syncWithServerConn()
+ }
+
+ if (error) {
+ fail(error);
retryOrBail();
return;
}
- serverConn = conn;
- debugs(17, 3, HERE << serverConnection() << ": '" << entry->url() << "'" );
-
- closeHandler = comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
-
- if (!request->flags.pinned) {
- const CachePeer *p = serverConnection()->getPeer();
- const bool peerWantsTls = p && p->secure.encryptTransport;
- // userWillTlsToPeerForUs assumes CONNECT == HTTPS
- const bool userWillTlsToPeerForUs = p && p->options.originserver &&
- request->method == Http::METHOD_CONNECT;
- const bool needTlsToPeer = peerWantsTls && !userWillTlsToPeerForUs;
- const bool needTlsToOrigin = !p && request->url.getScheme() == AnyP::PROTO_HTTPS;
- if (needTlsToPeer || needTlsToOrigin || request->flags.sslPeek) {
- HttpRequest::Pointer requestPointer = request;
- AsyncCall::Pointer callback = asyncCall(17,4,
- "FwdState::ConnectedToPeer",
- FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
- // Use positive timeout when less than one second is left.
- const time_t connTimeout = serverDestinations[0]->connectTimeout(start_t);
- const time_t sslNegotiationTimeout = positiveTimeout(connTimeout);
- Security::PeerConnector *connector = nullptr;
-#if USE_OPENSSL
- if (request->flags.sslPeek)
- connector = new Ssl::PeekingPeerConnector(requestPointer, serverConnection(), clientConn, callback, al, sslNegotiationTimeout);
- else
-#endif
- connector = new Security::BlindPeerConnector(requestPointer, serverConnection(), callback, al, sslNegotiationTimeout);
- AsyncJob::Start(connector); // will call our callback
- return;
- }
+ if (answer.reused) {
+ syncWithServerConn(answer.conn, request->url.host(), answer.reused);
+ return dispatch();
}
- // if not encrypting just run the post-connect actions
- Security::EncryptorAnswer nil;
- connectedToPeer(nil);
+    // Check if we need to tunnel or encrypt the connection before use
+ if (const auto *peer = answer.conn->getPeer()) {
+        // Assume that only client-first bumped requests can try to connect
+        // to a remote server. Requests bumped with the other modes use
+        // pinned connections or fail.
+ const bool clientFirstBump = request->flags.sslBumped;
+ // We need a CONNECT tunnel to send encrypted traffic through a proxy,
+ // but we do not support TLS inside TLS, so we exclude HTTPS proxies.
+ const bool originWantsEncryptedTraffic =
+ request->method == Http::METHOD_CONNECT ||
+ request->flags.sslPeek ||
+ clientFirstBump;
+ if (originWantsEncryptedTraffic && // the "encrypted traffic" part
+ !peer->options.originserver && // the "through a proxy" part
+ !peer->secure.encryptTransport) // the "exclude HTTPS proxies" part
+ return advanceDestination("establish tunnel through proxy", answer.conn, [this,&answer] {
+ establishTunnelThruProxy(answer.conn);
+ });
+ }
+
+ secureConnectionToPeerIfNeeded(answer.conn);
}
void
-FwdState::connectedToPeer(Security::EncryptorAnswer &answer)
+FwdState::establishTunnelThruProxy(const Comm::ConnectionPointer &conn)
{
- if (ErrorState *error = answer.error.get()) {
+ const auto callback = asyncCallback(17, 4, FwdState::tunnelEstablishmentDone, this);
+ HttpRequest::Pointer requestPointer = request;
+ const auto tunneler = new Http::Tunneler(conn, requestPointer, callback, connectingTimeout(conn), al);
+
+ // TODO: Replace this hack with proper Comm::Connection-Pool association
+ // that is not tied to fwdPconnPool and can handle disappearing pools.
+ tunneler->noteFwdPconnUse = true;
+
+#if USE_DELAY_POOLS
+ Must(conn);
+ Must(conn->getPeer());
+ if (!conn->getPeer()->options.no_delay)
+ tunneler->setDelayId(entry->mem_obj->mostBytesAllowed());
+#endif
+ peerWait.start(tunneler, callback);
+}
+
+/// resumes operations after the (possibly failed) HTTP CONNECT exchange
+void
+FwdState::tunnelEstablishmentDone(Http::TunnelerAnswer &answer)
+{
+ peerWait.finish();
+
+ ErrorState *error = nullptr;
+ if (!answer.positive()) {
+ Must(!answer.conn);
+ error = answer.squidError.get();
+ Must(error);
+ answer.squidError.clear(); // preserve error for fail()
+ } else if (!Comm::IsConnOpen(answer.conn) || fd_table[answer.conn->fd].closing()) {
+ // The socket could get closed while our callback was queued. Sync
+ // Connection. XXX: Connection::fd may already be stale/invalid here.
+ closePendingConnection(answer.conn, "conn was closed while waiting for tunnelEstablishmentDone");
+ error = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al);
+ } else if (!answer.leftovers.isEmpty()) {
+ // This should not happen because TLS servers do not speak first. If we
+ // have to handle this, then pass answer.leftovers via a PeerConnector
+ // to ServerBio. See ClientBio::setReadBufData().
+ static int occurrences = 0;
+ const auto level = (occurrences++ < 100) ? DBG_IMPORTANT : 2;
+ debugs(17, level, "ERROR: Early data after CONNECT response. " <<
+ "Found " << answer.leftovers.length() << " bytes. " <<
+ "Closing " << answer.conn);
+ error = new ErrorState(ERR_CONNECT_FAIL, Http::scBadGateway, request, al);
+ closePendingConnection(answer.conn, "server spoke before tunnelEstablishmentDone");
+ }
+ if (error) {
fail(error);
- answer.error.clear(); // preserve error for errorSendComplete()
- if (CachePeer *p = serverConnection()->getPeer())
- peerConnectFailed(p);
- serverConnection()->close();
+ retryOrBail();
return;
}
- if (answer.tunneled) {
+ secureConnectionToPeerIfNeeded(answer.conn);
+}
+
+/// handles an established TCP connection to peer (including origin servers)
+void
+FwdState::secureConnectionToPeerIfNeeded(const Comm::ConnectionPointer &conn)
+{
+ assert(!request->flags.pinned);
+
+ const auto p = conn->getPeer();
+ const bool peerWantsTls = p && p->secure.encryptTransport;
+ // userWillTlsToPeerForUs assumes CONNECT == HTTPS
+ const bool userWillTlsToPeerForUs = p && p->options.originserver &&
+ request->method == Http::METHOD_CONNECT;
+ const bool needTlsToPeer = peerWantsTls && !userWillTlsToPeerForUs;
+ const bool clientFirstBump = request->flags.sslBumped; // client-first (already) bumped connection
+ const bool needsBump = request->flags.sslPeek || clientFirstBump;
+
+    // 'GET https://...' requests. If a peer is used, the request is
+    // forwarded as-is.
+ const bool needTlsToOrigin = !p && request->url.getScheme() == AnyP::PROTO_HTTPS && !clientFirstBump;
+
+ if (needTlsToPeer || needTlsToOrigin || needsBump) {
+ return advanceDestination("secure connection to peer", conn, [this,&conn] {
+ secureConnectionToPeer(conn);
+ });
+ }
+
+ // if not encrypting just run the post-connect actions
+ successfullyConnectedToPeer(conn);
+}
+
+/// encrypts an established TCP connection to peer (including origin servers)
+void
+FwdState::secureConnectionToPeer(const Comm::ConnectionPointer &conn)
+{
+ HttpRequest::Pointer requestPointer = request;
+ const auto callback = asyncCallback(17, 4, FwdState::connectedToPeer, this);
+ const auto sslNegotiationTimeout = connectingTimeout(conn);
+ Security::PeerConnector *connector = nullptr;
+#if USE_OPENSSL
+ if (request->flags.sslPeek)
+ connector = new Ssl::PeekingPeerConnector(requestPointer, conn, clientConn, callback, al, sslNegotiationTimeout);
+ else
+#endif
+ connector = new Security::BlindPeerConnector(requestPointer, conn, callback, al, sslNegotiationTimeout);
+ connector->noteFwdPconnUse = true;
+ encryptionWait.start(connector, callback);
+}
+
+/// called when all negotiations with the TLS-speaking peer have been completed
+void
+FwdState::connectedToPeer(Security::EncryptorAnswer &answer)
+{
+ encryptionWait.finish();
+
+ ErrorState *error = nullptr;
+ if ((error = answer.error.get())) {
+ assert(!answer.conn);
+ answer.error.clear(); // preserve error for errorSendComplete()
+ } else if (answer.tunneled) {
+ assert(!answer.conn);
// TODO: When ConnStateData establishes tunnels, its state changes
// [in ways that may affect logging?]. Consider informing
// ConnStateData about our tunnel or otherwise unifying tunnel
// establishment [side effects].
- unregister(serverConn); // async call owns it now
+ flags.dont_retry = true; // TunnelStateData took forwarding control
+ entry->abort();
complete(); // destroys us
return;
+ } else if (!Comm::IsConnOpen(answer.conn) || fd_table[answer.conn->fd].closing()) {
+ // The socket could get closed while our callback was queued. Sync
+ // Connection. XXX: Connection::fd may already be stale/invalid here.
+ closePendingConnection(answer.conn, "conn was closed while waiting for connectedToPeer");
+ error = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al);
}
- // should reach ConnStateData before the dispatched Client job starts
- CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
- ConnStateData::notePeerConnection, serverConnection());
-
- if (serverConnection()->getPeer())
- peerConnectSucceded(serverConnection()->getPeer());
+ if (error) {
+ fail(error);
+ retryOrBail();
+ return;
+ }
- flags.connected_okay = true;
- dispatch();
+ successfullyConnectedToPeer(answer.conn);
}
+/// called when all negotiations with the peer have been completed
void
-FwdState::connectTimeout(int fd)
+FwdState::successfullyConnectedToPeer(const Comm::ConnectionPointer &conn)
{
- debugs(17, 2, "fwdConnectTimeout: FD " << fd << ": '" << entry->url() << "'" );
- assert(serverDestinations[0] != NULL);
- assert(fd == serverDestinations[0]->fd);
+ syncWithServerConn(conn, request->url.host(), false);
- if (entry->isEmpty()) {
- ErrorState *anErr = new ErrorState(ERR_CONNECT_FAIL, Http::scGatewayTimeout, request);
- anErr->xerrno = ETIMEDOUT;
- fail(anErr);
+ // should reach ConnStateData before the dispatched Client job starts
+ CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
+ ConnStateData::notePeerConnection, serverConnection());
- /* This marks the peer DOWN ... */
- if (serverDestinations[0]->getPeer())
- peerConnectFailed(serverDestinations[0]->getPeer());
- }
+ NoteOutgoingConnectionSuccess(serverConnection()->getPeer());
- if (Comm::IsConnOpen(serverDestinations[0])) {
- serverDestinations[0]->close();
- }
+ dispatch();
}
-/// called when serverConn is set to an _open_ to-peer connection
+/// commits to using the given open to-peer connection
void
-FwdState::syncWithServerConn(const char *host)
+FwdState::syncWithServerConn(const Comm::ConnectionPointer &conn, const char *host, const bool reused)
{
- if (Ip::Qos::TheConfig.isAclTosActive())
- Ip::Qos::setSockTos(serverConn, GetTosToServer(request));
+ Must(IsConnOpen(conn));
+ serverConn = conn;
+ // no effect on destinationReceipt (which may even be nil here)
-#if SO_MARK
- if (Ip::Qos::TheConfig.isAclNfmarkActive())
- Ip::Qos::setSockNfmark(serverConn, GetNfmarkToServer(request));
-#endif
+ closeHandler = comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
+
+ if (reused) {
+ pconnRace = racePossible;
+ ResetMarkingsToServer(request, *serverConn);
+ } else {
+ pconnRace = raceImpossible;
+ // Comm::ConnOpener already applied proper/current markings
+ }
syncHierNote(serverConn, host);
}
al->hier.resetPeerNotes(server, host);
}
+/// sets n_tries to the given value (while keeping ALE, if any, in sync)
+void
+FwdState::updateAttempts(const int newValue)
+{
+ Assure(n_tries <= newValue); // n_tries cannot decrease
+
+ // Squid probably creates at most one FwdState/TunnelStateData object per
+ // ALE, but, unlike an assignment would, this increment logic works even if
+ // Squid uses multiple such objects for a given ALE in some esoteric cases.
+ if (al)
+ al->requestAttempts += (newValue - n_tries);
+
+ n_tries = newValue;
+ debugs(17, 5, n_tries);
+}
+
/**
* Called after forwarding path selection (via peer select) has taken place
* and whenever forwarding needs to attempt a new connection (routing failover).
void
FwdState::connectStart()
{
- assert(serverDestinations.size() > 0);
+ debugs(17, 3, *destinations << " to " << entry->url());
+
+ Must(!request->pinnedConnection());
- debugs(17, 3, "fwdConnectStart: " << entry->url());
+ assert(!destinations->empty());
+ assert(!transporting());
+
+ // Ditch error page if it was created before.
+ // A new one will be created if there's another problem
+ delete err;
+ err = nullptr;
+ request->clearError();
request->hier.startPeerClock();
- // Do not fowrward bumped connections to parent proxy unless it is an
- // origin server
- if (serverDestinations[0]->getPeer() && !serverDestinations[0]->getPeer()->options.originserver && request->flags.sslBumped) {
- debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parent proxy are not allowed");
- ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request);
- fail(anErr);
- stopAndDestroy("SslBump misconfiguration");
- return;
+ const auto callback = asyncCallback(17, 5, FwdState::noteConnection, this);
+ HttpRequest::Pointer cause = request;
+ const auto cs = new HappyConnOpener(destinations, callback, cause, start_t, n_tries, al);
+ cs->setHost(request->url.host());
+ bool retriable = checkRetriable();
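+    // the server_pconn_for_nonretriable directive may allow persistent
+    // connection reuse even for requests that Squid cannot safely retry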
+ if (!retriable && Config.accessList.serverPconnForNonretriable) {
+ ACLFilledChecklist ch(Config.accessList.serverPconnForNonretriable, request, nullptr);
+ ch.al = al;
+ ch.syncAle(request, nullptr);
+ retriable = ch.fastCheck().allowed();
}
+ cs->setRetriable(retriable);
+ cs->allowPersistent(pconnRace != raceHappened);
+ destinations->notificationPending = true; // start() is async
+ transportWait.start(cs, callback);
+}
- request->flags.pinned = false; // XXX: what if the ConnStateData set this to flag existing credentials?
- // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
- // XXX: also, logs will now lie if pinning is broken and leads to an error message.
- if (serverDestinations[0]->peerType == PINNED) {
- ConnStateData *pinned_connection = request->pinnedConnection();
- debugs(17,7, "pinned peer connection: " << pinned_connection);
- // pinned_connection may become nil after a pconn race
- serverConn = pinned_connection ? pinned_connection->borrowPinnedConnection(request, serverDestinations[0]->getPeer()) : nullptr;
- if (Comm::IsConnOpen(serverConn)) {
- flags.connected_okay = true;
- ++n_tries;
- request->flags.pinned = true;
-
- if (pinned_connection->pinnedAuth())
- request->flags.auth = true;
-
- closeHandler = comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
-
- syncWithServerConn(pinned_connection->pinning.host);
-
- // the server may close the pinned connection before this request
- pconnRace = racePossible;
- dispatch();
- return;
- }
-
- // Pinned connection failure.
- debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
- ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
+/// send request on an existing connection dedicated to the requesting client
+void
+FwdState::usePinned()
+{
+ const auto connManager = request->pinnedConnection();
+ debugs(17, 7, "connection manager: " << connManager);
+
+ try {
+ // TODO: Refactor syncWithServerConn() and callers to always set
+ // serverConn inside that method.
+ serverConn = ConnStateData::BorrowPinnedConnection(request, al);
+ debugs(17, 5, "connection: " << serverConn);
+ } catch (ErrorState * const anErr) {
+ syncHierNote(nullptr, connManager ? connManager->pinning.host : request->url.host());
+ serverConn = nullptr;
fail(anErr);
+ // Connection managers monitor their idle pinned to-server
+ // connections and close from-client connections upon seeing
+ // a to-server connection closure. Retrying here is futile.
stopAndDestroy("pinned connection failure");
return;
}
- // Use pconn to avoid opening a new connection.
- const char *host = NULL;
- if (!serverDestinations[0]->getPeer())
- host = request->url.host();
-
- Comm::ConnectionPointer temp;
- // Avoid pconns after races so that the same client does not suffer twice.
- // This does not increase the total number of connections because we just
- // closed the connection that failed the race. And re-pinning assumes this.
- if (pconnRace != raceHappened)
- temp = pconnPop(serverDestinations[0], host);
+ updateAttempts(n_tries + 1);
- const bool openedPconn = Comm::IsConnOpen(temp);
- pconnRace = openedPconn ? racePossible : raceImpossible;
+ request->flags.pinned = true;
- // if we found an open persistent connection to use. use it.
- if (openedPconn) {
- serverConn = temp;
- flags.connected_okay = true;
- debugs(17, 3, HERE << "reusing pconn " << serverConnection());
- ++n_tries;
+ assert(connManager);
+ if (connManager->pinnedAuth())
+ request->flags.auth = true;
- closeHandler = comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
+ // the server may close the pinned connection before this request
+ const auto reused = true;
+ syncWithServerConn(serverConn, connManager->pinning.host, reused);
- syncWithServerConn(request->url.host());
-
- dispatch();
- return;
- }
-
- // We will try to open a new connection, possibly to the same destination.
- // We reset serverDestinations[0] in case we are using it again because
- // ConnOpener modifies its destination argument.
- serverDestinations[0]->local.port(0);
- serverConn = NULL;
-
-#if URL_CHECKSUM_DEBUG
- entry->mem_obj->checkUrlChecksum();
-#endif
-
- GetMarkingsToServer(request, *serverDestinations[0]);
-
- calls.connector = commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper, this));
- const time_t connTimeout = serverDestinations[0]->connectTimeout(start_t);
- Comm::ConnOpener *cs = new Comm::ConnOpener(serverDestinations[0], calls.connector, connTimeout);
- if (host)
- cs->setHost(host);
- AsyncJob::Start(cs);
+ dispatch();
}
void
*/
assert(Comm::IsConnOpen(serverConn));
+ assert(!waitingForDispatched);
+ waitingForDispatched = true;
+
fd_note(serverConnection()->fd, entry->url());
fd_table[serverConnection()->fd].noteUse();
EBIT_SET(entry->flags, ENTRY_DISPATCHED);
+ flags.connected_okay = true;
+
netdbPingSite(request->url.host());
/* Retrieves remote server TOS or MARK value, and stores it as part of the
if (Ip::Qos::TheConfig.isHitNfmarkActive()) {
if (Comm::IsConnOpen(clientConn) && Comm::IsConnOpen(serverConnection())) {
fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
- /* Get the netfilter mark for the connection */
- Ip::Qos::getNfmarkFromServer(serverConnection(), clientFde);
+ /* Get the netfilter CONNMARK */
+ clientFde->nfConnmarkFromServer = Ip::Qos::getNfConnmark(serverConnection(), Ip::Qos::dirOpened);
}
}
#if USE_OPENSSL
if (request->flags.sslPeek) {
+ // we were just asked to peek at the server, and we did that
CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
ConnStateData::httpsPeeked, ConnStateData::PinnedIdleContext(serverConnection(), request));
unregister(serverConn); // async call owns it now
+ flags.dont_retry = true; // we gave up forwarding control
+ entry->abort();
complete(); // destroys us
return;
}
#endif
- if (serverConnection()->getPeer() != NULL) {
- ++ serverConnection()->getPeer()->stats.fetches;
- request->peer_login = serverConnection()->getPeer()->login;
- request->peer_domain = serverConnection()->getPeer()->domain;
- request->flags.auth_no_keytab = serverConnection()->getPeer()->options.auth_no_keytab;
+ if (const auto peer = serverConnection()->getPeer()) {
+ ++peer->stats.fetches;
+ request->prepForPeering(*peer);
httpStart(this);
} else {
assert(!request->flags.sslPeek);
- request->peer_login = NULL;
- request->peer_domain = NULL;
- request->flags.auth_no_keytab = 0;
+ request->prepForDirect();
switch (request->url.getScheme()) {
httpStart(this);
break;
- case AnyP::PROTO_GOPHER:
- gopherStart(this);
- break;
-
case AnyP::PROTO_FTP:
if (request->flags.ftpNative)
Ftp::StartRelay(this);
Ftp::StartGateway(this);
break;
- case AnyP::PROTO_CACHE_OBJECT:
-
case AnyP::PROTO_URN:
fatal_dump("Should never get here");
break;
default:
debugs(17, DBG_IMPORTANT, "WARNING: Cannot retrieve '" << entry->url() << "'.");
- ErrorState *anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request);
+ const auto anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request, al);
fail(anErr);
// Set the dont_retry flag because this is not a transient (network) error.
flags.dont_retry = true;
if (Comm::IsConnOpen(serverConn)) {
- serverConn->close();
+ serverConn->close(); // trigger cleanup
}
break;
}
StoreEntry *e = entry;
if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
- debugs(17, 3, HERE << "entry aborted");
+ debugs(17, 3, "entry aborted");
return 0;
}
e->mem_obj->checkUrlChecksum();
#endif
- debugs(17, 3, HERE << e->url() << "?" );
+    debugs(17, 3, e->url() << "?");
+
+ if (request->flags.pinned && !pinnedCanRetry()) {
+ debugs(17, 3, "pinned connection; cannot retry");
+ return 0;
+ }
if (!EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
- debugs(17, 3, HERE << "No, ENTRY_FWD_HDR_WAIT isn't set");
+ debugs(17, 3, "No, ENTRY_FWD_HDR_WAIT isn't set");
return 0;
}
- if (n_tries > Config.forward_max_tries)
+ if (exhaustedTries())
return 0;
if (request->bodyNibbled())
return 0;
- if (serverDestinations.size() <= 1 && !PeerSelectionInitiator::subscribed) {
- // NP: <= 1 since total count includes the recently failed one.
- debugs(17, 3, HERE << "No alternative forwarding paths left");
+ if (destinations->empty() && !PeerSelectionInitiator::subscribed) {
+ debugs(17, 3, "No alternative forwarding paths left");
return 0;
}
- const Http::StatusCode s = e->getReply()->sline.status();
- debugs(17, 3, HERE << "status " << s);
- return reforwardableStatus(s);
-}
-
-/**
- * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
- * on whether this is a validation request. RFC 2616 says that we MUST reply
- * with "504 Gateway Timeout" if validation fails and cached reply has
- * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
- */
-ErrorState *
-FwdState::makeConnectingError(const err_type type) const
-{
- return new ErrorState(type, request->flags.needValidation ?
- Http::scGatewayTimeout : Http::scServiceUnavailable, request);
+ const auto s = entry->mem().baseReply().sline.status();
+ debugs(17, 3, "status " << s);
+ return Http::IsReforwardableStatus(s);
}
static void
/**** STATIC MEMBER FUNCTIONS *************************************************/
-bool
-FwdState::reforwardableStatus(const Http::StatusCode s) const
-{
- switch (s) {
-
- case Http::scBadGateway:
-
- case Http::scGatewayTimeout:
- return true;
-
- case Http::scForbidden:
-
- case Http::scInternalServerError:
-
- case Http::scNotImplemented:
-
- case Http::scServiceUnavailable:
- return Config.retry.onerror;
-
- default:
- return false;
- }
-
- /* NOTREACHED */
-}
-
-/**
- * Decide where details need to be gathered to correctly describe a persistent connection.
- * What is needed:
- * - the address/port details about this link
- * - domain name of server at other end of this link (either peer or requested host)
- */
-void
-FwdState::pconnPush(Comm::ConnectionPointer &conn, const char *domain)
-{
- if (conn->getPeer()) {
- fwdPconnPool->push(conn, NULL);
- } else {
- fwdPconnPool->push(conn, domain);
- }
-}
-
-Comm::ConnectionPointer
-FwdState::pconnPop(const Comm::ConnectionPointer &dest, const char *domain)
-{
- bool retriable = checkRetriable();
- if (!retriable && Config.accessList.serverPconnForNonretriable) {
- ACLFilledChecklist ch(Config.accessList.serverPconnForNonretriable, request, NULL);
- retriable = ch.fastCheck().allowed();
- }
- // always call shared pool first because we need to close an idle
- // connection there if we have to use a standby connection.
- Comm::ConnectionPointer conn = fwdPconnPool->pop(dest, domain, retriable);
- if (!Comm::IsConnOpen(conn)) {
- // either there was no pconn to pop or this is not a retriable xaction
- if (CachePeer *peer = dest->getPeer()) {
- if (peer->standby.pool)
- conn = peer->standby.pool->pop(dest, domain, true);
- }
- }
- return conn; // open, closed, or nil
-}
-
void
FwdState::initModule()
{
++ FwdReplyCodes[tries][status];
}
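+
+/// whether we have used up all permitted forwarding attempts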
+bool
+FwdState::exhaustedTries() const
+{
+ return n_tries >= Config.forward_max_tries;
+}
+
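+/// whether a pinned to-peer connection can be replaced with another one
+/// (in order to retry or reforward the request)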
+bool
+FwdState::pinnedCanRetry() const
+{
+ assert(request->flags.pinned);
+
+ // pconn race on pinned connection: Currently we do not have any mechanism
+ // to retry current pinned connection path.
+ if (pconnRace == raceHappened)
+ return false;
+
+ // If a bumped connection was pinned, then the TLS client was given our peer
+ // details. Do not retry because we do not ensure that those details stay
+ // constant. Step1-bumped connections do not get our TLS peer details, are
+ // never pinned, and, hence, never reach this method.
+ if (request->flags.sslBumped)
+ return false;
+
+ // The other pinned cases are FTP proxying and connection-based HTTP
+ // authentication. TODO: Do these cases have restrictions?
+ return true;
+}
+
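+/// \returns the time left for establishing this connection, forced to stay
+/// positive (via positiveTimeout()) even when less than a second remains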
+time_t
+FwdState::connectingTimeout(const Comm::ConnectionPointer &conn) const
+{
+ const auto connTimeout = conn->connectTimeout(start_t);
+ return positiveTimeout(connTimeout);
+}
+
/**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
/*
}
/// Checks for a netfilter mark value to apply depending on the ACL
-nfmark_t
-aclMapNfmark(acl_nfmark * head, ACLChecklist * ch)
+Ip::NfMarkConfig
+aclFindNfMarkConfig(acl_nfmark * head, ACLChecklist * ch)
{
for (acl_nfmark *l = head; l; l = l->next) {
if (!l->aclList || ch->fastCheck(l->aclList).allowed())
- return l->nfmark;
+ return l->markConfig;
}
- return 0;
+ return {};
}
void
-getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn)
+getOutgoingAddress(HttpRequest * request, const Comm::ConnectionPointer &conn)
{
// skip if an outgoing address is already set.
if (!conn->local.isAnyAddr()) return;
else
#endif
conn->local = request->client_addr;
+ conn->local.port(0); // let OS pick the source port to prevent address clashes
// some flags need setting on the socket to use this address
conn->flags |= COMM_DOBIND;
conn->flags |= COMM_TRANSPARENT;
return; // anything will do.
}
- ACLFilledChecklist ch(NULL, request, NULL);
- ch.dst_peer_name = conn->getPeer() ? conn->getPeer()->name : NULL;
+ ACLFilledChecklist ch(nullptr, request, nullptr);
+ ch.dst_peer_name = conn->getPeer() ? conn->getPeer()->name : nullptr;
ch.dst_addr = conn->remote;
// TODO use the connection details in ACL.
}
}
-tos_t
-GetTosToServer(HttpRequest * request)
+/// \returns the TOS value that should be set on the to-peer connection
+static tos_t
+GetTosToServer(HttpRequest * request, Comm::Connection &conn)
{
- ACLFilledChecklist ch(NULL, request, NULL);
+ if (!Ip::Qos::TheConfig.tosToServer)
+ return 0;
+
+ ACLFilledChecklist ch(nullptr, request, nullptr);
+ ch.dst_peer_name = conn.getPeer() ? conn.getPeer()->name : nullptr;
+ ch.dst_addr = conn.remote;
return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
}
-nfmark_t
-GetNfmarkToServer(HttpRequest * request)
+/// \returns the Netfilter mark that should be set on the to-peer connection
+static nfmark_t
+GetNfmarkToServer(HttpRequest * request, Comm::Connection &conn)
{
- ACLFilledChecklist ch(NULL, request, NULL);
- return aclMapNfmark(Ip::Qos::TheConfig.nfmarkToServer, &ch);
+ if (!Ip::Qos::TheConfig.nfmarkToServer)
+ return 0;
+
+ ACLFilledChecklist ch(nullptr, request, nullptr);
+ ch.dst_peer_name = conn.getPeer() ? conn.getPeer()->name : nullptr;
+ ch.dst_addr = conn.remote;
+ const auto mc = aclFindNfMarkConfig(Ip::Qos::TheConfig.nfmarkToServer, &ch);
+ return mc.mark;
}
void
GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
{
// Get the server side TOS and Netfilter mark to be set on the connection.
- if (Ip::Qos::TheConfig.isAclTosActive()) {
- conn.tos = GetTosToServer(request);
- debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos));
- }
+ conn.tos = GetTosToServer(request, conn);
+ conn.nfmark = GetNfmarkToServer(request, conn);
+ debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos) << " netfilter mark " << conn.nfmark);
+}
-#if SO_MARK && USE_LIBCAP
- conn.nfmark = GetNfmarkToServer(request);
- debugs(17, 3, "from " << conn.local << " netfilter mark " << conn.nfmark);
-#else
- conn.nfmark = 0;
-#endif
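+/// recomputes and reapplies outgoing TOS and netfilter markings on a reused
+/// (i.e., already open) to-server connection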
+void
+ResetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
+{
+ GetMarkingsToServer(request, conn);
+
+    // TODO: Avoid these calls if markings have not changed.
+ if (conn.tos)
+ Ip::Qos::setSockTos(&conn, conn.tos);
+ if (conn.nfmark)
+ Ip::Qos::setSockNfmark(&conn, conn.nfmark);
}