#include "FwdState.h"
#include "globals.h"
#include "gopher.h"
+#include "HappyConnOpener.h"
#include "hier_code.h"
#include "http.h"
#include "http/Stream.h"
#include "neighbors.h"
#include "pconn.h"
#include "PeerPoolMgr.h"
+#include "ResolvedPeers.h"
#include "security/BlindPeerConnector.h"
#include "SquidConfig.h"
#include "SquidTime.h"
#include <cerrno>
static CLCB fwdServerClosedWrapper;
-static CNCB fwdConnectDoneWrapper;
static OBJH fwdStats;
#define MAX_FWD_STATS_IDX 9
static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];
-static PconnPool *fwdPconnPool = new PconnPool("server-peers", NULL);
+PconnPool *fwdPconnPool = new PconnPool("server-peers", nullptr);
+
CBDATA_CLASS_INIT(FwdState);
class FwdStatePeerAnswerDialer: public CallDialer, public Security::PeerConnector::CbDialer
} else {
debugs(17, 7, HERE << "store entry aborted; no connection to close");
}
- fwd->serverDestinations.clear();
fwd->stopAndDestroy("store entry aborted");
}
clientConn(client),
start_t(squid_curtime),
n_tries(0),
+ destinations(new ResolvedPeers()),
pconnRace(raceImpossible)
{
debugs(17, 2, "Forwarding client request " << client << ", url=" << e->url());
HTTPMSGLOCK(request);
- serverDestinations.reserve(Config.forward_max_tries);
e->lock("FwdState");
flags.connected_okay = false;
flags.dont_retry = false;
flags.forward_completed = false;
+ flags.destinationsFound = false;
debugs(17, 3, "FwdState constructed, this=" << this);
}
const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
if (isIntercepted && useOriginalDst) {
selectPeerForIntercepted();
- useDestinations();
return;
}
#endif
FwdState::stopAndDestroy(const char *reason)
{
debugs(17, 3, "for " << reason);
+
+ if (opening())
+ cancelOpening(reason);
+
PeerSelectionInitiator::subscribed = false; // may already be false
self = nullptr; // we hope refcounting destroys us soon; may already be nil
/* do not place any code here as this object may be gone by now */
}
+/// Notify connOpener that we no longer need connections. We do not have to do
+/// this -- connOpener would eventually notice on its own, but notifying reduces
+/// waste and speeds up spare connection opening for other transactions (that
+/// could otherwise wait for this transaction to use its spare allowance).
+void
+FwdState::cancelOpening(const char *reason)
+{
+ assert(calls.connector);
+ calls.connector->cancel(reason);
+ calls.connector = nullptr;
+ notifyConnOpener();
+ connOpener.clear();
+}
+
#if STRICT_ORIGINAL_DST
/// bypasses peerSelect() when dealing with intercepted requests
void
// emulate the PeerSelector::selectPinned() "Skip ICP" effect
entry->ping_status = PING_DONE;
- serverDestinations.push_back(nullptr);
+ usePinned();
return;
}
getOutgoingAddress(request, p);
debugs(17, 3, HERE << "using client original destination: " << *p);
- serverDestinations.push_back(p);
+ destinations->addPath(p);
+ destinations->destinationsFinalized = true;
+ PeerSelectionInitiator::subscribed = false;
+ useDestinations();
}
#endif
entry = NULL;
+ if (opening())
+ cancelOpening("~FwdState");
+
if (Comm::IsConnOpen(serverConn))
closeServerConnection("~FwdState");
- serverDestinations.clear();
-
debugs(17, 3, "FwdState destructed, this=" << this);
}
void
FwdState::useDestinations()
{
- debugs(17, 3, serverDestinations.size() << " paths to " << entry->url());
- if (!serverDestinations.empty()) {
- if (!serverDestinations[0])
- usePinned();
- else
- connectStart();
+ if (!destinations->empty()) {
+ connectStart();
} else {
if (PeerSelectionInitiator::subscribed) {
debugs(17, 4, "wait for more destinations to try");
if (pconnRace == racePossible) {
debugs(17, 5, HERE << "pconn race happened");
pconnRace = raceHappened;
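+ // retry the same destination that just failed due to the pconn race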
+ destinations->retryPath(serverConn);
}
if (ConnStateData *pinned_connection = request->pinnedConnection()) {
entry->reset();
- // drop the last path off the selection list. try the next one.
- if (!serverDestinations.empty()) // paranoid
- serverDestinations.erase(serverDestinations.begin());
useDestinations();
} else {
void
FwdState::noteDestination(Comm::ConnectionPointer path)
{
- const bool wasBlocked = serverDestinations.empty();
- // XXX: Push even a nil path so that subsequent noteDestination() calls
- // can rely on wasBlocked to detect ongoing/concurrent attempts.
- // Upcoming Happy Eyeballs changes will handle this properly.
- serverDestinations.push_back(path);
- assert(wasBlocked || path); // pinned destinations are always selected first
-
- if (wasBlocked)
- useDestinations();
- // else continue to use one of the previously noted destinations;
- // if all of them fail, we may try this path
+ flags.destinationsFound = true;
+
+ if (!path) {
+ // We can call usePinned() without fear of clashing with an earlier
+ // forwarding attempt because PINNED must be the first destination.
+ assert(destinations->empty());
+ usePinned();
+ return;
+ }
+
+ debugs(17, 3, path);
+
+ // Requests bumped at step2+ require their pinned connection. Since we
+ // failed to reuse the pinned connection, we now must terminate the
+ // bumped request. For client-first and step1 bumped requests, the
+ // from-client connection is already bumped, but the connection to the
+ // server is not established/pinned so they must be excluded. We can
+ // recognize step1 bumping by nil ConnStateData::serverBump().
+#if USE_OPENSSL
+ const auto clientFirstBump = request->clientConnectionManager.valid() &&
+ (request->clientConnectionManager->sslBumpMode == Ssl::bumpClientFirst ||
+ (request->clientConnectionManager->sslBumpMode == Ssl::bumpBump && !request->clientConnectionManager->serverBump())
+ );
+#else
+ const auto clientFirstBump = false;
+#endif /* USE_OPENSSL */
+ if (request->flags.sslBumped && !clientFirstBump) {
+ // TODO: Factor out/reuse as Occasionally(DBG_IMPORTANT, 2[, occurrences]).
+ static int occurrences = 0;
+ const auto level = (occurrences++ < 100) ? DBG_IMPORTANT : 2;
+ debugs(17, level, "BUG: Lost previously bumped from-Squid connection. Rejecting bumped request.");
+ fail(new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al));
+ self = nullptr; // refcounted
+ return;
+ }
+
+ destinations->addPath(path);
+
+ if (Comm::IsConnOpen(serverConn)) {
+ // We are already using a previously opened connection, so we cannot be
+ // waiting for connOpener. We still receive destinations for backup.
+ Must(!opening());
+ return;
+ }
+
+ if (opening()) {
+ notifyConnOpener();
+ return; // and continue to wait for FwdState::noteConnection() callback
+ }
+
+ // This is the first path candidate we have seen. Create connOpener.
+ useDestinations();
}
void
FwdState::noteDestinationsEnd(ErrorState *selectionError)
{
PeerSelectionInitiator::subscribed = false;
- if (serverDestinations.empty()) { // was blocked, waiting for more paths
+ destinations->destinationsFinalized = true;
+ if (!flags.destinationsFound) {
if (selectionError) {
debugs(17, 3, "Will abort forwarding because path selection has failed.");
Must(!err); // if we tried to connect, then path selection succeeded
// else continue to use one of the previously noted destinations;
// if all of them fail, forwarding as whole will fail
Must(!selectionError); // finding at least one path means selection succeeded
+
+ if (Comm::IsConnOpen(serverConn)) {
+ // We are already using a previously opened connection, so we cannot be
+ // waiting for connOpener. We were receiving destinations for backup.
+ Must(!opening());
+ return;
+ }
+
+ Must(opening()); // or we would be stuck with nothing to do or wait for
+ notifyConnOpener();
+ // and continue to wait for FwdState::noteConnection() callback
+}
+
+/// makes sure connOpener knows that destinations have changed
+void
+FwdState::notifyConnOpener()
+{
+ if (destinations->notificationPending) {
+ debugs(17, 7, "reusing pending notification about " << *destinations);
+ } else {
+ debugs(17, 7, "notifying about " << *destinations);
+ destinations->notificationPending = true;
+ CallJobHere(17, 5, connOpener, HappyConnOpener, noteCandidatesChange);
+ }
}
/**** CALLBACK WRAPPERS ************************************************************/
fwd->serverClosed(params.fd);
}
-void
-fwdConnectDoneWrapper(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno, void *data)
-{
- FwdState *fwd = (FwdState *) data;
- fwd->connectDone(conn, status, xerrno);
-}
-
/**** PRIVATE *****************************************************************/
/*
{
if (checkRetry()) {
debugs(17, 3, HERE << "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
- // we should retry the same destination if it failed due to pconn race
- if (pconnRace == raceHappened)
- debugs(17, 4, HERE << "retrying the same destination");
- else
- serverDestinations.erase(serverDestinations.begin()); // last one failed. try another.
useDestinations();
return;
}
retryOrBail();
}
-/// handles an established TCP connection to peer (including origin servers)
+/// called when a to-peer connection has been successfully obtained or
+/// when all candidate destinations have been tried and all have failed
void
-FwdState::connectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno)
+FwdState::noteConnection(HappyConnOpener::Answer &answer)
{
- if (status != Comm::OK) {
- ErrorState *const anErr = makeConnectingError(ERR_CONNECT_FAIL);
- anErr->xerrno = xerrno;
- fail(anErr);
+ calls.connector = nullptr;
+ connOpener.clear();
- /* it might have been a timeout with a partially open link */
- if (conn != NULL) {
- if (conn->getPeer())
- peerConnectFailed(conn->getPeer());
+ Must(n_tries <= answer.n_tries); // n_tries cannot decrease
+ n_tries = answer.n_tries;
- conn->close();
- }
- retryOrBail();
+ if (const auto error = answer.error.get()) {
+ flags.dont_retry = true; // or HappyConnOpener would not have given up
+ syncHierNote(answer.conn, request->url.host());
+ fail(error);
+ answer.error.clear(); // preserve error for errorSendComplete()
+ retryOrBail(); // will notice flags.dont_retry and bail
return;
}
- serverConn = conn;
- debugs(17, 3, HERE << serverConnection() << ": '" << entry->url() << "'" );
-
- closeHandler = comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
+ syncWithServerConn(answer.conn, request->url.host(), answer.reused);
- // request->flags.pinned cannot be true in connectDone(). The flag is
- // only set when we dispatch the request to an existing (pinned) connection.
- assert(!request->flags.pinned);
+ if (answer.reused)
+ return dispatch();
+ // Check whether we need to establish a TLS layer before using this connection
if (const CachePeer *peer = serverConnection()->getPeer()) {
// Assume that it is only possible for the client-first from the
// bumping modes to try connect to a remote server. The bumped
AsyncCall::Pointer callback = asyncCall(17,4,
"FwdState::ConnectedToPeer",
FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
- const auto sslNegotiationTimeout = connectingTimeout(serverDestinations[0]);
+ const auto sslNegotiationTimeout = connectingTimeout(serverConnection());
Security::PeerConnector *connector = nullptr;
#if USE_OPENSSL
if (request->flags.sslPeek)
if (serverConnection()->getPeer())
peerConnectSucceded(serverConnection()->getPeer());
- flags.connected_okay = true;
dispatch();
}
+/// commits to using the given open to-peer connection
void
-FwdState::connectTimeout(int fd)
+FwdState::syncWithServerConn(const Comm::ConnectionPointer &conn, const char *host, const bool reused)
{
- debugs(17, 2, "fwdConnectTimeout: FD " << fd << ": '" << entry->url() << "'" );
- assert(serverDestinations[0] != NULL);
- assert(fd == serverDestinations[0]->fd);
-
- if (entry->isEmpty()) {
- const auto anErr = new ErrorState(ERR_CONNECT_FAIL, Http::scGatewayTimeout, request, al);
- anErr->xerrno = ETIMEDOUT;
- fail(anErr);
+ Must(IsConnOpen(conn));
+ serverConn = conn;
- /* This marks the peer DOWN ... */
- if (serverDestinations[0]->getPeer())
- peerConnectFailed(serverDestinations[0]->getPeer());
- }
+ closeHandler = comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
- if (Comm::IsConnOpen(serverDestinations[0])) {
- serverDestinations[0]->close();
+ if (reused) {
+ pconnRace = racePossible;
+ ResetMarkingsToServer(request, *serverConn);
+ } else {
+ pconnRace = raceImpossible;
+ // Comm::ConnOpener already applied proper/current markings
}
-}
-
-/// called when serverConn is set to an _open_ to-peer connection
-void
-FwdState::syncWithServerConn(const char *host)
-{
- if (Ip::Qos::TheConfig.isAclTosActive())
- Ip::Qos::setSockTos(serverConn, GetTosToServer(request));
-
-#if SO_MARK
- if (Ip::Qos::TheConfig.isAclNfmarkActive())
- Ip::Qos::setSockNfmark(serverConn, GetNfmarkToServer(request));
-#endif
syncHierNote(serverConn, host);
}
void
FwdState::connectStart()
{
- assert(serverDestinations.size() > 0);
+ debugs(17, 3, *destinations << " to " << entry->url());
- debugs(17, 3, "fwdConnectStart: " << entry->url());
+ assert(!destinations->empty());
+ assert(!opening());
- // pinned connections go through usePinned() rather than connectStart()
- assert(serverDestinations[0] != nullptr);
- request->flags.pinned = false;
-
- // Ditch the previous error if any.
- // A new error page will be created if there is another problem.
+ // Ditch the previous error page, if any.
+ // A new one will be created if there is another problem.
delete err;
err = nullptr;
request->clearError();
-
- // Update logging information with the upcoming server connection
- // destination. Do this early so that any connection establishment errors
- // are attributed to this destination. If successfully connected, this
- // destination becomes serverConnection().
- syncHierNote(serverDestinations[0], request->url.host());
+ serverConn = nullptr;
request->hier.startPeerClock();
- // Requests bumped at step2+ require their pinned connection. Since we
- // failed to reuse the pinned connection, we now must terminate the
- // bumped request. For client-first and step1 bumped requests, the
- // from-client connection is already bumped, but the connection to the
- // server is not established/pinned so they must be excluded. We can
- // recognize step1 bumping by nil ConnStateData::serverBump().
-#if USE_OPENSSL
- const auto clientFirstBump = request->clientConnectionManager.valid() &&
- (request->clientConnectionManager->sslBumpMode == Ssl::bumpClientFirst ||
- (request->clientConnectionManager->sslBumpMode == Ssl::bumpBump && !request->clientConnectionManager->serverBump())
- );
-#else
- const auto clientFirstBump = false;
-#endif /* USE_OPENSSL */
- if (request->flags.sslBumped && !clientFirstBump) {
- // TODO: Factor out/reuse as Occasionally(DBG_IMPORTANT, 2[, occurrences]).
- static int occurrences = 0;
- const auto level = (occurrences++ < 100) ? DBG_IMPORTANT : 2;
- debugs(17, level, "BUG: Lost previously bumped from-Squid connection. Rejecting bumped request.");
- fail(new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al));
- self = nullptr; // refcounted
- return;
- }
-
- // Use pconn to avoid opening a new connection.
- const char *host = NULL;
- if (!serverDestinations[0]->getPeer())
- host = request->url.host();
+ calls.connector = asyncCall(17, 5, "FwdState::noteConnection", HappyConnOpener::CbDialer<FwdState>(&FwdState::noteConnection, this));
- bool bumpThroughPeer = request->flags.sslBumped && serverDestinations[0]->getPeer();
- Comm::ConnectionPointer temp;
- // Avoid pconns after races so that the same client does not suffer twice.
- // This does not increase the total number of connections because we just
- // closed the connection that failed the race. And re-pinning assumes this.
- if (pconnRace != raceHappened && !bumpThroughPeer)
- temp = pconnPop(serverDestinations[0], host);
-
- const bool openedPconn = Comm::IsConnOpen(temp);
- pconnRace = openedPconn ? racePossible : raceImpossible;
-
- // if we found an open persistent connection to use. use it.
- if (openedPconn) {
- serverConn = temp;
- flags.connected_okay = true;
- debugs(17, 3, HERE << "reusing pconn " << serverConnection());
- ++n_tries;
-
- closeHandler = comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
-
- syncWithServerConn(request->url.host());
-
- dispatch();
- return;
+ HttpRequest::Pointer cause = request;
+ const auto cs = new HappyConnOpener(destinations, calls.connector, cause, start_t, n_tries, al);
+ cs->setHost(request->url.host());
+ bool retriable = checkRetriable();
+ if (!retriable && Config.accessList.serverPconnForNonretriable) {
+ ACLFilledChecklist ch(Config.accessList.serverPconnForNonretriable, request, nullptr);
+ ch.al = al;
+ ch.syncAle(request, nullptr);
+ retriable = ch.fastCheck().allowed();
}
-
- // We will try to open a new connection, possibly to the same destination.
- // We reset serverDestinations[0] in case we are using it again because
- // ConnOpener modifies its destination argument.
- serverDestinations[0]->local.port(0);
- serverConn = NULL;
-
-#if URL_CHECKSUM_DEBUG
- entry->mem_obj->checkUrlChecksum();
-#endif
-
- GetMarkingsToServer(request, *serverDestinations[0]);
-
- const AsyncCall::Pointer connector = commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper, this));
- const auto connTimeout = connectingTimeout(serverDestinations[0]);
- const auto cs = new Comm::ConnOpener(serverDestinations[0], connector, connTimeout);
- if (host)
- cs->setHost(host);
- ++n_tries;
+ cs->setRetriable(retriable);
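+ // Avoid reusing a pconn right after a pconn race so that the same client does not suffer twice.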
+ cs->allowPersistent(pconnRace != raceHappened);
+ destinations->notificationPending = true; // start() is async
+ connOpener = cs;
AsyncJob::Start(cs);
}
void
FwdState::usePinned()
{
- // we only handle pinned destinations; others are handled by connectStart()
- assert(!serverDestinations.empty());
- assert(!serverDestinations[0]);
-
const auto connManager = request->pinnedConnection();
debugs(17, 7, "connection manager: " << connManager);
return;
}
- serverConn = temp;
- flags.connected_okay = true;
++n_tries;
request->flags.pinned = true;
if (connManager->pinnedAuth())
request->flags.auth = true;
- closeHandler = comm_add_close_handler(temp->fd, fwdServerClosedWrapper, this);
-
- syncWithServerConn(connManager->pinning.host);
-
// the server may close the pinned connection before this request
- pconnRace = racePossible;
+ const auto reused = true;
+ syncWithServerConn(temp, connManager->pinning.host, reused);
+
dispatch();
}
EBIT_SET(entry->flags, ENTRY_DISPATCHED);
+ flags.connected_okay = true;
+
netdbPingSite(request->url.host());
/* Retrieves remote server TOS or MARK value, and stores it as part of the
if (request->bodyNibbled())
return 0;
- if (serverDestinations.size() <= 1 && !PeerSelectionInitiator::subscribed) {
- // NP: <= 1 since total count includes the recently failed one.
+ if (destinations->empty() && !PeerSelectionInitiator::subscribed) {
debugs(17, 3, HERE << "No alternative forwarding paths left");
return 0;
}
return reforwardableStatus(s);
}
-/**
- * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
- * on whether this is a validation request. RFC 2616 says that we MUST reply
- * with "504 Gateway Timeout" if validation fails and cached reply has
- * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
- */
-ErrorState *
-FwdState::makeConnectingError(const err_type type) const
-{
- return new ErrorState(type, request->flags.needValidation ?
- Http::scGatewayTimeout : Http::scServiceUnavailable, request, al);
-}
-
static void
fwdStats(StoreEntry * s)
{
/* NOTREACHED */
}
/**
* Decide where details need to be gathered to correctly describe a persistent connection.
* What is needed:
* - the address/port details about this link
* - domain name of server at other end of this link (either peer or requested host)
*/
void
FwdState::pconnPush(Comm::ConnectionPointer &conn, const char *domain)
{
if (conn->getPeer()) {
fwdPconnPool->push(conn, NULL);
} else {
fwdPconnPool->push(conn, domain);
}
}
-
-Comm::ConnectionPointer
-FwdState::pconnPop(const Comm::ConnectionPointer &dest, const char *domain)
-{
- bool retriable = checkRetriable();
- if (!retriable && Config.accessList.serverPconnForNonretriable) {
- ACLFilledChecklist ch(Config.accessList.serverPconnForNonretriable, request, NULL);
- ch.al = al;
- ch.syncAle(request, nullptr);
- retriable = ch.fastCheck().allowed();
- }
- // always call shared pool first because we need to close an idle
- // connection there if we have to use a standby connection.
- Comm::ConnectionPointer conn = fwdPconnPool->pop(dest, domain, retriable);
- if (!Comm::IsConnOpen(conn)) {
- // either there was no pconn to pop or this is not a retriable xaction
- if (CachePeer *peer = dest->getPeer()) {
- if (peer->standby.pool)
- conn = peer->standby.pool->pop(dest, domain, true);
- }
- }
- return conn; // open, closed, or nil
-}
-
void
FwdState::initModule()
{
}
}
-tos_t
-GetTosToServer(HttpRequest * request)
+/// \returns the TOS value that should be set on the to-peer connection
+static tos_t
+GetTosToServer(HttpRequest * request, Comm::Connection &conn)
{
+ if (!Ip::Qos::TheConfig.tosToServer)
+ return 0;
+
ACLFilledChecklist ch(NULL, request, NULL);
+ ch.dst_peer_name = conn.getPeer() ? conn.getPeer()->name : nullptr;
+ ch.dst_addr = conn.remote;
return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
}
-nfmark_t
-GetNfmarkToServer(HttpRequest * request)
+/// \returns the Netfilter mark that should be set on the to-peer connection
+static nfmark_t
+GetNfmarkToServer(HttpRequest * request, Comm::Connection &conn)
{
+ if (!Ip::Qos::TheConfig.nfmarkToServer)
+ return 0;
+
ACLFilledChecklist ch(NULL, request, NULL);
+ ch.dst_peer_name = conn.getPeer() ? conn.getPeer()->name : nullptr;
+ ch.dst_addr = conn.remote;
const auto mc = aclFindNfMarkConfig(Ip::Qos::TheConfig.nfmarkToServer, &ch);
return mc.mark;
}
GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
{
// Get the server side TOS and Netfilter mark to be set on the connection.
- if (Ip::Qos::TheConfig.isAclTosActive()) {
- conn.tos = GetTosToServer(request);
- debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos));
- }
+ conn.tos = GetTosToServer(request, conn);
+ conn.nfmark = GetNfmarkToServer(request, conn);
+ debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos) << " netfilter mark " << conn.nfmark);
+}
-#if SO_MARK && USE_LIBCAP
- conn.nfmark = GetNfmarkToServer(request);
- debugs(17, 3, "from " << conn.local << " netfilter mark " << conn.nfmark);
-#else
- conn.nfmark = 0;
-#endif
+void
+ResetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
+{
+ GetMarkingsToServer(request, conn);
+
+ // TODO: Avoid these calls if markings have not changed.
+ if (conn.tos)
+ Ip::Qos::setSockTos(&conn, conn.tos);
+ if (conn.nfmark)
+ Ip::Qos::setSockNfmark(&conn, conn.nfmark);
}
#ifndef SQUID_FORWARD_H
#define SQUID_FORWARD_H
+#include "base/CbcPointer.h"
#include "base/RefCount.h"
+#include "base/forward.h"
#include "clients/forward.h"
#include "comm.h"
#include "comm/Connection.h"
+#include "comm/ConnOpener.h"
#include "err_type.h"
#include "fde.h"
#include "http/StatusCode.h"
class AccessLogEntry;
typedef RefCount<AccessLogEntry> AccessLogEntryPointer;
-class PconnPool;
-typedef RefCount<PconnPool> PconnPoolPointer;
class ErrorState;
class HttpRequest;
+class PconnPool;
+class ResolvedPeers;
+typedef RefCount<ResolvedPeers> ResolvedPeersPointer;
+
+class HappyConnOpener;
+typedef CbcPointer<HappyConnOpener> HappyConnOpenerPointer;
+class HappyConnOpenerAnswer;
#if USE_OPENSSL
namespace Ssl
};
#endif
-/**
- * Returns the TOS value that we should be setting on the connection
- * to the server, based on the ACL.
- */
-tos_t GetTosToServer(HttpRequest * request);
-
-/**
- * Returns the Netfilter mark value that we should be setting on the
- * connection to the server, based on the ACL.
- */
-nfmark_t GetNfmarkToServer(HttpRequest * request);
-
/// Sets initial TOS value and Netfilter for the future outgoing connection.
+/// Updates the given Connection object, not the future transport connection.
void GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn);
+/// Recomputes and applies the TOS value and Netfilter mark to the outgoing connection.
+/// Updates both the given Connection object and the transport connection.
+void ResetMarkingsToServer(HttpRequest *, Comm::Connection &);
+
class HelperReply;
class FwdState: public RefCountable, public PeerSelectionInitiator
void serverClosed(int fd);
void connectStart();
- void connectDone(const Comm::ConnectionPointer & conn, Comm::Flag status, int xerrno);
- void connectTimeout(int fd);
bool checkRetry();
bool checkRetriable();
void dispatch();
- /// Pops a connection from connection pool if available. If not
- /// checks the peer stand-by connection pool for available connection.
- Comm::ConnectionPointer pconnPop(const Comm::ConnectionPointer &dest, const char *domain);
+
void pconnPush(Comm::ConnectionPointer & conn, const char *domain);
bool dontRetry() { return flags.dont_retry; }
virtual void noteDestination(Comm::ConnectionPointer conn) override;
virtual void noteDestinationsEnd(ErrorState *selectionError) override;
+ void noteConnection(HappyConnOpenerAnswer &);
+
#if STRICT_ORIGINAL_DST
void selectPeerForIntercepted();
#endif
/// stops monitoring server connection for closure and updates pconn stats
void closeServerConnection(const char *reason);
- void syncWithServerConn(const char *host);
+ void syncWithServerConn(const Comm::ConnectionPointer &server, const char *host, const bool reused);
void syncHierNote(const Comm::ConnectionPointer &server, const char *host);
/// whether we have used up all permitted forwarding attempts
/// \returns the time left for this connection to become connected or 1 second if it is less than one second left
time_t connectingTimeout(const Comm::ConnectionPointer &conn) const;
+ /// whether we are waiting for HappyConnOpener
+ /// same as calls.connector but may differ from connOpener.valid()
+ bool opening() const { return connOpener.set(); }
+
+ void cancelOpening(const char *reason);
+
+ void notifyConnOpener();
+
public:
StoreEntry *entry;
HttpRequest *request;
time_t start_t;
int n_tries; ///< the number of forwarding attempts so far
+ // AsyncCalls which we set and may need cancelling.
+ struct {
+ AsyncCall::Pointer connector; ///< a call linking us to the HappyConnOpener producing serverConn.
+ } calls;
+
struct {
bool connected_okay; ///< TCP link ever opened properly. This affects retry of POST,PUT,CONNECT,etc
bool dont_retry;
bool forward_completed;
+ bool destinationsFound; ///< at least one candidate path found
} flags;
- /** connections to open, in order, until successful */
- Comm::ConnectionList serverDestinations;
-
+ HappyConnOpenerPointer connOpener; ///< current connection opening job
+ ResolvedPeersPointer destinations; ///< paths for forwarding the request
Comm::ConnectionPointer serverConn; ///< a successfully opened connection to a server.
AsyncCall::Pointer closeHandler; ///< The serverConn close handler
void getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn);
+/// a collection of previously used persistent Squid-to-peer HTTP(S) connections
+extern PconnPool *fwdPconnPool;
+
#endif /* SQUID_FORWARD_H */
--- /dev/null
+/*
+ * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#include "squid.h"
+#include "AccessLogEntry.h"
+#include "CachePeer.h"
+#include "FwdState.h"
+#include "errorpage.h"
+#include "HappyConnOpener.h"
+#include "HttpRequest.h"
+#include "ip/QosConfig.h"
+#include "neighbors.h"
+#include "pconn.h"
+#include "PeerPoolMgr.h"
+#include "ResolvedPeers.h"
+#include "SquidConfig.h"
+
+CBDATA_CLASS_INIT(HappyConnOpener);
+
+// HappyOrderEnforcer optimizes enforcement of the "pause before opening a spare
+// connection" requirements. Its inefficient alternative would add hundreds of
+// concurrent events to the Squid event queue in many busy configurations, one
+// concurrent event per concurrent HappyConnOpener job.
+//
+// EventScheduler::schedule() uses linear search to find the right place for a
+// new event; having hundreds of concurrent events is prohibitively expensive.
+// Both alternatives may have a comparably high rate of eventAdd() calls, but
+// HappyOrderEnforcer usually schedules the first or second event (as opposed to
+// events that would be fired after hundreds of already scheduled events, making
+// that linear search a lot longer).
+//
+// EventScheduler::cancel() also uses linear search. HappyOrderEnforcer does not
+// need to cancel scheduled events, while its inefficient alternative may cancel
+// events at a rate comparable to the high eventAdd() rate -- many events would
+// be scheduled in vain because external factors would speed up (or make
+// unnecessary) spare connection attempts, canceling the wait.
+//
+// This optimization is possible only where each job needs to pause for the same
+// amount of time, creating a naturally ordered list of jobs waiting to be
+// resumed. This is why two HappyOrderEnforcers are needed to efficiently honor
+// both happy_eyeballs_connect_timeout and happy_eyeballs_connect_gap
+// directives.
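+//
+// For example, a burst of jobs started within one happy_eyeballs_connect_timeout
+// window shares a single scheduled NoteWaitOver() event instead of adding one
+// concurrent event per job.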
+
+/// Efficiently drains a FIFO HappyConnOpener queue while delaying each "pop"
+/// event by the time determined by the top element currently in the queue. Its
+/// current cbdata-free implementation assumes static storage duration.
+class HappyOrderEnforcer
+{
+public:
+ /// \param aName names scheduled events, for debugging
+ HappyOrderEnforcer(const char *aName): name(aName) {}
+
+ /// resumes jobs that need resuming (if any)
+ void checkpoint();
+
+ /// starts managing the job's wait; the job should expect a call back
+ void enqueue(HappyConnOpener &);
+
+ /// stops managing the job's wait; cancels the pending callback, if any
+ void dequeue(HappyConnOpener &);
+
+ const char * const name; ///< waiting event name, for debugging
+
+protected:
+ virtual bool readyNow(const HappyConnOpener &) const = 0;
+ virtual AsyncCall::Pointer notify(const CbcPointer<HappyConnOpener> &) = 0;
+
+ bool waiting() const { return waitEnd_ > 0; }
+ bool startedWaiting(const HappyAbsoluteTime lastStart, const int cfgTimeoutMsec) const;
+
+private:
+ static void NoteWaitOver(void *raw);
+ void noteWaitOver();
+
+ HappySpareWaitList jobs_; ///< queued jobs waiting their turn
+ mutable HappyAbsoluteTime waitEnd_ = 0; ///< expected NoteWaitOver() call time (or zero)
+};
+
+std::ostream &operator <<(std::ostream &os, const HappyConnOpenerAnswer &answer)
+{
+ if (answer.error.set())
+ os << "bad ";
+ if (answer.conn)
+ os << (answer.reused ? "reused " : "new ") << answer.conn;
+ if (answer.n_tries != 1)
+ os << " after " << answer.n_tries;
+ return os;
+}
+
+/// enforces happy_eyeballs_connect_timeout
+class PrimeChanceGiver: public HappyOrderEnforcer
+{
+public:
+ PrimeChanceGiver(): HappyOrderEnforcer("happy_eyeballs_connect_timeout enforcement") {}
+
+ /* HappyOrderEnforcer API */
+ virtual bool readyNow(const HappyConnOpener &job) const override;
+
+private:
+ /* HappyOrderEnforcer API */
+ virtual AsyncCall::Pointer notify(const CbcPointer<HappyConnOpener> &) override;
+};
+
+/// enforces happy_eyeballs_connect_gap and happy_eyeballs_connect_limit
+class SpareAllowanceGiver: public HappyOrderEnforcer
+{
+public:
+ SpareAllowanceGiver(): HappyOrderEnforcer("happy_eyeballs_connect_gap/happy_eyeballs_connect_limit enforcement") {}
+
+ /* HappyOrderEnforcer API */
+ virtual bool readyNow(const HappyConnOpener &job) const override;
+
+ /// reacts to HappyConnOpener discovering readyNow() conditions for a spare path
+ /// the caller must attempt to open a spare connection immediately
+ void jobGotInstantAllowance();
+
+ /// reacts to HappyConnOpener getting a spare connection opening result
+ void jobUsedAllowance();
+
+ /// reacts to HappyConnOpener dropping its spare connection allowance
+ void jobDroppedAllowance();
+
+private:
+ /* HappyOrderEnforcer API */
+ virtual AsyncCall::Pointer notify(const CbcPointer<HappyConnOpener> &) override;
+
+ bool concurrencyLimitReached() const;
+ void recordAllowance();
+ void forgetAllowance();
+
+ /// the time of the last noteSpareAllowance() call
+ HappyAbsoluteTime lastAllowanceStart = 0;
+
+ /// the number of noteSpareAllowance() calls not already
+ /// returned via jobUsedAllowance() or jobDroppedAllowance()
+ int concurrencyLevel = 0;
+};
+
+PrimeChanceGiver ThePrimeChanceGiver;
+SpareAllowanceGiver TheSpareAllowanceGiver;
+
+/* HappyOrderEnforcer */
+
+void
+HappyOrderEnforcer::enqueue(HappyConnOpener &job)
+{
+ Must(!job.spareWaiting.callback);
+ jobs_.emplace_back(&job);
+ job.spareWaiting.position = std::prev(jobs_.end());
+}
+
+void
+HappyOrderEnforcer::dequeue(HappyConnOpener &job)
+{
+ if (job.spareWaiting.callback) {
+ job.spareWaiting.callback->cancel("HappyOrderEnforcer::dequeue");
+ job.spareWaiting.callback = nullptr;
+ } else {
+ Must(!jobs_.empty());
+ jobs_.erase(job.spareWaiting.position);
+ }
+}
+
+void
+HappyOrderEnforcer::checkpoint()
+{
+ while (!jobs_.empty()) {
+ if (const auto jobPtr = jobs_.front().valid()) {
+ auto &job = *jobPtr;
+ if (readyNow(job))
+ job.spareWaiting.callback = notify(jobPtr); // and fall through to the next job
+ else
+ break; // the next job cannot be ready earlier (FIFO)
+ }
+ jobs_.pop_front();
+ }
+}
+
+bool
+HappyOrderEnforcer::startedWaiting(const HappyAbsoluteTime lastStart, const int cfgTimeoutMsec) const
+{
+ // Normally, the job would not even be queued if there is no timeout. This
+ // check handles reconfiguration that happened after this job was queued.
+ if (cfgTimeoutMsec <= 0)
+ return false;
+
+ // convert to seconds and adjust for SMP workers to keep aggregated load in
+ // check despite the lack of coordination among workers
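+ // (e.g., a configured 250 msec delay with 4 workers becomes a 1.0 sec local delay)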
+ const auto tout = static_cast<HappyAbsoluteTime>(cfgTimeoutMsec) * Config.workers / 1000.0;
+ const auto newWaitEnd = std::min(lastStart, current_dtime) + tout;
+ if (newWaitEnd <= current_dtime)
+ return false; // no need to wait
+
+ // We cannot avoid event accumulation because calling eventDelete() is
+ // unsafe, but any accumulation will be small because it can only be caused
+ // by hot reconfiguration changes or current time jumps.
+ if (!waiting() || newWaitEnd < waitEnd_) {
+ const auto waitTime = newWaitEnd - current_dtime;
+ eventAdd(name, &HappyOrderEnforcer::NoteWaitOver, const_cast<HappyOrderEnforcer*>(this), waitTime, 0, false);
+ waitEnd_ = newWaitEnd;
+ assert(waiting());
+ }
+
+ return true;
+}
+
+void
+HappyOrderEnforcer::NoteWaitOver(void *raw)
+{
+ assert(raw);
+ static_cast<HappyOrderEnforcer*>(raw)->noteWaitOver();
+}
+
+void
+HappyOrderEnforcer::noteWaitOver()
+{
+ Must(waiting());
+ waitEnd_ = 0;
+ checkpoint();
+}
+
+/* PrimeChanceGiver */
+
+bool
+PrimeChanceGiver::readyNow(const HappyConnOpener &job) const
+{
+ return !startedWaiting(job.primeStart, Config.happyEyeballs.connect_timeout);
+}
+
+AsyncCall::Pointer
+PrimeChanceGiver::notify(const CbcPointer<HappyConnOpener> &job)
+{
+ return CallJobHere(17, 5, job, HappyConnOpener, noteGavePrimeItsChance);
+}
+
+/* SpareAllowanceGiver */
+
+bool
+SpareAllowanceGiver::readyNow(const HappyConnOpener &) const
+{
+ return !concurrencyLimitReached() &&
+ !startedWaiting(lastAllowanceStart, Config.happyEyeballs.connect_gap);
+}
+
+AsyncCall::Pointer
+SpareAllowanceGiver::notify(const CbcPointer<HappyConnOpener> &job)
+{
+ recordAllowance();
+ return CallJobHere(17, 5, job, HappyConnOpener, noteSpareAllowance);
+}
+
+void
+SpareAllowanceGiver::jobGotInstantAllowance()
+{
+ recordAllowance();
+}
+
+void
+SpareAllowanceGiver::jobUsedAllowance()
+{
+ forgetAllowance();
+}
+
+void
+SpareAllowanceGiver::jobDroppedAllowance()
+{
+ // Without happy_eyeballs_connect_gap, lastAllowanceStart does not matter.
+ // Otherwise, the dropped allowance ought to be the last one, and since it
+ // was allowed, we would still observe the gap even if we do not wait now.
+ lastAllowanceStart = 0;
+
+ forgetAllowance();
+}
+
+/// account for the given allowance
+void
+SpareAllowanceGiver::recordAllowance()
+{
+ ++concurrencyLevel;
+ lastAllowanceStart = current_dtime;
+ // not a checkpoint(): no other spare can become ready here
+}
+
+void
+SpareAllowanceGiver::forgetAllowance()
+{
+ Must(concurrencyLevel);
+ --concurrencyLevel;
+ checkpoint();
+}
+
+/// whether opening a spare connection now would violate happy_eyeballs_connect_limit
+bool
+SpareAllowanceGiver::concurrencyLimitReached() const
+{
+ if (Config.happyEyeballs.connect_limit < 0)
+ return false; // no limit
+
+ if (Config.happyEyeballs.connect_limit == 0)
+ return true; // concurrent spares prohibited regardless of spare level
+
+ // adjust for SMP workers to keep aggregated spare level in check despite
+ // the lack of coordination among workers
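+ // (e.g., happy_eyeballs_connect_limit=10 with 5 workers permits at most
+ // 2 concurrent spare attempts per worker)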
+ const auto aggregateLevel = concurrencyLevel * Config.workers;
+ return aggregateLevel >= Config.happyEyeballs.connect_limit;
+}
+
+/* HappyConnOpenerAnswer */
+
+HappyConnOpenerAnswer::~HappyConnOpenerAnswer()
+{
+ // XXX: When multiple copies of an answer exist, this delete in one copy
+ // invalidates error in other copies -- their error.get() returns nil. The
+ // current code "works", but probably only because the initiator gets the
+ // error before any answer copies are deleted. Same in ~EncryptorAnswer.
+ delete error.get();
+}
+
+/* HappyConnOpener */
+
+HappyConnOpener::HappyConnOpener(const ResolvedPeers::Pointer &dests, const AsyncCall::Pointer &aCall, HttpRequest::Pointer &request, const time_t aFwdStart, int tries, const AccessLogEntry::Pointer &anAle):
+ AsyncJob("HappyConnOpener"),
+ fwdStart(aFwdStart),
+ callback_(aCall),
+ destinations(dests),
+ ale(anAle),
+ cause(request),
+ n_tries(tries)
+{
+ assert(destinations);
+ assert(dynamic_cast<Answer*>(callback_->getDialer()));
+}
+
+HappyConnOpener::~HappyConnOpener()
+{
+ safe_free(host_);
+ delete lastError;
+}
+
+void
+HappyConnOpener::setHost(const char *h)
+{
+ safe_free(host_);
+ if (h)
+ host_ = xstrdup(h);
+}
+
+void
+HappyConnOpener::start()
+{
+ destinations->notificationPending = false;
+ checkForNewConnection();
+}
+
+bool
+HappyConnOpener::doneAll() const
+{
+ if (!callback_)
+ return true; // (probably found a good path and) informed the requestor
+
+ // TODO: Expose AsyncCall::canFire() instead so that code like this can
+ // detect gone initiators without the need to explicitly cancel callbacks.
+ if (callback_->canceled())
+ return true; // the requestor is gone or has lost interest
+
+ if (prime || spare)
+ return false;
+
+ if (ranOutOfTimeOrAttempts())
+ return true; // trying new connection paths prohibited
+
+ if (destinations->empty() && destinations->destinationsFinalized)
+ return true; // there are no more paths to try
+
+ return false;
+}
+
+void
+HappyConnOpener::swanSong()
+{
+ debugs(17, 5, this);
+
+ if (callback_ && !callback_->canceled())
+ sendFailure();
+
+ if (spareWaiting)
+ cancelSpareWait("HappyConnOpener object destructed");
+
+ // TODO: Find an automated, faster way to kill no-longer-needed jobs.
+
+ if (prime) {
+ if (prime.connector)
+ prime.connector->cancel("HappyConnOpener object destructed");
+ prime.clear();
+ }
+
+ if (spare) {
+ if (spare.connector)
+ spare.connector->cancel("HappyConnOpener object destructed");
+ spare.clear();
+ if (gotSpareAllowance) {
+ TheSpareAllowanceGiver.jobDroppedAllowance();
+ gotSpareAllowance = false;
+ }
+ }
+
+ AsyncJob::swanSong();
+}
+
+const char *
+HappyConnOpener::status() const
+{
+ static MemBuf buf;
+ buf.reset();
+
+ buf.append(" [", 2);
+ if (stopReason)
+ buf.appendf("Stopped, reason:%s", stopReason);
+ if (prime) {
+ if (prime.path && prime.path->isOpen())
+ buf.appendf(" prime FD %d", prime.path->fd);
+ else if (prime.connector)
+ buf.appendf(" prime call%ud", prime.connector->id.value);
+ }
+ if (spare) {
+ if (spare.path && spare.path->isOpen())
+ buf.appendf(" spare FD %d", spare.path->fd);
+ else if (spare.connector)
+ buf.appendf(" spare call%ud", spare.connector->id.value);
+ }
+ if (n_tries)
+ buf.appendf(" tries %d", n_tries);
+ buf.appendf(" %s%u]", id.prefix(), id.value);
+ buf.terminate();
+
+ return buf.content();
+}
+
+/// Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
+/// on whether this is a validation request. RFC 7234 section 5.2.2 says that
+/// we MUST reply with "504 Gateway Timeout" if validation fails and cached
+/// reply has proxy-revalidate, must-revalidate or s-maxage Cache-Control
+/// directive.
+ErrorState *
+HappyConnOpener::makeError(const err_type type) const
+{
+ const auto statusCode = cause->flags.needValidation ?
+ Http::scGatewayTimeout : Http::scServiceUnavailable;
+ return new ErrorState(type, statusCode, cause.getRaw(), ale);
+}
+
+/// \returns pre-filled Answer if the initiator needs an answer (or nil)
+HappyConnOpener::Answer *
+HappyConnOpener::futureAnswer(const Comm::ConnectionPointer &conn)
+{
+ if (callback_ && !callback_->canceled()) {
+ const auto answer = dynamic_cast<Answer *>(callback_->getDialer());
+ assert(answer);
+ answer->conn = conn;
+ answer->n_tries = n_tries;
+ return answer;
+ }
+ return nullptr;
+}
+
+/// send a successful result to the initiator (if it still needs an answer)
+void
+HappyConnOpener::sendSuccess(const Comm::ConnectionPointer &conn, bool reused, const char *connKind)
+{
+ debugs(17, 4, connKind << ": " << conn);
+ if (auto *answer = futureAnswer(conn)) {
+ answer->reused = reused;
+ assert(!answer->error);
+ ScheduleCallHere(callback_);
+ }
+ callback_ = nullptr;
+}
+
+/// inform the initiator about our failure to connect (if needed)
+void
+HappyConnOpener::sendFailure()
+{
+ debugs(17, 3, lastFailedConnection);
+ if (auto *answer = futureAnswer(lastFailedConnection)) {
+ if (!lastError)
+ lastError = makeError(ERR_GATEWAY_FAILURE);
+ answer->error = lastError;
+ assert(answer->error.valid());
+ lastError = nullptr; // the answer owns it now
+ ScheduleCallHere(callback_);
+ }
+ callback_ = nullptr;
+}
+
+void
+HappyConnOpener::noteCandidatesChange()
+{
+ destinations->notificationPending = false;
+ checkForNewConnection();
+}
+
+/// starts opening (or reusing) a connection to the given destination
+void
+HappyConnOpener::startConnecting(Attempt &attempt, Comm::ConnectionPointer &dest)
+{
+ Must(!attempt.path);
+ Must(!attempt.connector);
+ Must(dest);
+
+ const auto bumpThroughPeer = cause->flags.sslBumped && dest->getPeer();
+ const auto canReuseOld = allowPconn_ && !bumpThroughPeer;
+ if (!canReuseOld || !reuseOldConnection(dest))
+ openFreshConnection(attempt, dest);
+}
+
+/// reuses a persistent connection to the given destination (if possible)
+/// \returns true if and only if reuse was possible
+/// must be called via startConnecting()
+bool
+HappyConnOpener::reuseOldConnection(const Comm::ConnectionPointer &dest)
+{
+ assert(allowPconn_);
+
+ if (const auto pconn = fwdPconnPool->pop(dest, host_, retriable_)) {
+ ++n_tries;
+ sendSuccess(pconn, true, "reused connection");
+ return true;
+ }
+
+ return false;
+}
+
+/// opens a fresh connection to the given destination
+/// must be called via startConnecting()
+void
+HappyConnOpener::openFreshConnection(Attempt &attempt, Comm::ConnectionPointer &dest)
+{
+ GetMarkingsToServer(cause.getRaw(), *dest);
+
+ // ConnOpener modifies its destination argument so we reset the source port
+ // in case we are reusing the destination already used by our predecessor.
+ dest->local.port(0);
+ ++n_tries;
+
+ typedef CommCbMemFunT<HappyConnOpener, CommConnectCbParams> Dialer;
+ AsyncCall::Pointer callConnect = JobCallback(48, 5, Dialer, this, HappyConnOpener::connectDone);
+ const time_t connTimeout = dest->connectTimeout(fwdStart);
+ Comm::ConnOpener *cs = new Comm::ConnOpener(dest, callConnect, connTimeout);
+ if (!dest->getPeer())
+ cs->setHost(host_);
+
+ attempt.path = dest;
+ attempt.connector = callConnect;
+
+ AsyncJob::Start(cs);
+}
+
+/// called by Comm::ConnOpener objects after a prime or spare connection attempt
+/// completes (successfully or not)
+void
+HappyConnOpener::connectDone(const CommConnectCbParams ¶ms)
+{
+ Must(params.conn);
+ const bool itWasPrime = (params.conn == prime.path);
+ const bool itWasSpare = (params.conn == spare.path);
+ Must(itWasPrime != itWasSpare);
+
+ if (itWasPrime) {
+ prime.clear();
+ } else {
+ spare.clear();
+ if (gotSpareAllowance) {
+ TheSpareAllowanceGiver.jobUsedAllowance();
+ gotSpareAllowance = false;
+ }
+ }
+
+ const char *what = itWasPrime ? "new prime connection" : "new spare connection";
+ if (params.flag == Comm::OK) {
+ sendSuccess(params.conn, false, what);
+ return;
+ }
+
+ debugs(17, 8, what << " failed: " << params.conn);
+ if (const auto peer = params.conn->getPeer())
+ peerConnectFailed(peer);
+ params.conn->close(); // TODO: Comm::ConnOpener should do this instead.
+
+ // remember the last failure (we forward it if we cannot connect anywhere)
+ lastFailedConnection = params.conn;
+ delete lastError;
+ lastError = nullptr; // in case makeError() throws
+ lastError = makeError(ERR_CONNECT_FAIL);
+ lastError->xerrno = params.xerrno;
+
+ if (spareWaiting)
+ updateSpareWaitAfterPrimeFailure();
+
+ checkForNewConnection();
+}
+
+/// reacts to a prime attempt failure
+void
+HappyConnOpener::updateSpareWaitAfterPrimeFailure()
+{
+ Must(currentPeer);
+ Must(!prime);
+ Must(spareWaiting);
+
+ if (destinations->doneWithPrimes(*currentPeer)) {
+ cancelSpareWait("all primes failed");
+ ignoreSpareRestrictions = true;
+ return; // checkForNewConnection() will open a spare connection ASAP
+ }
+
+ if (spareWaiting.toGivePrimeItsChance)
+ stopGivingPrimeItsChance();
+
+ // may still be spareWaiting.forSpareAllowance or
+ // may still be spareWaiting.forPrimesToFail
+}
+
+/// called when the prime attempt has used up its chance for a solo victory
+void
+HappyConnOpener::stopGivingPrimeItsChance()
+{
+ Must(spareWaiting.toGivePrimeItsChance);
+ spareWaiting.toGivePrimeItsChance = false;
+ ThePrimeChanceGiver.dequeue(*this);
+}
+
+/// called when the spare attempt should no longer obey spare connection limits
+void
+HappyConnOpener::stopWaitingForSpareAllowance()
+{
+ Must(spareWaiting.forSpareAllowance);
+ spareWaiting.forSpareAllowance = false;
+
+ if (spareWaiting.callback)
+ TheSpareAllowanceGiver.jobDroppedAllowance();
+ TheSpareAllowanceGiver.dequeue(*this); // clears spareWaiting.callback
+}
+
+/// stops waiting for the right conditions to open a spare connection
+void
+HappyConnOpener::cancelSpareWait(const char *reason)
+{
+ debugs(17, 5, "because " << reason);
+ Must(spareWaiting);
+
+ if (spareWaiting.toGivePrimeItsChance)
+ stopGivingPrimeItsChance();
+ else if (spareWaiting.forSpareAllowance)
+ stopWaitingForSpareAllowance();
+
+ spareWaiting.clear();
+}
+
+/** Called when an external event changes initiator interest, destinations,
+ * prime, spare, or spareWaiting. Leaves HappyConnOpener in one of these
+ * (mutually exclusive beyond the exceptional state #0) "stable" states:
+ *
+ * 0. Exceptional termination: done()
+ * 1. Processing a single peer: currentPeer
+ * 1.1. Connecting: prime || spare
+ * 1.2. Waiting for spare gap and/or paths: !prime && !spare
+ * 2. Waiting for a new peer: destinations->empty() && !destinations->destinationsFinalized && !currentPeer
+ * 3. Finished: destinations->empty() && destinations->destinationsFinalized && !currentPeer
+ */
+void
+HappyConnOpener::checkForNewConnection()
+{
+ debugs(17, 7, *destinations);
+
+ // The order of the top-level if-statements below is important.
+
+ if (done())
+ return; // bail ASAP to minimize our waste and others delays (state #0)
+
+ if (ranOutOfTimeOrAttempts()) {
+ Must(currentPeer); // or we would be done() already
+ return; // will continue working (state #1.1)
+ }
+
+ // update stale currentPeer and/or stale spareWaiting
+ if (currentPeer && !spare && !prime && destinations->doneWithPeer(*currentPeer)) {
+ debugs(17, 7, "done with peer; " << *currentPeer);
+ if (spareWaiting.forNewPeer)
+ cancelSpareWait("done with peer");
+ else
+ Must(!spareWaiting);
+
+ currentPeer = nullptr;
+ ignoreSpareRestrictions = false;
+ Must(!gotSpareAllowance);
+ } else if (currentPeer && !spareWaiting.forNewPeer && spareWaiting && destinations->doneWithSpares(*currentPeer)) {
+ cancelSpareWait("no spares are coming");
+ spareWaiting.forNewPeer = true;
+ }
+
+ // open a new prime and/or a new spare connection if needed
+ if (!destinations->empty()) {
+ if (!currentPeer) {
+ currentPeer = destinations->extractFront();
+ Must(currentPeer);
+ debugs(17, 7, "new peer " << *currentPeer);
+ primeStart = current_dtime;
+ startConnecting(prime, currentPeer);
+ maybeGivePrimeItsChance();
+ Must(prime); // entering state #1.1
+ } else {
+ if (!prime)
+ maybeOpenAnotherPrimeConnection(); // may make destinations empty()
+ }
+
+ if (!spare && !spareWaiting)
+ maybeOpenSpareConnection(); // may make destinations empty()
+
+ Must(currentPeer);
+ }
+
+ if (currentPeer) {
+ debugs(17, 7, "working on " << *currentPeer);
+ return; // remaining in state #1.1 or #1.2
+ }
+
+ if (!destinations->destinationsFinalized) {
+ debugs(17, 7, "waiting for more peers");
+ return; // remaining in state #2
+ }
+
+ debugs(17, 7, "done; no more peers");
+ Must(doneAll());
+ // entering state #3
+}
+
+void
+HappyConnOpener::noteGavePrimeItsChance()
+{
+ Must(spareWaiting.toGivePrimeItsChance);
+ spareWaiting.clear();
+ checkForNewConnection();
+}
+
+void
+HappyConnOpener::noteSpareAllowance()
+{
+ Must(spareWaiting.forSpareAllowance);
+ spareWaiting.clear();
+
+ if (ranOutOfTimeOrAttempts()) {
+ TheSpareAllowanceGiver.jobDroppedAllowance();
+ return; // will quit or continue working on prime
+ }
+
+ Must(!gotSpareAllowance);
+ gotSpareAllowance = true;
+
+ auto dest = destinations->extractSpare(*currentPeer); // ought to succeed
+ startConnecting(spare, dest);
+}
+
+/// starts a prime connection attempt if possible or does nothing otherwise
+void
+HappyConnOpener::maybeOpenAnotherPrimeConnection()
+{
+ Must(currentPeer);
+ if (auto dest = destinations->extractPrime(*currentPeer))
+ startConnecting(prime, dest);
+ // else wait for more prime paths or their exhaustion
+}
+
+/// starts waiting for a spare permission (if spare connections may be possible)
+/// or does nothing (otherwise)
+void
+HappyConnOpener::maybeGivePrimeItsChance()
+{
+ Must(currentPeer);
+ Must(prime);
+ Must(!spare);
+ Must(!spareWaiting);
+
+ if (destinations->doneWithSpares(*currentPeer)) {
+ debugs(17, 7, "no spares for " << *currentPeer);
+ spareWaiting.forNewPeer = true;
+ return;
+ }
+
+ if (Config.happyEyeballs.connect_limit == 0) {
+ debugs(17, 7, "concurrent spares are prohibited");
+ spareWaiting.forPrimesToFail = true;
+ return;
+ }
+
+ if (ThePrimeChanceGiver.readyNow(*this)) {
+ debugs(17, 7, "no happy_eyeballs_connect_timeout");
+ return;
+ }
+
+ ThePrimeChanceGiver.enqueue(*this);
+ spareWaiting.toGivePrimeItsChance = true;
+ // wait for a prime connect result or noteGavePrimeItsChance()
+}
+
+/// if possible, starts a spare connection attempt
+void
+HappyConnOpener::maybeOpenSpareConnection()
+{
+ Must(currentPeer);
+ Must(!spare);
+ Must(!spareWaiting);
+ Must(!gotSpareAllowance);
+
+ if (ranOutOfTimeOrAttempts())
+ return; // will quit or continue working on prime
+
+ // jobGotInstantAllowance() call conditions below rely on the readyNow() check here
+ if (!ignoreSpareRestrictions && // we have to honor spare restrictions
+ !TheSpareAllowanceGiver.readyNow(*this) && // all new spares must wait
+ destinations->haveSpare(*currentPeer)) { // and we do have a new spare
+ TheSpareAllowanceGiver.enqueue(*this);
+ spareWaiting.forSpareAllowance = true;
+ return;
+ }
+
+ if (auto dest = destinations->extractSpare(*currentPeer)) {
+
+ if (!ignoreSpareRestrictions) {
+ TheSpareAllowanceGiver.jobGotInstantAllowance();
+ gotSpareAllowance = true;
+ }
+
+ startConnecting(spare, dest);
+ return;
+ }
+
+ // wait for more spare paths or their exhaustion
+}
+
+/// Check for maximum connection tries and forwarding time restrictions
+bool
+HappyConnOpener::ranOutOfTimeOrAttempts() const
+{
+ if (ranOutOfTimeOrAttemptsEarlier_)
+ return true;
+
+ if (n_tries >= Config.forward_max_tries) {
+ debugs(17, 5, "maximum allowed tries exhausted");
+ ranOutOfTimeOrAttemptsEarlier_ = "maximum tries";
+ return true;
+ }
+
+ if (FwdState::ForwardTimeout(fwdStart) <= 0) {
+ debugs(17, 5, "forwarding timeout");
+ ranOutOfTimeOrAttemptsEarlier_ = "forwarding timeout";
+ return true;
+ }
+
+ return false;
+}
+
--- /dev/null
+/*
+ * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#ifndef SQUID_HAPPYCONNOPENER_H
+#define SQUID_HAPPYCONNOPENER_H
+#include "base/RefCount.h"
+#include "comm.h"
+#include "comm/Connection.h"
+#include "comm/ConnOpener.h"
+#include "http/forward.h"
+#include "log/forward.h"
+
+#include <iosfwd>
+
+class HappyConnOpener;
+class HappyOrderEnforcer;
+class JobGapEnforcer;
+class ResolvedPeers;
+typedef RefCount<ResolvedPeers> ResolvedPeersPointer;
+
+/// A FIFO queue of HappyConnOpener jobs waiting to open a spare connection.
+typedef std::list< CbcPointer<HappyConnOpener> > HappySpareWaitList;
+
+/// absolute time in fractional seconds; compatible with current_dtime
+typedef double HappyAbsoluteTime;
+
+/// keeps track of HappyConnOpener spare track waiting state
+class HappySpareWait {
+public:
+ explicit operator bool() const { return toGivePrimeItsChance || forSpareAllowance || forPrimesToFail || forNewPeer; }
+
+ /// restores default-constructed state
+ /// nullifies but does not cancel the callback
+ void clear() { *this = HappySpareWait(); }
+
+ /// a pending noteGavePrimeItsChance() or noteSpareAllowance() call
+ AsyncCall::Pointer callback;
+
+ /// location on the toGivePrimeItsChance or forSpareAllowance wait list
+ /// invalidated when the callback is set
+ HappySpareWaitList::iterator position;
+
+ /* The following four fields represent mutually exclusive wait reasons. */
+
+ /// Honoring happy_eyeballs_connect_timeout (once per currentPeer).
+ /// A prime connection failure ends this wait.
+ bool toGivePrimeItsChance = false;
+
+ /// Honors happy_eyeballs_connect_gap and positive happy_eyeballs_connect_limit
+ /// (one allowance per spare path).
+ /// Does not start until there is a new spare path to try.
+ /// Prime exhaustion ends this wait (see ignoreSpareRestrictions).
+ bool forSpareAllowance = false;
+
+ /// Honors zero happy_eyeballs_connect_limit.
+ /// Prime exhaustion ends this wait (see ignoreSpareRestrictions).
+ bool forPrimesToFail = false;
+
+ /// The current peer has no spares left to try.
+ /// Prime exhaustion ends this wait (by changing currentPeer).
+ bool forNewPeer = false;
+};
+
+/// Final result (an open connection or an error) sent to the job initiator.
+class HappyConnOpenerAnswer
+{
+public:
+ ~HappyConnOpenerAnswer();
+
+ /// whether HappyConnOpener succeeded, returning a usable connection
+ bool success() const { return !error; }
+
+ /// on success: an open, ready-to-use Squid-to-peer connection
+ /// on failure: either a closed failed Squid-to-peer connection or nil
+ Comm::ConnectionPointer conn;
+
+ // answer recipients must clear the error member in order to keep its info
+ // XXX: We should refcount ErrorState instead of cbdata-protecting it.
+ CbcPointer<ErrorState> error; ///< problem details (nil on success)
+
+ /// The total number of attempts to establish a connection. Includes any
+ /// failed attempts and [always successful] persistent connection reuse.
+ int n_tries = 0;
+
+ /// whether conn was open earlier, by/for somebody else
+ bool reused = false;
+};
+
+/// reports Answer details (for AsyncCall parameter debugging)
+std::ostream &operator <<(std::ostream &, const HappyConnOpenerAnswer &);
+
+/// A TCP connection opening algorithm based on Happy Eyeballs (RFC 8305).
+/// Maintains two concurrent connection opening tracks: prime and spare.
+/// Shares ResolvedPeers list with the job initiator.
+class HappyConnOpener: public AsyncJob
+{
+ CBDATA_CHILD(HappyConnOpener);
+public:
+ typedef HappyConnOpenerAnswer Answer;
+
+ /// AsyncCall dialer for our callback. Gives us access to callback Answer.
+ template <class Initiator>
+ class CbDialer: public CallDialer, public Answer {
+ public:
+ // initiator method to receive our answer
+ typedef void (Initiator::*Method)(Answer &);
+
+ CbDialer(Method method, Initiator *initiator): initiator_(initiator), method_(method) {}
+ virtual ~CbDialer() = default;
+
+ /* CallDialer API */
+ bool canDial(AsyncCall &) { return initiator_.valid(); }
+ void dial(AsyncCall &) { ((*initiator_).*method_)(*this); }
+ virtual void print(std::ostream &os) const override {
+ os << '(' << static_cast<const Answer&>(*this) << ')';
+ }
+
+ private:
+ CbcPointer<Initiator> initiator_; ///< object to deliver the answer to
+ Method method_; ///< initiator_ method to call with the answer
+ };
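+
+ // Usage sketch (Initiator stands for a hypothetical job-initiating class;
+ // TunnelStateData and FwdState in this patch follow the same pattern):
+ // wrap an Initiator method in a CbDialer, pass the resulting call to the
+ // HappyConnOpener constructor, and keep the call pointer so the job can be
+ // cancelled later:
+ //
+ //   calls.connector = asyncCall(17, 5, "Initiator::noteConnection",
+ //       HappyConnOpener::CbDialer<Initiator>(&Initiator::noteConnection, this));
+ //   const auto cs = new HappyConnOpener(destinations, calls.connector,
+ //       request, fwdStart, nTries, al);
+ //   AsyncJob::Start(cs);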
+
+public:
+ HappyConnOpener(const ResolvedPeersPointer &, const AsyncCall::Pointer &, HttpRequestPointer &, const time_t aFwdStart, int tries, const AccessLogEntryPointer &al);
+ virtual ~HappyConnOpener() override;
+
+ /// configures reuse of old connections
+ void allowPersistent(bool permitted) { allowPconn_ = permitted; }
+
+ /// configures whether the request may be retried later if things go wrong
+ void setRetriable(bool retriable) { retriable_ = retriable; }
+
+ /// configures the origin server domain name
+ void setHost(const char *);
+
+ /// reacts to changes in the destinations list
+ void noteCandidatesChange();
+
+ /// reacts to expired happy_eyeballs_connect_timeout
+ void noteGavePrimeItsChance();
+
+ /// reacts to satisfying happy_eyeballs_connect_gap and happy_eyeballs_connect_limit
+ void noteSpareAllowance();
+
+ /// the start of the first connection attempt for the currentPeer
+ HappyAbsoluteTime primeStart = 0;
+
+private:
+ /// a connection opening attempt in progress (or falsy)
+ class Attempt {
+ public:
+ explicit operator bool() const { return static_cast<bool>(path); }
+ void clear() { path = nullptr; connector = nullptr; }
+
+ Comm::ConnectionPointer path; ///< the destination we are connecting to
+ AsyncCall::Pointer connector; ///< our Comm::ConnOpener callback
+ };
+
+ /* AsyncJob API */
+ virtual void start() override;
+ virtual bool doneAll() const override;
+ virtual void swanSong() override;
+ virtual const char *status() const override;
+
+ void maybeOpenAnotherPrimeConnection();
+
+ void maybeGivePrimeItsChance();
+ void stopGivingPrimeItsChance();
+ void stopWaitingForSpareAllowance();
+ void maybeOpenSpareConnection();
+
+ void startConnecting(Attempt &, Comm::ConnectionPointer &);
+ void openFreshConnection(Attempt &, Comm::ConnectionPointer &);
+ bool reuseOldConnection(const Comm::ConnectionPointer &);
+
+ void connectDone(const CommConnectCbParams &);
+
+ void checkForNewConnection();
+
+ void updateSpareWaitAfterPrimeFailure();
+
+ void cancelSpareWait(const char *reason);
+
+ bool ranOutOfTimeOrAttempts() const;
+
+ ErrorState *makeError(const err_type type) const;
+ Answer *futureAnswer(const Comm::ConnectionPointer &);
+ void sendSuccess(const Comm::ConnectionPointer &conn, bool reused, const char *connKind);
+ void sendFailure();
+
+ const time_t fwdStart; ///< requestor start time
+
+ AsyncCall::Pointer callback_; ///< handler to be called on connection completion.
+
+ /// Candidate paths. Shared with the initiator. May not be finalized yet.
+ ResolvedPeersPointer destinations;
+
+ /// current connection opening attempt on the prime track (if any)
+ Attempt prime;
+
+ /// current connection opening attempt on the spare track (if any)
+ Attempt spare;
+
+ /// CachePeer and IP address family of the peer we are trying to connect
+ /// to now (or, if we are just waiting for paths to a new peer, nil)
+ Comm::ConnectionPointer currentPeer;
+
+ /// preconditions for an attempt to open a spare connection
+ HappySpareWait spareWaiting;
+ friend class HappyOrderEnforcer;
+
+ AccessLogEntryPointer ale; ///< transaction details
+
+ ErrorState *lastError = nullptr; ///< last problem details (or nil)
+ Comm::ConnectionPointer lastFailedConnection; ///< nil if none has failed
+
+ /// whether spare connection attempts disregard happy_eyeballs_* settings
+ bool ignoreSpareRestrictions = false;
+
+ /// whether we have received permission to open a spare while spares are limited
+ bool gotSpareAllowance = false;
+
+ /// whether persistent connections are allowed
+ bool allowPconn_ = true;
+
+ /// whether we are opening connections for a request that may be resent
+ bool retriable_ = true;
+
+ /// origin server domain name (or equivalent)
+ const char *host_ = nullptr;
+
+ /// the request that needs a to-server connection
+ HttpRequestPointer cause;
+
+ /// number of connection opening attempts, including those in the requestor
+ int n_tries;
+
+ /// the reason we ran out of time or attempts earlier (or nil)
+ mutable const char *ranOutOfTimeOrAttemptsEarlier_ = nullptr;
+};
+
+#endif
+
globals.h \
gopher.h \
gopher.cc \
+ HappyConnOpener.cc \
+ HappyConnOpener.h \
helper.cc \
helper.h \
hier_code.h \
refresh.cc \
RemovalPolicy.cc \
RemovalPolicy.h \
+ ResolvedPeers.cc \
+ ResolvedPeers.h \
send-announce.h \
send-announce.cc \
SBufStatsAction.h \
RemovalPolicy.cc \
RequestFlags.cc \
RequestFlags.h \
+ ResolvedPeers.cc \
+ ResolvedPeers.h \
StatCounters.h \
StatCounters.cc \
StatHist.h \
RemovalPolicy.cc \
RequestFlags.h \
RequestFlags.cc \
+ ResolvedPeers.cc \
+ ResolvedPeers.h \
tests/stub_libsecurity.cc \
StatCounters.h \
StatCounters.cc \
FwdState.h \
gopher.h \
tests/stub_gopher.cc \
+ HappyConnOpener.cc \
+ HappyConnOpener.h \
helper.cc \
hier_code.h \
$(HTCPSOURCE) \
refresh.h \
refresh.cc \
RemovalPolicy.cc \
+ ResolvedPeers.cc \
+ ResolvedPeers.h \
tests/stub_SBufDetailedStats.cc \
$(SNMP_SOURCE) \
SquidMath.h \
FwdState.h \
gopher.h \
tests/stub_gopher.cc \
+ HappyConnOpener.cc \
+ HappyConnOpener.h \
helper.cc \
hier_code.h \
$(HTCPSOURCE) \
refresh.h \
refresh.cc \
RemovalPolicy.cc \
+ ResolvedPeers.cc \
+ ResolvedPeers.h \
tests/stub_SBufDetailedStats.cc \
$(SNMP_SOURCE) \
SquidMath.h \
FwdState.h \
gopher.h \
tests/stub_gopher.cc \
+ HappyConnOpener.cc \
+ HappyConnOpener.h \
hier_code.h \
helper.cc \
$(HTCPSOURCE) \
refresh.h \
refresh.cc \
RemovalPolicy.cc \
+ ResolvedPeers.h \
+ ResolvedPeers.cc \
tests/stub_SBufDetailedStats.cc \
$(SNMP_SOURCE) \
SquidMath.h \
FwdState.h \
gopher.h \
tests/stub_gopher.cc \
+ HappyConnOpener.cc \
+ HappyConnOpener.h \
helper.cc \
hier_code.h \
$(HTCPSOURCE) \
refresh.h \
refresh.cc \
RemovalPolicy.cc \
+ ResolvedPeers.h \
+ ResolvedPeers.cc \
StrList.h \
StrList.cc \
tests/stub_SBufDetailedStats.cc \
FwdState.h \
gopher.h \
tests/stub_gopher.cc \
+ HappyConnOpener.cc \
+ HappyConnOpener.h \
helper.cc \
hier_code.h \
$(HTCPSOURCE) \
Pipeline.cc \
Pipeline.h \
RemovalPolicy.cc \
+ ResolvedPeers.cc \
+ ResolvedPeers.h \
redirect.h \
tests/stub_redirect.cc \
refresh.h \
#include "ipcache.h"
#include "mem/forward.h"
#include "PingData.h"
+#include "typedefs.h" /* for IRCB */
class ErrorState;
class HtcpReplyData;
--- /dev/null
+/*
+ * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#include "squid.h"
+#include "CachePeer.h"
+#include "comm/Connection.h"
+#include "comm/ConnOpener.h"
+#include "ResolvedPeers.h"
+#include "SquidConfig.h"
+
+ResolvedPeers::ResolvedPeers()
+{
+ if (Config.forward_max_tries > 0)
+ paths_.reserve(Config.forward_max_tries);
+}
+
+void
+ResolvedPeers::retryPath(const Comm::ConnectionPointer &path)
+{
+ debugs(17, 4, path);
+ assert(path);
+ paths_.emplace(paths_.begin(), path);
+}
+
+void
+ResolvedPeers::addPath(const Comm::ConnectionPointer &path)
+{
+ paths_.emplace_back(path);
+}
+
+Comm::ConnectionPointer
+ResolvedPeers::extractFront()
+{
+ Must(!empty());
+ return extractFound("first: ", paths_.begin());
+}
+
+Comm::ConnectionPointer
+ResolvedPeers::extractPrime(const Comm::Connection &currentPeer)
+{
+ if (!empty()) {
+ const auto peerToMatch = currentPeer.getPeer();
+ const auto familyToMatch = ConnectionFamily(currentPeer);
+ const auto &conn = paths_.front();
+ if (conn->getPeer() == peerToMatch && familyToMatch == ConnectionFamily(*conn))
+ return extractFound("same-peer same-family match: ", paths_.begin());
+ }
+
+ debugs(17, 7, "no same-peer same-family paths");
+ return nullptr;
+}
+
+/// If spare paths exist for currentPeer, returns the first spare path iterator.
+/// Otherwise, if there are paths for other peers, returns one of those.
+/// Otherwise, returns the end() iterator.
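+/// For example, with currentPeer using peerA over IPv6 and queued paths
+/// {peerA/IPv6, peerA/IPv4, peerB/IPv6}, returns the peerA/IPv4 position (a
+/// spare); with {peerA/IPv6, peerB/IPv6}, returns the peerB/IPv6 position.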
+Comm::ConnectionList::iterator
+ResolvedPeers::findSpareOrNextPeer(const Comm::Connection &currentPeer)
+{
+ const auto peerToMatch = currentPeer.getPeer();
+ const auto familyToAvoid = ConnectionFamily(currentPeer);
+ // Optimization: Also stop at the first mismatching peer because all
+ // same-peer paths are grouped together.
+ // the result may point to a next-peer path; callers detect and handle that
+ return std::find_if(paths_.begin(), paths_.end(),
+ [peerToMatch, familyToAvoid](const Comm::ConnectionPointer &conn) {
+ return peerToMatch != conn->getPeer() ||
+ familyToAvoid != ConnectionFamily(*conn);
+ });
+}
+
+Comm::ConnectionPointer
+ResolvedPeers::extractSpare(const Comm::Connection &currentPeer)
+{
+ auto found = findSpareOrNextPeer(currentPeer);
+ if (found != paths_.end() && currentPeer.getPeer() == (*found)->getPeer())
+ return extractFound("same-peer different-family match: ", found);
+
+ debugs(17, 7, "no same-peer different-family paths");
+ return nullptr;
+}
+
+/// convenience method to finish a successful extract*() call
+Comm::ConnectionPointer
+ResolvedPeers::extractFound(const char *description, const Comm::ConnectionList::iterator &found)
+{
+ const auto path = *found;
+ paths_.erase(found);
+ debugs(17, 7, description << path);
+ return path;
+}
+
+bool
+ResolvedPeers::haveSpare(const Comm::Connection &currentPeer)
+{
+ const auto found = findSpareOrNextPeer(currentPeer);
+ return found != paths_.end() &&
+ currentPeer.getPeer() == (*found)->getPeer();
+}
+
+bool
+ResolvedPeers::doneWithSpares(const Comm::Connection &currentPeer)
+{
+ const auto found = findSpareOrNextPeer(currentPeer);
+ if (found == paths_.end())
+ return destinationsFinalized;
+ return currentPeer.getPeer() != (*found)->getPeer();
+}
+
+bool
+ResolvedPeers::doneWithPrimes(const Comm::Connection &currentPeer) const
+{
+ const auto first = paths_.begin();
+ if (first == paths_.end())
+ return destinationsFinalized;
+ return currentPeer.getPeer() != (*first)->getPeer() ||
+ ConnectionFamily(currentPeer) != ConnectionFamily(**first);
+}
+
+bool
+ResolvedPeers::doneWithPeer(const Comm::Connection &currentPeer) const
+{
+ const auto first = paths_.begin();
+ if (first == paths_.end())
+ return destinationsFinalized;
+ return currentPeer.getPeer() != (*first)->getPeer();
+}
+
+int
+ResolvedPeers::ConnectionFamily(const Comm::Connection &conn)
+{
+ return conn.remote.isIPv4() ? AF_INET : AF_INET6;
+}
+
+std::ostream &
+operator <<(std::ostream &os, const ResolvedPeers &peers)
+{
+ if (peers.empty())
+ return os << "[no paths]";
+ return os << peers.size() << (peers.destinationsFinalized ? "" : "+") << " paths";
+}
+
--- /dev/null
+/*
+ * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#ifndef SQUID_RESOLVEDPEERS_H
+#define SQUID_RESOLVEDPEERS_H
+
+#include "base/RefCount.h"
+#include "comm/forward.h"
+
+#include <iosfwd>
+
+/// cache_peer and origin server addresses (a.k.a. paths)
+/// selected and resolved by the peering code
+class ResolvedPeers: public RefCountable
+{
+ MEMPROXY_CLASS(ResolvedPeers);
+
+public:
+ typedef RefCount<ResolvedPeers> Pointer;
+
+ ResolvedPeers();
+
+ /// whether we lack any known candidate paths
+ bool empty() const { return paths_.empty(); }
+
+ /// add a candidate path to try after all the existing paths
+ void addPath(const Comm::ConnectionPointer &);
+
+ /// add a candidate path to try before all the existing paths
+ void retryPath(const Comm::ConnectionPointer &);
+
+ /// extracts and returns the first queued address
+ Comm::ConnectionPointer extractFront();
+
+ /// extracts and returns the first same-peer same-family address
+ /// \returns nil if it cannot find the requested address
+ Comm::ConnectionPointer extractPrime(const Comm::Connection &currentPeer);
+
+ /// extracts and returns the first same-peer different-family address
+ /// \returns nil if it cannot find the requested address
+ Comm::ConnectionPointer extractSpare(const Comm::Connection &currentPeer);
+
+ /// whether extractSpare() would return a non-nil path right now
+ bool haveSpare(const Comm::Connection &currentPeer);
+
+ /// whether extractPrime() returns and will continue to return nil
+ bool doneWithPrimes(const Comm::Connection &currentPeer) const;
+
+ /// whether extractSpare() returns and will continue to return nil
+ bool doneWithSpares(const Comm::Connection &currentPeer);
+
+ /// whether doneWithPrimes() and doneWithSpares() are true for currentPeer
+ bool doneWithPeer(const Comm::Connection &currentPeer) const;
+
+ /// the current number of candidate paths
+ Comm::ConnectionList::size_type size() const { return paths_.size(); }
+
+ /// whether all of the available candidate paths have been received from DNS
+ bool destinationsFinalized = false;
+
+ /// whether HappyConnOpener::noteCandidatesChange() is scheduled to fire
+ bool notificationPending = false;
+
+private:
+ /// The protocol family of the given path, AF_INET or AF_INET6
+ static int ConnectionFamily(const Comm::Connection &conn);
+
+ Comm::ConnectionList::iterator findSpareOrNextPeer(const Comm::Connection &currentPeer);
+ Comm::ConnectionPointer extractFound(const char *description, const Comm::ConnectionList::iterator &found);
+
+ Comm::ConnectionList paths_;
+};
+
+/// summarized ResolvedPeers (for debugging)
+std::ostream &operator <<(std::ostream &, const ResolvedPeers &);
+
+#endif /* SQUID_RESOLVEDPEERS_H */
+
int v4_first; ///< Place IPv4 first in the order of DNS results.
ssize_t packet_max; ///< maximum size EDNS advertised for DNS replies.
} dns;
+
+ struct {
+ int connect_limit; ///< happy_eyeballs_connect_limit
+ int connect_gap; ///< happy_eyeballs_connect_gap (msec)
+ int connect_timeout; ///< happy_eyeballs_connect_timeout (msec)
+ } happyEyeballs;
};
extern SquidConfig Config;
/// schedule an async job call using a dialer; use CallJobHere macros instead
template <class Dialer>
-bool
+AsyncCall::Pointer
CallJob(int debugSection, int debugLevel, const char *fileName, int fileLine,
const char *callName, const Dialer &dialer)
{
AsyncCall::Pointer call = asyncCall(debugSection, debugLevel, callName, dialer);
- return ScheduleCall(fileName, fileLine, call);
+ ScheduleCall(fileName, fileLine, call);
+ return call;
}
#define CallJobHere(debugSection, debugLevel, job, Class, method) \
EnumIterator.h \
File.cc \
File.h \
+ forward.h \
HardFun.h \
Here.cc \
Here.h \
--- /dev/null
+/*
+ * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
+ *
+ * Squid software is distributed under GPLv2+ license and includes
+ * contributions from numerous individuals and organizations.
+ * Please see the COPYING and CONTRIBUTORS files for details.
+ */
+
+#ifndef SQUID_SRC_BASE_FORWARD_H
+#define SQUID_SRC_BASE_FORWARD_H
+
+template<class Cbc> class CbcPointer;
+
+class AsyncJob;
+typedef CbcPointer<AsyncJob> AsyncJobPointer;
+
+#endif /* SQUID_SRC_BASE_FORWARD_H */
+
server_pconn_for_nonretriable allow SpeedIsWorthTheRisk
DOC_END
+NAME: happy_eyeballs_connect_timeout
+COMMENT: (msec)
+TYPE: int
+DEFAULT: 250
+LOC: Config.happyEyeballs.connect_timeout
+DOC_START
+ This Happy Eyeballs (RFC 8305) tuning directive specifies the minimum
+ delay between opening a primary to-server connection and opening a
+ spare to-server connection for the same master transaction. This delay
+ is similar to the Connection Attempt Delay in RFC 8305, but it is only
+ applied to the first spare connection attempt. Subsequent spare
+ connection attempts use happy_eyeballs_connect_gap, and primary
+ connection attempts are not artificially delayed at all.
+
+ Terminology: The "primary" and "spare" designations are determined by
+ the order of DNS answers received by Squid: If Squid's DNS AAAA query
+ was answered first, then primary connections are connections to IPv6
+ peer addresses (while spare connections use IPv4 addresses).
+ Similarly, if Squid's DNS A query was answered first, then primary
+ connections are connections to IPv4 peer addresses (while spare
+ connections use IPv6 addresses).
+
+ Shorter happy_eyeballs_connect_timeout values reduce master
+ transaction response time, potentially improving user-perceived
+ response times (i.e., making user eyeballs happier). Longer delays
+ reduce both concurrent connection level and server bombardment with
+ connection requests, potentially improving overall Squid performance
+ and reducing the chance of being blocked by servers for opening too
+ many unused connections.
+
+ RFC 8305 prohibits happy_eyeballs_connect_timeout values smaller than
+ 10 (milliseconds) to "avoid congestion collapse in the presence of
+ high packet-loss rates".
+
+ The following Happy Eyeballs directives place additional connection
+ opening restrictions: happy_eyeballs_connect_gap and
+ happy_eyeballs_connect_limit.
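+
+ For example, to delay the first spare connection attempt by 100
+ milliseconds (an illustrative value, not a recommendation):
+
+     happy_eyeballs_connect_timeout 100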
+DOC_END
+
+NAME: happy_eyeballs_connect_gap
+COMMENT: (msec)
+TYPE: int
+DEFAULT: -1
+DEFAULT_DOC: no artificial delays between spare attempts
+LOC: Config.happyEyeballs.connect_gap
+DOC_START
+ This Happy Eyeballs (RFC 8305) tuning directive specifies the
+ minimum delay between opening spare to-server connections (to any
+ server; i.e. across all concurrent master transactions in a Squid
+ instance). Each SMP worker currently multiplies the configured gap
+ by the total number of workers so that the combined spare connection
+ opening rate of a Squid instance obeys the configured gap. The
+ workers do not coordinate connection openings yet; a micro burst
+ of spare connection openings may violate the configured gap.
+
+ This directive has similar trade-offs as
+ happy_eyeballs_connect_timeout, but its focus is on limiting traffic
+ amplification effects for Squid as a whole, while
+ happy_eyeballs_connect_timeout works on an individual master
+ transaction level.
+
+ The following Happy Eyeballs directives place additional connection
+ opening restrictions: happy_eyeballs_connect_timeout and
+ happy_eyeballs_connect_limit. See the former for related terminology.
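+
+ For example, to keep spare connection openings at least 50
+ milliseconds apart (an illustrative value, not a recommendation):
+
+     happy_eyeballs_connect_gap 50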
+DOC_END
+
+NAME: happy_eyeballs_connect_limit
+TYPE: int
+DEFAULT: -1
+DEFAULT_DOC: no artificial limit on the number of concurrent spare attempts
+LOC: Config.happyEyeballs.connect_limit
+DOC_START
+ This Happy Eyeballs (RFC 8305) tuning directive specifies the
+ maximum number of spare to-server connections (to any server; i.e.
+ across all concurrent master transactions in a Squid instance).
+ Each SMP worker gets an equal share of the total limit. However,
+ the workers do not share the actual connection counts yet, so one
+ (busier) worker cannot "borrow" spare connection slots from another
+ (less loaded) worker.
+
+ Setting this limit to zero disables concurrent use of primary and
+ spare TCP connections: Spare connection attempts are made only after
+ all primary attempts fail. However, Squid would still use the
+ DNS-related optimizations of the Happy Eyeballs approach.
+
+ This directive has similar trade-offs as happy_eyeballs_connect_gap,
+ but its focus is on limiting Squid overheads, while
+ happy_eyeballs_connect_gap focuses on the origin server and peer
+ overheads.
+
+ The following Happy Eyeballs directives place additional connection
+ opening restrictions: happy_eyeballs_connect_timeout and
+ happy_eyeballs_connect_gap. See the former for related terminology.
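+
+ For example, to make spare connection attempts strictly follow the
+ failure of all primary attempts:
+
+     happy_eyeballs_connect_limit 0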
+DOC_END
+
EOF
if (!old)
return 0;
+ // XXX: We are collapsing this DNS query (B) onto another one (A), but there
+ // is no code to later send B if the A answer has unshareable 0 TTL records.
+
idns_query *q = new idns_query;
// no query_id on this instance.
#include "MemBuf.h"
#include "MemObject.h"
#include "neighbors.h"
+#include "pconn.h"
#include "peer_proxy_negotiate_auth.h"
#include "profiler/Profiler.h"
#include "refresh.h"
serverConnectionSaved->close();
}
} else {
- fwd->pconnPush(serverConnectionSaved, request->url.host());
+ fwdPconnPool->push(serverConnectionSaved, request->url.host());
}
serverComplete();
LOCAL_ARRAY(char, buf, SQUIDHOSTNAMELEN * 3 + 10);
destLink->remote.toUrl(buf, SQUIDHOSTNAMELEN * 3 + 10);
+
+ // when connecting through a cache_peer, ignore the final destination
+ if (destLink->getPeer())
+ domain = nullptr;
+
if (domain) {
const int used = strlen(buf);
snprintf(buf+used, SQUIDHOSTNAMELEN * 3 + 10-used, "/%s", domain);
Comm::ConnectionPointer
PconnPool::pop(const Comm::ConnectionPointer &dest, const char *domain, bool keepOpen)
{
+ // always call shared pool first because we need to close an idle
+ // connection there if we have to use a standby connection.
+ if (const auto direct = popStored(dest, domain, keepOpen))
+ return direct;
+
+ // either there was no pconn to pop or this is not a retriable xaction
+ if (const auto peer = dest->getPeer()) {
+ if (peer->standby.pool)
+ return peer->standby.pool->popStored(dest, domain, true);
+ }
+ return nullptr;
+}
+
+/// implements pop() API while disregarding peer standby pools
+/// \returns an open connection or nil
+Comm::ConnectionPointer
+PconnPool::popStored(const Comm::ConnectionPointer &dest, const char *domain, const bool keepOpen)
+{
const char * aKey = key(dest, domain);
IdleConnList *list = (IdleConnList *)hash_lookup(table, aKey);
if (list == NULL) {
debugs(48, 3, HERE << "lookup for key {" << aKey << "} failed.");
// failure notifications resume standby conn creation after fdUsageHigh
- notifyManager("pop failure");
+ notifyManager("pop lookup failure");
return Comm::ConnectionPointer();
} else {
debugs(48, 3, "found " << hashKeyStr(list) <<
(keepOpen ? " to use" : " to kill"));
}
- /* may delete list */
- Comm::ConnectionPointer popped = list->findUseable(dest);
- if (!keepOpen && Comm::IsConnOpen(popped))
+ if (const auto popped = list->findUseable(dest)) { // may delete list
+ // successful pop notifications replenish standby connections pool
+ notifyManager("pop");
+
+ if (keepOpen)
+ return popped;
+
popped->close();
+ return Comm::ConnectionPointer();
+ }
- // successful pop notifications replenish standby connections pool
- notifyManager("pop");
- return popped;
+ // failure notifications resume standby conn creation after fdUsageHigh
+ notifyManager("pop usability failure");
+ return Comm::ConnectionPointer();
}
void
/**
* Returns either a pointer to a popped connection to dest or nil.
* Closes the connection before returning its pointer unless keepOpen.
 * For connections going to a cache_peer, supports standby connection pools.
*
* A caller with a non-retriable transaction should set keepOpen to false
* and call pop() anyway, even though the caller does not want a pconn.
static const char *key(const Comm::ConnectionPointer &destLink, const char *domain);
+ Comm::ConnectionPointer popStored(const Comm::ConnectionPointer &dest, const char *domain, const bool keepOpen);
+
int hist[PCONN_HIST_SZ];
hash_table *table;
const char *descr;
#include "fde.h"
#include "FwdState.h"
#include "globals.h"
+#include "HappyConnOpener.h"
#include "http.h"
#include "http/Stream.h"
#include "HttpRequest.h"
#include "MemBuf.h"
#include "neighbors.h"
#include "PeerSelectState.h"
+#include "ResolvedPeers.h"
#include "sbuf/SBuf.h"
#include "security/BlindPeerConnector.h"
#include "SquidConfig.h"
CbcPointer<ClientHttpRequest> http;
HttpRequest::Pointer request;
AccessLogEntryPointer al;
- Comm::ConnectionList serverDestinations;
const char * getHost() const {
return (server.conn != NULL && server.conn->getPeer() ? server.conn->getPeer()->host : request->url.host());
/// recovering from the previous connect failure
void startConnecting();
- void noteConnectFailure(const Comm::ConnectionPointer &conn);
-
/// called when negotiations with the peer have been successfully completed
void notePeerReadyToShovel();
time_t startTime; ///< object creation time, before any peer selection/connection attempts
/// Whether we are waiting for the CONNECT request/response exchange with the peer.
bool waitingForConnectExchange;
+ HappyConnOpenerPointer connOpener; ///< current connection opening job
+ ResolvedPeersPointer destinations; ///< paths for forwarding the request
+ bool destinationsFound; ///< whether at least one candidate path was found
+
+ // AsyncCalls which we set and may need cancelling.
+ struct {
+ AsyncCall::Pointer connector; ///< a call linking us to the HappyConnOpener producing server.conn
+ } calls;
void copyRead(Connection &from, IOCB *completion);
virtual void noteDestination(Comm::ConnectionPointer conn) override;
virtual void noteDestinationsEnd(ErrorState *selectionError) override;
+ void syncHierNote(const Comm::ConnectionPointer &server, const char *origin);
+
+ /// called when a connection has been successfully established or
+ /// when all candidate destinations have been tried and all have failed
+ void noteConnection(HappyConnOpenerAnswer &);
+
+ /// whether we are waiting for HappyConnOpener
+ /// same as calls.connector but may differ from connOpener.valid()
+ bool opening() const { return connOpener.set(); }
+
+ void cancelOpening(const char *reason);
+
+ /// Start using an established connection
+ void connectDone(const Comm::ConnectionPointer &conn, const char *origin, const bool reused);
+
+ void notifyConnOpener();
+
void saveError(ErrorState *finalError);
void sendError(ErrorState *finalError, const char *reason);
static const char *const conn_established = "HTTP/1.1 200 Connection established\r\n\r\n";
-static CNCB tunnelConnectDone;
static ERCB tunnelErrorComplete;
static CLCB tunnelServerClosed;
static CLCB tunnelClientClosed;
TunnelStateData::TunnelStateData(ClientHttpRequest *clientRequest) :
startTime(squid_curtime),
- waitingForConnectExchange(false)
+ waitingForConnectExchange(false),
+ destinations(new ResolvedPeers()),
+ destinationsFound(false)
{
debugs(26, 3, "TunnelStateData constructed this=" << this);
client.readPendingFunc = &tunnelDelayedClientRead;
assert(clientRequest);
url = xstrdup(clientRequest->uri);
request = clientRequest->request;
+ Must(request);
server.size_ptr = &clientRequest->out.size;
client.size_ptr = &clientRequest->al->http.clientRequestSz.payloadData;
status_ptr = &clientRequest->al->http.code;
debugs(26, 3, "TunnelStateData destructed this=" << this);
assert(noConnections());
xfree(url);
- serverDestinations.clear();
+ if (opening())
+ cancelOpening("~TunnelStateData");
delete savedError;
}
len += count;
}
+/// update "hierarchy" annotations with a new (possibly failed) destination
+/// \param origin the name of the origin server we were trying to reach
+void
+TunnelStateData::syncHierNote(const Comm::ConnectionPointer &conn, const char *origin)
+{
+ request->hier.resetPeerNotes(conn, origin);
+ if (al)
+ al->hier.resetPeerNotes(conn, origin);
+}
+
int
TunnelStateData::Connection::debugLevelForError(int const xerrno) const
{
tunnelState->server.conn->close();
}
-/// reacts to a failure to establish the given TCP connection
void
-TunnelStateData::noteConnectFailure(const Comm::ConnectionPointer &conn)
+TunnelStateData::noteConnection(HappyConnOpener::Answer &answer)
{
- debugs(26, 4, "removing the failed one from " << serverDestinations.size() <<
- " destinations: " << conn);
-
- if (CachePeer *peer = conn->getPeer())
- peerConnectFailed(peer);
-
- assert(!serverDestinations.empty());
- serverDestinations.erase(serverDestinations.begin());
-
- // Since no TCP payload has been passed to client or server, we may
- // TCP-connect to other destinations (including alternate IPs).
-
- if (!FwdState::EnoughTimeToReForward(startTime))
- return sendError(savedError, "forwarding timeout");
-
- if (!serverDestinations.empty())
- return startConnecting();
-
- if (!PeerSelectionInitiator::subscribed)
- return sendError(savedError, "tried all destinations");
+ calls.connector = nullptr;
+ connOpener.clear();
+
+ if (const auto error = answer.error.get()) {
+ syncHierNote(answer.conn, request->url.host());
+ saveError(error);
+ answer.error.clear(); // savedError has it now
+ sendError(savedError, "tried all destinations");
+ return;
+ }
- debugs(26, 4, "wait for more destinations to try");
- // expect a noteDestination*() call
+ connectDone(answer.conn, request->url.host(), answer.reused);
}
-static void
-tunnelConnectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno, void *data)
+void
+TunnelStateData::connectDone(const Comm::ConnectionPointer &conn, const char *origin, const bool reused)
{
- TunnelStateData *tunnelState = (TunnelStateData *)data;
+ Must(Comm::IsConnOpen(conn));
+ server.conn = conn;
- if (status != Comm::OK) {
- const auto err = new ErrorState(ERR_CONNECT_FAIL, Http::scServiceUnavailable, tunnelState->request.getRaw(), tunnelState->al);
- err->xerrno = xerrno;
- // on timeout is this still: err->xerrno = ETIMEDOUT;
- err->port = conn->remote.port();
- tunnelState->saveError(err);
- tunnelState->noteConnectFailure(conn);
- return;
- }
+ if (reused)
+ ResetMarkingsToServer(request.getRaw(), *conn);
+ // else Comm::ConnOpener already applied proper/current markings
+
+ syncHierNote(server.conn, origin);
#if USE_DELAY_POOLS
/* no point using the delayIsNoDelay stuff since tunnel is nice and simple */
if (conn->getPeer() && conn->getPeer()->options.no_delay)
- tunnelState->server.setDelayId(DelayId());
+ server.setDelayId(DelayId());
#endif
- netdbPingSite(tunnelState->request->url.host());
-
- tunnelState->request->hier.resetPeerNotes(conn, tunnelState->getHost());
+ netdbPingSite(request->url.host());
- tunnelState->server.conn = conn;
- tunnelState->request->peer_host = conn->getPeer() ? conn->getPeer()->host : NULL;
- comm_add_close_handler(conn->fd, tunnelServerClosed, tunnelState);
+ request->peer_host = conn->getPeer() ? conn->getPeer()->host : nullptr;
+ comm_add_close_handler(conn->fd, tunnelServerClosed, this);
bool toOrigin = false; // same semantics as StateFlags::toOrigin
if (const auto * const peer = conn->getPeer()) {
- tunnelState->request->prepForPeering(*peer);
+ request->prepForPeering(*peer);
toOrigin = peer->options.originserver;
} else {
- tunnelState->request->prepForDirect();
+ request->prepForDirect();
toOrigin = true;
}
if (!toOrigin)
- tunnelState->connectToPeer();
+ connectToPeer();
else {
- tunnelState->notePeerReadyToShovel();
+ notePeerReadyToShovel();
}
AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "tunnelTimeout",
- CommTimeoutCbPtrFun(tunnelTimeout, tunnelState));
+ CommTimeoutCbPtrFun(tunnelTimeout, this));
commSetConnTimeout(conn, Config.Timeout.read, timeoutCall);
}
void
TunnelStateData::noteDestination(Comm::ConnectionPointer path)
{
- const bool wasBlocked = serverDestinations.empty();
- // XXX: Push even a nil path so that subsequent noteDestination() calls
- // can rely on wasBlocked to detect ongoing/concurrent attempts.
- // Upcoming Happy Eyeballs changes will handle this properly.
- serverDestinations.push_back(path);
+ destinationsFound = true;
if (!path) { // decided to use a pinned connection
// We can call usePinned() without fear of clashing with an earlier
// forwarding attempt because PINNED must be the first destination.
- assert(wasBlocked);
+ assert(destinations->empty());
usePinned();
return;
}
- if (wasBlocked)
- startConnecting();
- // else continue to use one of the previously noted destinations;
- // if all of them fail, we may try this path
+ destinations->addPath(path);
+
+ if (Comm::IsConnOpen(server.conn)) {
+ // We are already using a previously opened connection but also
+ // receiving destinations in case we need to re-forward.
+ Must(!opening());
+ return;
+ }
+
+ if (opening()) {
+ notifyConnOpener();
+ return; // and continue to wait for the noteConnection() callback
+ }
+
+ startConnecting();
}
void
TunnelStateData::noteDestinationsEnd(ErrorState *selectionError)
{
PeerSelectionInitiator::subscribed = false;
- if (serverDestinations.empty()) { // was blocked, waiting for more paths
+ destinations->destinationsFinalized = true;
+ if (!destinationsFound) {
if (selectionError)
return sendError(selectionError, "path selection has failed");
// else continue to use one of the previously noted destinations;
// if all of them fail, tunneling as whole will fail
Must(!selectionError); // finding at least one path means selection succeeded
+
+ if (Comm::IsConnOpen(server.conn)) {
+ // We are already using a previously opened connection but also
+ // receiving destinations in case we need to re-forward.
+ Must(!opening());
+ return;
+ }
+
+ Must(opening()); // or we would be stuck with nothing to do or wait for
+ notifyConnOpener();
}
/// remembers an error to be used if there will be no more connection attempts
if (request)
request->hier.stopPeerClock(false);
+ if (opening())
+ cancelOpening(reason);
+
assert(finalError);
// get rid of any cached error unless that is what the caller is sending
errorSend(client.conn, finalError);
}
+/// Notify connOpener that we no longer need connections. We do not have to do
+/// this -- connOpener would eventually notice on its own, but notifying reduces
+/// waste and speeds up spare connection opening for other transactions (that
+/// could otherwise wait for this transaction to use its spare allowance).
+void
+TunnelStateData::cancelOpening(const char *reason)
+{
+ assert(calls.connector);
+ calls.connector->cancel(reason);
+ calls.connector = nullptr;
+ notifyConnOpener();
+ connOpener.clear();
+}
+
void
TunnelStateData::startConnecting()
{
if (request)
request->hier.startPeerClock();
- assert(!serverDestinations.empty());
- Comm::ConnectionPointer &dest = serverDestinations.front();
- debugs(26, 3, "to " << dest);
- assert(dest != nullptr);
-
- GetMarkingsToServer(request.getRaw(), *dest);
+ assert(!destinations->empty());
- const time_t connectTimeout = dest->connectTimeout(startTime);
- AsyncCall::Pointer call = commCbCall(26,3, "tunnelConnectDone", CommConnectCbPtrFun(tunnelConnectDone, this));
- Comm::ConnOpener *cs = new Comm::ConnOpener(dest, call, connectTimeout);
- cs->setHost(url);
+ calls.connector = asyncCall(17, 5, "TunnelStateData::noteConnection", HappyConnOpener::CbDialer<TunnelStateData>(&TunnelStateData::noteConnection, this));
+ const auto cs = new HappyConnOpener(destinations, calls.connector, request, startTime, 0, al);
+ cs->setHost(request->url.host());
+ cs->setRetriable(false);
+ cs->allowPersistent(false);
+ destinations->notificationPending = true; // start() is async
+ connOpener = cs;
AsyncJob::Start(cs);
}
void
TunnelStateData::usePinned()
{
+ Must(request);
+ const auto connManager = request->pinnedConnection();
const auto serverConn = borrowPinnedConnection(request.getRaw());
debugs(26,7, "pinned peer connection: " << serverConn);
if (!Comm::IsConnOpen(serverConn)) {
+ syncHierNote(serverConn, connManager ? connManager->pinning.host : request->url.host());
// a PINNED path failure is fatal; do not wait for more paths
sendError(new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request.getRaw(), al),
"pinned path failure");
return;
}
- tunnelConnectDone(serverConn, Comm::OK, 0, (void *)this);
+ Must(connManager);
+
+ // Set HttpRequest pinned-related flags for consistency even though
+ // they are not really used by tunnel.cc code.
+ request->flags.pinned = true;
+ if (connManager->pinnedAuth())
+ request->flags.auth = true;
+
+ // the server may close the pinned connection before this request
+ const auto reused = true;
+ connectDone(serverConn, connManager->pinning.host, reused);
}
CBDATA_CLASS_INIT(TunnelStateData);
#endif
+/// makes sure connOpener knows that destinations have changed
+void
+TunnelStateData::notifyConnOpener()
+{
+ if (destinations->notificationPending) {
+ debugs(17, 7, "reusing pending notification");
+ } else {
+ destinations->notificationPending = true;
+ CallJobHere(17, 5, connOpener, HappyConnOpener, noteCandidatesChange);
+ }
+}
+
#if USE_OPENSSL
void
switchToTunnel(HttpRequest *request, Comm::ConnectionPointer &clientConn, Comm::ConnectionPointer &srvConn)