From: Christos Tsantilas
Date: Mon, 28 Jan 2013 04:11:25 +0000 (-0700)
Subject: Propagate pinned connection persistency and closures to the client.
X-Git-Tag: SQUID_3_3_1~24
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3947642d720a4a7c0ad2fcc923065165442570fe;p=thirdparty%2Fsquid.git

Propagate pinned connection persistency and closures to the client.

Squid was trying hard to forward a request after pinned connection
failures because some of those failures were benign pconn races. That
meant re-pinning failed connections. After a few iterations to correctly
handle non-idempotent requests, the code appeared to work, but the final
design, with all the added complexity and related dangers, was deemed
inferior to the approach we use now.

Squid now simply propagates connection closures (including pconn races)
to the client. It is now the client's responsibility not to send
non-idempotent requests on idle persistent connections and to recover
from pconn races. Squid also propagates HTTP connection persistency
indicators from client to server and back, to make the client's job
feasible. Squid will send Connection: close and will close the client
connection if the pinned server says so, even if Squid could still
maintain a persistent connection with the client.

These changes are not meant to affect regular (not pinned) transactions.

In access.log, one can detect requests that were not responded to (due
to race conditions on pinned connections) by searching for the
ERR_ZERO_SIZE_OBJECT %err_code with a TCP_MISS/000 status and zero
response bytes.

This is a Measurement Factory project.
---
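As a rough illustration of the two propagation directions described above,
here is a minimal standalone sketch. It is not Squid code and is not part of
the patch; the function and parameter names are invented, and the real logic
(which also honors flags such as mustKeepalive) is in src/http.cc and
src/client_side_reply.cc in the diff below.

    // Illustrative sketch only; not Squid code. Names are invented for this note.

    // Client -> server: on a pinned connection, keep the server connection
    // alive only if the client's request allows persistence.
    bool serverSideKeepAlive(bool pinned, bool clientRequestPersistent,
                             bool serverPconnsEnabled)
    {
        if (pinned)
            return clientRequestPersistent;
        return serverPconnsEnabled;
    }

    // Server -> client: if the pinned server reply is not persistent, force
    // Connection: close toward the client, even if Squid could otherwise keep
    // the client connection open.
    bool clientSideKeepAlive(bool pinned, bool serverReplyPersistent,
                             bool clientWantsKeepAlive)
    {
        if (pinned && !serverReplyPersistent)
            return false;
        return clientWantsKeepAlive;
    }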
diff --git a/src/RequestFlags.h b/src/RequestFlags.h
index d0c0774aeb..a6e36abe65 100644
--- a/src/RequestFlags.h
+++ b/src/RequestFlags.h
@@ -105,8 +105,6 @@ public:
     bool connectionProxyAuth :1;
     /** set if the request was sent on a pinned connection */
     bool pinned :1;
-    /** OK to reopen a failed pinned connection */
-    bool canRePin :1;
     /** Authentication was already sent upstream (e.g. due tcp-level auth) */
     bool authSent :1;
     /** Deny direct forwarding unless overriden by always_direct
diff --git a/src/client_side.cc b/src/client_side.cc
index 0a6e061119..f2ab2c567d 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -2657,7 +2657,6 @@ clientProcessRequest(ConnStateData *conn, HttpParser *hp, ClientSocketContext *c
 
     request->flags.accelerated = http->flags.accel;
     request->flags.sslBumped=conn->switchedToHttps();
-    request->flags.canRePin = request->flags.sslBumped && conn->pinning.pinned;
     request->flags.ignoreCc = conn->port->ignore_cc;
     // TODO: decouple http->flags.accel from request->flags.sslBumped
     request->flags.noDirect = (request->flags.accelerated && !request->flags.sslBumped) ?
@@ -4259,8 +4258,12 @@ ConnStateData::ConnStateData() :
         stoppedSending_(NULL),
         stoppedReceiving_(NULL)
 {
+    pinning.host = NULL;
+    pinning.port = -1;
     pinning.pinned = false;
     pinning.auth = false;
+    pinning.zeroReply = false;
+    pinning.peer = NULL;
 }
 
 bool
@@ -4416,9 +4419,13 @@ ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
 {
     // FwdState might repin a failed connection sooner than this close
     // callback is called for the failed connection.
-    if (pinning.serverConnection == io.conn) {
-        pinning.closeHandler = NULL; // Comm unregisters handlers before calling
-        unpinConnection();
+    assert(pinning.serverConnection == io.conn);
+    pinning.closeHandler = NULL; // Comm unregisters handlers before calling
+    const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
+    unpinConnection();
+    if (sawZeroReply) {
+        debugs(33, 3, "Closing client connection on pinned zero reply.");
+        clientConnection->close();
     }
 }
 
@@ -4517,6 +4524,8 @@ ConnStateData::unpinConnection()
 
     safe_free(pinning.host);
 
+    pinning.zeroReply = false;
+
     /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
      * connection has gone away */
 }
diff --git a/src/client_side.h b/src/client_side.h
index 6185817102..10c456757a 100644
--- a/src/client_side.h
+++ b/src/client_side.h
@@ -263,6 +263,7 @@ public:
         int port;                /* port of pinned connection */
         bool pinned;             /* this connection was pinned */
         bool auth;               /* pinned for www authentication */
+        bool zeroReply;          ///< server closed w/o response (ERR_ZERO_SIZE_OBJECT)
        CachePeer *peer;         /* CachePeer the connection goes via */
         AsyncCall::Pointer closeHandler; /*The close handler for pinned server side connection*/
     } pinning;
diff --git a/src/client_side_reply.cc b/src/client_side_reply.cc
index 4b533f59c2..833c8f8686 100644
--- a/src/client_side_reply.cc
+++ b/src/client_side_reply.cc
@@ -1483,6 +1483,10 @@ clientReplyContext::buildReplyHeader()
         // We do not really have to close, but we pretend we are a tunnel.
         debugs(88, 3, "clientBuildReplyHeader: bumped reply forces close");
         request->flags.proxyKeepalive = 0;
+    } else if (request->pinnedConnection() && !reply->persistent()) {
+        // The peer wants to close the pinned connection
+        debugs(88, 3, "pinned reply forces close");
+        request->flags.proxyKeepalive = 0;
     }
 
     // Decide if we send chunked reply
@@ -2081,6 +2085,10 @@ clientReplyContext::sendMoreData (StoreIOBuffer result)
         debugs(33,3, "not sending more data to closing connection " << conn->clientConnection);
         return;
     }
+    if (conn->pinning.zeroReply) {
+        debugs(33,3, "not sending more data after a pinned zero reply " << conn->clientConnection);
+        return;
+    }
 
     char *buf = next()->readBuffer.data;
 
diff --git a/src/forward.cc b/src/forward.cc
index 4fe87ecee5..f51f57b807 100644
--- a/src/forward.cc
+++ b/src/forward.cc
@@ -169,16 +169,22 @@ FwdState::selectPeerForIntercepted()
 {
     // use pinned connection if available
     Comm::ConnectionPointer p;
-    if (ConnStateData *client = request->pinnedConnection())
+    if (ConnStateData *client = request->pinnedConnection()) {
         p = client->validatePinnedConnection(request, NULL);
+        if (Comm::IsConnOpen(p)) {
+            /* duplicate peerSelectPinned() effects */
+            p->peerType = PINNED;
+            entry->ping_status = PING_DONE;     /* Skip ICP */
 
-    if (Comm::IsConnOpen(p)) {
-        /* duplicate peerSelectPinned() effects */
-        p->peerType = PINNED;
-        entry->ping_status = PING_DONE;     /* Skip ICP */
-
-        debugs(17, 3, HERE << "reusing a pinned conn: " << *p);
-        serverDestinations.push_back(p);
+            debugs(17, 3, "reusing a pinned conn: " << *p);
+            serverDestinations.push_back(p);
+        } else {
+            debugs(17,2, "Pinned connection is not valid: " << p);
+            ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, HTTP_SERVICE_UNAVAILABLE, request);
+            fail(anErr);
+        }
+        // Either use the valid pinned connection or fail if it is invalid.
+        return;
     }
 
     // use client original destination as second preferred choice
@@ -393,10 +399,19 @@ FwdState::fail(ErrorState * errorState)
     if (!errorState->request)
         errorState->request = HTTPMSGLOCK(request);
 
-    if (pconnRace == racePossible && err->type == ERR_ZERO_SIZE_OBJECT) {
+    if (err->type != ERR_ZERO_SIZE_OBJECT)
+        return;
+
+    if (pconnRace == racePossible) {
         debugs(17, 5, HERE << "pconn race happened");
         pconnRace = raceHappened;
     }
+
+    if (ConnStateData *pinned_connection = request->pinnedConnection()) {
+        pinned_connection->pinning.zeroReply = true;
+        flags.dont_retry = true; // we want to propagate failure to the client
+        debugs(17, 4, "zero reply on pinned connection");
+    }
 }
 
 /**
@@ -872,18 +887,8 @@ FwdState::connectDone(const Comm::ConnectionPointer &conn, comm_err_t status, in
     if (serverConnection()->getPeer())
         peerConnectSucceded(serverConnection()->getPeer());
 
-    // some requests benefit from pinning but do not require it and can "repin"
-    const bool rePin = request->flags.canRePin &&
-                       request->clientConnectionManager.valid();
-    if (rePin) {
-        debugs(17, 3, HERE << "repinning " << serverConn);
-        request->clientConnectionManager->pinConnection(serverConn,
-                request, serverConn->getPeer(), request->flags.auth);
-        request->flags.pinned = 1;
-    }
-
 #if USE_SSL
-    if (!request->flags.pinned || rePin) {
+    if (!request->flags.pinned) {
         if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl) ||
                 (!serverConnection()->getPeer() && request->protocol == AnyP::PROTO_HTTPS) ||
                 request->flags.sslPeek) {
@@ -963,6 +968,7 @@ FwdState::connectStart()
     // XXX: also, logs will now lie if pinning is broken and leads to an error message.
     if (serverDestinations[0]->peerType == PINNED) {
         ConnStateData *pinned_connection = request->pinnedConnection();
+        debugs(17,7, "pinned peer connection: " << pinned_connection);
         // pinned_connection may become nil after a pconn race
         if (pinned_connection)
             serverConn = pinned_connection->validatePinnedConnection(request, serverDestinations[0]->getPeer());
@@ -984,16 +990,12 @@ FwdState::connectStart()
             dispatch();
             return;
         }
-        /* Failure. Fall back on next path unless we can re-pin */
+        // Pinned connection failure.
         debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
-        if (pconnRace != raceHappened || !request->flags.canRePin) {
-            serverDestinations.shift();
-            pconnRace = raceImpossible;
-            startConnectionOrFail();
-            return;
-        }
-        debugs(17,3, HERE << "There was a pconn race. Will try to repin.");
-        // and fall through to regular handling
+        ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, HTTP_SERVICE_UNAVAILABLE, request);
+        fail(anErr);
+        self = NULL; // refcounted
+        return;
     }
 
     // Use pconn to avoid opening a new connection.
diff --git a/src/http.cc b/src/http.cc
index 1c0952027e..58f9467510 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -2149,6 +2149,8 @@ HttpStateData::sendRequest()
      */
     if (request->flags.mustKeepalive)
         flags.keepalive = true;
+    else if (request->flags.pinned)
+        flags.keepalive = request->persistent();
     else if (!Config.onoff.server_pconns)
         flags.keepalive = false;
     else if (_peer == NULL)
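
The client-side responsibility mentioned in the message (recovering from a
propagated pconn race) roughly amounts to a retry decision like the
hypothetical sketch below. It is not part of this patch; the type and field
names are invented for illustration.

    // Hypothetical client-side sketch; not part of this patch.
    // A request that died on an idle persistent connection with zero response
    // bytes looks like a pconn race and may be retried on a fresh connection,
    // but only if repeating it is safe (idempotent).
    struct AttemptOutcome {
        bool sentOnIdlePersistentConnection; // request reused an idle pconn
        bool receivedAnyResponseBytes;       // server sent something back
        bool requestIsIdempotent;            // e.g. GET/HEAD without side effects
    };

    bool shouldRetryOnNewConnection(const AttemptOutcome &o)
    {
        return o.sentOnIdlePersistentConnection &&
               !o.receivedAnyResponseBytes &&
               o.requestIsIdempotent;
    }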