2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 17 Request Forwarding */
12 #include "AccessLogEntry.h"
13 #include "acl/Address.h"
14 #include "acl/FilledChecklist.h"
15 #include "acl/Gadgets.h"
16 #include "anyp/PortCfg.h"
17 #include "CacheManager.h"
18 #include "CachePeer.h"
19 #include "client_side.h"
20 #include "clients/forward.h"
21 #include "comm/Connection.h"
22 #include "comm/ConnOpener.h"
23 #include "comm/Loops.h"
24 #include "CommCalls.h"
25 #include "errorpage.h"
32 #include "hier_code.h"
34 #include "HttpReply.h"
35 #include "HttpRequest.h"
36 #include "icmp/net_db.h"
38 #include "ip/Intercept.h"
39 #include "ip/QosConfig.h"
41 #include "MemObject.h"
42 #include "mgr/Registration.h"
43 #include "neighbors.h"
45 #include "PeerPoolMgr.h"
46 #include "PeerSelectState.h"
47 #include "SquidConfig.h"
48 #include "SquidTime.h"
50 #include "StoreClient.h"
54 #include "ssl/cert_validate_message.h"
55 #include "ssl/Config.h"
56 #include "ssl/ErrorDetail.h"
57 #include "ssl/helper.h"
58 #include "ssl/PeerConnector.h"
59 #include "ssl/ServerBump.h"
60 #include "ssl/support.h"
62 #include "security/EncryptorAnswer.h"
// File-scope forwarding plumbing (NOTE(review): this extraction is missing lines;
// code below kept byte-identical, comments only added):
// - typed callback wrappers (PSC = peer-selection, CLCB = close, CNCB = connect)
// - FwdReplyCodes: per-try reply-status counters reported by fwdStats()
// - fwdPconnPool: shared idle server-side persistent-connection pool
67 static PSC fwdPeerSelectionCompleteWrapper
;
68 static CLCB fwdServerClosedWrapper
;
69 static CNCB fwdConnectDoneWrapper
;
// Highest "try" bucket tracked in FwdReplyCodes; higher tries are clamped here.
73 #define MAX_FWD_STATS_IDX 9
74 static int FwdReplyCodes
[MAX_FWD_STATS_IDX
+ 1][Http::scInvalidHeader
+ 1];
76 static PconnPool
*fwdPconnPool
= new PconnPool("server-peers", NULL
);
// cbdata registration so raw FwdState* callback data can be validity-checked.
77 CBDATA_CLASS_INIT(FwdState
);
// Call dialer that delivers a Security::EncryptorAnswer from Ssl::PeerConnector
// back into a FwdState method, while guarding against FwdState death via a
// CbcPointer (canDial() checks fwd_.valid() before dialing).
80 class FwdStatePeerAnswerDialer
: public CallDialer
, public Ssl::PeerConnector::CbDialer
// Pointer-to-member type for the FwdState method receiving the answer.
83 typedef void (FwdState::*Method
)(Security::EncryptorAnswer
&);
85 FwdStatePeerAnswerDialer(Method method
, FwdState
*fwd
):
86 method_(method
), fwd_(fwd
), answer_() {}
// Dial only while the FwdState cbdata is still valid.
89 virtual bool canDial(AsyncCall
&call
) { return fwd_
.valid(); }
90 void dial(AsyncCall
&call
) { ((&(*fwd_
))->*method_
)(answer_
); }
91 virtual void print(std::ostream
&os
) const {
92 os
<< '(' << fwd_
.get() << ", " << answer_
<< ')';
95 /* Ssl::PeerConnector::CbDialer API */
96 virtual Security::EncryptorAnswer
&answer() { return answer_
; }
100 CbcPointer
<FwdState
> fwd_
;
101 Security::EncryptorAnswer answer_
;
// Store-entry abort callback (registered in start()): closes any open server
// connection and drops the pending destination list. `d` is the FwdState*.
106 FwdState::abort(void* d
)
108 FwdState
* fwd
= (FwdState
*)d
;
109 Pointer tmp
= fwd
; // Grab a temporary pointer to keep the object alive during our scope.
111 if (Comm::IsConnOpen(fwd
->serverConnection())) {
112 fwd
->closeServerConnection("store entry aborted");
114 debugs(17, 7, HERE
<< "store entry aborted; no connection to close");
// No further forwarding attempts after an abort.
116 fwd
->serverDestinations
.clear();
// Closes serverConn for the given reason: removes our close handler first
// (so fwdServerClosedWrapper does not fire for our own close) and records
// the connection's use count with the pconn pool.
// NOTE(review): the actual close() call is in a line missing from this extraction.
121 FwdState::closeServerConnection(const char *reason
)
123 debugs(17, 3, "because " << reason
<< "; " << serverConn
);
124 comm_remove_close_handler(serverConn
->fd
, fwdServerClosedWrapper
, this);
125 fwdPconnPool
->noteUses(fd_table
[serverConn
->fd
].pconn
.uses
);
129 /**** PUBLIC INTERFACE ********************************************************/
// Constructor: binds this forwarding attempt to the client connection, the
// store entry being filled, and the request. Locks the request, reserves
// destination slots up to forward_max_tries, and marks the entry as waiting
// for forwarded headers (ENTRY_FWD_HDR_WAIT).
131 FwdState::FwdState(const Comm::ConnectionPointer
&client
, StoreEntry
* e
, HttpRequest
* r
, const AccessLogEntryPointer
&alp
):
137 start_t(squid_curtime
),
139 pconnRace(raceImpossible
)
141 debugs(17, 2, "Forwarding client request " << client
<< ", url=" << e
->url());
142 HTTPMSGLOCK(request
);
143 serverDestinations
.reserve(Config
.forward_max_tries
);
145 EBIT_SET(e
->flags
, ENTRY_FWD_HDR_WAIT
);
146 flags
.connected_okay
= false;
147 flags
.dont_retry
= false;
148 flags
.forward_completed
= false;
149 debugs(17, 3, "FwdState constructed, this=" << this);
152 // Called once, right after object creation, when it is safe to set self
153 void FwdState::start(Pointer aSelf
)
155 // Protect ourselves from being destroyed when the only Server pointing
156 // to us is gone (while we expect to talk to more Servers later).
157 // Once we set self, we are responsible for clearing it when we do not
158 // expect to talk to any servers.
159 self
= aSelf
; // refcounted
161 // We hope that either the store entry aborts or peer is selected.
162 // Otherwise we are going to leak our object.
164 entry
->registerAbort(FwdState::abort
, this);
166 #if STRICT_ORIGINAL_DST
167 // Bug 3243: CVE 2009-0801
168 // Bypass of browser same-origin access control in intercepted communication
169 // To resolve this we must force DIRECT and only to the original client destination.
170 const bool isIntercepted
= request
&& !request
->flags
.redirected
&& (request
->flags
.intercepted
|| request
->flags
.interceptTproxy
);
171 const bool useOriginalDst
= Config
.onoff
.client_dst_passthru
|| (request
&& !request
->flags
.hostVerified
);
172 if (isIntercepted
&& useOriginalDst
) {
173 selectPeerForIntercepted();
174 // 3.2 does not support re-wrapping inside CONNECT.
175 // our only alternative is to fake destination "found" and continue with the forwarding.
176 startConnectionOrFail();
// Normal (non-intercepted) path: asynchronous peer selection fills
// serverDestinations, then fwdPeerSelectionCompleteWrapper resumes us.
181 // do full route options selection
182 peerSelect(&serverDestinations
, request
, al
, entry
, fwdPeerSelectionCompleteWrapper
, this);
185 #if STRICT_ORIGINAL_DST
186 /// bypasses peerSelect() when dealing with intercepted requests
// Destination policy: (1) a validated pinned connection wins outright;
// (2) an invalid pinned connection is a hard failure (503); otherwise
// (3) forward to the client's original destination address (ORIGINAL_DST).
188 FwdState::selectPeerForIntercepted()
190 // use pinned connection if available
191 Comm::ConnectionPointer p
;
192 if (ConnStateData
*client
= request
->pinnedConnection()) {
193 p
= client
->validatePinnedConnection(request
, NULL
);
194 if (Comm::IsConnOpen(p
)) {
195 /* duplicate peerSelectPinned() effects */
196 p
->peerType
= PINNED
;
197 entry
->ping_status
= PING_DONE
; /* Skip ICP */
199 debugs(17, 3, "reusing a pinned conn: " << *p
);
200 serverDestinations
.push_back(p
);
202 debugs(17,2, "Pinned connection is not valid: " << p
);
203 ErrorState
*anErr
= new ErrorState(ERR_ZERO_SIZE_OBJECT
, Http::scServiceUnavailable
, request
);
206 // Either use the valid pinned connection or fail if it is invalid.
210 // use client original destination as second preferred choice
211 p
= new Comm::Connection();
212 p
->peerType
= ORIGINAL_DST
;
213 p
->remote
= clientConn
->local
;
214 getOutgoingAddress(request
, p
);
216 debugs(17, 3, HERE
<< "using client original destination: " << *p
);
217 serverDestinations
.push_back(p
);
// Final bookkeeping for a forwarding attempt: runs once (guarded by
// flags.forward_completed), stops the peer timing clock, and finishes the
// store entry — appending an error reply if the entry is still empty.
222 FwdState::completed()
224 if (flags
.forward_completed
) {
225 debugs(17, DBG_IMPORTANT
, HERE
<< "FwdState::completed called on a completed request! Bad!");
229 flags
.forward_completed
= true;
231 request
->hier
.stopPeerClock(false);
233 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
234 debugs(17, 3, HERE
<< "entry aborted");
238 #if URL_CHECKSUM_DEBUG
240 entry
->mem_obj
->checkUrlChecksum();
243 if (entry
->store_status
== STORE_PENDING
) {
244 if (entry
->isEmpty()) {
245 if (!err
) // we quit (e.g., fd closed) before an error or content
246 fail(new ErrorState(ERR_READ_ERROR
, Http::scBadGateway
, request
));
248 errorAppendEntry(entry
, err
);
// An ssl-peek transaction that got no server connection must still
// notify the client-side job so it does not wait forever.
251 if (request
->flags
.sslPeek
&& request
->clientConnectionManager
.valid()) {
252 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
253 ConnStateData::httpsPeeked
, Comm::ConnectionPointer(NULL
));
257 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
259 entry
->releaseRequest();
263 if (storePendingNClients(entry
) > 0)
264 assert(!EBIT_TEST(entry
->flags
, ENTRY_FWD_HDR_WAIT
));
// Destructor: finishes the attempt if needed, unlocks the request and entry,
// cancels any pending connect callback, closes an open server connection,
// and clears remaining destinations.
268 FwdState::~FwdState()
270 debugs(17, 3, "FwdState destructor start");
272 if (! flags
.forward_completed
)
277 HTTPMSGUNLOCK(request
);
281 entry
->unregisterAbort();
283 entry
->unlock("FwdState");
// Cancel an in-flight ConnOpener callback so it cannot dial into a dead object.
287 if (calls
.connector
!= NULL
) {
288 calls
.connector
->cancel("FwdState destructed");
289 calls
.connector
= NULL
;
292 if (Comm::IsConnOpen(serverConn
))
293 closeServerConnection("~FwdState");
295 serverDestinations
.clear();
297 debugs(17, 3, "FwdState destructed, this=" << this);
301 * This is the entry point for client-side to start forwarding
302 * a transaction. It is a static method that may or may not
303 * allocate a FwdState.
306 FwdState::Start(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
, const AccessLogEntryPointer
&al
)
309 * client_addr == no_addr indicates this is an "internal" request
310 * from peer_digest.c, asn.c, netdb.c, etc and should always
311 * be allowed. yuck, I know.
// Enforce miss_access: external, non-internal requests may be forbidden
// from fetching misses through us.
314 if ( Config
.accessList
.miss
&& !request
->client_addr
.isNoAddr() &&
315 !request
->flags
.internal
&& request
->url
.getScheme() != AnyP::PROTO_CACHE_OBJECT
) {
317 * Check if this host is allowed to fetch MISSES from us (miss_access).
318 * Intentionally replace the src_addr automatically selected by the checklist code
319 * we do NOT want the indirect client address to be tested here.
321 ACLFilledChecklist
ch(Config
.accessList
.miss
, request
, NULL
);
322 ch
.src_addr
= request
->client_addr
;
323 if (ch
.fastCheck() == ACCESS_DENIED
) {
// Use the deny_info page configured for the matched ACL, else the default.
325 page_id
= aclGetDenyInfoPage(&Config
.denyInfoList
, AclMatchedName
, 1);
327 if (page_id
== ERR_NONE
)
328 page_id
= ERR_FORWARDING_DENIED
;
330 ErrorState
*anErr
= new ErrorState(page_id
, Http::scForbidden
, request
);
331 errorAppendEntry(entry
, anErr
); // frees anErr
336 debugs(17, 3, HERE
<< "'" << entry
->url() << "'");
338 * This seems like an odd place to bind mem_obj and request.
339 * Might want to assert that request is NULL at this point
341 entry
->mem_obj
->request
= request
;
342 HTTPMSGLOCK(entry
->mem_obj
->request
);
343 #if URL_CHECKSUM_DEBUG
345 entry
->mem_obj
->checkUrlChecksum();
// While shutting down, refuse new forwarding with a 503.
350 ErrorState
*anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
);
351 errorAppendEntry(entry
, anErr
); // frees anErr
// Internal requests, cache-manager and URN schemes are dispatched to
// dedicated handlers and never allocate a FwdState.
355 if (request
->flags
.internal
) {
356 debugs(17, 2, "calling internalStart() due to request flag");
357 internalStart(clientConn
, request
, entry
);
361 switch (request
->url
.getScheme()) {
363 case AnyP::PROTO_CACHE_OBJECT
:
364 debugs(17, 2, "calling CacheManager due to request scheme " << request
->url
.getScheme());
365 CacheManager::GetInstance()->Start(clientConn
, request
, entry
);
368 case AnyP::PROTO_URN
:
369 urnStart(request
, entry
);
// Everything else: allocate a refcounted FwdState and begin forwarding.
373 FwdState::Pointer fwd
= new FwdState(clientConn
, entry
, request
, al
);
// Legacy convenience wrapper around Start() for callers without an
// AccessLogEntry (passes a NULL ALE).
382 FwdState::fwdStart(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
)
384 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
385 Start(clientConn
, entry
, request
, NULL
);
// If any destinations remain, note the top one in the hierarchy log and try
// to connect to it; otherwise give up: record ERR_CANNOT_FORWARD (unless a
// prior attempt already produced an error) and drop self (ends our lifetime).
389 FwdState::startConnectionOrFail()
391 debugs(17, 3, HERE
<< entry
->url());
393 if (serverDestinations
.size() > 0) {
394 // Ditch error page if it was created before.
395 // A new one will be created if there's another problem
399 // Update the logging information about this new server connection.
400 // Done here before anything else so the errors get logged for
401 // this server link regardless of what happens when connecting to it.
402 // IF successfully connected this top destination will become the serverConnection().
403 request
->hier
.note(serverDestinations
[0], request
->url
.host());
404 request
->clearError();
408 debugs(17, 3, HERE
<< "Connection failed: " << entry
->url());
410 ErrorState
*anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scInternalServerError
, request
);
412 } // else use actual error from last connection attempt
413 self
= NULL
; // refcounted
// Records errorState as the current forwarding error (takes ownership).
// A zero-size reply may signal a pconn race (retry same destination) or,
// on a pinned connection, a fatal condition propagated to the client.
418 FwdState::fail(ErrorState
* errorState
)
420 debugs(17, 3, err_type_str
[errorState
->type
] << " \"" << Http::StatusCodeString(errorState
->httpStatus
) << "\"\n\t" << entry
->url());
425 if (!errorState
->request
) {
426 errorState
->request
= request
;
427 HTTPMSGLOCK(errorState
->request
);
430 if (err
->type
!= ERR_ZERO_SIZE_OBJECT
)
433 if (pconnRace
== racePossible
) {
434 debugs(17, 5, HERE
<< "pconn race happened");
435 pconnRace
= raceHappened
;
438 if (ConnStateData
*pinned_connection
= request
->pinnedConnection()) {
439 pinned_connection
->pinning
.zeroReply
= true;
440 flags
.dont_retry
= true; // we want to propagate failure to the client
441 debugs(17, 4, "zero reply on pinned connection");
446 * Frees fwdState without closing FD or generating an abort
// Detaches this FwdState from an open server connection: asserts it is our
// current serverConnection and removes the close handler. The caller keeps
// the connection.
449 FwdState::unregister(Comm::ConnectionPointer
&conn
)
451 debugs(17, 3, HERE
<< entry
->url() );
452 assert(serverConnection() == conn
);
453 assert(Comm::IsConnOpen(conn
));
454 comm_remove_close_handler(conn
->fd
, fwdServerClosedWrapper
, this);
458 // \deprecated use unregister(Comm::ConnectionPointer &conn) instead
// FD-based compatibility shim: validates fd matches serverConnection()
// then delegates to the connection-based overload.
460 FwdState::unregister(int fd
)
462 debugs(17, 3, HERE
<< entry
->url() );
463 assert(fd
== serverConnection()->fd
);
464 unregister(serverConn
);
468 * FooClient modules call fwdComplete() when they are done
469 * downloading an object. Then, we either 1) re-forward the
470 * request somewhere else if needed, or 2) call storeComplete()
476 debugs(17, 3, HERE
<< entry
->url() << "\n\tstatus " << entry
->getReply()->sline
.status());
477 #if URL_CHECKSUM_DEBUG
479 entry
->mem_obj
->checkUrlChecksum();
// Count this reply status against the current try number for fwdStats().
482 logReplyStatus(n_tries
, entry
->getReply()->sline
.status());
// Re-forward path: detach from the current server and try the next
// destination in the list.
485 debugs(17, 3, HERE
<< "re-forwarding " << entry
->getReply()->sline
.status() << " " << entry
->url());
487 if (Comm::IsConnOpen(serverConn
))
488 unregister(serverConn
);
492 // drop the last path off the selection list. try the next one.
493 serverDestinations
.erase(serverDestinations
.begin());
494 startConnectionOrFail();
// Done path: stop waiting for forwarded headers and release self.
497 if (Comm::IsConnOpen(serverConn
))
498 debugs(17, 3, HERE
<< "server FD " << serverConnection()->fd
<< " not re-forwarding status " << entry
->getReply()->sline
.status());
500 debugs(17, 3, HERE
<< "server (FD closed) not re-forwarding status " << entry
->getReply()->sline
.status());
501 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
504 if (!Comm::IsConnOpen(serverConn
))
507 self
= NULL
; // refcounted
511 /**** CALLBACK WRAPPERS ************************************************************/
// C-style peer-selection callback: `data` is the FwdState*; resumes
// forwarding once destinations (and/or an error) are available.
514 fwdPeerSelectionCompleteWrapper(Comm::ConnectionList
*, ErrorState
*err
, void *data
)
516 FwdState
*fwd
= (FwdState
*) data
;
519 fwd
->startConnectionOrFail();
// Close-handler trampoline: forwards the closed FD to
// FwdState::serverClosed(). params.data carries the FwdState*.
523 fwdServerClosedWrapper(const CommCloseCbParams
&params
)
525 FwdState
*fwd
= (FwdState
*)params
.data
;
526 fwd
->serverClosed(params
.fd
);
// ConnOpener completion trampoline: forwards connection, status and errno
// to FwdState::connectDone(). `data` carries the FwdState*.
530 fwdConnectDoneWrapper(const Comm::ConnectionPointer
&conn
, Comm::Flag status
, int xerrno
, void *data
)
532 FwdState
*fwd
= (FwdState
*) data
;
533 fwd
->connectDone(conn
, status
, xerrno
);
536 /**** PRIVATE *****************************************************************/
539 * FwdState::checkRetry
541 * Return TRUE if the request SHOULD be retried. This method is
542 * called when the HTTP connection fails, or when the connection
543 * is closed before reading the end of HTTP headers from the server.
546 FwdState::checkRetry()
// Each guard below vetoes the retry; visible conditions: earlier abort,
// entry no longer pending or already has content, try/time budget spent,
// dont_retry flag, request body already nibbled, or a non-retriable method.
551 if (!self
) { // we have aborted before the server called us back
552 debugs(17, 5, HERE
<< "not retrying because of earlier abort");
553 // we will be destroyed when the server clears its Pointer to us
557 if (entry
->store_status
!= STORE_PENDING
)
560 if (!entry
->isEmpty())
563 if (n_tries
> Config
.forward_max_tries
)
566 if (squid_curtime
- start_t
> Config
.Timeout
.forward
)
569 if (flags
.dont_retry
)
572 if (request
->bodyNibbled())
575 // NP: not yet actually connected anywhere. retry is safe.
576 if (!flags
.connected_okay
)
579 if (!checkRetriable())
586 * FwdState::checkRetriable
588 * Return TRUE if this is the kind of request that can be retried
589 * after a failure. If the request is not retriable then we don't
590 * want to risk sending it on a persistent connection. Instead we'll
591 * force it to go on a new HTTP connection.
594 FwdState::checkRetriable()
596 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
597 // complicated] code required to protect the PUT request body from being
598 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
599 if (request
->body_pipe
!= NULL
)
602 // RFC2616 9.1 Safe and Idempotent Methods
603 return (request
->method
.isHttpSafe() || request
->method
.isIdempotent());
// Invoked (via fwdServerClosedWrapper) when the server connection closes:
// records the pconn use count, then (in code missing from this extraction)
// decides whether to retry or bail.
607 FwdState::serverClosed(int fd
)
609 // XXX: fd is often -1 here
610 debugs(17, 2, "FD " << fd
<< " " << entry
->url() << " after " <<
611 (fd
>= 0 ? fd_table
[fd
].pconn
.uses
: -1) << " requests");
612 if (fd
>= 0 && serverConnection()->fd
== fd
)
613 fwdPconnPool
->noteUses(fd_table
[fd
].pconn
.uses
);
// After a failed attempt: retry (same destination after a pconn race,
// otherwise the next destination) or bail — stop the peer clock, append a
// shutdown error to an empty entry if applicable, and release self.
618 FwdState::retryOrBail()
621 debugs(17, 3, HERE
<< "re-forwarding (" << n_tries
<< " tries, " << (squid_curtime
- start_t
) << " secs)");
622 // we should retry the same destination if it failed due to pconn race
623 if (pconnRace
== raceHappened
)
624 debugs(17, 4, HERE
<< "retrying the same destination");
626 serverDestinations
.erase(serverDestinations
.begin()); // last one failed. try another.
627 startConnectionOrFail();
631 // TODO: should we call completed() here and move doneWithRetries there?
634 request
->hier
.stopPeerClock(false);
636 if (self
!= NULL
&& !err
&& shutting_down
&& entry
->isEmpty()) {
637 ErrorState
*anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
);
638 errorAppendEntry(entry
, anErr
);
641 self
= NULL
; // refcounted
644 // If the Server quits before nibbling at the request body, the body sender
645 // will not know (so that we can retry). Call this if we will not retry. We
646 // will notify the sender so that it does not get stuck waiting for space.
648 FwdState::doneWithRetries()
650 if (request
&& request
->body_pipe
!= NULL
)
651 request
->body_pipe
->expectNoConsumption();
654 // called by the server that failed after calling unregister()
// Sanity: the server connection must already be closed/detached here.
656 FwdState::handleUnregisteredServerEnd()
658 debugs(17, 2, HERE
<< "self=" << self
<< " err=" << err
<< ' ' << entry
->url());
659 assert(!Comm::IsConnOpen(serverConn
));
// ConnOpener result handler. On failure: build an error (gateway-timeout or
// 503 per makeConnectingError), record errno, and mark the peer down. On
// success: install the close handler, credit the peer, and — when TLS to the
// peer/origin is required or this is an ssl-peek — start a Ssl::PeerConnector
// before proceeding; otherwise run the post-connect step immediately.
664 FwdState::connectDone(const Comm::ConnectionPointer
&conn
, Comm::Flag status
, int xerrno
)
666 if (status
!= Comm::OK
) {
667 ErrorState
*const anErr
= makeConnectingError(ERR_CONNECT_FAIL
);
668 anErr
->xerrno
= xerrno
;
671 /* it might have been a timeout with a partially open link */
674 peerConnectFailed(conn
->getPeer());
683 flags
.connected_okay
= true;
685 debugs(17, 3, HERE
<< serverConnection() << ": '" << entry
->url() << "'" );
687 comm_add_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
689 if (serverConnection()->getPeer())
690 peerConnectSucceded(serverConnection()->getPeer());
// Decide whether this hop needs TLS: an encrypted peer (unless the user
// tunnels TLS themselves via CONNECT to an originserver peer), a direct
// https:// origin, or an ssl-peek transaction.
693 if (!request
->flags
.pinned
) {
694 const CachePeer
*p
= serverConnection()->getPeer();
695 const bool peerWantsTls
= p
&& p
->secure
.encryptTransport
;
696 // userWillSslToPeerForUs assumes CONNECT == HTTPS
697 const bool userWillTlsToPeerForUs
= p
&& p
->options
.originserver
&&
698 request
->method
== Http::METHOD_CONNECT
;
699 const bool needTlsToPeer
= peerWantsTls
&& !userWillTlsToPeerForUs
;
700 const bool needTlsToOrigin
= !p
&& request
->url
.getScheme() == AnyP::PROTO_HTTPS
;
701 if (needTlsToPeer
|| needTlsToOrigin
|| request
->flags
.sslPeek
) {
702 HttpRequest::Pointer requestPointer
= request
;
703 AsyncCall::Pointer callback
= asyncCall(17,4,
704 "FwdState::ConnectedToPeer",
705 FwdStatePeerAnswerDialer(&FwdState::connectedToPeer
, this));
706 // Use positive timeout when less than one second is left.
707 const time_t sslNegotiationTimeout
= max(static_cast<time_t>(1), timeLeft());
708 Ssl::PeerConnector
*connector
;
709 if (request
->clientConnectionManager
->connectionless()) {
710 // It is an internal request, no client connection
711 // does not make sense to peek and splice and/or bump.
712 connector
= new Ssl::BlindPeerConnector(requestPointer
, serverConnection(), callback
, sslNegotiationTimeout
);
714 connector
= new Ssl::PeekingPeerConnector(requestPointer
, serverConnection(), clientConn
, callback
, sslNegotiationTimeout
);
716 AsyncJob::Start(connector
); // will call our callback
722 // if not encrypting just run the post-connect actions
723 Security::EncryptorAnswer nil
;
724 connectedToPeer(nil
);
// Post-(optional-)TLS step: on encryptor error, keep it for the client error
// path; on success, notify the client-side ConnStateData of the new server
// connection before dispatching.
728 FwdState::connectedToPeer(Security::EncryptorAnswer
&answer
)
730 if (ErrorState
*error
= answer
.error
.get()) {
732 answer
.error
.clear(); // preserve error for errorSendComplete()
737 // should reach ConnStateData before the dispatched Client job starts
738 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
739 ConnStateData::notePeerConnection
, serverConnection());
// Connect-phase timeout handler for the current (front) destination:
// records a 504 ETIMEDOUT error if nothing was received yet, marks the
// peer down, and closes the half-open connection.
745 FwdState::connectTimeout(int fd
)
747 debugs(17, 2, "fwdConnectTimeout: FD " << fd
<< ": '" << entry
->url() << "'" );
748 assert(serverDestinations
[0] != NULL
);
749 assert(fd
== serverDestinations
[0]->fd
);
751 if (entry
->isEmpty()) {
752 ErrorState
*anErr
= new ErrorState(ERR_CONNECT_FAIL
, Http::scGatewayTimeout
, request
);
753 anErr
->xerrno
= ETIMEDOUT
;
756 /* This marks the peer DOWN ... */
757 if (serverDestinations
[0]->getPeer())
758 peerConnectFailed(serverDestinations
[0]->getPeer());
761 if (Comm::IsConnOpen(serverDestinations
[0])) {
762 serverDestinations
[0]->close();
// Remaining time budget for the next connect attempt: the per-destination
// connect timeout (peer-specific, peer default, or global connect timeout),
// capped by what is left of the overall forward timeout.
767 FwdState::timeLeft() const
769 /* connection timeout */
771 if (serverDestinations
[0]->getPeer()) {
772 ctimeout
= serverDestinations
[0]->getPeer()->connect_timeout
> 0 ?
773 serverDestinations
[0]->getPeer()->connect_timeout
: Config
.Timeout
.peer_connect
;
775 ctimeout
= Config
.Timeout
.connect
;
778 /* calculate total forwarding timeout ??? */
779 int ftimeout
= Config
.Timeout
.forward
- (squid_curtime
- start_t
);
783 if (ftimeout
< ctimeout
)
784 return (time_t)ftimeout
;
786 return (time_t)ctimeout
;
790 * Called after forwarding path selection (via peer select) has taken place
791 * and whenever forwarding needs to attempt a new connection (routing failover).
792 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
795 FwdState::connectStart()
797 assert(serverDestinations
.size() > 0);
799 debugs(17, 3, "fwdConnectStart: " << entry
->url());
801 request
->hier
.startPeerClock();
803 // Do not forward bumped connections to parent proxy unless it is an
// originserver peer: bumped traffic through an ordinary parent is refused.
805 if (serverDestinations
[0]->getPeer() && !serverDestinations
[0]->getPeer()->options
.originserver
&& request
->flags
.sslBumped
) {
806 debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parent proxy are not allowed");
807 ErrorState
*anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
);
809 self
= NULL
; // refcounted
// Pinned-destination path: borrow the client's pinned server connection.
813 request
->flags
.pinned
= false; // XXX: what if the ConnStateData set this to flag existing credentials?
814 // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
815 // XXX: also, logs will now lie if pinning is broken and leads to an error message.
816 if (serverDestinations
[0]->peerType
== PINNED
) {
817 ConnStateData
*pinned_connection
= request
->pinnedConnection();
818 debugs(17,7, "pinned peer connection: " << pinned_connection
);
819 // pinned_connection may become nil after a pconn race
820 if (pinned_connection
)
821 serverConn
= pinned_connection
->borrowPinnedConnection(request
, serverDestinations
[0]->getPeer());
824 if (Comm::IsConnOpen(serverConn
)) {
825 pinned_connection
->stopPinnedConnectionMonitoring();
826 flags
.connected_okay
= true;
828 request
->flags
.pinned
= true;
829 request
->hier
.note(serverConn
, pinned_connection
->pinning
.host
);
830 if (pinned_connection
->pinnedAuth())
831 request
->flags
.auth
= true;
832 comm_add_close_handler(serverConn
->fd
, fwdServerClosedWrapper
, this);
834 /* Update server side TOS and Netfilter mark on the connection. */
835 if (Ip::Qos::TheConfig
.isAclTosActive()) {
836 debugs(17, 3, HERE
<< "setting tos for pinned connection to " << (int)serverConn
->tos
);
837 serverConn
->tos
= GetTosToServer(request
);
838 Ip::Qos::setSockTos(serverConn
, serverConn
->tos
);
841 if (Ip::Qos::TheConfig
.isAclNfmarkActive()) {
842 serverConn
->nfmark
= GetNfmarkToServer(request
);
843 Ip::Qos::setSockNfmark(serverConn
, serverConn
->nfmark
);
847 // the server may close the pinned connection before this request
848 pconnRace
= racePossible
;
852 // Pinned connection failure.
853 debugs(17,2,HERE
<< "Pinned connection failed: " << pinned_connection
);
854 ErrorState
*anErr
= new ErrorState(ERR_ZERO_SIZE_OBJECT
, Http::scServiceUnavailable
, request
);
856 self
= NULL
; // refcounted
// Persistent-connection path: pop an idle pconn keyed by destination and,
// for direct traffic, the origin host name.
860 // Use pconn to avoid opening a new connection.
861 const char *host
= NULL
;
862 if (!serverDestinations
[0]->getPeer())
863 host
= request
->url
.host();
865 Comm::ConnectionPointer temp
;
866 // Avoid pconns after races so that the same client does not suffer twice.
867 // This does not increase the total number of connections because we just
868 // closed the connection that failed the race. And re-pinning assumes this.
869 if (pconnRace
!= raceHappened
)
870 temp
= pconnPop(serverDestinations
[0], host
);
872 const bool openedPconn
= Comm::IsConnOpen(temp
);
873 pconnRace
= openedPconn
? racePossible
: raceImpossible
;
875 // if we found an open persistent connection to use. use it.
878 flags
.connected_okay
= true;
879 debugs(17, 3, HERE
<< "reusing pconn " << serverConnection());
882 comm_add_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
884 /* Update server side TOS and Netfilter mark on the connection. */
885 if (Ip::Qos::TheConfig
.isAclTosActive()) {
886 const tos_t tos
= GetTosToServer(request
);
887 Ip::Qos::setSockTos(temp
, tos
);
890 if (Ip::Qos::TheConfig
.isAclNfmarkActive()) {
891 const nfmark_t nfmark
= GetNfmarkToServer(request
);
892 Ip::Qos::setSockNfmark(temp
, nfmark
);
// Fresh-connection path: reset the (possibly reused) destination and hand
// it to a ConnOpener with our timeLeft() budget; fwdConnectDoneWrapper
// resumes us in connectDone().
900 // We will try to open a new connection, possibly to the same destination.
901 // We reset serverDestinations[0] in case we are using it again because
902 // ConnOpener modifies its destination argument.
903 serverDestinations
[0]->local
.port(0);
906 #if URL_CHECKSUM_DEBUG
907 entry
->mem_obj
->checkUrlChecksum();
910 GetMarkingsToServer(request
, *serverDestinations
[0]);
912 calls
.connector
= commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper
, this));
913 Comm::ConnOpener
*cs
= new Comm::ConnOpener(serverDestinations
[0], calls
.connector
, timeLeft());
// Dispatch body (NOTE(review): the FwdState::dispatch() signature line is
// missing from this extraction). With a connected server, it marks the entry
// dispatched, pings netdb, propagates QoS TOS/nfmark info to the client FD,
// handles ssl-peek handoff, records peer stats/credentials, and routes the
// request to the per-scheme client module (HTTP(S), FTP, gopher, whois, ...).
922 debugs(17, 3, clientConn
<< ": Fetching " << request
->method
<< ' ' << entry
->url());
924 * Assert that server_fd is set. This is to guarantee that fwdState
925 * is attached to something and will be deallocated when server_fd
928 assert(Comm::IsConnOpen(serverConn
));
930 fd_note(serverConnection()->fd
, entry
->url());
932 fd_table
[serverConnection()->fd
].noteUse();
934 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
935 assert(entry
->ping_status
!= PING_WAITING
);
937 assert(entry
->locked());
939 EBIT_SET(entry
->flags
, ENTRY_DISPATCHED
);
941 netdbPingSite(request
->url
.host());
943 /* Retrieves remote server TOS or MARK value, and stores it as part of the
944 * original client request FD object. It is later used to forward
945 * remote server's TOS/MARK in the response to the client in case of a MISS.
947 if (Ip::Qos::TheConfig
.isHitNfmarkActive()) {
948 if (Comm::IsConnOpen(clientConn
) && Comm::IsConnOpen(serverConnection())) {
949 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
950 /* Get the netfilter mark for the connection */
951 Ip::Qos::getNfmarkFromServer(serverConnection(), clientFde
);
956 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
957 if (Ip::Qos::TheConfig
.isHitTosActive()) {
958 if (Comm::IsConnOpen(clientConn
)) {
959 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
960 /* Get the TOS value for the packet */
961 Ip::Qos::getTosFromServer(serverConnection(), clientFde
);
// ssl-peek: hand the server connection to the client-side job and finish.
967 if (request
->flags
.sslPeek
) {
968 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
969 ConnStateData::httpsPeeked
, serverConnection());
970 unregister(serverConn
); // async call owns it now
971 complete(); // destroys us
976 if (serverConnection()->getPeer() != NULL
) {
977 ++ serverConnection()->getPeer()->stats
.fetches
;
978 request
->peer_login
= serverConnection()->getPeer()->login
;
979 request
->peer_domain
= serverConnection()->getPeer()->domain
;
982 assert(!request
->flags
.sslPeek
);
983 request
->peer_login
= NULL
;
984 request
->peer_domain
= NULL
;
986 switch (request
->url
.getScheme()) {
989 case AnyP::PROTO_HTTPS
:
994 case AnyP::PROTO_HTTP
:
998 case AnyP::PROTO_GOPHER
:
1002 case AnyP::PROTO_FTP
:
1003 if (request
->flags
.ftpNative
)
1004 Ftp::StartRelay(this);
1006 Ftp::StartGateway(this);
// cache_object and URN schemes were handled earlier in Start(); reaching
// them here is a logic error.
1009 case AnyP::PROTO_CACHE_OBJECT
:
1011 case AnyP::PROTO_URN
:
1012 fatal_dump("Should never get here");
1015 case AnyP::PROTO_WHOIS
:
1019 case AnyP::PROTO_WAIS
: /* Not implemented */
1022 debugs(17, DBG_IMPORTANT
, "WARNING: Cannot retrieve '" << entry
->url() << "'.");
1023 ErrorState
*anErr
= new ErrorState(ERR_UNSUP_REQ
, Http::scBadRequest
, request
);
1025 // Set the dont_retry flag because this is not a transient (network) error.
1026 flags
.dont_retry
= true;
1027 if (Comm::IsConnOpen(serverConn
)) {
1028 serverConn
->close();
1036 * FwdState::reforward
1038 * returns TRUE if the transaction SHOULD be re-forwarded to the
1039 * next choice in the serverDestinations list. This method is called when
1040 * peer communication completes normally, or experiences
1041 * some error after receiving the end of HTTP headers.
1044 FwdState::reforward()
1046 StoreEntry
*e
= entry
;
1048 if (EBIT_TEST(e
->flags
, ENTRY_ABORTED
)) {
1049 debugs(17, 3, HERE
<< "entry aborted");
1053 assert(e
->store_status
== STORE_PENDING
);
1055 #if URL_CHECKSUM_DEBUG
1057 e
->mem_obj
->checkUrlChecksum();
1060 debugs(17, 3, HERE
<< e
->url() << "?" );
1062 if (!EBIT_TEST(e
->flags
, ENTRY_FWD_HDR_WAIT
)) {
1063 debugs(17, 3, HERE
<< "No, ENTRY_FWD_HDR_WAIT isn't set");
1067 if (n_tries
> Config
.forward_max_tries
)
1070 if (request
->bodyNibbled())
1073 if (serverDestinations
.size() <= 1) {
1074 // NP: <= 1 since total count includes the recently failed one.
1075 debugs(17, 3, HERE
<< "No alternative forwarding paths left");
// Finally, let the reply status decide (see reforwardableStatus()).
1079 const Http::StatusCode s
= e
->getReply()->sline
.status();
1080 debugs(17, 3, HERE
<< "status " << s
);
1081 return reforwardableStatus(s
);
1085 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1086 * on whether this is a validation request. RFC 2616 says that we MUST reply
1087 * with "504 Gateway Timeout" if validation fails and cached reply has
1088 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
1091 FwdState::makeConnectingError(const err_type type
) const
1093 return new ErrorState(type
, request
->flags
.needValidation
?
1094 Http::scGatewayTimeout
: Http::scServiceUnavailable
, request
);
// Cache-manager "forward" action: dumps the FwdReplyCodes matrix — one row
// per HTTP status code seen, one column per try number (1..MAX_FWD_STATS_IDX).
// Rows whose status was never observed (column 0 count == 0) are skipped.
1098 fwdStats(StoreEntry
* s
)
1102 storeAppendPrintf(s
, "Status");
1104 for (j
= 1; j
< MAX_FWD_STATS_IDX
; ++j
) {
1105 storeAppendPrintf(s
, "\ttry#%d", j
);
1108 storeAppendPrintf(s
, "\n");
1110 for (i
= 0; i
<= (int) Http::scInvalidHeader
; ++i
) {
1111 if (FwdReplyCodes
[0][i
] == 0)
1114 storeAppendPrintf(s
, "%3d", i
);
1116 for (j
= 0; j
<= MAX_FWD_STATS_IDX
; ++j
) {
1117 storeAppendPrintf(s
, "\t%d", FwdReplyCodes
[j
][i
]);
1120 storeAppendPrintf(s
, "\n");
1124 /**** STATIC MEMBER FUNCTIONS *************************************************/
// Whether a reply with this status code may be retried at another
// destination. 5xx gateway-style failures (and 403) qualify, gated by the
// retry_on_error directive (Config.retry.onerror).
1127 FwdState::reforwardableStatus(const Http::StatusCode s
) const
1131 case Http::scBadGateway
:
1133 case Http::scGatewayTimeout
:
1136 case Http::scForbidden
:
1138 case Http::scInternalServerError
:
1140 case Http::scNotImplemented
:
1142 case Http::scServiceUnavailable
:
1143 return Config
.retry
.onerror
;
1153 * Decide where details need to be gathered to correctly describe a persistent connection.
1155 * - the address/port details about this link
1156 * - domain name of server at other end of this link (either peer or requested host)
// Peer connections are keyed by address only (NULL domain); direct
// connections are additionally keyed by the origin domain.
1159 FwdState::pconnPush(Comm::ConnectionPointer
&conn
, const char *domain
)
1161 if (conn
->getPeer()) {
1162 fwdPconnPool
->push(conn
, NULL
);
1164 fwdPconnPool
->push(conn
, domain
);
1168 Comm::ConnectionPointer
1169 FwdState::pconnPop(const Comm::ConnectionPointer
&dest
, const char *domain
)
1171 // always call shared pool first because we need to close an idle
1172 // connection there if we have to use a standby connection.
1173 Comm::ConnectionPointer conn
= fwdPconnPool
->pop(dest
, domain
, checkRetriable());
1174 if (!Comm::IsConnOpen(conn
)) {
1175 // either there was no pconn to pop or this is not a retriable xaction
1176 if (CachePeer
*peer
= dest
->getPeer()) {
1177 if (peer
->standby
.pool
)
1178 conn
= peer
->standby
.pool
->pop(dest
, domain
, true);
1181 return conn
; // open, closed, or nil
/// One-time initialization of the request-forwarding module.
void
FwdState::initModule()
{
    RegisterWithCacheManager();
}
/// Registers the "forward" cache manager action, served by fwdStats().
void
FwdState::RegisterWithCacheManager(void)
{
    Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats, 0, 1);
}
1197 FwdState::logReplyStatus(int tries
, const Http::StatusCode status
)
1199 if (status
> Http::scInvalidHeader
)
1204 if (tries
> MAX_FWD_STATS_IDX
)
1205 tries
= MAX_FWD_STATS_IDX
;
1207 ++ FwdReplyCodes
[tries
][status
];
1210 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1214 * Formerly static, but now used by client_side_request.cc
1216 /// Checks for a TOS value to apply depending on the ACL
1218 aclMapTOS(acl_tos
* head
, ACLChecklist
* ch
)
1220 for (acl_tos
*l
= head
; l
; l
= l
->next
) {
1221 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
) == ACCESS_ALLOWED
)
1228 /// Checks for a netfilter mark value to apply depending on the ACL
1230 aclMapNfmark(acl_nfmark
* head
, ACLChecklist
* ch
)
1232 for (acl_nfmark
*l
= head
; l
; l
= l
->next
) {
1233 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
) == ACCESS_ALLOWED
)
/**
 * Selects the local (outgoing) address for a server-side connection.
 * Precedence: an address already set on conn wins; then TPROXY client-IP
 * spoofing (when requested and permitted); then the first matching
 * tcp_outgoing_address ACL entry. Otherwise conn->local is left as the
 * protocol-matched wildcard.
 */
void
getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn)
{
    // skip if an outgoing address is already set.
    if (!conn->local.isAnyAddr()) return;

    // ensure that at minimum the wildcard local matches remote protocol
    if (conn->remote.isIPv4())
        conn->local.setIPv4();

    // maybe use TPROXY client address
    if (request && request->flags.spoofClientIp) {
        // spoofing is skipped when the selected peer disables it (no_tproxy)
        if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
#if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
            if (Config.onoff.tproxy_uses_indirect_client)
                conn->local = request->indirect_client_addr;
            else
#endif
                conn->local = request->client_addr;
            // some flags need setting on the socket to use this address
            conn->flags |= COMM_DOBIND;
            conn->flags |= COMM_TRANSPARENT;
            return;
        }
        // else no tproxy today ...
    }

    if (!Config.accessList.outgoing_address) {
        return; // anything will do.
    }

    ACLFilledChecklist ch(NULL, request, NULL);
    ch.dst_peer_name = conn->getPeer() ? conn->getPeer()->name : NULL;
    ch.dst_addr = conn->remote;

    // TODO use the connection details in ACL.
    // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData

    for (Acl::Address *l = Config.accessList.outgoing_address; l; l = l->next) {
        /* check if the outgoing address is usable to the destination */
        if (conn->remote.isIPv4() != l->addr.isIPv4()) continue;

        /* check ACLs for this outgoing address */
        if (!l->aclList || ch.fastCheck(l->aclList) == ACCESS_ALLOWED) {
            conn->local = l->addr;
            return;
        }
    }
}
/// Computes the TOS value to set on the server-side connection for this
/// request by evaluating the configured tosToServer ACL list (0 if no match).
tos_t
GetTosToServer(HttpRequest * request)
{
    ACLFilledChecklist ch(NULL, request, NULL);
    return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
}
/// Computes the netfilter mark to set on the server-side connection for this
/// request by evaluating the configured nfmarkToServer ACL list (0 if no match).
nfmark_t
GetNfmarkToServer(HttpRequest * request)
{
    ACLFilledChecklist ch(NULL, request, NULL);
    return aclMapNfmark(Ip::Qos::TheConfig.nfmarkToServer, &ch);
}
1306 GetMarkingsToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1308 // Get the server side TOS and Netfilter mark to be set on the connection.
1309 if (Ip::Qos::TheConfig
.isAclTosActive()) {
1310 conn
.tos
= GetTosToServer(request
);
1311 debugs(17, 3, "from " << conn
.local
<< " tos " << int(conn
.tos
));
1314 #if SO_MARK && USE_LIBCAP
1315 conn
.nfmark
= GetNfmarkToServer(request
);
1316 debugs(17, 3, "from " << conn
.local
<< " netfilter mark " << conn
.nfmark
);