2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 17 Request Forwarding */
12 #include "AccessLogEntry.h"
13 #include "acl/AclAddress.h"
14 #include "acl/FilledChecklist.h"
15 #include "acl/Gadgets.h"
16 #include "anyp/PortCfg.h"
17 #include "CacheManager.h"
18 #include "CachePeer.h"
19 #include "client_side.h"
20 #include "clients/forward.h"
21 #include "comm/Connection.h"
22 #include "comm/ConnOpener.h"
23 #include "comm/Loops.h"
24 #include "CommCalls.h"
25 #include "errorpage.h"
32 #include "hier_code.h"
34 #include "HttpReply.h"
35 #include "HttpRequest.h"
36 #include "icmp/net_db.h"
38 #include "ip/Intercept.h"
39 #include "ip/QosConfig.h"
41 #include "MemObject.h"
42 #include "mgr/Registration.h"
43 #include "neighbors.h"
45 #include "PeerPoolMgr.h"
46 #include "PeerSelectState.h"
47 #include "SquidConfig.h"
48 #include "SquidTime.h"
50 #include "StoreClient.h"
54 #include "ssl/cert_validate_message.h"
55 #include "ssl/Config.h"
56 #include "ssl/ErrorDetail.h"
57 #include "ssl/helper.h"
58 #include "ssl/PeerConnector.h"
59 #include "ssl/ServerBump.h"
60 #include "ssl/support.h"
65 static PSC fwdPeerSelectionCompleteWrapper
;
66 static CLCB fwdServerClosedWrapper
;
67 static CNCB fwdConnectDoneWrapper
;
71 #define MAX_FWD_STATS_IDX 9
72 static int FwdReplyCodes
[MAX_FWD_STATS_IDX
+ 1][Http::scInvalidHeader
+ 1];
74 static PconnPool
*fwdPconnPool
= new PconnPool("server-peers", NULL
);
75 CBDATA_CLASS_INIT(FwdState
);
78 class FwdStatePeerAnswerDialer
: public CallDialer
, public Ssl::PeerConnector::CbDialer
81 typedef void (FwdState::*Method
)(Ssl::PeerConnectorAnswer
&);
83 FwdStatePeerAnswerDialer(Method method
, FwdState
*fwd
):
84 method_(method
), fwd_(fwd
), answer_() {}
87 virtual bool canDial(AsyncCall
&call
) { return fwd_
.valid(); }
88 void dial(AsyncCall
&call
) { ((&(*fwd_
))->*method_
)(answer_
); }
89 virtual void print(std::ostream
&os
) const {
90 os
<< '(' << fwd_
.get() << ", " << answer_
<< ')';
93 /* Ssl::PeerConnector::CbDialer API */
94 virtual Ssl::PeerConnectorAnswer
&answer() { return answer_
; }
98 CbcPointer
<FwdState
> fwd_
;
99 Ssl::PeerConnectorAnswer answer_
;
104 FwdState::abort(void* d
)
106 FwdState
* fwd
= (FwdState
*)d
;
107 Pointer tmp
= fwd
; // Grab a temporary pointer to keep the object alive during our scope.
109 if (Comm::IsConnOpen(fwd
->serverConnection())) {
110 fwd
->closeServerConnection("store entry aborted");
112 debugs(17, 7, HERE
<< "store entry aborted; no connection to close");
114 fwd
->serverDestinations
.clear();
119 FwdState::closeServerConnection(const char *reason
)
121 debugs(17, 3, "because " << reason
<< "; " << serverConn
);
122 comm_remove_close_handler(serverConn
->fd
, fwdServerClosedWrapper
, this);
123 fwdPconnPool
->noteUses(fd_table
[serverConn
->fd
].pconn
.uses
);
127 /**** PUBLIC INTERFACE ********************************************************/
129 FwdState::FwdState(const Comm::ConnectionPointer
&client
, StoreEntry
* e
, HttpRequest
* r
, const AccessLogEntryPointer
&alp
):
132 debugs(17, 2, HERE
<< "Forwarding client request " << client
<< ", url=" << e
->url() );
136 HTTPMSGLOCK(request
);
137 pconnRace
= raceImpossible
;
138 start_t
= squid_curtime
;
139 serverDestinations
.reserve(Config
.forward_max_tries
);
141 EBIT_SET(e
->flags
, ENTRY_FWD_HDR_WAIT
);
144 // Called once, right after object creation, when it is safe to set self
145 void FwdState::start(Pointer aSelf
)
147 // Protect ourselves from being destroyed when the only Server pointing
148 // to us is gone (while we expect to talk to more Servers later).
149 // Once we set self, we are responsible for clearing it when we do not
150 // expect to talk to any servers.
151 self
= aSelf
; // refcounted
153 // We hope that either the store entry aborts or peer is selected.
154 // Otherwise we are going to leak our object.
156 entry
->registerAbort(FwdState::abort
, this);
158 #if STRICT_ORIGINAL_DST
159 // Bug 3243: CVE 2009-0801
160 // Bypass of browser same-origin access control in intercepted communication
161 // To resolve this we must force DIRECT and only to the original client destination.
162 const bool isIntercepted
= request
&& !request
->flags
.redirected
&& (request
->flags
.intercepted
|| request
->flags
.interceptTproxy
);
163 const bool useOriginalDst
= Config
.onoff
.client_dst_passthru
|| (request
&& !request
->flags
.hostVerified
);
164 if (isIntercepted
&& useOriginalDst
) {
165 selectPeerForIntercepted();
166 // 3.2 does not support re-wrapping inside CONNECT.
167 // our only alternative is to fake destination "found" and continue with the forwarding.
168 startConnectionOrFail();
173 // do full route options selection
174 peerSelect(&serverDestinations
, request
, al
, entry
, fwdPeerSelectionCompleteWrapper
, this);
177 #if STRICT_ORIGINAL_DST
178 /// bypasses peerSelect() when dealing with intercepted requests
180 FwdState::selectPeerForIntercepted()
182 // use pinned connection if available
183 Comm::ConnectionPointer p
;
184 if (ConnStateData
*client
= request
->pinnedConnection()) {
185 p
= client
->validatePinnedConnection(request
, NULL
);
186 if (Comm::IsConnOpen(p
)) {
187 /* duplicate peerSelectPinned() effects */
188 p
->peerType
= PINNED
;
189 entry
->ping_status
= PING_DONE
; /* Skip ICP */
191 debugs(17, 3, "reusing a pinned conn: " << *p
);
192 serverDestinations
.push_back(p
);
194 debugs(17,2, "Pinned connection is not valid: " << p
);
195 ErrorState
*anErr
= new ErrorState(ERR_ZERO_SIZE_OBJECT
, Http::scServiceUnavailable
, request
);
198 // Either use the valid pinned connection or fail if it is invalid.
202 // use client original destination as second preferred choice
203 p
= new Comm::Connection();
204 p
->peerType
= ORIGINAL_DST
;
205 p
->remote
= clientConn
->local
;
206 getOutgoingAddress(request
, p
);
208 debugs(17, 3, HERE
<< "using client original destination: " << *p
);
209 serverDestinations
.push_back(p
);
214 FwdState::completed()
216 if (flags
.forward_completed
) {
217 debugs(17, DBG_IMPORTANT
, HERE
<< "FwdState::completed called on a completed request! Bad!");
221 flags
.forward_completed
= true;
223 request
->hier
.stopPeerClock(false);
225 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
226 debugs(17, 3, HERE
<< "entry aborted");
230 #if URL_CHECKSUM_DEBUG
232 entry
->mem_obj
->checkUrlChecksum();
235 if (entry
->store_status
== STORE_PENDING
) {
236 if (entry
->isEmpty()) {
237 if (!err
) // we quit (e.g., fd closed) before an error or content
238 fail(new ErrorState(ERR_READ_ERROR
, Http::scBadGateway
, request
));
240 errorAppendEntry(entry
, err
);
243 if (request
->flags
.sslPeek
&& request
->clientConnectionManager
.valid()) {
244 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
245 ConnStateData::httpsPeeked
, Comm::ConnectionPointer(NULL
));
249 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
251 entry
->releaseRequest();
255 if (storePendingNClients(entry
) > 0)
256 assert(!EBIT_TEST(entry
->flags
, ENTRY_FWD_HDR_WAIT
));
260 FwdState::~FwdState()
262 debugs(17, 3, HERE
<< "FwdState destructor starting");
264 if (! flags
.forward_completed
)
269 HTTPMSGUNLOCK(request
);
273 entry
->unregisterAbort();
275 entry
->unlock("FwdState");
279 if (calls
.connector
!= NULL
) {
280 calls
.connector
->cancel("FwdState destructed");
281 calls
.connector
= NULL
;
284 if (Comm::IsConnOpen(serverConn
))
285 closeServerConnection("~FwdState");
287 serverDestinations
.clear();
289 debugs(17, 3, HERE
<< "FwdState destructor done");
293 * This is the entry point for client-side to start forwarding
294 * a transaction. It is a static method that may or may not
295 * allocate a FwdState.
298 FwdState::Start(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
, const AccessLogEntryPointer
&al
)
301 * client_addr == no_addr indicates this is an "internal" request
302 * from peer_digest.c, asn.c, netdb.c, etc and should always
303 * be allowed. yuck, I know.
306 if ( Config
.accessList
.miss
&& !request
->client_addr
.isNoAddr() &&
307 !request
->flags
.internal
&& request
->url
.getScheme() != AnyP::PROTO_CACHE_OBJECT
) {
309 * Check if this host is allowed to fetch MISSES from us (miss_access).
310 * Intentionally replace the src_addr automatically selected by the checklist code
311 * we do NOT want the indirect client address to be tested here.
313 ACLFilledChecklist
ch(Config
.accessList
.miss
, request
, NULL
);
314 ch
.src_addr
= request
->client_addr
;
315 if (ch
.fastCheck() == ACCESS_DENIED
) {
317 page_id
= aclGetDenyInfoPage(&Config
.denyInfoList
, AclMatchedName
, 1);
319 if (page_id
== ERR_NONE
)
320 page_id
= ERR_FORWARDING_DENIED
;
322 ErrorState
*anErr
= new ErrorState(page_id
, Http::scForbidden
, request
);
323 errorAppendEntry(entry
, anErr
); // frees anErr
328 debugs(17, 3, HERE
<< "'" << entry
->url() << "'");
330 * This seems like an odd place to bind mem_obj and request.
331 * Might want to assert that request is NULL at this point
333 entry
->mem_obj
->request
= request
;
334 HTTPMSGLOCK(entry
->mem_obj
->request
);
335 #if URL_CHECKSUM_DEBUG
337 entry
->mem_obj
->checkUrlChecksum();
342 ErrorState
*anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
);
343 errorAppendEntry(entry
, anErr
); // frees anErr
347 if (request
->flags
.internal
) {
348 debugs(17, 2, "calling internalStart() due to request flag");
349 internalStart(clientConn
, request
, entry
);
353 switch (request
->url
.getScheme()) {
355 case AnyP::PROTO_CACHE_OBJECT
:
356 debugs(17, 2, "calling CacheManager due to request scheme " << request
->url
.getScheme());
357 CacheManager::GetInstance()->Start(clientConn
, request
, entry
);
360 case AnyP::PROTO_URN
:
361 urnStart(request
, entry
);
365 FwdState::Pointer fwd
= new FwdState(clientConn
, entry
, request
, al
);
374 FwdState::fwdStart(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
)
376 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
377 Start(clientConn
, entry
, request
, NULL
);
381 FwdState::startConnectionOrFail()
383 debugs(17, 3, HERE
<< entry
->url());
385 if (serverDestinations
.size() > 0) {
386 // Ditch error page if it was created before.
387 // A new one will be created if there's another problem
391 // Update the logging information about this new server connection.
392 // Done here before anything else so the errors get logged for
393 // this server link regardless of what happens when connecting to it.
394 // If successfully connected, this top destination will become the serverConnection().
395 request
->hier
.note(serverDestinations
[0], request
->GetHost());
396 request
->clearError();
400 debugs(17, 3, HERE
<< "Connection failed: " << entry
->url());
402 ErrorState
*anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scInternalServerError
, request
);
404 } // else use actual error from last connection attempt
405 self
= NULL
; // refcounted
410 FwdState::fail(ErrorState
* errorState
)
412 debugs(17, 3, err_type_str
[errorState
->type
] << " \"" << Http::StatusCodeString(errorState
->httpStatus
) << "\"\n\t" << entry
->url());
417 if (!errorState
->request
) {
418 errorState
->request
= request
;
419 HTTPMSGLOCK(errorState
->request
);
422 if (err
->type
!= ERR_ZERO_SIZE_OBJECT
)
425 if (pconnRace
== racePossible
) {
426 debugs(17, 5, HERE
<< "pconn race happened");
427 pconnRace
= raceHappened
;
430 if (ConnStateData
*pinned_connection
= request
->pinnedConnection()) {
431 pinned_connection
->pinning
.zeroReply
= true;
432 flags
.dont_retry
= true; // we want to propagate failure to the client
433 debugs(17, 4, "zero reply on pinned connection");
438 * Frees fwdState without closing FD or generating an abort
441 FwdState::unregister(Comm::ConnectionPointer
&conn
)
443 debugs(17, 3, HERE
<< entry
->url() );
444 assert(serverConnection() == conn
);
445 assert(Comm::IsConnOpen(conn
));
446 comm_remove_close_handler(conn
->fd
, fwdServerClosedWrapper
, this);
450 // \deprecated use unregister(Comm::ConnectionPointer &conn) instead
452 FwdState::unregister(int fd
)
454 debugs(17, 3, HERE
<< entry
->url() );
455 assert(fd
== serverConnection()->fd
);
456 unregister(serverConn
);
460 * FooClient modules call fwdComplete() when they are done
461 * downloading an object. Then, we either 1) re-forward the
462 * request somewhere else if needed, or 2) call storeComplete()
468 debugs(17, 3, HERE
<< entry
->url() << "\n\tstatus " << entry
->getReply()->sline
.status());
469 #if URL_CHECKSUM_DEBUG
471 entry
->mem_obj
->checkUrlChecksum();
474 logReplyStatus(n_tries
, entry
->getReply()->sline
.status());
477 debugs(17, 3, HERE
<< "re-forwarding " << entry
->getReply()->sline
.status() << " " << entry
->url());
479 if (Comm::IsConnOpen(serverConn
))
480 unregister(serverConn
);
484 // drop the last path off the selection list. try the next one.
485 serverDestinations
.erase(serverDestinations
.begin());
486 startConnectionOrFail();
489 if (Comm::IsConnOpen(serverConn
))
490 debugs(17, 3, HERE
<< "server FD " << serverConnection()->fd
<< " not re-forwarding status " << entry
->getReply()->sline
.status());
492 debugs(17, 3, HERE
<< "server (FD closed) not re-forwarding status " << entry
->getReply()->sline
.status());
493 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
496 if (!Comm::IsConnOpen(serverConn
))
499 self
= NULL
; // refcounted
503 /**** CALLBACK WRAPPERS ************************************************************/
506 fwdPeerSelectionCompleteWrapper(Comm::ConnectionList
*, ErrorState
*err
, void *data
)
508 FwdState
*fwd
= (FwdState
*) data
;
511 fwd
->startConnectionOrFail();
515 fwdServerClosedWrapper(const CommCloseCbParams
¶ms
)
517 FwdState
*fwd
= (FwdState
*)params
.data
;
518 fwd
->serverClosed(params
.fd
);
522 fwdConnectDoneWrapper(const Comm::ConnectionPointer
&conn
, Comm::Flag status
, int xerrno
, void *data
)
524 FwdState
*fwd
= (FwdState
*) data
;
525 fwd
->connectDone(conn
, status
, xerrno
);
528 /**** PRIVATE *****************************************************************/
531 * FwdState::checkRetry
533 * Return TRUE if the request SHOULD be retried. This method is
534 * called when the HTTP connection fails, or when the connection
535 * is closed before reading the end of HTTP headers from the server.
538 FwdState::checkRetry()
543 if (!self
) { // we have aborted before the server called us back
544 debugs(17, 5, HERE
<< "not retrying because of earlier abort");
545 // we will be destroyed when the server clears its Pointer to us
549 if (entry
->store_status
!= STORE_PENDING
)
552 if (!entry
->isEmpty())
555 if (n_tries
> Config
.forward_max_tries
)
558 if (squid_curtime
- start_t
> Config
.Timeout
.forward
)
561 if (flags
.dont_retry
)
564 if (request
->bodyNibbled())
567 // NP: not yet actually connected anywhere. retry is safe.
568 if (!flags
.connected_okay
)
571 if (!checkRetriable())
578 * FwdState::checkRetriable
580 * Return TRUE if this is the kind of request that can be retried
581 * after a failure. If the request is not retriable then we don't
582 * want to risk sending it on a persistent connection. Instead we'll
583 * force it to go on a new HTTP connection.
586 FwdState::checkRetriable()
588 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
589 // complicated] code required to protect the PUT request body from being
590 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
591 if (request
->body_pipe
!= NULL
)
594 // RFC2616 9.1 Safe and Idempotent Methods
595 return (request
->method
.isHttpSafe() || request
->method
.isIdempotent());
599 FwdState::serverClosed(int fd
)
601 // XXX: fd is often -1 here
602 debugs(17, 2, "FD " << fd
<< " " << entry
->url() << " after " <<
603 (fd
>= 0 ? fd_table
[fd
].pconn
.uses
: -1) << " requests");
604 if (fd
>= 0 && serverConnection()->fd
== fd
)
605 fwdPconnPool
->noteUses(fd_table
[fd
].pconn
.uses
);
610 FwdState::retryOrBail()
613 debugs(17, 3, HERE
<< "re-forwarding (" << n_tries
<< " tries, " << (squid_curtime
- start_t
) << " secs)");
614 // we should retry the same destination if it failed due to pconn race
615 if (pconnRace
== raceHappened
)
616 debugs(17, 4, HERE
<< "retrying the same destination");
618 serverDestinations
.erase(serverDestinations
.begin()); // last one failed. try another.
619 startConnectionOrFail();
623 // TODO: should we call completed() here and move doneWithRetries there?
626 request
->hier
.stopPeerClock(false);
628 if (self
!= NULL
&& !err
&& shutting_down
) {
629 ErrorState
*anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
);
630 errorAppendEntry(entry
, anErr
);
633 self
= NULL
; // refcounted
636 // If the Server quits before nibbling at the request body, the body sender
637 // will not know (so that we can retry). Call this if we will not retry. We
638 // will notify the sender so that it does not get stuck waiting for space.
640 FwdState::doneWithRetries()
642 if (request
&& request
->body_pipe
!= NULL
)
643 request
->body_pipe
->expectNoConsumption();
646 // called by the server that failed after calling unregister()
648 FwdState::handleUnregisteredServerEnd()
650 debugs(17, 2, HERE
<< "self=" << self
<< " err=" << err
<< ' ' << entry
->url());
651 assert(!Comm::IsConnOpen(serverConn
));
656 FwdState::connectDone(const Comm::ConnectionPointer
&conn
, Comm::Flag status
, int xerrno
)
658 if (status
!= Comm::OK
) {
659 ErrorState
*const anErr
= makeConnectingError(ERR_CONNECT_FAIL
);
660 anErr
->xerrno
= xerrno
;
663 /* it might have been a timeout with a partially open link */
666 peerConnectFailed(conn
->getPeer());
675 flags
.connected_okay
= true;
677 debugs(17, 3, HERE
<< serverConnection() << ": '" << entry
->url() << "'" );
679 comm_add_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
681 if (serverConnection()->getPeer())
682 peerConnectSucceded(serverConnection()->getPeer());
685 if (!request
->flags
.pinned
) {
686 if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl
) ||
687 (!serverConnection()->getPeer() && request
->url
.getScheme() == AnyP::PROTO_HTTPS
) ||
688 request
->flags
.sslPeek
) {
690 HttpRequest::Pointer requestPointer
= request
;
691 AsyncCall::Pointer callback
= asyncCall(17,4,
692 "FwdState::ConnectedToPeer",
693 FwdStatePeerAnswerDialer(&FwdState::connectedToPeer
, this));
694 // Use positive timeout when less than one second is left.
695 const time_t sslNegotiationTimeout
= max(static_cast<time_t>(1), timeLeft());
696 Ssl::PeerConnector
*connector
=
697 new Ssl::PeerConnector(requestPointer
, serverConnection(), clientConn
, callback
, sslNegotiationTimeout
);
698 AsyncJob::Start(connector
); // will call our callback
704 // should reach ConnStateData before the dispatched Client job starts
705 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
706 ConnStateData::notePeerConnection
, serverConnection());
713 FwdState::connectedToPeer(Ssl::PeerConnectorAnswer
&answer
)
715 if (ErrorState
*error
= answer
.error
.get()) {
717 answer
.error
.clear(); // preserve error for errorSendComplete()
727 FwdState::connectTimeout(int fd
)
729 debugs(17, 2, "fwdConnectTimeout: FD " << fd
<< ": '" << entry
->url() << "'" );
730 assert(serverDestinations
[0] != NULL
);
731 assert(fd
== serverDestinations
[0]->fd
);
733 if (entry
->isEmpty()) {
734 ErrorState
*anErr
= new ErrorState(ERR_CONNECT_FAIL
, Http::scGatewayTimeout
, request
);
735 anErr
->xerrno
= ETIMEDOUT
;
738 /* This marks the peer DOWN ... */
739 if (serverDestinations
[0]->getPeer())
740 peerConnectFailed(serverDestinations
[0]->getPeer());
743 if (Comm::IsConnOpen(serverDestinations
[0])) {
744 serverDestinations
[0]->close();
749 FwdState::timeLeft() const
751 /* connection timeout */
753 if (serverDestinations
[0]->getPeer()) {
754 ctimeout
= serverDestinations
[0]->getPeer()->connect_timeout
> 0 ?
755 serverDestinations
[0]->getPeer()->connect_timeout
: Config
.Timeout
.peer_connect
;
757 ctimeout
= Config
.Timeout
.connect
;
760 /* calculate total forwarding timeout ??? */
761 int ftimeout
= Config
.Timeout
.forward
- (squid_curtime
- start_t
);
765 if (ftimeout
< ctimeout
)
766 return (time_t)ftimeout
;
768 return (time_t)ctimeout
;
772 * Called after forwarding path selection (via peer select) has taken place
773 * and whenever forwarding needs to attempt a new connection (routing failover).
774 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
777 FwdState::connectStart()
779 assert(serverDestinations
.size() > 0);
781 debugs(17, 3, "fwdConnectStart: " << entry
->url());
783 request
->hier
.startPeerClock();
785 if (serverDestinations
[0]->getPeer() && request
->flags
.sslBumped
) {
786 debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parent proxy are not allowed");
787 ErrorState
*anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
);
789 self
= NULL
; // refcounted
793 request
->flags
.pinned
= false; // XXX: what if the ConnStateData set this to flag existing credentials?
794 // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
795 // XXX: also, logs will now lie if pinning is broken and leads to an error message.
796 if (serverDestinations
[0]->peerType
== PINNED
) {
797 ConnStateData
*pinned_connection
= request
->pinnedConnection();
798 debugs(17,7, "pinned peer connection: " << pinned_connection
);
799 // pinned_connection may become nil after a pconn race
800 if (pinned_connection
)
801 serverConn
= pinned_connection
->borrowPinnedConnection(request
, serverDestinations
[0]->getPeer());
804 if (Comm::IsConnOpen(serverConn
)) {
805 pinned_connection
->stopPinnedConnectionMonitoring();
806 flags
.connected_okay
= true;
808 request
->flags
.pinned
= true;
809 request
->hier
.note(serverConn
, pinned_connection
->pinning
.host
);
810 if (pinned_connection
->pinnedAuth())
811 request
->flags
.auth
= true;
812 comm_add_close_handler(serverConn
->fd
, fwdServerClosedWrapper
, this);
814 /* Update server side TOS and Netfilter mark on the connection. */
815 if (Ip::Qos::TheConfig
.isAclTosActive()) {
816 debugs(17, 3, HERE
<< "setting tos for pinned connection to " << (int)serverConn
->tos
);
817 serverConn
->tos
= GetTosToServer(request
);
818 Ip::Qos::setSockTos(serverConn
, serverConn
->tos
);
821 if (Ip::Qos::TheConfig
.isAclNfmarkActive()) {
822 serverConn
->nfmark
= GetNfmarkToServer(request
);
823 Ip::Qos::setSockNfmark(serverConn
, serverConn
->nfmark
);
827 // the server may close the pinned connection before this request
828 pconnRace
= racePossible
;
832 // Pinned connection failure.
833 debugs(17,2,HERE
<< "Pinned connection failed: " << pinned_connection
);
834 ErrorState
*anErr
= new ErrorState(ERR_ZERO_SIZE_OBJECT
, Http::scServiceUnavailable
, request
);
836 self
= NULL
; // refcounted
840 // Use pconn to avoid opening a new connection.
841 const char *host
= NULL
;
842 if (!serverDestinations
[0]->getPeer())
843 host
= request
->GetHost();
845 Comm::ConnectionPointer temp
;
846 // Avoid pconns after races so that the same client does not suffer twice.
847 // This does not increase the total number of connections because we just
848 // closed the connection that failed the race. And re-pinning assumes this.
849 if (pconnRace
!= raceHappened
)
850 temp
= pconnPop(serverDestinations
[0], host
);
852 const bool openedPconn
= Comm::IsConnOpen(temp
);
853 pconnRace
= openedPconn
? racePossible
: raceImpossible
;
855 // if we found an open persistent connection to use. use it.
858 flags
.connected_okay
= true;
859 debugs(17, 3, HERE
<< "reusing pconn " << serverConnection());
862 comm_add_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
864 /* Update server side TOS and Netfilter mark on the connection. */
865 if (Ip::Qos::TheConfig
.isAclTosActive()) {
866 const tos_t tos
= GetTosToServer(request
);
867 Ip::Qos::setSockTos(temp
, tos
);
870 if (Ip::Qos::TheConfig
.isAclNfmarkActive()) {
871 const nfmark_t nfmark
= GetNfmarkToServer(request
);
872 Ip::Qos::setSockNfmark(temp
, nfmark
);
880 // We will try to open a new connection, possibly to the same destination.
881 // We reset serverDestinations[0] in case we are using it again because
882 // ConnOpener modifies its destination argument.
883 serverDestinations
[0]->local
.port(0);
886 #if URL_CHECKSUM_DEBUG
887 entry
->mem_obj
->checkUrlChecksum();
890 GetMarkingsToServer(request
, *serverDestinations
[0]);
892 calls
.connector
= commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper
, this));
893 Comm::ConnOpener
*cs
= new Comm::ConnOpener(serverDestinations
[0], calls
.connector
, timeLeft());
902 debugs(17, 3, clientConn
<< ": Fetching " << request
->method
<< ' ' << entry
->url());
904 * Assert that server_fd is set. This is to guarantee that fwdState
905 * is attached to something and will be deallocated when server_fd
908 assert(Comm::IsConnOpen(serverConn
));
910 fd_note(serverConnection()->fd
, entry
->url());
912 fd_table
[serverConnection()->fd
].noteUse();
914 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
915 assert(entry
->ping_status
!= PING_WAITING
);
917 assert(entry
->locked());
919 EBIT_SET(entry
->flags
, ENTRY_DISPATCHED
);
921 netdbPingSite(request
->GetHost());
923 /* Retrieves remote server TOS or MARK value, and stores it as part of the
924 * original client request FD object. It is later used to forward
925 * remote server's TOS/MARK in the response to the client in case of a MISS.
927 if (Ip::Qos::TheConfig
.isHitNfmarkActive()) {
928 if (Comm::IsConnOpen(clientConn
) && Comm::IsConnOpen(serverConnection())) {
929 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
930 /* Get the netfilter mark for the connection */
931 Ip::Qos::getNfmarkFromServer(serverConnection(), clientFde
);
936 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
937 if (Ip::Qos::TheConfig
.isHitTosActive()) {
938 if (Comm::IsConnOpen(clientConn
)) {
939 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
940 /* Get the TOS value for the packet */
941 Ip::Qos::getTosFromServer(serverConnection(), clientFde
);
947 if (request
->flags
.sslPeek
) {
948 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
949 ConnStateData::httpsPeeked
, serverConnection());
950 unregister(serverConn
); // async call owns it now
951 complete(); // destroys us
956 if (serverConnection()->getPeer() != NULL
) {
957 ++ serverConnection()->getPeer()->stats
.fetches
;
958 request
->peer_login
= serverConnection()->getPeer()->login
;
959 request
->peer_domain
= serverConnection()->getPeer()->domain
;
962 assert(!request
->flags
.sslPeek
);
963 request
->peer_login
= NULL
;
964 request
->peer_domain
= NULL
;
966 switch (request
->url
.getScheme()) {
969 case AnyP::PROTO_HTTPS
:
974 case AnyP::PROTO_HTTP
:
978 case AnyP::PROTO_GOPHER
:
982 case AnyP::PROTO_FTP
:
983 if (request
->flags
.ftpNative
)
984 Ftp::StartRelay(this);
986 Ftp::StartGateway(this);
989 case AnyP::PROTO_CACHE_OBJECT
:
991 case AnyP::PROTO_URN
:
992 fatal_dump("Should never get here");
995 case AnyP::PROTO_WHOIS
:
999 case AnyP::PROTO_WAIS
: /* Not implemented */
1002 debugs(17, DBG_IMPORTANT
, "WARNING: Cannot retrieve '" << entry
->url() << "'.");
1003 ErrorState
*anErr
= new ErrorState(ERR_UNSUP_REQ
, Http::scBadRequest
, request
);
1005 // Set the dont_retry flag because this is not a transient (network) error.
1006 flags
.dont_retry
= true;
1007 if (Comm::IsConnOpen(serverConn
)) {
1008 serverConn
->close();
1016 * FwdState::reforward
1018 * returns TRUE if the transaction SHOULD be re-forwarded to the
1019 * next choice in the serverDestinations list. This method is called when
1020 * peer communication completes normally, or experiences
1021 * some error after receiving the end of HTTP headers.
1024 FwdState::reforward()
1026 StoreEntry
*e
= entry
;
1028 if (EBIT_TEST(e
->flags
, ENTRY_ABORTED
)) {
1029 debugs(17, 3, HERE
<< "entry aborted");
1033 assert(e
->store_status
== STORE_PENDING
);
1035 #if URL_CHECKSUM_DEBUG
1037 e
->mem_obj
->checkUrlChecksum();
1040 debugs(17, 3, HERE
<< e
->url() << "?" );
1042 if (!EBIT_TEST(e
->flags
, ENTRY_FWD_HDR_WAIT
)) {
1043 debugs(17, 3, HERE
<< "No, ENTRY_FWD_HDR_WAIT isn't set");
1047 if (n_tries
> Config
.forward_max_tries
)
1050 if (request
->bodyNibbled())
1053 if (serverDestinations
.size() <= 1) {
1054 // NP: <= 1 since total count includes the recently failed one.
1055 debugs(17, 3, HERE
<< "No alternative forwarding paths left");
1059 const Http::StatusCode s
= e
->getReply()->sline
.status();
1060 debugs(17, 3, HERE
<< "status " << s
);
1061 return reforwardableStatus(s
);
1065 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1066 * on whether this is a validation request. RFC 2616 says that we MUST reply
1067 * with "504 Gateway Timeout" if validation fails and cached reply has
1068 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
1071 FwdState::makeConnectingError(const err_type type
) const
1073 return new ErrorState(type
, request
->flags
.needValidation
?
1074 Http::scGatewayTimeout
: Http::scServiceUnavailable
, request
);
1078 fwdStats(StoreEntry
* s
)
1082 storeAppendPrintf(s
, "Status");
1084 for (j
= 1; j
< MAX_FWD_STATS_IDX
; ++j
) {
1085 storeAppendPrintf(s
, "\ttry#%d", j
);
1088 storeAppendPrintf(s
, "\n");
1090 for (i
= 0; i
<= (int) Http::scInvalidHeader
; ++i
) {
1091 if (FwdReplyCodes
[0][i
] == 0)
1094 storeAppendPrintf(s
, "%3d", i
);
1096 for (j
= 0; j
<= MAX_FWD_STATS_IDX
; ++j
) {
1097 storeAppendPrintf(s
, "\t%d", FwdReplyCodes
[j
][i
]);
1100 storeAppendPrintf(s
, "\n");
1104 /**** STATIC MEMBER FUNCTIONS *************************************************/
1107 FwdState::reforwardableStatus(const Http::StatusCode s
) const
1111 case Http::scBadGateway
:
1113 case Http::scGatewayTimeout
:
1116 case Http::scForbidden
:
1118 case Http::scInternalServerError
:
1120 case Http::scNotImplemented
:
1122 case Http::scServiceUnavailable
:
1123 return Config
.retry
.onerror
;
1133 * Decide where details need to be gathered to correctly describe a persistent connection.
1135 * - the address/port details about this link
1136 * - domain name of server at other end of this link (either peer or requested host)
1139 FwdState::pconnPush(Comm::ConnectionPointer
&conn
, const char *domain
)
1141 if (conn
->getPeer()) {
1142 fwdPconnPool
->push(conn
, NULL
);
1144 fwdPconnPool
->push(conn
, domain
);
1148 Comm::ConnectionPointer
1149 FwdState::pconnPop(const Comm::ConnectionPointer
&dest
, const char *domain
)
1151 // always call shared pool first because we need to close an idle
1152 // connection there if we have to use a standby connection.
1153 Comm::ConnectionPointer conn
= fwdPconnPool
->pop(dest
, domain
, checkRetriable());
1154 if (!Comm::IsConnOpen(conn
)) {
1155 // either there was no pconn to pop or this is not a retriable xaction
1156 if (CachePeer
*peer
= dest
->getPeer()) {
1157 if (peer
->standby
.pool
)
1158 conn
= peer
->standby
.pool
->pop(dest
, domain
, true);
1161 return conn
; // open, closed, or nil
1165 FwdState::initModule()
1167 RegisterWithCacheManager();
1171 FwdState::RegisterWithCacheManager(void)
1173 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats
, 0, 1);
1177 FwdState::logReplyStatus(int tries
, const Http::StatusCode status
)
1179 if (status
> Http::scInvalidHeader
)
1184 if (tries
> MAX_FWD_STATS_IDX
)
1185 tries
= MAX_FWD_STATS_IDX
;
1187 ++ FwdReplyCodes
[tries
][status
];
1190 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1194 * Formerly static, but now used by client_side_request.cc
1196 /// Checks for a TOS value to apply depending on the ACL
1198 aclMapTOS(acl_tos
* head
, ACLChecklist
* ch
)
1202 for (l
= head
; l
; l
= l
->next
) {
1203 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
) == ACCESS_ALLOWED
)
1210 /// Checks for a netfilter mark value to apply depending on the ACL
1212 aclMapNfmark(acl_nfmark
* head
, ACLChecklist
* ch
)
1216 for (l
= head
; l
; l
= l
->next
) {
1217 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
) == ACCESS_ALLOWED
)
1225 getOutgoingAddress(HttpRequest
* request
, Comm::ConnectionPointer conn
)
1227 // skip if an outgoing address is already set.
1228 if (!conn
->local
.isAnyAddr()) return;
1230 // ensure that at minimum the wildcard local matches remote protocol
1231 if (conn
->remote
.isIPv4())
1232 conn
->local
.setIPv4();
1234 // maybe use TPROXY client address
1235 if (request
&& request
->flags
.spoofClientIp
) {
1236 if (!conn
->getPeer() || !conn
->getPeer()->options
.no_tproxy
) {
1237 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1238 if (Config
.onoff
.tproxy_uses_indirect_client
)
1239 conn
->local
= request
->indirect_client_addr
;
1242 conn
->local
= request
->client_addr
;
1243 // some flags need setting on the socket to use this address
1244 conn
->flags
|= COMM_DOBIND
;
1245 conn
->flags
|= COMM_TRANSPARENT
;
1248 // else no tproxy today ...
1251 if (!Config
.accessList
.outgoing_address
) {
1252 return; // anything will do.
1255 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1256 ch
.dst_peer
= conn
->getPeer();
1257 ch
.dst_addr
= conn
->remote
;
1259 // TODO use the connection details in ACL.
1260 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1263 for (l
= Config
.accessList
.outgoing_address
; l
; l
= l
->next
) {
1265 /* check if the outgoing address is usable to the destination */
1266 if (conn
->remote
.isIPv4() != l
->addr
.isIPv4()) continue;
1268 /* check ACLs for this outgoing address */
1269 if (!l
->aclList
|| ch
.fastCheck(l
->aclList
) == ACCESS_ALLOWED
) {
1270 conn
->local
= l
->addr
;
1277 GetTosToServer(HttpRequest
* request
)
1279 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1280 return aclMapTOS(Ip::Qos::TheConfig
.tosToServer
, &ch
);
1284 GetNfmarkToServer(HttpRequest
* request
)
1286 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1287 return aclMapNfmark(Ip::Qos::TheConfig
.nfmarkToServer
, &ch
);
1291 GetMarkingsToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1293 // Get the server side TOS and Netfilter mark to be set on the connection.
1294 if (Ip::Qos::TheConfig
.isAclTosActive()) {
1295 conn
.tos
= GetTosToServer(request
);
1296 debugs(17, 3, "from " << conn
.local
<< " tos " << int(conn
.tos
));
1299 #if SO_MARK && USE_LIBCAP
1300 conn
.nfmark
= GetNfmarkToServer(request
);
1301 debugs(17, 3, "from " << conn
.local
<< " netfilter mark " << conn
.nfmark
);