2 * DEBUG: section 17 Request Forwarding
3 * AUTHOR: Duane Wessels
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
34 #include "AccessLogEntry.h"
35 #include "acl/AclAddress.h"
36 #include "acl/FilledChecklist.h"
37 #include "acl/Gadgets.h"
38 #include "anyp/PortCfg.h"
39 #include "CacheManager.h"
40 #include "CachePeer.h"
41 #include "client_side.h"
42 #include "clients/forward.h"
43 #include "comm/Connection.h"
44 #include "comm/ConnOpener.h"
45 #include "comm/Loops.h"
46 #include "CommCalls.h"
47 #include "errorpage.h"
54 #include "hier_code.h"
56 #include "HttpReply.h"
57 #include "HttpRequest.h"
58 #include "icmp/net_db.h"
60 #include "ip/Intercept.h"
61 #include "ip/QosConfig.h"
63 #include "MemObject.h"
64 #include "mgr/Registration.h"
65 #include "neighbors.h"
67 #include "PeerPoolMgr.h"
68 #include "PeerSelectState.h"
69 #include "SquidConfig.h"
70 #include "SquidTime.h"
72 #include "StoreClient.h"
76 #include "ssl/cert_validate_message.h"
77 #include "ssl/Config.h"
78 #include "ssl/ErrorDetail.h"
79 #include "ssl/helper.h"
80 #include "ssl/PeerConnector.h"
81 #include "ssl/ServerBump.h"
82 #include "ssl/support.h"
// C-style callback trampolines (typedefs PSC/CLCB/CNCB come from the included
// peer-selection/comm headers). Each recovers the FwdState* from its void*
// data argument and forwards to the matching member function (see the
// "CALLBACK WRAPPERS" section below).
87 static PSC fwdPeerSelectionCompleteWrapper
;
88 static CLCB fwdServerClosedWrapper
;
89 static CNCB fwdConnectDoneWrapper
;
// Highest retry count tracked individually by the forwarding statistics.
93 #define MAX_FWD_STATS_IDX 9
// Histogram of final HTTP reply status code per retry count; filled by
// FwdState::logReplyStatus() and dumped by fwdStats().
94 static int FwdReplyCodes
[MAX_FWD_STATS_IDX
+ 1][Http::scInvalidHeader
+ 1];
// Shared idle persistent-connection pool for all server-side links
// (see pconnPush()/pconnPop() below). Never destroyed; lives for the
// whole process lifetime.
96 static PconnPool
*fwdPconnPool
= new PconnPool("server-side", NULL
);
97 CBDATA_CLASS_INIT(FwdState
);
100 class FwdStatePeerAnswerDialer
: public CallDialer
, public Ssl::PeerConnector::CbDialer
103 typedef void (FwdState::*Method
)(Ssl::PeerConnectorAnswer
&);
105 FwdStatePeerAnswerDialer(Method method
, FwdState
*fwd
):
106 method_(method
), fwd_(fwd
), answer_() {}
109 virtual bool canDial(AsyncCall
&call
) { return fwd_
.valid(); }
110 void dial(AsyncCall
&call
) { ((&(*fwd_
))->*method_
)(answer_
); }
111 virtual void print(std::ostream
&os
) const {
112 os
<< '(' << fwd_
.get() << ", " << answer_
<< ')';
115 /* Ssl::PeerConnector::CbDialer API */
116 virtual Ssl::PeerConnectorAnswer
&answer() { return answer_
; }
120 CbcPointer
<FwdState
> fwd_
;
121 Ssl::PeerConnectorAnswer answer_
;
// Store-abort callback (registered via entry->registerAbort() in start()).
// Recovers the FwdState from the void* cbdata, closes any open server
// connection, and drops the remaining candidate destinations.
// NOTE(review): this extraction is missing interior lines (braces/else); the
// code fragments below are kept byte-identical.
126 FwdState::abort(void* d
)
128 FwdState
* fwd
= (FwdState
*)d
;
129 Pointer tmp
= fwd
; // Grab a temporary pointer to keep the object alive during our scope.
131 if (Comm::IsConnOpen(fwd
->serverConnection())) {
132 fwd
->closeServerConnection("store entry aborted");
// else branch: nothing to close, just log at level 7.
134 debugs(17, 7, HERE
<< "store entry aborted; no connection to close");
// No further destinations will be tried for an aborted entry.
136 fwd
->serverDestinations
.clear();
// Closes serverConn for the given reason: first detaches our close handler
// (so fwdServerClosedWrapper/serverClosed() is not re-entered by the close),
// then credits the pconn pool with this descriptor's accumulated use count.
// NOTE(review): the actual serverConn->close() call is on a line dropped by
// this extraction — confirm against upstream.
141 FwdState::closeServerConnection(const char *reason
)
143 debugs(17, 3, "because " << reason
<< "; " << serverConn
);
144 comm_remove_close_handler(serverConn
->fd
, fwdServerClosedWrapper
, this);
145 fwdPconnPool
->noteUses(fd_table
[serverConn
->fd
].pconn
.uses
);
149 /**** PUBLIC INTERFACE ********************************************************/
151 FwdState::FwdState(const Comm::ConnectionPointer
&client
, StoreEntry
* e
, HttpRequest
* r
, const AccessLogEntryPointer
&alp
):
154 debugs(17, 2, HERE
<< "Forwarding client request " << client
<< ", url=" << e
->url() );
158 HTTPMSGLOCK(request
);
159 pconnRace
= raceImpossible
;
160 start_t
= squid_curtime
;
161 serverDestinations
.reserve(Config
.forward_max_tries
);
163 EBIT_SET(e
->flags
, ENTRY_FWD_HDR_WAIT
);
166 // Called once, right after object creation, when it is safe to set self
167 void FwdState::start(Pointer aSelf
)
169 // Protect ourselves from being destroyed when the only Server pointing
170 // to us is gone (while we expect to talk to more Servers later).
171 // Once we set self, we are responsible for clearing it when we do not
172 // expect to talk to any servers.
173 self
= aSelf
; // refcounted
175 // We hope that either the store entry aborts or peer is selected.
176 // Otherwise we are going to leak our object.
178 entry
->registerAbort(FwdState::abort
, this);
180 #if STRICT_ORIGINAL_DST
181 // Bug 3243: CVE 2009-0801
182 // Bypass of browser same-origin access control in intercepted communication
183 // To resolve this we must force DIRECT and only to the original client destination.
184 const bool isIntercepted
= request
&& !request
->flags
.redirected
&& (request
->flags
.intercepted
|| request
->flags
.interceptTproxy
);
185 const bool useOriginalDst
= Config
.onoff
.client_dst_passthru
|| (request
&& !request
->flags
.hostVerified
);
186 if (isIntercepted
&& useOriginalDst
) {
187 selectPeerForIntercepted();
188 // 3.2 does not support re-wrapping inside CONNECT.
189 // our only alternative is to fake destination "found" and continue with the forwarding.
190 startConnectionOrFail();
195 // do full route options selection
196 peerSelect(&serverDestinations
, request
, al
, entry
, fwdPeerSelectionCompleteWrapper
, this);
199 #if STRICT_ORIGINAL_DST
200 /// bypasses peerSelect() when dealing with intercepted requests
202 FwdState::selectPeerForIntercepted()
204 // use pinned connection if available
205 Comm::ConnectionPointer p
;
206 if (ConnStateData
*client
= request
->pinnedConnection()) {
207 p
= client
->validatePinnedConnection(request
, NULL
);
208 if (Comm::IsConnOpen(p
)) {
209 /* duplicate peerSelectPinned() effects */
210 p
->peerType
= PINNED
;
211 entry
->ping_status
= PING_DONE
; /* Skip ICP */
213 debugs(17, 3, "reusing a pinned conn: " << *p
);
214 serverDestinations
.push_back(p
);
216 debugs(17,2, "Pinned connection is not valid: " << p
);
217 ErrorState
*anErr
= new ErrorState(ERR_ZERO_SIZE_OBJECT
, Http::scServiceUnavailable
, request
);
220 // Either use the valid pinned connection or fail if it is invalid.
224 // use client original destination as second preferred choice
225 p
= new Comm::Connection();
226 p
->peerType
= ORIGINAL_DST
;
227 p
->remote
= clientConn
->local
;
228 getOutgoingAddress(request
, p
);
230 debugs(17, 3, HERE
<< "using client original destination: " << *p
);
231 serverDestinations
.push_back(p
);
236 FwdState::completed()
238 if (flags
.forward_completed
) {
239 debugs(17, DBG_IMPORTANT
, HERE
<< "FwdState::completed called on a completed request! Bad!");
243 flags
.forward_completed
= true;
245 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
246 debugs(17, 3, HERE
<< "entry aborted");
250 #if URL_CHECKSUM_DEBUG
252 entry
->mem_obj
->checkUrlChecksum();
255 if (entry
->store_status
== STORE_PENDING
) {
256 if (entry
->isEmpty()) {
257 if (!err
) // we quit (e.g., fd closed) before an error or content
258 fail(new ErrorState(ERR_READ_ERROR
, Http::scBadGateway
, request
));
260 errorAppendEntry(entry
, err
);
263 if (request
->flags
.sslPeek
&& request
->clientConnectionManager
.valid()) {
264 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
265 ConnStateData::httpsPeeked
, Comm::ConnectionPointer(NULL
));
269 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
271 entry
->releaseRequest();
275 if (storePendingNClients(entry
) > 0)
276 assert(!EBIT_TEST(entry
->flags
, ENTRY_FWD_HDR_WAIT
));
// Destructor: releases everything the forwarding attempt held — the request
// lock, the store-entry abort registration and lock, any pending connector
// callback, the open server connection, and the destination list.
280 FwdState::~FwdState()
282 debugs(17, 3, HERE
<< "FwdState destructor starting");
// If we never reached completed(), finish the bookkeeping now
// (the call made under this condition is on a line dropped by the extraction).
284 if (! flags
.forward_completed
)
289 HTTPMSGUNLOCK(request
);
293 entry
->unregisterAbort();
295 entry
->unlock("FwdState");
// Cancel a still-pending ConnOpener callback so it cannot fire into a
// destroyed object.
299 if (calls
.connector
!= NULL
) {
300 calls
.connector
->cancel("FwdState destructed");
301 calls
.connector
= NULL
;
304 if (Comm::IsConnOpen(serverConn
))
305 closeServerConnection("~FwdState");
307 serverDestinations
.clear();
309 debugs(17, 3, HERE
<< "FwdState destructor done");
313 * This is the entry point for client-side to start forwarding
314 * a transaction. It is a static method that may or may not
315 * allocate a FwdState.
318 FwdState::Start(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
, const AccessLogEntryPointer
&al
)
321 * client_addr == no_addr indicates this is an "internal" request
322 * from peer_digest.c, asn.c, netdb.c, etc and should always
323 * be allowed. yuck, I know.
326 if ( Config
.accessList
.miss
&& !request
->client_addr
.isNoAddr() &&
327 !request
->flags
.internal
&& request
->url
.getScheme() != AnyP::PROTO_CACHE_OBJECT
) {
329 * Check if this host is allowed to fetch MISSES from us (miss_access).
330 * Intentionally replace the src_addr automatically selected by the checklist code
331 * we do NOT want the indirect client address to be tested here.
333 ACLFilledChecklist
ch(Config
.accessList
.miss
, request
, NULL
);
334 ch
.src_addr
= request
->client_addr
;
335 if (ch
.fastCheck() == ACCESS_DENIED
) {
337 page_id
= aclGetDenyInfoPage(&Config
.denyInfoList
, AclMatchedName
, 1);
339 if (page_id
== ERR_NONE
)
340 page_id
= ERR_FORWARDING_DENIED
;
342 ErrorState
*anErr
= new ErrorState(page_id
, Http::scForbidden
, request
);
343 errorAppendEntry(entry
, anErr
); // frees anErr
348 debugs(17, 3, HERE
<< "'" << entry
->url() << "'");
350 * This seems like an odd place to bind mem_obj and request.
351 * Might want to assert that request is NULL at this point
353 entry
->mem_obj
->request
= request
;
354 HTTPMSGLOCK(entry
->mem_obj
->request
);
355 #if URL_CHECKSUM_DEBUG
357 entry
->mem_obj
->checkUrlChecksum();
362 ErrorState
*anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
);
363 errorAppendEntry(entry
, anErr
); // frees anErr
367 if (request
->flags
.internal
) {
368 debugs(17, 2, "calling internalStart() due to request flag");
369 internalStart(clientConn
, request
, entry
);
373 switch (request
->url
.getScheme()) {
375 case AnyP::PROTO_CACHE_OBJECT
:
376 debugs(17, 2, "calling CacheManager due to request scheme " << request
->url
.getScheme());
377 CacheManager::GetInstance()->Start(clientConn
, request
, entry
);
380 case AnyP::PROTO_URN
:
381 urnStart(request
, entry
);
385 FwdState::Pointer fwd
= new FwdState(clientConn
, entry
, request
, al
);
// Legacy entry point for callers without an AccessLogEntry: simply delegates
// to Start() with a NULL ALE.
394 FwdState::fwdStart(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
)
396 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
397 Start(clientConn
, entry
, request
, NULL
);
401 FwdState::startConnectionOrFail()
403 debugs(17, 3, HERE
<< entry
->url());
405 if (serverDestinations
.size() > 0) {
406 // Ditch error page if it was created before.
407 // A new one will be created if there's another problem
411 // Update the logging information about this new server connection.
412 // Done here before anything else so the errors get logged for
413 // this server link regardless of what happens when connecting to it.
414 // If successfully connected this top destination will become the serverConnection().
415 request
->hier
.note(serverDestinations
[0], request
->GetHost());
416 request
->clearError();
420 debugs(17, 3, HERE
<< "Connection failed: " << entry
->url());
422 ErrorState
*anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scInternalServerError
, request
);
424 } // else use actual error from last connection attempt
425 self
= NULL
; // refcounted
// Records a forwarding error. The assignment of errorState into the member
// `err` (and disposal of any earlier error) is on lines dropped by this
// extraction; visible responsibilities are: attach the request to the error
// if missing, track pconn races, and propagate zero-reply pinned-connection
// failures to the client without retrying.
430 FwdState::fail(ErrorState
* errorState
)
432 debugs(17, 3, err_type_str
[errorState
->type
] << " \"" << Http::StatusCodeString(errorState
->httpStatus
) << "\"\n\t" << entry
->url());
// Give the error a request to report on, taking a counted reference.
437 if (!errorState
->request
) {
438 errorState
->request
= request
;
439 HTTPMSGLOCK(errorState
->request
);
// Everything past here is specific to ERR_ZERO_SIZE_OBJECT (the early return
// for other error types is on a dropped line).
442 if (err
->type
!= ERR_ZERO_SIZE_OBJECT
)
// A zero-size reply on a reused idle connection is the classic pconn race:
// remember it so retryOrBail()/connectStart() retry the same destination.
445 if (pconnRace
== racePossible
) {
446 debugs(17, 5, HERE
<< "pconn race happened");
447 pconnRace
= raceHappened
;
// A pinned connection cannot be re-opened by us; mark the pinning as a
// zero-reply failure and give up retrying so the client learns of it.
450 if (ConnStateData
*pinned_connection
= request
->pinnedConnection()) {
451 pinned_connection
->pinning
.zeroReply
= true;
452 flags
.dont_retry
= true; // we want to propagate failure to the client
453 debugs(17, 4, "zero reply on pinned connection");
458 * Frees fwdState without closing FD or generating an abort
// Detaches this FwdState from conn (which must be the current, open server
// connection): removes our close handler so closing the socket later will
// not call serverClosed(). The reset of serverConn itself happens on a line
// dropped by this extraction.
461 FwdState::unregister(Comm::ConnectionPointer
&conn
)
463 debugs(17, 3, HERE
<< entry
->url() );
464 assert(serverConnection() == conn
);
465 assert(Comm::IsConnOpen(conn
));
466 comm_remove_close_handler(conn
->fd
, fwdServerClosedWrapper
, this);
470 // Legacy method to be removed in favor of the above as soon as possible
// FD-based variant kept for old callers: validates that fd is indeed the
// current server connection's descriptor, then delegates to the
// ConnectionPointer overload above.
472 FwdState::unregister(int fd
)
474 debugs(17, 3, HERE
<< entry
->url() );
475 assert(fd
== serverConnection()->fd
);
476 unregister(serverConn
);
480 * server-side modules call fwdComplete() when they are done
481 * downloading an object. Then, we either 1) re-forward the
482 * request somewhere else if needed, or 2) call storeComplete()
488 debugs(17, 3, HERE
<< entry
->url() << "\n\tstatus " << entry
->getReply()->sline
.status());
489 #if URL_CHECKSUM_DEBUG
491 entry
->mem_obj
->checkUrlChecksum();
494 logReplyStatus(n_tries
, entry
->getReply()->sline
.status());
497 debugs(17, 3, HERE
<< "re-forwarding " << entry
->getReply()->sline
.status() << " " << entry
->url());
499 if (Comm::IsConnOpen(serverConn
))
500 unregister(serverConn
);
504 // drop the last path off the selection list. try the next one.
505 serverDestinations
.erase(serverDestinations
.begin());
506 startConnectionOrFail();
509 if (Comm::IsConnOpen(serverConn
))
510 debugs(17, 3, HERE
<< "server FD " << serverConnection()->fd
<< " not re-forwarding status " << entry
->getReply()->sline
.status());
512 debugs(17, 3, HERE
<< "server (FD closed) not re-forwarding status " << entry
->getReply()->sline
.status());
513 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
516 if (!Comm::IsConnOpen(serverConn
))
519 self
= NULL
; // refcounted
523 /**** CALLBACK WRAPPERS ************************************************************/
// Trampoline for peer-selection completion: the selected paths were written
// directly into fwd->serverDestinations by peerSelect(), so we only need to
// kick off the first connection attempt.
526 fwdPeerSelectionCompleteWrapper(Comm::ConnectionList
* unused
, ErrorState
*err
, void *data
)
528 FwdState
*fwd
= (FwdState
*) data
;
531 fwd
->startConnectionOrFail();
// Trampoline for the comm close handler installed on the server socket.
// NOTE(review): "¶ms" below looks like a mojibake of "&params" (HTML
// entity corruption in this extraction) — confirm against upstream.
535 fwdServerClosedWrapper(const CommCloseCbParams
&params
)
537 FwdState
*fwd
= (FwdState
*)params
.data
;
538 fwd
->serverClosed(params
.fd
);
// Trampoline for Comm::ConnOpener completion (success or failure).
542 fwdConnectDoneWrapper(const Comm::ConnectionPointer
&conn
, Comm::Flag status
, int xerrno
, void *data
)
544 FwdState
*fwd
= (FwdState
*) data
;
545 fwd
->connectDone(conn
, status
, xerrno
);
548 /**** PRIVATE *****************************************************************/
551 * FwdState::checkRetry
553 * Return TRUE if the request SHOULD be retried. This method is
554 * called when the HTTP connection fails, or when the connection
555 * is closed before server-side read the end of HTTP headers.
// Each guard below vetoes a retry; the "return false" lines between them
// were dropped by this extraction. Order: abort, entry already finished,
// data already written, try/time budgets exhausted, explicit no-retry flag,
// request body partially consumed, and finally method retriability.
558 FwdState::checkRetry()
563 if (!self
) { // we have aborted before the server called us back
564 debugs(17, 5, HERE
<< "not retrying because of earlier abort");
565 // we will be destroyed when the server clears its Pointer to us
569 if (entry
->store_status
!= STORE_PENDING
)
572 if (!entry
->isEmpty())
575 if (n_tries
> Config
.forward_max_tries
)
578 if (squid_curtime
- start_t
> Config
.Timeout
.forward
)
581 if (flags
.dont_retry
)
584 if (request
->bodyNibbled())
587 // NP: not yet actually connected anywhere. retry is safe.
588 if (!flags
.connected_okay
)
591 if (!checkRetriable())
598 * FwdState::checkRetriable
600 * Return TRUE if this is the kind of request that can be retried
601 * after a failure. If the request is not retriable then we don't
602 * want to risk sending it on a persistent connection. Instead we'll
603 * force it to go on a new HTTP connection.
606 FwdState::checkRetriable()
608 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
609 // complicated] code required to protect the PUT request body from being
610 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
// Requests carrying a body are never retriable (the early "return false"
// under this guard is on a line dropped by the extraction).
611 if (request
->body_pipe
!= NULL
)
614 // RFC2616 9.1 Safe and Idempotent Methods
615 return (request
->method
.isHttpSafe() || request
->method
.isIdempotent());
// Close-handler callback (reached via fwdServerClosedWrapper) fired when the
// server socket closes unexpectedly. Credits the pconn pool with the
// descriptor's use count when the closed fd is our current server
// connection; the subsequent retryOrBail() call is on a dropped line.
619 FwdState::serverClosed(int fd
)
621 // XXX: fd is often -1 here
622 debugs(17, 2, "FD " << fd
<< " " << entry
->url() << " after " <<
623 (fd
>= 0 ? fd_table
[fd
].pconn
.uses
: -1) << " requests");
624 if (fd
>= 0 && serverConnection()->fd
== fd
)
625 fwdPconnPool
->noteUses(fd_table
[fd
].pconn
.uses
);
// After a server-side failure: retry via startConnectionOrFail() when
// checkRetry() permits (that gate is on a dropped line), otherwise bail —
// report a shutdown error if that is why we have no error yet, and release
// our self-reference so the object can be destroyed.
630 FwdState::retryOrBail()
633 debugs(17, 3, HERE
<< "re-forwarding (" << n_tries
<< " tries, " << (squid_curtime
- start_t
) << " secs)");
634 // we should retry the same destination if it failed due to pconn race
635 if (pconnRace
== raceHappened
)
636 debugs(17, 4, HERE
<< "retrying the same destination");
// Non-race failure: the front destination is bad, move on to the next.
638 serverDestinations
.erase(serverDestinations
.begin()); // last one failed. try another.
639 startConnectionOrFail();
643 // TODO: should we call completed() here and move doneWithRetries there?
// Bail path: if we are shutting down and no specific error was recorded,
// report ERR_SHUTTING_DOWN so the client sees a meaningful reply.
646 if (self
!= NULL
&& !err
&& shutting_down
) {
647 ErrorState
*anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
);
648 errorAppendEntry(entry
, anErr
);
651 self
= NULL
; // refcounted
654 // If the Server quits before nibbling at the request body, the body sender
655 // will not know (so that we can retry). Call this if we will not retry. We
656 // will notify the sender so that it does not get stuck waiting for space.
658 FwdState::doneWithRetries()
660 if (request
&& request
->body_pipe
!= NULL
)
661 request
->body_pipe
->expectNoConsumption();
664 // called by the server that failed after calling unregister()
// The server connection must already be gone (we unregistered earlier);
// the follow-up retryOrBail() call is on a line dropped by the extraction.
666 FwdState::handleUnregisteredServerEnd()
668 debugs(17, 2, HERE
<< "self=" << self
<< " err=" << err
<< ' ' << entry
->url());
669 assert(!Comm::IsConnOpen(serverConn
));
674 FwdState::connectDone(const Comm::ConnectionPointer
&conn
, Comm::Flag status
, int xerrno
)
676 if (status
!= Comm::OK
) {
677 ErrorState
*const anErr
= makeConnectingError(ERR_CONNECT_FAIL
);
678 anErr
->xerrno
= xerrno
;
681 /* it might have been a timeout with a partially open link */
684 peerConnectFailed(conn
->getPeer());
693 flags
.connected_okay
= true;
695 debugs(17, 3, HERE
<< serverConnection() << ": '" << entry
->url() << "'" );
697 comm_add_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
699 if (serverConnection()->getPeer())
700 peerConnectSucceded(serverConnection()->getPeer());
703 if (!request
->flags
.pinned
) {
704 if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl
) ||
705 (!serverConnection()->getPeer() && request
->url
.getScheme() == AnyP::PROTO_HTTPS
) ||
706 request
->flags
.sslPeek
) {
708 HttpRequest::Pointer requestPointer
= request
;
709 AsyncCall::Pointer callback
= asyncCall(17,4,
710 "FwdState::ConnectedToPeer",
711 FwdStatePeerAnswerDialer(&FwdState::connectedToPeer
, this));
712 // Use positive timeout when less than one second is left.
713 const time_t sslNegotiationTimeout
= max(static_cast<time_t>(1), timeLeft());
714 Ssl::PeerConnector
*connector
=
715 new Ssl::PeerConnector(requestPointer
, serverConnection(), clientConn
, callback
, sslNegotiationTimeout
);
716 AsyncJob::Start(connector
); // will call our callback
722 // should reach ConnStateData before the dispatched Client job starts
723 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
724 ConnStateData::notePeerConnection
, serverConnection());
// Callback from Ssl::PeerConnector (dialed through FwdStatePeerAnswerDialer)
// once TLS negotiation with the next hop finishes. On error, the failure is
// recorded (the fail()/close calls sit on dropped lines) while the answer's
// error pointer is released here so the ErrorState outlives the answer.
731 FwdState::connectedToPeer(Ssl::PeerConnectorAnswer
&answer
)
733 if (ErrorState
*error
= answer
.error
.get()) {
735 answer
.error
.clear(); // preserve error for errorSendComplete()
// Handles a connect-phase timeout on the current (front) destination:
// records a 504 ERR_CONNECT_FAIL with ETIMEDOUT if nothing was sent to the
// client yet, marks the peer down, and closes the half-open socket (which
// triggers retry via the close handler).
745 FwdState::connectTimeout(int fd
)
747 debugs(17, 2, "fwdConnectTimeout: FD " << fd
<< ": '" << entry
->url() << "'" );
748 assert(serverDestinations
[0] != NULL
);
749 assert(fd
== serverDestinations
[0]->fd
);
// Only report an error if the reply is still empty; otherwise the client
// already has partial content.
751 if (entry
->isEmpty()) {
752 ErrorState
*anErr
= new ErrorState(ERR_CONNECT_FAIL
, Http::scGatewayTimeout
, request
);
753 anErr
->xerrno
= ETIMEDOUT
;
756 /* This marks the peer DOWN ... */
757 if (serverDestinations
[0]->getPeer())
758 peerConnectFailed(serverDestinations
[0]->getPeer());
761 if (Comm::IsConnOpen(serverDestinations
[0])) {
762 serverDestinations
[0]->close();
// Computes the timeout to use for the next connection attempt: the smaller
// of the per-connection timeout (peer's connect_timeout if positive, else
// the global peer_connect/connect defaults) and whatever remains of the
// overall Config.Timeout.forward budget since start_t. The declaration of
// ctimeout and the lower clamp on ftimeout are on lines dropped by this
// extraction.
767 FwdState::timeLeft() const
769 /* connection timeout */
771 if (serverDestinations
[0]->getPeer()) {
772 ctimeout
= serverDestinations
[0]->getPeer()->connect_timeout
> 0 ?
773 serverDestinations
[0]->getPeer()->connect_timeout
: Config
.Timeout
.peer_connect
;
// No peer: use the generic connect timeout.
775 ctimeout
= Config
.Timeout
.connect
;
778 /* calculate total forwarding timeout ??? */
779 int ftimeout
= Config
.Timeout
.forward
- (squid_curtime
- start_t
);
// Return whichever budget runs out first.
783 if (ftimeout
< ctimeout
)
784 return (time_t)ftimeout
;
786 return (time_t)ctimeout
;
790 * Called after forwarding path selection (via peer select) has taken place
791 * and whenever forwarding needs to attempt a new connection (routing failover).
792 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
795 FwdState::connectStart()
797 assert(serverDestinations
.size() > 0);
799 debugs(17, 3, "fwdConnectStart: " << entry
->url());
801 if (!request
->hier
.first_conn_start
.tv_sec
) // first attempt
802 request
->hier
.first_conn_start
= current_time
;
804 if (serverDestinations
[0]->getPeer() && request
->flags
.sslBumped
) {
805 debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parent proxy are not allowed");
806 ErrorState
*anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
);
808 self
= NULL
; // refcounted
812 request
->flags
.pinned
= false; // XXX: what if the ConnStateData set this to flag existing credentials?
813 // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
814 // XXX: also, logs will now lie if pinning is broken and leads to an error message.
815 if (serverDestinations
[0]->peerType
== PINNED
) {
816 ConnStateData
*pinned_connection
= request
->pinnedConnection();
817 debugs(17,7, "pinned peer connection: " << pinned_connection
);
818 // pinned_connection may become nil after a pconn race
819 if (pinned_connection
)
820 serverConn
= pinned_connection
->borrowPinnedConnection(request
, serverDestinations
[0]->getPeer());
823 if (Comm::IsConnOpen(serverConn
)) {
824 pinned_connection
->stopPinnedConnectionMonitoring();
825 flags
.connected_okay
= true;
827 request
->flags
.pinned
= true;
828 request
->hier
.note(serverConn
, pinned_connection
->pinning
.host
);
829 if (pinned_connection
->pinnedAuth())
830 request
->flags
.auth
= true;
831 comm_add_close_handler(serverConn
->fd
, fwdServerClosedWrapper
, this);
833 /* Update server side TOS and Netfilter mark on the connection. */
834 if (Ip::Qos::TheConfig
.isAclTosActive()) {
835 debugs(17, 3, HERE
<< "setting tos for pinned connection to " << (int)serverConn
->tos
);
836 serverConn
->tos
= GetTosToServer(request
);
837 Ip::Qos::setSockTos(serverConn
, serverConn
->tos
);
840 if (Ip::Qos::TheConfig
.isAclNfmarkActive()) {
841 serverConn
->nfmark
= GetNfmarkToServer(request
);
842 Ip::Qos::setSockNfmark(serverConn
, serverConn
->nfmark
);
846 // the server may close the pinned connection before this request
847 pconnRace
= racePossible
;
851 // Pinned connection failure.
852 debugs(17,2,HERE
<< "Pinned connection failed: " << pinned_connection
);
853 ErrorState
*anErr
= new ErrorState(ERR_ZERO_SIZE_OBJECT
, Http::scServiceUnavailable
, request
);
855 self
= NULL
; // refcounted
859 // Use pconn to avoid opening a new connection.
860 const char *host
= NULL
;
861 if (!serverDestinations
[0]->getPeer())
862 host
= request
->GetHost();
864 Comm::ConnectionPointer temp
;
865 // Avoid pconns after races so that the same client does not suffer twice.
866 // This does not increase the total number of connections because we just
867 // closed the connection that failed the race. And re-pinning assumes this.
868 if (pconnRace
!= raceHappened
)
869 temp
= pconnPop(serverDestinations
[0], host
);
871 const bool openedPconn
= Comm::IsConnOpen(temp
);
872 pconnRace
= openedPconn
? racePossible
: raceImpossible
;
874 // if we found an open persistent connection to use. use it.
877 flags
.connected_okay
= true;
878 debugs(17, 3, HERE
<< "reusing pconn " << serverConnection());
881 comm_add_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
883 /* Update server side TOS and Netfilter mark on the connection. */
884 if (Ip::Qos::TheConfig
.isAclTosActive()) {
885 const tos_t tos
= GetTosToServer(request
);
886 Ip::Qos::setSockTos(temp
, tos
);
889 if (Ip::Qos::TheConfig
.isAclNfmarkActive()) {
890 const nfmark_t nfmark
= GetNfmarkToServer(request
);
891 Ip::Qos::setSockNfmark(temp
, nfmark
);
899 // We will try to open a new connection, possibly to the same destination.
900 // We reset serverDestinations[0] in case we are using it again because
901 // ConnOpener modifies its destination argument.
902 serverDestinations
[0]->local
.port(0);
905 #if URL_CHECKSUM_DEBUG
906 entry
->mem_obj
->checkUrlChecksum();
909 GetMarkingsToServer(request
, *serverDestinations
[0]);
911 calls
.connector
= commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper
, this));
912 Comm::ConnOpener
*cs
= new Comm::ConnOpener(serverDestinations
[0], calls
.connector
, timeLeft());
921 debugs(17, 3, clientConn
<< ": Fetching " << request
->method
<< ' ' << entry
->url());
923 * Assert that server_fd is set. This is to guarantee that fwdState
924 * is attached to something and will be deallocated when server_fd
927 assert(Comm::IsConnOpen(serverConn
));
929 fd_note(serverConnection()->fd
, entry
->url());
931 fd_table
[serverConnection()->fd
].noteUse();
933 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
934 assert(entry
->ping_status
!= PING_WAITING
);
936 assert(entry
->locked());
938 EBIT_SET(entry
->flags
, ENTRY_DISPATCHED
);
940 netdbPingSite(request
->GetHost());
942 /* Retrieves remote server TOS or MARK value, and stores it as part of the
943 * original client request FD object. It is later used to forward
944 * remote server's TOS/MARK in the response to the client in case of a MISS.
946 if (Ip::Qos::TheConfig
.isHitNfmarkActive()) {
947 if (Comm::IsConnOpen(clientConn
) && Comm::IsConnOpen(serverConnection())) {
948 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
949 /* Get the netfilter mark for the connection */
950 Ip::Qos::getNfmarkFromServer(serverConnection(), clientFde
);
955 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
956 if (Ip::Qos::TheConfig
.isHitTosActive()) {
957 if (Comm::IsConnOpen(clientConn
)) {
958 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
959 /* Get the TOS value for the packet */
960 Ip::Qos::getTosFromServer(serverConnection(), clientFde
);
966 if (request
->flags
.sslPeek
) {
967 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
968 ConnStateData::httpsPeeked
, serverConnection());
969 unregister(serverConn
); // async call owns it now
970 complete(); // destroys us
975 if (serverConnection()->getPeer() != NULL
) {
976 ++ serverConnection()->getPeer()->stats
.fetches
;
977 request
->peer_login
= serverConnection()->getPeer()->login
;
978 request
->peer_domain
= serverConnection()->getPeer()->domain
;
981 assert(!request
->flags
.sslPeek
);
982 request
->peer_login
= NULL
;
983 request
->peer_domain
= NULL
;
985 switch (request
->url
.getScheme()) {
988 case AnyP::PROTO_HTTPS
:
993 case AnyP::PROTO_HTTP
:
997 case AnyP::PROTO_GOPHER
:
1001 case AnyP::PROTO_FTP
:
1002 if (request
->flags
.ftpNative
)
1003 Ftp::StartRelay(this);
1005 Ftp::StartGateway(this);
1008 case AnyP::PROTO_CACHE_OBJECT
:
1010 case AnyP::PROTO_URN
:
1011 fatal_dump("Should never get here");
1014 case AnyP::PROTO_WHOIS
:
1018 case AnyP::PROTO_WAIS
: /* Not implemented */
1021 debugs(17, DBG_IMPORTANT
, "WARNING: Cannot retrieve '" << entry
->url() << "'.");
1022 ErrorState
*anErr
= new ErrorState(ERR_UNSUP_REQ
, Http::scBadRequest
, request
);
1024 // Set the dont_retry flag because this is not a transient (network) error.
1025 flags
.dont_retry
= true;
1026 if (Comm::IsConnOpen(serverConn
)) {
1027 serverConn
->close();
1035 * FwdState::reforward
1037 * returns TRUE if the transaction SHOULD be re-forwarded to the
1038 * next choice in the serverDestinations list. This method is called when
1039 * server-side communication completes normally, or experiences
1040 * some error after receiving the end of HTTP headers.
// Guard order: aborted entry, ENTRY_FWD_HDR_WAIT cleared, retry budget,
// nibbled body, and no alternative paths all veto re-forwarding (their
// "return 0" lines were dropped by the extraction); otherwise the decision
// rests on the reply status via reforwardableStatus().
1043 FwdState::reforward()
1045 StoreEntry
*e
= entry
;
1047 if (EBIT_TEST(e
->flags
, ENTRY_ABORTED
)) {
1048 debugs(17, 3, HERE
<< "entry aborted");
1052 assert(e
->store_status
== STORE_PENDING
);
1054 #if URL_CHECKSUM_DEBUG
1056 e
->mem_obj
->checkUrlChecksum();
1059 debugs(17, 3, HERE
<< e
->url() << "?" );
1061 if (!EBIT_TEST(e
->flags
, ENTRY_FWD_HDR_WAIT
)) {
1062 debugs(17, 3, HERE
<< "No, ENTRY_FWD_HDR_WAIT isn't set");
1066 if (n_tries
> Config
.forward_max_tries
)
1069 if (request
->bodyNibbled())
1072 if (serverDestinations
.size() <= 1) {
1073 // NP: <= 1 since total count includes the recently failed one.
1074 debugs(17, 3, HERE
<< "No alternative forwarding paths left");
1078 const Http::StatusCode s
= e
->getReply()->sline
.status();
1079 debugs(17, 3, HERE
<< "status " << s
);
1080 return reforwardableStatus(s
);
1084 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1085 * on whether this is a validation request. RFC 2616 says that we MUST reply
1086 * with "504 Gateway Timeout" if validation fails and cached reply has
1087 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
// Caller owns the returned ErrorState (every call site hands it to fail()
// or errorAppendEntry()).
1090 FwdState::makeConnectingError(const err_type type
) const
1092 return new ErrorState(type
, request
->flags
.needValidation
?
1093 Http::scGatewayTimeout
: Http::scServiceUnavailable
, request
);
// Cache-manager "forward" action: dumps the FwdReplyCodes histogram as a
// table of status code vs. retry count. Rows where column 0 is zero are
// skipped (the "continue" for that guard is on a line dropped by the
// extraction).
1097 fwdStats(StoreEntry
* s
)
// Header row: "Status" followed by try#1..try#8 column labels.
1101 storeAppendPrintf(s
, "Status");
1103 for (j
= 1; j
< MAX_FWD_STATS_IDX
; ++j
) {
1104 storeAppendPrintf(s
, "\ttry#%d", j
);
1107 storeAppendPrintf(s
, "\n");
// One row per status code that was ever recorded.
1109 for (i
= 0; i
<= (int) Http::scInvalidHeader
; ++i
) {
1110 if (FwdReplyCodes
[0][i
] == 0)
1113 storeAppendPrintf(s
, "%3d", i
);
1115 for (j
= 0; j
<= MAX_FWD_STATS_IDX
; ++j
) {
1116 storeAppendPrintf(s
, "\t%d", FwdReplyCodes
[j
][i
]);
1119 storeAppendPrintf(s
, "\n");
1123 /**** STATIC MEMBER FUNCTIONS *************************************************/
// Switch over the reply status deciding whether another destination should
// be tried. Per the visible case grouping, scBadGateway/scGatewayTimeout
// share one result (on a line dropped by the extraction — presumably an
// unconditional retry; confirm against upstream) while the remaining listed
// codes retry only when Config.retry.onerror (retry_on_error) is enabled.
// The default branch is also on dropped lines.
1126 FwdState::reforwardableStatus(const Http::StatusCode s
) const
1130 case Http::scBadGateway
:
1132 case Http::scGatewayTimeout
:
1135 case Http::scForbidden
:
1137 case Http::scInternalServerError
:
1139 case Http::scNotImplemented
:
1141 case Http::scServiceUnavailable
:
1142 return Config
.retry
.onerror
;
1152 * Decide where details need to be gathered to correctly describe a persistent connection.
1154 * - the address/port details about this link
1155 * - domain name of server at other end of this link (either peer or requested host)
// Returns an idle connection to the shared pool. Connections to a cache
// peer are keyed by address alone (NULL domain, since a peer can serve any
// domain); direct connections are keyed by destination domain as well.
1158 FwdState::pconnPush(Comm::ConnectionPointer
&conn
, const char *domain
)
1160 if (conn
->getPeer()) {
1161 fwdPconnPool
->push(conn
, NULL
);
1163 fwdPconnPool
->push(conn
, domain
);
1167 Comm::ConnectionPointer
1168 FwdState::pconnPop(const Comm::ConnectionPointer
&dest
, const char *domain
)
1170 // always call shared pool first because we need to close an idle
1171 // connection there if we have to use a standby connection.
1172 Comm::ConnectionPointer conn
= fwdPconnPool
->pop(dest
, domain
, checkRetriable());
1173 if (!Comm::IsConnOpen(conn
)) {
1174 // either there was no pconn to pop or this is not a retriable xaction
1175 if (CachePeer
*peer
= dest
->getPeer()) {
1176 if (peer
->standby
.pool
)
1177 conn
= peer
->standby
.pool
->pop(dest
, domain
, true);
1180 return conn
; // open, closed, or nil
1184 FwdState::initModule()
1186 RegisterWithCacheManager();
1190 FwdState::RegisterWithCacheManager(void)
1192 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats
, 0, 1);
1196 FwdState::logReplyStatus(int tries
, const Http::StatusCode status
)
1198 if (status
> Http::scInvalidHeader
)
1203 if (tries
> MAX_FWD_STATS_IDX
)
1204 tries
= MAX_FWD_STATS_IDX
;
1206 ++ FwdReplyCodes
[tries
][status
];
1209 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1213 * Formerly static, but now used by client_side_request.cc
1215 /// Checks for a TOS value to apply depending on the ACL
1217 aclMapTOS(acl_tos
* head
, ACLChecklist
* ch
)
1221 for (l
= head
; l
; l
= l
->next
) {
1222 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
) == ACCESS_ALLOWED
)
1229 /// Checks for a netfilter mark value to apply depending on the ACL
1231 aclMapNfmark(acl_nfmark
* head
, ACLChecklist
* ch
)
1235 for (l
= head
; l
; l
= l
->next
) {
1236 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
) == ACCESS_ALLOWED
)
1244 getOutgoingAddress(HttpRequest
* request
, Comm::ConnectionPointer conn
)
1246 // skip if an outgoing address is already set.
1247 if (!conn
->local
.isAnyAddr()) return;
1249 // ensure that at minimum the wildcard local matches remote protocol
1250 if (conn
->remote
.isIPv4())
1251 conn
->local
.setIPv4();
1253 // maybe use TPROXY client address
1254 if (request
&& request
->flags
.spoofClientIp
) {
1255 if (!conn
->getPeer() || !conn
->getPeer()->options
.no_tproxy
) {
1256 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1257 if (Config
.onoff
.tproxy_uses_indirect_client
)
1258 conn
->local
= request
->indirect_client_addr
;
1261 conn
->local
= request
->client_addr
;
1262 // some flags need setting on the socket to use this address
1263 conn
->flags
|= COMM_DOBIND
;
1264 conn
->flags
|= COMM_TRANSPARENT
;
1267 // else no tproxy today ...
1270 if (!Config
.accessList
.outgoing_address
) {
1271 return; // anything will do.
1274 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1275 ch
.dst_peer
= conn
->getPeer();
1276 ch
.dst_addr
= conn
->remote
;
1278 // TODO use the connection details in ACL.
1279 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1282 for (l
= Config
.accessList
.outgoing_address
; l
; l
= l
->next
) {
1284 /* check if the outgoing address is usable to the destination */
1285 if (conn
->remote
.isIPv4() != l
->addr
.isIPv4()) continue;
1287 /* check ACLs for this outgoing address */
1288 if (!l
->aclList
|| ch
.fastCheck(l
->aclList
) == ACCESS_ALLOWED
) {
1289 conn
->local
= l
->addr
;
1296 GetTosToServer(HttpRequest
* request
)
1298 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1299 return aclMapTOS(Ip::Qos::TheConfig
.tosToServer
, &ch
);
1303 GetNfmarkToServer(HttpRequest
* request
)
1305 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1306 return aclMapNfmark(Ip::Qos::TheConfig
.nfmarkToServer
, &ch
);
1310 GetMarkingsToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1312 // Get the server side TOS and Netfilter mark to be set on the connection.
1313 if (Ip::Qos::TheConfig
.isAclTosActive()) {
1314 conn
.tos
= GetTosToServer(request
);
1315 debugs(17, 3, "from " << conn
.local
<< " tos " << int(conn
.tos
));
1318 #if SO_MARK && USE_LIBCAP
1319 conn
.nfmark
= GetNfmarkToServer(request
);
1320 debugs(17, 3, "from " << conn
.local
<< " netfilter mark " << conn
.nfmark
);