2 * DEBUG: section 17 Request Forwarding
3 * AUTHOR: Duane Wessels
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
34 #include "AccessLogEntry.h"
35 #include "acl/AclAddress.h"
36 #include "acl/FilledChecklist.h"
37 #include "acl/Gadgets.h"
38 #include "anyp/PortCfg.h"
39 #include "CacheManager.h"
40 #include "CachePeer.h"
41 #include "client_side.h"
42 #include "comm/Connection.h"
43 #include "comm/ConnOpener.h"
44 #include "comm/Loops.h"
45 #include "CommCalls.h"
46 #include "errorpage.h"
54 #include "hier_code.h"
56 #include "HttpReply.h"
57 #include "HttpRequest.h"
58 #include "icmp/net_db.h"
60 #include "ip/Intercept.h"
61 #include "ip/QosConfig.h"
63 #include "MemObject.h"
64 #include "mgr/Registration.h"
65 #include "neighbors.h"
67 #include "PeerSelectState.h"
68 #include "SquidConfig.h"
69 #include "SquidTime.h"
71 #include "StoreClient.h"
75 #include "ssl/cert_validate_message.h"
76 #include "ssl/Config.h"
77 #include "ssl/ErrorDetail.h"
78 #include "ssl/helper.h"
79 #include "ssl/PeerConnector.h"
80 #include "ssl/ServerBump.h"
81 #include "ssl/support.h"
87 static PSC fwdPeerSelectionCompleteWrapper
;
88 static CLCB fwdServerClosedWrapper
;
89 static CNCB fwdConnectDoneWrapper
;
93 #define MAX_FWD_STATS_IDX 9
94 static int FwdReplyCodes
[MAX_FWD_STATS_IDX
+ 1][Http::scInvalidHeader
+ 1];
96 static PconnPool
*fwdPconnPool
= new PconnPool("server-side");
97 CBDATA_CLASS_INIT(FwdState
);
100 class FwdStatePeerAnswerDialer
: public CallDialer
, public Ssl::PeerConnector::CbDialer
103 typedef void (FwdState::*Method
)(Ssl::PeerConnectorAnswer
&);
105 FwdStatePeerAnswerDialer(Method method
, FwdState
*fwd
):
106 method_(method
), fwd_(fwd
), answer_() {}
109 virtual bool canDial(AsyncCall
&call
) { return fwd_
.valid(); }
110 void dial(AsyncCall
&call
) { ((&(*fwd_
))->*method_
)(answer_
); }
111 virtual void print(std::ostream
&os
) const {
112 os
<< '(' << fwd_
.get() << ", " << answer_
<< ')'; }
114 /* Ssl::PeerConnector::CbDialer API */
115 virtual Ssl::PeerConnectorAnswer
&answer() { return answer_
; }
119 CbcPointer
<FwdState
> fwd_
;
120 Ssl::PeerConnectorAnswer answer_
;
126 FwdState::abort(void* d
)
128 FwdState
* fwd
= (FwdState
*)d
;
129 Pointer tmp
= fwd
; // Grab a temporary pointer to keep the object alive during our scope.
131 if (Comm::IsConnOpen(fwd
->serverConnection())) {
132 comm_remove_close_handler(fwd
->serverConnection()->fd
, fwdServerClosedWrapper
, fwd
);
133 debugs(17, 3, HERE
<< "store entry aborted; closing " <<
134 fwd
->serverConnection());
135 fwd
->serverConnection()->close();
137 debugs(17, 7, HERE
<< "store entry aborted; no connection to close");
139 fwd
->serverDestinations
.clear();
143 /**** PUBLIC INTERFACE ********************************************************/
145 FwdState::FwdState(const Comm::ConnectionPointer
&client
, StoreEntry
* e
, HttpRequest
* r
, const AccessLogEntryPointer
&alp
):
148 debugs(17, 2, HERE
<< "Forwarding client request " << client
<< ", url=" << e
->url() );
152 HTTPMSGLOCK(request
);
153 pconnRace
= raceImpossible
;
154 start_t
= squid_curtime
;
155 serverDestinations
.reserve(Config
.forward_max_tries
);
157 EBIT_SET(e
->flags
, ENTRY_FWD_HDR_WAIT
);
160 // Called once, right after object creation, when it is safe to set self
161 void FwdState::start(Pointer aSelf
)
163 // Protect ourselves from being destroyed when the only Server pointing
164 // to us is gone (while we expect to talk to more Servers later).
165 // Once we set self, we are responsible for clearing it when we do not
166 // expect to talk to any servers.
167 self
= aSelf
; // refcounted
169 // We hope that either the store entry aborts or peer is selected.
170 // Otherwise we are going to leak our object.
172 entry
->registerAbort(FwdState::abort
, this);
174 #if STRICT_ORIGINAL_DST
175 // Bug 3243: CVE 2009-0801
176 // Bypass of browser same-origin access control in intercepted communication
177 // To resolve this we must force DIRECT and only to the original client destination.
178 const bool isIntercepted
= request
&& !request
->flags
.redirected
&& (request
->flags
.intercepted
|| request
->flags
.interceptTproxy
);
179 const bool useOriginalDst
= Config
.onoff
.client_dst_passthru
|| (request
&& !request
->flags
.hostVerified
);
180 if (isIntercepted
&& useOriginalDst
) {
181 selectPeerForIntercepted();
182 // 3.2 does not support re-wrapping inside CONNECT.
183 // our only alternative is to fake destination "found" and continue with the forwarding.
184 startConnectionOrFail();
189 // do full route options selection
190 peerSelect(&serverDestinations
, request
, al
, entry
, fwdPeerSelectionCompleteWrapper
, this);
193 #if STRICT_ORIGINAL_DST
194 /// bypasses peerSelect() when dealing with intercepted requests
196 FwdState::selectPeerForIntercepted()
198 // use pinned connection if available
199 Comm::ConnectionPointer p
;
200 if (ConnStateData
*client
= request
->pinnedConnection()) {
201 p
= client
->validatePinnedConnection(request
, NULL
);
202 if (Comm::IsConnOpen(p
)) {
203 /* duplicate peerSelectPinned() effects */
204 p
->peerType
= PINNED
;
205 entry
->ping_status
= PING_DONE
; /* Skip ICP */
207 debugs(17, 3, "reusing a pinned conn: " << *p
);
208 serverDestinations
.push_back(p
);
210 debugs(17,2, "Pinned connection is not valid: " << p
);
211 ErrorState
*anErr
= new ErrorState(ERR_ZERO_SIZE_OBJECT
, Http::scServiceUnavailable
, request
);
214 // Either use the valid pinned connection or fail if it is invalid.
218 // use client original destination as second preferred choice
219 p
= new Comm::Connection();
220 p
->peerType
= ORIGINAL_DST
;
221 p
->remote
= clientConn
->local
;
222 getOutgoingAddress(request
, p
);
224 debugs(17, 3, HERE
<< "using client original destination: " << *p
);
225 serverDestinations
.push_back(p
);
230 FwdState::completed()
232 if (flags
.forward_completed
) {
233 debugs(17, DBG_IMPORTANT
, HERE
<< "FwdState::completed called on a completed request! Bad!");
237 flags
.forward_completed
= true;
239 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
240 debugs(17, 3, HERE
<< "entry aborted");
244 #if URL_CHECKSUM_DEBUG
246 entry
->mem_obj
->checkUrlChecksum();
249 if (entry
->store_status
== STORE_PENDING
) {
250 if (entry
->isEmpty()) {
251 if (!err
) // we quit (e.g., fd closed) before an error or content
252 fail(new ErrorState(ERR_READ_ERROR
, Http::scBadGateway
, request
));
254 errorAppendEntry(entry
, err
);
257 if (request
->flags
.sslPeek
&& request
->clientConnectionManager
.valid()) {
258 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
259 ConnStateData::httpsPeeked
, Comm::ConnectionPointer(NULL
));
263 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
265 entry
->releaseRequest();
269 if (storePendingNClients(entry
) > 0)
270 assert(!EBIT_TEST(entry
->flags
, ENTRY_FWD_HDR_WAIT
));
274 FwdState::~FwdState()
276 debugs(17, 3, HERE
<< "FwdState destructor starting");
278 if (! flags
.forward_completed
)
283 HTTPMSGUNLOCK(request
);
287 entry
->unregisterAbort();
289 entry
->unlock("FwdState");
293 if (calls
.connector
!= NULL
) {
294 calls
.connector
->cancel("FwdState destructed");
295 calls
.connector
= NULL
;
298 if (Comm::IsConnOpen(serverConn
)) {
299 comm_remove_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
300 debugs(17, 3, HERE
<< "closing FD " << serverConnection()->fd
);
304 serverDestinations
.clear();
306 debugs(17, 3, HERE
<< "FwdState destructor done");
310 * This is the entry point for client-side to start forwarding
311 * a transaction. It is a static method that may or may not
312 * allocate a FwdState.
315 FwdState::Start(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
, const AccessLogEntryPointer
&al
)
318 * client_addr == no_addr indicates this is an "internal" request
319 * from peer_digest.c, asn.c, netdb.c, etc and should always
320 * be allowed. yuck, I know.
323 if ( Config
.accessList
.miss
&& !request
->client_addr
.isNoAddr() &&
324 !request
->flags
.internal
&& request
->url
.getScheme() != AnyP::PROTO_CACHE_OBJECT
) {
326 * Check if this host is allowed to fetch MISSES from us (miss_access).
327 * Intentionally replace the src_addr automatically selected by the checklist code
328 * we do NOT want the indirect client address to be tested here.
330 ACLFilledChecklist
ch(Config
.accessList
.miss
, request
, NULL
);
331 ch
.src_addr
= request
->client_addr
;
332 if (ch
.fastCheck() == ACCESS_DENIED
) {
334 page_id
= aclGetDenyInfoPage(&Config
.denyInfoList
, AclMatchedName
, 1);
336 if (page_id
== ERR_NONE
)
337 page_id
= ERR_FORWARDING_DENIED
;
339 ErrorState
*anErr
= new ErrorState(page_id
, Http::scForbidden
, request
);
340 errorAppendEntry(entry
, anErr
); // frees anErr
345 debugs(17, 3, HERE
<< "'" << entry
->url() << "'");
347 * This seems like an odd place to bind mem_obj and request.
348 * Might want to assert that request is NULL at this point
350 entry
->mem_obj
->request
= request
;
351 HTTPMSGLOCK(entry
->mem_obj
->request
);
352 #if URL_CHECKSUM_DEBUG
354 entry
->mem_obj
->checkUrlChecksum();
359 ErrorState
*anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
);
360 errorAppendEntry(entry
, anErr
); // frees anErr
364 if (request
->flags
.internal
) {
365 debugs(17, 2, "calling internalStart() due to request flag");
366 internalStart(clientConn
, request
, entry
);
370 switch (request
->url
.getScheme()) {
372 case AnyP::PROTO_CACHE_OBJECT
:
373 debugs(17, 2, "calling CacheManager due to request scheme " << request
->url
.getScheme());
374 CacheManager::GetInstance()->Start(clientConn
, request
, entry
);
377 case AnyP::PROTO_URN
:
378 urnStart(request
, entry
);
382 FwdState::Pointer fwd
= new FwdState(clientConn
, entry
, request
, al
);
391 FwdState::fwdStart(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
)
393 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
394 Start(clientConn
, entry
, request
, NULL
);
398 FwdState::startConnectionOrFail()
400 debugs(17, 3, HERE
<< entry
->url());
402 if (serverDestinations
.size() > 0) {
403 // Ditch error page if it was created before.
404 // A new one will be created if there's another problem
408 // Update the logging information about this new server connection.
409 // Done here before anything else so the errors get logged for
410 // this server link regardless of what happens when connecting to it.
411 // IF sucessfuly connected this top destination will become the serverConnection().
412 request
->hier
.note(serverDestinations
[0], request
->GetHost());
413 request
->clearError();
417 debugs(17, 3, HERE
<< "Connection failed: " << entry
->url());
419 ErrorState
*anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scInternalServerError
, request
);
421 } // else use actual error from last connection attempt
422 self
= NULL
; // refcounted
427 FwdState::fail(ErrorState
* errorState
)
429 debugs(17, 3, err_type_str
[errorState
->type
] << " \"" << Http::StatusCodeString(errorState
->httpStatus
) << "\"\n\t" << entry
->url());
434 if (!errorState
->request
) {
435 errorState
->request
= request
;
436 HTTPMSGLOCK(errorState
->request
);
439 if (err
->type
!= ERR_ZERO_SIZE_OBJECT
)
442 if (pconnRace
== racePossible
) {
443 debugs(17, 5, HERE
<< "pconn race happened");
444 pconnRace
= raceHappened
;
447 if (ConnStateData
*pinned_connection
= request
->pinnedConnection()) {
448 pinned_connection
->pinning
.zeroReply
= true;
449 flags
.dont_retry
= true; // we want to propagate failure to the client
450 debugs(17, 4, "zero reply on pinned connection");
455 * Frees fwdState without closing FD or generating an abort
458 FwdState::unregister(Comm::ConnectionPointer
&conn
)
460 debugs(17, 3, HERE
<< entry
->url() );
461 assert(serverConnection() == conn
);
462 assert(Comm::IsConnOpen(conn
));
463 comm_remove_close_handler(conn
->fd
, fwdServerClosedWrapper
, this);
467 // Legacy method to be removed in favor of the above as soon as possible
469 FwdState::unregister(int fd
)
471 debugs(17, 3, HERE
<< entry
->url() );
472 assert(fd
== serverConnection()->fd
);
473 unregister(serverConn
);
477 * server-side modules call fwdComplete() when they are done
478 * downloading an object. Then, we either 1) re-forward the
479 * request somewhere else if needed, or 2) call storeComplete()
485 debugs(17, 3, HERE
<< entry
->url() << "\n\tstatus " << entry
->getReply()->sline
.status());
486 #if URL_CHECKSUM_DEBUG
488 entry
->mem_obj
->checkUrlChecksum();
491 logReplyStatus(n_tries
, entry
->getReply()->sline
.status());
494 debugs(17, 3, HERE
<< "re-forwarding " << entry
->getReply()->sline
.status() << " " << entry
->url());
496 if (Comm::IsConnOpen(serverConn
))
497 unregister(serverConn
);
501 // drop the last path off the selection list. try the next one.
502 serverDestinations
.erase(serverDestinations
.begin());
503 startConnectionOrFail();
506 if (Comm::IsConnOpen(serverConn
))
507 debugs(17, 3, HERE
<< "server FD " << serverConnection()->fd
<< " not re-forwarding status " << entry
->getReply()->sline
.status());
509 debugs(17, 3, HERE
<< "server (FD closed) not re-forwarding status " << entry
->getReply()->sline
.status());
510 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
513 if (!Comm::IsConnOpen(serverConn
))
516 self
= NULL
; // refcounted
520 /**** CALLBACK WRAPPERS ************************************************************/
523 fwdPeerSelectionCompleteWrapper(Comm::ConnectionList
* unused
, ErrorState
*err
, void *data
)
525 FwdState
*fwd
= (FwdState
*) data
;
528 fwd
->startConnectionOrFail();
532 fwdServerClosedWrapper(const CommCloseCbParams
¶ms
)
534 FwdState
*fwd
= (FwdState
*)params
.data
;
535 fwd
->serverClosed(params
.fd
);
539 fwdConnectDoneWrapper(const Comm::ConnectionPointer
&conn
, comm_err_t status
, int xerrno
, void *data
)
541 FwdState
*fwd
= (FwdState
*) data
;
542 fwd
->connectDone(conn
, status
, xerrno
);
545 /**** PRIVATE *****************************************************************/
548 * FwdState::checkRetry
550 * Return TRUE if the request SHOULD be retried. This method is
551 * called when the HTTP connection fails, or when the connection
552 * is closed before server-side read the end of HTTP headers.
555 FwdState::checkRetry()
560 if (!self
) { // we have aborted before the server called us back
561 debugs(17, 5, HERE
<< "not retrying because of earlier abort");
562 // we will be destroyed when the server clears its Pointer to us
566 if (entry
->store_status
!= STORE_PENDING
)
569 if (!entry
->isEmpty())
572 if (n_tries
> Config
.forward_max_tries
)
575 if (squid_curtime
- start_t
> Config
.Timeout
.forward
)
578 if (flags
.dont_retry
)
581 if (request
->bodyNibbled())
584 // NP: not yet actually connected anywhere. retry is safe.
585 if (!flags
.connected_okay
)
588 if (!checkRetriable())
595 * FwdState::checkRetriable
597 * Return TRUE if this is the kind of request that can be retried
598 * after a failure. If the request is not retriable then we don't
599 * want to risk sending it on a persistent connection. Instead we'll
600 * force it to go on a new HTTP connection.
603 FwdState::checkRetriable()
605 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
606 // complicated] code required to protect the PUT request body from being
607 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
608 if (request
->body_pipe
!= NULL
)
611 // RFC2616 9.1 Safe and Idempotent Methods
612 return (request
->method
.isHttpSafe() || request
->method
.isIdempotent());
616 FwdState::serverClosed(int fd
)
618 debugs(17, 2, HERE
<< "FD " << fd
<< " " << entry
->url());
623 FwdState::retryOrBail()
626 debugs(17, 3, HERE
<< "re-forwarding (" << n_tries
<< " tries, " << (squid_curtime
- start_t
) << " secs)");
627 // we should retry the same destination if it failed due to pconn race
628 if (pconnRace
== raceHappened
)
629 debugs(17, 4, HERE
<< "retrying the same destination");
631 serverDestinations
.erase(serverDestinations
.begin()); // last one failed. try another.
632 startConnectionOrFail();
636 // TODO: should we call completed() here and move doneWithRetries there?
639 if (self
!= NULL
&& !err
&& shutting_down
) {
640 ErrorState
*anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
);
641 errorAppendEntry(entry
, anErr
);
644 self
= NULL
; // refcounted
647 // If the Server quits before nibbling at the request body, the body sender
648 // will not know (so that we can retry). Call this if we will not retry. We
649 // will notify the sender so that it does not get stuck waiting for space.
651 FwdState::doneWithRetries()
653 if (request
&& request
->body_pipe
!= NULL
)
654 request
->body_pipe
->expectNoConsumption();
657 // called by the server that failed after calling unregister()
659 FwdState::handleUnregisteredServerEnd()
661 debugs(17, 2, HERE
<< "self=" << self
<< " err=" << err
<< ' ' << entry
->url());
662 assert(!Comm::IsConnOpen(serverConn
));
667 FwdState::connectDone(const Comm::ConnectionPointer
&conn
, comm_err_t status
, int xerrno
)
669 if (status
!= COMM_OK
) {
670 ErrorState
*const anErr
= makeConnectingError(ERR_CONNECT_FAIL
);
671 anErr
->xerrno
= xerrno
;
674 /* it might have been a timeout with a partially open link */
677 peerConnectFailed(conn
->getPeer());
686 flags
.connected_okay
= true;
688 debugs(17, 3, HERE
<< serverConnection() << ": '" << entry
->url() << "'" );
690 comm_add_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
692 if (serverConnection()->getPeer())
693 peerConnectSucceded(serverConnection()->getPeer());
696 if (!request
->flags
.pinned
) {
697 if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl
) ||
698 (!serverConnection()->getPeer() && request
->url
.getScheme() == AnyP::PROTO_HTTPS
) ||
699 request
->flags
.sslPeek
) {
701 HttpRequest::Pointer requestPointer
= request
;
702 AsyncCall::Pointer callback
= asyncCall(17,4,
703 "FwdState::ConnectedToPeer",
704 FwdStatePeerAnswerDialer(&FwdState::connectedToPeer
, this));
705 Ssl::PeerConnector
*connector
=
706 new Ssl::PeerConnector(requestPointer
, serverConnection(), clientConn
, callback
);
707 AsyncJob::Start(connector
); // will call our callback
718 FwdState::connectedToPeer(Ssl::PeerConnectorAnswer
&answer
)
720 if (ErrorState
*error
= answer
.error
.get()) {
722 answer
.error
.clear(); // preserve error for errorSendComplete()
732 FwdState::connectTimeout(int fd
)
734 debugs(17, 2, "fwdConnectTimeout: FD " << fd
<< ": '" << entry
->url() << "'" );
735 assert(serverDestinations
[0] != NULL
);
736 assert(fd
== serverDestinations
[0]->fd
);
738 if (entry
->isEmpty()) {
739 ErrorState
*anErr
= new ErrorState(ERR_CONNECT_FAIL
, Http::scGatewayTimeout
, request
);
740 anErr
->xerrno
= ETIMEDOUT
;
743 /* This marks the peer DOWN ... */
744 if (serverDestinations
[0]->getPeer())
745 peerConnectFailed(serverDestinations
[0]->getPeer());
748 if (Comm::IsConnOpen(serverDestinations
[0])) {
749 serverDestinations
[0]->close();
754 * Called after Forwarding path selection (via peer select) has taken place.
755 * And whenever forwarding needs to attempt a new connection (routing failover)
756 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
759 FwdState::connectStart()
761 assert(serverDestinations
.size() > 0);
763 debugs(17, 3, "fwdConnectStart: " << entry
->url());
765 if (!request
->hier
.first_conn_start
.tv_sec
) // first attempt
766 request
->hier
.first_conn_start
= current_time
;
768 /* connection timeout */
770 if (serverDestinations
[0]->getPeer()) {
771 ctimeout
= serverDestinations
[0]->getPeer()->connect_timeout
> 0 ?
772 serverDestinations
[0]->getPeer()->connect_timeout
: Config
.Timeout
.peer_connect
;
774 ctimeout
= Config
.Timeout
.connect
;
777 /* calculate total forwarding timeout ??? */
778 int ftimeout
= Config
.Timeout
.forward
- (squid_curtime
- start_t
);
782 if (ftimeout
< ctimeout
)
785 if (serverDestinations
[0]->getPeer() && request
->flags
.sslBumped
) {
786 debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parent proxy are not allowed");
787 ErrorState
*anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
);
789 self
= NULL
; // refcounted
793 request
->flags
.pinned
= false; // XXX: what if the ConnStateData set this to flag existing credentials?
794 // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
795 // XXX: also, logs will now lie if pinning is broken and leads to an error message.
796 if (serverDestinations
[0]->peerType
== PINNED
) {
797 ConnStateData
*pinned_connection
= request
->pinnedConnection();
798 debugs(17,7, "pinned peer connection: " << pinned_connection
);
799 // pinned_connection may become nil after a pconn race
800 if (pinned_connection
)
801 serverConn
= pinned_connection
->validatePinnedConnection(request
, serverDestinations
[0]->getPeer());
804 if (Comm::IsConnOpen(serverConn
)) {
805 pinned_connection
->stopPinnedConnectionMonitoring();
806 flags
.connected_okay
= true;
808 request
->flags
.pinned
= true;
809 request
->hier
.note(serverConn
, pinned_connection
->pinning
.host
);
810 if (pinned_connection
->pinnedAuth())
811 request
->flags
.auth
= true;
812 comm_add_close_handler(serverConn
->fd
, fwdServerClosedWrapper
, this);
813 // the server may close the pinned connection before this request
814 pconnRace
= racePossible
;
818 // Pinned connection failure.
819 debugs(17,2,HERE
<< "Pinned connection failed: " << pinned_connection
);
820 ErrorState
*anErr
= new ErrorState(ERR_ZERO_SIZE_OBJECT
, Http::scServiceUnavailable
, request
);
822 self
= NULL
; // refcounted
826 // Use pconn to avoid opening a new connection.
827 const char *host
= NULL
;
828 if (!serverDestinations
[0]->getPeer())
829 host
= request
->GetHost();
831 Comm::ConnectionPointer temp
;
832 // Avoid pconns after races so that the same client does not suffer twice.
833 // This does not increase the total number of connections because we just
834 // closed the connection that failed the race. And re-pinning assumes this.
835 if (pconnRace
!= raceHappened
)
836 temp
= fwdPconnPool
->pop(serverDestinations
[0], host
, checkRetriable());
838 const bool openedPconn
= Comm::IsConnOpen(temp
);
839 pconnRace
= openedPconn
? racePossible
: raceImpossible
;
841 // if we found an open persistent connection to use. use it.
844 flags
.connected_okay
= true;
845 debugs(17, 3, HERE
<< "reusing pconn " << serverConnection());
848 comm_add_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
850 /* Update server side TOS and Netfilter mark on the connection. */
851 if (Ip::Qos::TheConfig
.isAclTosActive()) {
852 const tos_t tos
= GetTosToServer(request
);
853 Ip::Qos::setSockTos(temp
, tos
);
856 if (Ip::Qos::TheConfig
.isAclNfmarkActive()) {
857 const nfmark_t nfmark
= GetNfmarkToServer(request
);
858 Ip::Qos::setSockNfmark(temp
, nfmark
);
866 // We will try to open a new connection, possibly to the same destination.
867 // We reset serverDestinations[0] in case we are using it again because
868 // ConnOpener modifies its destination argument.
869 serverDestinations
[0]->local
.port(0);
872 #if URL_CHECKSUM_DEBUG
873 entry
->mem_obj
->checkUrlChecksum();
876 GetMarkingsToServer(request
, *serverDestinations
[0]);
878 calls
.connector
= commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper
, this));
879 Comm::ConnOpener
*cs
= new Comm::ConnOpener(serverDestinations
[0], calls
.connector
, ctimeout
);
888 debugs(17, 3, clientConn
<< ": Fetching " << request
->method
<< ' ' << entry
->url());
890 * Assert that server_fd is set. This is to guarantee that fwdState
891 * is attached to something and will be deallocated when server_fd
894 assert(Comm::IsConnOpen(serverConn
));
896 fd_note(serverConnection()->fd
, entry
->url());
898 fd_table
[serverConnection()->fd
].noteUse(fwdPconnPool
);
900 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
901 assert(entry
->ping_status
!= PING_WAITING
);
903 assert(entry
->locked());
905 EBIT_SET(entry
->flags
, ENTRY_DISPATCHED
);
907 netdbPingSite(request
->GetHost());
909 /* Retrieves remote server TOS or MARK value, and stores it as part of the
910 * original client request FD object. It is later used to forward
911 * remote server's TOS/MARK in the response to the client in case of a MISS.
913 if (Ip::Qos::TheConfig
.isHitNfmarkActive()) {
914 if (Comm::IsConnOpen(clientConn
) && Comm::IsConnOpen(serverConnection())) {
915 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
916 /* Get the netfilter mark for the connection */
917 Ip::Qos::getNfmarkFromServer(serverConnection(), clientFde
);
922 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
923 if (Ip::Qos::TheConfig
.isHitTosActive()) {
924 if (Comm::IsConnOpen(clientConn
)) {
925 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
926 /* Get the TOS value for the packet */
927 Ip::Qos::getTosFromServer(serverConnection(), clientFde
);
933 if (request
->flags
.sslPeek
) {
934 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
935 ConnStateData::httpsPeeked
, serverConnection());
936 unregister(serverConn
); // async call owns it now
937 complete(); // destroys us
942 if (serverConnection()->getPeer() != NULL
) {
943 ++ serverConnection()->getPeer()->stats
.fetches
;
944 request
->peer_login
= serverConnection()->getPeer()->login
;
945 request
->peer_domain
= serverConnection()->getPeer()->domain
;
948 assert(!request
->flags
.sslPeek
);
949 request
->peer_login
= NULL
;
950 request
->peer_domain
= NULL
;
952 switch (request
->url
.getScheme()) {
955 case AnyP::PROTO_HTTPS
:
960 case AnyP::PROTO_HTTP
:
964 case AnyP::PROTO_GOPHER
:
968 case AnyP::PROTO_FTP
:
972 case AnyP::PROTO_CACHE_OBJECT
:
974 case AnyP::PROTO_URN
:
975 fatal_dump("Should never get here");
978 case AnyP::PROTO_WHOIS
:
982 case AnyP::PROTO_WAIS
: /* Not implemented */
985 debugs(17, DBG_IMPORTANT
, "WARNING: Cannot retrieve '" << entry
->url() << "'.");
986 ErrorState
*anErr
= new ErrorState(ERR_UNSUP_REQ
, Http::scBadRequest
, request
);
988 // Set the dont_retry flag because this is not a transient (network) error.
989 flags
.dont_retry
= true;
990 if (Comm::IsConnOpen(serverConn
)) {
999 * FwdState::reforward
1001 * returns TRUE if the transaction SHOULD be re-forwarded to the
1002 * next choice in the serverDestinations list. This method is called when
1003 * server-side communication completes normally, or experiences
1004 * some error after receiving the end of HTTP headers.
1007 FwdState::reforward()
1009 StoreEntry
*e
= entry
;
1011 if (EBIT_TEST(e
->flags
, ENTRY_ABORTED
)) {
1012 debugs(17, 3, HERE
<< "entry aborted");
1016 assert(e
->store_status
== STORE_PENDING
);
1018 #if URL_CHECKSUM_DEBUG
1020 e
->mem_obj
->checkUrlChecksum();
1023 debugs(17, 3, HERE
<< e
->url() << "?" );
1025 if (!EBIT_TEST(e
->flags
, ENTRY_FWD_HDR_WAIT
)) {
1026 debugs(17, 3, HERE
<< "No, ENTRY_FWD_HDR_WAIT isn't set");
1030 if (n_tries
> Config
.forward_max_tries
)
1033 if (request
->bodyNibbled())
1036 if (serverDestinations
.size() <= 1) {
1037 // NP: <= 1 since total count includes the recently failed one.
1038 debugs(17, 3, HERE
<< "No alternative forwarding paths left");
1042 const Http::StatusCode s
= e
->getReply()->sline
.status();
1043 debugs(17, 3, HERE
<< "status " << s
);
1044 return reforwardableStatus(s
);
1048 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1049 * on whether this is a validation request. RFC 2616 says that we MUST reply
1050 * with "504 Gateway Timeout" if validation fails and cached reply has
1051 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
1054 FwdState::makeConnectingError(const err_type type
) const
1056 return new ErrorState(type
, request
->flags
.needValidation
?
1057 Http::scGatewayTimeout
: Http::scServiceUnavailable
, request
);
1061 fwdStats(StoreEntry
* s
)
1065 storeAppendPrintf(s
, "Status");
1067 for (j
= 1; j
< MAX_FWD_STATS_IDX
; ++j
) {
1068 storeAppendPrintf(s
, "\ttry#%d", j
);
1071 storeAppendPrintf(s
, "\n");
1073 for (i
= 0; i
<= (int) Http::scInvalidHeader
; ++i
) {
1074 if (FwdReplyCodes
[0][i
] == 0)
1077 storeAppendPrintf(s
, "%3d", i
);
1079 for (j
= 0; j
<= MAX_FWD_STATS_IDX
; ++j
) {
1080 storeAppendPrintf(s
, "\t%d", FwdReplyCodes
[j
][i
]);
1083 storeAppendPrintf(s
, "\n");
1087 /**** STATIC MEMBER FUNCTIONS *************************************************/
1090 FwdState::reforwardableStatus(const Http::StatusCode s
) const
1094 case Http::scBadGateway
:
1096 case Http::scGatewayTimeout
:
1099 case Http::scForbidden
:
1101 case Http::scInternalServerError
:
1103 case Http::scNotImplemented
:
1105 case Http::scServiceUnavailable
:
1106 return Config
.retry
.onerror
;
1116 * Decide where details need to be gathered to correctly describe a persistent connection.
1118 * - the address/port details about this link
1119 * - domain name of server at other end of this link (either peer or requested host)
1122 FwdState::pconnPush(Comm::ConnectionPointer
&conn
, const char *domain
)
1124 if (conn
->getPeer()) {
1125 fwdPconnPool
->push(conn
, NULL
);
1127 fwdPconnPool
->push(conn
, domain
);
1132 FwdState::initModule()
1134 RegisterWithCacheManager();
1138 FwdState::RegisterWithCacheManager(void)
1140 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats
, 0, 1);
1144 FwdState::logReplyStatus(int tries
, const Http::StatusCode status
)
1146 if (status
> Http::scInvalidHeader
)
1151 if (tries
> MAX_FWD_STATS_IDX
)
1152 tries
= MAX_FWD_STATS_IDX
;
1154 ++ FwdReplyCodes
[tries
][status
];
1157 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1161 * Formerly static, but now used by client_side_request.cc
1163 /// Checks for a TOS value to apply depending on the ACL
1165 aclMapTOS(acl_tos
* head
, ACLChecklist
* ch
)
1169 for (l
= head
; l
; l
= l
->next
) {
1170 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
) == ACCESS_ALLOWED
)
1177 /// Checks for a netfilter mark value to apply depending on the ACL
1179 aclMapNfmark(acl_nfmark
* head
, ACLChecklist
* ch
)
1183 for (l
= head
; l
; l
= l
->next
) {
1184 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
) == ACCESS_ALLOWED
)
1192 getOutgoingAddress(HttpRequest
* request
, Comm::ConnectionPointer conn
)
1194 // skip if an outgoing address is already set.
1195 if (!conn
->local
.isAnyAddr()) return;
1197 // ensure that at minimum the wildcard local matches remote protocol
1198 if (conn
->remote
.isIPv4())
1199 conn
->local
.setIPv4();
1201 // maybe use TPROXY client address
1202 if (request
&& request
->flags
.spoofClientIp
) {
1203 if (!conn
->getPeer() || !conn
->getPeer()->options
.no_tproxy
) {
1204 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1205 if (Config
.onoff
.tproxy_uses_indirect_client
)
1206 conn
->local
= request
->indirect_client_addr
;
1209 conn
->local
= request
->client_addr
;
1210 // some flags need setting on the socket to use this address
1211 conn
->flags
|= COMM_DOBIND
;
1212 conn
->flags
|= COMM_TRANSPARENT
;
1215 // else no tproxy today ...
1218 if (!Config
.accessList
.outgoing_address
) {
1219 return; // anything will do.
1222 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1223 ch
.dst_peer
= conn
->getPeer();
1224 ch
.dst_addr
= conn
->remote
;
1226 // TODO use the connection details in ACL.
1227 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1230 for (l
= Config
.accessList
.outgoing_address
; l
; l
= l
->next
) {
1232 /* check if the outgoing address is usable to the destination */
1233 if (conn
->remote
.isIPv4() != l
->addr
.isIPv4()) continue;
1235 /* check ACLs for this outgoing address */
1236 if (!l
->aclList
|| ch
.fastCheck(l
->aclList
) == ACCESS_ALLOWED
) {
1237 conn
->local
= l
->addr
;
1244 GetTosToServer(HttpRequest
* request
)
1246 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1247 return aclMapTOS(Ip::Qos::TheConfig
.tosToServer
, &ch
);
1251 GetNfmarkToServer(HttpRequest
* request
)
1253 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1254 return aclMapNfmark(Ip::Qos::TheConfig
.nfmarkToServer
, &ch
);
1258 GetMarkingsToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1260 // Get the server side TOS and Netfilter mark to be set on the connection.
1261 if (Ip::Qos::TheConfig
.isAclTosActive()) {
1262 conn
.tos
= GetTosToServer(request
);
1263 debugs(17, 3, "from " << conn
.local
<< " tos " << int(conn
.tos
));
1266 #if SO_MARK && USE_LIBCAP
1267 conn
.nfmark
= GetNfmarkToServer(request
);
1268 debugs(17, 3, "from " << conn
.local
<< " netfilter mark " << conn
.nfmark
);