2 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 17 Request Forwarding */
12 #include "AccessLogEntry.h"
13 #include "acl/Address.h"
14 #include "acl/FilledChecklist.h"
15 #include "acl/Gadgets.h"
16 #include "anyp/PortCfg.h"
17 #include "CacheManager.h"
18 #include "CachePeer.h"
19 #include "client_side.h"
20 #include "clients/forward.h"
21 #include "clients/HttpTunneler.h"
22 #include "comm/Connection.h"
23 #include "comm/ConnOpener.h"
24 #include "comm/Loops.h"
25 #include "CommCalls.h"
26 #include "errorpage.h"
33 #include "HappyConnOpener.h"
34 #include "hier_code.h"
36 #include "http/Stream.h"
37 #include "HttpReply.h"
38 #include "HttpRequest.h"
39 #include "icmp/net_db.h"
41 #include "ip/Intercept.h"
42 #include "ip/NfMarkConfig.h"
43 #include "ip/QosConfig.h"
45 #include "MemObject.h"
46 #include "mgr/Registration.h"
47 #include "neighbors.h"
49 #include "PeerPoolMgr.h"
50 #include "ResolvedPeers.h"
51 #include "security/BlindPeerConnector.h"
52 #include "SquidConfig.h"
53 #include "SquidTime.h"
54 #include "ssl/PeekingPeerConnector.h"
56 #include "StoreClient.h"
60 #include "ssl/cert_validate_message.h"
61 #include "ssl/Config.h"
62 #include "ssl/helper.h"
63 #include "ssl/ServerBump.h"
64 #include "ssl/support.h"
66 #include "security/EncryptorAnswer.h"
71 static CLCB fwdServerClosedWrapper
;
75 #define MAX_FWD_STATS_IDX 9
76 static int FwdReplyCodes
[MAX_FWD_STATS_IDX
+ 1][Http::scInvalidHeader
+ 1];
78 PconnPool
*fwdPconnPool
= new PconnPool("server-peers", nullptr);
80 CBDATA_CLASS_INIT(FwdState
);
82 class FwdStatePeerAnswerDialer
: public CallDialer
, public Security::PeerConnector::CbDialer
85 typedef void (FwdState::*Method
)(Security::EncryptorAnswer
&);
87 FwdStatePeerAnswerDialer(Method method
, FwdState
*fwd
):
88 method_(method
), fwd_(fwd
), answer_() {}
91 virtual bool canDial(AsyncCall
&call
) { return fwd_
.valid(); }
92 void dial(AsyncCall
&call
) { ((&(*fwd_
))->*method_
)(answer_
); }
93 virtual void print(std::ostream
&os
) const {
94 os
<< '(' << fwd_
.get() << ", " << answer_
<< ')';
97 /* Security::PeerConnector::CbDialer API */
98 virtual Security::EncryptorAnswer
&answer() { return answer_
; }
102 CbcPointer
<FwdState
> fwd_
;
103 Security::EncryptorAnswer answer_
;
107 FwdState::HandleStoreAbort(FwdState
*fwd
)
109 Pointer tmp
= fwd
; // Grab a temporary pointer to keep the object alive during our scope.
111 if (Comm::IsConnOpen(fwd
->serverConnection())) {
112 fwd
->closeServerConnection("store entry aborted");
114 debugs(17, 7, HERE
<< "store entry aborted; no connection to close");
116 fwd
->stopAndDestroy("store entry aborted");
120 FwdState::closePendingConnection(const Comm::ConnectionPointer
&conn
, const char *reason
)
122 debugs(17, 3, "because " << reason
<< "; " << conn
);
124 assert(!closeHandler
);
125 if (IsConnOpen(conn
)) {
126 fwdPconnPool
->noteUses(fd_table
[conn
->fd
].pconn
.uses
);
132 FwdState::closeServerConnection(const char *reason
)
134 debugs(17, 3, "because " << reason
<< "; " << serverConn
);
135 assert(Comm::IsConnOpen(serverConn
));
136 comm_remove_close_handler(serverConn
->fd
, closeHandler
);
138 fwdPconnPool
->noteUses(fd_table
[serverConn
->fd
].pconn
.uses
);
142 /**** PUBLIC INTERFACE ********************************************************/
144 FwdState::FwdState(const Comm::ConnectionPointer
&client
, StoreEntry
* e
, HttpRequest
* r
, const AccessLogEntryPointer
&alp
):
150 start_t(squid_curtime
),
152 destinations(new ResolvedPeers()),
153 pconnRace(raceImpossible
)
155 debugs(17, 2, "Forwarding client request " << client
<< ", url=" << e
->url());
156 HTTPMSGLOCK(request
);
158 flags
.connected_okay
= false;
159 flags
.dont_retry
= false;
160 flags
.forward_completed
= false;
161 flags
.destinationsFound
= false;
162 debugs(17, 3, "FwdState constructed, this=" << this);
165 // Called once, right after object creation, when it is safe to set self
166 void FwdState::start(Pointer aSelf
)
168 // Protect ourselves from being destroyed when the only Server pointing
169 // to us is gone (while we expect to talk to more Servers later).
170 // Once we set self, we are responsible for clearing it when we do not
171 // expect to talk to any servers.
172 self
= aSelf
; // refcounted
174 // We hope that either the store entry aborts or peer is selected.
175 // Otherwise we are going to leak our object.
177 // Ftp::Relay needs to preserve control connection on data aborts
178 // so it registers its own abort handler that calls ours when needed.
179 if (!request
->flags
.ftpNative
) {
180 AsyncCall::Pointer call
= asyncCall(17, 4, "FwdState::Abort", cbdataDialer(&FwdState::HandleStoreAbort
, this));
181 entry
->registerAbortCallback(call
);
184 // just in case; should already be initialized to false
185 request
->flags
.pinned
= false;
187 #if STRICT_ORIGINAL_DST
188 // Bug 3243: CVE 2009-0801
189 // Bypass of browser same-origin access control in intercepted communication
190 // To resolve this we must force DIRECT and only to the original client destination.
191 const bool isIntercepted
= request
&& !request
->flags
.redirected
&& (request
->flags
.intercepted
|| request
->flags
.interceptTproxy
);
192 const bool useOriginalDst
= Config
.onoff
.client_dst_passthru
|| (request
&& !request
->flags
.hostVerified
);
193 if (isIntercepted
&& useOriginalDst
) {
194 selectPeerForIntercepted();
199 // do full route options selection
200 startSelectingDestinations(request
, al
, entry
);
203 /// ends forwarding; relies on refcounting so the effect may not be immediate
205 FwdState::stopAndDestroy(const char *reason
)
207 debugs(17, 3, "for " << reason
);
210 cancelOpening(reason
);
212 PeerSelectionInitiator::subscribed
= false; // may already be false
213 self
= nullptr; // we hope refcounting destroys us soon; may already be nil
214 /* do not place any code here as this object may be gone by now */
217 /// Notify connOpener that we no longer need connections. We do not have to do
218 /// this -- connOpener would eventually notice on its own, but notifying reduces
219 /// waste and speeds up spare connection opening for other transactions (that
220 /// could otherwise wait for this transaction to use its spare allowance).
222 FwdState::cancelOpening(const char *reason
)
224 assert(calls
.connector
);
225 calls
.connector
->cancel(reason
);
226 calls
.connector
= nullptr;
231 #if STRICT_ORIGINAL_DST
232 /// bypasses peerSelect() when dealing with intercepted requests
234 FwdState::selectPeerForIntercepted()
236 // We do not support re-wrapping inside CONNECT.
237 // Our only alternative is to fake a noteDestination() call.
239 // use pinned connection if available
240 if (ConnStateData
*client
= request
->pinnedConnection()) {
241 // emulate the PeerSelector::selectPinned() "Skip ICP" effect
242 entry
->ping_status
= PING_DONE
;
248 // use client original destination as second preferred choice
249 const auto p
= new Comm::Connection();
250 p
->peerType
= ORIGINAL_DST
;
251 p
->remote
= clientConn
->local
;
252 getOutgoingAddress(request
, p
);
254 debugs(17, 3, HERE
<< "using client original destination: " << *p
);
255 destinations
->addPath(p
);
256 destinations
->destinationsFinalized
= true;
257 PeerSelectionInitiator::subscribed
= false;
263 FwdState::completed()
265 if (flags
.forward_completed
) {
266 debugs(17, DBG_IMPORTANT
, HERE
<< "FwdState::completed called on a completed request! Bad!");
270 flags
.forward_completed
= true;
272 request
->hier
.stopPeerClock(false);
274 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
275 debugs(17, 3, HERE
<< "entry aborted");
279 #if URL_CHECKSUM_DEBUG
281 entry
->mem_obj
->checkUrlChecksum();
284 if (entry
->store_status
== STORE_PENDING
) {
285 if (entry
->isEmpty()) {
286 if (!err
) // we quit (e.g., fd closed) before an error or content
287 fail(new ErrorState(ERR_READ_ERROR
, Http::scBadGateway
, request
, al
));
289 errorAppendEntry(entry
, err
);
292 if (request
->flags
.sslPeek
&& request
->clientConnectionManager
.valid()) {
293 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
294 ConnStateData::httpsPeeked
, ConnStateData::PinnedIdleContext(Comm::ConnectionPointer(nullptr), request
));
299 entry
->releaseRequest();
303 if (storePendingNClients(entry
) > 0)
304 assert(!EBIT_TEST(entry
->flags
, ENTRY_FWD_HDR_WAIT
));
308 FwdState::~FwdState()
310 debugs(17, 3, "FwdState destructor start");
312 if (! flags
.forward_completed
)
317 HTTPMSGUNLOCK(request
);
321 entry
->unregisterAbortCallback("FwdState object destructed");
323 entry
->unlock("FwdState");
328 cancelOpening("~FwdState");
330 if (Comm::IsConnOpen(serverConn
))
331 closeServerConnection("~FwdState");
333 debugs(17, 3, "FwdState destructed, this=" << this);
337 * This is the entry point for client-side to start forwarding
338 * a transaction. It is a static method that may or may not
339 * allocate a FwdState.
342 FwdState::Start(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
, const AccessLogEntryPointer
&al
)
345 * client_addr == no_addr indicates this is an "internal" request
346 * from peer_digest.c, asn.c, netdb.c, etc and should always
347 * be allowed. yuck, I know.
350 if ( Config
.accessList
.miss
&& !request
->client_addr
.isNoAddr() &&
351 !request
->flags
.internal
&& request
->url
.getScheme() != AnyP::PROTO_CACHE_OBJECT
) {
353 * Check if this host is allowed to fetch MISSES from us (miss_access).
354 * Intentionally replace the src_addr automatically selected by the checklist code
355 * we do NOT want the indirect client address to be tested here.
357 ACLFilledChecklist
ch(Config
.accessList
.miss
, request
, NULL
);
359 ch
.src_addr
= request
->client_addr
;
360 ch
.syncAle(request
, nullptr);
361 if (ch
.fastCheck().denied()) {
363 page_id
= aclGetDenyInfoPage(&Config
.denyInfoList
, AclMatchedName
, 1);
365 if (page_id
== ERR_NONE
)
366 page_id
= ERR_FORWARDING_DENIED
;
368 const auto anErr
= new ErrorState(page_id
, Http::scForbidden
, request
, al
);
369 errorAppendEntry(entry
, anErr
); // frees anErr
374 debugs(17, 3, HERE
<< "'" << entry
->url() << "'");
376 * This seems like an odd place to bind mem_obj and request.
377 * Might want to assert that request is NULL at this point
379 entry
->mem_obj
->request
= request
;
380 #if URL_CHECKSUM_DEBUG
382 entry
->mem_obj
->checkUrlChecksum();
387 const auto anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
, al
);
388 errorAppendEntry(entry
, anErr
); // frees anErr
392 if (request
->flags
.internal
) {
393 debugs(17, 2, "calling internalStart() due to request flag");
394 internalStart(clientConn
, request
, entry
, al
);
398 switch (request
->url
.getScheme()) {
400 case AnyP::PROTO_CACHE_OBJECT
:
401 debugs(17, 2, "calling CacheManager due to request scheme " << request
->url
.getScheme());
402 CacheManager::GetInstance()->start(clientConn
, request
, entry
, al
);
405 case AnyP::PROTO_URN
:
406 urnStart(request
, entry
, al
);
410 FwdState::Pointer fwd
= new FwdState(clientConn
, entry
, request
, al
);
419 FwdState::fwdStart(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
)
421 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
422 Start(clientConn
, entry
, request
, NULL
);
/// subtracts time_t values, returning zero if smaller exceeds the larger value
/// time_t might be unsigned so we need to be careful when subtracting times...
static time_t
diffOrZero(const time_t larger, const time_t smaller)
{
    // compare before subtracting: an unsigned time_t would wrap on underflow
    return (larger > smaller) ? (larger - smaller) : 0;
}
433 /// time left to finish the whole forwarding process (which started at fwdStart)
435 FwdState::ForwardTimeout(const time_t fwdStart
)
437 // time already spent on forwarding (0 if clock went backwards)
438 const time_t timeSpent
= diffOrZero(squid_curtime
, fwdStart
);
439 return diffOrZero(Config
.Timeout
.forward
, timeSpent
);
443 FwdState::EnoughTimeToReForward(const time_t fwdStart
)
445 return ForwardTimeout(fwdStart
) > 0;
449 FwdState::useDestinations()
451 if (!destinations
->empty()) {
454 if (PeerSelectionInitiator::subscribed
) {
455 debugs(17, 4, "wait for more destinations to try");
456 return; // expect a noteDestination*() call
459 debugs(17, 3, HERE
<< "Connection failed: " << entry
->url());
461 const auto anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scInternalServerError
, request
, al
);
463 } // else use actual error from last connection attempt
465 stopAndDestroy("tried all destinations");
470 FwdState::fail(ErrorState
* errorState
)
472 debugs(17, 3, err_type_str
[errorState
->type
] << " \"" << Http::StatusCodeString(errorState
->httpStatus
) << "\"\n\t" << entry
->url());
477 if (!errorState
->request
)
478 errorState
->request
= request
;
480 if (err
->type
!= ERR_ZERO_SIZE_OBJECT
)
483 if (pconnRace
== racePossible
) {
484 debugs(17, 5, HERE
<< "pconn race happened");
485 pconnRace
= raceHappened
;
486 if (destinationReceipt
) {
487 destinations
->reinstatePath(destinationReceipt
);
488 destinationReceipt
= nullptr;
492 if (ConnStateData
*pinned_connection
= request
->pinnedConnection()) {
493 pinned_connection
->pinning
.zeroReply
= true;
494 debugs(17, 4, "zero reply on pinned connection");
499 * Frees fwdState without closing FD or generating an abort
502 FwdState::unregister(Comm::ConnectionPointer
&conn
)
504 debugs(17, 3, HERE
<< entry
->url() );
505 assert(serverConnection() == conn
);
506 assert(Comm::IsConnOpen(conn
));
507 comm_remove_close_handler(conn
->fd
, closeHandler
);
510 destinationReceipt
= nullptr;
513 // \deprecated use unregister(Comm::ConnectionPointer &conn) instead
515 FwdState::unregister(int fd
)
517 debugs(17, 3, HERE
<< entry
->url() );
518 assert(fd
== serverConnection()->fd
);
519 unregister(serverConn
);
523 * FooClient modules call fwdComplete() when they are done
524 * downloading an object. Then, we either 1) re-forward the
525 * request somewhere else if needed, or 2) call storeComplete()
531 const auto replyStatus
= entry
->mem().baseReply().sline
.status();
532 debugs(17, 3, *entry
<< " status " << replyStatus
<< ' ' << entry
->url());
533 #if URL_CHECKSUM_DEBUG
535 entry
->mem_obj
->checkUrlChecksum();
538 logReplyStatus(n_tries
, replyStatus
);
541 debugs(17, 3, "re-forwarding " << replyStatus
<< " " << entry
->url());
543 if (Comm::IsConnOpen(serverConn
))
544 unregister(serverConn
);
551 if (Comm::IsConnOpen(serverConn
))
552 debugs(17, 3, "server FD " << serverConnection()->fd
<< " not re-forwarding status " << replyStatus
);
554 debugs(17, 3, "server (FD closed) not re-forwarding status " << replyStatus
);
557 if (!Comm::IsConnOpen(serverConn
))
560 stopAndDestroy("forwarding completed");
565 FwdState::noteDestination(Comm::ConnectionPointer path
)
567 flags
.destinationsFound
= true;
570 // We can call usePinned() without fear of clashing with an earlier
571 // forwarding attempt because PINNED must be the first destination.
572 assert(destinations
->empty());
579 destinations
->addPath(path
);
581 if (Comm::IsConnOpen(serverConn
)) {
582 // We are already using a previously opened connection, so we cannot be
583 // waiting for connOpener. We still receive destinations for backup.
590 return; // and continue to wait for FwdState::noteConnection() callback
593 // This is the first path candidate we have seen. Create connOpener.
598 FwdState::noteDestinationsEnd(ErrorState
*selectionError
)
600 PeerSelectionInitiator::subscribed
= false;
601 destinations
->destinationsFinalized
= true;
603 if (!flags
.destinationsFound
) {
604 if (selectionError
) {
605 debugs(17, 3, "Will abort forwarding because path selection has failed.");
606 Must(!err
); // if we tried to connect, then path selection succeeded
607 fail(selectionError
);
610 debugs(17, 3, "Will abort forwarding because all found paths have failed.");
612 debugs(17, 3, "Will abort forwarding because path selection found no paths.");
614 useDestinations(); // will detect and handle the lack of paths
617 // else continue to use one of the previously noted destinations;
618 // if all of them fail, forwarding as whole will fail
619 Must(!selectionError
); // finding at least one path means selection succeeded
621 if (Comm::IsConnOpen(serverConn
)) {
622 // We are already using a previously opened connection, so we cannot be
623 // waiting for connOpener. We were receiving destinations for backup.
628 Must(opening()); // or we would be stuck with nothing to do or wait for
630 // and continue to wait for FwdState::noteConnection() callback
633 /// makes sure connOpener knows that destinations have changed
635 FwdState::notifyConnOpener()
637 if (destinations
->notificationPending
) {
638 debugs(17, 7, "reusing pending notification about " << *destinations
);
640 debugs(17, 7, "notifying about " << *destinations
);
641 destinations
->notificationPending
= true;
642 CallJobHere(17, 5, connOpener
, HappyConnOpener
, noteCandidatesChange
);
646 /**** CALLBACK WRAPPERS ************************************************************/
649 fwdServerClosedWrapper(const CommCloseCbParams
¶ms
)
651 FwdState
*fwd
= (FwdState
*)params
.data
;
652 fwd
->serverClosed(params
.fd
);
655 /**** PRIVATE *****************************************************************/
658 * FwdState::checkRetry
660 * Return TRUE if the request SHOULD be retried. This method is
661 * called when the HTTP connection fails, or when the connection
662 * is closed before reading the end of HTTP headers from the server.
665 FwdState::checkRetry()
670 if (!self
) { // we have aborted before the server called us back
671 debugs(17, 5, HERE
<< "not retrying because of earlier abort");
672 // we will be destroyed when the server clears its Pointer to us
676 if (entry
->store_status
!= STORE_PENDING
)
679 if (!entry
->isEmpty())
682 if (exhaustedTries())
685 if (request
->flags
.pinned
&& !pinnedCanRetry())
688 if (!EnoughTimeToReForward(start_t
))
691 if (flags
.dont_retry
)
694 if (request
->bodyNibbled())
697 // NP: not yet actually connected anywhere. retry is safe.
698 if (!flags
.connected_okay
)
701 if (!checkRetriable())
707 /// Whether we may try sending this request again after a failure.
709 FwdState::checkRetriable()
711 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
712 // complicated] code required to protect the PUT request body from being
713 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
714 if (request
->body_pipe
!= NULL
)
717 // RFC2616 9.1 Safe and Idempotent Methods
718 return (request
->method
.isHttpSafe() || request
->method
.isIdempotent());
722 FwdState::serverClosed(int fd
)
724 // XXX: fd is often -1 here
725 debugs(17, 2, "FD " << fd
<< " " << entry
->url() << " after " <<
726 (fd
>= 0 ? fd_table
[fd
].pconn
.uses
: -1) << " requests");
727 if (fd
>= 0 && serverConnection()->fd
== fd
)
728 fwdPconnPool
->noteUses(fd_table
[fd
].pconn
.uses
);
733 FwdState::retryOrBail()
736 debugs(17, 3, HERE
<< "re-forwarding (" << n_tries
<< " tries, " << (squid_curtime
- start_t
) << " secs)");
741 // TODO: should we call completed() here and move doneWithRetries there?
744 request
->hier
.stopPeerClock(false);
746 if (self
!= NULL
&& !err
&& shutting_down
&& entry
->isEmpty()) {
747 const auto anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
, al
);
748 errorAppendEntry(entry
, anErr
);
751 stopAndDestroy("cannot retry");
754 // If the Server quits before nibbling at the request body, the body sender
755 // will not know (so that we can retry). Call this if we will not retry. We
756 // will notify the sender so that it does not get stuck waiting for space.
758 FwdState::doneWithRetries()
760 if (request
&& request
->body_pipe
!= NULL
)
761 request
->body_pipe
->expectNoConsumption();
764 // called by the server that failed after calling unregister()
766 FwdState::handleUnregisteredServerEnd()
768 debugs(17, 2, HERE
<< "self=" << self
<< " err=" << err
<< ' ' << entry
->url());
769 assert(!Comm::IsConnOpen(serverConn
));
773 /// starts a preparation step for an established connection; retries on failures
774 template <typename StepStart
>
776 FwdState::advanceDestination(const char *stepDescription
, const Comm::ConnectionPointer
&conn
, const StepStart
&startStep
)
778 // TODO: Extract destination-specific handling from FwdState so that all the
779 // awkward, limited-scope advanceDestination() calls can be replaced with a
780 // single simple try/catch,retry block.
783 // now wait for the step callback
785 debugs (17, 2, "exception while trying to " << stepDescription
<< ": " << CurrentException
);
786 closePendingConnection(conn
, "connection preparation exception");
791 /// called when a to-peer connection has been successfully obtained or
792 /// when all candidate destinations have been tried and all have failed
794 FwdState::noteConnection(HappyConnOpener::Answer
&answer
)
796 assert(!destinationReceipt
);
798 calls
.connector
= nullptr;
801 Must(n_tries
<= answer
.n_tries
); // n_tries cannot decrease
802 n_tries
= answer
.n_tries
;
804 ErrorState
*error
= nullptr;
805 if ((error
= answer
.error
.get())) {
806 flags
.dont_retry
= true; // or HappyConnOpener would not have given up
807 syncHierNote(answer
.conn
, request
->url
.host());
808 Must(!Comm::IsConnOpen(answer
.conn
));
809 answer
.error
.clear(); // preserve error for errorSendComplete()
810 } else if (!Comm::IsConnOpen(answer
.conn
) || fd_table
[answer
.conn
->fd
].closing()) {
811 // We do not know exactly why the connection got closed, so we play it
812 // safe, allowing retries only for persistent (reused) connections
814 destinationReceipt
= answer
.conn
;
815 assert(destinationReceipt
);
817 syncHierNote(answer
.conn
, request
->url
.host());
818 closePendingConnection(answer
.conn
, "conn was closed while waiting for noteConnection");
819 error
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
, al
);
822 destinationReceipt
= answer
.conn
;
823 assert(destinationReceipt
);
824 // serverConn remains nil until syncWithServerConn()
834 syncWithServerConn(answer
.conn
, request
->url
.host(), answer
.reused
);
838 // Check if we need to TLS before use
839 if (const auto *peer
= answer
.conn
->getPeer()) {
840 // Assume that it is only possible for the client-first from the
841 // bumping modes to try connect to a remote server. The bumped
842 // requests with other modes are using pinned connections or fails.
843 const bool clientFirstBump
= request
->flags
.sslBumped
;
844 // We need a CONNECT tunnel to send encrypted traffic through a proxy,
845 // but we do not support TLS inside TLS, so we exclude HTTPS proxies.
846 const bool originWantsEncryptedTraffic
=
847 request
->method
== Http::METHOD_CONNECT
||
848 request
->flags
.sslPeek
||
850 if (originWantsEncryptedTraffic
&& // the "encrypted traffic" part
851 !peer
->options
.originserver
&& // the "through a proxy" part
852 !peer
->secure
.encryptTransport
) // the "exclude HTTPS proxies" part
853 return advanceDestination("establish tunnel through proxy", answer
.conn
, [this,&answer
] {
854 establishTunnelThruProxy(answer
.conn
);
858 secureConnectionToPeerIfNeeded(answer
.conn
);
862 FwdState::establishTunnelThruProxy(const Comm::ConnectionPointer
&conn
)
864 AsyncCall::Pointer callback
= asyncCall(17,4,
865 "FwdState::tunnelEstablishmentDone",
866 Http::Tunneler::CbDialer
<FwdState
>(&FwdState::tunnelEstablishmentDone
, this));
867 HttpRequest::Pointer requestPointer
= request
;
868 const auto tunneler
= new Http::Tunneler(conn
, requestPointer
, callback
, connectingTimeout(conn
), al
);
870 // TODO: Replace this hack with proper Comm::Connection-Pool association
871 // that is not tied to fwdPconnPool and can handle disappearing pools.
872 tunneler
->noteFwdPconnUse
= true;
876 Must(conn
->getPeer());
877 if (!conn
->getPeer()->options
.no_delay
)
878 tunneler
->setDelayId(entry
->mem_obj
->mostBytesAllowed());
880 AsyncJob::Start(tunneler
);
881 // and wait for the tunnelEstablishmentDone() call
884 /// resumes operations after the (possibly failed) HTTP CONNECT exchange
886 FwdState::tunnelEstablishmentDone(Http::TunnelerAnswer
&answer
)
888 ErrorState
*error
= nullptr;
889 if (!answer
.positive()) {
890 Must(!Comm::IsConnOpen(answer
.conn
));
891 error
= answer
.squidError
.get();
893 answer
.squidError
.clear(); // preserve error for fail()
894 } else if (!Comm::IsConnOpen(answer
.conn
) || fd_table
[answer
.conn
->fd
].closing()) {
895 // The socket could get closed while our callback was queued.
896 // We close Connection here to sync Connection::fd.
897 closePendingConnection(answer
.conn
, "conn was closed while waiting for tunnelEstablishmentDone");
898 error
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
, al
);
899 } else if (!answer
.leftovers
.isEmpty()) {
900 // This should not happen because TLS servers do not speak first. If we
901 // have to handle this, then pass answer.leftovers via a PeerConnector
902 // to ServerBio. See ClientBio::setReadBufData().
903 static int occurrences
= 0;
904 const auto level
= (occurrences
++ < 100) ? DBG_IMPORTANT
: 2;
905 debugs(17, level
, "ERROR: Early data after CONNECT response. " <<
906 "Found " << answer
.leftovers
.length() << " bytes. " <<
907 "Closing " << answer
.conn
);
908 error
= new ErrorState(ERR_CONNECT_FAIL
, Http::scBadGateway
, request
, al
);
909 closePendingConnection(answer
.conn
, "server spoke before tunnelEstablishmentDone");
917 secureConnectionToPeerIfNeeded(answer
.conn
);
920 /// handles an established TCP connection to peer (including origin servers)
922 FwdState::secureConnectionToPeerIfNeeded(const Comm::ConnectionPointer
&conn
)
924 assert(!request
->flags
.pinned
);
926 const auto p
= conn
->getPeer();
927 const bool peerWantsTls
= p
&& p
->secure
.encryptTransport
;
928 // userWillTlsToPeerForUs assumes CONNECT == HTTPS
929 const bool userWillTlsToPeerForUs
= p
&& p
->options
.originserver
&&
930 request
->method
== Http::METHOD_CONNECT
;
931 const bool needTlsToPeer
= peerWantsTls
&& !userWillTlsToPeerForUs
;
932 const bool clientFirstBump
= request
->flags
.sslBumped
; // client-first (already) bumped connection
933 const bool needsBump
= request
->flags
.sslPeek
|| clientFirstBump
;
935 // 'GET https://...' requests. If a peer is used the request is forwarded
937 const bool needTlsToOrigin
= !p
&& request
->url
.getScheme() == AnyP::PROTO_HTTPS
&& !clientFirstBump
;
939 if (needTlsToPeer
|| needTlsToOrigin
|| needsBump
) {
940 return advanceDestination("secure connection to peer", conn
, [this,&conn
] {
941 secureConnectionToPeer(conn
);
945 // if not encrypting just run the post-connect actions
946 successfullyConnectedToPeer(conn
);
949 /// encrypts an established TCP connection to peer (including origin servers)
951 FwdState::secureConnectionToPeer(const Comm::ConnectionPointer
&conn
)
953 HttpRequest::Pointer requestPointer
= request
;
954 AsyncCall::Pointer callback
= asyncCall(17,4,
955 "FwdState::ConnectedToPeer",
956 FwdStatePeerAnswerDialer(&FwdState::connectedToPeer
, this));
957 const auto sslNegotiationTimeout
= connectingTimeout(conn
);
958 Security::PeerConnector
*connector
= nullptr;
960 if (request
->flags
.sslPeek
)
961 connector
= new Ssl::PeekingPeerConnector(requestPointer
, conn
, clientConn
, callback
, al
, sslNegotiationTimeout
);
964 connector
= new Security::BlindPeerConnector(requestPointer
, conn
, callback
, al
, sslNegotiationTimeout
);
965 connector
->noteFwdPconnUse
= true;
966 AsyncJob::Start(connector
); // will call our callback
969 /// called when all negotiations with the TLS-speaking peer have been completed
971 FwdState::connectedToPeer(Security::EncryptorAnswer
&answer
)
973 ErrorState
*error
= nullptr;
974 if ((error
= answer
.error
.get())) {
975 Must(!Comm::IsConnOpen(answer
.conn
));
976 answer
.error
.clear(); // preserve error for errorSendComplete()
977 } else if (answer
.tunneled
) {
978 // TODO: When ConnStateData establishes tunnels, its state changes
979 // [in ways that may affect logging?]. Consider informing
980 // ConnStateData about our tunnel or otherwise unifying tunnel
981 // establishment [side effects].
982 complete(); // destroys us
984 } else if (!Comm::IsConnOpen(answer
.conn
) || fd_table
[answer
.conn
->fd
].closing()) {
985 closePendingConnection(answer
.conn
, "conn was closed while waiting for connectedToPeer");
986 error
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
, al
);
995 successfullyConnectedToPeer(answer
.conn
);
998 /// called when all negotiations with the peer have been completed
1000 FwdState::successfullyConnectedToPeer(const Comm::ConnectionPointer
&conn
)
1002 syncWithServerConn(conn
, request
->url
.host(), false);
1004 // should reach ConnStateData before the dispatched Client job starts
1005 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
1006 ConnStateData::notePeerConnection
, serverConnection());
1008 if (serverConnection()->getPeer())
1009 peerConnectSucceded(serverConnection()->getPeer());
1014 /// commits to using the given open to-peer connection
1016 FwdState::syncWithServerConn(const Comm::ConnectionPointer
&conn
, const char *host
, const bool reused
)
1018 Must(IsConnOpen(conn
));
1020 // no effect on destinationReceipt (which may even be nil here)
1022 closeHandler
= comm_add_close_handler(serverConn
->fd
, fwdServerClosedWrapper
, this);
1025 pconnRace
= racePossible
;
1026 ResetMarkingsToServer(request
, *serverConn
);
1028 pconnRace
= raceImpossible
;
1029 // Comm::ConnOpener already applied proper/current markings
1032 syncHierNote(serverConn
, host
);
1036 FwdState::syncHierNote(const Comm::ConnectionPointer
&server
, const char *host
)
1039 request
->hier
.resetPeerNotes(server
, host
);
1041 al
->hier
.resetPeerNotes(server
, host
);
1045 * Called after forwarding path selection (via peer select) has taken place
1046 * and whenever forwarding needs to attempt a new connection (routing failover).
1047 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
1050 FwdState::connectStart()
1052 debugs(17, 3, *destinations
<< " to " << entry
->url());
1054 Must(!request
->pinnedConnection());
1056 assert(!destinations
->empty());
1059 // Ditch error page if it was created before.
1060 // A new one will be created if there's another problem
1063 request
->clearError();
1064 serverConn
= nullptr;
1065 destinationReceipt
= nullptr;
1067 request
->hier
.startPeerClock();
1069 calls
.connector
= asyncCall(17, 5, "FwdState::noteConnection", HappyConnOpener::CbDialer
<FwdState
>(&FwdState::noteConnection
, this));
1071 HttpRequest::Pointer cause
= request
;
1072 const auto cs
= new HappyConnOpener(destinations
, calls
.connector
, cause
, start_t
, n_tries
, al
);
1073 cs
->setHost(request
->url
.host());
1074 bool retriable
= checkRetriable();
1075 if (!retriable
&& Config
.accessList
.serverPconnForNonretriable
) {
1076 ACLFilledChecklist
ch(Config
.accessList
.serverPconnForNonretriable
, request
, nullptr);
1078 ch
.syncAle(request
, nullptr);
1079 retriable
= ch
.fastCheck().allowed();
1081 cs
->setRetriable(retriable
);
1082 cs
->allowPersistent(pconnRace
!= raceHappened
);
1083 destinations
->notificationPending
= true; // start() is async
1085 AsyncJob::Start(cs
);
1088 /// send request on an existing connection dedicated to the requesting client
1090 FwdState::usePinned()
1092 const auto connManager
= request
->pinnedConnection();
1093 debugs(17, 7, "connection manager: " << connManager
);
1096 // TODO: Refactor syncWithServerConn() and callers to always set
1097 // serverConn inside that method.
1098 serverConn
= ConnStateData::BorrowPinnedConnection(request
, al
);
1099 debugs(17, 5, "connection: " << serverConn
);
1100 } catch (ErrorState
* const anErr
) {
1101 syncHierNote(nullptr, connManager
? connManager
->pinning
.host
: request
->url
.host());
1102 serverConn
= nullptr;
1104 // Connection managers monitor their idle pinned to-server
1105 // connections and close from-client connections upon seeing
1106 // a to-server connection closure. Retrying here is futile.
1107 stopAndDestroy("pinned connection failure");
1112 request
->flags
.pinned
= true;
1114 assert(connManager
);
1115 if (connManager
->pinnedAuth())
1116 request
->flags
.auth
= true;
1118 // the server may close the pinned connection before this request
1119 const auto reused
= true;
1120 syncWithServerConn(serverConn
, connManager
->pinning
.host
, reused
);
1126 FwdState::dispatch()
1128 debugs(17, 3, clientConn
<< ": Fetching " << request
->method
<< ' ' << entry
->url());
1130 * Assert that server_fd is set. This is to guarantee that fwdState
1131 * is attached to something and will be deallocated when server_fd
1134 assert(Comm::IsConnOpen(serverConn
));
1136 fd_note(serverConnection()->fd
, entry
->url());
1138 fd_table
[serverConnection()->fd
].noteUse();
1140 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
1141 assert(entry
->ping_status
!= PING_WAITING
);
1143 assert(entry
->locked());
1145 EBIT_SET(entry
->flags
, ENTRY_DISPATCHED
);
1147 flags
.connected_okay
= true;
1149 netdbPingSite(request
->url
.host());
1151 /* Retrieves remote server TOS or MARK value, and stores it as part of the
1152 * original client request FD object. It is later used to forward
1153 * remote server's TOS/MARK in the response to the client in case of a MISS.
1155 if (Ip::Qos::TheConfig
.isHitNfmarkActive()) {
1156 if (Comm::IsConnOpen(clientConn
) && Comm::IsConnOpen(serverConnection())) {
1157 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
1158 /* Get the netfilter CONNMARK */
1159 clientFde
->nfConnmarkFromServer
= Ip::Qos::getNfConnmark(serverConnection(), Ip::Qos::dirOpened
);
1164 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
1165 if (Ip::Qos::TheConfig
.isHitTosActive()) {
1166 if (Comm::IsConnOpen(clientConn
)) {
1167 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
1168 /* Get the TOS value for the packet */
1169 Ip::Qos::getTosFromServer(serverConnection(), clientFde
);
1175 if (request
->flags
.sslPeek
) {
1176 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
1177 ConnStateData::httpsPeeked
, ConnStateData::PinnedIdleContext(serverConnection(), request
));
1178 unregister(serverConn
); // async call owns it now
1179 complete(); // destroys us
1184 if (const auto peer
= serverConnection()->getPeer()) {
1185 ++peer
->stats
.fetches
;
1186 request
->prepForPeering(*peer
);
1189 assert(!request
->flags
.sslPeek
);
1190 request
->prepForDirect();
1192 switch (request
->url
.getScheme()) {
1194 case AnyP::PROTO_HTTPS
:
1198 case AnyP::PROTO_HTTP
:
1202 case AnyP::PROTO_GOPHER
:
1206 case AnyP::PROTO_FTP
:
1207 if (request
->flags
.ftpNative
)
1208 Ftp::StartRelay(this);
1210 Ftp::StartGateway(this);
1213 case AnyP::PROTO_CACHE_OBJECT
:
1215 case AnyP::PROTO_URN
:
1216 fatal_dump("Should never get here");
1219 case AnyP::PROTO_WHOIS
:
1223 case AnyP::PROTO_WAIS
: /* Not implemented */
1226 debugs(17, DBG_IMPORTANT
, "WARNING: Cannot retrieve '" << entry
->url() << "'.");
1227 const auto anErr
= new ErrorState(ERR_UNSUP_REQ
, Http::scBadRequest
, request
, al
);
1229 // Set the dont_retry flag because this is not a transient (network) error.
1230 flags
.dont_retry
= true;
1231 if (Comm::IsConnOpen(serverConn
)) {
1232 serverConn
->close(); // trigger cleanup
1240 * FwdState::reforward
1242 * returns TRUE if the transaction SHOULD be re-forwarded to the
1243 * next choice in the serverDestinations list. This method is called when
1244 * peer communication completes normally, or experiences
1245 * some error after receiving the end of HTTP headers.
1248 FwdState::reforward()
1250 StoreEntry
*e
= entry
;
1252 if (EBIT_TEST(e
->flags
, ENTRY_ABORTED
)) {
1253 debugs(17, 3, HERE
<< "entry aborted");
1257 assert(e
->store_status
== STORE_PENDING
);
1259 #if URL_CHECKSUM_DEBUG
1261 e
->mem_obj
->checkUrlChecksum();
1264 debugs(17, 3, HERE
<< e
->url() << "?" );
1266 if (request
->flags
.pinned
&& !pinnedCanRetry()) {
1267 debugs(17, 3, "pinned connection; cannot retry");
1271 if (!EBIT_TEST(e
->flags
, ENTRY_FWD_HDR_WAIT
)) {
1272 debugs(17, 3, HERE
<< "No, ENTRY_FWD_HDR_WAIT isn't set");
1276 if (exhaustedTries())
1279 if (request
->bodyNibbled())
1282 if (destinations
->empty() && !PeerSelectionInitiator::subscribed
) {
1283 debugs(17, 3, HERE
<< "No alternative forwarding paths left");
1287 const auto s
= entry
->mem().baseReply().sline
.status();
1288 debugs(17, 3, HERE
<< "status " << s
);
1289 return reforwardableStatus(s
);
1293 fwdStats(StoreEntry
* s
)
1297 storeAppendPrintf(s
, "Status");
1299 for (j
= 1; j
< MAX_FWD_STATS_IDX
; ++j
) {
1300 storeAppendPrintf(s
, "\ttry#%d", j
);
1303 storeAppendPrintf(s
, "\n");
1305 for (i
= 0; i
<= (int) Http::scInvalidHeader
; ++i
) {
1306 if (FwdReplyCodes
[0][i
] == 0)
1309 storeAppendPrintf(s
, "%3d", i
);
1311 for (j
= 0; j
<= MAX_FWD_STATS_IDX
; ++j
) {
1312 storeAppendPrintf(s
, "\t%d", FwdReplyCodes
[j
][i
]);
1315 storeAppendPrintf(s
, "\n");
1319 /**** STATIC MEMBER FUNCTIONS *************************************************/
1322 FwdState::reforwardableStatus(const Http::StatusCode s
) const
1326 case Http::scBadGateway
:
1328 case Http::scGatewayTimeout
:
1331 case Http::scForbidden
:
1333 case Http::scInternalServerError
:
1335 case Http::scNotImplemented
:
1337 case Http::scServiceUnavailable
:
1338 return Config
.retry
.onerror
;
1348 FwdState::initModule()
1350 RegisterWithCacheManager();
1354 FwdState::RegisterWithCacheManager(void)
1356 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats
, 0, 1);
1360 FwdState::logReplyStatus(int tries
, const Http::StatusCode status
)
1362 if (status
> Http::scInvalidHeader
)
1367 if (tries
> MAX_FWD_STATS_IDX
)
1368 tries
= MAX_FWD_STATS_IDX
;
1370 ++ FwdReplyCodes
[tries
][status
];
1374 FwdState::exhaustedTries() const
1376 return n_tries
>= Config
.forward_max_tries
;
1380 FwdState::pinnedCanRetry() const
1382 assert(request
->flags
.pinned
);
1384 // pconn race on pinned connection: Currently we do not have any mechanism
1385 // to retry current pinned connection path.
1386 if (pconnRace
== raceHappened
)
1389 // If a bumped connection was pinned, then the TLS client was given our peer
1390 // details. Do not retry because we do not ensure that those details stay
1391 // constant. Step1-bumped connections do not get our TLS peer details, are
1392 // never pinned, and, hence, never reach this method.
1393 if (request
->flags
.sslBumped
)
1396 // The other pinned cases are FTP proxying and connection-based HTTP
1397 // authentication. TODO: Do these cases have restrictions?
1402 FwdState::connectingTimeout(const Comm::ConnectionPointer
&conn
) const
1404 const auto connTimeout
= conn
->connectTimeout(start_t
);
1405 return positiveTimeout(connTimeout
);
1408 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1412 * Formerly static, but now used by client_side_request.cc
1414 /// Checks for a TOS value to apply depending on the ACL
1416 aclMapTOS(acl_tos
* head
, ACLChecklist
* ch
)
1418 for (acl_tos
*l
= head
; l
; l
= l
->next
) {
1419 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
).allowed())
1426 /// Checks for a netfilter mark value to apply depending on the ACL
1428 aclFindNfMarkConfig(acl_nfmark
* head
, ACLChecklist
* ch
)
1430 for (acl_nfmark
*l
= head
; l
; l
= l
->next
) {
1431 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
).allowed())
1432 return l
->markConfig
;
1439 getOutgoingAddress(HttpRequest
* request
, const Comm::ConnectionPointer
&conn
)
1441 // skip if an outgoing address is already set.
1442 if (!conn
->local
.isAnyAddr()) return;
1444 // ensure that at minimum the wildcard local matches remote protocol
1445 if (conn
->remote
.isIPv4())
1446 conn
->local
.setIPv4();
1448 // maybe use TPROXY client address
1449 if (request
&& request
->flags
.spoofClientIp
) {
1450 if (!conn
->getPeer() || !conn
->getPeer()->options
.no_tproxy
) {
1451 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1452 if (Config
.onoff
.tproxy_uses_indirect_client
)
1453 conn
->local
= request
->indirect_client_addr
;
1456 conn
->local
= request
->client_addr
;
1457 conn
->local
.port(0); // let OS pick the source port to prevent address clashes
1458 // some flags need setting on the socket to use this address
1459 conn
->flags
|= COMM_DOBIND
;
1460 conn
->flags
|= COMM_TRANSPARENT
;
1463 // else no tproxy today ...
1466 if (!Config
.accessList
.outgoing_address
) {
1467 return; // anything will do.
1470 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1471 ch
.dst_peer_name
= conn
->getPeer() ? conn
->getPeer()->name
: NULL
;
1472 ch
.dst_addr
= conn
->remote
;
1474 // TODO use the connection details in ACL.
1475 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1477 for (Acl::Address
*l
= Config
.accessList
.outgoing_address
; l
; l
= l
->next
) {
1479 /* check if the outgoing address is usable to the destination */
1480 if (conn
->remote
.isIPv4() != l
->addr
.isIPv4()) continue;
1482 /* check ACLs for this outgoing address */
1483 if (!l
->aclList
|| ch
.fastCheck(l
->aclList
).allowed()) {
1484 conn
->local
= l
->addr
;
1490 /// \returns the TOS value that should be set on the to-peer connection
1492 GetTosToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1494 if (!Ip::Qos::TheConfig
.tosToServer
)
1497 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1498 ch
.dst_peer_name
= conn
.getPeer() ? conn
.getPeer()->name
: nullptr;
1499 ch
.dst_addr
= conn
.remote
;
1500 return aclMapTOS(Ip::Qos::TheConfig
.tosToServer
, &ch
);
1503 /// \returns the Netfilter mark that should be set on the to-peer connection
1505 GetNfmarkToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1507 if (!Ip::Qos::TheConfig
.nfmarkToServer
)
1510 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1511 ch
.dst_peer_name
= conn
.getPeer() ? conn
.getPeer()->name
: nullptr;
1512 ch
.dst_addr
= conn
.remote
;
1513 const auto mc
= aclFindNfMarkConfig(Ip::Qos::TheConfig
.nfmarkToServer
, &ch
);
1518 GetMarkingsToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1520 // Get the server side TOS and Netfilter mark to be set on the connection.
1521 conn
.tos
= GetTosToServer(request
, conn
);
1522 conn
.nfmark
= GetNfmarkToServer(request
, conn
);
1523 debugs(17, 3, "from " << conn
.local
<< " tos " << int(conn
.tos
) << " netfilter mark " << conn
.nfmark
);
1527 ResetMarkingsToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1529 GetMarkingsToServer(request
, conn
);
1531 // TODO: Avoid these calls if markings has not changed.
1533 Ip::Qos::setSockTos(&conn
, conn
.tos
);
1535 Ip::Qos::setSockNfmark(&conn
, conn
.nfmark
);