2 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 17 Request Forwarding */
12 #include "AccessLogEntry.h"
13 #include "acl/Address.h"
14 #include "acl/FilledChecklist.h"
15 #include "acl/Gadgets.h"
16 #include "anyp/PortCfg.h"
17 #include "base/AsyncCallbacks.h"
18 #include "base/AsyncCbdataCalls.h"
19 #include "CacheManager.h"
20 #include "CachePeer.h"
21 #include "client_side.h"
22 #include "clients/forward.h"
23 #include "clients/HttpTunneler.h"
24 #include "clients/WhoisGateway.h"
25 #include "comm/Connection.h"
26 #include "comm/ConnOpener.h"
27 #include "comm/Loops.h"
28 #include "CommCalls.h"
29 #include "errorpage.h"
35 #include "HappyConnOpener.h"
36 #include "hier_code.h"
38 #include "http/Stream.h"
39 #include "HttpReply.h"
40 #include "HttpRequest.h"
41 #include "icmp/net_db.h"
43 #include "ip/Intercept.h"
44 #include "ip/NfMarkConfig.h"
45 #include "ip/QosConfig.h"
47 #include "MemObject.h"
48 #include "mgr/Registration.h"
49 #include "neighbors.h"
51 #include "PeerPoolMgr.h"
52 #include "ResolvedPeers.h"
53 #include "security/BlindPeerConnector.h"
54 #include "SquidConfig.h"
55 #include "ssl/PeekingPeerConnector.h"
57 #include "StoreClient.h"
60 #include "ssl/cert_validate_message.h"
61 #include "ssl/Config.h"
62 #include "ssl/helper.h"
63 #include "ssl/ServerBump.h"
64 #include "ssl/support.h"
66 #include "security/EncryptorAnswer.h"
// File-scope forwarding state: the close-callback forward declaration used by
// syncWithServerConn(), the per-attempt/per-status reply statistics table, and
// the shared pool of idle server-side persistent connections.
// NOTE(review): this chunk's text is line-shredded with embedded original line
// numbers; kept byte-identical here.
71 static CLCB fwdServerClosedWrapper
;
75 #define MAX_FWD_STATS_IDX 9
76 static int FwdReplyCodes
[MAX_FWD_STATS_IDX
+ 1][Http::scInvalidHeader
+ 1];
78 PconnPool
*fwdPconnPool
= new PconnPool("server-peers", nullptr);
80 CBDATA_CLASS_INIT(FwdState
);
// Store-entry abort callback: closes the open server connection (if any) and
// then ends forwarding via stopAndDestroy(). A temporary refcounted Pointer
// keeps the FwdState alive for the duration of the call.
83 FwdState::HandleStoreAbort(FwdState
*fwd
)
85 Pointer tmp
= fwd
; // Grab a temporary pointer to keep the object alive during our scope.
87 if (Comm::IsConnOpen(fwd
->serverConnection())) {
88 fwd
->closeServerConnection("store entry aborted");
90 debugs(17, 7, "store entry aborted; no connection to close");
92 fwd
->stopAndDestroy("store entry aborted");
// Closes a to-server connection that we have not yet committed to (no
// closeHandler registered for it — hence the assert). Records the prior
// persistent-connection use count with fwdPconnPool before closing.
96 FwdState::closePendingConnection(const Comm::ConnectionPointer
&conn
, const char *reason
)
98 debugs(17, 3, "because " << reason
<< "; " << conn
);
100 assert(!closeHandler
);
101 if (IsConnOpen(conn
)) {
102 fwdPconnPool
->noteUses(fd_table
[conn
->fd
].pconn
.uses
);
// Closes the committed serverConn: removes our close handler first (so the
// closure is not treated as an unexpected server-side close), then records
// pconn usage with fwdPconnPool.
108 FwdState::closeServerConnection(const char *reason
)
110 debugs(17, 3, "because " << reason
<< "; " << serverConn
);
111 assert(Comm::IsConnOpen(serverConn
));
112 comm_remove_close_handler(serverConn
->fd
, closeHandler
);
113 closeHandler
= nullptr;
114 fwdPconnPool
->noteUses(fd_table
[serverConn
->fd
].pconn
.uses
);
118 /**** PUBLIC INTERFACE ********************************************************/
// Constructor: records the forwarding start time, creates the (initially
// empty) resolved-destinations list, locks the request, and clears all
// forwarding flags. Actual work begins later, in start().
120 FwdState::FwdState(const Comm::ConnectionPointer
&client
, StoreEntry
* e
, HttpRequest
* r
, const AccessLogEntryPointer
&alp
):
126 start_t(squid_curtime
),
128 waitingForDispatched(false),
129 destinations(new ResolvedPeers()),
130 pconnRace(raceImpossible
),
131 storedWholeReply_(nullptr)
133 debugs(17, 2, "Forwarding client request " << client
<< ", url=" << e
->url());
134 HTTPMSGLOCK(request
);
136 flags
.connected_okay
= false;
137 flags
.dont_retry
= false;
138 flags
.forward_completed
= false;
139 flags
.destinationsFound
= false;
140 debugs(17, 3, "FwdState constructed, this=" << this);
// Begins forwarding: sets the self-protecting refcounted pointer, registers a
// store-abort callback (except for native FTP relays, which install their
// own), and kicks off destination selection — either the STRICT_ORIGINAL_DST
// shortcut for intercepted traffic or full peer selection.
143 // Called once, right after object creation, when it is safe to set self
144 void FwdState::start(Pointer aSelf
)
146 // Protect ourselves from being destroyed when the only Server pointing
147 // to us is gone (while we expect to talk to more Servers later).
148 // Once we set self, we are responsible for clearing it when we do not
149 // expect to talk to any servers.
150 self
= aSelf
; // refcounted
152 // We hope that either the store entry aborts or peer is selected.
153 // Otherwise we are going to leak our object.
155 // Ftp::Relay needs to preserve control connection on data aborts
156 // so it registers its own abort handler that calls ours when needed.
157 if (!request
->flags
.ftpNative
) {
158 AsyncCall::Pointer call
= asyncCall(17, 4, "FwdState::Abort", cbdataDialer(&FwdState::HandleStoreAbort
, this));
159 entry
->registerAbortCallback(call
);
162 // just in case; should already be initialized to false
163 request
->flags
.pinned
= false;
165 #if STRICT_ORIGINAL_DST
166 // Bug 3243: CVE 2009-0801
167 // Bypass of browser same-origin access control in intercepted communication
168 // To resolve this we must force DIRECT and only to the original client destination.
169 const bool isIntercepted
= request
&& !request
->flags
.redirected
&& (request
->flags
.intercepted
|| request
->flags
.interceptTproxy
);
170 const bool useOriginalDst
= Config
.onoff
.client_dst_passthru
|| (request
&& !request
->flags
.hostVerified
);
171 if (isIntercepted
&& useOriginalDst
) {
172 selectPeerForIntercepted();
177 // do full route options selection
178 startSelectingDestinations(request
, al
, entry
);
// Ends forwarding: unsubscribes from peer selection and drops the self
// pointer so refcounting can destroy this object.
181 /// ends forwarding; relies on refcounting so the effect may not be immediate
183 FwdState::stopAndDestroy(const char *reason
)
185 debugs(17, 3, "for " << reason
);
189 PeerSelectionInitiator::subscribed
= false; // may already be false
190 self
= nullptr; // we hope refcounting destroys us soon; may already be nil
191 /* do not place any code here as this object may be gone by now */
// Cancels all three possible pending helper jobs (transport opening, TLS
// encryption, CONNECT tunneling); canceling an inactive waiter is harmless.
194 /// Notify a pending subtask, if any, that we no longer need its help. We do not
195 /// have to do this -- the subtask job will eventually end -- but ending it
196 /// earlier reduces waste and may reduce DoS attack surface.
198 FwdState::cancelStep(const char *reason
)
200 transportWait
.cancel(reason
);
201 encryptionWait
.cancel(reason
);
202 peerWait
.cancel(reason
);
// STRICT_ORIGINAL_DST-only: instead of running peer selection, feeds the
// destinations list directly — the pinned connection first (if any), then the
// client's original destination address — and marks selection as finished.
205 #if STRICT_ORIGINAL_DST
206 /// bypasses peerSelect() when dealing with intercepted requests
208 FwdState::selectPeerForIntercepted()
210 // We do not support re-wrapping inside CONNECT.
211 // Our only alternative is to fake a noteDestination() call.
213 // use pinned connection if available
214 if (ConnStateData
*client
= request
->pinnedConnection()) {
215 // emulate the PeerSelector::selectPinned() "Skip ICP" effect
216 entry
->ping_status
= PING_DONE
;
222 // use client original destination as second preferred choice
223 const auto p
= new Comm::Connection();
224 p
->peerType
= ORIGINAL_DST
;
225 p
->remote
= clientConn
->local
;
226 getOutgoingAddress(request
, p
);
228 debugs(17, 3, "using client original destination: " << *p
);
229 destinations
->addPath(p
);
230 destinations
->destinationsFinalized
= true;
231 PeerSelectionInitiator::subscribed
= false;
// Copies the final error (err) into the access log entry: derives LogTagsErrors
// from the errno (substituting ETIMEDOUT for read-timeout errors), attaches a
// WITH_SERVER detail, and records the Error on the ALE.
236 /// updates ALE when we finalize the transaction error (if any)
238 FwdState::updateAleWithFinalError()
243 const auto lte
= LogTagsErrors::FromErrno(err
->type
== ERR_READ_TIMEOUT
? ETIMEDOUT
: err
->xerrno
);
244 al
->cache
.code
.err
.update(lte
);
246 static const auto d
= MakeNamedErrorDetail("WITH_SERVER");
249 al
->updateError(Error(err
->type
, err
->detail
));
// Finalizes the forwarding transaction exactly once: stops the peer clock,
// then either appends an error to a still-pending/empty entry, notifies the
// sslPeek client-side manager, or completes the stored entry (successfully if
// the whole reply was stored, truncated otherwise).
253 FwdState::completed()
255 if (flags
.forward_completed
) {
256 debugs(17, DBG_IMPORTANT
, "ERROR: FwdState::completed called on a completed request! Bad!");
260 flags
.forward_completed
= true;
262 request
->hier
.stopPeerClock(false);
264 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
265 debugs(17, 3, "entry aborted");
269 #if URL_CHECKSUM_DEBUG
271 entry
->mem_obj
->checkUrlChecksum();
274 if (entry
->store_status
== STORE_PENDING
) {
275 if (entry
->isEmpty()) {
276 assert(!storedWholeReply_
);
277 if (!err
) // we quit (e.g., fd closed) before an error or content
278 fail(new ErrorState(ERR_READ_ERROR
, Http::scBadGateway
, request
, al
));
280 updateAleWithFinalError();
281 errorAppendEntry(entry
, err
);
284 if (request
->flags
.sslPeek
&& request
->clientConnectionManager
.valid()) {
285 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
286 ConnStateData::httpsPeeked
, ConnStateData::PinnedIdleContext(Comm::ConnectionPointer(nullptr), request
));
287 // no flags.dont_retry: completed() is a post-reforward() act
291 updateAleWithFinalError(); // if any
292 if (storedWholeReply_
)
293 entry
->completeSuccessfully(storedWholeReply_
);
295 entry
->completeTruncated("FwdState default");
299 if (storePendingNClients(entry
) > 0)
300 assert(!EBIT_TEST(entry
->flags
, ENTRY_FWD_HDR_WAIT
));
// Destructor: unlocks the request, unregisters the abort callback, releases
// the store entry, cancels any pending helper step, and closes the server
// connection if one is still open.
304 FwdState::~FwdState()
306 debugs(17, 3, "FwdState destructor start");
308 if (! flags
.forward_completed
)
313 HTTPMSGUNLOCK(request
);
317 entry
->unregisterAbortCallback("FwdState object destructed");
319 entry
->unlock("FwdState");
323 cancelStep("~FwdState");
325 if (Comm::IsConnOpen(serverConn
))
326 closeServerConnection("~FwdState");
328 debugs(17, 3, "FwdState destructed, this=" << this);
// Static entry point from the client side. Applies miss_access checks (using
// the direct client address, replying 403 on denial), handles shutdown and
// internal requests, dispatches URN requests to urnStart(), and otherwise
// creates a FwdState to drive the forwarding.
332 * This is the entry point for client-side to start forwarding
333 * a transaction. It is a static method that may or may not
334 * allocate a FwdState.
337 FwdState::Start(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
, const AccessLogEntryPointer
&al
)
340 * client_addr == no_addr indicates this is an "internal" request
341 * from peer_digest.c, asn.c, netdb.c, etc and should always
342 * be allowed. yuck, I know.
345 if ( Config
.accessList
.miss
&& !request
->client_addr
.isNoAddr() && !request
->flags
.internal
) {
347 * Check if this host is allowed to fetch MISSES from us (miss_access).
348 * Intentionally replace the src_addr automatically selected by the checklist code
349 * we do NOT want the indirect client address to be tested here.
351 ACLFilledChecklist
ch(Config
.accessList
.miss
, request
, nullptr);
353 ch
.src_addr
= request
->client_addr
;
354 ch
.syncAle(request
, nullptr);
355 if (ch
.fastCheck().denied()) {
356 auto page_id
= FindDenyInfoPage(ch
.currentAnswer(), true);
357 if (page_id
== ERR_NONE
)
358 page_id
= ERR_FORWARDING_DENIED
;
360 const auto anErr
= new ErrorState(page_id
, Http::scForbidden
, request
, al
);
361 errorAppendEntry(entry
, anErr
); // frees anErr
366 debugs(17, 3, "'" << entry
->url() << "'");
368 * This seems like an odd place to bind mem_obj and request.
369 * Might want to assert that request is NULL at this point
371 entry
->mem_obj
->request
= request
;
372 #if URL_CHECKSUM_DEBUG
374 entry
->mem_obj
->checkUrlChecksum();
379 const auto anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
, al
);
380 errorAppendEntry(entry
, anErr
); // frees anErr
384 if (request
->flags
.internal
) {
385 debugs(17, 2, "calling internalStart() due to request flag");
386 internalStart(clientConn
, request
, entry
, al
);
390 switch (request
->url
.getScheme()) {
392 case AnyP::PROTO_URN
:
393 urnStart(request
, entry
, al
);
397 FwdState::Pointer fwd
= new FwdState(clientConn
, entry
, request
, al
);
// Convenience wrapper: forwards without an AccessLogEntry (passes nullptr ALE).
406 FwdState::fwdStart(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
)
408 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
409 Start(clientConn
, entry
, request
, nullptr);
/// Subtracts time_t values, returning zero when `smaller` meets or exceeds
/// `larger`. time_t may be an unsigned type, so a plain subtraction could wrap
/// around instead of going negative; the guard avoids that.
/// \param larger the (expected) larger timestamp/duration
/// \param smaller the (expected) smaller timestamp/duration
/// \return larger - smaller, or 0 if that difference would be negative
static time_t
diffOrZero(const time_t larger, const time_t smaller)
{
    return (larger > smaller) ? (larger - smaller) : 0;
}
// Computes the remaining forward_timeout budget: the configured forward
// timeout minus time already spent, clamped at zero by diffOrZero().
420 /// time left to finish the whole forwarding process (which started at fwdStart)
422 FwdState::ForwardTimeout(const time_t fwdStart
)
424 // time already spent on forwarding (0 if clock went backwards)
425 const time_t timeSpent
= diffOrZero(squid_curtime
, fwdStart
);
426 return diffOrZero(Config
.Timeout
.forward
, timeSpent
);
// True while any forward_timeout budget remains for another forwarding attempt.
430 FwdState::EnoughTimeToReForward(const time_t fwdStart
)
432 return ForwardTimeout(fwdStart
) > 0;
// Tries the next resolved destination if any are queued; otherwise either
// waits for more (while still subscribed to peer selection) or gives up,
// failing with ERR_CANNOT_FORWARD when no earlier attempt produced an error.
436 FwdState::useDestinations()
438 if (!destinations
->empty()) {
441 if (PeerSelectionInitiator::subscribed
) {
442 debugs(17, 4, "wait for more destinations to try");
443 return; // expect a noteDestination*() call
446 debugs(17, 3, "Connection failed: " << entry
->url());
448 const auto anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scInternalServerError
, request
, al
);
450 } // else use actual error from last connection attempt
452 stopAndDestroy("tried all destinations");
// Records a forwarding error: binds the request to the error if unset,
// applies ERR_ZERO_SIZE_OBJECT pconn-race handling, and clears the current
// destination receipt.
457 FwdState::fail(ErrorState
* errorState
)
459 debugs(17, 3, err_type_str
[errorState
->type
] << " \"" << Http::StatusCodeString(errorState
->httpStatus
) << "\"\n\t" << entry
->url());
464 if (!errorState
->request
)
465 errorState
->request
= request
;
467 if (err
->type
== ERR_ZERO_SIZE_OBJECT
)
468 reactToZeroSizeObject();
470 destinationReceipt
= nullptr; // may already be nil
// Handles a zero-size reply: if a persistent-connection race was possible,
// marks it as having happened and reinstates the failed path for a retry on a
// fresh connection; also flags zeroReply on a pinned client connection.
473 /// ERR_ZERO_SIZE_OBJECT requires special adjustments
475 FwdState::reactToZeroSizeObject()
477 assert(err
->type
== ERR_ZERO_SIZE_OBJECT
);
479 if (pconnRace
== racePossible
) {
480 debugs(17, 5, "pconn race happened");
481 pconnRace
= raceHappened
;
482 if (destinationReceipt
) {
483 destinations
->reinstatePath(destinationReceipt
);
484 destinationReceipt
= nullptr;
488 if (ConnStateData
*pinned_connection
= request
->pinnedConnection()) {
489 pinned_connection
->pinning
.zeroReply
= true;
490 debugs(17, 4, "zero reply on pinned connection");
// Detaches from the given (still open) server connection without closing it:
// removes the close handler and forgets serverConn and the destination receipt.
495 * Frees fwdState without closing FD or generating an abort
498 FwdState::unregister(Comm::ConnectionPointer
&conn
)
500 debugs(17, 3, entry
->url() );
501 assert(serverConnection() == conn
);
502 assert(Comm::IsConnOpen(conn
));
503 comm_remove_close_handler(conn
->fd
, closeHandler
);
504 closeHandler
= nullptr;
505 serverConn
= nullptr;
506 destinationReceipt
= nullptr;
// Deprecated fd-based variant: validates the fd matches serverConn and
// delegates to the Connection-based unregister().
509 // \deprecated use unregister(Comm::ConnectionPointer &conn) instead
511 FwdState::unregister(int fd
)
513 debugs(17, 3, entry
->url() );
514 assert(fd
== serverConnection()->fd
);
515 unregister(serverConn
);
// Called when a server-side client module finishes downloading: logs the
// reply status, and either detaches from the connection to re-forward the
// request elsewhere or completes the entry and destroys this FwdState.
519 * FooClient modules call fwdComplete() when they are done
520 * downloading an object. Then, we either 1) re-forward the
521 * request somewhere else if needed, or 2) call storeComplete()
527 const auto replyStatus
= entry
->mem().baseReply().sline
.status();
528 debugs(17, 3, *entry
<< " status " << replyStatus
<< ' ' << entry
->url());
529 #if URL_CHECKSUM_DEBUG
531 entry
->mem_obj
->checkUrlChecksum();
534 logReplyStatus(n_tries
, replyStatus
);
536 // will already be false if complete() was called before/without dispatch()
537 waitingForDispatched
= false;
540 debugs(17, 3, "re-forwarding " << replyStatus
<< " " << entry
->url());
542 if (Comm::IsConnOpen(serverConn
))
543 unregister(serverConn
);
544 serverConn
= nullptr;
545 destinationReceipt
= nullptr;
547 storedWholeReply_
= nullptr;
553 if (Comm::IsConnOpen(serverConn
))
554 debugs(17, 3, "server FD " << serverConnection()->fd
<< " not re-forwarding status " << replyStatus
);
556 debugs(17, 3, "server (FD closed) not re-forwarding status " << replyStatus
);
560 stopAndDestroy("forwarding completed");
// True while any post-connect step is active: CONNECT tunneling (peerWait),
// TLS negotiation (encryptionWait), or a dispatched Client job.
564 /// Whether a forwarding attempt to some selected destination X is in progress
565 /// (after successfully opening/reusing a transport connection to X).
566 /// See also: transportWait
568 FwdState::transporting() const
570 return peerWait
|| encryptionWait
|| waitingForDispatched
;
// Remembers (via a reason string) that the entire reply reached the Store,
// unless the entry was aborted, in which case the claim would be false.
574 FwdState::markStoredReplyAsWhole(const char * const whyWeAreSure
)
576 debugs(17, 5, whyWeAreSure
<< " for " << *entry
);
578 // the caller wrote everything to Store, but Store may silently abort writes
579 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
))
582 storedWholeReply_
= whyWeAreSure
;
// PeerSelectionInitiator callback: records one more candidate path. A pinned
// destination must arrive first (while destinations is still empty); later
// paths are queued as backups while a connection attempt may be in progress.
586 FwdState::noteDestination(Comm::ConnectionPointer path
)
588 flags
.destinationsFound
= true;
591 // We can call usePinned() without fear of clashing with an earlier
592 // forwarding attempt because PINNED must be the first destination.
593 assert(destinations
->empty());
600 destinations
->addPath(path
);
603 assert(!transporting());
605 return; // and continue to wait for FwdState::noteConnection() callback
609 return; // and continue to receive destinations for backup
// PeerSelectionInitiator end-of-selection callback: finalizes the destination
// list; aborts if no paths were ever found (propagating any selection error),
// keeps going if a transport attempt is active, or fails with a detailed
// ERR_CANNOT_FORWARD when all found paths already failed.
615 FwdState::noteDestinationsEnd(ErrorState
*selectionError
)
617 PeerSelectionInitiator::subscribed
= false;
618 destinations
->destinationsFinalized
= true;
620 if (!flags
.destinationsFound
) {
621 if (selectionError
) {
622 debugs(17, 3, "Will abort forwarding because path selection has failed.");
623 Must(!err
); // if we tried to connect, then path selection succeeded
624 fail(selectionError
);
627 stopAndDestroy("path selection found no paths");
630 // else continue to use one of the previously noted destinations;
631 // if all of them fail, forwarding as whole will fail
632 Must(!selectionError
); // finding at least one path means selection succeeded
635 assert(!transporting());
637 return; // and continue to wait for FwdState::noteConnection() callback
640 if (transporting()) {
641 // We are already using a previously opened connection (but were also
642 // receiving more destinations in case we need to re-forward).
643 debugs(17, 7, "keep transporting");
647 // destinationsFound, but none of them worked, and we were waiting for more
648 debugs(17, 7, "no more destinations to try after " << n_tries
<< " failed attempts")
650 const auto finalError
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scBadGateway
, request
, al
);
651 static const auto d
= MakeNamedErrorDetail("REFORWARD_TO_NONE");
652 finalError
->detailError(d
);
654 } // else use actual error from last forwarding attempt
655 stopAndDestroy("all found paths have failed");
// Pokes the HappyConnOpener job about destination-list changes, coalescing
// notifications via the notificationPending flag.
658 /// makes sure connection opener knows that the destinations have changed
660 FwdState::notifyConnOpener()
662 if (destinations
->notificationPending
) {
663 debugs(17, 7, "reusing pending notification about " << *destinations
);
665 debugs(17, 7, "notifying about " << *destinations
);
666 destinations
->notificationPending
= true;
667 CallJobHere(17, 5, transportWait
.job(), HappyConnOpener
, noteCandidatesChange
);
671 /**** CALLBACK WRAPPERS ************************************************************/
// comm close-handler wrapper: recovers the FwdState from the callback cbdata.
674 fwdServerClosedWrapper(const CommCloseCbParams
&params
)
676 FwdState
*fwd
= (FwdState
*)params
.data
;
680 /**** PRIVATE *****************************************************************/
// Retry gate: a sequence of guard checks — earlier abort, entry no longer
// pending or already has content, try budget exhausted, non-retriable pinned
// connection, forward timeout expired, dont_retry flag, nibbled request body —
// before deferring to checkRetriable().
683 * FwdState::checkRetry
685 * Return TRUE if the request SHOULD be retried. This method is
686 * called when the HTTP connection fails, or when the connection
687 * is closed before reading the end of HTTP headers from the server.
690 FwdState::checkRetry()
695 if (!self
) { // we have aborted before the server called us back
696 debugs(17, 5, "not retrying because of earlier abort");
697 // we will be destroyed when the server clears its Pointer to us
701 if (entry
->store_status
!= STORE_PENDING
)
704 if (!entry
->isEmpty())
707 if (exhaustedTries())
710 if (request
->flags
.pinned
&& !pinnedCanRetry())
713 if (!EnoughTimeToReForward(start_t
))
716 if (flags
.dont_retry
)
719 if (request
->bodyNibbled())
722 // NP: not yet actually connected anywhere. retry is safe.
723 if (!flags
.connected_okay
)
726 if (!checkRetriable())
// A request is retriable only when it carries no body pipe and its method is
// HTTP-safe or idempotent.
732 /// Whether we may try sending this request again after a failure.
734 FwdState::checkRetriable()
736 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
737 // complicated] code required to protect the PUT request body from being
738 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
739 if (request
->body_pipe
!= nullptr)
742 // RFC2616 9.1 Safe and Idempotent Methods
743 return (request
->method
.isHttpSafe() || request
->method
.isIdempotent());
// Unexpected server-connection closure handler: records pconn usage and notes
// the closure (if the connection still looks open), then forgets all
// connection-related state.
747 FwdState::serverClosed()
749 // XXX: This method logic attempts to tolerate Connection::close() called
750 // for serverConn earlier, by one of our dispatch()ed jobs. If that happens,
751 // serverConn will already be closed here or, worse, it will already be open
752 // for the next forwarding attempt. The current code prevents us getting
753 // stuck, but the long term solution is to stop sharing serverConn.
754 debugs(17, 2, serverConn
);
755 if (Comm::IsConnOpen(serverConn
)) {
756 const auto uses
= fd_table
[serverConn
->fd
].pconn
.uses
;
757 debugs(17, 3, "prior uses: " << uses
);
758 fwdPconnPool
->noteUses(uses
); // XXX: May not have come from fwdPconnPool
759 serverConn
->noteClosure();
761 serverConn
= nullptr;
762 closeHandler
= nullptr;
763 destinationReceipt
= nullptr;
765 // will already be false if this closure happened before/without dispatch()
766 waitingForDispatched
= false;
// After a failed attempt: re-forwards when possible; otherwise stops the peer
// clock, synthesizes an ERR_SHUTTING_DOWN error for empty entries during
// shutdown, and destroys this FwdState.
772 FwdState::retryOrBail()
775 debugs(17, 3, "re-forwarding (" << n_tries
<< " tries, " << (squid_curtime
- start_t
) << " secs)");
780 // TODO: should we call completed() here and move doneWithRetries there?
783 request
->hier
.stopPeerClock(false);
785 if (self
!= nullptr && !err
&& shutting_down
&& entry
->isEmpty()) {
786 const auto anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
, al
);
787 errorAppendEntry(entry
, anErr
);
790 stopAndDestroy("cannot retry");
// Tells the request body pipe that no further consumption will happen, so the
// body producer does not block waiting for buffer space.
793 // If the Server quits before nibbling at the request body, the body sender
794 // will not know (so that we can retry). Call this if we will not retry. We
795 // will notify the sender so that it does not get stuck waiting for space.
797 FwdState::doneWithRetries()
799 if (request
&& request
->body_pipe
!= nullptr)
800 request
->body_pipe
->expectNoConsumption();
// Failure notification after unregister(): serverConn must already be closed;
// clears remaining connection state.
803 // called by the server that failed after calling unregister()
805 FwdState::handleUnregisteredServerEnd()
807 debugs(17, 2, "self=" << self
<< " err=" << err
<< ' ' << entry
->url());
808 assert(!Comm::IsConnOpen(serverConn
));
809 serverConn
= nullptr;
810 destinationReceipt
= nullptr;
812 // might already be false due to uncertainties documented in serverClosed()
813 waitingForDispatched
= false;
// Template helper: runs one connection-preparation step, converting any
// exception into a closed pending connection plus an ERR_GATEWAY_FAILURE.
818 /// starts a preparation step for an established connection; retries on failures
819 template <typename StepStart
>
821 FwdState::advanceDestination(const char *stepDescription
, const Comm::ConnectionPointer
&conn
, const StepStart
&startStep
)
823 // TODO: Extract destination-specific handling from FwdState so that all the
824 // awkward, limited-scope advanceDestination() calls can be replaced with a
825 // single simple try/catch,retry block.
828 // now wait for the step callback
830 debugs (17, 2, "exception while trying to " << stepDescription
<< ": " << CurrentException
);
831 closePendingConnection(conn
, "connection preparation exception");
833 fail(new ErrorState(ERR_GATEWAY_FAILURE
, Http::scInternalServerError
, request
, al
));
// HappyConnOpener callback: syncs the attempt counter, then handles three
// outcomes — opener gave up with an error (no more retries), the socket was
// closed while the callback was queued (fail with ERR_CANNOT_FORWARD), or a
// usable connection: commit to it and either tunnel CONNECT through a
// non-originserver cache_peer or proceed to optional TLS setup.
838 /// called when a to-peer connection has been successfully obtained or
839 /// when all candidate destinations have been tried and all have failed
841 FwdState::noteConnection(HappyConnOpener::Answer
&answer
)
843 assert(!destinationReceipt
);
845 transportWait
.finish();
847 updateAttempts(answer
.n_tries
);
849 ErrorState
*error
= nullptr;
850 if ((error
= answer
.error
.get())) {
851 flags
.dont_retry
= true; // or HappyConnOpener would not have given up
852 syncHierNote(answer
.conn
, request
->url
.host());
853 Must(!Comm::IsConnOpen(answer
.conn
));
854 answer
.error
.clear(); // preserve error for errorSendComplete()
855 } else if (!Comm::IsConnOpen(answer
.conn
) || fd_table
[answer
.conn
->fd
].closing()) {
856 // The socket could get closed while our callback was queued. Sync
857 // Connection. XXX: Connection::fd may already be stale/invalid here.
858 // We do not know exactly why the connection got closed, so we play it
859 // safe, allowing retries only for persistent (reused) connections
861 destinationReceipt
= answer
.conn
;
862 assert(destinationReceipt
);
864 syncHierNote(answer
.conn
, request
->url
.host());
865 closePendingConnection(answer
.conn
, "conn was closed while waiting for noteConnection");
866 error
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
, al
);
869 destinationReceipt
= answer
.conn
;
870 assert(destinationReceipt
);
871 // serverConn remains nil until syncWithServerConn()
881 syncWithServerConn(answer
.conn
, request
->url
.host(), answer
.reused
);
885 // Check if we need to TLS before use
886 if (const auto *peer
= answer
.conn
->getPeer()) {
887 // Assume that it is only possible for the client-first from the
888 // bumping modes to try connect to a remote server. The bumped
889 // requests with other modes are using pinned connections or fails.
890 const bool clientFirstBump
= request
->flags
.sslBumped
;
891 // We need a CONNECT tunnel to send encrypted traffic through a proxy,
892 // but we do not support TLS inside TLS, so we exclude HTTPS proxies.
893 const bool originWantsEncryptedTraffic
=
894 request
->method
== Http::METHOD_CONNECT
||
895 request
->flags
.sslPeek
||
897 if (originWantsEncryptedTraffic
&& // the "encrypted traffic" part
898 !peer
->options
.originserver
&& // the "through a proxy" part
899 !peer
->secure
.encryptTransport
) // the "exclude HTTPS proxies" part
900 return advanceDestination("establish tunnel through proxy", answer
.conn
, [this,&answer
] {
901 establishTunnelThruProxy(answer
.conn
);
905 secureConnectionToPeerIfNeeded(answer
.conn
);
// Starts an HTTP CONNECT tunnel through a cache_peer; applies a delay-pool id
// unless the peer has no_delay set, then waits for tunnelEstablishmentDone().
909 FwdState::establishTunnelThruProxy(const Comm::ConnectionPointer
&conn
)
911 const auto callback
= asyncCallback(17, 4, FwdState::tunnelEstablishmentDone
, this);
912 HttpRequest::Pointer requestPointer
= request
;
913 const auto tunneler
= new Http::Tunneler(conn
, requestPointer
, callback
, connectingTimeout(conn
), al
);
915 // TODO: Replace this hack with proper Comm::Connection-Pool association
916 // that is not tied to fwdPconnPool and can handle disappearing pools.
917 tunneler
->noteFwdPconnUse
= true;
921 Must(conn
->getPeer());
922 if (!conn
->getPeer()->options
.no_delay
)
923 tunneler
->setDelayId(entry
->mem_obj
->mostBytesAllowed());
925 peerWait
.start(tunneler
, callback
);
// CONNECT-exchange callback: propagates tunneler errors, treats a
// closed-while-queued socket as ERR_CANNOT_FORWARD, and rejects early data
// after the CONNECT response (TLS servers must not speak first) with
// ERR_CONNECT_FAIL; otherwise continues to optional TLS setup.
928 /// resumes operations after the (possibly failed) HTTP CONNECT exchange
930 FwdState::tunnelEstablishmentDone(Http::TunnelerAnswer
&answer
)
934 ErrorState
*error
= nullptr;
935 if (!answer
.positive()) {
937 error
= answer
.squidError
.get();
939 answer
.squidError
.clear(); // preserve error for fail()
940 } else if (!Comm::IsConnOpen(answer
.conn
) || fd_table
[answer
.conn
->fd
].closing()) {
941 // The socket could get closed while our callback was queued. Sync
942 // Connection. XXX: Connection::fd may already be stale/invalid here.
943 closePendingConnection(answer
.conn
, "conn was closed while waiting for tunnelEstablishmentDone");
944 error
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
, al
);
945 } else if (!answer
.leftovers
.isEmpty()) {
946 // This should not happen because TLS servers do not speak first. If we
947 // have to handle this, then pass answer.leftovers via a PeerConnector
948 // to ServerBio. See ClientBio::setReadBufData().
949 static int occurrences
= 0;
950 const auto level
= (occurrences
++ < 100) ? DBG_IMPORTANT
: 2;
951 debugs(17, level
, "ERROR: Early data after CONNECT response. " <<
952 "Found " << answer
.leftovers
.length() << " bytes. " <<
953 "Closing " << answer
.conn
);
954 error
= new ErrorState(ERR_CONNECT_FAIL
, Http::scBadGateway
, request
, al
);
955 closePendingConnection(answer
.conn
, "server spoke before tunnelEstablishmentDone");
963 secureConnectionToPeerIfNeeded(answer
.conn
);
// Decides whether the established TCP connection must be TLS-encrypted: TLS
// to an encrypting cache_peer (unless the user does CONNECT to an
// originserver peer), TLS to an https:// origin, or SslBump needs; otherwise
// proceeds straight to the post-connect actions.
966 /// handles an established TCP connection to peer (including origin servers)
968 FwdState::secureConnectionToPeerIfNeeded(const Comm::ConnectionPointer
&conn
)
970 assert(!request
->flags
.pinned
);
972 const auto p
= conn
->getPeer();
973 const bool peerWantsTls
= p
&& p
->secure
.encryptTransport
;
974 // userWillTlsToPeerForUs assumes CONNECT == HTTPS
975 const bool userWillTlsToPeerForUs
= p
&& p
->options
.originserver
&&
976 request
->method
== Http::METHOD_CONNECT
;
977 const bool needTlsToPeer
= peerWantsTls
&& !userWillTlsToPeerForUs
;
978 const bool clientFirstBump
= request
->flags
.sslBumped
; // client-first (already) bumped connection
979 const bool needsBump
= request
->flags
.sslPeek
|| clientFirstBump
;
981 // 'GET https://...' requests. If a peer is used the request is forwarded
983 const bool needTlsToOrigin
= !p
&& request
->url
.getScheme() == AnyP::PROTO_HTTPS
&& !clientFirstBump
;
985 if (needTlsToPeer
|| needTlsToOrigin
|| needsBump
) {
986 return advanceDestination("secure connection to peer", conn
, [this,&conn
] {
987 secureConnectionToPeer(conn
);
991 // if not encrypting just run the post-connect actions
992 successfullyConnectedToPeer(conn
);
// Launches the TLS handshake job: a PeekingPeerConnector for sslPeek
// (SslBump) traffic, a BlindPeerConnector otherwise; waits via encryptionWait.
995 /// encrypts an established TCP connection to peer (including origin servers)
997 FwdState::secureConnectionToPeer(const Comm::ConnectionPointer
&conn
)
999 HttpRequest::Pointer requestPointer
= request
;
1000 const auto callback
= asyncCallback(17, 4, FwdState::connectedToPeer
, this);
1001 const auto sslNegotiationTimeout
= connectingTimeout(conn
);
1002 Security::PeerConnector
*connector
= nullptr;
1004 if (request
->flags
.sslPeek
)
1005 connector
= new Ssl::PeekingPeerConnector(requestPointer
, conn
, clientConn
, callback
, al
, sslNegotiationTimeout
);
1008 connector
= new Security::BlindPeerConnector(requestPointer
, conn
, callback
, al
, sslNegotiationTimeout
);
1009 connector
->noteFwdPconnUse
= true;
1010 encryptionWait
.start(connector
, callback
);
// TLS-negotiation callback: propagates encryptor errors, hands control to
// TunnelStateData when the connector spliced/tunneled instead (no retries,
// complete() destroys us), treats closed-while-queued sockets as
// ERR_CANNOT_FORWARD, and otherwise runs the post-connect actions.
1013 /// called when all negotiations with the TLS-speaking peer have been completed
1015 FwdState::connectedToPeer(Security::EncryptorAnswer
&answer
)
1017 encryptionWait
.finish();
1019 ErrorState
*error
= nullptr;
1020 if ((error
= answer
.error
.get())) {
1021 assert(!answer
.conn
);
1022 answer
.error
.clear(); // preserve error for errorSendComplete()
1023 } else if (answer
.tunneled
) {
1024 assert(!answer
.conn
);
1025 // TODO: When ConnStateData establishes tunnels, its state changes
1026 // [in ways that may affect logging?]. Consider informing
1027 // ConnStateData about our tunnel or otherwise unifying tunnel
1028 // establishment [side effects].
1029 flags
.dont_retry
= true; // TunnelStateData took forwarding control
1031 complete(); // destroys us
1033 } else if (!Comm::IsConnOpen(answer
.conn
) || fd_table
[answer
.conn
->fd
].closing()) {
1034 // The socket could get closed while our callback was queued. Sync
1035 // Connection. XXX: Connection::fd may already be stale/invalid here.
1036 closePendingConnection(answer
.conn
, "conn was closed while waiting for connectedToPeer");
1037 error
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
, al
);
1046 successfullyConnectedToPeer(answer
.conn
);
// Post-connect actions: commits to the connection (non-reused), tells the
// client-side ConnStateData about the peer connection, and records the
// outgoing-connection success for the peer.
1049 /// called when all negotiations with the peer have been completed
1051 FwdState::successfullyConnectedToPeer(const Comm::ConnectionPointer
&conn
)
1053 syncWithServerConn(conn
, request
->url
.host(), false);
1055 // should reach ConnStateData before the dispatched Client job starts
1056 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
1057 ConnStateData::notePeerConnection
, serverConnection());
1059 NoteOutgoingConnectionSuccess(serverConnection()->getPeer());
// Commits to the given open connection: installs the close handler, sets the
// pconn-race state (a race is possible only on reused connections, and reused
// connections also need their markings re-applied), and syncs hierarchy notes.
1064 /// commits to using the given open to-peer connection
1066 FwdState::syncWithServerConn(const Comm::ConnectionPointer
&conn
, const char *host
, const bool reused
)
1068 Must(IsConnOpen(conn
));
1070 // no effect on destinationReceipt (which may even be nil here)
1072 closeHandler
= comm_add_close_handler(serverConn
->fd
, fwdServerClosedWrapper
, this);
1075 pconnRace
= racePossible
;
1076 ResetMarkingsToServer(request
, *serverConn
);
1078 pconnRace
= raceImpossible
;
1079 // Comm::ConnOpener already applied proper/current markings
1082 syncHierNote(serverConn
, host
);
// Resets hierarchy peer notes on both the request and the access log entry.
1086 FwdState::syncHierNote(const Comm::ConnectionPointer
&server
, const char *host
)
1089 request
->hier
.resetPeerNotes(server
, host
);
1091 al
->hier
.resetPeerNotes(server
, host
);
// Monotonically raises n_tries to newValue, incrementing (not assigning) the
// ALE attempt counter so multiple forwarding objects sharing one ALE add up.
1094 /// sets n_tries to the given value (while keeping ALE, if any, in sync)
1096 FwdState::updateAttempts(const int newValue
)
1098 Assure(n_tries
<= newValue
); // n_tries cannot decrease
1100 // Squid probably creates at most one FwdState/TunnelStateData object per
1101 // ALE, but, unlike an assignment would, this increment logic works even if
1102 // Squid uses multiple such objects for a given ALE in some esoteric cases.
1104 al
->requestAttempts
+= (newValue
- n_tries
);
1107 debugs(17, 5, n_tries
);
1111 * Called after forwarding path selection (via peer select) has taken place
1112 * and whenever forwarding needs to attempt a new connection (routing failover).
1113 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
1116 FwdState::connectStart()
1118 debugs(17, 3, *destinations
<< " to " << entry
->url());
1120 Must(!request
->pinnedConnection());
1122 assert(!destinations
->empty());
1123 assert(!transporting());
1125 // Ditch error page if it was created before.
1126 // A new one will be created if there's another problem
1129 request
->clearError();
1131 request
->hier
.startPeerClock();
1133 const auto callback
= asyncCallback(17, 5, FwdState::noteConnection
, this);
1134 HttpRequest::Pointer cause
= request
;
1135 const auto cs
= new HappyConnOpener(destinations
, callback
, cause
, start_t
, n_tries
, al
);
1136 cs
->setHost(request
->url
.host());
1137 bool retriable
= checkRetriable();
1138 if (!retriable
&& Config
.accessList
.serverPconnForNonretriable
) {
1139 ACLFilledChecklist
ch(Config
.accessList
.serverPconnForNonretriable
, request
, nullptr);
1141 ch
.syncAle(request
, nullptr);
1142 retriable
= ch
.fastCheck().allowed();
1144 cs
->setRetriable(retriable
);
1145 cs
->allowPersistent(pconnRace
!= raceHappened
);
1146 destinations
->notificationPending
= true; // start() is async
1147 transportWait
.start(cs
, callback
);
1150 /// send request on an existing connection dedicated to the requesting client
1152 FwdState::usePinned()
1154 const auto connManager
= request
->pinnedConnection();
1155 debugs(17, 7, "connection manager: " << connManager
);
1158 // TODO: Refactor syncWithServerConn() and callers to always set
1159 // serverConn inside that method.
1160 serverConn
= ConnStateData::BorrowPinnedConnection(request
, al
);
1161 debugs(17, 5, "connection: " << serverConn
);
1162 } catch (ErrorState
* const anErr
) {
1163 syncHierNote(nullptr, connManager
? connManager
->pinning
.host
: request
->url
.host());
1164 serverConn
= nullptr;
1166 // Connection managers monitor their idle pinned to-server
1167 // connections and close from-client connections upon seeing
1168 // a to-server connection closure. Retrying here is futile.
1169 stopAndDestroy("pinned connection failure");
1173 updateAttempts(n_tries
+ 1);
1175 request
->flags
.pinned
= true;
1177 assert(connManager
);
1178 if (connManager
->pinnedAuth())
1179 request
->flags
.auth
= true;
1181 // the server may close the pinned connection before this request
1182 const auto reused
= true;
1183 syncWithServerConn(serverConn
, connManager
->pinning
.host
, reused
);
1189 FwdState::dispatch()
1191 debugs(17, 3, clientConn
<< ": Fetching " << request
->method
<< ' ' << entry
->url());
1193 * Assert that server_fd is set. This is to guarantee that fwdState
1194 * is attached to something and will be deallocated when server_fd
1197 assert(Comm::IsConnOpen(serverConn
));
1199 assert(!waitingForDispatched
);
1200 waitingForDispatched
= true;
1202 fd_note(serverConnection()->fd
, entry
->url());
1204 fd_table
[serverConnection()->fd
].noteUse();
1206 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
1207 assert(entry
->ping_status
!= PING_WAITING
);
1209 assert(entry
->locked());
1211 EBIT_SET(entry
->flags
, ENTRY_DISPATCHED
);
1213 flags
.connected_okay
= true;
1215 netdbPingSite(request
->url
.host());
1217 /* Retrieves remote server TOS or MARK value, and stores it as part of the
1218 * original client request FD object. It is later used to forward
1219 * remote server's TOS/MARK in the response to the client in case of a MISS.
1221 if (Ip::Qos::TheConfig
.isHitNfmarkActive()) {
1222 if (Comm::IsConnOpen(clientConn
) && Comm::IsConnOpen(serverConnection())) {
1223 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
1224 /* Get the netfilter CONNMARK */
1225 clientFde
->nfConnmarkFromServer
= Ip::Qos::getNfConnmark(serverConnection(), Ip::Qos::dirOpened
);
1230 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
1231 if (Ip::Qos::TheConfig
.isHitTosActive()) {
1232 if (Comm::IsConnOpen(clientConn
)) {
1233 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
1234 /* Get the TOS value for the packet */
1235 Ip::Qos::getTosFromServer(serverConnection(), clientFde
);
1241 if (request
->flags
.sslPeek
) {
1242 // we were just asked to peek at the server, and we did that
1243 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
1244 ConnStateData::httpsPeeked
, ConnStateData::PinnedIdleContext(serverConnection(), request
));
1245 unregister(serverConn
); // async call owns it now
1246 flags
.dont_retry
= true; // we gave up forwarding control
1248 complete(); // destroys us
1253 if (const auto peer
= serverConnection()->getPeer()) {
1254 ++peer
->stats
.fetches
;
1255 request
->prepForPeering(*peer
);
1258 assert(!request
->flags
.sslPeek
);
1259 request
->prepForDirect();
1261 switch (request
->url
.getScheme()) {
1263 case AnyP::PROTO_HTTPS
:
1267 case AnyP::PROTO_HTTP
:
1271 case AnyP::PROTO_FTP
:
1272 if (request
->flags
.ftpNative
)
1273 Ftp::StartRelay(this);
1275 Ftp::StartGateway(this);
1278 case AnyP::PROTO_URN
:
1279 fatal_dump("Should never get here");
1282 case AnyP::PROTO_WHOIS
:
1286 case AnyP::PROTO_WAIS
: /* Not implemented */
1289 debugs(17, DBG_IMPORTANT
, "WARNING: Cannot retrieve '" << entry
->url() << "'.");
1290 const auto anErr
= new ErrorState(ERR_UNSUP_REQ
, Http::scBadRequest
, request
, al
);
1292 // Set the dont_retry flag because this is not a transient (network) error.
1293 flags
.dont_retry
= true;
1294 if (Comm::IsConnOpen(serverConn
)) {
1295 serverConn
->close(); // trigger cleanup
1303 * FwdState::reforward
1305 * returns TRUE if the transaction SHOULD be re-forwarded to the
1306 * next choice in the serverDestinations list. This method is called when
1307 * peer communication completes normally, or experiences
1308 * some error after receiving the end of HTTP headers.
1311 FwdState::reforward()
1313 StoreEntry
*e
= entry
;
1315 if (EBIT_TEST(e
->flags
, ENTRY_ABORTED
)) {
1316 debugs(17, 3, "entry aborted");
1320 assert(e
->store_status
== STORE_PENDING
);
1322 #if URL_CHECKSUM_DEBUG
1324 e
->mem_obj
->checkUrlChecksum();
1327 debugs(17, 3, e
->url() << "?" );
1329 if (request
->flags
.pinned
&& !pinnedCanRetry()) {
1330 debugs(17, 3, "pinned connection; cannot retry");
1334 if (!EBIT_TEST(e
->flags
, ENTRY_FWD_HDR_WAIT
)) {
1335 debugs(17, 3, "No, ENTRY_FWD_HDR_WAIT isn't set");
1339 if (exhaustedTries())
1342 if (request
->bodyNibbled())
1345 if (destinations
->empty() && !PeerSelectionInitiator::subscribed
) {
1346 debugs(17, 3, "No alternative forwarding paths left");
1350 const auto s
= entry
->mem().baseReply().sline
.status();
1351 debugs(17, 3, "status " << s
);
1352 return Http::IsReforwardableStatus(s
);
1356 fwdStats(StoreEntry
* s
)
1360 storeAppendPrintf(s
, "Status");
1362 for (j
= 1; j
< MAX_FWD_STATS_IDX
; ++j
) {
1363 storeAppendPrintf(s
, "\ttry#%d", j
);
1366 storeAppendPrintf(s
, "\n");
1368 for (i
= 0; i
<= (int) Http::scInvalidHeader
; ++i
) {
1369 if (FwdReplyCodes
[0][i
] == 0)
1372 storeAppendPrintf(s
, "%3d", i
);
1374 for (j
= 0; j
<= MAX_FWD_STATS_IDX
; ++j
) {
1375 storeAppendPrintf(s
, "\t%d", FwdReplyCodes
[j
][i
]);
1378 storeAppendPrintf(s
, "\n");
/**** STATIC MEMBER FUNCTIONS *************************************************/
1385 FwdState::initModule()
1387 RegisterWithCacheManager();
1391 FwdState::RegisterWithCacheManager(void)
1393 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats
, 0, 1);
1397 FwdState::logReplyStatus(int tries
, const Http::StatusCode status
)
1399 if (status
> Http::scInvalidHeader
)
1404 if (tries
> MAX_FWD_STATS_IDX
)
1405 tries
= MAX_FWD_STATS_IDX
;
1407 ++ FwdReplyCodes
[tries
][status
];
1411 FwdState::exhaustedTries() const
1413 return n_tries
>= Config
.forward_max_tries
;
1417 FwdState::pinnedCanRetry() const
1419 assert(request
->flags
.pinned
);
1421 // pconn race on pinned connection: Currently we do not have any mechanism
1422 // to retry current pinned connection path.
1423 if (pconnRace
== raceHappened
)
1426 // If a bumped connection was pinned, then the TLS client was given our peer
1427 // details. Do not retry because we do not ensure that those details stay
1428 // constant. Step1-bumped connections do not get our TLS peer details, are
1429 // never pinned, and, hence, never reach this method.
1430 if (request
->flags
.sslBumped
)
1433 // The other pinned cases are FTP proxying and connection-based HTTP
1434 // authentication. TODO: Do these cases have restrictions?
1439 FwdState::connectingTimeout(const Comm::ConnectionPointer
&conn
) const
1441 const auto connTimeout
= conn
->connectTimeout(start_t
);
1442 return positiveTimeout(connTimeout
);
/**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/

/*
 * Formerly static, but now used by client_side_request.cc
 */
1451 /// Checks for a TOS value to apply depending on the ACL
1453 aclMapTOS(acl_tos
* head
, ACLChecklist
* ch
)
1455 for (acl_tos
*l
= head
; l
; l
= l
->next
) {
1456 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
).allowed())
1463 /// Checks for a netfilter mark value to apply depending on the ACL
1465 aclFindNfMarkConfig(acl_nfmark
* head
, ACLChecklist
* ch
)
1467 for (acl_nfmark
*l
= head
; l
; l
= l
->next
) {
1468 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
).allowed())
1469 return l
->markConfig
;
1476 getOutgoingAddress(HttpRequest
* request
, const Comm::ConnectionPointer
&conn
)
1478 // skip if an outgoing address is already set.
1479 if (!conn
->local
.isAnyAddr()) return;
1481 // ensure that at minimum the wildcard local matches remote protocol
1482 if (conn
->remote
.isIPv4())
1483 conn
->local
.setIPv4();
1485 // maybe use TPROXY client address
1486 if (request
&& request
->flags
.spoofClientIp
) {
1487 if (!conn
->getPeer() || !conn
->getPeer()->options
.no_tproxy
) {
1488 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1489 if (Config
.onoff
.tproxy_uses_indirect_client
)
1490 conn
->local
= request
->indirect_client_addr
;
1493 conn
->local
= request
->client_addr
;
1494 conn
->local
.port(0); // let OS pick the source port to prevent address clashes
1495 // some flags need setting on the socket to use this address
1496 conn
->flags
|= COMM_DOBIND
;
1497 conn
->flags
|= COMM_TRANSPARENT
;
1500 // else no tproxy today ...
1503 if (!Config
.accessList
.outgoing_address
) {
1504 return; // anything will do.
1507 ACLFilledChecklist
ch(nullptr, request
, nullptr);
1508 ch
.dst_peer_name
= conn
->getPeer() ? conn
->getPeer()->name
: nullptr;
1509 ch
.dst_addr
= conn
->remote
;
1511 // TODO use the connection details in ACL.
1512 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1514 for (Acl::Address
*l
= Config
.accessList
.outgoing_address
; l
; l
= l
->next
) {
1516 /* check if the outgoing address is usable to the destination */
1517 if (conn
->remote
.isIPv4() != l
->addr
.isIPv4()) continue;
1519 /* check ACLs for this outgoing address */
1520 if (!l
->aclList
|| ch
.fastCheck(l
->aclList
).allowed()) {
1521 conn
->local
= l
->addr
;
1527 /// \returns the TOS value that should be set on the to-peer connection
1529 GetTosToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1531 if (!Ip::Qos::TheConfig
.tosToServer
)
1534 ACLFilledChecklist
ch(nullptr, request
, nullptr);
1535 ch
.dst_peer_name
= conn
.getPeer() ? conn
.getPeer()->name
: nullptr;
1536 ch
.dst_addr
= conn
.remote
;
1537 return aclMapTOS(Ip::Qos::TheConfig
.tosToServer
, &ch
);
1540 /// \returns the Netfilter mark that should be set on the to-peer connection
1542 GetNfmarkToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1544 if (!Ip::Qos::TheConfig
.nfmarkToServer
)
1547 ACLFilledChecklist
ch(nullptr, request
, nullptr);
1548 ch
.dst_peer_name
= conn
.getPeer() ? conn
.getPeer()->name
: nullptr;
1549 ch
.dst_addr
= conn
.remote
;
1550 const auto mc
= aclFindNfMarkConfig(Ip::Qos::TheConfig
.nfmarkToServer
, &ch
);
1555 GetMarkingsToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1557 // Get the server side TOS and Netfilter mark to be set on the connection.
1558 conn
.tos
= GetTosToServer(request
, conn
);
1559 conn
.nfmark
= GetNfmarkToServer(request
, conn
);
1560 debugs(17, 3, "from " << conn
.local
<< " tos " << int(conn
.tos
) << " netfilter mark " << conn
.nfmark
);
1564 ResetMarkingsToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1566 GetMarkingsToServer(request
, conn
);
1568 // TODO: Avoid these calls if markings has not changed.
1570 Ip::Qos::setSockTos(&conn
, conn
.tos
);
1572 Ip::Qos::setSockNfmark(&conn
, conn
.nfmark
);