/*
 * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */
9 /* DEBUG: section 17 Request Forwarding */
12 #include "AccessLogEntry.h"
13 #include "acl/Address.h"
14 #include "acl/FilledChecklist.h"
15 #include "acl/Gadgets.h"
16 #include "anyp/PortCfg.h"
17 #include "CacheManager.h"
18 #include "CachePeer.h"
19 #include "client_side.h"
20 #include "clients/forward.h"
21 #include "clients/HttpTunneler.h"
22 #include "comm/Connection.h"
23 #include "comm/ConnOpener.h"
24 #include "comm/Loops.h"
25 #include "CommCalls.h"
26 #include "errorpage.h"
33 #include "hier_code.h"
35 #include "http/Stream.h"
36 #include "HttpReply.h"
37 #include "HttpRequest.h"
38 #include "icmp/net_db.h"
40 #include "ip/Intercept.h"
41 #include "ip/NfMarkConfig.h"
42 #include "ip/QosConfig.h"
44 #include "MemObject.h"
45 #include "mgr/Registration.h"
46 #include "neighbors.h"
48 #include "PeerPoolMgr.h"
49 #include "security/BlindPeerConnector.h"
50 #include "SquidConfig.h"
51 #include "SquidTime.h"
52 #include "ssl/PeekingPeerConnector.h"
54 #include "StoreClient.h"
58 #include "ssl/cert_validate_message.h"
59 #include "ssl/Config.h"
60 #include "ssl/ErrorDetail.h"
61 #include "ssl/helper.h"
62 #include "ssl/ServerBump.h"
63 #include "ssl/support.h"
65 #include "security/EncryptorAnswer.h"
70 static CLCB fwdServerClosedWrapper
;
71 static CNCB fwdConnectDoneWrapper
;
75 #define MAX_FWD_STATS_IDX 9
76 static int FwdReplyCodes
[MAX_FWD_STATS_IDX
+ 1][Http::scInvalidHeader
+ 1];
78 static PconnPool
*fwdPconnPool
= new PconnPool("server-peers", NULL
);
79 CBDATA_CLASS_INIT(FwdState
);
81 class FwdStatePeerAnswerDialer
: public CallDialer
, public Security::PeerConnector::CbDialer
84 typedef void (FwdState::*Method
)(Security::EncryptorAnswer
&);
86 FwdStatePeerAnswerDialer(Method method
, FwdState
*fwd
):
87 method_(method
), fwd_(fwd
), answer_() {}
90 virtual bool canDial(AsyncCall
&call
) { return fwd_
.valid(); }
91 void dial(AsyncCall
&call
) { ((&(*fwd_
))->*method_
)(answer_
); }
92 virtual void print(std::ostream
&os
) const {
93 os
<< '(' << fwd_
.get() << ", " << answer_
<< ')';
96 /* Security::PeerConnector::CbDialer API */
97 virtual Security::EncryptorAnswer
&answer() { return answer_
; }
101 CbcPointer
<FwdState
> fwd_
;
102 Security::EncryptorAnswer answer_
;
106 FwdState::abort(void* d
)
108 FwdState
* fwd
= (FwdState
*)d
;
109 Pointer tmp
= fwd
; // Grab a temporary pointer to keep the object alive during our scope.
111 if (Comm::IsConnOpen(fwd
->serverConnection())) {
112 fwd
->closeServerConnection("store entry aborted");
114 debugs(17, 7, HERE
<< "store entry aborted; no connection to close");
116 fwd
->serverDestinations
.clear();
117 fwd
->stopAndDestroy("store entry aborted");
121 FwdState::closeServerConnection(const char *reason
)
123 debugs(17, 3, "because " << reason
<< "; " << serverConn
);
124 comm_remove_close_handler(serverConn
->fd
, closeHandler
);
126 fwdPconnPool
->noteUses(fd_table
[serverConn
->fd
].pconn
.uses
);
130 /**** PUBLIC INTERFACE ********************************************************/
132 FwdState::FwdState(const Comm::ConnectionPointer
&client
, StoreEntry
* e
, HttpRequest
* r
, const AccessLogEntryPointer
&alp
):
138 start_t(squid_curtime
),
140 pconnRace(raceImpossible
)
142 debugs(17, 2, "Forwarding client request " << client
<< ", url=" << e
->url());
143 HTTPMSGLOCK(request
);
144 serverDestinations
.reserve(Config
.forward_max_tries
);
146 flags
.connected_okay
= false;
147 flags
.dont_retry
= false;
148 flags
.forward_completed
= false;
149 debugs(17, 3, "FwdState constructed, this=" << this);
152 // Called once, right after object creation, when it is safe to set self
153 void FwdState::start(Pointer aSelf
)
155 // Protect ourselves from being destroyed when the only Server pointing
156 // to us is gone (while we expect to talk to more Servers later).
157 // Once we set self, we are responsible for clearing it when we do not
158 // expect to talk to any servers.
159 self
= aSelf
; // refcounted
161 // We hope that either the store entry aborts or peer is selected.
162 // Otherwise we are going to leak our object.
164 // Ftp::Relay needs to preserve control connection on data aborts
165 // so it registers its own abort handler that calls ours when needed.
166 if (!request
->flags
.ftpNative
)
167 entry
->registerAbort(FwdState::abort
, this);
169 // just in case; should already be initialized to false
170 request
->flags
.pinned
= false;
172 #if STRICT_ORIGINAL_DST
173 // Bug 3243: CVE 2009-0801
174 // Bypass of browser same-origin access control in intercepted communication
175 // To resolve this we must force DIRECT and only to the original client destination.
176 const bool isIntercepted
= request
&& !request
->flags
.redirected
&& (request
->flags
.intercepted
|| request
->flags
.interceptTproxy
);
177 const bool useOriginalDst
= Config
.onoff
.client_dst_passthru
|| (request
&& !request
->flags
.hostVerified
);
178 if (isIntercepted
&& useOriginalDst
) {
179 selectPeerForIntercepted();
185 // do full route options selection
186 startSelectingDestinations(request
, al
, entry
);
189 /// ends forwarding; relies on refcounting so the effect may not be immediate
191 FwdState::stopAndDestroy(const char *reason
)
193 debugs(17, 3, "for " << reason
);
194 PeerSelectionInitiator::subscribed
= false; // may already be false
195 self
= nullptr; // we hope refcounting destroys us soon; may already be nil
196 /* do not place any code here as this object may be gone by now */
199 #if STRICT_ORIGINAL_DST
200 /// bypasses peerSelect() when dealing with intercepted requests
202 FwdState::selectPeerForIntercepted()
204 // We do not support re-wrapping inside CONNECT.
205 // Our only alternative is to fake a noteDestination() call.
207 // use pinned connection if available
208 if (ConnStateData
*client
= request
->pinnedConnection()) {
209 // emulate the PeerSelector::selectPinned() "Skip ICP" effect
210 entry
->ping_status
= PING_DONE
;
212 serverDestinations
.push_back(nullptr);
216 // use client original destination as second preferred choice
217 const auto p
= new Comm::Connection();
218 p
->peerType
= ORIGINAL_DST
;
219 p
->remote
= clientConn
->local
;
220 getOutgoingAddress(request
, p
);
222 debugs(17, 3, HERE
<< "using client original destination: " << *p
);
223 serverDestinations
.push_back(p
);
228 FwdState::completed()
230 if (flags
.forward_completed
) {
231 debugs(17, DBG_IMPORTANT
, HERE
<< "FwdState::completed called on a completed request! Bad!");
235 flags
.forward_completed
= true;
237 request
->hier
.stopPeerClock(false);
239 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
240 debugs(17, 3, HERE
<< "entry aborted");
244 #if URL_CHECKSUM_DEBUG
246 entry
->mem_obj
->checkUrlChecksum();
249 if (entry
->store_status
== STORE_PENDING
) {
250 if (entry
->isEmpty()) {
251 if (!err
) // we quit (e.g., fd closed) before an error or content
252 fail(new ErrorState(ERR_READ_ERROR
, Http::scBadGateway
, request
, al
));
254 errorAppendEntry(entry
, err
);
257 if (request
->flags
.sslPeek
&& request
->clientConnectionManager
.valid()) {
258 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
259 ConnStateData::httpsPeeked
, ConnStateData::PinnedIdleContext(Comm::ConnectionPointer(nullptr), request
));
264 entry
->releaseRequest();
268 if (storePendingNClients(entry
) > 0)
269 assert(!EBIT_TEST(entry
->flags
, ENTRY_FWD_HDR_WAIT
));
273 FwdState::~FwdState()
275 debugs(17, 3, "FwdState destructor start");
277 if (! flags
.forward_completed
)
282 HTTPMSGUNLOCK(request
);
286 entry
->unregisterAbort();
288 entry
->unlock("FwdState");
292 if (Comm::IsConnOpen(serverConn
))
293 closeServerConnection("~FwdState");
295 serverDestinations
.clear();
297 debugs(17, 3, "FwdState destructed, this=" << this);
301 * This is the entry point for client-side to start forwarding
302 * a transaction. It is a static method that may or may not
303 * allocate a FwdState.
306 FwdState::Start(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
, const AccessLogEntryPointer
&al
)
309 * client_addr == no_addr indicates this is an "internal" request
310 * from peer_digest.c, asn.c, netdb.c, etc and should always
311 * be allowed. yuck, I know.
314 if ( Config
.accessList
.miss
&& !request
->client_addr
.isNoAddr() &&
315 !request
->flags
.internal
&& request
->url
.getScheme() != AnyP::PROTO_CACHE_OBJECT
) {
317 * Check if this host is allowed to fetch MISSES from us (miss_access).
318 * Intentionally replace the src_addr automatically selected by the checklist code
319 * we do NOT want the indirect client address to be tested here.
321 ACLFilledChecklist
ch(Config
.accessList
.miss
, request
, NULL
);
323 ch
.src_addr
= request
->client_addr
;
324 ch
.syncAle(request
, nullptr);
325 if (ch
.fastCheck().denied()) {
327 page_id
= aclGetDenyInfoPage(&Config
.denyInfoList
, AclMatchedName
, 1);
329 if (page_id
== ERR_NONE
)
330 page_id
= ERR_FORWARDING_DENIED
;
332 const auto anErr
= new ErrorState(page_id
, Http::scForbidden
, request
, al
);
333 errorAppendEntry(entry
, anErr
); // frees anErr
338 debugs(17, 3, HERE
<< "'" << entry
->url() << "'");
340 * This seems like an odd place to bind mem_obj and request.
341 * Might want to assert that request is NULL at this point
343 entry
->mem_obj
->request
= request
;
344 #if URL_CHECKSUM_DEBUG
346 entry
->mem_obj
->checkUrlChecksum();
351 const auto anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
, al
);
352 errorAppendEntry(entry
, anErr
); // frees anErr
356 if (request
->flags
.internal
) {
357 debugs(17, 2, "calling internalStart() due to request flag");
358 internalStart(clientConn
, request
, entry
, al
);
362 switch (request
->url
.getScheme()) {
364 case AnyP::PROTO_CACHE_OBJECT
:
365 debugs(17, 2, "calling CacheManager due to request scheme " << request
->url
.getScheme());
366 CacheManager::GetInstance()->start(clientConn
, request
, entry
, al
);
369 case AnyP::PROTO_URN
:
370 urnStart(request
, entry
, al
);
374 FwdState::Pointer fwd
= new FwdState(clientConn
, entry
, request
, al
);
383 FwdState::fwdStart(const Comm::ConnectionPointer
&clientConn
, StoreEntry
*entry
, HttpRequest
*request
)
385 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
386 Start(clientConn
, entry
, request
, NULL
);
389 /// subtracts time_t values, returning zero if smaller exceeds the larger value
390 /// time_t might be unsigned so we need to be careful when subtracting times...
392 diffOrZero(const time_t larger
, const time_t smaller
)
394 return (larger
> smaller
) ? (larger
- smaller
) : 0;
397 /// time left to finish the whole forwarding process (which started at fwdStart)
399 FwdState::ForwardTimeout(const time_t fwdStart
)
401 // time already spent on forwarding (0 if clock went backwards)
402 const time_t timeSpent
= diffOrZero(squid_curtime
, fwdStart
);
403 return diffOrZero(Config
.Timeout
.forward
, timeSpent
);
407 FwdState::EnoughTimeToReForward(const time_t fwdStart
)
409 return ForwardTimeout(fwdStart
) > 0;
413 FwdState::useDestinations()
415 debugs(17, 3, serverDestinations
.size() << " paths to " << entry
->url());
416 if (!serverDestinations
.empty()) {
417 if (!serverDestinations
[0])
422 if (PeerSelectionInitiator::subscribed
) {
423 debugs(17, 4, "wait for more destinations to try");
424 return; // expect a noteDestination*() call
427 debugs(17, 3, HERE
<< "Connection failed: " << entry
->url());
429 const auto anErr
= new ErrorState(ERR_CANNOT_FORWARD
, Http::scInternalServerError
, request
, al
);
431 } // else use actual error from last connection attempt
433 stopAndDestroy("tried all destinations");
438 FwdState::fail(ErrorState
* errorState
)
440 debugs(17, 3, err_type_str
[errorState
->type
] << " \"" << Http::StatusCodeString(errorState
->httpStatus
) << "\"\n\t" << entry
->url());
445 if (!errorState
->request
)
446 errorState
->request
= request
;
448 if (err
->type
!= ERR_ZERO_SIZE_OBJECT
)
451 if (pconnRace
== racePossible
) {
452 debugs(17, 5, HERE
<< "pconn race happened");
453 pconnRace
= raceHappened
;
456 if (ConnStateData
*pinned_connection
= request
->pinnedConnection()) {
457 pinned_connection
->pinning
.zeroReply
= true;
458 debugs(17, 4, "zero reply on pinned connection");
463 * Frees fwdState without closing FD or generating an abort
466 FwdState::unregister(Comm::ConnectionPointer
&conn
)
468 debugs(17, 3, HERE
<< entry
->url() );
469 assert(serverConnection() == conn
);
470 assert(Comm::IsConnOpen(conn
));
471 comm_remove_close_handler(conn
->fd
, closeHandler
);
476 // \deprecated use unregister(Comm::ConnectionPointer &conn) instead
478 FwdState::unregister(int fd
)
480 debugs(17, 3, HERE
<< entry
->url() );
481 assert(fd
== serverConnection()->fd
);
482 unregister(serverConn
);
486 * FooClient modules call fwdComplete() when they are done
487 * downloading an object. Then, we either 1) re-forward the
488 * request somewhere else if needed, or 2) call storeComplete()
494 debugs(17, 3, HERE
<< entry
->url() << "\n\tstatus " << entry
->getReply()->sline
.status());
495 #if URL_CHECKSUM_DEBUG
497 entry
->mem_obj
->checkUrlChecksum();
500 logReplyStatus(n_tries
, entry
->getReply()->sline
.status());
503 debugs(17, 3, HERE
<< "re-forwarding " << entry
->getReply()->sline
.status() << " " << entry
->url());
505 if (Comm::IsConnOpen(serverConn
))
506 unregister(serverConn
);
510 // drop the last path off the selection list. try the next one.
511 if (!serverDestinations
.empty()) // paranoid
512 serverDestinations
.erase(serverDestinations
.begin());
516 if (Comm::IsConnOpen(serverConn
))
517 debugs(17, 3, HERE
<< "server FD " << serverConnection()->fd
<< " not re-forwarding status " << entry
->getReply()->sline
.status());
519 debugs(17, 3, HERE
<< "server (FD closed) not re-forwarding status " << entry
->getReply()->sline
.status());
522 if (!Comm::IsConnOpen(serverConn
))
525 stopAndDestroy("forwarding completed");
530 FwdState::noteDestination(Comm::ConnectionPointer path
)
532 const bool wasBlocked
= serverDestinations
.empty();
533 // XXX: Push even a nil path so that subsequent noteDestination() calls
534 // can rely on wasBlocked to detect ongoing/concurrent attempts.
535 // Upcoming Happy Eyeballs changes will handle this properly.
536 serverDestinations
.push_back(path
);
537 assert(wasBlocked
|| path
); // pinned destinations are always selected first
541 // else continue to use one of the previously noted destinations;
542 // if all of them fail, we may try this path
546 FwdState::noteDestinationsEnd(ErrorState
*selectionError
)
548 PeerSelectionInitiator::subscribed
= false;
549 if (serverDestinations
.empty()) { // was blocked, waiting for more paths
551 if (selectionError
) {
552 debugs(17, 3, "Will abort forwarding because path selection has failed.");
553 Must(!err
); // if we tried to connect, then path selection succeeded
554 fail(selectionError
);
557 debugs(17, 3, "Will abort forwarding because all found paths have failed.");
559 debugs(17, 3, "Will abort forwarding because path selection found no paths.");
561 useDestinations(); // will detect and handle the lack of paths
564 // else continue to use one of the previously noted destinations;
565 // if all of them fail, forwarding as whole will fail
566 Must(!selectionError
); // finding at least one path means selection succeeded
569 /**** CALLBACK WRAPPERS ************************************************************/
572 fwdServerClosedWrapper(const CommCloseCbParams
¶ms
)
574 FwdState
*fwd
= (FwdState
*)params
.data
;
575 fwd
->serverClosed(params
.fd
);
579 fwdConnectDoneWrapper(const Comm::ConnectionPointer
&conn
, Comm::Flag status
, int xerrno
, void *data
)
581 FwdState
*fwd
= (FwdState
*) data
;
582 fwd
->connectDone(conn
, status
, xerrno
);
585 /**** PRIVATE *****************************************************************/
588 * FwdState::checkRetry
590 * Return TRUE if the request SHOULD be retried. This method is
591 * called when the HTTP connection fails, or when the connection
592 * is closed before reading the end of HTTP headers from the server.
595 FwdState::checkRetry()
600 if (!self
) { // we have aborted before the server called us back
601 debugs(17, 5, HERE
<< "not retrying because of earlier abort");
602 // we will be destroyed when the server clears its Pointer to us
606 if (entry
->store_status
!= STORE_PENDING
)
609 if (!entry
->isEmpty())
612 if (exhaustedTries())
615 if (request
->flags
.pinned
&& !pinnedCanRetry())
618 if (!EnoughTimeToReForward(start_t
))
621 if (flags
.dont_retry
)
624 if (request
->bodyNibbled())
627 // NP: not yet actually connected anywhere. retry is safe.
628 if (!flags
.connected_okay
)
631 if (!checkRetriable())
637 /// Whether we may try sending this request again after a failure.
639 FwdState::checkRetriable()
641 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
642 // complicated] code required to protect the PUT request body from being
643 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
644 if (request
->body_pipe
!= NULL
)
647 // RFC2616 9.1 Safe and Idempotent Methods
648 return (request
->method
.isHttpSafe() || request
->method
.isIdempotent());
652 FwdState::serverClosed(int fd
)
654 // XXX: fd is often -1 here
655 debugs(17, 2, "FD " << fd
<< " " << entry
->url() << " after " <<
656 (fd
>= 0 ? fd_table
[fd
].pconn
.uses
: -1) << " requests");
657 if (fd
>= 0 && serverConnection()->fd
== fd
)
658 fwdPconnPool
->noteUses(fd_table
[fd
].pconn
.uses
);
663 FwdState::retryOrBail()
666 debugs(17, 3, HERE
<< "re-forwarding (" << n_tries
<< " tries, " << (squid_curtime
- start_t
) << " secs)");
667 // we should retry the same destination if it failed due to pconn race
668 if (pconnRace
== raceHappened
)
669 debugs(17, 4, HERE
<< "retrying the same destination");
671 serverDestinations
.erase(serverDestinations
.begin()); // last one failed. try another.
676 // TODO: should we call completed() here and move doneWithRetries there?
679 request
->hier
.stopPeerClock(false);
681 if (self
!= NULL
&& !err
&& shutting_down
&& entry
->isEmpty()) {
682 const auto anErr
= new ErrorState(ERR_SHUTTING_DOWN
, Http::scServiceUnavailable
, request
, al
);
683 errorAppendEntry(entry
, anErr
);
686 stopAndDestroy("cannot retry");
689 // If the Server quits before nibbling at the request body, the body sender
690 // will not know (so that we can retry). Call this if we will not retry. We
691 // will notify the sender so that it does not get stuck waiting for space.
693 FwdState::doneWithRetries()
695 if (request
&& request
->body_pipe
!= NULL
)
696 request
->body_pipe
->expectNoConsumption();
699 // called by the server that failed after calling unregister()
701 FwdState::handleUnregisteredServerEnd()
703 debugs(17, 2, HERE
<< "self=" << self
<< " err=" << err
<< ' ' << entry
->url());
704 assert(!Comm::IsConnOpen(serverConn
));
708 /// handles an established TCP connection to peer (including origin servers)
710 FwdState::connectDone(const Comm::ConnectionPointer
&conn
, Comm::Flag status
, int xerrno
)
712 if (status
!= Comm::OK
) {
713 ErrorState
*const anErr
= makeConnectingError(ERR_CONNECT_FAIL
);
714 anErr
->xerrno
= xerrno
;
717 /* it might have been a timeout with a partially open link */
720 peerConnectFailed(conn
->getPeer());
729 debugs(17, 3, HERE
<< serverConnection() << ": '" << entry
->url() << "'" );
731 closeHandler
= comm_add_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
733 // request->flags.pinned cannot be true in connectDone(). The flag is
734 // only set when we dispatch the request to an existing (pinned) connection.
735 assert(!request
->flags
.pinned
);
737 if (const CachePeer
*peer
= serverConnection()->getPeer()) {
738 // Assume that it is only possible for the client-first from the
739 // bumping modes to try connect to a remote server. The bumped
740 // requests with other modes are using pinned connections or fails.
741 const bool clientFirstBump
= request
->flags
.sslBumped
;
742 // We need a CONNECT tunnel to send encrypted traffic through a proxy,
743 // but we do not support TLS inside TLS, so we exclude HTTPS proxies.
744 const bool originWantsEncryptedTraffic
=
745 request
->method
== Http::METHOD_CONNECT
||
746 request
->flags
.sslPeek
||
748 if (originWantsEncryptedTraffic
&& // the "encrypted traffic" part
749 !peer
->options
.originserver
&& // the "through a proxy" part
750 !peer
->secure
.encryptTransport
) // the "exclude HTTPS proxies" part
751 return establishTunnelThruProxy();
754 secureConnectionToPeerIfNeeded();
758 FwdState::establishTunnelThruProxy()
760 AsyncCall::Pointer callback
= asyncCall(17,4,
761 "FwdState::tunnelEstablishmentDone",
762 Http::Tunneler::CbDialer
<FwdState
>(&FwdState::tunnelEstablishmentDone
, this));
763 HttpRequest::Pointer requestPointer
= request
;
764 const auto tunneler
= new Http::Tunneler(serverConnection(), requestPointer
, callback
, connectingTimeout(serverConnection()), al
);
766 Must(serverConnection()->getPeer());
767 if (!serverConnection()->getPeer()->options
.no_delay
)
768 tunneler
->setDelayId(entry
->mem_obj
->mostBytesAllowed());
770 AsyncJob::Start(tunneler
);
771 // and wait for the tunnelEstablishmentDone() call
774 /// resumes operations after the (possibly failed) HTTP CONNECT exchange
776 FwdState::tunnelEstablishmentDone(Http::TunnelerAnswer
&answer
)
778 if (answer
.positive()) {
779 if (answer
.leftovers
.isEmpty()) {
780 secureConnectionToPeerIfNeeded();
783 // This should not happen because TLS servers do not speak first. If we
784 // have to handle this, then pass answer.leftovers via a PeerConnector
785 // to ServerBio. See ClientBio::setReadBufData().
786 static int occurrences
= 0;
787 const auto level
= (occurrences
++ < 100) ? DBG_IMPORTANT
: 2;
788 debugs(17, level
, "ERROR: Early data after CONNECT response. " <<
789 "Found " << answer
.leftovers
.length() << " bytes. " <<
790 "Closing " << serverConnection());
791 fail(new ErrorState(ERR_CONNECT_FAIL
, Http::scBadGateway
, request
, al
));
792 closeServerConnection("found early data after CONNECT response");
797 // TODO: Reuse to-peer connections after a CONNECT error response.
799 if (const auto peer
= serverConnection()->getPeer())
800 peerConnectFailed(peer
);
802 const auto error
= answer
.squidError
.get();
804 answer
.squidError
.clear(); // preserve error for fail()
806 closeServerConnection("Squid-generated CONNECT error");
810 /// handles an established TCP connection to peer (including origin servers)
812 FwdState::secureConnectionToPeerIfNeeded()
814 assert(!request
->flags
.pinned
);
816 const CachePeer
*p
= serverConnection()->getPeer();
817 const bool peerWantsTls
= p
&& p
->secure
.encryptTransport
;
818 // userWillTlsToPeerForUs assumes CONNECT == HTTPS
819 const bool userWillTlsToPeerForUs
= p
&& p
->options
.originserver
&&
820 request
->method
== Http::METHOD_CONNECT
;
821 const bool needTlsToPeer
= peerWantsTls
&& !userWillTlsToPeerForUs
;
822 const bool clientFirstBump
= request
->flags
.sslBumped
; // client-first (already) bumped connection
823 const bool needsBump
= request
->flags
.sslPeek
|| clientFirstBump
;
825 // 'GET https://...' requests. If a peer is used the request is forwarded
827 const bool needTlsToOrigin
= !p
&& request
->url
.getScheme() == AnyP::PROTO_HTTPS
&& !clientFirstBump
;
829 if (needTlsToPeer
|| needTlsToOrigin
|| needsBump
) {
830 HttpRequest::Pointer requestPointer
= request
;
831 AsyncCall::Pointer callback
= asyncCall(17,4,
832 "FwdState::ConnectedToPeer",
833 FwdStatePeerAnswerDialer(&FwdState::connectedToPeer
, this));
834 const auto sslNegotiationTimeout
= connectingTimeout(serverDestinations
[0]);
835 Security::PeerConnector
*connector
= nullptr;
837 if (request
->flags
.sslPeek
)
838 connector
= new Ssl::PeekingPeerConnector(requestPointer
, serverConnection(), clientConn
, callback
, al
, sslNegotiationTimeout
);
841 connector
= new Security::BlindPeerConnector(requestPointer
, serverConnection(), callback
, al
, sslNegotiationTimeout
);
842 AsyncJob::Start(connector
); // will call our callback
846 // if not encrypting just run the post-connect actions
847 successfullyConnectedToPeer();
850 /// called when all negotiations with the TLS-speaking peer have been completed
852 FwdState::connectedToPeer(Security::EncryptorAnswer
&answer
)
854 if (ErrorState
*error
= answer
.error
.get()) {
856 answer
.error
.clear(); // preserve error for errorSendComplete()
857 if (CachePeer
*p
= serverConnection()->getPeer())
858 peerConnectFailed(p
);
859 serverConnection()->close();
863 if (answer
.tunneled
) {
864 // TODO: When ConnStateData establishes tunnels, its state changes
865 // [in ways that may affect logging?]. Consider informing
866 // ConnStateData about our tunnel or otherwise unifying tunnel
867 // establishment [side effects].
868 unregister(serverConn
); // async call owns it now
869 complete(); // destroys us
873 successfullyConnectedToPeer();
876 /// called when all negotiations with the peer have been completed
878 FwdState::successfullyConnectedToPeer()
880 // should reach ConnStateData before the dispatched Client job starts
881 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
882 ConnStateData::notePeerConnection
, serverConnection());
884 if (serverConnection()->getPeer())
885 peerConnectSucceded(serverConnection()->getPeer());
887 flags
.connected_okay
= true;
892 FwdState::connectTimeout(int fd
)
894 debugs(17, 2, "fwdConnectTimeout: FD " << fd
<< ": '" << entry
->url() << "'" );
895 assert(serverDestinations
[0] != NULL
);
896 assert(fd
== serverDestinations
[0]->fd
);
898 if (entry
->isEmpty()) {
899 const auto anErr
= new ErrorState(ERR_CONNECT_FAIL
, Http::scGatewayTimeout
, request
, al
);
900 anErr
->xerrno
= ETIMEDOUT
;
903 /* This marks the peer DOWN ... */
904 if (serverDestinations
[0]->getPeer())
905 peerConnectFailed(serverDestinations
[0]->getPeer());
908 if (Comm::IsConnOpen(serverDestinations
[0])) {
909 serverDestinations
[0]->close();
913 /// called when serverConn is set to an _open_ to-peer connection
915 FwdState::syncWithServerConn(const char *host
)
917 if (Ip::Qos::TheConfig
.isAclTosActive())
918 Ip::Qos::setSockTos(serverConn
, GetTosToServer(request
));
921 if (Ip::Qos::TheConfig
.isAclNfmarkActive())
922 Ip::Qos::setSockNfmark(serverConn
, GetNfmarkToServer(request
));
925 syncHierNote(serverConn
, host
);
929 FwdState::syncHierNote(const Comm::ConnectionPointer
&server
, const char *host
)
932 request
->hier
.resetPeerNotes(server
, host
);
934 al
->hier
.resetPeerNotes(server
, host
);
938 * Called after forwarding path selection (via peer select) has taken place
939 * and whenever forwarding needs to attempt a new connection (routing failover).
940 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
943 FwdState::connectStart()
945 assert(serverDestinations
.size() > 0);
947 debugs(17, 3, "fwdConnectStart: " << entry
->url());
949 // pinned connections go through usePinned() rather than connectStart()
950 assert(serverDestinations
[0] != nullptr);
951 request
->flags
.pinned
= false;
953 // Ditch the previous error if any.
954 // A new error page will be created if there is another problem.
957 request
->clearError();
959 // Update logging information with the upcoming server connection
960 // destination. Do this early so that any connection establishment errors
961 // are attributed to this destination. If successfully connected, this
962 // destination becomes serverConnection().
963 syncHierNote(serverDestinations
[0], request
->url
.host());
965 request
->hier
.startPeerClock();
967 // Requests bumped at step2+ require their pinned connection. Since we
968 // failed to reuse the pinned connection, we now must terminate the
969 // bumped request. For client-first and step1 bumped requests, the
970 // from-client connection is already bumped, but the connection to the
971 // server is not established/pinned so they must be excluded. We can
972 // recognize step1 bumping by nil ConnStateData::serverBump().
974 const auto clientFirstBump
= request
->clientConnectionManager
.valid() &&
975 (request
->clientConnectionManager
->sslBumpMode
== Ssl::bumpClientFirst
||
976 (request
->clientConnectionManager
->sslBumpMode
== Ssl::bumpBump
&& !request
->clientConnectionManager
->serverBump())
979 const auto clientFirstBump
= false;
980 #endif /* USE_OPENSSL */
981 if (request
->flags
.sslBumped
&& !clientFirstBump
) {
982 // TODO: Factor out/reuse as Occasionally(DBG_IMPORTANT, 2[, occurrences]).
983 static int occurrences
= 0;
984 const auto level
= (occurrences
++ < 100) ? DBG_IMPORTANT
: 2;
985 debugs(17, level
, "BUG: Lost previously bumped from-Squid connection. Rejecting bumped request.");
986 fail(new ErrorState(ERR_CANNOT_FORWARD
, Http::scServiceUnavailable
, request
, al
));
987 self
= nullptr; // refcounted
991 // Use pconn to avoid opening a new connection.
992 const char *host
= NULL
;
993 if (!serverDestinations
[0]->getPeer())
994 host
= request
->url
.host();
996 bool bumpThroughPeer
= request
->flags
.sslBumped
&& serverDestinations
[0]->getPeer();
997 Comm::ConnectionPointer temp
;
998 // Avoid pconns after races so that the same client does not suffer twice.
999 // This does not increase the total number of connections because we just
1000 // closed the connection that failed the race. And re-pinning assumes this.
1001 if (pconnRace
!= raceHappened
&& !bumpThroughPeer
)
1002 temp
= pconnPop(serverDestinations
[0], host
);
1004 const bool openedPconn
= Comm::IsConnOpen(temp
);
1005 pconnRace
= openedPconn
? racePossible
: raceImpossible
;
1007 // if we found an open persistent connection to use. use it.
1010 flags
.connected_okay
= true;
1011 debugs(17, 3, HERE
<< "reusing pconn " << serverConnection());
1014 closeHandler
= comm_add_close_handler(serverConnection()->fd
, fwdServerClosedWrapper
, this);
1016 syncWithServerConn(request
->url
.host());
1022 // We will try to open a new connection, possibly to the same destination.
1023 // We reset serverDestinations[0] in case we are using it again because
1024 // ConnOpener modifies its destination argument.
1025 serverDestinations
[0]->local
.port(0);
1028 #if URL_CHECKSUM_DEBUG
1029 entry
->mem_obj
->checkUrlChecksum();
1032 GetMarkingsToServer(request
, *serverDestinations
[0]);
1034 const AsyncCall::Pointer connector
= commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper
, this));
1035 const auto connTimeout
= connectingTimeout(serverDestinations
[0]);
1036 const auto cs
= new Comm::ConnOpener(serverDestinations
[0], connector
, connTimeout
);
1040 AsyncJob::Start(cs
);
1043 /// send request on an existing connection dedicated to the requesting client
1045 FwdState::usePinned()
1047 // we only handle pinned destinations; others are handled by connectStart()
1048 assert(!serverDestinations
.empty());
1049 assert(!serverDestinations
[0]);
1051 const auto connManager
= request
->pinnedConnection();
1052 debugs(17, 7, "connection manager: " << connManager
);
1054 // the client connection may close while we get here, nullifying connManager
1055 const auto temp
= connManager
? connManager
->borrowPinnedConnection(request
) : nullptr;
1056 debugs(17, 5, "connection: " << temp
);
1058 // the previously pinned idle peer connection may get closed (by the peer)
1059 if (!Comm::IsConnOpen(temp
)) {
1060 syncHierNote(temp
, connManager
? connManager
->pinning
.host
: request
->url
.host());
1061 serverConn
= nullptr;
1062 const auto anErr
= new ErrorState(ERR_ZERO_SIZE_OBJECT
, Http::scServiceUnavailable
, request
, al
);
1064 // Connection managers monitor their idle pinned to-server
1065 // connections and close from-client connections upon seeing
1066 // a to-server connection closure. Retrying here is futile.
1067 stopAndDestroy("pinned connection failure");
1072 flags
.connected_okay
= true;
1074 request
->flags
.pinned
= true;
1076 assert(connManager
);
1077 if (connManager
->pinnedAuth())
1078 request
->flags
.auth
= true;
1080 closeHandler
= comm_add_close_handler(temp
->fd
, fwdServerClosedWrapper
, this);
1082 syncWithServerConn(connManager
->pinning
.host
);
1084 // the server may close the pinned connection before this request
1085 pconnRace
= racePossible
;
1090 FwdState::dispatch()
1092 debugs(17, 3, clientConn
<< ": Fetching " << request
->method
<< ' ' << entry
->url());
1094 * Assert that server_fd is set. This is to guarantee that fwdState
1095 * is attached to something and will be deallocated when server_fd
1098 assert(Comm::IsConnOpen(serverConn
));
1100 fd_note(serverConnection()->fd
, entry
->url());
1102 fd_table
[serverConnection()->fd
].noteUse();
1104 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
1105 assert(entry
->ping_status
!= PING_WAITING
);
1107 assert(entry
->locked());
1109 EBIT_SET(entry
->flags
, ENTRY_DISPATCHED
);
1111 netdbPingSite(request
->url
.host());
1113 /* Retrieves remote server TOS or MARK value, and stores it as part of the
1114 * original client request FD object. It is later used to forward
1115 * remote server's TOS/MARK in the response to the client in case of a MISS.
1117 if (Ip::Qos::TheConfig
.isHitNfmarkActive()) {
1118 if (Comm::IsConnOpen(clientConn
) && Comm::IsConnOpen(serverConnection())) {
1119 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
1120 /* Get the netfilter CONNMARK */
1121 clientFde
->nfConnmarkFromServer
= Ip::Qos::getNfConnmark(serverConnection(), Ip::Qos::dirOpened
);
1126 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
1127 if (Ip::Qos::TheConfig
.isHitTosActive()) {
1128 if (Comm::IsConnOpen(clientConn
)) {
1129 fde
* clientFde
= &fd_table
[clientConn
->fd
]; // XXX: move the fd_table access into Ip::Qos
1130 /* Get the TOS value for the packet */
1131 Ip::Qos::getTosFromServer(serverConnection(), clientFde
);
1137 if (request
->flags
.sslPeek
) {
1138 CallJobHere1(17, 4, request
->clientConnectionManager
, ConnStateData
,
1139 ConnStateData::httpsPeeked
, ConnStateData::PinnedIdleContext(serverConnection(), request
));
1140 unregister(serverConn
); // async call owns it now
1141 complete(); // destroys us
1146 if (const auto peer
= serverConnection()->getPeer()) {
1147 ++peer
->stats
.fetches
;
1148 request
->prepForPeering(*peer
);
1151 assert(!request
->flags
.sslPeek
);
1152 request
->prepForDirect();
1154 switch (request
->url
.getScheme()) {
1156 case AnyP::PROTO_HTTPS
:
1160 case AnyP::PROTO_HTTP
:
1164 case AnyP::PROTO_GOPHER
:
1168 case AnyP::PROTO_FTP
:
1169 if (request
->flags
.ftpNative
)
1170 Ftp::StartRelay(this);
1172 Ftp::StartGateway(this);
1175 case AnyP::PROTO_CACHE_OBJECT
:
1177 case AnyP::PROTO_URN
:
1178 fatal_dump("Should never get here");
1181 case AnyP::PROTO_WHOIS
:
1185 case AnyP::PROTO_WAIS
: /* Not implemented */
1188 debugs(17, DBG_IMPORTANT
, "WARNING: Cannot retrieve '" << entry
->url() << "'.");
1189 const auto anErr
= new ErrorState(ERR_UNSUP_REQ
, Http::scBadRequest
, request
, al
);
1191 // Set the dont_retry flag because this is not a transient (network) error.
1192 flags
.dont_retry
= true;
1193 if (Comm::IsConnOpen(serverConn
)) {
1194 serverConn
->close();
1202 * FwdState::reforward
1204 * returns TRUE if the transaction SHOULD be re-forwarded to the
1205 * next choice in the serverDestinations list. This method is called when
1206 * peer communication completes normally, or experiences
1207 * some error after receiving the end of HTTP headers.
1210 FwdState::reforward()
1212 StoreEntry
*e
= entry
;
1214 if (EBIT_TEST(e
->flags
, ENTRY_ABORTED
)) {
1215 debugs(17, 3, HERE
<< "entry aborted");
1219 assert(e
->store_status
== STORE_PENDING
);
1221 #if URL_CHECKSUM_DEBUG
1223 e
->mem_obj
->checkUrlChecksum();
1226 debugs(17, 3, HERE
<< e
->url() << "?" );
1228 if (request
->flags
.pinned
&& !pinnedCanRetry()) {
1229 debugs(17, 3, "pinned connection; cannot retry");
1233 if (!EBIT_TEST(e
->flags
, ENTRY_FWD_HDR_WAIT
)) {
1234 debugs(17, 3, HERE
<< "No, ENTRY_FWD_HDR_WAIT isn't set");
1238 if (exhaustedTries())
1241 if (request
->bodyNibbled())
1244 if (serverDestinations
.size() <= 1 && !PeerSelectionInitiator::subscribed
) {
1245 // NP: <= 1 since total count includes the recently failed one.
1246 debugs(17, 3, HERE
<< "No alternative forwarding paths left");
1250 const Http::StatusCode s
= e
->getReply()->sline
.status();
1251 debugs(17, 3, HERE
<< "status " << s
);
1252 return reforwardableStatus(s
);
1256 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1257 * on whether this is a validation request. RFC 2616 says that we MUST reply
1258 * with "504 Gateway Timeout" if validation fails and cached reply has
1259 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
1262 FwdState::makeConnectingError(const err_type type
) const
1264 return new ErrorState(type
, request
->flags
.needValidation
?
1265 Http::scGatewayTimeout
: Http::scServiceUnavailable
, request
, al
);
1269 fwdStats(StoreEntry
* s
)
1273 storeAppendPrintf(s
, "Status");
1275 for (j
= 1; j
< MAX_FWD_STATS_IDX
; ++j
) {
1276 storeAppendPrintf(s
, "\ttry#%d", j
);
1279 storeAppendPrintf(s
, "\n");
1281 for (i
= 0; i
<= (int) Http::scInvalidHeader
; ++i
) {
1282 if (FwdReplyCodes
[0][i
] == 0)
1285 storeAppendPrintf(s
, "%3d", i
);
1287 for (j
= 0; j
<= MAX_FWD_STATS_IDX
; ++j
) {
1288 storeAppendPrintf(s
, "\t%d", FwdReplyCodes
[j
][i
]);
1291 storeAppendPrintf(s
, "\n");
1295 /**** STATIC MEMBER FUNCTIONS *************************************************/
1298 FwdState::reforwardableStatus(const Http::StatusCode s
) const
1302 case Http::scBadGateway
:
1304 case Http::scGatewayTimeout
:
1307 case Http::scForbidden
:
1309 case Http::scInternalServerError
:
1311 case Http::scNotImplemented
:
1313 case Http::scServiceUnavailable
:
1314 return Config
.retry
.onerror
;
1324 * Decide where details need to be gathered to correctly describe a persistent connection.
1326 * - the address/port details about this link
1327 * - domain name of server at other end of this link (either peer or requested host)
1330 FwdState::pconnPush(Comm::ConnectionPointer
&conn
, const char *domain
)
1332 if (conn
->getPeer()) {
1333 fwdPconnPool
->push(conn
, NULL
);
1335 fwdPconnPool
->push(conn
, domain
);
1339 Comm::ConnectionPointer
1340 FwdState::pconnPop(const Comm::ConnectionPointer
&dest
, const char *domain
)
1342 bool retriable
= checkRetriable();
1343 if (!retriable
&& Config
.accessList
.serverPconnForNonretriable
) {
1344 ACLFilledChecklist
ch(Config
.accessList
.serverPconnForNonretriable
, request
, NULL
);
1346 ch
.syncAle(request
, nullptr);
1347 retriable
= ch
.fastCheck().allowed();
1349 // always call shared pool first because we need to close an idle
1350 // connection there if we have to use a standby connection.
1351 Comm::ConnectionPointer conn
= fwdPconnPool
->pop(dest
, domain
, retriable
);
1352 if (!Comm::IsConnOpen(conn
)) {
1353 // either there was no pconn to pop or this is not a retriable xaction
1354 if (CachePeer
*peer
= dest
->getPeer()) {
1355 if (peer
->standby
.pool
)
1356 conn
= peer
->standby
.pool
->pop(dest
, domain
, true);
1359 return conn
; // open, closed, or nil
1363 FwdState::initModule()
1365 RegisterWithCacheManager();
1369 FwdState::RegisterWithCacheManager(void)
1371 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats
, 0, 1);
1375 FwdState::logReplyStatus(int tries
, const Http::StatusCode status
)
1377 if (status
> Http::scInvalidHeader
)
1382 if (tries
> MAX_FWD_STATS_IDX
)
1383 tries
= MAX_FWD_STATS_IDX
;
1385 ++ FwdReplyCodes
[tries
][status
];
1389 FwdState::exhaustedTries() const
1391 return n_tries
>= Config
.forward_max_tries
;
1395 FwdState::pinnedCanRetry() const
1397 assert(request
->flags
.pinned
);
1399 // pconn race on pinned connection: Currently we do not have any mechanism
1400 // to retry current pinned connection path.
1401 if (pconnRace
== raceHappened
)
1404 // If a bumped connection was pinned, then the TLS client was given our peer
1405 // details. Do not retry because we do not ensure that those details stay
1406 // constant. Step1-bumped connections do not get our TLS peer details, are
1407 // never pinned, and, hence, never reach this method.
1408 if (request
->flags
.sslBumped
)
1411 // The other pinned cases are FTP proxying and connection-based HTTP
1412 // authentication. TODO: Do these cases have restrictions?
1417 FwdState::connectingTimeout(const Comm::ConnectionPointer
&conn
) const
1419 const auto connTimeout
= conn
->connectTimeout(start_t
);
1420 return positiveTimeout(connTimeout
);
1423 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1427 * Formerly static, but now used by client_side_request.cc
1429 /// Checks for a TOS value to apply depending on the ACL
1431 aclMapTOS(acl_tos
* head
, ACLChecklist
* ch
)
1433 for (acl_tos
*l
= head
; l
; l
= l
->next
) {
1434 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
).allowed())
1441 /// Checks for a netfilter mark value to apply depending on the ACL
1443 aclFindNfMarkConfig(acl_nfmark
* head
, ACLChecklist
* ch
)
1445 for (acl_nfmark
*l
= head
; l
; l
= l
->next
) {
1446 if (!l
->aclList
|| ch
->fastCheck(l
->aclList
).allowed())
1447 return l
->markConfig
;
1454 getOutgoingAddress(HttpRequest
* request
, Comm::ConnectionPointer conn
)
1456 // skip if an outgoing address is already set.
1457 if (!conn
->local
.isAnyAddr()) return;
1459 // ensure that at minimum the wildcard local matches remote protocol
1460 if (conn
->remote
.isIPv4())
1461 conn
->local
.setIPv4();
1463 // maybe use TPROXY client address
1464 if (request
&& request
->flags
.spoofClientIp
) {
1465 if (!conn
->getPeer() || !conn
->getPeer()->options
.no_tproxy
) {
1466 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1467 if (Config
.onoff
.tproxy_uses_indirect_client
)
1468 conn
->local
= request
->indirect_client_addr
;
1471 conn
->local
= request
->client_addr
;
1472 conn
->local
.port(0); // let OS pick the source port to prevent address clashes
1473 // some flags need setting on the socket to use this address
1474 conn
->flags
|= COMM_DOBIND
;
1475 conn
->flags
|= COMM_TRANSPARENT
;
1478 // else no tproxy today ...
1481 if (!Config
.accessList
.outgoing_address
) {
1482 return; // anything will do.
1485 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1486 ch
.dst_peer_name
= conn
->getPeer() ? conn
->getPeer()->name
: NULL
;
1487 ch
.dst_addr
= conn
->remote
;
1489 // TODO use the connection details in ACL.
1490 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1492 for (Acl::Address
*l
= Config
.accessList
.outgoing_address
; l
; l
= l
->next
) {
1494 /* check if the outgoing address is usable to the destination */
1495 if (conn
->remote
.isIPv4() != l
->addr
.isIPv4()) continue;
1497 /* check ACLs for this outgoing address */
1498 if (!l
->aclList
|| ch
.fastCheck(l
->aclList
).allowed()) {
1499 conn
->local
= l
->addr
;
1506 GetTosToServer(HttpRequest
* request
)
1508 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1509 return aclMapTOS(Ip::Qos::TheConfig
.tosToServer
, &ch
);
1513 GetNfmarkToServer(HttpRequest
* request
)
1515 ACLFilledChecklist
ch(NULL
, request
, NULL
);
1516 const auto mc
= aclFindNfMarkConfig(Ip::Qos::TheConfig
.nfmarkToServer
, &ch
);
1521 GetMarkingsToServer(HttpRequest
* request
, Comm::Connection
&conn
)
1523 // Get the server side TOS and Netfilter mark to be set on the connection.
1524 if (Ip::Qos::TheConfig
.isAclTosActive()) {
1525 conn
.tos
= GetTosToServer(request
);
1526 debugs(17, 3, "from " << conn
.local
<< " tos " << int(conn
.tos
));
1529 #if SO_MARK && USE_LIBCAP
1530 conn
.nfmark
= GetNfmarkToServer(request
);
1531 debugs(17, 3, "from " << conn
.local
<< " netfilter mark " << conn
.nfmark
);