1 /*
2 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 17 Request Forwarding */
10
11 #include "squid.h"
12 #include "AccessLogEntry.h"
13 #include "acl/Address.h"
14 #include "acl/FilledChecklist.h"
15 #include "acl/Gadgets.h"
16 #include "anyp/PortCfg.h"
17 #include "CacheManager.h"
18 #include "CachePeer.h"
19 #include "client_side.h"
20 #include "clients/forward.h"
21 #include "clients/HttpTunneler.h"
22 #include "comm/Connection.h"
23 #include "comm/ConnOpener.h"
24 #include "comm/Loops.h"
25 #include "CommCalls.h"
26 #include "errorpage.h"
27 #include "event.h"
28 #include "fd.h"
29 #include "fde.h"
30 #include "FwdState.h"
31 #include "globals.h"
32 #include "gopher.h"
33 #include "HappyConnOpener.h"
34 #include "hier_code.h"
35 #include "http.h"
36 #include "http/Stream.h"
37 #include "HttpReply.h"
38 #include "HttpRequest.h"
39 #include "icmp/net_db.h"
40 #include "internal.h"
41 #include "ip/Intercept.h"
42 #include "ip/NfMarkConfig.h"
43 #include "ip/QosConfig.h"
44 #include "ip/tools.h"
45 #include "MemObject.h"
46 #include "mgr/Registration.h"
47 #include "neighbors.h"
48 #include "pconn.h"
49 #include "PeerPoolMgr.h"
50 #include "ResolvedPeers.h"
51 #include "security/BlindPeerConnector.h"
52 #include "SquidConfig.h"
53 #include "SquidTime.h"
54 #include "ssl/PeekingPeerConnector.h"
55 #include "Store.h"
56 #include "StoreClient.h"
57 #include "urn.h"
58 #include "whois.h"
59 #if USE_OPENSSL
60 #include "ssl/cert_validate_message.h"
61 #include "ssl/Config.h"
62 #include "ssl/ErrorDetail.h"
63 #include "ssl/helper.h"
64 #include "ssl/ServerBump.h"
65 #include "ssl/support.h"
66 #else
67 #include "security/EncryptorAnswer.h"
68 #endif
69
70 #include <cerrno>
71
72 static CLCB fwdServerClosedWrapper;
73
74 static OBJH fwdStats;
75
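// Forwarding outcome statistics: FwdReplyCodes[tries][status] counts replies
// with a given HTTP status received on the Nth forwarding attempt, with the
// attempt index capped at MAX_FWD_STATS_IDX (see logReplyStatus() and fwdStats()).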
76 #define MAX_FWD_STATS_IDX 9
77 static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];
78
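// Pool of idle server-side connections kept for reuse by the forwarding code;
// per-connection usage counts are reported back via noteUses() when a server
// connection is closed.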
79 PconnPool *fwdPconnPool = new PconnPool("server-peers", nullptr);
80
81 CBDATA_CLASS_INIT(FwdState);
82
83 class FwdStatePeerAnswerDialer: public CallDialer, public Security::PeerConnector::CbDialer
84 {
85 public:
86 typedef void (FwdState::*Method)(Security::EncryptorAnswer &);
87
88 FwdStatePeerAnswerDialer(Method method, FwdState *fwd):
89 method_(method), fwd_(fwd), answer_() {}
90
91 /* CallDialer API */
92 virtual bool canDial(AsyncCall &call) { return fwd_.valid(); }
93 void dial(AsyncCall &call) { ((&(*fwd_))->*method_)(answer_); }
94 virtual void print(std::ostream &os) const {
95 os << '(' << fwd_.get() << ", " << answer_ << ')';
96 }
97
98 /* Security::PeerConnector::CbDialer API */
99 virtual Security::EncryptorAnswer &answer() { return answer_; }
100
101 private:
102 Method method_;
103 CbcPointer<FwdState> fwd_;
104 Security::EncryptorAnswer answer_;
105 };
106
107 void
108 FwdState::abort(void* d)
109 {
110 FwdState* fwd = (FwdState*)d;
111 Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
112
113 if (Comm::IsConnOpen(fwd->serverConnection())) {
114 fwd->closeServerConnection("store entry aborted");
115 } else {
116 debugs(17, 7, HERE << "store entry aborted; no connection to close");
117 }
118 fwd->stopAndDestroy("store entry aborted");
119 }
120
121 void
122 FwdState::closeServerConnection(const char *reason)
123 {
124 debugs(17, 3, "because " << reason << "; " << serverConn);
125 comm_remove_close_handler(serverConn->fd, closeHandler);
126 closeHandler = NULL;
127 fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
128 serverConn->close();
129 }
130
131 /**** PUBLIC INTERFACE ********************************************************/
132
133 FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRequest * r, const AccessLogEntryPointer &alp):
134 entry(e),
135 request(r),
136 al(alp),
137 err(NULL),
138 clientConn(client),
139 start_t(squid_curtime),
140 n_tries(0),
141 destinations(new ResolvedPeers()),
142 pconnRace(raceImpossible)
143 {
144 debugs(17, 2, "Forwarding client request " << client << ", url=" << e->url());
145 HTTPMSGLOCK(request);
146 e->lock("FwdState");
147 flags.connected_okay = false;
148 flags.dont_retry = false;
149 flags.forward_completed = false;
150 flags.destinationsFound = false;
151 debugs(17, 3, "FwdState constructed, this=" << this);
152 }
153
154 // Called once, right after object creation, when it is safe to set self
155 void FwdState::start(Pointer aSelf)
156 {
157 // Protect ourselves from being destroyed when the only Server pointing
158 // to us is gone (while we expect to talk to more Servers later).
159 // Once we set self, we are responsible for clearing it when we do not
160 // expect to talk to any servers.
161 self = aSelf; // refcounted
162
163         // We hope that either the store entry aborts or a peer is selected.
164 // Otherwise we are going to leak our object.
165
166 // Ftp::Relay needs to preserve control connection on data aborts
167 // so it registers its own abort handler that calls ours when needed.
168 if (!request->flags.ftpNative)
169 entry->registerAbort(FwdState::abort, this);
170
171 // just in case; should already be initialized to false
172 request->flags.pinned = false;
173
174 #if STRICT_ORIGINAL_DST
175 // Bug 3243: CVE 2009-0801
176 // Bypass of browser same-origin access control in intercepted communication
177 // To resolve this we must force DIRECT and only to the original client destination.
178 const bool isIntercepted = request && !request->flags.redirected && (request->flags.intercepted || request->flags.interceptTproxy);
179 const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
180 if (isIntercepted && useOriginalDst) {
181 selectPeerForIntercepted();
182 return;
183 }
184 #endif
185
186 // do full route options selection
187 startSelectingDestinations(request, al, entry);
188 }
189
190 /// ends forwarding; relies on refcounting so the effect may not be immediate
191 void
192 FwdState::stopAndDestroy(const char *reason)
193 {
194 debugs(17, 3, "for " << reason);
195
196 if (opening())
197 cancelOpening(reason);
198
199 PeerSelectionInitiator::subscribed = false; // may already be false
200 self = nullptr; // we hope refcounting destroys us soon; may already be nil
201 /* do not place any code here as this object may be gone by now */
202 }
203
204 /// Notify connOpener that we no longer need connections. We do not have to do
205 /// this -- connOpener would eventually notice on its own, but notifying reduces
206 /// waste and speeds up spare connection opening for other transactions (that
207 /// could otherwise wait for this transaction to use its spare allowance).
208 void
209 FwdState::cancelOpening(const char *reason)
210 {
211 assert(calls.connector);
212 calls.connector->cancel(reason);
213 calls.connector = nullptr;
214 notifyConnOpener();
215 connOpener.clear();
216 }
217
218 #if STRICT_ORIGINAL_DST
219 /// bypasses peerSelect() when dealing with intercepted requests
220 void
221 FwdState::selectPeerForIntercepted()
222 {
223 // We do not support re-wrapping inside CONNECT.
224 // Our only alternative is to fake a noteDestination() call.
225
226 // use pinned connection if available
227 if (ConnStateData *client = request->pinnedConnection()) {
228 // emulate the PeerSelector::selectPinned() "Skip ICP" effect
229 entry->ping_status = PING_DONE;
230
231 usePinned();
232 return;
233 }
234
235 // use client original destination as second preferred choice
236 const auto p = new Comm::Connection();
237 p->peerType = ORIGINAL_DST;
238 p->remote = clientConn->local;
239 getOutgoingAddress(request, p);
240
241 debugs(17, 3, HERE << "using client original destination: " << *p);
242 destinations->addPath(p);
243 destinations->destinationsFinalized = true;
244 PeerSelectionInitiator::subscribed = false;
245 useDestinations();
246 }
247 #endif
248
249 void
250 FwdState::completed()
251 {
252 if (flags.forward_completed) {
253 debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
254 return;
255 }
256
257 flags.forward_completed = true;
258
259 request->hier.stopPeerClock(false);
260
261 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
262 debugs(17, 3, HERE << "entry aborted");
263 return ;
264 }
265
266 #if URL_CHECKSUM_DEBUG
267
268 entry->mem_obj->checkUrlChecksum();
269 #endif
270
271 if (entry->store_status == STORE_PENDING) {
272 if (entry->isEmpty()) {
273 if (!err) // we quit (e.g., fd closed) before an error or content
274 fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request, al));
275 assert(err);
276 errorAppendEntry(entry, err);
277 err = NULL;
278 #if USE_OPENSSL
279 if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
280 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
281 ConnStateData::httpsPeeked, ConnStateData::PinnedIdleContext(Comm::ConnectionPointer(nullptr), request));
282 }
283 #endif
284 } else {
285 entry->complete();
286 entry->releaseRequest();
287 }
288 }
289
290 if (storePendingNClients(entry) > 0)
291 assert(!EBIT_TEST(entry->flags, ENTRY_FWD_HDR_WAIT));
292
293 }
294
295 FwdState::~FwdState()
296 {
297 debugs(17, 3, "FwdState destructor start");
298
299 if (! flags.forward_completed)
300 completed();
301
302 doneWithRetries();
303
304 HTTPMSGUNLOCK(request);
305
306 delete err;
307
308 entry->unregisterAbort();
309
310 entry->unlock("FwdState");
311
312 entry = NULL;
313
314 if (opening())
315 cancelOpening("~FwdState");
316
317 if (Comm::IsConnOpen(serverConn))
318 closeServerConnection("~FwdState");
319
320 debugs(17, 3, "FwdState destructed, this=" << this);
321 }
322
323 /**
324 * This is the entry point for client-side to start forwarding
325 * a transaction. It is a static method that may or may not
326 * allocate a FwdState.
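 *
 * A minimal calling sketch (hypothetical caller; assumes clientConn, entry,
 * request, and al were already prepared by client-side code):
 *
 *   FwdState::Start(clientConn, entry, request, al);
 *   // or, when no AccessLogEntry is available:
 *   FwdState::fwdStart(clientConn, entry, request);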
327 */
328 void
329 FwdState::Start(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request, const AccessLogEntryPointer &al)
330 {
331 /** \note
332 * client_addr == no_addr indicates this is an "internal" request
333 * from peer_digest.c, asn.c, netdb.c, etc and should always
334 * be allowed. yuck, I know.
335 */
336
337 if ( Config.accessList.miss && !request->client_addr.isNoAddr() &&
338 !request->flags.internal && request->url.getScheme() != AnyP::PROTO_CACHE_OBJECT) {
339 /**
340 * Check if this host is allowed to fetch MISSES from us (miss_access).
341          * Intentionally replace the src_addr automatically selected by the checklist code:
342          * we do NOT want the indirect client address to be tested here.
343 */
344 ACLFilledChecklist ch(Config.accessList.miss, request, NULL);
345 ch.al = al;
346 ch.src_addr = request->client_addr;
347 ch.syncAle(request, nullptr);
348 if (ch.fastCheck().denied()) {
349 err_type page_id;
350 page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName, 1);
351
352 if (page_id == ERR_NONE)
353 page_id = ERR_FORWARDING_DENIED;
354
355 const auto anErr = new ErrorState(page_id, Http::scForbidden, request, al);
356 errorAppendEntry(entry, anErr); // frees anErr
357 return;
358 }
359 }
360
361 debugs(17, 3, HERE << "'" << entry->url() << "'");
362 /*
363 * This seems like an odd place to bind mem_obj and request.
364 * Might want to assert that request is NULL at this point
365 */
366 entry->mem_obj->request = request;
367 #if URL_CHECKSUM_DEBUG
368
369 entry->mem_obj->checkUrlChecksum();
370 #endif
371
372 if (shutting_down) {
373 /* more yuck */
374 const auto anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request, al);
375 errorAppendEntry(entry, anErr); // frees anErr
376 return;
377 }
378
379 if (request->flags.internal) {
380 debugs(17, 2, "calling internalStart() due to request flag");
381 internalStart(clientConn, request, entry, al);
382 return;
383 }
384
385 switch (request->url.getScheme()) {
386
387 case AnyP::PROTO_CACHE_OBJECT:
388 debugs(17, 2, "calling CacheManager due to request scheme " << request->url.getScheme());
389 CacheManager::GetInstance()->start(clientConn, request, entry, al);
390 return;
391
392 case AnyP::PROTO_URN:
393 urnStart(request, entry, al);
394 return;
395
396 default:
397 FwdState::Pointer fwd = new FwdState(clientConn, entry, request, al);
398 fwd->start(fwd);
399 return;
400 }
401
402 /* NOTREACHED */
403 }
404
405 void
406 FwdState::fwdStart(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request)
407 {
408 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
409 Start(clientConn, entry, request, NULL);
410 }
411
412 /// subtracts time_t values, returning zero if the smaller value exceeds the larger one
413 /// time_t might be unsigned so we need to be careful when subtracting times...
414 static inline time_t
415 diffOrZero(const time_t larger, const time_t smaller)
416 {
417 return (larger > smaller) ? (larger - smaller) : 0;
418 }
419
420 /// time left to finish the whole forwarding process (which started at fwdStart)
421 time_t
422 FwdState::ForwardTimeout(const time_t fwdStart)
423 {
424 // time already spent on forwarding (0 if clock went backwards)
425 const time_t timeSpent = diffOrZero(squid_curtime, fwdStart);
426 return diffOrZero(Config.Timeout.forward, timeSpent);
427 }
428
429 bool
430 FwdState::EnoughTimeToReForward(const time_t fwdStart)
431 {
432 return ForwardTimeout(fwdStart) > 0;
433 }
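// Worked example: with Config.Timeout.forward set to 240 seconds and a
// transaction that started 250 seconds ago, ForwardTimeout() returns
// diffOrZero(240, 250) == 0, so EnoughTimeToReForward() is false and
// checkRetry() will refuse further attempts.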
434
435 void
436 FwdState::useDestinations()
437 {
438 if (!destinations->empty()) {
439 connectStart();
440 } else {
441 if (PeerSelectionInitiator::subscribed) {
442 debugs(17, 4, "wait for more destinations to try");
443 return; // expect a noteDestination*() call
444 }
445
446 debugs(17, 3, HERE << "Connection failed: " << entry->url());
447 if (!err) {
448 const auto anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request, al);
449 fail(anErr);
450 } // else use actual error from last connection attempt
451
452 stopAndDestroy("tried all destinations");
453 }
454 }
455
456 void
457 FwdState::fail(ErrorState * errorState)
458 {
459 debugs(17, 3, err_type_str[errorState->type] << " \"" << Http::StatusCodeString(errorState->httpStatus) << "\"\n\t" << entry->url());
460
461 delete err;
462 err = errorState;
463
464 if (!errorState->request)
465 errorState->request = request;
466
467 if (err->type != ERR_ZERO_SIZE_OBJECT)
468 return;
469
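    // A zero-sized reply on a reused persistent connection usually means we
    // lost the race against the server closing its idle connection: remember
    // the race and requeue the path so a retry can open a fresh connection.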
470 if (pconnRace == racePossible) {
471 debugs(17, 5, HERE << "pconn race happened");
472 pconnRace = raceHappened;
473 destinations->retryPath(serverConn);
474 }
475
476 if (ConnStateData *pinned_connection = request->pinnedConnection()) {
477 pinned_connection->pinning.zeroReply = true;
478 debugs(17, 4, "zero reply on pinned connection");
479 }
480 }
481
482 /**
483 * Frees fwdState without closing FD or generating an abort
484 */
485 void
486 FwdState::unregister(Comm::ConnectionPointer &conn)
487 {
488 debugs(17, 3, HERE << entry->url() );
489 assert(serverConnection() == conn);
490 assert(Comm::IsConnOpen(conn));
491 comm_remove_close_handler(conn->fd, closeHandler);
492 closeHandler = NULL;
493 serverConn = NULL;
494 }
495
496 // \deprecated use unregister(Comm::ConnectionPointer &conn) instead
497 void
498 FwdState::unregister(int fd)
499 {
500 debugs(17, 3, HERE << entry->url() );
501 assert(fd == serverConnection()->fd);
502 unregister(serverConn);
503 }
504
505 /**
506  * FooClient modules call FwdState::complete() when they are done
507  * downloading an object. Then, we either 1) re-forward the
508  * request somewhere else if needed, or 2) call entry->complete()
509  * to finish it off.
510 */
511 void
512 FwdState::complete()
513 {
514 const auto replyStatus = entry->mem().baseReply().sline.status();
515 debugs(17, 3, *entry << " status " << replyStatus << ' ' << entry->url());
516 #if URL_CHECKSUM_DEBUG
517
518 entry->mem_obj->checkUrlChecksum();
519 #endif
520
521 logReplyStatus(n_tries, replyStatus);
522
523 if (reforward()) {
524 debugs(17, 3, "re-forwarding " << replyStatus << " " << entry->url());
525
526 if (Comm::IsConnOpen(serverConn))
527 unregister(serverConn);
528
529 entry->reset();
530
531 useDestinations();
532
533 } else {
534 if (Comm::IsConnOpen(serverConn))
535 debugs(17, 3, "server FD " << serverConnection()->fd << " not re-forwarding status " << replyStatus);
536 else
537 debugs(17, 3, "server (FD closed) not re-forwarding status " << replyStatus);
538 entry->complete();
539
540 if (!Comm::IsConnOpen(serverConn))
541 completed();
542
543 stopAndDestroy("forwarding completed");
544 }
545 }
546
547 void
548 FwdState::noteDestination(Comm::ConnectionPointer path)
549 {
550 flags.destinationsFound = true;
551
552 if (!path) {
553 // We can call usePinned() without fear of clashing with an earlier
554 // forwarding attempt because PINNED must be the first destination.
555 assert(destinations->empty());
556 usePinned();
557 return;
558 }
559
560 debugs(17, 3, path);
561
562 destinations->addPath(path);
563
564 if (Comm::IsConnOpen(serverConn)) {
565 // We are already using a previously opened connection, so we cannot be
566 // waiting for connOpener. We still receive destinations for backup.
567 Must(!opening());
568 return;
569 }
570
571 if (opening()) {
572 notifyConnOpener();
573 return; // and continue to wait for FwdState::noteConnection() callback
574 }
575
576 // This is the first path candidate we have seen. Create connOpener.
577 useDestinations();
578 }
579
580 void
581 FwdState::noteDestinationsEnd(ErrorState *selectionError)
582 {
583 PeerSelectionInitiator::subscribed = false;
584 destinations->destinationsFinalized = true;
585
586 if (!flags.destinationsFound) {
587 if (selectionError) {
588 debugs(17, 3, "Will abort forwarding because path selection has failed.");
589 Must(!err); // if we tried to connect, then path selection succeeded
590 fail(selectionError);
591 }
592 else if (err)
593 debugs(17, 3, "Will abort forwarding because all found paths have failed.");
594 else
595 debugs(17, 3, "Will abort forwarding because path selection found no paths.");
596
597 useDestinations(); // will detect and handle the lack of paths
598 return;
599 }
600 // else continue to use one of the previously noted destinations;
601 // if all of them fail, forwarding as whole will fail
602 Must(!selectionError); // finding at least one path means selection succeeded
603
604 if (Comm::IsConnOpen(serverConn)) {
605 // We are already using a previously opened connection, so we cannot be
606 // waiting for connOpener. We were receiving destinations for backup.
607 Must(!opening());
608 return;
609 }
610
611 Must(opening()); // or we would be stuck with nothing to do or wait for
612 notifyConnOpener();
613 // and continue to wait for FwdState::noteConnection() callback
614 }
615
616 /// makes sure connOpener knows that destinations have changed
617 void
618 FwdState::notifyConnOpener()
619 {
620 if (destinations->notificationPending) {
621 debugs(17, 7, "reusing pending notification about " << *destinations);
622 } else {
623 debugs(17, 7, "notifying about " << *destinations);
624 destinations->notificationPending = true;
625 CallJobHere(17, 5, connOpener, HappyConnOpener, noteCandidatesChange);
626 }
627 }
628
629 /**** CALLBACK WRAPPERS ************************************************************/
630
631 static void
632 fwdServerClosedWrapper(const CommCloseCbParams &params)
633 {
634 FwdState *fwd = (FwdState *)params.data;
635 fwd->serverClosed(params.fd);
636 }
637
638 /**** PRIVATE *****************************************************************/
639
640 /*
641 * FwdState::checkRetry
642 *
643 * Return TRUE if the request SHOULD be retried. This method is
644 * called when the HTTP connection fails, or when the connection
645 * is closed before reading the end of HTTP headers from the server.
646 */
647 bool
648 FwdState::checkRetry()
649 {
650 if (shutting_down)
651 return false;
652
653 if (!self) { // we have aborted before the server called us back
654 debugs(17, 5, HERE << "not retrying because of earlier abort");
655 // we will be destroyed when the server clears its Pointer to us
656 return false;
657 }
658
659 if (entry->store_status != STORE_PENDING)
660 return false;
661
662 if (!entry->isEmpty())
663 return false;
664
665 if (exhaustedTries())
666 return false;
667
668 if (request->flags.pinned && !pinnedCanRetry())
669 return false;
670
671 if (!EnoughTimeToReForward(start_t))
672 return false;
673
674 if (flags.dont_retry)
675 return false;
676
677 if (request->bodyNibbled())
678 return false;
679
680 // NP: not yet actually connected anywhere. retry is safe.
681 if (!flags.connected_okay)
682 return true;
683
684 if (!checkRetriable())
685 return false;
686
687 return true;
688 }
689
690 /// Whether we may try sending this request again after a failure.
691 bool
692 FwdState::checkRetriable()
693 {
694 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
695 // complicated] code required to protect the PUT request body from being
696 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
697 if (request->body_pipe != NULL)
698 return false;
699
700 // RFC2616 9.1 Safe and Idempotent Methods
701 return (request->method.isHttpSafe() || request->method.isIdempotent());
702 }
703
704 void
705 FwdState::serverClosed(int fd)
706 {
707 // XXX: fd is often -1 here
708 debugs(17, 2, "FD " << fd << " " << entry->url() << " after " <<
709 (fd >= 0 ? fd_table[fd].pconn.uses : -1) << " requests");
710 if (fd >= 0 && serverConnection()->fd == fd)
711 fwdPconnPool->noteUses(fd_table[fd].pconn.uses);
712 retryOrBail();
713 }
714
715 void
716 FwdState::retryOrBail()
717 {
718 if (checkRetry()) {
719 debugs(17, 3, HERE << "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
720 useDestinations();
721 return;
722 }
723
724 // TODO: should we call completed() here and move doneWithRetries there?
725 doneWithRetries();
726
727 request->hier.stopPeerClock(false);
728
729 if (self != NULL && !err && shutting_down && entry->isEmpty()) {
730 const auto anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request, al);
731 errorAppendEntry(entry, anErr);
732 }
733
734 stopAndDestroy("cannot retry");
735 }
736
737 // If the Server quits before nibbling at the request body, the body sender
738 // will not know (so that we can retry). Call this if we will not retry. We
739 // will notify the sender so that it does not get stuck waiting for space.
740 void
741 FwdState::doneWithRetries()
742 {
743 if (request && request->body_pipe != NULL)
744 request->body_pipe->expectNoConsumption();
745 }
746
747 // called by the server that failed after calling unregister()
748 void
749 FwdState::handleUnregisteredServerEnd()
750 {
751 debugs(17, 2, HERE << "self=" << self << " err=" << err << ' ' << entry->url());
752 assert(!Comm::IsConnOpen(serverConn));
753 retryOrBail();
754 }
755
756 /// called when a to-peer connection has been successfully obtained or
757 /// when all candidate destinations have been tried and all have failed
758 void
759 FwdState::noteConnection(HappyConnOpener::Answer &answer)
760 {
761 calls.connector = nullptr;
762 connOpener.clear();
763
764 Must(n_tries <= answer.n_tries); // n_tries cannot decrease
765 n_tries = answer.n_tries;
766
767 if (const auto error = answer.error.get()) {
768 flags.dont_retry = true; // or HappyConnOpener would not have given up
769 syncHierNote(answer.conn, request->url.host());
770 fail(error);
771 answer.error.clear(); // preserve error for errorSendComplete()
772 retryOrBail(); // will notice flags.dont_retry and bail
773 return;
774 }
775
776 syncWithServerConn(answer.conn, request->url.host(), answer.reused);
777
778 if (answer.reused)
779 return dispatch();
780
781     // Check if we need to set up TLS before use
782 if (const CachePeer *peer = serverConnection()->getPeer()) {
783         // Assume that only requests bumped in client-first mode can try to
784         // connect to a remote server here; bumped requests in the other
785         // modes use pinned connections or fail.
786 const bool clientFirstBump = request->flags.sslBumped;
787 // We need a CONNECT tunnel to send encrypted traffic through a proxy,
788 // but we do not support TLS inside TLS, so we exclude HTTPS proxies.
789 const bool originWantsEncryptedTraffic =
790 request->method == Http::METHOD_CONNECT ||
791 request->flags.sslPeek ||
792 clientFirstBump;
793 if (originWantsEncryptedTraffic && // the "encrypted traffic" part
794 !peer->options.originserver && // the "through a proxy" part
795 !peer->secure.encryptTransport) // the "exclude HTTPS proxies" part
796 return establishTunnelThruProxy();
797 }
798
799 secureConnectionToPeerIfNeeded();
800 }
801
802 void
803 FwdState::establishTunnelThruProxy()
804 {
805 AsyncCall::Pointer callback = asyncCall(17,4,
806 "FwdState::tunnelEstablishmentDone",
807 Http::Tunneler::CbDialer<FwdState>(&FwdState::tunnelEstablishmentDone, this));
808 HttpRequest::Pointer requestPointer = request;
809 const auto tunneler = new Http::Tunneler(serverConnection(), requestPointer, callback, connectingTimeout(serverConnection()), al);
810 #if USE_DELAY_POOLS
811 Must(serverConnection()->getPeer());
812 if (!serverConnection()->getPeer()->options.no_delay)
813 tunneler->setDelayId(entry->mem_obj->mostBytesAllowed());
814 #endif
815 AsyncJob::Start(tunneler);
816 // and wait for the tunnelEstablishmentDone() call
817 }
818
819 /// resumes operations after the (possibly failed) HTTP CONNECT exchange
820 void
821 FwdState::tunnelEstablishmentDone(Http::TunnelerAnswer &answer)
822 {
823 if (answer.positive()) {
824 if (answer.leftovers.isEmpty()) {
825 secureConnectionToPeerIfNeeded();
826 return;
827 }
828 // This should not happen because TLS servers do not speak first. If we
829 // have to handle this, then pass answer.leftovers via a PeerConnector
830 // to ServerBio. See ClientBio::setReadBufData().
831 static int occurrences = 0;
832 const auto level = (occurrences++ < 100) ? DBG_IMPORTANT : 2;
833 debugs(17, level, "ERROR: Early data after CONNECT response. " <<
834 "Found " << answer.leftovers.length() << " bytes. " <<
835 "Closing " << serverConnection());
836 fail(new ErrorState(ERR_CONNECT_FAIL, Http::scBadGateway, request, al));
837 closeServerConnection("found early data after CONNECT response");
838 retryOrBail();
839 return;
840 }
841
842 // TODO: Reuse to-peer connections after a CONNECT error response.
843
844 if (const auto peer = serverConnection()->getPeer())
845 peerConnectFailed(peer);
846
847 const auto error = answer.squidError.get();
848 Must(error);
849 answer.squidError.clear(); // preserve error for fail()
850 fail(error);
851 closeServerConnection("Squid-generated CONNECT error");
852 retryOrBail();
853 }
854
855 /// handles an established TCP connection to peer (including origin servers)
856 void
857 FwdState::secureConnectionToPeerIfNeeded()
858 {
859 assert(!request->flags.pinned);
860
861 const CachePeer *p = serverConnection()->getPeer();
862 const bool peerWantsTls = p && p->secure.encryptTransport;
863 // userWillTlsToPeerForUs assumes CONNECT == HTTPS
864 const bool userWillTlsToPeerForUs = p && p->options.originserver &&
865 request->method == Http::METHOD_CONNECT;
866 const bool needTlsToPeer = peerWantsTls && !userWillTlsToPeerForUs;
867 const bool clientFirstBump = request->flags.sslBumped; // client-first (already) bumped connection
868 const bool needsBump = request->flags.sslPeek || clientFirstBump;
869
870     // 'GET https://...' requests: if a peer is used, the request is forwarded
871     // to it as-is, so we only need TLS to the origin when going direct
872 const bool needTlsToOrigin = !p && request->url.getScheme() == AnyP::PROTO_HTTPS && !clientFirstBump;
873
874 if (needTlsToPeer || needTlsToOrigin || needsBump) {
875 HttpRequest::Pointer requestPointer = request;
876 AsyncCall::Pointer callback = asyncCall(17,4,
877 "FwdState::ConnectedToPeer",
878 FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
879 const auto sslNegotiationTimeout = connectingTimeout(serverConnection());
880 Security::PeerConnector *connector = nullptr;
881 #if USE_OPENSSL
882 if (request->flags.sslPeek)
883 connector = new Ssl::PeekingPeerConnector(requestPointer, serverConnection(), clientConn, callback, al, sslNegotiationTimeout);
884 else
885 #endif
886 connector = new Security::BlindPeerConnector(requestPointer, serverConnection(), callback, al, sslNegotiationTimeout);
887 AsyncJob::Start(connector); // will call our callback
888 return;
889 }
890
891 // if not encrypting just run the post-connect actions
892 successfullyConnectedToPeer();
893 }
894
895 /// called when all negotiations with the TLS-speaking peer have been completed
896 void
897 FwdState::connectedToPeer(Security::EncryptorAnswer &answer)
898 {
899 if (ErrorState *error = answer.error.get()) {
900 fail(error);
901 answer.error.clear(); // preserve error for errorSendComplete()
902 if (CachePeer *p = serverConnection()->getPeer())
903 peerConnectFailed(p);
904 serverConnection()->close();
905 return;
906 }
907
908 if (answer.tunneled) {
909 // TODO: When ConnStateData establishes tunnels, its state changes
910 // [in ways that may affect logging?]. Consider informing
911 // ConnStateData about our tunnel or otherwise unifying tunnel
912 // establishment [side effects].
913 unregister(serverConn); // async call owns it now
914 complete(); // destroys us
915 return;
916 }
917
918 successfullyConnectedToPeer();
919 }
920
921 /// called when all negotiations with the peer have been completed
922 void
923 FwdState::successfullyConnectedToPeer()
924 {
925 // should reach ConnStateData before the dispatched Client job starts
926 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
927 ConnStateData::notePeerConnection, serverConnection());
928
929 if (serverConnection()->getPeer())
930 peerConnectSucceded(serverConnection()->getPeer());
931
932 dispatch();
933 }
934
935 /// commits to using the given open to-peer connection
936 void
937 FwdState::syncWithServerConn(const Comm::ConnectionPointer &conn, const char *host, const bool reused)
938 {
939 Must(IsConnOpen(conn));
940 serverConn = conn;
941
942 closeHandler = comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
943
944 if (reused) {
945 pconnRace = racePossible;
946 ResetMarkingsToServer(request, *serverConn);
947 } else {
948 pconnRace = raceImpossible;
949 // Comm::ConnOpener already applied proper/current markings
950 }
951
952 syncHierNote(serverConn, host);
953 }
954
955 void
956 FwdState::syncHierNote(const Comm::ConnectionPointer &server, const char *host)
957 {
958 if (request)
959 request->hier.resetPeerNotes(server, host);
960 if (al)
961 al->hier.resetPeerNotes(server, host);
962 }
963
964 /**
965 * Called after forwarding path selection (via peer select) has taken place
966 * and whenever forwarding needs to attempt a new connection (routing failover).
967 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
968 */
969 void
970 FwdState::connectStart()
971 {
972 debugs(17, 3, *destinations << " to " << entry->url());
973
974 Must(!request->pinnedConnection());
975
976 assert(!destinations->empty());
977 assert(!opening());
978
979 // Ditch error page if it was created before.
980 // A new one will be created if there's another problem
981 delete err;
982 err = nullptr;
983 request->clearError();
984 serverConn = nullptr;
985
986 request->hier.startPeerClock();
987
988 calls.connector = asyncCall(17, 5, "FwdState::noteConnection", HappyConnOpener::CbDialer<FwdState>(&FwdState::noteConnection, this));
989
990 HttpRequest::Pointer cause = request;
991 const auto cs = new HappyConnOpener(destinations, calls.connector, cause, start_t, n_tries, al);
992 cs->setHost(request->url.host());
993 bool retriable = checkRetriable();
994 if (!retriable && Config.accessList.serverPconnForNonretriable) {
995 ACLFilledChecklist ch(Config.accessList.serverPconnForNonretriable, request, nullptr);
996 ch.al = al;
997 ch.syncAle(request, nullptr);
998 retriable = ch.fastCheck().allowed();
999 }
1000 cs->setRetriable(retriable);
1001 cs->allowPersistent(pconnRace != raceHappened);
1002 destinations->notificationPending = true; // start() is async
1003 connOpener = cs;
1004 AsyncJob::Start(cs);
1005 }
1006
1007 /// send request on an existing connection dedicated to the requesting client
1008 void
1009 FwdState::usePinned()
1010 {
1011 const auto connManager = request->pinnedConnection();
1012 debugs(17, 7, "connection manager: " << connManager);
1013
1014 try {
1015 serverConn = ConnStateData::BorrowPinnedConnection(request, al);
1016 debugs(17, 5, "connection: " << serverConn);
1017 } catch (ErrorState * const anErr) {
1018 syncHierNote(nullptr, connManager ? connManager->pinning.host : request->url.host());
1019 serverConn = nullptr;
1020 fail(anErr);
1021 // Connection managers monitor their idle pinned to-server
1022 // connections and close from-client connections upon seeing
1023 // a to-server connection closure. Retrying here is futile.
1024 stopAndDestroy("pinned connection failure");
1025 return;
1026 }
1027
1028 ++n_tries;
1029 request->flags.pinned = true;
1030
1031 assert(connManager);
1032 if (connManager->pinnedAuth())
1033 request->flags.auth = true;
1034
1035 // the server may close the pinned connection before this request
1036 const auto reused = true;
1037 syncWithServerConn(serverConn, connManager->pinning.host, reused);
1038
1039 dispatch();
1040 }
1041
1042 void
1043 FwdState::dispatch()
1044 {
1045 debugs(17, 3, clientConn << ": Fetching " << request->method << ' ' << entry->url());
1046 /*
1047      * Assert that the server connection is open. This is to guarantee that
1048      * fwdState is attached to something and will be deallocated when the
1049      * connection is closed.
1050 */
1051 assert(Comm::IsConnOpen(serverConn));
1052
1053 fd_note(serverConnection()->fd, entry->url());
1054
1055 fd_table[serverConnection()->fd].noteUse();
1056
1057 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
1058 assert(entry->ping_status != PING_WAITING);
1059
1060 assert(entry->locked());
1061
1062 EBIT_SET(entry->flags, ENTRY_DISPATCHED);
1063
1064 flags.connected_okay = true;
1065
1066 netdbPingSite(request->url.host());
1067
1068 /* Retrieves remote server TOS or MARK value, and stores it as part of the
1069 * original client request FD object. It is later used to forward
1070 * remote server's TOS/MARK in the response to the client in case of a MISS.
1071 */
1072 if (Ip::Qos::TheConfig.isHitNfmarkActive()) {
1073 if (Comm::IsConnOpen(clientConn) && Comm::IsConnOpen(serverConnection())) {
1074 fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
1075 /* Get the netfilter CONNMARK */
1076 clientFde->nfConnmarkFromServer = Ip::Qos::getNfConnmark(serverConnection(), Ip::Qos::dirOpened);
1077 }
1078 }
1079
1080 #if _SQUID_LINUX_
1081 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
1082 if (Ip::Qos::TheConfig.isHitTosActive()) {
1083 if (Comm::IsConnOpen(clientConn)) {
1084 fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
1085 /* Get the TOS value for the packet */
1086 Ip::Qos::getTosFromServer(serverConnection(), clientFde);
1087 }
1088 }
1089 #endif
1090
1091 #if USE_OPENSSL
1092 if (request->flags.sslPeek) {
1093 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
1094 ConnStateData::httpsPeeked, ConnStateData::PinnedIdleContext(serverConnection(), request));
1095 unregister(serverConn); // async call owns it now
1096 complete(); // destroys us
1097 return;
1098 }
1099 #endif
1100
1101 if (const auto peer = serverConnection()->getPeer()) {
1102 ++peer->stats.fetches;
1103 request->prepForPeering(*peer);
1104 httpStart(this);
1105 } else {
1106 assert(!request->flags.sslPeek);
1107 request->prepForDirect();
1108
1109 switch (request->url.getScheme()) {
1110
1111 case AnyP::PROTO_HTTPS:
1112 httpStart(this);
1113 break;
1114
1115 case AnyP::PROTO_HTTP:
1116 httpStart(this);
1117 break;
1118
1119 case AnyP::PROTO_GOPHER:
1120 gopherStart(this);
1121 break;
1122
1123 case AnyP::PROTO_FTP:
1124 if (request->flags.ftpNative)
1125 Ftp::StartRelay(this);
1126 else
1127 Ftp::StartGateway(this);
1128 break;
1129
1130 case AnyP::PROTO_CACHE_OBJECT:
1131
1132 case AnyP::PROTO_URN:
1133 fatal_dump("Should never get here");
1134 break;
1135
1136 case AnyP::PROTO_WHOIS:
1137 whoisStart(this);
1138 break;
1139
1140 case AnyP::PROTO_WAIS: /* Not implemented */
1141
1142 default:
1143 debugs(17, DBG_IMPORTANT, "WARNING: Cannot retrieve '" << entry->url() << "'.");
1144 const auto anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request, al);
1145 fail(anErr);
1146 // Set the dont_retry flag because this is not a transient (network) error.
1147 flags.dont_retry = true;
1148 if (Comm::IsConnOpen(serverConn)) {
1149 serverConn->close();
1150 }
1151 break;
1152 }
1153 }
1154 }
1155
1156 /*
1157 * FwdState::reforward
1158 *
1159  * returns TRUE if the transaction SHOULD be re-forwarded to the
1160  * next choice in the destinations list. This method is called when
1161 * peer communication completes normally, or experiences
1162 * some error after receiving the end of HTTP headers.
1163 */
1164 int
1165 FwdState::reforward()
1166 {
1167 StoreEntry *e = entry;
1168
1169 if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
1170 debugs(17, 3, HERE << "entry aborted");
1171 return 0;
1172 }
1173
1174 assert(e->store_status == STORE_PENDING);
1175 assert(e->mem_obj);
1176 #if URL_CHECKSUM_DEBUG
1177
1178 e->mem_obj->checkUrlChecksum();
1179 #endif
1180
1181 debugs(17, 3, HERE << e->url() << "?" );
1182
1183 if (request->flags.pinned && !pinnedCanRetry()) {
1184 debugs(17, 3, "pinned connection; cannot retry");
1185 return 0;
1186 }
1187
1188 if (!EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
1189 debugs(17, 3, HERE << "No, ENTRY_FWD_HDR_WAIT isn't set");
1190 return 0;
1191 }
1192
1193 if (exhaustedTries())
1194 return 0;
1195
1196 if (request->bodyNibbled())
1197 return 0;
1198
1199 if (destinations->empty() && !PeerSelectionInitiator::subscribed) {
1200 debugs(17, 3, HERE << "No alternative forwarding paths left");
1201 return 0;
1202 }
1203
1204 const auto s = entry->mem().baseReply().sline.status();
1205 debugs(17, 3, HERE << "status " << s);
1206 return reforwardableStatus(s);
1207 }
1208
1209 static void
1210 fwdStats(StoreEntry * s)
1211 {
1212 int i;
1213 int j;
1214 storeAppendPrintf(s, "Status");
1215
1216 for (j = 1; j < MAX_FWD_STATS_IDX; ++j) {
1217 storeAppendPrintf(s, "\ttry#%d", j);
1218 }
1219
1220 storeAppendPrintf(s, "\n");
1221
1222 for (i = 0; i <= (int) Http::scInvalidHeader; ++i) {
1223 if (FwdReplyCodes[0][i] == 0)
1224 continue;
1225
1226 storeAppendPrintf(s, "%3d", i);
1227
1228 for (j = 0; j <= MAX_FWD_STATS_IDX; ++j) {
1229 storeAppendPrintf(s, "\t%d", FwdReplyCodes[j][i]);
1230 }
1231
1232 storeAppendPrintf(s, "\n");
1233 }
1234 }
1235
1236 /**** STATIC MEMBER FUNCTIONS *************************************************/
1237
1238 bool
1239 FwdState::reforwardableStatus(const Http::StatusCode s) const
1240 {
1241 switch (s) {
1242
1243 case Http::scBadGateway:
1244
1245 case Http::scGatewayTimeout:
1246 return true;
1247
1248 case Http::scForbidden:
1249
1250 case Http::scInternalServerError:
1251
1252 case Http::scNotImplemented:
1253
1254 case Http::scServiceUnavailable:
1255 return Config.retry.onerror;
1256
1257 default:
1258 return false;
1259 }
1260
1261 /* NOTREACHED */
1262 }
1263
1264 void
1265 FwdState::initModule()
1266 {
1267 RegisterWithCacheManager();
1268 }
1269
1270 void
1271 FwdState::RegisterWithCacheManager(void)
1272 {
1273 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats, 0, 1);
1274 }
1275
1276 void
1277 FwdState::logReplyStatus(int tries, const Http::StatusCode status)
1278 {
1279 if (status > Http::scInvalidHeader)
1280 return;
1281
1282 assert(tries >= 0);
1283
1284 if (tries > MAX_FWD_STATS_IDX)
1285 tries = MAX_FWD_STATS_IDX;
1286
1287 ++ FwdReplyCodes[tries][status];
1288 }
1289
1290 bool
1291 FwdState::exhaustedTries() const
1292 {
1293 return n_tries >= Config.forward_max_tries;
1294 }
1295
1296 bool
1297 FwdState::pinnedCanRetry() const
1298 {
1299 assert(request->flags.pinned);
1300
1301 // pconn race on pinned connection: Currently we do not have any mechanism
1302 // to retry current pinned connection path.
1303 if (pconnRace == raceHappened)
1304 return false;
1305
1306 // If a bumped connection was pinned, then the TLS client was given our peer
1307 // details. Do not retry because we do not ensure that those details stay
1308 // constant. Step1-bumped connections do not get our TLS peer details, are
1309 // never pinned, and, hence, never reach this method.
1310 if (request->flags.sslBumped)
1311 return false;
1312
1313 // The other pinned cases are FTP proxying and connection-based HTTP
1314 // authentication. TODO: Do these cases have restrictions?
1315 return true;
1316 }
1317
1318 time_t
1319 FwdState::connectingTimeout(const Comm::ConnectionPointer &conn) const
1320 {
1321 const auto connTimeout = conn->connectTimeout(start_t);
1322 return positiveTimeout(connTimeout);
1323 }
1324
1325 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1326
1327 /*
1328 * DPW 2007-05-19
1329 * Formerly static, but now used by client_side_request.cc
1330 */
1331 /// Checks for a TOS value to apply depending on the ACL
1332 tos_t
1333 aclMapTOS(acl_tos * head, ACLChecklist * ch)
1334 {
1335 for (acl_tos *l = head; l; l = l->next) {
1336 if (!l->aclList || ch->fastCheck(l->aclList).allowed())
1337 return l->tos;
1338 }
1339
1340 return 0;
1341 }
1342
1343 /// Checks for a netfilter mark value to apply depending on the ACL
1344 Ip::NfMarkConfig
1345 aclFindNfMarkConfig(acl_nfmark * head, ACLChecklist * ch)
1346 {
1347 for (acl_nfmark *l = head; l; l = l->next) {
1348 if (!l->aclList || ch->fastCheck(l->aclList).allowed())
1349 return l->markConfig;
1350 }
1351
1352 return {};
1353 }
1354
1355 void
1356 getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn)
1357 {
1358 // skip if an outgoing address is already set.
1359 if (!conn->local.isAnyAddr()) return;
1360
1361 // ensure that at minimum the wildcard local matches remote protocol
1362 if (conn->remote.isIPv4())
1363 conn->local.setIPv4();
1364
1365 // maybe use TPROXY client address
1366 if (request && request->flags.spoofClientIp) {
1367 if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
1368 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1369 if (Config.onoff.tproxy_uses_indirect_client)
1370 conn->local = request->indirect_client_addr;
1371 else
1372 #endif
1373 conn->local = request->client_addr;
1374 conn->local.port(0); // let OS pick the source port to prevent address clashes
1375 // some flags need setting on the socket to use this address
1376 conn->flags |= COMM_DOBIND;
1377 conn->flags |= COMM_TRANSPARENT;
1378 return;
1379 }
1380 // else no tproxy today ...
1381 }
1382
1383 if (!Config.accessList.outgoing_address) {
1384 return; // anything will do.
1385 }
1386
1387 ACLFilledChecklist ch(NULL, request, NULL);
1388 ch.dst_peer_name = conn->getPeer() ? conn->getPeer()->name : NULL;
1389 ch.dst_addr = conn->remote;
1390
1391 // TODO use the connection details in ACL.
1392 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1393
1394 for (Acl::Address *l = Config.accessList.outgoing_address; l; l = l->next) {
1395
1396 /* check if the outgoing address is usable to the destination */
1397 if (conn->remote.isIPv4() != l->addr.isIPv4()) continue;
1398
1399 /* check ACLs for this outgoing address */
1400 if (!l->aclList || ch.fastCheck(l->aclList).allowed()) {
1401 conn->local = l->addr;
1402 return;
1403 }
1404 }
1405 }
1406
1407 /// \returns the TOS value that should be set on the to-peer connection
1408 static tos_t
1409 GetTosToServer(HttpRequest * request, Comm::Connection &conn)
1410 {
1411 if (!Ip::Qos::TheConfig.tosToServer)
1412 return 0;
1413
1414 ACLFilledChecklist ch(NULL, request, NULL);
1415 ch.dst_peer_name = conn.getPeer() ? conn.getPeer()->name : nullptr;
1416 ch.dst_addr = conn.remote;
1417 return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
1418 }
1419
1420 /// \returns the Netfilter mark that should be set on the to-peer connection
1421 static nfmark_t
1422 GetNfmarkToServer(HttpRequest * request, Comm::Connection &conn)
1423 {
1424 if (!Ip::Qos::TheConfig.nfmarkToServer)
1425 return 0;
1426
1427 ACLFilledChecklist ch(NULL, request, NULL);
1428 ch.dst_peer_name = conn.getPeer() ? conn.getPeer()->name : nullptr;
1429 ch.dst_addr = conn.remote;
1430 const auto mc = aclFindNfMarkConfig(Ip::Qos::TheConfig.nfmarkToServer, &ch);
1431 return mc.mark;
1432 }
1433
1434 void
1435 GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
1436 {
1437 // Get the server side TOS and Netfilter mark to be set on the connection.
1438 conn.tos = GetTosToServer(request, conn);
1439 conn.nfmark = GetNfmarkToServer(request, conn);
1440 debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos) << " netfilter mark " << conn.nfmark);
1441 }
1442
1443 void
1444 ResetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
1445 {
1446 GetMarkingsToServer(request, conn);
1447
1448     // TODO: Avoid these calls if markings have not changed.
1449 if (conn.tos)
1450 Ip::Qos::setSockTos(&conn, conn.tos);
1451 if (conn.nfmark)
1452 Ip::Qos::setSockNfmark(&conn, conn.nfmark);
1453 }
1454