]> git.ipfire.org Git - thirdparty/squid.git/blob - src/FwdState.cc
Maintenance: Update astyle version to 3.1 (#841)
[thirdparty/squid.git] / src / FwdState.cc
1 /*
2 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 17 Request Forwarding */
10
11 #include "squid.h"
12 #include "AccessLogEntry.h"
13 #include "acl/Address.h"
14 #include "acl/FilledChecklist.h"
15 #include "acl/Gadgets.h"
16 #include "anyp/PortCfg.h"
17 #include "CacheManager.h"
18 #include "CachePeer.h"
19 #include "client_side.h"
20 #include "clients/forward.h"
21 #include "clients/HttpTunneler.h"
22 #include "comm/Connection.h"
23 #include "comm/ConnOpener.h"
24 #include "comm/Loops.h"
25 #include "CommCalls.h"
26 #include "errorpage.h"
27 #include "event.h"
28 #include "fd.h"
29 #include "fde.h"
30 #include "FwdState.h"
31 #include "globals.h"
32 #include "gopher.h"
33 #include "HappyConnOpener.h"
34 #include "hier_code.h"
35 #include "http.h"
36 #include "http/Stream.h"
37 #include "HttpReply.h"
38 #include "HttpRequest.h"
39 #include "icmp/net_db.h"
40 #include "internal.h"
41 #include "ip/Intercept.h"
42 #include "ip/NfMarkConfig.h"
43 #include "ip/QosConfig.h"
44 #include "ip/tools.h"
45 #include "MemObject.h"
46 #include "mgr/Registration.h"
47 #include "neighbors.h"
48 #include "pconn.h"
49 #include "PeerPoolMgr.h"
50 #include "ResolvedPeers.h"
51 #include "security/BlindPeerConnector.h"
52 #include "SquidConfig.h"
53 #include "SquidTime.h"
54 #include "ssl/PeekingPeerConnector.h"
55 #include "Store.h"
56 #include "StoreClient.h"
57 #include "urn.h"
58 #include "whois.h"
59 #if USE_OPENSSL
60 #include "ssl/cert_validate_message.h"
61 #include "ssl/Config.h"
62 #include "ssl/helper.h"
63 #include "ssl/ServerBump.h"
64 #include "ssl/support.h"
65 #else
66 #include "security/EncryptorAnswer.h"
67 #endif
68
69 #include <cerrno>
70
/// Comm close handler for the to-server connection; forwards the event to
/// FwdState::serverClosed().
static CLCB fwdServerClosedWrapper;

/// cache manager report handler for forwarding statistics (see fwdStats table)
static OBJH fwdStats;

/// highest number-of-tries bucket tracked by the forwarding statistics table
#define MAX_FWD_STATS_IDX 9
/// reply status counters indexed by [number of tries][HTTP status code]
static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];

/// idle persistent server connections shared by all forwarding attempts
PconnPool *fwdPconnPool = new PconnPool("server-peers", nullptr);

CBDATA_CLASS_INIT(FwdState);
81
/// Glues FwdState to the Security::PeerConnector callback machinery: stores a
/// Security::EncryptorAnswer and dials the configured FwdState method with it.
/// Holds only a CbcPointer, so a destroyed FwdState safely cancels the call.
class FwdStatePeerAnswerDialer: public CallDialer, public Security::PeerConnector::CbDialer
{
public:
    /// the FwdState member to call when the encryptor answer arrives
    typedef void (FwdState::*Method)(Security::EncryptorAnswer &);

    FwdStatePeerAnswerDialer(Method method, FwdState *fwd):
        method_(method), fwd_(fwd), answer_() {}

    /* CallDialer API */
    virtual bool canDial(AsyncCall &call) { return fwd_.valid(); } // dial only while FwdState is still alive
    void dial(AsyncCall &call) { ((&(*fwd_))->*method_)(answer_); }
    virtual void print(std::ostream &os) const {
        os << '(' << fwd_.get() << ", " << answer_ << ')';
    }

    /* Security::PeerConnector::CbDialer API */
    virtual Security::EncryptorAnswer &answer() { return answer_; }

private:
    Method method_; ///< FwdState callback to invoke from dial()
    CbcPointer<FwdState> fwd_; ///< weak pointer to the callback recipient
    Security::EncryptorAnswer answer_; ///< storage filled in by the PeerConnector job
};
105
106 void
107 FwdState::HandleStoreAbort(FwdState *fwd)
108 {
109 Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
110
111 if (Comm::IsConnOpen(fwd->serverConnection())) {
112 fwd->closeServerConnection("store entry aborted");
113 } else {
114 debugs(17, 7, HERE << "store entry aborted; no connection to close");
115 }
116 fwd->stopAndDestroy("store entry aborted");
117 }
118
119 void
120 FwdState::closePendingConnection(const Comm::ConnectionPointer &conn, const char *reason)
121 {
122 debugs(17, 3, "because " << reason << "; " << conn);
123 assert(!serverConn);
124 assert(!closeHandler);
125 if (IsConnOpen(conn)) {
126 fwdPconnPool->noteUses(fd_table[conn->fd].pconn.uses);
127 conn->close();
128 }
129 }
130
131 void
132 FwdState::closeServerConnection(const char *reason)
133 {
134 debugs(17, 3, "because " << reason << "; " << serverConn);
135 assert(Comm::IsConnOpen(serverConn));
136 comm_remove_close_handler(serverConn->fd, closeHandler);
137 closeHandler = NULL;
138 fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
139 serverConn->close();
140 }
141
142 /**** PUBLIC INTERFACE ********************************************************/
143
/// Prepares forwarding state for a single client request; locks the request
/// and the store entry so both outlive all forwarding attempts.
FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRequest * r, const AccessLogEntryPointer &alp):
    entry(e),
    request(r),
    al(alp),
    err(NULL),
    clientConn(client),
    start_t(squid_curtime),
    n_tries(0),
    destinations(new ResolvedPeers()),
    pconnRace(raceImpossible)
{
    debugs(17, 2, "Forwarding client request " << client << ", url=" << e->url());
    HTTPMSGLOCK(request); // keep the request alive; released in ~FwdState()
    e->lock("FwdState"); // matching unlock in ~FwdState()
    flags.connected_okay = false;
    flags.dont_retry = false;
    flags.forward_completed = false;
    flags.destinationsFound = false;
    debugs(17, 3, "FwdState constructed, this=" << this);
}
164
// Called once, right after object creation, when it is safe to set self
void FwdState::start(Pointer aSelf)
{
    // Protect ourselves from being destroyed when the only Server pointing
    // to us is gone (while we expect to talk to more Servers later).
    // Once we set self, we are responsible for clearing it when we do not
    // expect to talk to any servers.
    self = aSelf; // refcounted

    // We hope that either the store entry aborts or peer is selected.
    // Otherwise we are going to leak our object.

    // Ftp::Relay needs to preserve control connection on data aborts
    // so it registers its own abort handler that calls ours when needed.
    if (!request->flags.ftpNative) {
        // HandleStoreAbort() will close our server connection and destroy us
        AsyncCall::Pointer call = asyncCall(17, 4, "FwdState::Abort", cbdataDialer(&FwdState::HandleStoreAbort, this));
        entry->registerAbortCallback(call);
    }

    // just in case; should already be initialized to false
    request->flags.pinned = false;

#if STRICT_ORIGINAL_DST
    // Bug 3243: CVE 2009-0801
    // Bypass of browser same-origin access control in intercepted communication
    // To resolve this we must force DIRECT and only to the original client destination.
    const bool isIntercepted = request && !request->flags.redirected && (request->flags.intercepted || request->flags.interceptTproxy);
    const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
    if (isIntercepted && useOriginalDst) {
        selectPeerForIntercepted();
        return;
    }
#endif

    // do full route options selection
    startSelectingDestinations(request, al, entry);
}
202
/// ends forwarding; relies on refcounting so the effect may not be immediate
void
FwdState::stopAndDestroy(const char *reason)
{
    debugs(17, 3, "for " << reason);

    if (opening())
        cancelOpening(reason); // stop waiting for new connections

    PeerSelectionInitiator::subscribed = false; // may already be false
    self = nullptr; // we hope refcounting destroys us soon; may already be nil
    /* do not place any code here as this object may be gone by now */
}
216
/// Notify connOpener that we no longer need connections. We do not have to do
/// this -- connOpener would eventually notice on its own, but notifying reduces
/// waste and speeds up spare connection opening for other transactions (that
/// could otherwise wait for this transaction to use its spare allowance).
void
FwdState::cancelOpening(const char *reason)
{
    assert(calls.connector); // only call while opening() is true
    calls.connector->cancel(reason);
    calls.connector = nullptr;
    notifyConnOpener(); // wake connOpener so it notices our cancellation sooner
    connOpener.clear();
}
230
#if STRICT_ORIGINAL_DST
/// bypasses peerSelect() when dealing with intercepted requests
void
FwdState::selectPeerForIntercepted()
{
    // We do not support re-wrapping inside CONNECT.
    // Our only alternative is to fake a noteDestination() call.

    // use pinned connection if available
    // (the binding of the returned pointer was unused; test the result directly)
    if (request->pinnedConnection()) {
        // emulate the PeerSelector::selectPinned() "Skip ICP" effect
        entry->ping_status = PING_DONE;

        usePinned();
        return;
    }

    // use client original destination as second preferred choice
    const auto p = new Comm::Connection();
    p->peerType = ORIGINAL_DST;
    p->remote = clientConn->local;
    getOutgoingAddress(request, p);

    debugs(17, 3, HERE << "using client original destination: " << *p);
    destinations->addPath(p);
    destinations->destinationsFinalized = true;
    PeerSelectionInitiator::subscribed = false; // no more destinations will come
    useDestinations();
}
#endif
261
/// Finalizes forwarding bookkeeping: reports errors for empty entries,
/// completes non-empty pending entries. Must run exactly once per FwdState.
void
FwdState::completed()
{
    // guard against duplicate completion
    if (flags.forward_completed) {
        debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
        return;
    }

    flags.forward_completed = true;

    request->hier.stopPeerClock(false);

    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        debugs(17, 3, HERE << "entry aborted");
        return ;
    }

#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    if (entry->store_status == STORE_PENDING) {
        if (entry->isEmpty()) {
            // no response content at all: synthesize/report an error instead
            if (!err) // we quit (e.g., fd closed) before an error or content
                fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request, al));
            assert(err);
            errorAppendEntry(entry, err);
            err = NULL; // errorAppendEntry() frees the error (see Start())
#if USE_OPENSSL
            if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
                // inform the bumping client side; no connection is pinned here
                CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                             ConnStateData::httpsPeeked, ConnStateData::PinnedIdleContext(Comm::ConnectionPointer(nullptr), request));
            }
#endif
        } else {
            // some content was stored; complete the entry and
            // release it (presumably to avoid caching a partial reply)
            entry->complete();
            entry->releaseRequest();
        }
    }

    if (storePendingNClients(entry) > 0)
        assert(!EBIT_TEST(entry->flags, ENTRY_FWD_HDR_WAIT));

}
307
FwdState::~FwdState()
{
    debugs(17, 3, "FwdState destructor start");

    // finish bookkeeping unless completed() already ran
    if (! flags.forward_completed)
        completed();

    // tell any request-body sender that we will not retry
    doneWithRetries();

    HTTPMSGUNLOCK(request); // balances HTTPMSGLOCK() in the constructor

    delete err;

    entry->unregisterAbortCallback("FwdState object destructed");

    entry->unlock("FwdState"); // balances lock() in the constructor

    entry = NULL;

    // stop any still-pending connection-opening activity
    if (opening())
        cancelOpening("~FwdState");

    if (Comm::IsConnOpen(serverConn))
        closeServerConnection("~FwdState");

    debugs(17, 3, "FwdState destructed, this=" << this);
}
335
336 /**
337 * This is the entry point for client-side to start forwarding
338 * a transaction. It is a static method that may or may not
339 * allocate a FwdState.
340 */
341 void
342 FwdState::Start(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request, const AccessLogEntryPointer &al)
343 {
344 /** \note
345 * client_addr == no_addr indicates this is an "internal" request
346 * from peer_digest.c, asn.c, netdb.c, etc and should always
347 * be allowed. yuck, I know.
348 */
349
350 if ( Config.accessList.miss && !request->client_addr.isNoAddr() &&
351 !request->flags.internal && request->url.getScheme() != AnyP::PROTO_CACHE_OBJECT) {
352 /**
353 * Check if this host is allowed to fetch MISSES from us (miss_access).
354 * Intentionally replace the src_addr automatically selected by the checklist code
355 * we do NOT want the indirect client address to be tested here.
356 */
357 ACLFilledChecklist ch(Config.accessList.miss, request, NULL);
358 ch.al = al;
359 ch.src_addr = request->client_addr;
360 ch.syncAle(request, nullptr);
361 if (ch.fastCheck().denied()) {
362 err_type page_id;
363 page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName, 1);
364
365 if (page_id == ERR_NONE)
366 page_id = ERR_FORWARDING_DENIED;
367
368 const auto anErr = new ErrorState(page_id, Http::scForbidden, request, al);
369 errorAppendEntry(entry, anErr); // frees anErr
370 return;
371 }
372 }
373
374 debugs(17, 3, HERE << "'" << entry->url() << "'");
375 /*
376 * This seems like an odd place to bind mem_obj and request.
377 * Might want to assert that request is NULL at this point
378 */
379 entry->mem_obj->request = request;
380 #if URL_CHECKSUM_DEBUG
381
382 entry->mem_obj->checkUrlChecksum();
383 #endif
384
385 if (shutting_down) {
386 /* more yuck */
387 const auto anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request, al);
388 errorAppendEntry(entry, anErr); // frees anErr
389 return;
390 }
391
392 if (request->flags.internal) {
393 debugs(17, 2, "calling internalStart() due to request flag");
394 internalStart(clientConn, request, entry, al);
395 return;
396 }
397
398 switch (request->url.getScheme()) {
399
400 case AnyP::PROTO_CACHE_OBJECT:
401 debugs(17, 2, "calling CacheManager due to request scheme " << request->url.getScheme());
402 CacheManager::GetInstance()->start(clientConn, request, entry, al);
403 return;
404
405 case AnyP::PROTO_URN:
406 urnStart(request, entry, al);
407 return;
408
409 default:
410 FwdState::Pointer fwd = new FwdState(clientConn, entry, request, al);
411 fwd->start(fwd);
412 return;
413 }
414
415 /* NOTREACHED */
416 }
417
418 void
419 FwdState::fwdStart(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request)
420 {
421 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
422 Start(clientConn, entry, request, NULL);
423 }
424
/// subtracts time_t values, returning zero if smaller exceeds the larger value
/// time_t might be unsigned so we need to be careful when subtracting times...
static inline time_t
diffOrZero(const time_t larger, const time_t smaller)
{
    if (larger <= smaller)
        return 0; // avoid unsigned underflow
    return larger - smaller;
}
432
433 /// time left to finish the whole forwarding process (which started at fwdStart)
434 time_t
435 FwdState::ForwardTimeout(const time_t fwdStart)
436 {
437 // time already spent on forwarding (0 if clock went backwards)
438 const time_t timeSpent = diffOrZero(squid_curtime, fwdStart);
439 return diffOrZero(Config.Timeout.forward, timeSpent);
440 }
441
442 bool
443 FwdState::EnoughTimeToReForward(const time_t fwdStart)
444 {
445 return ForwardTimeout(fwdStart) > 0;
446 }
447
448 void
449 FwdState::useDestinations()
450 {
451 if (!destinations->empty()) {
452 connectStart();
453 } else {
454 if (PeerSelectionInitiator::subscribed) {
455 debugs(17, 4, "wait for more destinations to try");
456 return; // expect a noteDestination*() call
457 }
458
459 debugs(17, 3, HERE << "Connection failed: " << entry->url());
460 if (!err) {
461 const auto anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request, al);
462 fail(anErr);
463 } // else use actual error from last connection attempt
464
465 stopAndDestroy("tried all destinations");
466 }
467 }
468
/// Records the given error as the (latest) reason for forwarding failure and
/// handles possible persistent-connection race consequences. Takes ownership
/// of errorState.
void
FwdState::fail(ErrorState * errorState)
{
    debugs(17, 3, err_type_str[errorState->type] << " \"" << Http::StatusCodeString(errorState->httpStatus) << "\"\n\t" << entry->url());

    // the most recent error wins
    delete err;
    err = errorState;

    if (!errorState->request)
        errorState->request = request;

    // the remaining logic deals with pconn race effects only
    if (err->type != ERR_ZERO_SIZE_OBJECT)
        return;

    if (pconnRace == racePossible) {
        debugs(17, 5, HERE << "pconn race happened");
        pconnRace = raceHappened;
        if (destinationReceipt) {
            // allow retrying the destination that suffered the race
            destinations->reinstatePath(destinationReceipt);
            destinationReceipt = nullptr;
        }
    }

    if (ConnStateData *pinned_connection = request->pinnedConnection()) {
        pinned_connection->pinning.zeroReply = true;
        debugs(17, 4, "zero reply on pinned connection");
    }
}
497
498 /**
499 * Frees fwdState without closing FD or generating an abort
500 */
501 void
502 FwdState::unregister(Comm::ConnectionPointer &conn)
503 {
504 debugs(17, 3, HERE << entry->url() );
505 assert(serverConnection() == conn);
506 assert(Comm::IsConnOpen(conn));
507 comm_remove_close_handler(conn->fd, closeHandler);
508 closeHandler = NULL;
509 serverConn = NULL;
510 destinationReceipt = nullptr;
511 }
512
// \deprecated use unregister(Comm::ConnectionPointer &conn) instead
void
FwdState::unregister(int fd)
{
    debugs(17, 3, HERE << entry->url() );
    assert(fd == serverConnection()->fd); // fd must belong to the current server connection
    unregister(serverConn);
}
521
/**
 * FooClient modules call fwdComplete() when they are done
 * downloading an object.  Then, we either 1) re-forward the
 * request somewhere else if needed, or 2) call storeComplete()
 * to finish it off
 */
void
FwdState::complete()
{
    const auto replyStatus = entry->mem().baseReply().sline.status();
    debugs(17, 3, *entry << " status " << replyStatus << ' ' << entry->url());
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    logReplyStatus(n_tries, replyStatus);

    if (reforward()) {
        debugs(17, 3, "re-forwarding " << replyStatus << " " << entry->url());

        if (Comm::IsConnOpen(serverConn))
            unregister(serverConn);

        // discard the unusable reply before trying another destination
        entry->reset();

        useDestinations();

    } else {
        if (Comm::IsConnOpen(serverConn))
            debugs(17, 3, "server FD " << serverConnection()->fd << " not re-forwarding status " << replyStatus);
        else
            debugs(17, 3, "server (FD closed) not re-forwarding status " << replyStatus);
        entry->complete();

        // with the connection gone, no server callback will trigger our
        // bookkeeping, so finalize it here
        if (!Comm::IsConnOpen(serverConn))
            completed();

        stopAndDestroy("forwarding completed");
    }
}
563
/// PeerSelectionInitiator callback: a new candidate path was found (or, for a
/// nil path, the pinned connection should be used).
void
FwdState::noteDestination(Comm::ConnectionPointer path)
{
    flags.destinationsFound = true;

    if (!path) {
        // We can call usePinned() without fear of clashing with an earlier
        // forwarding attempt because PINNED must be the first destination.
        assert(destinations->empty());
        usePinned();
        return;
    }

    debugs(17, 3, path);

    destinations->addPath(path);

    if (Comm::IsConnOpen(serverConn)) {
        // We are already using a previously opened connection, so we cannot be
        // waiting for connOpener. We still receive destinations for backup.
        Must(!opening());
        return;
    }

    if (opening()) {
        notifyConnOpener();
        return; // and continue to wait for FwdState::noteConnection() callback
    }

    // This is the first path candidate we have seen. Create connOpener.
    useDestinations();
}
596
/// PeerSelectionInitiator callback: no more candidate paths will be supplied;
/// selectionError is set when peer selection itself failed.
void
FwdState::noteDestinationsEnd(ErrorState *selectionError)
{
    PeerSelectionInitiator::subscribed = false;
    destinations->destinationsFinalized = true;

    if (!flags.destinationsFound) {
        if (selectionError) {
            debugs(17, 3, "Will abort forwarding because path selection has failed.");
            Must(!err); // if we tried to connect, then path selection succeeded
            fail(selectionError);
        }
        else if (err)
            debugs(17, 3, "Will abort forwarding because all found paths have failed.");
        else
            debugs(17, 3, "Will abort forwarding because path selection found no paths.");

        useDestinations(); // will detect and handle the lack of paths
        return;
    }
    // else continue to use one of the previously noted destinations;
    // if all of them fail, forwarding as whole will fail
    Must(!selectionError); // finding at least one path means selection succeeded

    if (Comm::IsConnOpen(serverConn)) {
        // We are already using a previously opened connection, so we cannot be
        // waiting for connOpener. We were receiving destinations for backup.
        Must(!opening());
        return;
    }

    Must(opening()); // or we would be stuck with nothing to do or wait for
    notifyConnOpener();
    // and continue to wait for FwdState::noteConnection() callback
}
632
633 /// makes sure connOpener knows that destinations have changed
634 void
635 FwdState::notifyConnOpener()
636 {
637 if (destinations->notificationPending) {
638 debugs(17, 7, "reusing pending notification about " << *destinations);
639 } else {
640 debugs(17, 7, "notifying about " << *destinations);
641 destinations->notificationPending = true;
642 CallJobHere(17, 5, connOpener, HappyConnOpener, noteCandidatesChange);
643 }
644 }
645
646 /**** CALLBACK WRAPPERS ************************************************************/
647
648 static void
649 fwdServerClosedWrapper(const CommCloseCbParams &params)
650 {
651 FwdState *fwd = (FwdState *)params.data;
652 fwd->serverClosed(params.fd);
653 }
654
655 /**** PRIVATE *****************************************************************/
656
/*
 * FwdState::checkRetry
 *
 * Return TRUE if the request SHOULD be retried.  This method is
 * called when the HTTP connection fails, or when the connection
 * is closed before reading the end of HTTP headers from the server.
 */
bool
FwdState::checkRetry()
{
    if (shutting_down)
        return false;

    if (!self) { // we have aborted before the server called us back
        debugs(17, 5, HERE << "not retrying because of earlier abort");
        // we will be destroyed when the server clears its Pointer to us
        return false;
    }

    if (entry->store_status != STORE_PENDING)
        return false;

    if (!entry->isEmpty()) // (part of) the reply was already stored
        return false;

    if (exhaustedTries())
        return false;

    if (request->flags.pinned && !pinnedCanRetry())
        return false;

    if (!EnoughTimeToReForward(start_t)) // forward_timeout would expire first
        return false;

    if (flags.dont_retry)
        return false;

    if (request->bodyNibbled()) // cannot replay a partially consumed body
        return false;

    // NP: not yet actually connected anywhere. retry is safe.
    if (!flags.connected_okay)
        return true;

    if (!checkRetriable())
        return false;

    return true;
}
706
707 /// Whether we may try sending this request again after a failure.
708 bool
709 FwdState::checkRetriable()
710 {
711 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
712 // complicated] code required to protect the PUT request body from being
713 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
714 if (request->body_pipe != NULL)
715 return false;
716
717 // RFC2616 9.1 Safe and Idempotent Methods
718 return (request->method.isHttpSafe() || request->method.isIdempotent());
719 }
720
/// Close handler for serverConn: credits the pconn pool with the connection's
/// use count and then retries with another destination or gives up.
void
FwdState::serverClosed(int fd)
{
    // XXX: fd is often -1 here
    debugs(17, 2, "FD " << fd << " " << entry->url() << " after " <<
           (fd >= 0 ? fd_table[fd].pconn.uses : -1) << " requests");
    if (fd >= 0 && serverConnection()->fd == fd)
        fwdPconnPool->noteUses(fd_table[fd].pconn.uses);
    retryOrBail();
}
731
/// Retries the request with another destination when retrying is allowed;
/// otherwise terminates forwarding (reporting a shutdown error if applicable).
void
FwdState::retryOrBail()
{
    if (checkRetry()) {
        debugs(17, 3, HERE << "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
        useDestinations();
        return;
    }

    // TODO: should we call completed() here and move doneWithRetries there?
    doneWithRetries();

    request->hier.stopPeerClock(false);

    // during shutdown, give the client an explanation instead of nothing
    if (self != NULL && !err && shutting_down && entry->isEmpty()) {
        const auto anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request, al);
        errorAppendEntry(entry, anErr);
    }

    stopAndDestroy("cannot retry");
}
753
754 // If the Server quits before nibbling at the request body, the body sender
755 // will not know (so that we can retry). Call this if we will not retry. We
756 // will notify the sender so that it does not get stuck waiting for space.
757 void
758 FwdState::doneWithRetries()
759 {
760 if (request && request->body_pipe != NULL)
761 request->body_pipe->expectNoConsumption();
762 }
763
// called by the server that failed after calling unregister()
void
FwdState::handleUnregisteredServerEnd()
{
    debugs(17, 2, HERE << "self=" << self << " err=" << err << ' ' << entry->url());
    assert(!Comm::IsConnOpen(serverConn)); // unregister() must have detached it
    retryOrBail();
}
772
/// starts a preparation step for an established connection; retries on failures
/// \param stepDescription brief step name for exception debugging output
/// \param conn the to-peer connection to close if startStep() throws
/// \param startStep callable that initiates the (asynchronous) step
template <typename StepStart>
void
FwdState::advanceDestination(const char *stepDescription, const Comm::ConnectionPointer &conn, const StepStart &startStep)
{
    // TODO: Extract destination-specific handling from FwdState so that all the
    // awkward, limited-scope advanceDestination() calls can be replaced with a
    // single simple try/catch,retry block.
    try {
        startStep();
        // now wait for the step callback
    } catch (...) {
        debugs (17, 2, "exception while trying to " << stepDescription << ": " << CurrentException);
        closePendingConnection(conn, "connection preparation exception");
        retryOrBail();
    }
}
790
/// called when a to-peer connection has been successfully obtained or
/// when all candidate destinations have been tried and all have failed
void
FwdState::noteConnection(HappyConnOpener::Answer &answer)
{
    assert(!destinationReceipt);

    // the opener has answered; drop our callback/job references
    calls.connector = nullptr;
    connOpener.clear();

    Must(n_tries <= answer.n_tries); // n_tries cannot decrease
    n_tries = answer.n_tries;

    ErrorState *error = nullptr;
    if ((error = answer.error.get())) {
        flags.dont_retry = true; // or HappyConnOpener would not have given up
        syncHierNote(answer.conn, request->url.host());
        Must(!Comm::IsConnOpen(answer.conn));
        answer.error.clear(); // preserve error for errorSendComplete()
    } else if (!Comm::IsConnOpen(answer.conn) || fd_table[answer.conn->fd].closing()) {
        // We do not know exactly why the connection got closed, so we play it
        // safe, allowing retries only for persistent (reused) connections
        if (answer.reused) {
            destinationReceipt = answer.conn;
            assert(destinationReceipt);
        }
        syncHierNote(answer.conn, request->url.host());
        closePendingConnection(answer.conn, "conn was closed while waiting for noteConnection");
        error = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al);
    } else {
        // a usable new or reused connection
        assert(!error);
        destinationReceipt = answer.conn;
        assert(destinationReceipt);
        // serverConn remains nil until syncWithServerConn()
    }

    if (error) {
        fail(error);
        retryOrBail();
        return;
    }

    if (answer.reused) {
        // a reused connection skips the tunnel/TLS preparation steps below
        syncWithServerConn(answer.conn, request->url.host(), answer.reused);
        return dispatch();
    }

    // Check if we need to TLS before use
    if (const auto *peer = answer.conn->getPeer()) {
        // Assume that it is only possible for the client-first from the
        // bumping modes to try connect to a remote server. The bumped
        // requests with other modes are using pinned connections or fails.
        const bool clientFirstBump = request->flags.sslBumped;
        // We need a CONNECT tunnel to send encrypted traffic through a proxy,
        // but we do not support TLS inside TLS, so we exclude HTTPS proxies.
        const bool originWantsEncryptedTraffic =
            request->method == Http::METHOD_CONNECT ||
            request->flags.sslPeek ||
            clientFirstBump;
        if (originWantsEncryptedTraffic && // the "encrypted traffic" part
                !peer->options.originserver && // the "through a proxy" part
                !peer->secure.encryptTransport) // the "exclude HTTPS proxies" part
            return advanceDestination("establish tunnel through proxy", answer.conn, [this,&answer] {
                establishTunnelThruProxy(answer.conn);
            });
    }

    secureConnectionToPeerIfNeeded(answer.conn);
}
860
/// Starts an HTTP CONNECT exchange to establish a tunnel through a cache_peer
/// proxy; the exchange resumes at tunnelEstablishmentDone().
void
FwdState::establishTunnelThruProxy(const Comm::ConnectionPointer &conn)
{
    AsyncCall::Pointer callback = asyncCall(17,4,
                                            "FwdState::tunnelEstablishmentDone",
                                            Http::Tunneler::CbDialer<FwdState>(&FwdState::tunnelEstablishmentDone, this));
    HttpRequest::Pointer requestPointer = request;
    const auto tunneler = new Http::Tunneler(conn, requestPointer, callback, connectingTimeout(conn), al);

    // TODO: Replace this hack with proper Comm::Connection-Pool association
    // that is not tied to fwdPconnPool and can handle disappearing pools.
    tunneler->noteFwdPconnUse = true;

#if USE_DELAY_POOLS
    Must(conn);
    Must(conn->getPeer()); // tunnels are only built through cache_peers
    if (!conn->getPeer()->options.no_delay)
        tunneler->setDelayId(entry->mem_obj->mostBytesAllowed());
#endif
    AsyncJob::Start(tunneler);
    // and wait for the tunnelEstablishmentDone() call
}
883
/// resumes operations after the (possibly failed) HTTP CONNECT exchange
void
FwdState::tunnelEstablishmentDone(Http::TunnelerAnswer &answer)
{
    ErrorState *error = nullptr;
    if (!answer.positive()) {
        // the tunneler failed and has already closed the connection
        Must(!Comm::IsConnOpen(answer.conn));
        error = answer.squidError.get();
        Must(error);
        answer.squidError.clear(); // preserve error for fail()
    } else if (!Comm::IsConnOpen(answer.conn) || fd_table[answer.conn->fd].closing()) {
        // The socket could get closed while our callback was queued.
        // We close Connection here to sync Connection::fd.
        closePendingConnection(answer.conn, "conn was closed while waiting for tunnelEstablishmentDone");
        error = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al);
    } else if (!answer.leftovers.isEmpty()) {
        // This should not happen because TLS servers do not speak first. If we
        // have to handle this, then pass answer.leftovers via a PeerConnector
        // to ServerBio. See ClientBio::setReadBufData().
        static int occurrences = 0;
        const auto level = (occurrences++ < 100) ? DBG_IMPORTANT : 2;
        debugs(17, level, "ERROR: Early data after CONNECT response. " <<
               "Found " << answer.leftovers.length() << " bytes. " <<
               "Closing " << answer.conn);
        error = new ErrorState(ERR_CONNECT_FAIL, Http::scBadGateway, request, al);
        closePendingConnection(answer.conn, "server spoke before tunnelEstablishmentDone");
    }
    if (error) {
        fail(error); // takes ownership of error
        retryOrBail();
        return;
    }

    secureConnectionToPeerIfNeeded(answer.conn);
}
919
/// handles an established TCP connection to peer (including origin servers)
void
FwdState::secureConnectionToPeerIfNeeded(const Comm::ConnectionPointer &conn)
{
    assert(!request->flags.pinned); // pinned connections take a different path

    const auto p = conn->getPeer();
    const bool peerWantsTls = p && p->secure.encryptTransport;
    // userWillTlsToPeerForUs assumes CONNECT == HTTPS
    const bool userWillTlsToPeerForUs = p && p->options.originserver &&
                                        request->method == Http::METHOD_CONNECT;
    const bool needTlsToPeer = peerWantsTls && !userWillTlsToPeerForUs;
    const bool clientFirstBump = request->flags.sslBumped; // client-first (already) bumped connection
    const bool needsBump = request->flags.sslPeek || clientFirstBump;

    // 'GET https://...' requests. If a peer is used the request is forwarded
    // as is
    const bool needTlsToOrigin = !p && request->url.getScheme() == AnyP::PROTO_HTTPS && !clientFirstBump;

    if (needTlsToPeer || needTlsToOrigin || needsBump) {
        return advanceDestination("secure connection to peer", conn, [this,&conn] {
            secureConnectionToPeer(conn);
        });
    }

    // if not encrypting just run the post-connect actions
    successfullyConnectedToPeer(conn);
}
948
/// encrypts an established TCP connection to peer (including origin servers)
void
FwdState::secureConnectionToPeer(const Comm::ConnectionPointer &conn)
{
    HttpRequest::Pointer requestPointer = request;
    AsyncCall::Pointer callback = asyncCall(17,4,
                                            "FwdState::ConnectedToPeer",
                                            FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
    // limit TLS negotiation to the remaining connection establishment time
    const auto sslNegotiationTimeout = connectingTimeout(conn);
    Security::PeerConnector *connector = nullptr;
#if USE_OPENSSL
    // SslBump peeking uses a dedicated connector variant
    if (request->flags.sslPeek)
        connector = new Ssl::PeekingPeerConnector(requestPointer, conn, clientConn, callback, al, sslNegotiationTimeout);
    else
#endif
        connector = new Security::BlindPeerConnector(requestPointer, conn, callback, al, sslNegotiationTimeout);
    connector->noteFwdPconnUse = true;
    AsyncJob::Start(connector); // will call our callback
}
968
/// called when all negotiations with the TLS-speaking peer have been completed
void
FwdState::connectedToPeer(Security::EncryptorAnswer &answer)
{
    ErrorState *error = nullptr;
    if ((error = answer.error.get())) {
        // the connector failed and has already closed the connection
        Must(!Comm::IsConnOpen(answer.conn));
        answer.error.clear(); // preserve error for errorSendComplete()
    } else if (answer.tunneled) {
        // TODO: When ConnStateData establishes tunnels, its state changes
        // [in ways that may affect logging?]. Consider informing
        // ConnStateData about our tunnel or otherwise unifying tunnel
        // establishment [side effects].
        complete(); // destroys us
        return;
    } else if (!Comm::IsConnOpen(answer.conn) || fd_table[answer.conn->fd].closing()) {
        // the socket could get closed while our callback was queued
        closePendingConnection(answer.conn, "conn was closed while waiting for connectedToPeer");
        error = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al);
    }

    if (error) {
        fail(error); // takes ownership of error
        retryOrBail();
        return;
    }

    successfullyConnectedToPeer(answer.conn);
}
997
998 /// called when all negotiations with the peer have been completed
999 void
1000 FwdState::successfullyConnectedToPeer(const Comm::ConnectionPointer &conn)
1001 {
1002 syncWithServerConn(conn, request->url.host(), false);
1003
1004 // should reach ConnStateData before the dispatched Client job starts
1005 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
1006 ConnStateData::notePeerConnection, serverConnection());
1007
1008 if (serverConnection()->getPeer())
1009 peerConnectSucceded(serverConnection()->getPeer());
1010
1011 dispatch();
1012 }
1013
1014 /// commits to using the given open to-peer connection
1015 void
1016 FwdState::syncWithServerConn(const Comm::ConnectionPointer &conn, const char *host, const bool reused)
1017 {
1018 Must(IsConnOpen(conn));
1019 serverConn = conn;
1020 // no effect on destinationReceipt (which may even be nil here)
1021
1022 closeHandler = comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
1023
1024 if (reused) {
1025 pconnRace = racePossible;
1026 ResetMarkingsToServer(request, *serverConn);
1027 } else {
1028 pconnRace = raceImpossible;
1029 // Comm::ConnOpener already applied proper/current markings
1030 }
1031
1032 syncHierNote(serverConn, host);
1033 }
1034
1035 void
1036 FwdState::syncHierNote(const Comm::ConnectionPointer &server, const char *host)
1037 {
1038 if (request)
1039 request->hier.resetPeerNotes(server, host);
1040 if (al)
1041 al->hier.resetPeerNotes(server, host);
1042 }
1043
1044 /**
1045 * Called after forwarding path selection (via peer select) has taken place
1046 * and whenever forwarding needs to attempt a new connection (routing failover).
1047 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
1048 */
1049 void
1050 FwdState::connectStart()
1051 {
1052 debugs(17, 3, *destinations << " to " << entry->url());
1053
1054 Must(!request->pinnedConnection());
1055
1056 assert(!destinations->empty());
1057 assert(!opening());
1058
1059 // Ditch error page if it was created before.
1060 // A new one will be created if there's another problem
1061 delete err;
1062 err = nullptr;
1063 request->clearError();
1064 serverConn = nullptr;
1065 destinationReceipt = nullptr;
1066
1067 request->hier.startPeerClock();
1068
1069 calls.connector = asyncCall(17, 5, "FwdState::noteConnection", HappyConnOpener::CbDialer<FwdState>(&FwdState::noteConnection, this));
1070
1071 HttpRequest::Pointer cause = request;
1072 const auto cs = new HappyConnOpener(destinations, calls.connector, cause, start_t, n_tries, al);
1073 cs->setHost(request->url.host());
1074 bool retriable = checkRetriable();
1075 if (!retriable && Config.accessList.serverPconnForNonretriable) {
1076 ACLFilledChecklist ch(Config.accessList.serverPconnForNonretriable, request, nullptr);
1077 ch.al = al;
1078 ch.syncAle(request, nullptr);
1079 retriable = ch.fastCheck().allowed();
1080 }
1081 cs->setRetriable(retriable);
1082 cs->allowPersistent(pconnRace != raceHappened);
1083 destinations->notificationPending = true; // start() is async
1084 connOpener = cs;
1085 AsyncJob::Start(cs);
1086 }
1087
1088 /// send request on an existing connection dedicated to the requesting client
1089 void
1090 FwdState::usePinned()
1091 {
1092 const auto connManager = request->pinnedConnection();
1093 debugs(17, 7, "connection manager: " << connManager);
1094
1095 try {
1096 // TODO: Refactor syncWithServerConn() and callers to always set
1097 // serverConn inside that method.
1098 serverConn = ConnStateData::BorrowPinnedConnection(request, al);
1099 debugs(17, 5, "connection: " << serverConn);
1100 } catch (ErrorState * const anErr) {
1101 syncHierNote(nullptr, connManager ? connManager->pinning.host : request->url.host());
1102 serverConn = nullptr;
1103 fail(anErr);
1104 // Connection managers monitor their idle pinned to-server
1105 // connections and close from-client connections upon seeing
1106 // a to-server connection closure. Retrying here is futile.
1107 stopAndDestroy("pinned connection failure");
1108 return;
1109 }
1110
1111 ++n_tries;
1112 request->flags.pinned = true;
1113
1114 assert(connManager);
1115 if (connManager->pinnedAuth())
1116 request->flags.auth = true;
1117
1118 // the server may close the pinned connection before this request
1119 const auto reused = true;
1120 syncWithServerConn(serverConn, connManager->pinning.host, reused);
1121
1122 dispatch();
1123 }
1124
/// Hands the prepared request off to the protocol-specific server-side module
/// (HTTP, FTP, gopher, whois, ...) over the now-established server connection.
void
FwdState::dispatch()
{
    debugs(17, 3, clientConn << ": Fetching " << request->method << ' ' << entry->url());
    /*
     * Assert that server_fd is set. This is to guarantee that fwdState
     * is attached to something and will be deallocated when server_fd
     * is closed.
     */
    assert(Comm::IsConnOpen(serverConn));

    fd_note(serverConnection()->fd, entry->url());

    fd_table[serverConnection()->fd].noteUse();

    /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
    assert(entry->ping_status != PING_WAITING);

    assert(entry->locked());

    EBIT_SET(entry->flags, ENTRY_DISPATCHED);

    // from here on, errors are no longer "connection setup" errors
    flags.connected_okay = true;

    netdbPingSite(request->url.host());

    /* Retrieves remote server TOS or MARK value, and stores it as part of the
     * original client request FD object. It is later used to forward
     * remote server's TOS/MARK in the response to the client in case of a MISS.
     */
    if (Ip::Qos::TheConfig.isHitNfmarkActive()) {
        if (Comm::IsConnOpen(clientConn) && Comm::IsConnOpen(serverConnection())) {
            fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
            /* Get the netfilter CONNMARK */
            clientFde->nfConnmarkFromServer = Ip::Qos::getNfConnmark(serverConnection(), Ip::Qos::dirOpened);
        }
    }

#if _SQUID_LINUX_
    /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
    if (Ip::Qos::TheConfig.isHitTosActive()) {
        if (Comm::IsConnOpen(clientConn)) {
            fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
            /* Get the TOS value for the packet */
            Ip::Qos::getTosFromServer(serverConnection(), clientFde);
        }
    }
#endif

#if USE_OPENSSL
    // SslBump peek: hand the server connection back to the client side for
    // pinning instead of fetching anything over it ourselves
    if (request->flags.sslPeek) {
        CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                     ConnStateData::httpsPeeked, ConnStateData::PinnedIdleContext(serverConnection(), request));
        unregister(serverConn); // async call owns it now
        complete(); // destroys us
        return;
    }
#endif

    if (const auto peer = serverConnection()->getPeer()) {
        // requests relayed through a cache_peer always speak HTTP to it
        ++peer->stats.fetches;
        request->prepForPeering(*peer);
        httpStart(this);
    } else {
        // going direct: pick the client module by the request URI scheme
        assert(!request->flags.sslPeek);
        request->prepForDirect();

        switch (request->url.getScheme()) {

        case AnyP::PROTO_HTTPS:
            httpStart(this);
            break;

        case AnyP::PROTO_HTTP:
            httpStart(this);
            break;

        case AnyP::PROTO_GOPHER:
            gopherStart(this);
            break;

        case AnyP::PROTO_FTP:
            if (request->flags.ftpNative)
                Ftp::StartRelay(this);
            else
                Ftp::StartGateway(this);
            break;

        case AnyP::PROTO_CACHE_OBJECT:

        case AnyP::PROTO_URN:
            // both schemes are handled long before forwarding
            fatal_dump("Should never get here");
            break;

        case AnyP::PROTO_WHOIS:
            whoisStart(this);
            break;

        case AnyP::PROTO_WAIS: /* Not implemented */

        default:
            debugs(17, DBG_IMPORTANT, "WARNING: Cannot retrieve '" << entry->url() << "'.");
            const auto anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request, al);
            fail(anErr);
            // Set the dont_retry flag because this is not a transient (network) error.
            flags.dont_retry = true;
            if (Comm::IsConnOpen(serverConn)) {
                serverConn->close(); // trigger cleanup
            }
            break;
        }
    }
}
1238
1239 /*
1240 * FwdState::reforward
1241 *
1242 * returns TRUE if the transaction SHOULD be re-forwarded to the
1243 * next choice in the serverDestinations list. This method is called when
1244 * peer communication completes normally, or experiences
1245 * some error after receiving the end of HTTP headers.
1246 */
1247 int
1248 FwdState::reforward()
1249 {
1250 StoreEntry *e = entry;
1251
1252 if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
1253 debugs(17, 3, HERE << "entry aborted");
1254 return 0;
1255 }
1256
1257 assert(e->store_status == STORE_PENDING);
1258 assert(e->mem_obj);
1259 #if URL_CHECKSUM_DEBUG
1260
1261 e->mem_obj->checkUrlChecksum();
1262 #endif
1263
1264 debugs(17, 3, HERE << e->url() << "?" );
1265
1266 if (request->flags.pinned && !pinnedCanRetry()) {
1267 debugs(17, 3, "pinned connection; cannot retry");
1268 return 0;
1269 }
1270
1271 if (!EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
1272 debugs(17, 3, HERE << "No, ENTRY_FWD_HDR_WAIT isn't set");
1273 return 0;
1274 }
1275
1276 if (exhaustedTries())
1277 return 0;
1278
1279 if (request->bodyNibbled())
1280 return 0;
1281
1282 if (destinations->empty() && !PeerSelectionInitiator::subscribed) {
1283 debugs(17, 3, HERE << "No alternative forwarding paths left");
1284 return 0;
1285 }
1286
1287 const auto s = entry->mem().baseReply().sline.status();
1288 debugs(17, 3, HERE << "status " << s);
1289 return reforwardableStatus(s);
1290 }
1291
1292 static void
1293 fwdStats(StoreEntry * s)
1294 {
1295 int i;
1296 int j;
1297 storeAppendPrintf(s, "Status");
1298
1299 for (j = 1; j < MAX_FWD_STATS_IDX; ++j) {
1300 storeAppendPrintf(s, "\ttry#%d", j);
1301 }
1302
1303 storeAppendPrintf(s, "\n");
1304
1305 for (i = 0; i <= (int) Http::scInvalidHeader; ++i) {
1306 if (FwdReplyCodes[0][i] == 0)
1307 continue;
1308
1309 storeAppendPrintf(s, "%3d", i);
1310
1311 for (j = 0; j <= MAX_FWD_STATS_IDX; ++j) {
1312 storeAppendPrintf(s, "\t%d", FwdReplyCodes[j][i]);
1313 }
1314
1315 storeAppendPrintf(s, "\n");
1316 }
1317 }
1318
1319 /**** STATIC MEMBER FUNCTIONS *************************************************/
1320
1321 bool
1322 FwdState::reforwardableStatus(const Http::StatusCode s) const
1323 {
1324 switch (s) {
1325
1326 case Http::scBadGateway:
1327
1328 case Http::scGatewayTimeout:
1329 return true;
1330
1331 case Http::scForbidden:
1332
1333 case Http::scInternalServerError:
1334
1335 case Http::scNotImplemented:
1336
1337 case Http::scServiceUnavailable:
1338 return Config.retry.onerror;
1339
1340 default:
1341 return false;
1342 }
1343
1344 /* NOTREACHED */
1345 }
1346
/// one-time module initialization: registers the forwarding statistics
/// report with the cache manager
void
FwdState::initModule()
{
    RegisterWithCacheManager();
}
1352
1353 void
1354 FwdState::RegisterWithCacheManager(void)
1355 {
1356 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats, 0, 1);
1357 }
1358
1359 void
1360 FwdState::logReplyStatus(int tries, const Http::StatusCode status)
1361 {
1362 if (status > Http::scInvalidHeader)
1363 return;
1364
1365 assert(tries >= 0);
1366
1367 if (tries > MAX_FWD_STATS_IDX)
1368 tries = MAX_FWD_STATS_IDX;
1369
1370 ++ FwdReplyCodes[tries][status];
1371 }
1372
/// whether the number of forwarding attempts has reached the configured
/// forward_max_tries limit
bool
FwdState::exhaustedTries() const
{
    return n_tries >= Config.forward_max_tries;
}
1378
1379 bool
1380 FwdState::pinnedCanRetry() const
1381 {
1382 assert(request->flags.pinned);
1383
1384 // pconn race on pinned connection: Currently we do not have any mechanism
1385 // to retry current pinned connection path.
1386 if (pconnRace == raceHappened)
1387 return false;
1388
1389 // If a bumped connection was pinned, then the TLS client was given our peer
1390 // details. Do not retry because we do not ensure that those details stay
1391 // constant. Step1-bumped connections do not get our TLS peer details, are
1392 // never pinned, and, hence, never reach this method.
1393 if (request->flags.sslBumped)
1394 return false;
1395
1396 // The other pinned cases are FTP proxying and connection-based HTTP
1397 // authentication. TODO: Do these cases have restrictions?
1398 return true;
1399 }
1400
1401 time_t
1402 FwdState::connectingTimeout(const Comm::ConnectionPointer &conn) const
1403 {
1404 const auto connTimeout = conn->connectTimeout(start_t);
1405 return positiveTimeout(connTimeout);
1406 }
1407
1408 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1409
1410 /*
1411 * DPW 2007-05-19
1412 * Formerly static, but now used by client_side_request.cc
1413 */
1414 /// Checks for a TOS value to apply depending on the ACL
1415 tos_t
1416 aclMapTOS(acl_tos * head, ACLChecklist * ch)
1417 {
1418 for (acl_tos *l = head; l; l = l->next) {
1419 if (!l->aclList || ch->fastCheck(l->aclList).allowed())
1420 return l->tos;
1421 }
1422
1423 return 0;
1424 }
1425
1426 /// Checks for a netfilter mark value to apply depending on the ACL
1427 Ip::NfMarkConfig
1428 aclFindNfMarkConfig(acl_nfmark * head, ACLChecklist * ch)
1429 {
1430 for (acl_nfmark *l = head; l; l = l->next) {
1431 if (!l->aclList || ch->fastCheck(l->aclList).allowed())
1432 return l->markConfig;
1433 }
1434
1435 return {};
1436 }
1437
1438 void
1439 getOutgoingAddress(HttpRequest * request, const Comm::ConnectionPointer &conn)
1440 {
1441 // skip if an outgoing address is already set.
1442 if (!conn->local.isAnyAddr()) return;
1443
1444 // ensure that at minimum the wildcard local matches remote protocol
1445 if (conn->remote.isIPv4())
1446 conn->local.setIPv4();
1447
1448 // maybe use TPROXY client address
1449 if (request && request->flags.spoofClientIp) {
1450 if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
1451 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1452 if (Config.onoff.tproxy_uses_indirect_client)
1453 conn->local = request->indirect_client_addr;
1454 else
1455 #endif
1456 conn->local = request->client_addr;
1457 conn->local.port(0); // let OS pick the source port to prevent address clashes
1458 // some flags need setting on the socket to use this address
1459 conn->flags |= COMM_DOBIND;
1460 conn->flags |= COMM_TRANSPARENT;
1461 return;
1462 }
1463 // else no tproxy today ...
1464 }
1465
1466 if (!Config.accessList.outgoing_address) {
1467 return; // anything will do.
1468 }
1469
1470 ACLFilledChecklist ch(NULL, request, NULL);
1471 ch.dst_peer_name = conn->getPeer() ? conn->getPeer()->name : NULL;
1472 ch.dst_addr = conn->remote;
1473
1474 // TODO use the connection details in ACL.
1475 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1476
1477 for (Acl::Address *l = Config.accessList.outgoing_address; l; l = l->next) {
1478
1479 /* check if the outgoing address is usable to the destination */
1480 if (conn->remote.isIPv4() != l->addr.isIPv4()) continue;
1481
1482 /* check ACLs for this outgoing address */
1483 if (!l->aclList || ch.fastCheck(l->aclList).allowed()) {
1484 conn->local = l->addr;
1485 return;
1486 }
1487 }
1488 }
1489
1490 /// \returns the TOS value that should be set on the to-peer connection
1491 static tos_t
1492 GetTosToServer(HttpRequest * request, Comm::Connection &conn)
1493 {
1494 if (!Ip::Qos::TheConfig.tosToServer)
1495 return 0;
1496
1497 ACLFilledChecklist ch(NULL, request, NULL);
1498 ch.dst_peer_name = conn.getPeer() ? conn.getPeer()->name : nullptr;
1499 ch.dst_addr = conn.remote;
1500 return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
1501 }
1502
1503 /// \returns the Netfilter mark that should be set on the to-peer connection
1504 static nfmark_t
1505 GetNfmarkToServer(HttpRequest * request, Comm::Connection &conn)
1506 {
1507 if (!Ip::Qos::TheConfig.nfmarkToServer)
1508 return 0;
1509
1510 ACLFilledChecklist ch(NULL, request, NULL);
1511 ch.dst_peer_name = conn.getPeer() ? conn.getPeer()->name : nullptr;
1512 ch.dst_addr = conn.remote;
1513 const auto mc = aclFindNfMarkConfig(Ip::Qos::TheConfig.nfmarkToServer, &ch);
1514 return mc.mark;
1515 }
1516
1517 void
1518 GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
1519 {
1520 // Get the server side TOS and Netfilter mark to be set on the connection.
1521 conn.tos = GetTosToServer(request, conn);
1522 conn.nfmark = GetNfmarkToServer(request, conn);
1523 debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos) << " netfilter mark " << conn.nfmark);
1524 }
1525
/// recomputes the outgoing TOS/mark for the connection and applies any
/// non-zero values to the already-open socket (used for reused connections)
void
ResetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
{
    GetMarkingsToServer(request, conn);

    // TODO: Avoid these calls if markings has not changed.
    if (conn.tos)
        Ip::Qos::setSockTos(&conn, conn.tos);
    if (conn.nfmark)
        Ip::Qos::setSockNfmark(&conn, conn.nfmark);
}
1537