]> git.ipfire.org Git - thirdparty/squid.git/blob - src/FwdState.cc
CI: Upgrade GitHub Setup Node and CodeQL actions to Node 20 (#1845)
[thirdparty/squid.git] / src / FwdState.cc
1 /*
2 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 17 Request Forwarding */
10
11 #include "squid.h"
12 #include "AccessLogEntry.h"
13 #include "acl/Address.h"
14 #include "acl/FilledChecklist.h"
15 #include "acl/Gadgets.h"
16 #include "anyp/PortCfg.h"
17 #include "base/AsyncCallbacks.h"
18 #include "base/AsyncCbdataCalls.h"
19 #include "CacheManager.h"
20 #include "CachePeer.h"
21 #include "client_side.h"
22 #include "clients/forward.h"
23 #include "clients/HttpTunneler.h"
24 #include "clients/WhoisGateway.h"
25 #include "comm/Connection.h"
26 #include "comm/ConnOpener.h"
27 #include "comm/Loops.h"
28 #include "CommCalls.h"
29 #include "errorpage.h"
30 #include "event.h"
31 #include "fd.h"
32 #include "fde.h"
33 #include "FwdState.h"
34 #include "globals.h"
35 #include "HappyConnOpener.h"
36 #include "hier_code.h"
37 #include "http.h"
38 #include "http/Stream.h"
39 #include "HttpReply.h"
40 #include "HttpRequest.h"
41 #include "icmp/net_db.h"
42 #include "internal.h"
43 #include "ip/Intercept.h"
44 #include "ip/NfMarkConfig.h"
45 #include "ip/QosConfig.h"
46 #include "ip/tools.h"
47 #include "MemObject.h"
48 #include "mgr/Registration.h"
49 #include "neighbors.h"
50 #include "pconn.h"
51 #include "PeerPoolMgr.h"
52 #include "ResolvedPeers.h"
53 #include "security/BlindPeerConnector.h"
54 #include "SquidConfig.h"
55 #include "ssl/PeekingPeerConnector.h"
56 #include "Store.h"
57 #include "StoreClient.h"
58 #include "urn.h"
59 #if USE_OPENSSL
60 #include "ssl/cert_validate_message.h"
61 #include "ssl/Config.h"
62 #include "ssl/helper.h"
63 #include "ssl/ServerBump.h"
64 #include "ssl/support.h"
65 #else
66 #include "security/EncryptorAnswer.h"
67 #endif
68
69 #include <cerrno>
70
/// Comm close handler for the to-server connection (body defined below)
static CLCB fwdServerClosedWrapper;

/// cache manager statistics callback (OBJH); registration is not in this chunk
static OBJH fwdStats;

/// upper bound on the per-attempt index used by FwdReplyCodes
#define MAX_FWD_STATS_IDX 9
/// counters indexed by [forwarding attempt number][HTTP reply status]
static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];

/// pool of idle persistent connections to origin servers and peers
PconnPool *fwdPconnPool = new PconnPool("server-peers", nullptr);

CBDATA_CLASS_INIT(FwdState);
81
82 void
83 FwdState::HandleStoreAbort(FwdState *fwd)
84 {
85 Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
86
87 if (Comm::IsConnOpen(fwd->serverConnection())) {
88 fwd->closeServerConnection("store entry aborted");
89 } else {
90 debugs(17, 7, "store entry aborted; no connection to close");
91 }
92 fwd->stopAndDestroy("store entry aborted");
93 }
94
/// Closes a to-server connection opened for this FwdState but never adopted
/// as serverConn (e.g., one still going through CONNECT/TLS preparation).
void
FwdState::closePendingConnection(const Comm::ConnectionPointer &conn, const char *reason)
{
    debugs(17, 3, "because " << reason << "; " << conn);
    assert(!serverConn); // only pre-adoption connections qualify
    assert(!closeHandler); // we have not registered a close handler for conn
    if (IsConnOpen(conn)) {
        // record persistent-connection reuse before closing the descriptor
        fwdPconnPool->noteUses(fd_table[conn->fd].pconn.uses);
        conn->close();
    }
}
106
/// Deliberately closes the adopted to-server connection (serverConn),
/// removing our close handler first so serverClosed() is not invoked.
void
FwdState::closeServerConnection(const char *reason)
{
    debugs(17, 3, "because " << reason << "; " << serverConn);
    assert(Comm::IsConnOpen(serverConn));
    comm_remove_close_handler(serverConn->fd, closeHandler);
    closeHandler = nullptr;
    fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses); // pconn accounting
    serverConn->close();
}
117
118 /**** PUBLIC INTERFACE ********************************************************/
119
/// Creates forwarding state for one client request/StoreEntry pair.
/// Callers must follow up with start(); see FwdState::Start().
FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRequest * r, const AccessLogEntryPointer &alp):
    entry(e),
    request(r),
    al(alp),
    err(nullptr),
    clientConn(client),
    start_t(squid_curtime),
    n_tries(0),
    waitingForDispatched(false),
    destinations(new ResolvedPeers()),
    pconnRace(raceImpossible),
    storedWholeReply_(nullptr)
{
    debugs(17, 2, "Forwarding client request " << client << ", url=" << e->url());
    HTTPMSGLOCK(request); // keep the request alive for our lifetime
    e->lock("FwdState"); // keep the entry alive for our lifetime
    flags.connected_okay = false;
    flags.dont_retry = false;
    flags.forward_completed = false;
    flags.destinationsFound = false;
    debugs(17, 3, "FwdState constructed, this=" << this);
}
142
// Called once, right after object creation, when it is safe to set self
/// Finishes initialization: registers the store-abort callback and starts
/// destination (peer) selection.
void FwdState::start(Pointer aSelf)
{
    // Protect ourselves from being destroyed when the only Server pointing
    // to us is gone (while we expect to talk to more Servers later).
    // Once we set self, we are responsible for clearing it when we do not
    // expect to talk to any servers.
    self = aSelf; // refcounted

    // We hope that either the store entry aborts or peer is selected.
    // Otherwise we are going to leak our object.

    // Ftp::Relay needs to preserve control connection on data aborts
    // so it registers its own abort handler that calls ours when needed.
    if (!request->flags.ftpNative) {
        AsyncCall::Pointer call = asyncCall(17, 4, "FwdState::Abort", cbdataDialer(&FwdState::HandleStoreAbort, this));
        entry->registerAbortCallback(call);
    }

    // just in case; should already be initialized to false
    request->flags.pinned = false;

#if STRICT_ORIGINAL_DST
    // Bug 3243: CVE 2009-0801
    // Bypass of browser same-origin access control in intercepted communication
    // To resolve this we must force DIRECT and only to the original client destination.
    const bool isIntercepted = request && !request->flags.redirected && (request->flags.intercepted || request->flags.interceptTproxy);
    const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
    if (isIntercepted && useOriginalDst) {
        selectPeerForIntercepted();
        return;
    }
#endif

    // do full route options selection
    startSelectingDestinations(request, al, entry);
}
180
/// ends forwarding; relies on refcounting so the effect may not be immediate
void
FwdState::stopAndDestroy(const char *reason)
{
    debugs(17, 3, "for " << reason);

    cancelStep(reason); // stop any in-progress connection/tunnel/TLS subtask

    PeerSelectionInitiator::subscribed = false; // may already be false
    self = nullptr; // we hope refcounting destroys us soon; may already be nil
    /* do not place any code here as this object may be gone by now */
}
193
/// Notify a pending subtask, if any, that we no longer need its help. We do not
/// have to do this -- the subtask job will eventually end -- but ending it
/// earlier reduces waste and may reduce DoS attack surface.
void
FwdState::cancelStep(const char *reason)
{
    // at most one of these three waits is expected to be active at a time;
    // cancel() is a no-op for inactive waits
    transportWait.cancel(reason);
    encryptionWait.cancel(reason);
    peerWait.cancel(reason);
}
204
#if STRICT_ORIGINAL_DST
/// bypasses peerSelect() when dealing with intercepted requests
void
FwdState::selectPeerForIntercepted()
{
    // We do not support re-wrapping inside CONNECT.
    // Our only alternative is to fake a noteDestination() call.

    // use pinned connection if available
    // NOTE: `client` is only tested for non-nil; the pointer itself is unused
    if (ConnStateData *client = request->pinnedConnection()) {
        // emulate the PeerSelector::selectPinned() "Skip ICP" effect
        entry->ping_status = PING_DONE;

        usePinned();
        return;
    }

    // use client original destination as second preferred choice
    const auto p = new Comm::Connection();
    p->peerType = ORIGINAL_DST;
    p->remote = clientConn->local; // the address the client originally dialed
    getOutgoingAddress(request, p);

    debugs(17, 3, "using client original destination: " << *p);
    destinations->addPath(p);
    destinations->destinationsFinalized = true; // no more destinations coming
    PeerSelectionInitiator::subscribed = false;
    useDestinations();
}
#endif
235
/// updates ALE when we finalize the transaction error (if any)
void
FwdState::updateAleWithFinalError()
{
    if (!err || !al)
        return; // nothing to record or nowhere to record it

    // map read timeouts to ETIMEDOUT so LogTagsErrors sees a meaningful errno
    const auto lte = LogTagsErrors::FromErrno(err->type == ERR_READ_TIMEOUT ? ETIMEDOUT : err->xerrno);
    al->cache.code.err.update(lte);
    if (!err->detail) {
        // supply a generic detail so the log always has one
        static const auto d = MakeNamedErrorDetail("WITH_SERVER");
        err->detailError(d);
    }
    al->updateError(Error(err->type, err->detail));
}
251
/// Finalizes the forwarding outcome: completes or error-fills the StoreEntry.
/// Expected to run at most once per FwdState.
void
FwdState::completed()
{
    if (flags.forward_completed) {
        debugs(17, DBG_IMPORTANT, "ERROR: FwdState::completed called on a completed request! Bad!");
        return;
    }

    flags.forward_completed = true;

    request->hier.stopPeerClock(false);

    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        debugs(17, 3, "entry aborted");
        return ;
    }

#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    if (entry->store_status == STORE_PENDING) {
        if (entry->isEmpty()) {
            // nothing was stored: deliver an error response instead
            assert(!storedWholeReply_);
            if (!err) // we quit (e.g., fd closed) before an error or content
                fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request, al));
            assert(err);
            updateAleWithFinalError();
            errorAppendEntry(entry, err);
            err = nullptr; // errorAppendEntry() owns/consumed it now
#if USE_OPENSSL
            if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
                // tell the bumping client side that peeking failed (nil connection)
                CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                             ConnStateData::httpsPeeked, ConnStateData::PinnedIdleContext(Comm::ConnectionPointer(nullptr), request));
                // no flags.dont_retry: completed() is a post-reforward() act
            }
#endif
        } else {
            updateAleWithFinalError(); // if any
            if (storedWholeReply_)
                entry->completeSuccessfully(storedWholeReply_);
            else
                entry->completeTruncated("FwdState default");
        }
    }

    if (storePendingNClients(entry) > 0)
        assert(!EBIT_TEST(entry->flags, ENTRY_FWD_HDR_WAIT));

}
303
/// Releases all held resources; runs completed() if nobody did so earlier.
FwdState::~FwdState()
{
    debugs(17, 3, "FwdState destructor start");

    if (! flags.forward_completed)
        completed(); // finalize the StoreEntry before letting it go

    doneWithRetries(); // unstick any request-body producer

    HTTPMSGUNLOCK(request);

    delete err;

    entry->unregisterAbortCallback("FwdState object destructed");

    entry->unlock("FwdState");

    entry = nullptr;

    cancelStep("~FwdState");

    if (Comm::IsConnOpen(serverConn))
        closeServerConnection("~FwdState");

    debugs(17, 3, "FwdState destructed, this=" << this);
}
330
/**
 * This is the entry point for client-side to start forwarding
 * a transaction. It is a static method that may or may not
 * allocate a FwdState: internal, URN, and denied requests are
 * handled without one.
 */
void
FwdState::Start(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request, const AccessLogEntryPointer &al)
{
    /** \note
     * client_addr == no_addr indicates this is an "internal" request
     * from peer_digest.c, asn.c, netdb.c, etc and should always
     * be allowed.  yuck, I know.
     */

    if ( Config.accessList.miss && !request->client_addr.isNoAddr() && !request->flags.internal) {
        /**
         * Check if this host is allowed to fetch MISSES from us (miss_access).
         * Intentionally replace the src_addr automatically selected by the checklist code
         * we do NOT want the indirect client address to be tested here.
         */
        ACLFilledChecklist ch(Config.accessList.miss, request, nullptr);
        ch.al = al;
        ch.src_addr = request->client_addr;
        ch.syncAle(request, nullptr);
        if (ch.fastCheck().denied()) {
            // pick the deny_info page, defaulting to a generic denial
            auto page_id = FindDenyInfoPage(ch.currentAnswer(), true);
            if (page_id == ERR_NONE)
                page_id = ERR_FORWARDING_DENIED;

            const auto anErr = new ErrorState(page_id, Http::scForbidden, request, al);
            errorAppendEntry(entry, anErr); // frees anErr
            return;
        }
    }

    debugs(17, 3, "'" << entry->url() << "'");
    /*
     * This seems like an odd place to bind mem_obj and request.
     * Might want to assert that request is NULL at this point
     */
    entry->mem_obj->request = request;
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    if (shutting_down) {
        /* more yuck */
        const auto anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request, al);
        errorAppendEntry(entry, anErr); // frees anErr
        return;
    }

    if (request->flags.internal) {
        debugs(17, 2, "calling internalStart() due to request flag");
        internalStart(clientConn, request, entry, al);
        return;
    }

    switch (request->url.getScheme()) {

    case AnyP::PROTO_URN:
        urnStart(request, entry, al);
        return;

    default:
        // regular forwarding: create the FwdState and let it manage itself
        FwdState::Pointer fwd = new FwdState(clientConn, entry, request, al);
        fwd->start(fwd);
        return;
    }

    /* NOTREACHED */
}
404
/// Convenience wrapper around Start() for callers that have no ALE.
void
FwdState::fwdStart(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request)
{
    // Hides AccessLogEntry.h from code that does not supply ALE anyway.
    Start(clientConn, entry, request, nullptr);
}
411
/// subtracts time_t values, returning zero if smaller exceeds the larger value
/// time_t might be unsigned so we need to be careful when subtracting times...
static inline time_t
diffOrZero(const time_t larger, const time_t smaller)
{
    if (larger <= smaller)
        return 0; // avoid unsigned wrap-around on "negative" differences
    return larger - smaller;
}
419
/// time left to finish the whole forwarding process (which started at fwdStart)
time_t
FwdState::ForwardTimeout(const time_t fwdStart)
{
    // time already spent on forwarding (0 if clock went backwards)
    const time_t timeSpent = diffOrZero(squid_curtime, fwdStart);
    // remaining budget from the configured forward_timeout
    return diffOrZero(Config.Timeout.forward, timeSpent);
}
428
/// whether the overall forwarding timeout still allows another attempt
bool
FwdState::EnoughTimeToReForward(const time_t fwdStart)
{
    return ForwardTimeout(fwdStart) > 0;
}
434
435 void
436 FwdState::useDestinations()
437 {
438 if (!destinations->empty()) {
439 connectStart();
440 } else {
441 if (PeerSelectionInitiator::subscribed) {
442 debugs(17, 4, "wait for more destinations to try");
443 return; // expect a noteDestination*() call
444 }
445
446 debugs(17, 3, "Connection failed: " << entry->url());
447 if (!err) {
448 const auto anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request, al);
449 fail(anErr);
450 } // else use actual error from last connection attempt
451
452 stopAndDestroy("tried all destinations");
453 }
454 }
455
/// Records a forwarding error, replacing any previously recorded one.
/// Takes ownership of errorState.
void
FwdState::fail(ErrorState * errorState)
{
    debugs(17, 3, err_type_str[errorState->type] << " \"" << Http::StatusCodeString(errorState->httpStatus) << "\"\n\t" << entry->url());

    delete err; // discard any earlier error
    err = errorState;

    if (!errorState->request)
        errorState->request = request;

    if (err->type == ERR_ZERO_SIZE_OBJECT)
        reactToZeroSizeObject();

    destinationReceipt = nullptr; // may already be nil
}
472
/// ERR_ZERO_SIZE_OBJECT requires special adjustments
void
FwdState::reactToZeroSizeObject()
{
    assert(err->type == ERR_ZERO_SIZE_OBJECT);

    if (pconnRace == racePossible) {
        debugs(17, 5, "pconn race happened");
        pconnRace = raceHappened;
        if (destinationReceipt) {
            // put the destination back so the retry can use a fresh connection
            destinations->reinstatePath(destinationReceipt);
            destinationReceipt = nullptr;
        }
    }

    if (ConnStateData *pinned_connection = request->pinnedConnection()) {
        // let the client side know the pinned server gave an empty reply
        pinned_connection->pinning.zeroReply = true;
        debugs(17, 4, "zero reply on pinned connection");
    }
}
493
/**
 * Frees fwdState without closing FD or generating an abort
 */
void
FwdState::unregister(Comm::ConnectionPointer &conn)
{
    debugs(17, 3, entry->url() );
    assert(serverConnection() == conn);
    assert(Comm::IsConnOpen(conn));
    // detach our close handler so serverClosed() will not fire for this conn
    comm_remove_close_handler(conn->fd, closeHandler);
    closeHandler = nullptr;
    serverConn = nullptr;
    destinationReceipt = nullptr;
}
508
// \deprecated use unregister(Comm::ConnectionPointer &conn) instead
void
FwdState::unregister(int fd)
{
    debugs(17, 3, entry->url() );
    assert(fd == serverConnection()->fd); // fd must match the current serverConn
    unregister(serverConn);
}
517
/**
 * FooClient modules call fwdComplete() when they are done
 * downloading an object.  Then, we either 1) re-forward the
 * request somewhere else if needed, or 2) call storeComplete()
 * to finish it off
 */
void
FwdState::complete()
{
    const auto replyStatus = entry->mem().baseReply().sline.status();
    debugs(17, 3, *entry << " status " << replyStatus << ' ' << entry->url());
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    logReplyStatus(n_tries, replyStatus);

    // will already be false if complete() was called before/without dispatch()
    waitingForDispatched = false;

    if (reforward()) {
        debugs(17, 3, "re-forwarding " << replyStatus << " " << entry->url());

        if (Comm::IsConnOpen(serverConn))
            unregister(serverConn);
        serverConn = nullptr;
        destinationReceipt = nullptr;

        // discard the stored (re-forwardable) reply before retrying
        storedWholeReply_ = nullptr;
        entry->reset();

        useDestinations();

    } else {
        if (Comm::IsConnOpen(serverConn))
            debugs(17, 3, "server FD " << serverConnection()->fd << " not re-forwarding status " << replyStatus);
        else
            debugs(17, 3, "server (FD closed) not re-forwarding status " << replyStatus);

        completed();

        stopAndDestroy("forwarding completed");
    }
}
563
/// Whether a forwarding attempt to some selected destination X is in progress
/// (after successfully opening/reusing a transport connection to X).
/// See also: transportWait
bool
FwdState::transporting() const
{
    // true while tunneling, encrypting, or awaiting a dispatched reply
    return peerWait || encryptionWait || waitingForDispatched;
}
572
/// Records (the reason for) the caller's certainty that the entire reply was
/// written to Store, enabling completeSuccessfully() in completed().
void
FwdState::markStoredReplyAsWhole(const char * const whyWeAreSure)
{
    debugs(17, 5, whyWeAreSure << " for " << *entry);

    // the caller wrote everything to Store, but Store may silently abort writes
    if (EBIT_TEST(entry->flags, ENTRY_ABORTED))
        return;

    storedWholeReply_ = whyWeAreSure;
}
584
/// PeerSelectionInitiator API: receives one selected destination path
/// (a nil path means "use the pinned connection").
void
FwdState::noteDestination(Comm::ConnectionPointer path)
{
    flags.destinationsFound = true;

    if (!path) {
        // We can call usePinned() without fear of clashing with an earlier
        // forwarding attempt because PINNED must be the first destination.
        assert(destinations->empty());
        usePinned();
        return;
    }

    debugs(17, 3, path);

    destinations->addPath(path);

    if (transportWait) {
        // a connection opener is already running; just update its candidates
        assert(!transporting());
        notifyConnOpener();
        return; // and continue to wait for FwdState::noteConnection() callback
    }

    if (transporting())
        return; // and continue to receive destinations for backup

    useDestinations();
}
613
/// PeerSelectionInitiator API: the peer selection process has finished,
/// successfully (nil selectionError) or otherwise.
void
FwdState::noteDestinationsEnd(ErrorState *selectionError)
{
    PeerSelectionInitiator::subscribed = false;
    destinations->destinationsFinalized = true;

    if (!flags.destinationsFound) {
        if (selectionError) {
            debugs(17, 3, "Will abort forwarding because path selection has failed.");
            Must(!err); // if we tried to connect, then path selection succeeded
            fail(selectionError);
        }

        stopAndDestroy("path selection found no paths");
        return;
    }
    // else continue to use one of the previously noted destinations;
    // if all of them fail, forwarding as whole will fail
    Must(!selectionError); // finding at least one path means selection succeeded

    if (transportWait) {
        // tell the opener there will be no more candidates
        assert(!transporting());
        notifyConnOpener();
        return; // and continue to wait for FwdState::noteConnection() callback
    }

    if (transporting()) {
        // We are already using a previously opened connection (but were also
        // receiving more destinations in case we need to re-forward).
        debugs(17, 7, "keep transporting");
        return;
    }

    // destinationsFound, but none of them worked, and we were waiting for more
    debugs(17, 7, "no more destinations to try after " << n_tries << " failed attempts");
    if (!err) {
        const auto finalError = new ErrorState(ERR_CANNOT_FORWARD, Http::scBadGateway, request, al);
        static const auto d = MakeNamedErrorDetail("REFORWARD_TO_NONE");
        finalError->detailError(d);
        fail(finalError);
    } // else use actual error from last forwarding attempt
    stopAndDestroy("all found paths have failed");
}
657
658 /// makes sure connection opener knows that the destinations have changed
659 void
660 FwdState::notifyConnOpener()
661 {
662 if (destinations->notificationPending) {
663 debugs(17, 7, "reusing pending notification about " << *destinations);
664 } else {
665 debugs(17, 7, "notifying about " << *destinations);
666 destinations->notificationPending = true;
667 CallJobHere(17, 5, transportWait.job(), HappyConnOpener, noteCandidatesChange);
668 }
669 }
670
671 /**** CALLBACK WRAPPERS ************************************************************/
672
673 static void
674 fwdServerClosedWrapper(const CommCloseCbParams &params)
675 {
676 FwdState *fwd = (FwdState *)params.data;
677 fwd->serverClosed();
678 }
679
680 /**** PRIVATE *****************************************************************/
681
/*
 * FwdState::checkRetry
 *
 * Return TRUE if the request SHOULD be retried.  This method is
 * called when the HTTP connection fails, or when the connection
 * is closed before reading the end of HTTP headers from the server.
 */
bool
FwdState::checkRetry()
{
    if (shutting_down)
        return false;

    if (!self) { // we have aborted before the server called us back
        debugs(17, 5, "not retrying because of earlier abort");
        // we will be destroyed when the server clears its Pointer to us
        return false;
    }

    if (entry->store_status != STORE_PENDING)
        return false;

    if (!entry->isEmpty())
        return false; // cannot retry once reply bytes reached the Store

    if (exhaustedTries())
        return false;

    if (request->flags.pinned && !pinnedCanRetry())
        return false;

    if (!EnoughTimeToReForward(start_t))
        return false; // the forward_timeout budget is spent

    if (flags.dont_retry)
        return false;

    if (request->bodyNibbled())
        return false; // part of the request body was already consumed

    // NP: not yet actually connected anywhere. retry is safe.
    if (!flags.connected_okay)
        return true;

    if (!checkRetriable())
        return false;

    return true;
}
731
732 /// Whether we may try sending this request again after a failure.
733 bool
734 FwdState::checkRetriable()
735 {
736 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
737 // complicated] code required to protect the PUT request body from being
738 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
739 if (request->body_pipe != nullptr)
740 return false;
741
742 // RFC2616 9.1 Safe and Idempotent Methods
743 return (request->method.isHttpSafe() || request->method.isIdempotent());
744 }
745
/// Comm close handler: the to-server connection closed unexpectedly.
void
FwdState::serverClosed()
{
    // XXX: This method logic attempts to tolerate Connection::close() called
    // for serverConn earlier, by one of our dispatch()ed jobs. If that happens,
    // serverConn will already be closed here or, worse, it will already be open
    // for the next forwarding attempt. The current code prevents us getting
    // stuck, but the long term solution is to stop sharing serverConn.
    debugs(17, 2, serverConn);
    if (Comm::IsConnOpen(serverConn)) {
        const auto uses = fd_table[serverConn->fd].pconn.uses;
        debugs(17, 3, "prior uses: " << uses);
        fwdPconnPool->noteUses(uses); // XXX: May not have come from fwdPconnPool
        serverConn->noteClosure();
    }
    serverConn = nullptr;
    closeHandler = nullptr;
    destinationReceipt = nullptr;

    // will already be false if this closure happened before/without dispatch()
    waitingForDispatched = false;

    retryOrBail();
}
770
/// Retries forwarding if checkRetry() allows it; otherwise wraps up,
/// possibly substituting a shutdown error for an empty entry.
void
FwdState::retryOrBail()
{
    if (checkRetry()) {
        debugs(17, 3, "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
        useDestinations();
        return;
    }

    // TODO: should we call completed() here and move doneWithRetries there?
    doneWithRetries();

    request->hier.stopPeerClock(false);

    if (self != nullptr && !err && shutting_down && entry->isEmpty()) {
        const auto anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request, al);
        errorAppendEntry(entry, anErr);
    }

    stopAndDestroy("cannot retry");
}
792
// If the Server quits before nibbling at the request body, the body sender
// will not know (so that we can retry). Call this if we will not retry. We
// will notify the sender so that it does not get stuck waiting for space.
void
FwdState::doneWithRetries()
{
    if (request && request->body_pipe != nullptr)
        request->body_pipe->expectNoConsumption();
}
802
// called by the server that failed after calling unregister()
void
FwdState::handleUnregisteredServerEnd()
{
    debugs(17, 2, "self=" << self << " err=" << err << ' ' << entry->url());
    assert(!Comm::IsConnOpen(serverConn)); // unregister() must have detached it
    serverConn = nullptr;
    destinationReceipt = nullptr;

    // might already be false due to uncertainties documented in serverClosed()
    waitingForDispatched = false;

    retryOrBail();
}
817
/// starts a preparation step for an established connection; retries on failures
/// \param stepDescription human-readable step name used in the failure debugs()
/// \param conn the connection being prepared; closed on startStep() exceptions
/// \param startStep callable that launches the (asynchronous) preparation step
template <typename StepStart>
void
FwdState::advanceDestination(const char *stepDescription, const Comm::ConnectionPointer &conn, const StepStart &startStep)
{
    // TODO: Extract destination-specific handling from FwdState so that all the
    // awkward, limited-scope advanceDestination() calls can be replaced with a
    // single simple try/catch,retry block.
    try {
        startStep();
        // now wait for the step callback
    } catch (...) {
        debugs (17, 2, "exception while trying to " << stepDescription << ": " << CurrentException);
        closePendingConnection(conn, "connection preparation exception");
        if (!err)
            fail(new ErrorState(ERR_GATEWAY_FAILURE, Http::scInternalServerError, request, al));
        retryOrBail();
    }
}
837
/// called when a to-peer connection has been successfully obtained or
/// when all candidate destinations have been tried and all have failed
void
FwdState::noteConnection(HappyConnOpener::Answer &answer)
{
    assert(!destinationReceipt);

    transportWait.finish();

    updateAttempts(answer.n_tries);

    ErrorState *error = nullptr;
    if ((error = answer.error.get())) {
        // the opener exhausted all candidates
        flags.dont_retry = true; // or HappyConnOpener would not have given up
        syncHierNote(answer.conn, request->url.host());
        Must(!Comm::IsConnOpen(answer.conn));
        answer.error.clear(); // preserve error for errorSendComplete()
    } else if (!Comm::IsConnOpen(answer.conn) || fd_table[answer.conn->fd].closing()) {
        // The socket could get closed while our callback was queued. Sync
        // Connection. XXX: Connection::fd may already be stale/invalid here.
        // We do not know exactly why the connection got closed, so we play it
        // safe, allowing retries only for persistent (reused) connections
        if (answer.reused) {
            // keep the receipt so reactToZeroSizeObject() can reinstate the path
            destinationReceipt = answer.conn;
            assert(destinationReceipt);
        }
        syncHierNote(answer.conn, request->url.host());
        closePendingConnection(answer.conn, "conn was closed while waiting for noteConnection");
        error = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al);
    } else {
        // success: remember the destination for possible reinstatement
        assert(!error);
        destinationReceipt = answer.conn;
        assert(destinationReceipt);
        // serverConn remains nil until syncWithServerConn()
    }

    if (error) {
        fail(error);
        retryOrBail();
        return;
    }

    if (answer.reused) {
        // reused pconns were already prepared (tunnel/TLS); dispatch directly
        syncWithServerConn(answer.conn, request->url.host(), answer.reused);
        return dispatch();
    }

    // Check if we need to TLS before use
    if (const auto *peer = answer.conn->getPeer()) {
        // Assume that it is only possible for the client-first from the
        // bumping modes to try connect to a remote server. The bumped
        // requests with other modes are using pinned connections or fails.
        const bool clientFirstBump = request->flags.sslBumped;
        // We need a CONNECT tunnel to send encrypted traffic through a proxy,
        // but we do not support TLS inside TLS, so we exclude HTTPS proxies.
        const bool originWantsEncryptedTraffic =
            request->method == Http::METHOD_CONNECT ||
            request->flags.sslPeek ||
            clientFirstBump;
        if (originWantsEncryptedTraffic && // the "encrypted traffic" part
                !peer->options.originserver && // the "through a proxy" part
                !peer->secure.encryptTransport) // the "exclude HTTPS proxies" part
            return advanceDestination("establish tunnel through proxy", answer.conn, [this,&answer] {
            establishTunnelThruProxy(answer.conn);
        });
    }

    secureConnectionToPeerIfNeeded(answer.conn);
}
907
/// Starts an HTTP CONNECT exchange over conn to create a tunnel through the
/// cache_peer proxy; tunnelEstablishmentDone() handles the outcome.
void
FwdState::establishTunnelThruProxy(const Comm::ConnectionPointer &conn)
{
    const auto callback = asyncCallback(17, 4, FwdState::tunnelEstablishmentDone, this);
    HttpRequest::Pointer requestPointer = request;
    const auto tunneler = new Http::Tunneler(conn, requestPointer, callback, connectingTimeout(conn), al);

    // TODO: Replace this hack with proper Comm::Connection-Pool association
    // that is not tied to fwdPconnPool and can handle disappearing pools.
    tunneler->noteFwdPconnUse = true;

#if USE_DELAY_POOLS
    Must(conn);
    Must(conn->getPeer()); // only cache_peer connections are tunneled here
    if (!conn->getPeer()->options.no_delay)
        tunneler->setDelayId(entry->mem_obj->mostBytesAllowed());
#endif
    peerWait.start(tunneler, callback);
}
927
/// resumes operations after the (possibly failed) HTTP CONNECT exchange
void
FwdState::tunnelEstablishmentDone(Http::TunnelerAnswer &answer)
{
    peerWait.finish();

    ErrorState *error = nullptr;
    if (!answer.positive()) {
        // the tunneler failed and reported the error (without a connection)
        Must(!answer.conn);
        error = answer.squidError.get();
        Must(error);
        answer.squidError.clear(); // preserve error for fail()
    } else if (!Comm::IsConnOpen(answer.conn) || fd_table[answer.conn->fd].closing()) {
        // The socket could get closed while our callback was queued. Sync
        // Connection. XXX: Connection::fd may already be stale/invalid here.
        closePendingConnection(answer.conn, "conn was closed while waiting for tunnelEstablishmentDone");
        error = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al);
    } else if (!answer.leftovers.isEmpty()) {
        // This should not happen because TLS servers do not speak first. If we
        // have to handle this, then pass answer.leftovers via a PeerConnector
        // to ServerBio. See ClientBio::setReadBufData().
        static int occurrences = 0;
        const auto level = (occurrences++ < 100) ? DBG_IMPORTANT : 2;
        debugs(17, level, "ERROR: Early data after CONNECT response. " <<
               "Found " << answer.leftovers.length() << " bytes. " <<
               "Closing " << answer.conn);
        error = new ErrorState(ERR_CONNECT_FAIL, Http::scBadGateway, request, al);
        closePendingConnection(answer.conn, "server spoke before tunnelEstablishmentDone");
    }
    if (error) {
        fail(error);
        retryOrBail();
        return;
    }

    secureConnectionToPeerIfNeeded(answer.conn);
}
965
/// handles an established TCP connection to peer (including origin servers)
void
FwdState::secureConnectionToPeerIfNeeded(const Comm::ConnectionPointer &conn)
{
    assert(!request->flags.pinned); // pinned connections take a different path

    const auto p = conn->getPeer();
    const bool peerWantsTls = p && p->secure.encryptTransport;
    // userWillTlsToPeerForUs assumes CONNECT == HTTPS
    const bool userWillTlsToPeerForUs = p && p->options.originserver &&
                                        request->method == Http::METHOD_CONNECT;
    const bool needTlsToPeer = peerWantsTls && !userWillTlsToPeerForUs;
    const bool clientFirstBump = request->flags.sslBumped; // client-first (already) bumped connection
    const bool needsBump = request->flags.sslPeek || clientFirstBump;

    // 'GET https://...' requests. If a peer is used the request is forwarded
    // as is
    const bool needTlsToOrigin = !p && request->url.getScheme() == AnyP::PROTO_HTTPS && !clientFirstBump;

    if (needTlsToPeer || needTlsToOrigin || needsBump) {
        return advanceDestination("secure connection to peer", conn, [this,&conn] {
            secureConnectionToPeer(conn);
        });
    }

    // if not encrypting just run the post-connect actions
    successfullyConnectedToPeer(conn);
}
994
/// encrypts an established TCP connection to peer (including origin servers)
void
FwdState::secureConnectionToPeer(const Comm::ConnectionPointer &conn)
{
    HttpRequest::Pointer requestPointer = request;
    // connectedToPeer() is notified when the TLS negotiation ends
    const auto callback = asyncCallback(17, 4, FwdState::connectedToPeer, this);
    // limit negotiation to whatever remains of the connect timeout budget
    const auto sslNegotiationTimeout = connectingTimeout(conn);
    Security::PeerConnector *connector = nullptr;
#if USE_OPENSSL
    // SslBump peeking needs a connector that coordinates with the client
    // connection; otherwise, a blind (no client involvement) connector is used
    if (request->flags.sslPeek)
        connector = new Ssl::PeekingPeerConnector(requestPointer, conn, clientConn, callback, al, sslNegotiationTimeout);
    else
#endif
        connector = new Security::BlindPeerConnector(requestPointer, conn, callback, al, sslNegotiationTimeout);
    connector->noteFwdPconnUse = true; // NOTE(review): presumably enables pconn reuse accounting on failures — confirm in Security::PeerConnector
    encryptionWait.start(connector, callback);
}
1012
/// called when all negotiations with the TLS-speaking peer have been completed
/// \param answer the Security::PeerConnector outcome: an error, a tunnel, or
///        an encrypted connection ready for use
void
FwdState::connectedToPeer(Security::EncryptorAnswer &answer)
{
    encryptionWait.finish();

    ErrorState *error = nullptr;
    if ((error = answer.error.get())) {
        assert(!answer.conn);
        answer.error.clear(); // preserve error for errorSendComplete()
    } else if (answer.tunneled) {
        assert(!answer.conn);
        // TODO: When ConnStateData establishes tunnels, its state changes
        // [in ways that may affect logging?]. Consider informing
        // ConnStateData about our tunnel or otherwise unifying tunnel
        // establishment [side effects].
        flags.dont_retry = true; // TunnelStateData took forwarding control
        entry->abort();
        complete(); // destroys us
        return;
    } else if (!Comm::IsConnOpen(answer.conn) || fd_table[answer.conn->fd].closing()) {
        // The socket could get closed while our callback was queued. Sync
        // Connection. XXX: Connection::fd may already be stale/invalid here.
        closePendingConnection(answer.conn, "conn was closed while waiting for connectedToPeer");
        error = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al);
    }

    if (error) {
        fail(error);
        retryOrBail(); // may try another destination or send the error
        return;
    }

    // the now-secured connection is ready for the Client job
    successfullyConnectedToPeer(answer.conn);
}
1048
/// called when all negotiations with the peer have been completed
void
FwdState::successfullyConnectedToPeer(const Comm::ConnectionPointer &conn)
{
    // commit to this connection and sync hierarchy/ALE notes (not reused)
    syncWithServerConn(conn, request->url.host(), false);

    // should reach ConnStateData before the dispatched Client job starts
    CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                 ConnStateData::notePeerConnection, serverConnection());

    NoteOutgoingConnectionSuccess(serverConnection()->getPeer());

    dispatch();
}
1063
1064 /// commits to using the given open to-peer connection
1065 void
1066 FwdState::syncWithServerConn(const Comm::ConnectionPointer &conn, const char *host, const bool reused)
1067 {
1068 Must(IsConnOpen(conn));
1069 serverConn = conn;
1070 // no effect on destinationReceipt (which may even be nil here)
1071
1072 closeHandler = comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
1073
1074 if (reused) {
1075 pconnRace = racePossible;
1076 ResetMarkingsToServer(request, *serverConn);
1077 } else {
1078 pconnRace = raceImpossible;
1079 // Comm::ConnOpener already applied proper/current markings
1080 }
1081
1082 syncHierNote(serverConn, host);
1083 }
1084
1085 void
1086 FwdState::syncHierNote(const Comm::ConnectionPointer &server, const char *host)
1087 {
1088 if (request)
1089 request->hier.resetPeerNotes(server, host);
1090 if (al)
1091 al->hier.resetPeerNotes(server, host);
1092 }
1093
1094 /// sets n_tries to the given value (while keeping ALE, if any, in sync)
1095 void
1096 FwdState::updateAttempts(const int newValue)
1097 {
1098 Assure(n_tries <= newValue); // n_tries cannot decrease
1099
1100 // Squid probably creates at most one FwdState/TunnelStateData object per
1101 // ALE, but, unlike an assignment would, this increment logic works even if
1102 // Squid uses multiple such objects for a given ALE in some esoteric cases.
1103 if (al)
1104 al->requestAttempts += (newValue - n_tries);
1105
1106 n_tries = newValue;
1107 debugs(17, 5, n_tries);
1108 }
1109
1110 /**
1111 * Called after forwarding path selection (via peer select) has taken place
1112 * and whenever forwarding needs to attempt a new connection (routing failover).
1113 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
1114 */
1115 void
1116 FwdState::connectStart()
1117 {
1118 debugs(17, 3, *destinations << " to " << entry->url());
1119
1120 Must(!request->pinnedConnection());
1121
1122 assert(!destinations->empty());
1123 assert(!transporting());
1124
1125 // Ditch error page if it was created before.
1126 // A new one will be created if there's another problem
1127 delete err;
1128 err = nullptr;
1129 request->clearError();
1130
1131 request->hier.startPeerClock();
1132
1133 const auto callback = asyncCallback(17, 5, FwdState::noteConnection, this);
1134 HttpRequest::Pointer cause = request;
1135 const auto cs = new HappyConnOpener(destinations, callback, cause, start_t, n_tries, al);
1136 cs->setHost(request->url.host());
1137 bool retriable = checkRetriable();
1138 if (!retriable && Config.accessList.serverPconnForNonretriable) {
1139 ACLFilledChecklist ch(Config.accessList.serverPconnForNonretriable, request, nullptr);
1140 ch.al = al;
1141 ch.syncAle(request, nullptr);
1142 retriable = ch.fastCheck().allowed();
1143 }
1144 cs->setRetriable(retriable);
1145 cs->allowPersistent(pconnRace != raceHappened);
1146 destinations->notificationPending = true; // start() is async
1147 transportWait.start(cs, callback);
1148 }
1149
/// send request on an existing connection dedicated to the requesting client
void
FwdState::usePinned()
{
    const auto connManager = request->pinnedConnection();
    debugs(17, 7, "connection manager: " << connManager);

    try {
        // TODO: Refactor syncWithServerConn() and callers to always set
        // serverConn inside that method.
        serverConn = ConnStateData::BorrowPinnedConnection(request, al);
        debugs(17, 5, "connection: " << serverConn);
    } catch (ErrorState * const anErr) {
        // record the intended destination even though borrowing failed
        syncHierNote(nullptr, connManager ? connManager->pinning.host : request->url.host());
        serverConn = nullptr;
        fail(anErr);
        // Connection managers monitor their idle pinned to-server
        // connections and close from-client connections upon seeing
        // a to-server connection closure. Retrying here is futile.
        stopAndDestroy("pinned connection failure");
        return;
    }

    // borrowing the pinned connection counts as a forwarding attempt
    updateAttempts(n_tries + 1);

    request->flags.pinned = true;

    assert(connManager);
    if (connManager->pinnedAuth())
        request->flags.auth = true;

    // the server may close the pinned connection before this request
    const auto reused = true;
    syncWithServerConn(serverConn, connManager->pinning.host, reused);

    dispatch();
}
1187
/// hands the request over to a protocol-specific Client job that will use
/// the established (and possibly secured) to-server connection
void
FwdState::dispatch()
{
    debugs(17, 3, clientConn << ": Fetching " << request->method << ' ' << entry->url());
    /*
     * Assert that server_fd is set.  This is to guarantee that fwdState
     * is attached to something and will be deallocated when server_fd
     * is closed.
     */
    assert(Comm::IsConnOpen(serverConn));

    assert(!waitingForDispatched);
    waitingForDispatched = true;

    // label the server socket with the requested URL (for cachemgr/debugging)
    fd_note(serverConnection()->fd, entry->url());

    fd_table[serverConnection()->fd].noteUse();

    /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
    assert(entry->ping_status != PING_WAITING);

    assert(entry->locked());

    EBIT_SET(entry->flags, ENTRY_DISPATCHED);

    flags.connected_okay = true;

    netdbPingSite(request->url.host());

    /* Retrieves remote server TOS or MARK value, and stores it as part of the
     * original client request FD object. It is later used to forward
     * remote server's TOS/MARK in the response to the client in case of a MISS.
     */
    if (Ip::Qos::TheConfig.isHitNfmarkActive()) {
        if (Comm::IsConnOpen(clientConn) && Comm::IsConnOpen(serverConnection())) {
            fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
            /* Get the netfilter CONNMARK */
            clientFde->nfConnmarkFromServer = Ip::Qos::getNfConnmark(serverConnection(), Ip::Qos::dirOpened);
        }
    }

#if _SQUID_LINUX_
    /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
    if (Ip::Qos::TheConfig.isHitTosActive()) {
        if (Comm::IsConnOpen(clientConn)) {
            fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
            /* Get the TOS value for the packet */
            Ip::Qos::getTosFromServer(serverConnection(), clientFde);
        }
    }
#endif

#if USE_OPENSSL
    if (request->flags.sslPeek) {
        // we were just asked to peek at the server, and we did that
        CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                     ConnStateData::httpsPeeked, ConnStateData::PinnedIdleContext(serverConnection(), request));
        unregister(serverConn); // async call owns it now
        flags.dont_retry = true; // we gave up forwarding control
        entry->abort();
        complete(); // destroys us
        return;
    }
#endif

    if (const auto peer = serverConnection()->getPeer()) {
        // forwarding through a cache_peer: the HTTP client module handles it
        ++peer->stats.fetches;
        request->prepForPeering(*peer);
        httpStart(this);
    } else {
        assert(!request->flags.sslPeek);
        request->prepForDirect();

        // going direct: pick the client module matching the URL scheme
        switch (request->url.getScheme()) {

        case AnyP::PROTO_HTTPS:
            httpStart(this);
            break;

        case AnyP::PROTO_HTTP:
            httpStart(this);
            break;

        case AnyP::PROTO_FTP:
            if (request->flags.ftpNative)
                Ftp::StartRelay(this);
            else
                Ftp::StartGateway(this);
            break;

        case AnyP::PROTO_URN:
            // URN transactions should not reach dispatch()
            fatal_dump("Should never get here");
            break;

        case AnyP::PROTO_WHOIS:
            whoisStart(this);
            break;

        case AnyP::PROTO_WAIS:  /* Not implemented */

        default:
            debugs(17, DBG_IMPORTANT, "WARNING: Cannot retrieve '" << entry->url() << "'.");
            const auto anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request, al);
            fail(anErr);
            // Set the dont_retry flag because this is not a transient (network) error.
            flags.dont_retry = true;
            if (Comm::IsConnOpen(serverConn)) {
                serverConn->close(); // trigger cleanup
            }
            break;
        }
    }
}
1301
1302 /*
1303 * FwdState::reforward
1304 *
1305 * returns TRUE if the transaction SHOULD be re-forwarded to the
1306 * next choice in the serverDestinations list. This method is called when
1307 * peer communication completes normally, or experiences
1308 * some error after receiving the end of HTTP headers.
1309 */
1310 int
1311 FwdState::reforward()
1312 {
1313 StoreEntry *e = entry;
1314
1315 if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
1316 debugs(17, 3, "entry aborted");
1317 return 0;
1318 }
1319
1320 assert(e->store_status == STORE_PENDING);
1321 assert(e->mem_obj);
1322 #if URL_CHECKSUM_DEBUG
1323
1324 e->mem_obj->checkUrlChecksum();
1325 #endif
1326
1327 debugs(17, 3, e->url() << "?" );
1328
1329 if (request->flags.pinned && !pinnedCanRetry()) {
1330 debugs(17, 3, "pinned connection; cannot retry");
1331 return 0;
1332 }
1333
1334 if (!EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
1335 debugs(17, 3, "No, ENTRY_FWD_HDR_WAIT isn't set");
1336 return 0;
1337 }
1338
1339 if (exhaustedTries())
1340 return 0;
1341
1342 if (request->bodyNibbled())
1343 return 0;
1344
1345 if (destinations->empty() && !PeerSelectionInitiator::subscribed) {
1346 debugs(17, 3, "No alternative forwarding paths left");
1347 return 0;
1348 }
1349
1350 const auto s = entry->mem().baseReply().sline.status();
1351 debugs(17, 3, "status " << s);
1352 return Http::IsReforwardableStatus(s);
1353 }
1354
1355 static void
1356 fwdStats(StoreEntry * s)
1357 {
1358 int i;
1359 int j;
1360 storeAppendPrintf(s, "Status");
1361
1362 for (j = 1; j < MAX_FWD_STATS_IDX; ++j) {
1363 storeAppendPrintf(s, "\ttry#%d", j);
1364 }
1365
1366 storeAppendPrintf(s, "\n");
1367
1368 for (i = 0; i <= (int) Http::scInvalidHeader; ++i) {
1369 if (FwdReplyCodes[0][i] == 0)
1370 continue;
1371
1372 storeAppendPrintf(s, "%3d", i);
1373
1374 for (j = 0; j <= MAX_FWD_STATS_IDX; ++j) {
1375 storeAppendPrintf(s, "\t%d", FwdReplyCodes[j][i]);
1376 }
1377
1378 storeAppendPrintf(s, "\n");
1379 }
1380 }
1381
1382 /**** STATIC MEMBER FUNCTIONS *************************************************/
1383
/// one-time module setup: registers the forwarding cache manager report
void
FwdState::initModule()
{
    RegisterWithCacheManager();
}
1389
/// registers the fwdStats dump as the cache manager "forward" action
void
FwdState::RegisterWithCacheManager(void)
{
    Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats, 0, 1);
}
1395
1396 void
1397 FwdState::logReplyStatus(int tries, const Http::StatusCode status)
1398 {
1399 if (status > Http::scInvalidHeader)
1400 return;
1401
1402 assert(tries >= 0);
1403
1404 if (tries > MAX_FWD_STATS_IDX)
1405 tries = MAX_FWD_STATS_IDX;
1406
1407 ++ FwdReplyCodes[tries][status];
1408 }
1409
/// whether we have reached the configured forward_max_tries attempt limit
bool
FwdState::exhaustedTries() const
{
    return n_tries >= Config.forward_max_tries;
}
1415
1416 bool
1417 FwdState::pinnedCanRetry() const
1418 {
1419 assert(request->flags.pinned);
1420
1421 // pconn race on pinned connection: Currently we do not have any mechanism
1422 // to retry current pinned connection path.
1423 if (pconnRace == raceHappened)
1424 return false;
1425
1426 // If a bumped connection was pinned, then the TLS client was given our peer
1427 // details. Do not retry because we do not ensure that those details stay
1428 // constant. Step1-bumped connections do not get our TLS peer details, are
1429 // never pinned, and, hence, never reach this method.
1430 if (request->flags.sslBumped)
1431 return false;
1432
1433 // The other pinned cases are FTP proxying and connection-based HTTP
1434 // authentication. TODO: Do these cases have restrictions?
1435 return true;
1436 }
1437
/// \returns how much time remains (counting from start_t) for establishing
/// the given connection, clamped via positiveTimeout()
time_t
FwdState::connectingTimeout(const Comm::ConnectionPointer &conn) const
{
    const auto connTimeout = conn->connectTimeout(start_t);
    return positiveTimeout(connTimeout);
}
1444
1445 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1446
1447 /*
1448 * DPW 2007-05-19
1449 * Formerly static, but now used by client_side_request.cc
1450 */
1451 /// Checks for a TOS value to apply depending on the ACL
1452 tos_t
1453 aclMapTOS(acl_tos * head, ACLChecklist * ch)
1454 {
1455 for (acl_tos *l = head; l; l = l->next) {
1456 if (!l->aclList || ch->fastCheck(l->aclList).allowed())
1457 return l->tos;
1458 }
1459
1460 return 0;
1461 }
1462
1463 /// Checks for a netfilter mark value to apply depending on the ACL
1464 Ip::NfMarkConfig
1465 aclFindNfMarkConfig(acl_nfmark * head, ACLChecklist * ch)
1466 {
1467 for (acl_nfmark *l = head; l; l = l->next) {
1468 if (!l->aclList || ch->fastCheck(l->aclList).allowed())
1469 return l->markConfig;
1470 }
1471
1472 return {};
1473 }
1474
/// selects the outgoing (local) address for the given to-server connection,
/// honoring TPROXY spoofing and the outgoing_address ACLs
void
getOutgoingAddress(HttpRequest * request, const Comm::ConnectionPointer &conn)
{
    // skip if an outgoing address is already set.
    if (!conn->local.isAnyAddr()) return;

    // ensure that at minimum the wildcard local matches remote protocol
    if (conn->remote.isIPv4())
        conn->local.setIPv4();

    // maybe use TPROXY client address
    if (request && request->flags.spoofClientIp) {
        if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
#if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
            if (Config.onoff.tproxy_uses_indirect_client)
                conn->local = request->indirect_client_addr;
            else
#endif
                conn->local = request->client_addr;
            conn->local.port(0); // let OS pick the source port to prevent address clashes
            // some flags need setting on the socket to use this address
            conn->flags |= COMM_DOBIND;
            conn->flags |= COMM_TRANSPARENT;
            return;
        }
        // else no tproxy today ...
    }

    if (!Config.accessList.outgoing_address) {
        return; // anything will do.
    }

    ACLFilledChecklist ch(nullptr, request, nullptr);
    ch.dst_peer_name = conn->getPeer() ? conn->getPeer()->name : nullptr;
    ch.dst_addr = conn->remote;

    // TODO use the connection details in ACL.
    // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData

    for (Acl::Address *l = Config.accessList.outgoing_address; l; l = l->next) {

        /* check if the outgoing address is usable to the destination */
        if (conn->remote.isIPv4() != l->addr.isIPv4()) continue;

        /* check ACLs for this outgoing address */
        if (!l->aclList || ch.fastCheck(l->aclList).allowed()) {
            conn->local = l->addr;
            return;
        }
    }
    // no outgoing_address entry matched; leave the wildcard local address
}
1526
1527 /// \returns the TOS value that should be set on the to-peer connection
1528 static tos_t
1529 GetTosToServer(HttpRequest * request, Comm::Connection &conn)
1530 {
1531 if (!Ip::Qos::TheConfig.tosToServer)
1532 return 0;
1533
1534 ACLFilledChecklist ch(nullptr, request, nullptr);
1535 ch.dst_peer_name = conn.getPeer() ? conn.getPeer()->name : nullptr;
1536 ch.dst_addr = conn.remote;
1537 return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
1538 }
1539
1540 /// \returns the Netfilter mark that should be set on the to-peer connection
1541 static nfmark_t
1542 GetNfmarkToServer(HttpRequest * request, Comm::Connection &conn)
1543 {
1544 if (!Ip::Qos::TheConfig.nfmarkToServer)
1545 return 0;
1546
1547 ACLFilledChecklist ch(nullptr, request, nullptr);
1548 ch.dst_peer_name = conn.getPeer() ? conn.getPeer()->name : nullptr;
1549 ch.dst_addr = conn.remote;
1550 const auto mc = aclFindNfMarkConfig(Ip::Qos::TheConfig.nfmarkToServer, &ch);
1551 return mc.mark;
1552 }
1553
/// computes and stores (but does not apply to the socket) the QoS markings
/// for the given to-server connection
void
GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
{
    // Get the server side TOS and Netfilter mark to be set on the connection.
    conn.tos = GetTosToServer(request, conn);
    conn.nfmark = GetNfmarkToServer(request, conn);
    debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos) << " netfilter mark " << conn.nfmark);
}
1562
/// recomputes QoS markings for the given connection and applies them to its
/// socket (e.g., when reusing a persistent connection with stale markings)
void
ResetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
{
    GetMarkingsToServer(request, conn);

    // TODO: Avoid these calls if markings has not changed.
    if (conn.tos)
        Ip::Qos::setSockTos(&conn, conn.tos);
    if (conn.nfmark)
        Ip::Qos::setSockNfmark(&conn, conn.nfmark);
}
1574