]> git.ipfire.org Git - thirdparty/squid.git/blob - src/FwdState.cc
9c4a5cb214b18eef0a38d31b27d777f7a9d7de7f
[thirdparty/squid.git] / src / FwdState.cc
1 /*
2 * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 17 Request Forwarding */
10
11 #include "squid.h"
12 #include "AccessLogEntry.h"
13 #include "acl/Address.h"
14 #include "acl/FilledChecklist.h"
15 #include "acl/Gadgets.h"
16 #include "anyp/PortCfg.h"
17 #include "CacheManager.h"
18 #include "CachePeer.h"
19 #include "client_side.h"
20 #include "clients/forward.h"
21 #include "clients/HttpTunneler.h"
22 #include "comm/Connection.h"
23 #include "comm/ConnOpener.h"
24 #include "comm/Loops.h"
25 #include "CommCalls.h"
26 #include "errorpage.h"
27 #include "event.h"
28 #include "fd.h"
29 #include "fde.h"
30 #include "FwdState.h"
31 #include "globals.h"
32 #include "gopher.h"
33 #include "hier_code.h"
34 #include "http.h"
35 #include "http/Stream.h"
36 #include "HttpReply.h"
37 #include "HttpRequest.h"
38 #include "icmp/net_db.h"
39 #include "internal.h"
40 #include "ip/Intercept.h"
41 #include "ip/NfMarkConfig.h"
42 #include "ip/QosConfig.h"
43 #include "ip/tools.h"
44 #include "MemObject.h"
45 #include "mgr/Registration.h"
46 #include "neighbors.h"
47 #include "pconn.h"
48 #include "PeerPoolMgr.h"
49 #include "security/BlindPeerConnector.h"
50 #include "SquidConfig.h"
51 #include "SquidTime.h"
52 #include "ssl/PeekingPeerConnector.h"
53 #include "Store.h"
54 #include "StoreClient.h"
55 #include "urn.h"
56 #include "whois.h"
57 #if USE_OPENSSL
58 #include "ssl/cert_validate_message.h"
59 #include "ssl/Config.h"
60 #include "ssl/ErrorDetail.h"
61 #include "ssl/helper.h"
62 #include "ssl/ServerBump.h"
63 #include "ssl/support.h"
64 #else
65 #include "security/EncryptorAnswer.h"
66 #endif
67
68 #include <cerrno>
69
70 static CLCB fwdServerClosedWrapper;
71 static CNCB fwdConnectDoneWrapper;
72
73 static OBJH fwdStats;
74
75 #define MAX_FWD_STATS_IDX 9
76 static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];
77
78 static PconnPool *fwdPconnPool = new PconnPool("server-peers", NULL);
79 CBDATA_CLASS_INIT(FwdState);
80
81 class FwdStatePeerAnswerDialer: public CallDialer, public Security::PeerConnector::CbDialer
82 {
83 public:
84 typedef void (FwdState::*Method)(Security::EncryptorAnswer &);
85
86 FwdStatePeerAnswerDialer(Method method, FwdState *fwd):
87 method_(method), fwd_(fwd), answer_() {}
88
89 /* CallDialer API */
90 virtual bool canDial(AsyncCall &call) { return fwd_.valid(); }
91 void dial(AsyncCall &call) { ((&(*fwd_))->*method_)(answer_); }
92 virtual void print(std::ostream &os) const {
93 os << '(' << fwd_.get() << ", " << answer_ << ')';
94 }
95
96 /* Security::PeerConnector::CbDialer API */
97 virtual Security::EncryptorAnswer &answer() { return answer_; }
98
99 private:
100 Method method_;
101 CbcPointer<FwdState> fwd_;
102 Security::EncryptorAnswer answer_;
103 };
104
105 void
106 FwdState::abort(void* d)
107 {
108 FwdState* fwd = (FwdState*)d;
109 Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
110
111 if (Comm::IsConnOpen(fwd->serverConnection())) {
112 fwd->closeServerConnection("store entry aborted");
113 } else {
114 debugs(17, 7, HERE << "store entry aborted; no connection to close");
115 }
116 fwd->serverDestinations.clear();
117 fwd->stopAndDestroy("store entry aborted");
118 }
119
120 void
121 FwdState::closeServerConnection(const char *reason)
122 {
123 debugs(17, 3, "because " << reason << "; " << serverConn);
124 comm_remove_close_handler(serverConn->fd, closeHandler);
125 closeHandler = NULL;
126 fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
127 serverConn->close();
128 }
129
130 /**** PUBLIC INTERFACE ********************************************************/
131
/// Prepares forwarding of one client request. Locks the request and the
/// store entry for the lifetime of this object (both are released in the
/// destructor). Does not start forwarding: the creator must call start().
FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRequest * r, const AccessLogEntryPointer &alp):
    entry(e),
    request(r),
    al(alp),
    err(NULL),
    clientConn(client),
    start_t(squid_curtime), // forwarding deadline is measured from now
    n_tries(0),
    pconnRace(raceImpossible)
{
    debugs(17, 2, "Forwarding client request " << client << ", url=" << e->url());
    HTTPMSGLOCK(request);
    // one slot per allowed attempt; avoids reallocations while retrying
    serverDestinations.reserve(Config.forward_max_tries);
    e->lock("FwdState");
    flags.connected_okay = false;
    flags.dont_retry = false;
    flags.forward_completed = false;
    debugs(17, 3, "FwdState constructed, this=" << this);
}
151
// Called once, right after object creation, when it is safe to set self
void FwdState::start(Pointer aSelf)
{
    // Protect ourselves from being destroyed when the only Server pointing
    // to us is gone (while we expect to talk to more Servers later).
    // Once we set self, we are responsible for clearing it when we do not
    // expect to talk to any servers.
    self = aSelf; // refcounted

    // We hope that either the store entry aborts or peer is selected.
    // Otherwise we are going to leak our object.

    // Ftp::Relay needs to preserve control connection on data aborts
    // so it registers its own abort handler that calls ours when needed.
    if (!request->flags.ftpNative)
        entry->registerAbort(FwdState::abort, this);

    // just in case; should already be initialized to false
    request->flags.pinned = false;

#if STRICT_ORIGINAL_DST
    // Bug 3243: CVE 2009-0801
    // Bypass of browser same-origin access control in intercepted communication
    // To resolve this we must force DIRECT and only to the original client destination.
    const bool isIntercepted = request && !request->flags.redirected && (request->flags.intercepted || request->flags.interceptTproxy);
    const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
    if (isIntercepted && useOriginalDst) {
        selectPeerForIntercepted();
        useDestinations();
        return;
    }
#endif

    // do full route options selection
    // expect noteDestination() and/or noteDestinationsEnd() callbacks
    startSelectingDestinations(request, al, entry);
}
188
/// ends forwarding; relies on refcounting so the effect may not be immediate
void
FwdState::stopAndDestroy(const char *reason)
{
    debugs(17, 3, "for " << reason);
    // stop expecting noteDestination*() callbacks from peer selection
    PeerSelectionInitiator::subscribed = false; // may already be false
    self = nullptr; // we hope refcounting destroys us soon; may already be nil
    /* do not place any code here as this object may be gone by now */
}
198
199 #if STRICT_ORIGINAL_DST
200 /// bypasses peerSelect() when dealing with intercepted requests
201 void
202 FwdState::selectPeerForIntercepted()
203 {
204 // We do not support re-wrapping inside CONNECT.
205 // Our only alternative is to fake a noteDestination() call.
206
207 // use pinned connection if available
208 if (ConnStateData *client = request->pinnedConnection()) {
209 // emulate the PeerSelector::selectPinned() "Skip ICP" effect
210 entry->ping_status = PING_DONE;
211
212 serverDestinations.push_back(nullptr);
213 return;
214 }
215
216 // use client original destination as second preferred choice
217 const auto p = new Comm::Connection();
218 p->peerType = ORIGINAL_DST;
219 p->remote = clientConn->local;
220 getOutgoingAddress(request, p);
221
222 debugs(17, 3, HERE << "using client original destination: " << *p);
223 serverDestinations.push_back(p);
224 }
225 #endif
226
/// Finalizes the store entry exactly once, whatever the forwarding outcome:
/// turns an empty pending entry into an error reply, or completes a
/// non-empty one. Later calls are rejected (and logged as a bug).
void
FwdState::completed()
{
    if (flags.forward_completed) {
        debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
        return;
    }

    flags.forward_completed = true;

    request->hier.stopPeerClock(false);

    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        debugs(17, 3, HERE << "entry aborted");
        return ;
    }

#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    if (entry->store_status == STORE_PENDING) {
        if (entry->isEmpty()) {
            // no response was stored; make sure the client gets an error
            if (!err) // we quit (e.g., fd closed) before an error or content
                fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request, al));
            assert(err);
            errorAppendEntry(entry, err);
            err = NULL;
#if USE_OPENSSL
            // NOTE(review): the nil connection passed to httpsPeeked()
            // appears to signal a failed peek attempt — confirm with
            // ConnStateData::httpsPeeked() before relying on this.
            if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
                CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                             ConnStateData::httpsPeeked, ConnStateData::PinnedIdleContext(Comm::ConnectionPointer(nullptr), request));
            }
#endif
        } else {
            entry->complete();
            entry->releaseRequest();
        }
    }

    if (storePendingNClients(entry) > 0)
        assert(!EBIT_TEST(entry->flags, ENTRY_FWD_HDR_WAIT));

}
272
/// Releases everything the constructor and start() acquired: finalizes the
/// entry (if not done yet), unlocks request and entry, and closes any
/// still-open server connection.
FwdState::~FwdState()
{
    debugs(17, 3, "FwdState destructor start");

    // finalize the store entry unless somebody already did
    if (! flags.forward_completed)
        completed();

    doneWithRetries();

    // release the request lock taken in the constructor
    HTTPMSGUNLOCK(request);

    delete err;

    // undo the registerAbort() from start(); a no-op for ftpNative requests
    entry->unregisterAbort();

    entry->unlock("FwdState");

    entry = NULL;

    if (Comm::IsConnOpen(serverConn))
        closeServerConnection("~FwdState");

    serverDestinations.clear();

    debugs(17, 3, "FwdState destructed, this=" << this);
}
299
/**
 * This is the entry point for client-side to start forwarding
 * a transaction. It is a static method that may or may not
 * allocate a FwdState.
 *
 * Enforces miss_access, handles shutdown and internal requests, and
 * dispatches special URL schemes before creating a generic FwdState.
 */
void
FwdState::Start(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request, const AccessLogEntryPointer &al)
{
    /** \note
     * client_addr == no_addr indicates this is an "internal" request
     * from peer_digest.c, asn.c, netdb.c, etc and should always
     * be allowed.  yuck, I know.
     */

    if ( Config.accessList.miss && !request->client_addr.isNoAddr() &&
            !request->flags.internal && request->url.getScheme() != AnyP::PROTO_CACHE_OBJECT) {
        /**
         * Check if this host is allowed to fetch MISSES from us (miss_access).
         * Intentionally replace the src_addr automatically selected by the checklist code
         * we do NOT want the indirect client address to be tested here.
         */
        ACLFilledChecklist ch(Config.accessList.miss, request, NULL);
        ch.al = al;
        ch.src_addr = request->client_addr;
        ch.syncAle(request, nullptr);
        if (ch.fastCheck().denied()) {
            // look up the deny_info page configured for the matched ACL
            err_type page_id;
            page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName, 1);

            if (page_id == ERR_NONE)
                page_id = ERR_FORWARDING_DENIED;

            const auto anErr = new ErrorState(page_id, Http::scForbidden, request, al);
            errorAppendEntry(entry, anErr); // frees anErr
            return;
        }
    }

    debugs(17, 3, HERE << "'" << entry->url() << "'");
    /*
     * This seems like an odd place to bind mem_obj and request.
     * Might want to assert that request is NULL at this point
     */
    entry->mem_obj->request = request;
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    if (shutting_down) {
        /* more yuck */
        const auto anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request, al);
        errorAppendEntry(entry, anErr); // frees anErr
        return;
    }

    if (request->flags.internal) {
        debugs(17, 2, "calling internalStart() due to request flag");
        internalStart(clientConn, request, entry, al);
        return;
    }

    // special schemes are served by dedicated modules, not FwdState
    switch (request->url.getScheme()) {

    case AnyP::PROTO_CACHE_OBJECT:
        debugs(17, 2, "calling CacheManager due to request scheme " << request->url.getScheme());
        CacheManager::GetInstance()->start(clientConn, request, entry, al);
        return;

    case AnyP::PROTO_URN:
        urnStart(request, entry, al);
        return;

    default:
        // the common case: forward via a refcounted FwdState
        FwdState::Pointer fwd = new FwdState(clientConn, entry, request, al);
        fwd->start(fwd);
        return;
    }

    /* NOTREACHED */
}
381
382 void
383 FwdState::fwdStart(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request)
384 {
385 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
386 Start(clientConn, entry, request, NULL);
387 }
388
/// larger minus smaller, clamped at zero; time_t may be unsigned, so a
/// plain subtraction could wrap around instead of going negative
static inline time_t
diffOrZero(const time_t larger, const time_t smaller)
{
    if (smaller >= larger)
        return 0;
    return larger - smaller;
}
396
397 /// time left to finish the whole forwarding process (which started at fwdStart)
398 time_t
399 FwdState::ForwardTimeout(const time_t fwdStart)
400 {
401 // time already spent on forwarding (0 if clock went backwards)
402 const time_t timeSpent = diffOrZero(squid_curtime, fwdStart);
403 return diffOrZero(Config.Timeout.forward, timeSpent);
404 }
405
406 bool
407 FwdState::EnoughTimeToReForward(const time_t fwdStart)
408 {
409 return ForwardTimeout(fwdStart) > 0;
410 }
411
412 void
413 FwdState::useDestinations()
414 {
415 debugs(17, 3, serverDestinations.size() << " paths to " << entry->url());
416 if (!serverDestinations.empty()) {
417 if (!serverDestinations[0])
418 usePinned();
419 else
420 connectStart();
421 } else {
422 if (PeerSelectionInitiator::subscribed) {
423 debugs(17, 4, "wait for more destinations to try");
424 return; // expect a noteDestination*() call
425 }
426
427 debugs(17, 3, HERE << "Connection failed: " << entry->url());
428 if (!err) {
429 const auto anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request, al);
430 fail(anErr);
431 } // else use actual error from last connection attempt
432
433 stopAndDestroy("tried all destinations");
434 }
435 }
436
/// Remembers the given error (taking ownership) as the one to report if
/// forwarding ultimately fails. A zero-sized reply additionally records a
/// possible pconn race and flags the pinned connection (if any).
void
FwdState::fail(ErrorState * errorState)
{
    debugs(17, 3, err_type_str[errorState->type] << " \"" << Http::StatusCodeString(errorState->httpStatus) << "\"\n\t" << entry->url());

    // the newest error replaces any previously remembered one
    delete err;
    err = errorState;

    if (!errorState->request)
        errorState->request = request;

    // everything below applies only to zero-sized replies
    if (err->type != ERR_ZERO_SIZE_OBJECT)
        return;

    if (pconnRace == racePossible) {
        debugs(17, 5, HERE << "pconn race happened");
        pconnRace = raceHappened;
    }

    if (ConnStateData *pinned_connection = request->pinnedConnection()) {
        pinned_connection->pinning.zeroReply = true;
        debugs(17, 4, "zero reply on pinned connection");
    }
}
461
462 /**
463 * Frees fwdState without closing FD or generating an abort
464 */
465 void
466 FwdState::unregister(Comm::ConnectionPointer &conn)
467 {
468 debugs(17, 3, HERE << entry->url() );
469 assert(serverConnection() == conn);
470 assert(Comm::IsConnOpen(conn));
471 comm_remove_close_handler(conn->fd, closeHandler);
472 closeHandler = NULL;
473 serverConn = NULL;
474 }
475
476 // \deprecated use unregister(Comm::ConnectionPointer &conn) instead
477 void
478 FwdState::unregister(int fd)
479 {
480 debugs(17, 3, HERE << entry->url() );
481 assert(fd == serverConnection()->fd);
482 unregister(serverConn);
483 }
484
/**
 * FooClient modules call fwdComplete() when they are done
 * downloading an object. Then, we either 1) re-forward the
 * request somewhere else if needed, or 2) call storeComplete()
 * to finish it off
 */
void
FwdState::complete()
{
    debugs(17, 3, HERE << entry->url() << "\n\tstatus " << entry->getReply()->sline.status());
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    logReplyStatus(n_tries, entry->getReply()->sline.status());

    if (reforward()) {
        debugs(17, 3, HERE << "re-forwarding " << entry->getReply()->sline.status() << " " << entry->url());

        // detach (but keep open) the connection that produced this reply
        if (Comm::IsConnOpen(serverConn))
            unregister(serverConn);

        // discard the stored reply before fetching a replacement
        entry->reset();

        // drop the last path off the selection list. try the next one.
        if (!serverDestinations.empty()) // paranoid
            serverDestinations.erase(serverDestinations.begin());
        useDestinations();

    } else {
        if (Comm::IsConnOpen(serverConn))
            debugs(17, 3, HERE << "server FD " << serverConnection()->fd << " not re-forwarding status " << entry->getReply()->sline.status());
        else
            debugs(17, 3, HERE << "server (FD closed) not re-forwarding status " << entry->getReply()->sline.status());
        entry->complete();

        if (!Comm::IsConnOpen(serverConn))
            completed();

        stopAndDestroy("forwarding completed");
    }
}
528
529 void
530 FwdState::noteDestination(Comm::ConnectionPointer path)
531 {
532 const bool wasBlocked = serverDestinations.empty();
533 // XXX: Push even a nil path so that subsequent noteDestination() calls
534 // can rely on wasBlocked to detect ongoing/concurrent attempts.
535 // Upcoming Happy Eyeballs changes will handle this properly.
536 serverDestinations.push_back(path);
537 assert(wasBlocked || path); // pinned destinations are always selected first
538
539 if (wasBlocked)
540 useDestinations();
541 // else continue to use one of the previously noted destinations;
542 // if all of them fail, we may try this path
543 }
544
/// PeerSelectionInitiator callback: no more destinations are coming. If we
/// were blocked waiting for paths, fail (or reuse a remembered error) now;
/// otherwise let the in-progress attempts run their course.
void
FwdState::noteDestinationsEnd(ErrorState *selectionError)
{
    PeerSelectionInitiator::subscribed = false;
    if (serverDestinations.empty()) { // was blocked, waiting for more paths

        if (selectionError) {
            debugs(17, 3, "Will abort forwarding because path selection has failed.");
            Must(!err); // if we tried to connect, then path selection succeeded
            fail(selectionError);
        }
        else if (err)
            debugs(17, 3, "Will abort forwarding because all found paths have failed.");
        else
            debugs(17, 3, "Will abort forwarding because path selection found no paths.");

        useDestinations(); // will detect and handle the lack of paths
        return;
    }
    // else continue to use one of the previously noted destinations;
    // if all of them fail, forwarding as whole will fail
    Must(!selectionError); // finding at least one path means selection succeeded
}
568
569 /**** CALLBACK WRAPPERS ************************************************************/
570
571 static void
572 fwdServerClosedWrapper(const CommCloseCbParams &params)
573 {
574 FwdState *fwd = (FwdState *)params.data;
575 fwd->serverClosed(params.fd);
576 }
577
578 void
579 fwdConnectDoneWrapper(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno, void *data)
580 {
581 FwdState *fwd = (FwdState *) data;
582 fwd->connectDone(conn, status, xerrno);
583 }
584
585 /**** PRIVATE *****************************************************************/
586
/*
 * FwdState::checkRetry
 *
 * Return TRUE if the request SHOULD be retried. This method is
 * called when the HTTP connection fails, or when the connection
 * is closed before reading the end of HTTP headers from the server.
 * Each guard below vetoes the retry for a different reason.
 */
bool
FwdState::checkRetry()
{
    if (shutting_down)
        return false;

    if (!self) { // we have aborted before the server called us back
        debugs(17, 5, HERE << "not retrying because of earlier abort");
        // we will be destroyed when the server clears its Pointer to us
        return false;
    }

    if (entry->store_status != STORE_PENDING)
        return false;

    if (!entry->isEmpty()) // the entry already has some content
        return false;

    if (exhaustedTries())
        return false;

    if (request->flags.pinned && !pinnedCanRetry())
        return false;

    if (!EnoughTimeToReForward(start_t)) // forwarding timeout budget spent
        return false;

    if (flags.dont_retry)
        return false;

    if (request->bodyNibbled()) // part of the request body was consumed
        return false;

    // NP: not yet actually connected anywhere. retry is safe.
    if (!flags.connected_okay)
        return true;

    if (!checkRetriable())
        return false;

    return true;
}
636
637 /// Whether we may try sending this request again after a failure.
638 bool
639 FwdState::checkRetriable()
640 {
641 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
642 // complicated] code required to protect the PUT request body from being
643 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
644 if (request->body_pipe != NULL)
645 return false;
646
647 // RFC2616 9.1 Safe and Idempotent Methods
648 return (request->method.isHttpSafe() || request->method.isIdempotent());
649 }
650
651 void
652 FwdState::serverClosed(int fd)
653 {
654 // XXX: fd is often -1 here
655 debugs(17, 2, "FD " << fd << " " << entry->url() << " after " <<
656 (fd >= 0 ? fd_table[fd].pconn.uses : -1) << " requests");
657 if (fd >= 0 && serverConnection()->fd == fd)
658 fwdPconnPool->noteUses(fd_table[fd].pconn.uses);
659 retryOrBail();
660 }
661
/// Retries with the next destination (or the same one after a pconn race)
/// if checkRetry() permits; otherwise wraps up forwarding.
void
FwdState::retryOrBail()
{
    if (checkRetry()) {
        debugs(17, 3, HERE << "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
        // we should retry the same destination if it failed due to pconn race
        if (pconnRace == raceHappened)
            debugs(17, 4, HERE << "retrying the same destination");
        else
            serverDestinations.erase(serverDestinations.begin()); // last one failed. try another.
        useDestinations();
        return;
    }

    // TODO: should we call completed() here and move doneWithRetries there?
    doneWithRetries();

    request->hier.stopPeerClock(false);

    // when quitting during shutdown with nothing to show, tell the client why
    if (self != NULL && !err && shutting_down && entry->isEmpty()) {
        const auto anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request, al);
        errorAppendEntry(entry, anErr);
    }

    stopAndDestroy("cannot retry");
}
688
689 // If the Server quits before nibbling at the request body, the body sender
690 // will not know (so that we can retry). Call this if we will not retry. We
691 // will notify the sender so that it does not get stuck waiting for space.
692 void
693 FwdState::doneWithRetries()
694 {
695 if (request && request->body_pipe != NULL)
696 request->body_pipe->expectNoConsumption();
697 }
698
// called by the server that failed after calling unregister()
void
FwdState::handleUnregisteredServerEnd()
{
    debugs(17, 2, HERE << "self=" << self << " err=" << err << ' ' << entry->url());
    // unregister() must have detached us from the connection already
    assert(!Comm::IsConnOpen(serverConn));
    retryOrBail();
}
707
/// handles an established TCP connection to peer (including origin servers):
/// on failure, records the error and retries; on success, decides whether a
/// CONNECT tunnel and/or TLS negotiation is needed before dispatching.
void
FwdState::connectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno)
{
    if (status != Comm::OK) {
        ErrorState *const anErr = makeConnectingError(ERR_CONNECT_FAIL);
        anErr->xerrno = xerrno;
        fail(anErr);

        /* it might have been a timeout with a partially open link */
        if (conn != NULL) {
            if (conn->getPeer())
                peerConnectFailed(conn->getPeer());

            conn->close();
        }
        retryOrBail();
        return;
    }

    serverConn = conn;
    debugs(17, 3, HERE << serverConnection() << ": '" << entry->url() << "'" );

    // get notified if the server closes the connection on us
    closeHandler = comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);

    // request->flags.pinned cannot be true in connectDone(). The flag is
    // only set when we dispatch the request to an existing (pinned) connection.
    assert(!request->flags.pinned);

    if (const CachePeer *peer = serverConnection()->getPeer()) {
        // Assume that it is only possible for the client-first from the
        // bumping modes to try connect to a remote server. The bumped
        // requests with other modes are using pinned connections or fails.
        const bool clientFirstBump = request->flags.sslBumped;
        // We need a CONNECT tunnel to send encrypted traffic through a proxy,
        // but we do not support TLS inside TLS, so we exclude HTTPS proxies.
        const bool originWantsEncryptedTraffic =
            request->method == Http::METHOD_CONNECT ||
            request->flags.sslPeek ||
            clientFirstBump;
        if (originWantsEncryptedTraffic && // the "encrypted traffic" part
                !peer->options.originserver && // the "through a proxy" part
                !peer->secure.encryptTransport) // the "exclude HTTPS proxies" part
            return establishTunnelThruProxy();
    }

    secureConnectionToPeerIfNeeded();
}
756
/// Starts an HTTP CONNECT exchange through the cache_peer so that encrypted
/// traffic can be relayed; tunnelEstablishmentDone() receives the outcome.
void
FwdState::establishTunnelThruProxy()
{
    AsyncCall::Pointer callback = asyncCall(17,4,
                                            "FwdState::tunnelEstablishmentDone",
                                            Http::Tunneler::CbDialer<FwdState>(&FwdState::tunnelEstablishmentDone, this));
    HttpRequest::Pointer requestPointer = request;
    const auto tunneler = new Http::Tunneler(serverConnection(), requestPointer, callback, connectingTimeout(serverConnection()), al);
#if USE_DELAY_POOLS
    // tunnels through a cache_peer only; the peer must be known here
    Must(serverConnection()->getPeer());
    if (!serverConnection()->getPeer()->options.no_delay)
        tunneler->setDelayId(entry->mem_obj->mostBytesAllowed());
#endif
    AsyncJob::Start(tunneler);
    // and wait for the tunnelEstablishmentDone() call
}
773
/// resumes operations after the (possibly failed) HTTP CONNECT exchange
void
FwdState::tunnelEstablishmentDone(Http::TunnelerAnswer &answer)
{
    if (answer.positive()) {
        if (answer.leftovers.isEmpty()) {
            secureConnectionToPeerIfNeeded();
            return;
        }
        // This should not happen because TLS servers do not speak first. If we
        // have to handle this, then pass answer.leftovers via a PeerConnector
        // to ServerBio. See ClientBio::setReadBufData().
        static int occurrences = 0;
        const auto level = (occurrences++ < 100) ? DBG_IMPORTANT : 2;
        debugs(17, level, "ERROR: Early data after CONNECT response. " <<
               "Found " << answer.leftovers.length() << " bytes. " <<
               "Closing " << serverConnection());
        fail(new ErrorState(ERR_CONNECT_FAIL, Http::scBadGateway, request, al));
        closeServerConnection("found early data after CONNECT response");
        retryOrBail();
        return;
    }

    // TODO: Reuse to-peer connections after a CONNECT error response.

    if (const auto peer = serverConnection()->getPeer())
        peerConnectFailed(peer);

    const auto error = answer.squidError.get();
    Must(error);
    // detach the error from the answer before fail() takes ownership of it
    answer.squidError.clear(); // preserve error for fail()
    fail(error);
    closeServerConnection("Squid-generated CONNECT error");
    retryOrBail();
}
809
/// Starts TLS negotiation with the peer or origin when the request requires
/// it (TLS cache_peer, "GET https://" going direct, or SslBump); otherwise
/// proceeds straight to the post-connect actions.
void
FwdState::secureConnectionToPeerIfNeeded()
{
    assert(!request->flags.pinned);

    const CachePeer *p = serverConnection()->getPeer();
    const bool peerWantsTls = p && p->secure.encryptTransport;
    // userWillTlsToPeerForUs assumes CONNECT == HTTPS
    const bool userWillTlsToPeerForUs = p && p->options.originserver &&
                                        request->method == Http::METHOD_CONNECT;
    const bool needTlsToPeer = peerWantsTls && !userWillTlsToPeerForUs;
    const bool clientFirstBump = request->flags.sslBumped; // client-first (already) bumped connection
    const bool needsBump = request->flags.sslPeek || clientFirstBump;

    // 'GET https://...' requests. If a peer is used the request is forwarded
    // as is
    const bool needTlsToOrigin = !p && request->url.getScheme() == AnyP::PROTO_HTTPS && !clientFirstBump;

    if (needTlsToPeer || needTlsToOrigin || needsBump) {
        HttpRequest::Pointer requestPointer = request;
        AsyncCall::Pointer callback = asyncCall(17,4,
                                                "FwdState::ConnectedToPeer",
                                                FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
        const auto sslNegotiationTimeout = connectingTimeout(serverDestinations[0]);
        Security::PeerConnector *connector = nullptr;
#if USE_OPENSSL
        // peeking needs certificate inspection; plain encryption does not
        if (request->flags.sslPeek)
            connector = new Ssl::PeekingPeerConnector(requestPointer, serverConnection(), clientConn, callback, al, sslNegotiationTimeout);
        else
#endif
            connector = new Security::BlindPeerConnector(requestPointer, serverConnection(), callback, al, sslNegotiationTimeout);
        AsyncJob::Start(connector); // will call our callback
        return;
    }

    // if not encrypting just run the post-connect actions
    successfullyConnectedToPeer();
}
849
/// called when all negotiations with the TLS-speaking peer have been completed
void
FwdState::connectedToPeer(Security::EncryptorAnswer &answer)
{
    if (ErrorState *error = answer.error.get()) {
        fail(error);
        // detach the error from the answer; fail() owns it now
        answer.error.clear(); // preserve error for errorSendComplete()
        if (CachePeer *p = serverConnection()->getPeer())
            peerConnectFailed(p);
        serverConnection()->close();
        return;
    }

    if (answer.tunneled) {
        // TODO: When ConnStateData establishes tunnels, its state changes
        // [in ways that may affect logging?]. Consider informing
        // ConnStateData about our tunnel or otherwise unifying tunnel
        // establishment [side effects].
        unregister(serverConn); // async call owns it now
        complete(); // destroys us
        return;
    }

    successfullyConnectedToPeer();
}
875
876 /// called when all negotiations with the peer have been completed
877 void
878 FwdState::successfullyConnectedToPeer()
879 {
880 // should reach ConnStateData before the dispatched Client job starts
881 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
882 ConnStateData::notePeerConnection, serverConnection());
883
884 if (serverConnection()->getPeer())
885 peerConnectSucceded(serverConnection()->getPeer());
886
887 flags.connected_okay = true;
888 dispatch();
889 }
890
/// called when opening serverDestinations[0] takes too long: records a
/// gateway-timeout error (if nothing was received yet) and closes the
/// half-open connection, which triggers the usual close-handler path
void
FwdState::connectTimeout(int fd)
{
    debugs(17, 2, "fwdConnectTimeout: FD " << fd << ": '" << entry->url() << "'" );
    assert(serverDestinations[0] != NULL);
    assert(fd == serverDestinations[0]->fd);

    if (entry->isEmpty()) {
        const auto anErr = new ErrorState(ERR_CONNECT_FAIL, Http::scGatewayTimeout, request, al);
        anErr->xerrno = ETIMEDOUT;
        fail(anErr);

        /* This marks the peer DOWN ... */
        if (serverDestinations[0]->getPeer())
            peerConnectFailed(serverDestinations[0]->getPeer());
    }

    if (Comm::IsConnOpen(serverDestinations[0])) {
        serverDestinations[0]->close();
    }
}
912
/// called when serverConn is set to an _open_ to-peer connection:
/// applies QoS markings (if configured) and records the peer in the logs
void
FwdState::syncWithServerConn(const char *host)
{
    if (Ip::Qos::TheConfig.isAclTosActive())
        Ip::Qos::setSockTos(serverConn, GetTosToServer(request));

#if SO_MARK
    if (Ip::Qos::TheConfig.isAclNfmarkActive())
        Ip::Qos::setSockNfmark(serverConn, GetNfmarkToServer(request));
#endif

    syncHierNote(serverConn, host);
}
927
/// records the given server connection and host in both the request and the
/// access.log entry hierarchy notes (whichever of the two are available)
void
FwdState::syncHierNote(const Comm::ConnectionPointer &server, const char *host)
{
    if (request)
        request->hier.resetPeerNotes(server, host);
    if (al)
        al->hier.resetPeerNotes(server, host);
}
936
/**
 * Called after forwarding path selection (via peer select) has taken place
 * and whenever forwarding needs to attempt a new connection (routing failover).
 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
 */
void
FwdState::connectStart()
{
    assert(serverDestinations.size() > 0);

    debugs(17, 3, "fwdConnectStart: " << entry->url());

    // pinned connections go through usePinned() rather than connectStart()
    assert(serverDestinations[0] != nullptr);
    request->flags.pinned = false;

    // Ditch the previous error if any.
    // A new error page will be created if there is another problem.
    delete err;
    err = nullptr;
    request->clearError();

    // Update logging information with the upcoming server connection
    // destination. Do this early so that any connection establishment errors
    // are attributed to this destination. If successfully connected, this
    // destination becomes serverConnection().
    syncHierNote(serverDestinations[0], request->url.host());

    request->hier.startPeerClock();

    // Requests bumped at step2+ require their pinned connection. Since we
    // failed to reuse the pinned connection, we now must terminate the
    // bumped request. For client-first and step1 bumped requests, the
    // from-client connection is already bumped, but the connection to the
    // server is not established/pinned so they must be excluded. We can
    // recognize step1 bumping by nil ConnStateData::serverBump().
#if USE_OPENSSL
    const auto clientFirstBump = request->clientConnectionManager.valid() &&
                                 (request->clientConnectionManager->sslBumpMode == Ssl::bumpClientFirst ||
                                  (request->clientConnectionManager->sslBumpMode == Ssl::bumpBump && !request->clientConnectionManager->serverBump())
                                 );
#else
    const auto clientFirstBump = false;
#endif /* USE_OPENSSL */
    if (request->flags.sslBumped && !clientFirstBump) {
        // TODO: Factor out/reuse as Occasionally(DBG_IMPORTANT, 2[, occurrences]).
        static int occurrences = 0;
        const auto level = (occurrences++ < 100) ? DBG_IMPORTANT : 2;
        debugs(17, level, "BUG: Lost previously bumped from-Squid connection. Rejecting bumped request.");
        fail(new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request, al));
        self = nullptr; // refcounted
        return;
    }

    // Use pconn to avoid opening a new connection.
    // host stays nil for cache_peer destinations; pconnPop() and
    // ConnOpener receive the origin host name only when going direct
    const char *host = NULL;
    if (!serverDestinations[0]->getPeer())
        host = request->url.host();

    // NOTE(review): bumped requests going through a peer skip the pconn pool
    // below — presumably they need a dedicated tunnel through the peer;
    // confirm against the HttpTunneler path.
    bool bumpThroughPeer = request->flags.sslBumped && serverDestinations[0]->getPeer();
    Comm::ConnectionPointer temp;
    // Avoid pconns after races so that the same client does not suffer twice.
    // This does not increase the total number of connections because we just
    // closed the connection that failed the race. And re-pinning assumes this.
    if (pconnRace != raceHappened && !bumpThroughPeer)
        temp = pconnPop(serverDestinations[0], host);

    const bool openedPconn = Comm::IsConnOpen(temp);
    pconnRace = openedPconn ? racePossible : raceImpossible;

    // if we found an open persistent connection to use. use it.
    if (openedPconn) {
        serverConn = temp;
        flags.connected_okay = true;
        debugs(17, 3, HERE << "reusing pconn " << serverConnection());
        ++n_tries;

        // get notified if the reused connection closes under us
        closeHandler = comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);

        syncWithServerConn(request->url.host());

        dispatch();
        return;
    }

    // We will try to open a new connection, possibly to the same destination.
    // We reset serverDestinations[0] in case we are using it again because
    // ConnOpener modifies its destination argument.
    serverDestinations[0]->local.port(0);
    serverConn = NULL;

#if URL_CHECKSUM_DEBUG
    entry->mem_obj->checkUrlChecksum();
#endif

    // compute the TOS/netfilter markings to be set on the future connection
    GetMarkingsToServer(request, *serverDestinations[0]);

    const AsyncCall::Pointer connector = commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper, this));
    const auto connTimeout = connectingTimeout(serverDestinations[0]);
    const auto cs = new Comm::ConnOpener(serverDestinations[0], connector, connTimeout);
    if (host)
        cs->setHost(host);
    ++n_tries;
    AsyncJob::Start(cs);
}
1042
/// send request on an existing connection dedicated to the requesting client
void
FwdState::usePinned()
{
    // we only handle pinned destinations; others are handled by connectStart()
    assert(!serverDestinations.empty());
    assert(!serverDestinations[0]);

    const auto connManager = request->pinnedConnection();
    debugs(17, 7, "connection manager: " << connManager);

    // the client connection may close while we get here, nullifying connManager
    const auto temp = connManager ? connManager->borrowPinnedConnection(request) : nullptr;
    debugs(17, 5, "connection: " << temp);

    // the previously pinned idle peer connection may get closed (by the peer)
    if (!Comm::IsConnOpen(temp)) {
        // still attribute the failure to this destination for logging
        syncHierNote(temp, connManager ? connManager->pinning.host : request->url.host());
        serverConn = nullptr;
        const auto anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request, al);
        fail(anErr);
        // Connection managers monitor their idle pinned to-server
        // connections and close from-client connections upon seeing
        // a to-server connection closure. Retrying here is futile.
        stopAndDestroy("pinned connection failure");
        return;
    }

    serverConn = temp;
    flags.connected_okay = true;
    ++n_tries;
    request->flags.pinned = true;

    assert(connManager);
    if (connManager->pinnedAuth())
        request->flags.auth = true;

    // get notified if the pinned connection closes while we still use it
    closeHandler = comm_add_close_handler(temp->fd, fwdServerClosedWrapper, this);

    syncWithServerConn(connManager->pinning.host);

    // the server may close the pinned connection before this request
    pconnRace = racePossible;
    dispatch();
}
1088
/// Hands the request over to the protocol module matching the selected
/// destination (peer or origin scheme). serverConn must already be open.
/// May destroy this FwdState (see the sslPeek branch).
void
FwdState::dispatch()
{
    debugs(17, 3, clientConn << ": Fetching " << request->method << ' ' << entry->url());
    /*
     * Assert that server_fd is set. This is to guarantee that fwdState
     * is attached to something and will be deallocated when server_fd
     * is closed.
     */
    assert(Comm::IsConnOpen(serverConn));

    fd_note(serverConnection()->fd, entry->url());

    fd_table[serverConnection()->fd].noteUse();

    /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
    assert(entry->ping_status != PING_WAITING);

    assert(entry->locked());

    EBIT_SET(entry->flags, ENTRY_DISPATCHED);

    netdbPingSite(request->url.host());

    /* Retrieves remote server TOS or MARK value, and stores it as part of the
     * original client request FD object. It is later used to forward
     * remote server's TOS/MARK in the response to the client in case of a MISS.
     */
    if (Ip::Qos::TheConfig.isHitNfmarkActive()) {
        if (Comm::IsConnOpen(clientConn) && Comm::IsConnOpen(serverConnection())) {
            fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
            /* Get the netfilter CONNMARK */
            clientFde->nfConnmarkFromServer = Ip::Qos::getNfConnmark(serverConnection(), Ip::Qos::dirOpened);
        }
    }

#if _SQUID_LINUX_
    /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
    if (Ip::Qos::TheConfig.isHitTosActive()) {
        if (Comm::IsConnOpen(clientConn)) {
            fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
            /* Get the TOS value for the packet */
            Ip::Qos::getTosFromServer(serverConnection(), clientFde);
        }
    }
#endif

#if USE_OPENSSL
    // SslBump peeking: hand the established to-server connection back to the
    // client-side ConnStateData for pinning instead of sending the request
    if (request->flags.sslPeek) {
        CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                     ConnStateData::httpsPeeked, ConnStateData::PinnedIdleContext(serverConnection(), request));
        unregister(serverConn); // async call owns it now
        complete(); // destroys us
        return;
    }
#endif

    if (const auto peer = serverConnection()->getPeer()) {
        // cache_peer traffic always uses the HTTP client module
        ++peer->stats.fetches;
        request->prepForPeering(*peer);
        httpStart(this);
    } else {
        // going direct: pick the client module matching the URL scheme
        assert(!request->flags.sslPeek);
        request->prepForDirect();

        switch (request->url.getScheme()) {

        case AnyP::PROTO_HTTPS:
            httpStart(this);
            break;

        case AnyP::PROTO_HTTP:
            httpStart(this);
            break;

        case AnyP::PROTO_GOPHER:
            gopherStart(this);
            break;

        case AnyP::PROTO_FTP:
            if (request->flags.ftpNative)
                Ftp::StartRelay(this);
            else
                Ftp::StartGateway(this);
            break;

        case AnyP::PROTO_CACHE_OBJECT:

        case AnyP::PROTO_URN:
            // internally-serviced schemes must never reach forwarding code
            fatal_dump("Should never get here");
            break;

        case AnyP::PROTO_WHOIS:
            whoisStart(this);
            break;

        case AnyP::PROTO_WAIS: /* Not implemented */

        default:
            debugs(17, DBG_IMPORTANT, "WARNING: Cannot retrieve '" << entry->url() << "'.");
            const auto anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request, al);
            fail(anErr);
            // Set the dont_retry flag because this is not a transient (network) error.
            flags.dont_retry = true;
            if (Comm::IsConnOpen(serverConn)) {
                serverConn->close();
            }
            break;
        }
    }
}
1200
/*
 * FwdState::reforward
 *
 * returns TRUE if the transaction SHOULD be re-forwarded to the
 * next choice in the serverDestinations list. This method is called when
 * peer communication completes normally, or experiences
 * some error after receiving the end of HTTP headers.
 */
int
FwdState::reforward()
{
    StoreEntry *e = entry;

    if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
        debugs(17, 3, HERE << "entry aborted");
        return 0;
    }

    assert(e->store_status == STORE_PENDING);
    assert(e->mem_obj);
#if URL_CHECKSUM_DEBUG

    e->mem_obj->checkUrlChecksum();
#endif

    debugs(17, 3, HERE << e->url() << "?" );

    // pinned connections may only be retried in limited cases
    if (request->flags.pinned && !pinnedCanRetry()) {
        debugs(17, 3, "pinned connection; cannot retry");
        return 0;
    }

    if (!EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(17, 3, HERE << "No, ENTRY_FWD_HDR_WAIT isn't set");
        return 0;
    }

    // obey the configured attempt limit
    if (exhaustedTries())
        return 0;

    // cannot resend a request body that was already partially consumed
    if (request->bodyNibbled())
        return 0;

    if (serverDestinations.size() <= 1 && !PeerSelectionInitiator::subscribed) {
        // NP: <= 1 since total count includes the recently failed one.
        debugs(17, 3, HERE << "No alternative forwarding paths left");
        return 0;
    }

    // finally, retry only if the received reply status suggests it may help
    const Http::StatusCode s = e->getReply()->sline.status();
    debugs(17, 3, HERE << "status " << s);
    return reforwardableStatus(s);
}
1254
1255 /**
1256 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1257 * on whether this is a validation request. RFC 2616 says that we MUST reply
1258 * with "504 Gateway Timeout" if validation fails and cached reply has
1259 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
1260 */
1261 ErrorState *
1262 FwdState::makeConnectingError(const err_type type) const
1263 {
1264 return new ErrorState(type, request->flags.needValidation ?
1265 Http::scGatewayTimeout : Http::scServiceUnavailable, request, al);
1266 }
1267
1268 static void
1269 fwdStats(StoreEntry * s)
1270 {
1271 int i;
1272 int j;
1273 storeAppendPrintf(s, "Status");
1274
1275 for (j = 1; j < MAX_FWD_STATS_IDX; ++j) {
1276 storeAppendPrintf(s, "\ttry#%d", j);
1277 }
1278
1279 storeAppendPrintf(s, "\n");
1280
1281 for (i = 0; i <= (int) Http::scInvalidHeader; ++i) {
1282 if (FwdReplyCodes[0][i] == 0)
1283 continue;
1284
1285 storeAppendPrintf(s, "%3d", i);
1286
1287 for (j = 0; j <= MAX_FWD_STATS_IDX; ++j) {
1288 storeAppendPrintf(s, "\t%d", FwdReplyCodes[j][i]);
1289 }
1290
1291 storeAppendPrintf(s, "\n");
1292 }
1293 }
1294
1295 /**** STATIC MEMBER FUNCTIONS *************************************************/
1296
1297 bool
1298 FwdState::reforwardableStatus(const Http::StatusCode s) const
1299 {
1300 switch (s) {
1301
1302 case Http::scBadGateway:
1303
1304 case Http::scGatewayTimeout:
1305 return true;
1306
1307 case Http::scForbidden:
1308
1309 case Http::scInternalServerError:
1310
1311 case Http::scNotImplemented:
1312
1313 case Http::scServiceUnavailable:
1314 return Config.retry.onerror;
1315
1316 default:
1317 return false;
1318 }
1319
1320 /* NOTREACHED */
1321 }
1322
1323 /**
1324 * Decide where details need to be gathered to correctly describe a persistent connection.
1325 * What is needed:
1326 * - the address/port details about this link
1327 * - domain name of server at other end of this link (either peer or requested host)
1328 */
1329 void
1330 FwdState::pconnPush(Comm::ConnectionPointer &conn, const char *domain)
1331 {
1332 if (conn->getPeer()) {
1333 fwdPconnPool->push(conn, NULL);
1334 } else {
1335 fwdPconnPool->push(conn, domain);
1336 }
1337 }
1338
/// Pops an idle persistent connection matching the given destination from a
/// pool, consulting the shared pool first and then the peer standby pool.
/// \param dest the cache_peer or origin destination to match
/// \param domain origin host name (nil for cache_peer destinations)
/// \returns a connection that may be open, closed, or nil
Comm::ConnectionPointer
FwdState::pconnPop(const Comm::ConnectionPointer &dest, const char *domain)
{
    bool retriable = checkRetriable();
    // the serverPconnForNonretriable ACL may permit pconn reuse even for
    // requests that cannot be safely retried after a pconn race
    if (!retriable && Config.accessList.serverPconnForNonretriable) {
        ACLFilledChecklist ch(Config.accessList.serverPconnForNonretriable, request, NULL);
        ch.al = al;
        ch.syncAle(request, nullptr);
        retriable = ch.fastCheck().allowed();
    }
    // always call shared pool first because we need to close an idle
    // connection there if we have to use a standby connection.
    Comm::ConnectionPointer conn = fwdPconnPool->pop(dest, domain, retriable);
    if (!Comm::IsConnOpen(conn)) {
        // either there was no pconn to pop or this is not a retriable xaction
        if (CachePeer *peer = dest->getPeer()) {
            // fall back to the peer's standby connection pool, if it has one
            if (peer->standby.pool)
                conn = peer->standby.pool->pop(dest, domain, true);
        }
    }
    return conn; // open, closed, or nil
}
1361
/// one-time module initialization: registers the cache manager report
void
FwdState::initModule()
{
    RegisterWithCacheManager();
}
1367
1368 void
1369 FwdState::RegisterWithCacheManager(void)
1370 {
1371 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats, 0, 1);
1372 }
1373
1374 void
1375 FwdState::logReplyStatus(int tries, const Http::StatusCode status)
1376 {
1377 if (status > Http::scInvalidHeader)
1378 return;
1379
1380 assert(tries >= 0);
1381
1382 if (tries > MAX_FWD_STATS_IDX)
1383 tries = MAX_FWD_STATS_IDX;
1384
1385 ++ FwdReplyCodes[tries][status];
1386 }
1387
/// whether we have used up all forwarding attempts allowed by the
/// forward_max_tries configuration limit
bool
FwdState::exhaustedTries() const
{
    return n_tries >= Config.forward_max_tries;
}
1393
1394 bool
1395 FwdState::pinnedCanRetry() const
1396 {
1397 assert(request->flags.pinned);
1398
1399 // pconn race on pinned connection: Currently we do not have any mechanism
1400 // to retry current pinned connection path.
1401 if (pconnRace == raceHappened)
1402 return false;
1403
1404 // If a bumped connection was pinned, then the TLS client was given our peer
1405 // details. Do not retry because we do not ensure that those details stay
1406 // constant. Step1-bumped connections do not get our TLS peer details, are
1407 // never pinned, and, hence, never reach this method.
1408 if (request->flags.sslBumped)
1409 return false;
1410
1411 // The other pinned cases are FTP proxying and connection-based HTTP
1412 // authentication. TODO: Do these cases have restrictions?
1413 return true;
1414 }
1415
1416 time_t
1417 FwdState::connectingTimeout(const Comm::ConnectionPointer &conn) const
1418 {
1419 const auto connTimeout = conn->connectTimeout(start_t);
1420 return positiveTimeout(connTimeout);
1421 }
1422
1423 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1424
1425 /*
1426 * DPW 2007-05-19
1427 * Formerly static, but now used by client_side_request.cc
1428 */
1429 /// Checks for a TOS value to apply depending on the ACL
1430 tos_t
1431 aclMapTOS(acl_tos * head, ACLChecklist * ch)
1432 {
1433 for (acl_tos *l = head; l; l = l->next) {
1434 if (!l->aclList || ch->fastCheck(l->aclList).allowed())
1435 return l->tos;
1436 }
1437
1438 return 0;
1439 }
1440
1441 /// Checks for a netfilter mark value to apply depending on the ACL
1442 Ip::NfMarkConfig
1443 aclFindNfMarkConfig(acl_nfmark * head, ACLChecklist * ch)
1444 {
1445 for (acl_nfmark *l = head; l; l = l->next) {
1446 if (!l->aclList || ch->fastCheck(l->aclList).allowed())
1447 return l->markConfig;
1448 }
1449
1450 return {};
1451 }
1452
/// Selects the local (outgoing) address for the given future connection:
/// honors TPROXY client-address spoofing, then the outgoing_address ACL
/// rules; otherwise leaves the address for the OS to choose.
void
getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn)
{
    // skip if an outgoing address is already set.
    if (!conn->local.isAnyAddr()) return;

    // ensure that at minimum the wildcard local matches remote protocol
    if (conn->remote.isIPv4())
        conn->local.setIPv4();

    // maybe use TPROXY client address
    if (request && request->flags.spoofClientIp) {
        // peers may be configured with no-tproxy, disabling spoofing for them
        if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
#if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
            if (Config.onoff.tproxy_uses_indirect_client)
                conn->local = request->indirect_client_addr;
            else
#endif
                conn->local = request->client_addr;
            conn->local.port(0); // let OS pick the source port to prevent address clashes
            // some flags need setting on the socket to use this address
            conn->flags |= COMM_DOBIND;
            conn->flags |= COMM_TRANSPARENT;
            return;
        }
        // else no tproxy today ...
    }

    if (!Config.accessList.outgoing_address) {
        return; // anything will do.
    }

    ACLFilledChecklist ch(NULL, request, NULL);
    ch.dst_peer_name = conn->getPeer() ? conn->getPeer()->name : NULL;
    ch.dst_addr = conn->remote;

    // TODO use the connection details in ACL.
    // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData

    for (Acl::Address *l = Config.accessList.outgoing_address; l; l = l->next) {

        /* check if the outgoing address is usable to the destination */
        if (conn->remote.isIPv4() != l->addr.isIPv4()) continue;

        /* check ACLs for this outgoing address */
        if (!l->aclList || ch.fastCheck(l->aclList).allowed()) {
            conn->local = l->addr;
            return;
        }
    }
}
1504
1505 tos_t
1506 GetTosToServer(HttpRequest * request)
1507 {
1508 ACLFilledChecklist ch(NULL, request, NULL);
1509 return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
1510 }
1511
1512 nfmark_t
1513 GetNfmarkToServer(HttpRequest * request)
1514 {
1515 ACLFilledChecklist ch(NULL, request, NULL);
1516 const auto mc = aclFindNfMarkConfig(Ip::Qos::TheConfig.nfmarkToServer, &ch);
1517 return mc.mark;
1518 }
1519
/// Computes and stores on the given connection object the TOS and netfilter
/// mark values selected for server-side traffic by the QoS configuration.
void
GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
{
    // Get the server side TOS and Netfilter mark to be set on the connection.
    if (Ip::Qos::TheConfig.isAclTosActive()) {
        conn.tos = GetTosToServer(request);
        debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos));
    }

#if SO_MARK && USE_LIBCAP
    conn.nfmark = GetNfmarkToServer(request);
    debugs(17, 3, "from " << conn.local << " netfilter mark " << conn.nfmark);
#else
    // netfilter marking needs SO_MARK and libcap support; disabled otherwise
    conn.nfmark = 0;
#endif
}
1536