]> git.ipfire.org Git - thirdparty/squid.git/blob - src/FwdState.cc
SourceFormat Enforcement
[thirdparty/squid.git] / src / FwdState.cc
1 /*
2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 17 Request Forwarding */
10
11 #include "squid.h"
12 #include "AccessLogEntry.h"
13 #include "acl/AclAddress.h"
14 #include "acl/FilledChecklist.h"
15 #include "acl/Gadgets.h"
16 #include "anyp/PortCfg.h"
17 #include "CacheManager.h"
18 #include "CachePeer.h"
19 #include "client_side.h"
20 #include "clients/forward.h"
21 #include "comm/Connection.h"
22 #include "comm/ConnOpener.h"
23 #include "comm/Loops.h"
24 #include "CommCalls.h"
25 #include "errorpage.h"
26 #include "event.h"
27 #include "fd.h"
28 #include "fde.h"
29 #include "FwdState.h"
30 #include "globals.h"
31 #include "gopher.h"
32 #include "hier_code.h"
33 #include "http.h"
34 #include "HttpReply.h"
35 #include "HttpRequest.h"
36 #include "icmp/net_db.h"
37 #include "internal.h"
38 #include "ip/Intercept.h"
39 #include "ip/QosConfig.h"
40 #include "ip/tools.h"
41 #include "MemObject.h"
42 #include "mgr/Registration.h"
43 #include "neighbors.h"
44 #include "pconn.h"
45 #include "PeerPoolMgr.h"
46 #include "PeerSelectState.h"
47 #include "SquidConfig.h"
48 #include "SquidTime.h"
49 #include "Store.h"
50 #include "StoreClient.h"
51 #include "urn.h"
52 #include "whois.h"
53 #if USE_OPENSSL
54 #include "ssl/cert_validate_message.h"
55 #include "ssl/Config.h"
56 #include "ssl/ErrorDetail.h"
57 #include "ssl/helper.h"
58 #include "ssl/PeerConnector.h"
59 #include "ssl/ServerBump.h"
60 #include "ssl/support.h"
61 #endif
62
63 #include <cerrno>
64
/* forward declarations of callback wrappers; the PSC, CLCB, and CNCB
 * typedefs come from the peer-selection and comm APIs respectively */
static PSC fwdPeerSelectionCompleteWrapper;
static CLCB fwdServerClosedWrapper;
static CNCB fwdConnectDoneWrapper;

/// cache manager handler producing the forwarding statistics report
static OBJH fwdStats;

/// bound for the first (tries) dimension of the FwdReplyCodes counters
#define MAX_FWD_STATS_IDX 9
/// reply status counters indexed by [number of tries][HTTP status code];
/// updated via logReplyStatus() (called from FwdState::complete() with n_tries)
static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];

/// pool of idle persistent server-side connections, shared by all FwdStates
static PconnPool *fwdPconnPool = new PconnPool("server-peers", NULL);
CBDATA_CLASS_INIT(FwdState);
#if USE_OPENSSL
/// Dialer for Ssl::PeerConnector callbacks aimed at a FwdState method.
/// Stores the PeerConnectorAnswer and guards against the FwdState object
/// disappearing before the call is dialed (via the CbcPointer validity check).
class FwdStatePeerAnswerDialer: public CallDialer, public Ssl::PeerConnector::CbDialer
{
public:
    typedef void (FwdState::*Method)(Ssl::PeerConnectorAnswer &);

    FwdStatePeerAnswerDialer(Method method, FwdState *fwd):
        method_(method), fwd_(fwd), answer_() {}

    /* CallDialer API */
    virtual bool canDial(AsyncCall &call) { return fwd_.valid(); }
    void dial(AsyncCall &call) { ((&(*fwd_))->*method_)(answer_); }
    virtual void print(std::ostream &os) const {
        os << '(' << fwd_.get() << ", " << answer_ << ')';
    }

    /* Ssl::PeerConnector::CbDialer API */
    virtual Ssl::PeerConnectorAnswer &answer() { return answer_; }

private:
    Method method_; ///< the FwdState method to call when dialed
    CbcPointer<FwdState> fwd_; ///< callback recipient; may become invalid
    Ssl::PeerConnectorAnswer answer_; ///< answer delivered to method_
};
#endif
102
103 void
104 FwdState::abort(void* d)
105 {
106 FwdState* fwd = (FwdState*)d;
107 Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
108
109 if (Comm::IsConnOpen(fwd->serverConnection())) {
110 fwd->closeServerConnection("store entry aborted");
111 } else {
112 debugs(17, 7, HERE << "store entry aborted; no connection to close");
113 }
114 fwd->serverDestinations.clear();
115 fwd->self = NULL;
116 }
117
/// Closes the current server connection: removes our close handler first
/// (so serverClosed() is not invoked), credits the pconn pool with the
/// number of requests this connection served, then closes it.
void
FwdState::closeServerConnection(const char *reason)
{
    debugs(17, 3, "because " << reason << "; " << serverConn);
    comm_remove_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
    fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
    serverConn->close();
}
126
127 /**** PUBLIC INTERFACE ********************************************************/
128
FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRequest * r, const AccessLogEntryPointer &alp):
    al(alp)
{
    debugs(17, 2, HERE << "Forwarding client request " << client << ", url=" << e->url() );
    entry = e;
    clientConn = client;
    request = r;
    HTTPMSGLOCK(request); // keep the request alive for our lifetime
    pconnRace = raceImpossible; // no idle connection tried yet
    start_t = squid_curtime; // forwarding timeout is measured from now
    serverDestinations.reserve(Config.forward_max_tries);
    e->lock("FwdState"); // paired with unlock() in the destructor
    EBIT_SET(e->flags, ENTRY_FWD_HDR_WAIT);
}
143
// Called once, right after object creation, when it is safe to set self
void FwdState::start(Pointer aSelf)
{
    // Protect ourselves from being destroyed when the only Server pointing
    // to us is gone (while we expect to talk to more Servers later).
    // Once we set self, we are responsible for clearing it when we do not
    // expect to talk to any servers.
    self = aSelf; // refcounted

    // We hope that either the store entry aborts or peer is selected.
    // Otherwise we are going to leak our object.

    // abort() clears self and closes any server connection
    entry->registerAbort(FwdState::abort, this);

#if STRICT_ORIGINAL_DST
    // Bug 3243: CVE 2009-0801
    // Bypass of browser same-origin access control in intercepted communication
    // To resolve this we must force DIRECT and only to the original client destination.
    const bool isIntercepted = request && !request->flags.redirected && (request->flags.intercepted || request->flags.interceptTproxy);
    const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
    if (isIntercepted && useOriginalDst) {
        selectPeerForIntercepted();
        // 3.2 does not support re-wrapping inside CONNECT.
        // our only alternative is to fake destination "found" and continue with the forwarding.
        startConnectionOrFail();
        return;
    }
#endif

    // do full route options selection
    peerSelect(&serverDestinations, request, al, entry, fwdPeerSelectionCompleteWrapper, this);
}
176
#if STRICT_ORIGINAL_DST
/// bypasses peerSelect() when dealing with intercepted requests:
/// fills serverDestinations with either the pinned connection or the
/// client's original destination address (never a selected peer)
void
FwdState::selectPeerForIntercepted()
{
    // use pinned connection if available
    Comm::ConnectionPointer p;
    if (ConnStateData *client = request->pinnedConnection()) {
        p = client->validatePinnedConnection(request, NULL);
        if (Comm::IsConnOpen(p)) {
            /* duplicate peerSelectPinned() effects */
            p->peerType = PINNED;
            entry->ping_status = PING_DONE; /* Skip ICP */

            debugs(17, 3, "reusing a pinned conn: " << *p);
            serverDestinations.push_back(p);
        } else {
            // the pinned connection cannot serve this request; send an error
            debugs(17,2, "Pinned connection is not valid: " << p);
            ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
            fail(anErr);
        }
        // Either use the valid pinned connection or fail if it is invalid.
        return;
    }

    // use client original destination as second preferred choice
    p = new Comm::Connection();
    p->peerType = ORIGINAL_DST;
    p->remote = clientConn->local; // the intercepted destination address
    getOutgoingAddress(request, p);

    debugs(17, 3, HERE << "using client original destination: " << *p);
    serverDestinations.push_back(p);
}
#endif
212
/// Final bookkeeping for this forwarding attempt: stops the peer clock and
/// makes sure the store entry is either completed or given an error reply.
/// Runs at most once (guarded by flags.forward_completed); called from
/// complete() and, as a safety net, from the destructor.
void
FwdState::completed()
{
    if (flags.forward_completed) {
        debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
        return;
    }

    flags.forward_completed = true;

    request->hier.stopPeerClock(false);

    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        debugs(17, 3, HERE << "entry aborted");
        return ;
    }

#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    if (entry->store_status == STORE_PENDING) {
        if (entry->isEmpty()) {
            // nothing was written to the entry; deliver an error instead
            if (!err) // we quit (e.g., fd closed) before an error or content
                fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request));
            assert(err);
            errorAppendEntry(entry, err); // frees err; forget our pointer
            err = NULL;
#if USE_OPENSSL
            if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
                // tell the client-side manager that peeking produced no server connection
                CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                             ConnStateData::httpsPeeked, Comm::ConnectionPointer(NULL));
            }
#endif
        } else {
            EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
            entry->complete();
            entry->releaseRequest();
        }
    }

    if (storePendingNClients(entry) > 0)
        assert(!EBIT_TEST(entry->flags, ENTRY_FWD_HDR_WAIT));

}
259
FwdState::~FwdState()
{
    debugs(17, 3, HERE << "FwdState destructor starting");

    // run the completion bookkeeping even if forwarding was cut short
    if (! flags.forward_completed)
        completed();

    doneWithRetries();

    HTTPMSGUNLOCK(request);

    delete err;

    entry->unregisterAbort();

    entry->unlock("FwdState"); // paired with lock() in the constructor

    entry = NULL;

    // cancel a connect callback that has not fired yet
    if (calls.connector != NULL) {
        calls.connector->cancel("FwdState destructed");
        calls.connector = NULL;
    }

    if (Comm::IsConnOpen(serverConn))
        closeServerConnection("~FwdState");

    serverDestinations.clear();

    debugs(17, 3, HERE << "FwdState destructor done");
}
291
/**
 * This is the entry point for client-side to start forwarding
 * a transaction. It is a static method that may or may not
 * allocate a FwdState: internal, cache-object, and URN requests
 * are handed to their dedicated handlers instead.
 */
void
FwdState::Start(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request, const AccessLogEntryPointer &al)
{
    /** \note
     * client_addr == no_addr indicates this is an "internal" request
     * from peer_digest.c, asn.c, netdb.c, etc and should always
     * be allowed. yuck, I know.
     */

    if ( Config.accessList.miss && !request->client_addr.isNoAddr() &&
            !request->flags.internal && request->url.getScheme() != AnyP::PROTO_CACHE_OBJECT) {
        /**
         * Check if this host is allowed to fetch MISSES from us (miss_access).
         * Intentionally replace the src_addr automatically selected by the checklist code
         * we do NOT want the indirect client address to be tested here.
         */
        ACLFilledChecklist ch(Config.accessList.miss, request, NULL);
        ch.src_addr = request->client_addr;
        if (ch.fastCheck() == ACCESS_DENIED) {
            // denied: reply with the configured (or default) forwarding-denied page
            err_type page_id;
            page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName, 1);

            if (page_id == ERR_NONE)
                page_id = ERR_FORWARDING_DENIED;

            ErrorState *anErr = new ErrorState(page_id, Http::scForbidden, request);
            errorAppendEntry(entry, anErr); // frees anErr
            return;
        }
    }

    debugs(17, 3, HERE << "'" << entry->url() << "'");
    /*
     * This seems like an odd place to bind mem_obj and request.
     * Might want to assert that request is NULL at this point
     */
    entry->mem_obj->request = request;
    HTTPMSGLOCK(entry->mem_obj->request);
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    if (shutting_down) {
        /* more yuck */
        ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
        errorAppendEntry(entry, anErr); // frees anErr
        return;
    }

    if (request->flags.internal) {
        debugs(17, 2, "calling internalStart() due to request flag");
        internalStart(clientConn, request, entry);
        return;
    }

    switch (request->url.getScheme()) {

    case AnyP::PROTO_CACHE_OBJECT:
        debugs(17, 2, "calling CacheManager due to request scheme " << request->url.getScheme());
        CacheManager::GetInstance()->Start(clientConn, request, entry);
        return;

    case AnyP::PROTO_URN:
        urnStart(request, entry);
        return;

    default:
        // all other schemes are forwarded by a new FwdState
        FwdState::Pointer fwd = new FwdState(clientConn, entry, request, al);
        fwd->start(fwd);
        return;
    }

    /* NOTREACHED */
}
372
373 void
374 FwdState::fwdStart(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request)
375 {
376 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
377 Start(clientConn, entry, request, NULL);
378 }
379
/// Starts connecting to the top destination in serverDestinations, or, when
/// no destinations remain, finalizes the transaction with an error (reusing
/// the last connection attempt's error if one was recorded).
void
FwdState::startConnectionOrFail()
{
    debugs(17, 3, HERE << entry->url());

    if (serverDestinations.size() > 0) {
        // Ditch error page if it was created before.
        // A new one will be created if there's another problem
        delete err;
        err = NULL;

        // Update the logging information about this new server connection.
        // Done here before anything else so the errors get logged for
        // this server link regardless of what happens when connecting to it.
        // IF successfully connected this top destination will become the serverConnection().
        request->hier.note(serverDestinations[0], request->GetHost());
        request->clearError();

        connectStart();
    } else {
        // no destinations left to try
        debugs(17, 3, HERE << "Connection failed: " << entry->url());
        if (!err) {
            ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request);
            fail(anErr);
        } // else use actual error from last connection attempt
        self = NULL; // refcounted
    }
}
408
/// Records the given error, taking ownership of errorState and discarding
/// any previously stored error. ERR_ZERO_SIZE_OBJECT errors also trigger
/// pconn race and pinned-connection bookkeeping.
void
FwdState::fail(ErrorState * errorState)
{
    debugs(17, 3, err_type_str[errorState->type] << " \"" << Http::StatusCodeString(errorState->httpStatus) << "\"\n\t" << entry->url());

    delete err; // keep only the latest error
    err = errorState;

    if (!errorState->request) {
        errorState->request = request;
        HTTPMSGLOCK(errorState->request);
    }

    // the rest applies to zero-sized replies only
    if (err->type != ERR_ZERO_SIZE_OBJECT)
        return;

    if (pconnRace == racePossible) {
        debugs(17, 5, HERE << "pconn race happened");
        pconnRace = raceHappened;
    }

    if (ConnStateData *pinned_connection = request->pinnedConnection()) {
        pinned_connection->pinning.zeroReply = true;
        flags.dont_retry = true; // we want to propagate failure to the client
        debugs(17, 4, "zero reply on pinned connection");
    }
}
436
/**
 * Detaches this FwdState from the server connection without closing the
 * FD or generating an abort: the close handler is removed and our
 * serverConn pointer is cleared, but conn itself stays open for the caller.
 */
void
FwdState::unregister(Comm::ConnectionPointer &conn)
{
    debugs(17, 3, HERE << entry->url() );
    assert(serverConnection() == conn);
    assert(Comm::IsConnOpen(conn));
    comm_remove_close_handler(conn->fd, fwdServerClosedWrapper, this);
    serverConn = NULL;
}
449
// \deprecated use unregister(Comm::ConnectionPointer &conn) instead
/// legacy FD-based variant; verifies fd matches and forwards to the
/// connection-based unregister()
void
FwdState::unregister(int fd)
{
    debugs(17, 3, HERE << entry->url() );
    assert(fd == serverConnection()->fd);
    unregister(serverConn);
}
458
/**
 * FooClient modules call fwdComplete() when they are done
 * downloading an object. Then, we either 1) re-forward the
 * request somewhere else if needed, or 2) call storeComplete()
 * to finish it off
 */
void
FwdState::complete()
{
    debugs(17, 3, HERE << entry->url() << "\n\tstatus " << entry->getReply()->sline.status());
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    logReplyStatus(n_tries, entry->getReply()->sline.status());

    if (reforward()) {
        debugs(17, 3, HERE << "re-forwarding " << entry->getReply()->sline.status() << " " << entry->url());

        if (Comm::IsConnOpen(serverConn))
            unregister(serverConn);

        entry->reset(); // discard the reply received so far

        // drop the last path off the selection list. try the next one.
        serverDestinations.erase(serverDestinations.begin());
        startConnectionOrFail();

    } else {
        if (Comm::IsConnOpen(serverConn))
            debugs(17, 3, HERE << "server FD " << serverConnection()->fd << " not re-forwarding status " << entry->getReply()->sline.status());
        else
            debugs(17, 3, HERE << "server (FD closed) not re-forwarding status " << entry->getReply()->sline.status());
        EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
        entry->complete();

        // with the connection closed, nobody else will trigger completed()
        if (!Comm::IsConnOpen(serverConn))
            completed();

        self = NULL; // refcounted
    }
}
502
503 /**** CALLBACK WRAPPERS ************************************************************/
504
505 static void
506 fwdPeerSelectionCompleteWrapper(Comm::ConnectionList *, ErrorState *err, void *data)
507 {
508 FwdState *fwd = (FwdState *) data;
509 if (err)
510 fwd->fail(err);
511 fwd->startConnectionOrFail();
512 }
513
514 static void
515 fwdServerClosedWrapper(const CommCloseCbParams &params)
516 {
517 FwdState *fwd = (FwdState *)params.data;
518 fwd->serverClosed(params.fd);
519 }
520
521 void
522 fwdConnectDoneWrapper(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno, void *data)
523 {
524 FwdState *fwd = (FwdState *) data;
525 fwd->connectDone(conn, status, xerrno);
526 }
527
528 /**** PRIVATE *****************************************************************/
529
/*
 * FwdState::checkRetry
 *
 * Return TRUE if the request SHOULD be retried. This method is
 * called when the HTTP connection fails, or when the connection
 * is closed before reading the end of HTTP headers from the server.
 */
bool
FwdState::checkRetry()
{
    if (shutting_down)
        return false;

    if (!self) { // we have aborted before the server called us back
        debugs(17, 5, HERE << "not retrying because of earlier abort");
        // we will be destroyed when the server clears its Pointer to us
        return false;
    }

    // the entry must still be pending and untouched to accept another reply
    if (entry->store_status != STORE_PENDING)
        return false;

    if (!entry->isEmpty())
        return false;

    // give up when out of tries or out of time
    if (n_tries > Config.forward_max_tries)
        return false;

    if (squid_curtime - start_t > Config.Timeout.forward)
        return false;

    if (flags.dont_retry)
        return false;

    // cannot replay a request body that was already partially consumed
    if (request->bodyNibbled())
        return false;

    // NP: not yet actually connected anywhere. retry is safe.
    if (!flags.connected_okay)
        return true;

    if (!checkRetriable())
        return false;

    return true;
}
576
577 /*
578 * FwdState::checkRetriable
579 *
580 * Return TRUE if this is the kind of request that can be retried
581 * after a failure. If the request is not retriable then we don't
582 * want to risk sending it on a persistent connection. Instead we'll
583 * force it to go on a new HTTP connection.
584 */
585 bool
586 FwdState::checkRetriable()
587 {
588 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
589 // complicated] code required to protect the PUT request body from being
590 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
591 if (request->body_pipe != NULL)
592 return false;
593
594 // RFC2616 9.1 Safe and Idempotent Methods
595 return (request->method.isHttpSafe() || request->method.isIdempotent());
596 }
597
/// Comm close handler for the server connection (installed as
/// fwdServerClosedWrapper): credits the pconn pool, then retries or bails.
void
FwdState::serverClosed(int fd)
{
    // XXX: fd is often -1 here
    debugs(17, 2, "FD " << fd << " " << entry->url() << " after " <<
           (fd >= 0 ? fd_table[fd].pconn.uses : -1) << " requests");
    if (fd >= 0 && serverConnection()->fd == fd)
        fwdPconnPool->noteUses(fd_table[fd].pconn.uses);
    retryOrBail();
}
608
/// Retries forwarding on the next destination when checkRetry() allows it;
/// otherwise finalizes: notifies the body sender, stops the peer clock, and
/// (during shutdown) appends a shutdown error to the entry.
void
FwdState::retryOrBail()
{
    if (checkRetry()) {
        debugs(17, 3, HERE << "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
        // we should retry the same destination if it failed due to pconn race
        if (pconnRace == raceHappened)
            debugs(17, 4, HERE << "retrying the same destination");
        else
            serverDestinations.erase(serverDestinations.begin()); // last one failed. try another.
        startConnectionOrFail();
        return;
    }

    // TODO: should we call completed() here and move doneWithRetries there?
    doneWithRetries();

    request->hier.stopPeerClock(false);

    if (self != NULL && !err && shutting_down) {
        // the entry has no error yet; blame the shutdown explicitly
        ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
        errorAppendEntry(entry, anErr);
    }

    self = NULL; // refcounted
}
635
636 // If the Server quits before nibbling at the request body, the body sender
637 // will not know (so that we can retry). Call this if we will not retry. We
638 // will notify the sender so that it does not get stuck waiting for space.
639 void
640 FwdState::doneWithRetries()
641 {
642 if (request && request->body_pipe != NULL)
643 request->body_pipe->expectNoConsumption();
644 }
645
// called by the server that failed after calling unregister()
void
FwdState::handleUnregisteredServerEnd()
{
    debugs(17, 2, HERE << "self=" << self << " err=" << err << ' ' << entry->url());
    // unregister() already detached us, so no connection may be open here
    assert(!Comm::IsConnOpen(serverConn));
    retryOrBail();
}
654
/// Comm::ConnOpener result callback (via fwdConnectDoneWrapper): on failure,
/// records the error and retries or bails; on success, adopts the connection
/// and either starts TLS negotiation or dispatches the request immediately.
void
FwdState::connectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno)
{
    if (status != Comm::OK) {
        ErrorState *const anErr = makeConnectingError(ERR_CONNECT_FAIL);
        anErr->xerrno = xerrno;
        fail(anErr);

        /* it might have been a timeout with a partially open link */
        if (conn != NULL) {
            if (conn->getPeer())
                peerConnectFailed(conn->getPeer());

            conn->close();
        }
        retryOrBail();
        return;
    }

    serverConn = conn;
    flags.connected_okay = true;

    debugs(17, 3, HERE << serverConnection() << ": '" << entry->url() << "'" );

    comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);

    if (serverConnection()->getPeer())
        peerConnectSucceded(serverConnection()->getPeer());

#if USE_OPENSSL
    // negotiate TLS first when talking to an SSL cache_peer, to an https://
    // origin, or when peeking (SslBump); dispatch() happens in the callback
    if (!request->flags.pinned) {
        if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl) ||
                (!serverConnection()->getPeer() && request->url.getScheme() == AnyP::PROTO_HTTPS) ||
                request->flags.sslPeek) {

            HttpRequest::Pointer requestPointer = request;
            AsyncCall::Pointer callback = asyncCall(17,4,
                                                    "FwdState::ConnectedToPeer",
                                                    FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
            // Use positive timeout when less than one second is left.
            const time_t sslNegotiationTimeout = max(static_cast<time_t>(1), timeLeft());
            Ssl::PeerConnector *connector =
                new Ssl::PeerConnector(requestPointer, serverConnection(), clientConn, callback, sslNegotiationTimeout);
            AsyncJob::Start(connector); // will call our callback
            return;
        }
    }
#endif

    // should reach ConnStateData before the dispatched Client job starts
    CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                 ConnStateData::notePeerConnection, serverConnection());

    dispatch();
}
710
#if USE_OPENSSL
/// Ssl::PeerConnector callback: dispatches the request after a successful
/// TLS handshake or gives up forwarding after a handshake failure.
void
FwdState::connectedToPeer(Ssl::PeerConnectorAnswer &answer)
{
    if (ErrorState *error = answer.error.get()) {
        fail(error); // we now own the error
        answer.error.clear(); // preserve error for errorSendComplete()
        self = NULL;
        return;
    }

    dispatch();
}
#endif
725
/// Handles a connection-establishment timeout for the top destination:
/// records a 504 error (if the client has not received anything yet),
/// marks the peer down, and closes the half-open connection.
void
FwdState::connectTimeout(int fd)
{
    debugs(17, 2, "fwdConnectTimeout: FD " << fd << ": '" << entry->url() << "'" );
    assert(serverDestinations[0] != NULL);
    assert(fd == serverDestinations[0]->fd);

    if (entry->isEmpty()) {
        // nothing sent yet, so an error page can still be delivered
        ErrorState *anErr = new ErrorState(ERR_CONNECT_FAIL, Http::scGatewayTimeout, request);
        anErr->xerrno = ETIMEDOUT;
        fail(anErr);

        /* This marks the peer DOWN ... */
        if (serverDestinations[0]->getPeer())
            peerConnectFailed(serverDestinations[0]->getPeer());
    }

    if (Comm::IsConnOpen(serverDestinations[0])) {
        serverDestinations[0]->close();
    }
}
747
748 time_t
749 FwdState::timeLeft() const
750 {
751 /* connection timeout */
752 int ctimeout;
753 if (serverDestinations[0]->getPeer()) {
754 ctimeout = serverDestinations[0]->getPeer()->connect_timeout > 0 ?
755 serverDestinations[0]->getPeer()->connect_timeout : Config.Timeout.peer_connect;
756 } else {
757 ctimeout = Config.Timeout.connect;
758 }
759
760 /* calculate total forwarding timeout ??? */
761 int ftimeout = Config.Timeout.forward - (squid_curtime - start_t);
762 if (ftimeout < 0)
763 ftimeout = 5;
764
765 if (ftimeout < ctimeout)
766 return (time_t)ftimeout;
767 else
768 return (time_t)ctimeout;
769 }
770
/**
 * Called after forwarding path selection (via peer select) has taken place
 * and whenever forwarding needs to attempt a new connection (routing failover).
 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
 * Tries, in order: the pinned connection (PINNED destinations), an idle
 * persistent connection from the pool, and finally a brand new connection
 * opened asynchronously by Comm::ConnOpener.
 */
void
FwdState::connectStart()
{
    assert(serverDestinations.size() > 0);

    debugs(17, 3, "fwdConnectStart: " << entry->url());

    request->hier.startPeerClock();

    // refuse to relay an SslBump'ed connection through a parent proxy
    if (serverDestinations[0]->getPeer() && request->flags.sslBumped) {
        debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parent proxy are not allowed");
        ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request);
        fail(anErr);
        self = NULL; // refcounted
        return;
    }

    request->flags.pinned = false; // XXX: what if the ConnStateData set this to flag existing credentials?
    // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
    // XXX: also, logs will now lie if pinning is broken and leads to an error message.
    if (serverDestinations[0]->peerType == PINNED) {
        ConnStateData *pinned_connection = request->pinnedConnection();
        debugs(17,7, "pinned peer connection: " << pinned_connection);
        // pinned_connection may become nil after a pconn race
        if (pinned_connection)
            serverConn = pinned_connection->borrowPinnedConnection(request, serverDestinations[0]->getPeer());
        else
            serverConn = NULL;
        if (Comm::IsConnOpen(serverConn)) {
            pinned_connection->stopPinnedConnectionMonitoring();
            flags.connected_okay = true;
            ++n_tries;
            request->flags.pinned = true;
            request->hier.note(serverConn, pinned_connection->pinning.host);
            if (pinned_connection->pinnedAuth())
                request->flags.auth = true;
            comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);

            /* Update server side TOS and Netfilter mark on the connection. */
            if (Ip::Qos::TheConfig.isAclTosActive()) {
                debugs(17, 3, HERE << "setting tos for pinned connection to " << (int)serverConn->tos );
                serverConn->tos = GetTosToServer(request);
                Ip::Qos::setSockTos(serverConn, serverConn->tos);
            }
#if SO_MARK
            if (Ip::Qos::TheConfig.isAclNfmarkActive()) {
                serverConn->nfmark = GetNfmarkToServer(request);
                Ip::Qos::setSockNfmark(serverConn, serverConn->nfmark);
            }
#endif

            // the server may close the pinned connection before this request
            pconnRace = racePossible;
            dispatch();
            return;
        }
        // Pinned connection failure.
        debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
        ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
        fail(anErr);
        self = NULL; // refcounted
        return;
    }

    // Use pconn to avoid opening a new connection.
    const char *host = NULL;
    if (!serverDestinations[0]->getPeer())
        host = request->GetHost(); // used by pconnPop() and ConnOpener below

    Comm::ConnectionPointer temp;
    // Avoid pconns after races so that the same client does not suffer twice.
    // This does not increase the total number of connections because we just
    // closed the connection that failed the race. And re-pinning assumes this.
    if (pconnRace != raceHappened)
        temp = pconnPop(serverDestinations[0], host);

    const bool openedPconn = Comm::IsConnOpen(temp);
    pconnRace = openedPconn ? racePossible : raceImpossible;

    // if we found an open persistent connection to use. use it.
    if (openedPconn) {
        serverConn = temp;
        flags.connected_okay = true;
        debugs(17, 3, HERE << "reusing pconn " << serverConnection());
        ++n_tries;

        comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);

        /* Update server side TOS and Netfilter mark on the connection. */
        if (Ip::Qos::TheConfig.isAclTosActive()) {
            const tos_t tos = GetTosToServer(request);
            Ip::Qos::setSockTos(temp, tos);
        }
#if SO_MARK
        if (Ip::Qos::TheConfig.isAclNfmarkActive()) {
            const nfmark_t nfmark = GetNfmarkToServer(request);
            Ip::Qos::setSockNfmark(temp, nfmark);
        }
#endif

        dispatch();
        return;
    }

    // We will try to open a new connection, possibly to the same destination.
    // We reset serverDestinations[0] in case we are using it again because
    // ConnOpener modifies its destination argument.
    serverDestinations[0]->local.port(0);
    serverConn = NULL;

#if URL_CHECKSUM_DEBUG
    entry->mem_obj->checkUrlChecksum();
#endif

    GetMarkingsToServer(request, *serverDestinations[0]);

    calls.connector = commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper, this));
    Comm::ConnOpener *cs = new Comm::ConnOpener(serverDestinations[0], calls.connector, timeLeft());
    if (host)
        cs->setHost(host);
    AsyncJob::Start(cs); // calls fwdConnectDoneWrapper() when done
}
898
/// Hands the established server connection to the protocol-specific client
/// module (http, FTP, gopher, whois, ...) that will send the request, after
/// per-dispatch bookkeeping (netdb ping, QoS mark retrieval, peer stats).
void
FwdState::dispatch()
{
    debugs(17, 3, clientConn << ": Fetching " << request->method << ' ' << entry->url());
    /*
     * Assert that server_fd is set. This is to guarantee that fwdState
     * is attached to something and will be deallocated when server_fd
     * is closed.
     */
    assert(Comm::IsConnOpen(serverConn));

    fd_note(serverConnection()->fd, entry->url());

    fd_table[serverConnection()->fd].noteUse();

    /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
    assert(entry->ping_status != PING_WAITING);

    assert(entry->locked());

    EBIT_SET(entry->flags, ENTRY_DISPATCHED);

    netdbPingSite(request->GetHost());

    /* Retrieves remote server TOS or MARK value, and stores it as part of the
     * original client request FD object. It is later used to forward
     * remote server's TOS/MARK in the response to the client in case of a MISS.
     */
    if (Ip::Qos::TheConfig.isHitNfmarkActive()) {
        if (Comm::IsConnOpen(clientConn) && Comm::IsConnOpen(serverConnection())) {
            fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
            /* Get the netfilter mark for the connection */
            Ip::Qos::getNfmarkFromServer(serverConnection(), clientFde);
        }
    }

#if _SQUID_LINUX_
    /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
    if (Ip::Qos::TheConfig.isHitTosActive()) {
        if (Comm::IsConnOpen(clientConn)) {
            fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
            /* Get the TOS value for the packet */
            Ip::Qos::getTosFromServer(serverConnection(), clientFde);
        }
    }
#endif

#if USE_OPENSSL
    if (request->flags.sslPeek) {
        // while peeking, hand the server connection back to the client side
        CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                     ConnStateData::httpsPeeked, serverConnection());
        unregister(serverConn); // async call owns it now
        complete(); // destroys us
        return;
    }
#endif

    if (serverConnection()->getPeer() != NULL) {
        // going through a cache_peer: count the fetch and relay peer options
        ++ serverConnection()->getPeer()->stats.fetches;
        request->peer_login = serverConnection()->getPeer()->login;
        request->peer_domain = serverConnection()->getPeer()->domain;
        httpStart(this);
    } else {
        // going DIRECT: pick the client module by URL scheme
        assert(!request->flags.sslPeek);
        request->peer_login = NULL;
        request->peer_domain = NULL;

        switch (request->url.getScheme()) {
#if USE_OPENSSL

        case AnyP::PROTO_HTTPS:
            httpStart(this);
            break;
#endif

        case AnyP::PROTO_HTTP:
            httpStart(this);
            break;

        case AnyP::PROTO_GOPHER:
            gopherStart(this);
            break;

        case AnyP::PROTO_FTP:
            if (request->flags.ftpNative)
                Ftp::StartRelay(this);
            else
                Ftp::StartGateway(this);
            break;

        case AnyP::PROTO_CACHE_OBJECT:

        case AnyP::PROTO_URN:
            fatal_dump("Should never get here");
            break;

        case AnyP::PROTO_WHOIS:
            whoisStart(this);
            break;

        case AnyP::PROTO_WAIS:  /* Not implemented */

        default:
            debugs(17, DBG_IMPORTANT, "WARNING: Cannot retrieve '" << entry->url() << "'.");
            ErrorState *anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request);
            fail(anErr);
            // Set the dont_retry flag because this is not a transient (network) error.
            flags.dont_retry = true;
            if (Comm::IsConnOpen(serverConn)) {
                serverConn->close();
            }
            break;
        }
    }
}
1014
1015 /*
1016 * FwdState::reforward
1017 *
1018 * returns TRUE if the transaction SHOULD be re-forwarded to the
1019 * next choice in the serverDestinations list. This method is called when
1020 * peer communication completes normally, or experiences
1021 * some error after receiving the end of HTTP headers.
1022 */
1023 int
1024 FwdState::reforward()
1025 {
1026 StoreEntry *e = entry;
1027
1028 if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
1029 debugs(17, 3, HERE << "entry aborted");
1030 return 0;
1031 }
1032
1033 assert(e->store_status == STORE_PENDING);
1034 assert(e->mem_obj);
1035 #if URL_CHECKSUM_DEBUG
1036
1037 e->mem_obj->checkUrlChecksum();
1038 #endif
1039
1040 debugs(17, 3, HERE << e->url() << "?" );
1041
1042 if (!EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
1043 debugs(17, 3, HERE << "No, ENTRY_FWD_HDR_WAIT isn't set");
1044 return 0;
1045 }
1046
1047 if (n_tries > Config.forward_max_tries)
1048 return 0;
1049
1050 if (request->bodyNibbled())
1051 return 0;
1052
1053 if (serverDestinations.size() <= 1) {
1054 // NP: <= 1 since total count includes the recently failed one.
1055 debugs(17, 3, HERE << "No alternative forwarding paths left");
1056 return 0;
1057 }
1058
1059 const Http::StatusCode s = e->getReply()->sline.status();
1060 debugs(17, 3, HERE << "status " << s);
1061 return reforwardableStatus(s);
1062 }
1063
1064 /**
1065 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1066 * on whether this is a validation request. RFC 2616 says that we MUST reply
1067 * with "504 Gateway Timeout" if validation fails and cached reply has
1068 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
1069 */
1070 ErrorState *
1071 FwdState::makeConnectingError(const err_type type) const
1072 {
1073 return new ErrorState(type, request->flags.needValidation ?
1074 Http::scGatewayTimeout : Http::scServiceUnavailable, request);
1075 }
1076
1077 static void
1078 fwdStats(StoreEntry * s)
1079 {
1080 int i;
1081 int j;
1082 storeAppendPrintf(s, "Status");
1083
1084 for (j = 1; j < MAX_FWD_STATS_IDX; ++j) {
1085 storeAppendPrintf(s, "\ttry#%d", j);
1086 }
1087
1088 storeAppendPrintf(s, "\n");
1089
1090 for (i = 0; i <= (int) Http::scInvalidHeader; ++i) {
1091 if (FwdReplyCodes[0][i] == 0)
1092 continue;
1093
1094 storeAppendPrintf(s, "%3d", i);
1095
1096 for (j = 0; j <= MAX_FWD_STATS_IDX; ++j) {
1097 storeAppendPrintf(s, "\t%d", FwdReplyCodes[j][i]);
1098 }
1099
1100 storeAppendPrintf(s, "\n");
1101 }
1102 }
1103
1104 /**** STATIC MEMBER FUNCTIONS *************************************************/
1105
1106 bool
1107 FwdState::reforwardableStatus(const Http::StatusCode s) const
1108 {
1109 switch (s) {
1110
1111 case Http::scBadGateway:
1112
1113 case Http::scGatewayTimeout:
1114 return true;
1115
1116 case Http::scForbidden:
1117
1118 case Http::scInternalServerError:
1119
1120 case Http::scNotImplemented:
1121
1122 case Http::scServiceUnavailable:
1123 return Config.retry.onerror;
1124
1125 default:
1126 return false;
1127 }
1128
1129 /* NOTREACHED */
1130 }
1131
1132 /**
1133 * Decide where details need to be gathered to correctly describe a persistent connection.
1134 * What is needed:
1135 * - the address/port details about this link
1136 * - domain name of server at other end of this link (either peer or requested host)
1137 */
1138 void
1139 FwdState::pconnPush(Comm::ConnectionPointer &conn, const char *domain)
1140 {
1141 if (conn->getPeer()) {
1142 fwdPconnPool->push(conn, NULL);
1143 } else {
1144 fwdPconnPool->push(conn, domain);
1145 }
1146 }
1147
1148 Comm::ConnectionPointer
1149 FwdState::pconnPop(const Comm::ConnectionPointer &dest, const char *domain)
1150 {
1151 // always call shared pool first because we need to close an idle
1152 // connection there if we have to use a standby connection.
1153 Comm::ConnectionPointer conn = fwdPconnPool->pop(dest, domain, checkRetriable());
1154 if (!Comm::IsConnOpen(conn)) {
1155 // either there was no pconn to pop or this is not a retriable xaction
1156 if (CachePeer *peer = dest->getPeer()) {
1157 if (peer->standby.pool)
1158 conn = peer->standby.pool->pop(dest, domain, true);
1159 }
1160 }
1161 return conn; // open, closed, or nil
1162 }
1163
/// One-time module initialization: currently only registers the
/// "forward" cache manager report.
void
FwdState::initModule()
{
    RegisterWithCacheManager();
}
1169
/// Register the "forward" report (rendered by fwdStats) with the cache
/// manager. NOTE(review): trailing flags appear to be pw_req=0, atomic=1
/// per the usual Mgr::RegisterAction signature — confirm against
/// mgr/Registration.h.
void
FwdState::RegisterWithCacheManager(void)
{
    Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats, 0, 1);
}
1175
1176 void
1177 FwdState::logReplyStatus(int tries, const Http::StatusCode status)
1178 {
1179 if (status > Http::scInvalidHeader)
1180 return;
1181
1182 assert(tries >= 0);
1183
1184 if (tries > MAX_FWD_STATS_IDX)
1185 tries = MAX_FWD_STATS_IDX;
1186
1187 ++ FwdReplyCodes[tries][status];
1188 }
1189
1190 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1191
1192 /*
1193 * DPW 2007-05-19
1194 * Formerly static, but now used by client_side_request.cc
1195 */
1196 /// Checks for a TOS value to apply depending on the ACL
1197 tos_t
1198 aclMapTOS(acl_tos * head, ACLChecklist * ch)
1199 {
1200 acl_tos *l;
1201
1202 for (l = head; l; l = l->next) {
1203 if (!l->aclList || ch->fastCheck(l->aclList) == ACCESS_ALLOWED)
1204 return l->tos;
1205 }
1206
1207 return 0;
1208 }
1209
1210 /// Checks for a netfilter mark value to apply depending on the ACL
1211 nfmark_t
1212 aclMapNfmark(acl_nfmark * head, ACLChecklist * ch)
1213 {
1214 acl_nfmark *l;
1215
1216 for (l = head; l; l = l->next) {
1217 if (!l->aclList || ch->fastCheck(l->aclList) == ACCESS_ALLOWED)
1218 return l->nfmark;
1219 }
1220
1221 return 0;
1222 }
1223
1224 void
1225 getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn)
1226 {
1227 // skip if an outgoing address is already set.
1228 if (!conn->local.isAnyAddr()) return;
1229
1230 // ensure that at minimum the wildcard local matches remote protocol
1231 if (conn->remote.isIPv4())
1232 conn->local.setIPv4();
1233
1234 // maybe use TPROXY client address
1235 if (request && request->flags.spoofClientIp) {
1236 if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
1237 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1238 if (Config.onoff.tproxy_uses_indirect_client)
1239 conn->local = request->indirect_client_addr;
1240 else
1241 #endif
1242 conn->local = request->client_addr;
1243 // some flags need setting on the socket to use this address
1244 conn->flags |= COMM_DOBIND;
1245 conn->flags |= COMM_TRANSPARENT;
1246 return;
1247 }
1248 // else no tproxy today ...
1249 }
1250
1251 if (!Config.accessList.outgoing_address) {
1252 return; // anything will do.
1253 }
1254
1255 ACLFilledChecklist ch(NULL, request, NULL);
1256 ch.dst_peer = conn->getPeer();
1257 ch.dst_addr = conn->remote;
1258
1259 // TODO use the connection details in ACL.
1260 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1261
1262 AclAddress *l;
1263 for (l = Config.accessList.outgoing_address; l; l = l->next) {
1264
1265 /* check if the outgoing address is usable to the destination */
1266 if (conn->remote.isIPv4() != l->addr.isIPv4()) continue;
1267
1268 /* check ACLs for this outgoing address */
1269 if (!l->aclList || ch.fastCheck(l->aclList) == ACCESS_ALLOWED) {
1270 conn->local = l->addr;
1271 return;
1272 }
1273 }
1274 }
1275
1276 tos_t
1277 GetTosToServer(HttpRequest * request)
1278 {
1279 ACLFilledChecklist ch(NULL, request, NULL);
1280 return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
1281 }
1282
1283 nfmark_t
1284 GetNfmarkToServer(HttpRequest * request)
1285 {
1286 ACLFilledChecklist ch(NULL, request, NULL);
1287 return aclMapNfmark(Ip::Qos::TheConfig.nfmarkToServer, &ch);
1288 }
1289
/// Compute and store on the connection the TOS and netfilter mark to be
/// applied to server-side traffic, per the qos_flows configuration.
void
GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
{
    // Get the server side TOS and Netfilter mark to be set on the connection.
    if (Ip::Qos::TheConfig.isAclTosActive()) {
        conn.tos = GetTosToServer(request);
        debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos));
    }

#if SO_MARK && USE_LIBCAP
    // netfilter marking needs SO_MARK socket support plus libcap at build time
    conn.nfmark = GetNfmarkToServer(request);
    debugs(17, 3, "from " << conn.local << " netfilter mark " << conn.nfmark);
#else
    conn.nfmark = 0;
#endif
}
1306