]> git.ipfire.org Git - thirdparty/squid.git/blob - src/FwdState.cc
merge from trunk r13423
[thirdparty/squid.git] / src / FwdState.cc
1 /*
2 * DEBUG: section 17 Request Forwarding
3 * AUTHOR: Duane Wessels
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 */
32
33 #include "squid.h"
34 #include "AccessLogEntry.h"
35 #include "acl/AclAddress.h"
36 #include "acl/FilledChecklist.h"
37 #include "acl/Gadgets.h"
38 #include "anyp/PortCfg.h"
39 #include "CacheManager.h"
40 #include "CachePeer.h"
41 #include "client_side.h"
42 #include "comm/Connection.h"
43 #include "comm/ConnOpener.h"
44 #include "comm/Loops.h"
45 #include "CommCalls.h"
46 #include "errorpage.h"
47 #include "event.h"
48 #include "fd.h"
49 #include "fde.h"
50 #include "ftp.h"
51 #include "FtpGatewayServer.h"
52 #include "FwdState.h"
53 #include "globals.h"
54 #include "gopher.h"
55 #include "hier_code.h"
56 #include "http.h"
57 #include "HttpReply.h"
58 #include "HttpRequest.h"
59 #include "icmp/net_db.h"
60 #include "internal.h"
61 #include "ip/Intercept.h"
62 #include "ip/QosConfig.h"
63 #include "ip/tools.h"
64 #include "MemObject.h"
65 #include "mgr/Registration.h"
66 #include "neighbors.h"
67 #include "pconn.h"
68 #include "PeerPoolMgr.h"
69 #include "PeerSelectState.h"
70 #include "SquidConfig.h"
71 #include "SquidTime.h"
72 #include "Store.h"
73 #include "StoreClient.h"
74 #include "urn.h"
75 #include "whois.h"
76 #if USE_OPENSSL
77 #include "ssl/cert_validate_message.h"
78 #include "ssl/Config.h"
79 #include "ssl/ErrorDetail.h"
80 #include "ssl/helper.h"
81 #include "ssl/PeerConnector.h"
82 #include "ssl/ServerBump.h"
83 #include "ssl/support.h"
84 #endif
85 #if HAVE_ERRNO_H
86 #include <errno.h>
87 #endif
88
/* callback wrappers, defined below (typedefs come from their subsystems) */
static PSC fwdPeerSelectionCompleteWrapper;  // peer-selection completion
static CLCB fwdServerClosedWrapper;          // server connection close
static CNCB fwdConnectDoneWrapper;           // connection-open completion

static OBJH fwdStats; // statistics report handler (OBJH)

/// largest retry count tracked per reply status in FwdReplyCodes
#define MAX_FWD_STATS_IDX 9
// counters indexed by [number of tries][HTTP reply status]
static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];

/// pool of idle server-side connections available for reuse
static PconnPool *fwdPconnPool = new PconnPool("server-side", NULL);
CBDATA_CLASS_INIT(FwdState);
100
#if USE_OPENSSL
/// Glues an Ssl::PeerConnector answer callback to a FwdState method while
/// guarding against the FwdState being destroyed before the call fires.
class FwdStatePeerAnswerDialer: public CallDialer, public Ssl::PeerConnector::CbDialer
{
public:
    typedef void (FwdState::*Method)(Ssl::PeerConnectorAnswer &);

    FwdStatePeerAnswerDialer(Method method, FwdState *fwd):
        method_(method), fwd_(fwd), answer_() {}

    /* CallDialer API */
    virtual bool canDial(AsyncCall &call) { return fwd_.valid(); } // skip if FwdState is gone
    void dial(AsyncCall &call) { ((&(*fwd_))->*method_)(answer_); }
    virtual void print(std::ostream &os) const {
        os << '(' << fwd_.get() << ", " << answer_ << ')';
    }

    /* Ssl::PeerConnector::CbDialer API */
    virtual Ssl::PeerConnectorAnswer &answer() { return answer_; }

private:
    Method method_;                   ///< FwdState method to call back
    CbcPointer<FwdState> fwd_;        ///< callback recipient; may become invalid
    Ssl::PeerConnectorAnswer answer_; ///< filled in by Ssl::PeerConnector
};
#endif
126
127 void
128 FwdState::abort(void* d)
129 {
130 FwdState* fwd = (FwdState*)d;
131 Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
132
133 if (Comm::IsConnOpen(fwd->serverConnection())) {
134 fwd->closeServerConnection("store entry aborted");
135 } else {
136 debugs(17, 7, HERE << "store entry aborted; no connection to close");
137 }
138 fwd->serverDestinations.clear();
139 fwd->self = NULL;
140 }
141
/// Closes the current server connection: detaches our close handler first
/// (so the close does not re-enter us), credits the pconn pool with the
/// connection's use count, then closes. `reason` is for debugging only.
void
FwdState::closeServerConnection(const char *reason)
{
    debugs(17, 3, "because " << reason << "; " << serverConn);
    comm_remove_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
    fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
    serverConn->close();
}
150
151 /**** PUBLIC INTERFACE ********************************************************/
152
/// Prepares forwarding of one client request: remembers the client
/// connection, store entry, and request (locking the latter two), and
/// reserves room for up to forward_max_tries candidate destinations.
FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRequest * r, const AccessLogEntryPointer &alp):
    al(alp)
{
    debugs(17, 2, HERE << "Forwarding client request " << client << ", url=" << e->url() );
    entry = e;
    clientConn = client;
    request = r;
    HTTPMSGLOCK(request); // keep the request alive while forwarding
    pconnRace = raceImpossible; // no pconn reuse attempted yet
    start_t = squid_curtime; // start time, used against Config.Timeout.forward
    serverDestinations.reserve(Config.forward_max_tries);
    e->lock("FwdState"); // keep the entry alive; unlocked in the destructor
    EBIT_SET(e->flags, ENTRY_FWD_HDR_WAIT);
}
167
// Called once, right after object creation, when it is safe to set self
void FwdState::start(Pointer aSelf)
{
    // Protect ourselves from being destroyed when the only Server pointing
    // to us is gone (while we expect to talk to more Servers later).
    // Once we set self, we are responsible for clearing it when we do not
    // expect to talk to any servers.
    self = aSelf; // refcounted

    // We hope that either the store entry aborts or peer is selected.
    // Otherwise we are going to leak our object.

    // abort() clears self and closes any open server connection
    entry->registerAbort(FwdState::abort, this);

#if STRICT_ORIGINAL_DST
    // Bug 3243: CVE 2009-0801
    // Bypass of browser same-origin access control in intercepted communication
    // To resolve this we must force DIRECT and only to the original client destination.
    const bool isIntercepted = request && !request->flags.redirected && (request->flags.intercepted || request->flags.interceptTproxy);
    const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
    if (isIntercepted && useOriginalDst) {
        selectPeerForIntercepted();
        // 3.2 does not support re-wrapping inside CONNECT.
        // our only alternative is to fake destination "found" and continue with the forwarding.
        startConnectionOrFail();
        return;
    }
#endif

    // do full route options selection
    peerSelect(&serverDestinations, request, al, entry, fwdPeerSelectionCompleteWrapper, this);
}
200
#if STRICT_ORIGINAL_DST
/// bypasses peerSelect() when dealing with intercepted requests:
/// prefers the client's pinned connection and otherwise forces the
/// client's original destination address (DIRECT).
void
FwdState::selectPeerForIntercepted()
{
    // use pinned connection if available
    Comm::ConnectionPointer p;
    if (ConnStateData *client = request->pinnedConnection()) {
        p = client->validatePinnedConnection(request, NULL);
        if (Comm::IsConnOpen(p)) {
            /* duplicate peerSelectPinned() effects */
            p->peerType = PINNED;
            entry->ping_status = PING_DONE; /* Skip ICP */

            debugs(17, 3, "reusing a pinned conn: " << *p);
            serverDestinations.push_back(p);
        } else {
            debugs(17,2, "Pinned connection is not valid: " << p);
            ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
            fail(anErr);
        }
        // Either use the valid pinned connection or fail if it is invalid.
        return;
    }

    // use client original destination as second preferred choice
    p = new Comm::Connection();
    p->peerType = ORIGINAL_DST;
    p->remote = clientConn->local; // the intercepted destination address
    getOutgoingAddress(request, p);

    debugs(17, 3, HERE << "using client original destination: " << *p);
    serverDestinations.push_back(p);
}
#endif
236
/// Finalizes the store entry when forwarding ends: flushes any pending
/// error into the entry, or completes/releases an entry that received
/// content. Runs at most once; repeated calls are logged and ignored.
void
FwdState::completed()
{
    if (flags.forward_completed) {
        debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
        return;
    }

    flags.forward_completed = true;

    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        debugs(17, 3, HERE << "entry aborted");
        return ;
    }

#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    if (entry->store_status == STORE_PENDING) {
        if (entry->isEmpty()) {
            if (!err) // we quit (e.g., fd closed) before an error or content
                fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request));
            assert(err);
            errorAppendEntry(entry, err); // frees err
            err = NULL;
#if USE_OPENSSL
            if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
                // tell the client side that peeking produced no connection
                CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                             ConnStateData::httpsPeeked, Comm::ConnectionPointer(NULL));
            }
#endif
        } else {
            // the entry already has some content; finish it as-is
            EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
            entry->complete();
            entry->releaseRequest();
        }
    }

    if (storePendingNClients(entry) > 0)
        assert(!EBIT_TEST(entry->flags, ENTRY_FWD_HDR_WAIT));

}
281
/// Releases all held resources: finalizes the entry via completed() if
/// still pending, unlocks the request and entry, cancels any pending
/// connect callback, and closes an open server connection.
FwdState::~FwdState()
{
    debugs(17, 3, HERE << "FwdState destructor starting");

    if (! flags.forward_completed)
        completed();

    doneWithRetries(); // tell the body sender we will not retry

    HTTPMSGUNLOCK(request);

    delete err;

    entry->unregisterAbort();

    entry->unlock("FwdState"); // paired with lock() in the constructor

    entry = NULL;

    if (calls.connector != NULL) {
        calls.connector->cancel("FwdState destructed");
        calls.connector = NULL;
    }

    if (Comm::IsConnOpen(serverConn))
        closeServerConnection("~FwdState");

    serverDestinations.clear();

    debugs(17, 3, HERE << "FwdState destructor done");
}
313
/**
 * This is the entry point for client-side to start forwarding
 * a transaction.  It is a static method that may or may not
 * allocate a FwdState: internal, cache-manager, and URN requests
 * are routed to their dedicated handlers instead.
 */
void
FwdState::Start(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request, const AccessLogEntryPointer &al)
{
    /** \note
     * client_addr == no_addr indicates this is an "internal" request
     * from peer_digest.c, asn.c, netdb.c, etc and should always
     * be allowed.  yuck, I know.
     */

    if ( Config.accessList.miss && !request->client_addr.isNoAddr() &&
            !request->flags.internal && request->url.getScheme() != AnyP::PROTO_CACHE_OBJECT) {
        /**
         * Check if this host is allowed to fetch MISSES from us (miss_access).
         * Intentionally replace the src_addr automatically selected by the checklist code
         * we do NOT want the indirect client address to be tested here.
         */
        ACLFilledChecklist ch(Config.accessList.miss, request, NULL);
        ch.src_addr = request->client_addr;
        if (ch.fastCheck() == ACCESS_DENIED) {
            // prefer the deny_info page configured for the matched ACL
            err_type page_id;
            page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName, 1);

            if (page_id == ERR_NONE)
                page_id = ERR_FORWARDING_DENIED;

            ErrorState *anErr = new ErrorState(page_id, Http::scForbidden, request);
            errorAppendEntry(entry, anErr); // frees anErr
            return;
        }
    }

    debugs(17, 3, HERE << "'" << entry->url() << "'");
    /*
     * This seems like an odd place to bind mem_obj and request.
     * Might want to assert that request is NULL at this point
     */
    entry->mem_obj->request = request;
    HTTPMSGLOCK(entry->mem_obj->request);
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    if (shutting_down) {
        /* more yuck */
        ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
        errorAppendEntry(entry, anErr); // frees anErr
        return;
    }

    // requests Squid generates for itself bypass forwarding
    if (request->flags.internal) {
        debugs(17, 2, "calling internalStart() due to request flag");
        internalStart(clientConn, request, entry);
        return;
    }

    switch (request->url.getScheme()) {

    case AnyP::PROTO_CACHE_OBJECT:
        debugs(17, 2, "calling CacheManager due to request scheme " << request->url.getScheme());
        CacheManager::GetInstance()->Start(clientConn, request, entry);
        return;

    case AnyP::PROTO_URN:
        urnStart(request, entry);
        return;

    default:
        // regular transaction: allocate a FwdState to manage forwarding
        FwdState::Pointer fwd = new FwdState(clientConn, entry, request, al);
        fwd->start(fwd);
        return;
    }

    /* NOTREACHED */
}
394
/// Compatibility entry point for callers without an AccessLogEntry.
void
FwdState::fwdStart(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request)
{
    // Hides AccessLogEntry.h from code that does not supply ALE anyway.
    Start(clientConn, entry, request, NULL);
}
401
402 void
403 FwdState::startConnectionOrFail()
404 {
405 debugs(17, 3, HERE << entry->url());
406
407 if (serverDestinations.size() > 0) {
408 // Ditch error page if it was created before.
409 // A new one will be created if there's another problem
410 delete err;
411 err = NULL;
412
413 // Update the logging information about this new server connection.
414 // Done here before anything else so the errors get logged for
415 // this server link regardless of what happens when connecting to it.
416 // IF sucessfuly connected this top destination will become the serverConnection().
417 request->hier.note(serverDestinations[0], request->GetHost());
418 request->clearError();
419
420 connectStart();
421 } else {
422 debugs(17, 3, HERE << "Connection failed: " << entry->url());
423 if (!err) {
424 ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request);
425 fail(anErr);
426 } // else use actual error from last connection attempt
427 self = NULL; // refcounted
428 }
429 }
430
/// Records a forwarding error to report to the client, replacing any
/// previously recorded one. ERR_ZERO_SIZE_OBJECT errors additionally
/// feed pconn race detection and pinned-connection zero-reply handling.
void
FwdState::fail(ErrorState * errorState)
{
    debugs(17, 3, err_type_str[errorState->type] << " \"" << Http::StatusCodeString(errorState->httpStatus) << "\"\n\t" << entry->url());

    delete err; // discard any earlier error
    err = errorState;

    if (!errorState->request) {
        errorState->request = request;
        HTTPMSGLOCK(errorState->request);
    }

    if (err->type != ERR_ZERO_SIZE_OBJECT)
        return;

    // a zero-size reply after reusing a connection suggests a pconn race
    if (pconnRace == racePossible) {
        debugs(17, 5, HERE << "pconn race happened");
        pconnRace = raceHappened;
    }

    if (ConnStateData *pinned_connection = request->pinnedConnection()) {
        pinned_connection->pinning.zeroReply = true;
        flags.dont_retry = true; // we want to propagate failure to the client
        debugs(17, 4, "zero reply on pinned connection");
    }
}
458
/**
 * Frees fwdState without closing FD or generating an abort: detaches our
 * close handler and forgets the connection, leaving ownership of the
 * still-open connection with the caller.
 */
void
FwdState::unregister(Comm::ConnectionPointer &conn)
{
    debugs(17, 3, HERE << entry->url() );
    assert(serverConnection() == conn); // may only detach the current server
    assert(Comm::IsConnOpen(conn));
    comm_remove_close_handler(conn->fd, fwdServerClosedWrapper, this);
    serverConn = NULL;
}
471
472 // Legacy method to be removed in favor of the above as soon as possible
473 void
474 FwdState::unregister(int fd)
475 {
476 debugs(17, 3, HERE << entry->url() );
477 assert(fd == serverConnection()->fd);
478 unregister(serverConn);
479 }
480
/**
 * server-side modules call fwdComplete() when they are done
 * downloading an object.  Then, we either 1) re-forward the
 * request somewhere else if needed, or 2) call storeComplete()
 * to finish it off
 */
void
FwdState::complete()
{
    debugs(17, 3, HERE << entry->url() << "\n\tstatus " << entry->getReply()->sline.status());
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    logReplyStatus(n_tries, entry->getReply()->sline.status());

    if (reforward()) {
        debugs(17, 3, HERE << "re-forwarding " << entry->getReply()->sline.status() << " " << entry->url());

        if (Comm::IsConnOpen(serverConn))
            unregister(serverConn);

        // discard what was stored so far and start over
        entry->reset();

        // drop the last path off the selection list. try the next one.
        serverDestinations.erase(serverDestinations.begin());
        startConnectionOrFail();

    } else {
        if (Comm::IsConnOpen(serverConn))
            debugs(17, 3, HERE << "server FD " << serverConnection()->fd << " not re-forwarding status " << entry->getReply()->sline.status());
        else
            debugs(17, 3, HERE << "server (FD closed) not re-forwarding status " << entry->getReply()->sline.status());
        EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
        entry->complete();

        // with no open server connection, finalize the entry now
        if (!Comm::IsConnOpen(serverConn))
            completed();

        self = NULL; // refcounted
    }
}
524
525 /**** CALLBACK WRAPPERS ************************************************************/
526
527 static void
528 fwdPeerSelectionCompleteWrapper(Comm::ConnectionList * unused, ErrorState *err, void *data)
529 {
530 FwdState *fwd = (FwdState *) data;
531 if (err)
532 fwd->fail(err);
533 fwd->startConnectionOrFail();
534 }
535
536 static void
537 fwdServerClosedWrapper(const CommCloseCbParams &params)
538 {
539 FwdState *fwd = (FwdState *)params.data;
540 fwd->serverClosed(params.fd);
541 }
542
543 void
544 fwdConnectDoneWrapper(const Comm::ConnectionPointer &conn, comm_err_t status, int xerrno, void *data)
545 {
546 FwdState *fwd = (FwdState *) data;
547 fwd->connectDone(conn, status, xerrno);
548 }
549
550 /**** PRIVATE *****************************************************************/
551
552 /*
553 * FwdState::checkRetry
554 *
555 * Return TRUE if the request SHOULD be retried. This method is
556 * called when the HTTP connection fails, or when the connection
557 * is closed before server-side read the end of HTTP headers.
558 */
559 bool
560 FwdState::checkRetry()
561 {
562 if (shutting_down)
563 return false;
564
565 if (!self) { // we have aborted before the server called us back
566 debugs(17, 5, HERE << "not retrying because of earlier abort");
567 // we will be destroyed when the server clears its Pointer to us
568 return false;
569 }
570
571 if (entry->store_status != STORE_PENDING)
572 return false;
573
574 if (!entry->isEmpty())
575 return false;
576
577 if (n_tries > Config.forward_max_tries)
578 return false;
579
580 if (squid_curtime - start_t > Config.Timeout.forward)
581 return false;
582
583 if (flags.dont_retry)
584 return false;
585
586 if (request->bodyNibbled())
587 return false;
588
589 // NP: not yet actually connected anywhere. retry is safe.
590 if (!flags.connected_okay)
591 return true;
592
593 if (!checkRetriable())
594 return false;
595
596 return true;
597 }
598
599 /*
600 * FwdState::checkRetriable
601 *
602 * Return TRUE if this is the kind of request that can be retried
603 * after a failure. If the request is not retriable then we don't
604 * want to risk sending it on a persistent connection. Instead we'll
605 * force it to go on a new HTTP connection.
606 */
607 bool
608 FwdState::checkRetriable()
609 {
610 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
611 // complicated] code required to protect the PUT request body from being
612 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
613 if (request->body_pipe != NULL)
614 return false;
615
616 // RFC2616 9.1 Safe and Idempotent Methods
617 return (request->method.isHttpSafe() || request->method.isIdempotent());
618 }
619
/// Close handler for the server connection: credits the pconn pool with
/// the connection's use count, then retries another path or bails out.
void
FwdState::serverClosed(int fd)
{
    // XXX: fd is often -1 here
    debugs(17, 2, "FD " << fd << " " << entry->url() << " after " <<
           (fd >= 0 ? fd_table[fd].pconn.uses : -1) << " requests");
    if (fd >= 0 && serverConnection()->fd == fd)
        fwdPconnPool->noteUses(fd_table[fd].pconn.uses);
    retryOrBail();
}
630
/// After a server-side failure: re-forwards the request (to the same
/// destination after a pconn race, otherwise to the next one), or gives
/// up and clears self so the object can be destroyed.
void
FwdState::retryOrBail()
{
    if (checkRetry()) {
        debugs(17, 3, HERE << "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
        // we should retry the same destination if it failed due to pconn race
        if (pconnRace == raceHappened)
            debugs(17, 4, HERE << "retrying the same destination");
        else
            serverDestinations.erase(serverDestinations.begin()); // last one failed. try another.
        startConnectionOrFail();
        return;
    }

    // TODO: should we call completed() here and move doneWithRetries there?
    doneWithRetries();

    // still alive, no error recorded, and shutting down: report the shutdown
    if (self != NULL && !err && shutting_down) {
        ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
        errorAppendEntry(entry, anErr);
    }

    self = NULL; // refcounted
}
655
656 // If the Server quits before nibbling at the request body, the body sender
657 // will not know (so that we can retry). Call this if we will not retry. We
658 // will notify the sender so that it does not get stuck waiting for space.
659 void
660 FwdState::doneWithRetries()
661 {
662 if (request && request->body_pipe != NULL)
663 request->body_pipe->expectNoConsumption();
664 }
665
// called by the server that failed after calling unregister()
void
FwdState::handleUnregisteredServerEnd()
{
    debugs(17, 2, HERE << "self=" << self << " err=" << err << ' ' << entry->url());
    assert(!Comm::IsConnOpen(serverConn)); // the server already detached
    retryOrBail();
}
674
/// Comm::ConnOpener callback: a connection attempt finished.  On failure,
/// records the error, notes the peer failure, and retries or bails.  On
/// success, installs the close handler, starts TLS negotiation when
/// needed, pins FTP-client connections, and dispatches the request.
void
FwdState::connectDone(const Comm::ConnectionPointer &conn, comm_err_t status, int xerrno)
{
    if (status != COMM_OK) {
        ErrorState *const anErr = makeConnectingError(ERR_CONNECT_FAIL);
        anErr->xerrno = xerrno;
        fail(anErr);

        /* it might have been a timeout with a partially open link */
        if (conn != NULL) {
            if (conn->getPeer())
                peerConnectFailed(conn->getPeer());

            conn->close();
        }
        retryOrBail();
        return;
    }

    serverConn = conn;
    flags.connected_okay = true;

    debugs(17, 3, HERE << serverConnection() << ": '" << entry->url() << "'" );

    comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);

    if (serverConnection()->getPeer())
        peerConnectSucceded(serverConnection()->getPeer());

#if USE_OPENSSL
    // Negotiate TLS first when talking to an SSL cache_peer, fetching an
    // https:// URL directly, or peeking at intercepted SSL traffic.
    if (!request->flags.pinned) {
        if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl) ||
                (!serverConnection()->getPeer() && request->url.getScheme() == AnyP::PROTO_HTTPS) ||
                request->flags.sslPeek) {

            HttpRequest::Pointer requestPointer = request;
            AsyncCall::Pointer callback = asyncCall(17,4,
                                                    "FwdState::ConnectedToPeer",
                                                    FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
            Ssl::PeerConnector *connector =
                new Ssl::PeerConnector(requestPointer, serverConnection(), callback);
            AsyncJob::Start(connector); // will call our callback
            return;
        }
    }
#endif

    // FTP clients get the new server connection pinned to them
    const CbcPointer<ConnStateData> &clientConnState =
        request->clientConnectionManager;
    if (clientConnState.valid() && clientConnState->isFtp) {
        // this is not an idle connection, so we do not want I/O monitoring
        const bool monitor = false;
        clientConnState->pinConnection(serverConnection(), request,
                                       serverConnection()->getPeer(), false,
                                       monitor);
    }

    dispatch();
}
734
#if USE_OPENSSL
/// Ssl::PeerConnector callback: TLS negotiation with the server finished.
/// On failure, records the error and stops forwarding; otherwise proceeds
/// to dispatch the request to the server-side module.
void
FwdState::connectedToPeer(Ssl::PeerConnectorAnswer &answer)
{
    if (ErrorState *error = answer.error.get()) {
        fail(error);
        answer.error.clear(); // preserve error for errorSendComplete()
        self = NULL;
        return;
    }

    dispatch();
}
#endif
749
/// Handles a connect timeout on the current (first) destination: records
/// an ERR_CONNECT_FAIL error if nothing has been received yet, marks the
/// peer down, and closes the half-open connection.
void
FwdState::connectTimeout(int fd)
{
    debugs(17, 2, "fwdConnectTimeout: FD " << fd << ": '" << entry->url() << "'" );
    assert(serverDestinations[0] != NULL);
    assert(fd == serverDestinations[0]->fd);

    if (entry->isEmpty()) {
        ErrorState *anErr = new ErrorState(ERR_CONNECT_FAIL, Http::scGatewayTimeout, request);
        anErr->xerrno = ETIMEDOUT;
        fail(anErr);

        /* This marks the peer DOWN ... */
        if (serverDestinations[0]->getPeer())
            peerConnectFailed(serverDestinations[0]->getPeer());
    }

    if (Comm::IsConnOpen(serverDestinations[0])) {
        serverDestinations[0]->close();
    }
}
771
/**
 * Called after Forwarding path selection (via peer select) has taken place.
 * And whenever forwarding needs to attempt a new connection (routing failover)
 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
 *
 * Tries, in order: a pinned connection, an idle persistent connection,
 * and finally a brand new connection via Comm::ConnOpener.
 */
void
FwdState::connectStart()
{
    assert(serverDestinations.size() > 0);

    debugs(17, 3, "fwdConnectStart: " << entry->url());

    if (!request->hier.first_conn_start.tv_sec) // first attempt
        request->hier.first_conn_start = current_time;

    /* connection timeout */
    int ctimeout;
    if (serverDestinations[0]->getPeer()) {
        // a cache_peer may override the global peer_connect timeout
        ctimeout = serverDestinations[0]->getPeer()->connect_timeout > 0 ?
                   serverDestinations[0]->getPeer()->connect_timeout : Config.Timeout.peer_connect;
    } else {
        ctimeout = Config.Timeout.connect;
    }

    /* calculate total forwarding timeout ??? */
    int ftimeout = Config.Timeout.forward - (squid_curtime - start_t);
    if (ftimeout < 0)
        ftimeout = 5;

    // never let one connect attempt exceed the remaining forwarding time
    if (ftimeout < ctimeout)
        ctimeout = ftimeout;

    if (serverDestinations[0]->getPeer() && request->flags.sslBumped) {
        debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parent proxy are not allowed");
        ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request);
        fail(anErr);
        self = NULL; // refcounted
        return;
    }

    request->flags.pinned = false; // XXX: what if the ConnStateData set this to flag existing credentials?
    // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
    // XXX: also, logs will now lie if pinning is broken and leads to an error message.
    if (serverDestinations[0]->peerType == PINNED) {
        ConnStateData *pinned_connection = request->pinnedConnection();
        debugs(17,7, "pinned peer connection: " << pinned_connection);
        // pinned_connection may become nil after a pconn race
        if (pinned_connection)
            serverConn = pinned_connection->borrowPinnedConnection(request, serverDestinations[0]->getPeer());
        else
            serverConn = NULL;
        if (Comm::IsConnOpen(serverConn)) {
            pinned_connection->stopPinnedConnectionMonitoring();
            flags.connected_okay = true;
            ++n_tries;
            request->hier.note(serverConn, request->GetHost());
            request->flags.pinned = true;
            // NOTE(review): hier.note() is called twice here (request host
            // above, pinning.host below) -- confirm both are intentional.
            request->hier.note(serverConn, pinned_connection->pinning.host);
            if (pinned_connection->pinnedAuth())
                request->flags.auth = true;
            comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
            // the server may close the pinned connection before this request
            pconnRace = racePossible;
            dispatch();
            return;
        }
        // Pinned connection failure.
        debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
        ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
        fail(anErr);
        self = NULL; // refcounted
        return;
    }

    // Use pconn to avoid opening a new connection.
    const char *host = NULL;
    if (!serverDestinations[0]->getPeer())
        host = request->GetHost();

    Comm::ConnectionPointer temp;
    // Avoid pconns after races so that the same client does not suffer twice.
    // This does not increase the total number of connections because we just
    // closed the connection that failed the race. And re-pinning assumes this.
    if (pconnRace != raceHappened)
        temp = pconnPop(serverDestinations[0], host);

    const bool openedPconn = Comm::IsConnOpen(temp);
    pconnRace = openedPconn ? racePossible : raceImpossible;

    // if we found an open persistent connection to use. use it.
    if (openedPconn) {
        serverConn = temp;
        flags.connected_okay = true;
        debugs(17, 3, HERE << "reusing pconn " << serverConnection());
        ++n_tries;

        comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);

        /* Update server side TOS and Netfilter mark on the connection. */
        if (Ip::Qos::TheConfig.isAclTosActive()) {
            const tos_t tos = GetTosToServer(request);
            Ip::Qos::setSockTos(temp, tos);
        }
#if SO_MARK
        if (Ip::Qos::TheConfig.isAclNfmarkActive()) {
            const nfmark_t nfmark = GetNfmarkToServer(request);
            Ip::Qos::setSockNfmark(temp, nfmark);
        }
#endif

        dispatch();
        return;
    }

    // We will try to open a new connection, possibly to the same destination.
    // We reset serverDestinations[0] in case we are using it again because
    // ConnOpener modifies its destination argument.
    serverDestinations[0]->local.port(0);
    serverConn = NULL;

#if URL_CHECKSUM_DEBUG
    entry->mem_obj->checkUrlChecksum();
#endif

    GetMarkingsToServer(request, *serverDestinations[0]);

    calls.connector = commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper, this));
    Comm::ConnOpener *cs = new Comm::ConnOpener(serverDestinations[0], calls.connector, ctimeout);
    if (host)
        cs->setHost(host);
    AsyncJob::Start(cs);
}
904
/// Hands the established server connection to the protocol-specific
/// server-side module (HTTP, FTP, gopher, whois, ...) chosen from the
/// request scheme and peer configuration.
void
FwdState::dispatch()
{
    debugs(17, 3, clientConn << ": Fetching " << request->method << ' ' << entry->url());
    /*
     * Assert that server_fd is set.  This is to guarantee that fwdState
     * is attached to something and will be deallocated when server_fd
     * is closed.
     */
    assert(Comm::IsConnOpen(serverConn));

    fd_note(serverConnection()->fd, entry->url());

    fd_table[serverConnection()->fd].noteUse();

    /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
    assert(entry->ping_status != PING_WAITING);

    assert(entry->locked());

    EBIT_SET(entry->flags, ENTRY_DISPATCHED);

    netdbPingSite(request->GetHost());

    /* Retrieves remote server TOS or MARK value, and stores it as part of the
     * original client request FD object. It is later used to forward
     * remote server's TOS/MARK in the response to the client in case of a MISS.
     */
    if (Ip::Qos::TheConfig.isHitNfmarkActive()) {
        if (Comm::IsConnOpen(clientConn) && Comm::IsConnOpen(serverConnection())) {
            fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
            /* Get the netfilter mark for the connection */
            Ip::Qos::getNfmarkFromServer(serverConnection(), clientFde);
        }
    }

#if _SQUID_LINUX_
    /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
    if (Ip::Qos::TheConfig.isHitTosActive()) {
        if (Comm::IsConnOpen(clientConn)) {
            fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
            /* Get the TOS value for the packet */
            Ip::Qos::getTosFromServer(serverConnection(), clientFde);
        }
    }
#endif

#if USE_OPENSSL
    if (request->flags.sslPeek) {
        // hand the server connection over to the client side for peeking
        CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
                     ConnStateData::httpsPeeked, serverConnection());
        unregister(serverConn); // async call owns it now
        complete(); // destroys us
        return;
    }
#endif

    if (serverConnection()->getPeer() != NULL) {
        // requests relayed via a cache_peer use the HTTP module
        ++ serverConnection()->getPeer()->stats.fetches;
        request->peer_login = serverConnection()->getPeer()->login;
        request->peer_domain = serverConnection()->getPeer()->domain;
        httpStart(this);
    } else {
        assert(!request->flags.sslPeek);
        request->peer_login = NULL;
        request->peer_domain = NULL;

        switch (request->url.getScheme()) {
#if USE_OPENSSL

        case AnyP::PROTO_HTTPS:
            httpStart(this);
            break;
#endif

        case AnyP::PROTO_HTTP:
            httpStart(this);
            break;

        case AnyP::PROTO_GOPHER:
            gopherStart(this);
            break;

        case AnyP::PROTO_FTP:
            // native FTP relay for FTP clients; FTP-over-HTTP gatewaying otherwise
            if (request->clientConnectionManager->isFtp)
                ftpGatewayServerStart(this);
            else
                ftpStart(this);
            break;

        case AnyP::PROTO_CACHE_OBJECT:

        case AnyP::PROTO_URN:
            // both are intercepted in FwdState::Start() and never reach here
            fatal_dump("Should never get here");
            break;

        case AnyP::PROTO_WHOIS:
            whoisStart(this);
            break;

        case AnyP::PROTO_WAIS:  /* Not implemented */

        default:
            debugs(17, DBG_IMPORTANT, "WARNING: Cannot retrieve '" << entry->url() << "'.");
            ErrorState *anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request);
            fail(anErr);
            // Set the dont_retry flag because this is not a transient (network) error.
            flags.dont_retry = true;
            if (Comm::IsConnOpen(serverConn)) {
                serverConn->close();
            }
            break;
        }
    }
}
1020
1021 /*
1022 * FwdState::reforward
1023 *
1024 * returns TRUE if the transaction SHOULD be re-forwarded to the
1025 * next choice in the serverDestinations list. This method is called when
1026 * server-side communication completes normally, or experiences
1027 * some error after receiving the end of HTTP headers.
1028 */
1029 int
1030 FwdState::reforward()
1031 {
1032 StoreEntry *e = entry;
1033
1034 if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
1035 debugs(17, 3, HERE << "entry aborted");
1036 return 0;
1037 }
1038
1039 assert(e->store_status == STORE_PENDING);
1040 assert(e->mem_obj);
1041 #if URL_CHECKSUM_DEBUG
1042
1043 e->mem_obj->checkUrlChecksum();
1044 #endif
1045
1046 debugs(17, 3, HERE << e->url() << "?" );
1047
1048 if (!EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
1049 debugs(17, 3, HERE << "No, ENTRY_FWD_HDR_WAIT isn't set");
1050 return 0;
1051 }
1052
1053 if (n_tries > Config.forward_max_tries)
1054 return 0;
1055
1056 if (request->bodyNibbled())
1057 return 0;
1058
1059 if (serverDestinations.size() <= 1) {
1060 // NP: <= 1 since total count includes the recently failed one.
1061 debugs(17, 3, HERE << "No alternative forwarding paths left");
1062 return 0;
1063 }
1064
1065 const Http::StatusCode s = e->getReply()->sline.status();
1066 debugs(17, 3, HERE << "status " << s);
1067 return reforwardableStatus(s);
1068 }
1069
1070 /**
1071 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1072 * on whether this is a validation request. RFC 2616 says that we MUST reply
1073 * with "504 Gateway Timeout" if validation fails and cached reply has
1074 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
1075 */
1076 ErrorState *
1077 FwdState::makeConnectingError(const err_type type) const
1078 {
1079 return new ErrorState(type, request->flags.needValidation ?
1080 Http::scGatewayTimeout : Http::scServiceUnavailable, request);
1081 }
1082
1083 static void
1084 fwdStats(StoreEntry * s)
1085 {
1086 int i;
1087 int j;
1088 storeAppendPrintf(s, "Status");
1089
1090 for (j = 1; j < MAX_FWD_STATS_IDX; ++j) {
1091 storeAppendPrintf(s, "\ttry#%d", j);
1092 }
1093
1094 storeAppendPrintf(s, "\n");
1095
1096 for (i = 0; i <= (int) Http::scInvalidHeader; ++i) {
1097 if (FwdReplyCodes[0][i] == 0)
1098 continue;
1099
1100 storeAppendPrintf(s, "%3d", i);
1101
1102 for (j = 0; j <= MAX_FWD_STATS_IDX; ++j) {
1103 storeAppendPrintf(s, "\t%d", FwdReplyCodes[j][i]);
1104 }
1105
1106 storeAppendPrintf(s, "\n");
1107 }
1108 }
1109
1110 /**** STATIC MEMBER FUNCTIONS *************************************************/
1111
1112 bool
1113 FwdState::reforwardableStatus(const Http::StatusCode s) const
1114 {
1115 switch (s) {
1116
1117 case Http::scBadGateway:
1118
1119 case Http::scGatewayTimeout:
1120 return true;
1121
1122 case Http::scForbidden:
1123
1124 case Http::scInternalServerError:
1125
1126 case Http::scNotImplemented:
1127
1128 case Http::scServiceUnavailable:
1129 return Config.retry.onerror;
1130
1131 default:
1132 return false;
1133 }
1134
1135 /* NOTREACHED */
1136 }
1137
1138 /**
1139 * Decide where details need to be gathered to correctly describe a persistent connection.
1140 * What is needed:
1141 * - the address/port details about this link
1142 * - domain name of server at other end of this link (either peer or requested host)
1143 */
1144 void
1145 FwdState::pconnPush(Comm::ConnectionPointer &conn, const char *domain)
1146 {
1147 if (conn->getPeer()) {
1148 fwdPconnPool->push(conn, NULL);
1149 } else {
1150 fwdPconnPool->push(conn, domain);
1151 }
1152 }
1153
1154 Comm::ConnectionPointer
1155 FwdState::pconnPop(const Comm::ConnectionPointer &dest, const char *domain)
1156 {
1157 // always call shared pool first because we need to close an idle
1158 // connection there if we have to use a standby connection.
1159 Comm::ConnectionPointer conn = fwdPconnPool->pop(dest, domain, checkRetriable());
1160 if (!Comm::IsConnOpen(conn)) {
1161 // either there was no pconn to pop or this is not a retriable xaction
1162 if (CachePeer *peer = dest->getPeer()) {
1163 if (peer->standby.pool)
1164 conn = peer->standby.pool->pop(dest, domain, true);
1165 }
1166 }
1167 return conn; // open, closed, or nil
1168 }
1169
/// One-time module setup: registers the cache manager "forward" action.
void
FwdState::initModule()
{
    RegisterWithCacheManager();
}
1175
/// Registers the "forward" cache manager action, which reports forwarding
/// statistics via fwdStats().
void
FwdState::RegisterWithCacheManager(void)
{
    Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats, 0, 1);
}
1181
1182 void
1183 FwdState::logReplyStatus(int tries, const Http::StatusCode status)
1184 {
1185 if (status > Http::scInvalidHeader)
1186 return;
1187
1188 assert(tries >= 0);
1189
1190 if (tries > MAX_FWD_STATS_IDX)
1191 tries = MAX_FWD_STATS_IDX;
1192
1193 ++ FwdReplyCodes[tries][status];
1194 }
1195
1196 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1197
1198 /*
1199 * DPW 2007-05-19
1200 * Formerly static, but now used by client_side_request.cc
1201 */
1202 /// Checks for a TOS value to apply depending on the ACL
1203 tos_t
1204 aclMapTOS(acl_tos * head, ACLChecklist * ch)
1205 {
1206 acl_tos *l;
1207
1208 for (l = head; l; l = l->next) {
1209 if (!l->aclList || ch->fastCheck(l->aclList) == ACCESS_ALLOWED)
1210 return l->tos;
1211 }
1212
1213 return 0;
1214 }
1215
1216 /// Checks for a netfilter mark value to apply depending on the ACL
1217 nfmark_t
1218 aclMapNfmark(acl_nfmark * head, ACLChecklist * ch)
1219 {
1220 acl_nfmark *l;
1221
1222 for (l = head; l; l = l->next) {
1223 if (!l->aclList || ch->fastCheck(l->aclList) == ACCESS_ALLOWED)
1224 return l->nfmark;
1225 }
1226
1227 return 0;
1228 }
1229
/// Selects the local (outgoing) address for a server-side connection:
/// honors TPROXY client-address spoofing first, then the
/// tcp_outgoing_address ACL list. Does nothing if conn->local is already set.
void
getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn)
{
    // skip if an outgoing address is already set.
    if (!conn->local.isAnyAddr()) return;

    // ensure that at minimum the wildcard local matches remote protocol
    if (conn->remote.isIPv4())
        conn->local.setIPv4();

    // maybe use TPROXY client address
    if (request && request->flags.spoofClientIp) {
        // spoofing is skipped only when the peer explicitly disables tproxy
        if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
#if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
            if (Config.onoff.tproxy_uses_indirect_client)
                conn->local = request->indirect_client_addr;
            else
#endif
                conn->local = request->client_addr;
            // some flags need setting on the socket to use this address
            conn->flags |= COMM_DOBIND;
            conn->flags |= COMM_TRANSPARENT;
            return;
        }
        // else no tproxy today ...
    }

    if (!Config.accessList.outgoing_address) {
        return; // anything will do.
    }

    ACLFilledChecklist ch(NULL, request, NULL);
    ch.dst_peer = conn->getPeer();
    ch.dst_addr = conn->remote;

    // TODO use the connection details in ACL.
    // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData

    // first tcp_outgoing_address rule that is address-family compatible
    // and whose ACLs match (or that has no ACLs) wins
    AclAddress *l;
    for (l = Config.accessList.outgoing_address; l; l = l->next) {

        /* check if the outgoing address is usable to the destination */
        if (conn->remote.isIPv4() != l->addr.isIPv4()) continue;

        /* check ACLs for this outgoing address */
        if (!l->aclList || ch.fastCheck(l->aclList) == ACCESS_ALLOWED) {
            conn->local = l->addr;
            return;
        }
    }
    // no rule matched: the wildcard local address remains in effect
}
1281
1282 tos_t
1283 GetTosToServer(HttpRequest * request)
1284 {
1285 ACLFilledChecklist ch(NULL, request, NULL);
1286 return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
1287 }
1288
1289 nfmark_t
1290 GetNfmarkToServer(HttpRequest * request)
1291 {
1292 ACLFilledChecklist ch(NULL, request, NULL);
1293 return aclMapNfmark(Ip::Qos::TheConfig.nfmarkToServer, &ch);
1294 }
1295
/// Fills conn.tos and conn.nfmark with the ACL-selected QoS markings
/// to be applied to the server-side connection.
void
GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
{
    // Get the server side TOS and Netfilter mark to be set on the connection.
    if (Ip::Qos::TheConfig.isAclTosActive()) {
        conn.tos = GetTosToServer(request);
        debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos));
    }

#if SO_MARK && USE_LIBCAP
    // netfilter marking needs SO_MARK socket support plus libcap at build time
    conn.nfmark = GetNfmarkToServer(request);
    debugs(17, 3, "from " << conn.local << " netfilter mark " << conn.nfmark);
#else
    conn.nfmark = 0;
#endif
}