1 /*
2 * DEBUG: section 17 Request Forwarding
3 * AUTHOR: Duane Wessels
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 */
32
33 #include "squid.h"
34 #include "AccessLogEntry.h"
35 #include "acl/AclAddress.h"
36 #include "acl/FilledChecklist.h"
37 #include "acl/Gadgets.h"
38 #include "anyp/PortCfg.h"
39 #include "CacheManager.h"
40 #include "CachePeer.h"
41 #include "client_side.h"
42 #include "comm/Connection.h"
43 #include "comm/ConnOpener.h"
44 #include "comm/Loops.h"
45 #include "CommCalls.h"
46 #include "errorpage.h"
47 #include "event.h"
48 #include "fd.h"
49 #include "fde.h"
50 #include "ftp.h"
51 #include "FtpGatewayServer.h"
52 #include "FwdState.h"
53 #include "globals.h"
54 #include "gopher.h"
55 #include "hier_code.h"
56 #include "http.h"
57 #include "HttpReply.h"
58 #include "HttpRequest.h"
59 #include "icmp/net_db.h"
60 #include "internal.h"
61 #include "ip/Intercept.h"
62 #include "ip/QosConfig.h"
63 #include "ip/tools.h"
64 #include "MemObject.h"
65 #include "mgr/Registration.h"
66 #include "neighbors.h"
67 #include "pconn.h"
68 #include "PeerPoolMgr.h"
69 #include "PeerSelectState.h"
70 #include "SquidConfig.h"
71 #include "SquidTime.h"
72 #include "Store.h"
73 #include "StoreClient.h"
74 #include "urn.h"
75 #include "whois.h"
76 #if USE_OPENSSL
77 #include "ssl/cert_validate_message.h"
78 #include "ssl/Config.h"
79 #include "ssl/ErrorDetail.h"
80 #include "ssl/helper.h"
81 #include "ssl/PeerConnector.h"
82 #include "ssl/ServerBump.h"
83 #include "ssl/support.h"
84 #endif
85
86 #include <cerrno>
87
88 static PSC fwdPeerSelectionCompleteWrapper;
89 static CLCB fwdServerClosedWrapper;
90 static CNCB fwdConnectDoneWrapper;
91
92 static OBJH fwdStats;
93
94 #define MAX_FWD_STATS_IDX 9
95 static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];
96
97 static PconnPool *fwdPconnPool = new PconnPool("server-side", NULL);
98 CBDATA_CLASS_INIT(FwdState);
99
100 #if USE_OPENSSL
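/// Glue between Ssl::PeerConnector and FwdState: stores the PeerConnectorAnswer
/// and delivers it to a FwdState method (e.g., FwdState::connectedToPeer), but
/// only while the FwdState cbdata pointer is still valid.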
101 class FwdStatePeerAnswerDialer: public CallDialer, public Ssl::PeerConnector::CbDialer
102 {
103 public:
104 typedef void (FwdState::*Method)(Ssl::PeerConnectorAnswer &);
105
106 FwdStatePeerAnswerDialer(Method method, FwdState *fwd):
107 method_(method), fwd_(fwd), answer_() {}
108
109 /* CallDialer API */
110 virtual bool canDial(AsyncCall &call) { return fwd_.valid(); }
111 void dial(AsyncCall &call) { ((&(*fwd_))->*method_)(answer_); }
112 virtual void print(std::ostream &os) const {
113 os << '(' << fwd_.get() << ", " << answer_ << ')';
114 }
115
116 /* Ssl::PeerConnector::CbDialer API */
117 virtual Ssl::PeerConnectorAnswer &answer() { return answer_; }
118
119 private:
120 Method method_;
121 CbcPointer<FwdState> fwd_;
122 Ssl::PeerConnectorAnswer answer_;
123 };
124 #endif
125
126 void
127 FwdState::abort(void* d)
128 {
129 FwdState* fwd = (FwdState*)d;
130 Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
131
132 if (Comm::IsConnOpen(fwd->serverConnection())) {
133 fwd->closeServerConnection("store entry aborted");
134 } else {
135 debugs(17, 7, HERE << "store entry aborted; no connection to close");
136 }
137 fwd->serverDestinations.clear();
138 fwd->self = NULL;
139 }
140
141 void
142 FwdState::closeServerConnection(const char *reason)
143 {
144 debugs(17, 3, "because " << reason << "; " << serverConn);
145 comm_remove_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
146 fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
147 serverConn->close();
148 }
149
150 /**** PUBLIC INTERFACE ********************************************************/
151
152 FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRequest * r, const AccessLogEntryPointer &alp):
153 al(alp)
154 {
155 debugs(17, 2, HERE << "Forwarding client request " << client << ", url=" << e->url() );
156 entry = e;
157 clientConn = client;
158 request = r;
159 HTTPMSGLOCK(request);
160 pconnRace = raceImpossible;
161 start_t = squid_curtime;
162 serverDestinations.reserve(Config.forward_max_tries);
163 e->lock("FwdState");
164 EBIT_SET(e->flags, ENTRY_FWD_HDR_WAIT);
165 }
166
167 // Called once, right after object creation, when it is safe to set self
168 void FwdState::start(Pointer aSelf)
169 {
170 // Protect ourselves from being destroyed when the only Server pointing
171 // to us is gone (while we expect to talk to more Servers later).
172 // Once we set self, we are responsible for clearing it when we do not
173 // expect to talk to any servers.
174 self = aSelf; // refcounted
175
176     // We hope that either the store entry aborts or a peer is selected.
177 // Otherwise we are going to leak our object.
178
179 entry->registerAbort(FwdState::abort, this);
180
181 #if STRICT_ORIGINAL_DST
182 // Bug 3243: CVE 2009-0801
183 // Bypass of browser same-origin access control in intercepted communication
184 // To resolve this we must force DIRECT and only to the original client destination.
185 const bool isIntercepted = request && !request->flags.redirected && (request->flags.intercepted || request->flags.interceptTproxy);
186 const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
187 if (isIntercepted && useOriginalDst) {
188 selectPeerForIntercepted();
189         // Squid 3.2 does not support re-wrapping inside CONNECT.
190         // Our only alternative is to fake a destination "found" and continue with the forwarding.
191 startConnectionOrFail();
192 return;
193 }
194 #endif
195
196 // do full route options selection
197 peerSelect(&serverDestinations, request, al, entry, fwdPeerSelectionCompleteWrapper, this);
198 }
199
200 #if STRICT_ORIGINAL_DST
201 /// bypasses peerSelect() when dealing with intercepted requests
202 void
203 FwdState::selectPeerForIntercepted()
204 {
205 // use pinned connection if available
206 Comm::ConnectionPointer p;
207 if (ConnStateData *client = request->pinnedConnection()) {
208 p = client->validatePinnedConnection(request, NULL);
209 if (Comm::IsConnOpen(p)) {
210 /* duplicate peerSelectPinned() effects */
211 p->peerType = PINNED;
212 entry->ping_status = PING_DONE; /* Skip ICP */
213
214 debugs(17, 3, "reusing a pinned conn: " << *p);
215 serverDestinations.push_back(p);
216 } else {
217 debugs(17,2, "Pinned connection is not valid: " << p);
218 ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
219 fail(anErr);
220 }
221 // Either use the valid pinned connection or fail if it is invalid.
222 return;
223 }
224
225 // use client original destination as second preferred choice
226 p = new Comm::Connection();
227 p->peerType = ORIGINAL_DST;
228 p->remote = clientConn->local;
229 getOutgoingAddress(request, p);
230
231 debugs(17, 3, HERE << "using client original destination: " << *p);
232 serverDestinations.push_back(p);
233 }
234 #endif
235
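/// Finalizes the StoreEntry once forwarding is over: if the entry is still
/// pending and empty, appends the stored (or a generic read) error to it;
/// otherwise clears ENTRY_FWD_HDR_WAIT, completes the entry, and releases the request.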
236 void
237 FwdState::completed()
238 {
239 if (flags.forward_completed) {
240 debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
241 return;
242 }
243
244 flags.forward_completed = true;
245
246 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
247 debugs(17, 3, HERE << "entry aborted");
248 return ;
249 }
250
251 #if URL_CHECKSUM_DEBUG
252
253 entry->mem_obj->checkUrlChecksum();
254 #endif
255
256 if (entry->store_status == STORE_PENDING) {
257 if (entry->isEmpty()) {
258 if (!err) // we quit (e.g., fd closed) before an error or content
259 fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request));
260 assert(err);
261 errorAppendEntry(entry, err);
262 err = NULL;
263 #if USE_OPENSSL
264 if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
265 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
266 ConnStateData::httpsPeeked, Comm::ConnectionPointer(NULL));
267 }
268 #endif
269 } else {
270 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
271 entry->complete();
272 entry->releaseRequest();
273 }
274 }
275
276 if (storePendingNClients(entry) > 0)
277 assert(!EBIT_TEST(entry->flags, ENTRY_FWD_HDR_WAIT));
278
279 }
280
281 FwdState::~FwdState()
282 {
283 debugs(17, 3, HERE << "FwdState destructor starting");
284
285 if (! flags.forward_completed)
286 completed();
287
288 doneWithRetries();
289
290 HTTPMSGUNLOCK(request);
291
292 delete err;
293
294 entry->unregisterAbort();
295
296 entry->unlock("FwdState");
297
298 entry = NULL;
299
300 if (calls.connector != NULL) {
301 calls.connector->cancel("FwdState destructed");
302 calls.connector = NULL;
303 }
304
305 if (Comm::IsConnOpen(serverConn))
306 closeServerConnection("~FwdState");
307
308 serverDestinations.clear();
309
310 debugs(17, 3, HERE << "FwdState destructor done");
311 }
312
313 /**
314 * This is the entry point for client-side to start forwarding
315 * a transaction. It is a static method that may or may not
316 * allocate a FwdState.
317 */
318 void
319 FwdState::Start(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request, const AccessLogEntryPointer &al)
320 {
321 /** \note
322 * client_addr == no_addr indicates this is an "internal" request
323 * from peer_digest.c, asn.c, netdb.c, etc and should always
324 * be allowed. yuck, I know.
325 */
326
327 if ( Config.accessList.miss && !request->client_addr.isNoAddr() &&
328 !request->flags.internal && request->url.getScheme() != AnyP::PROTO_CACHE_OBJECT) {
329 /**
330 * Check if this host is allowed to fetch MISSES from us (miss_access).
331          * Intentionally replace the src_addr automatically selected by the checklist code:
332 * we do NOT want the indirect client address to be tested here.
333 */
334 ACLFilledChecklist ch(Config.accessList.miss, request, NULL);
335 ch.src_addr = request->client_addr;
336 if (ch.fastCheck() == ACCESS_DENIED) {
337 err_type page_id;
338 page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName, 1);
339
340 if (page_id == ERR_NONE)
341 page_id = ERR_FORWARDING_DENIED;
342
343 ErrorState *anErr = new ErrorState(page_id, Http::scForbidden, request);
344 errorAppendEntry(entry, anErr); // frees anErr
345 return;
346 }
347 }
348
349 debugs(17, 3, HERE << "'" << entry->url() << "'");
350 /*
351 * This seems like an odd place to bind mem_obj and request.
352 * Might want to assert that request is NULL at this point
353 */
354 entry->mem_obj->request = request;
355 HTTPMSGLOCK(entry->mem_obj->request);
356 #if URL_CHECKSUM_DEBUG
357
358 entry->mem_obj->checkUrlChecksum();
359 #endif
360
361 if (shutting_down) {
362 /* more yuck */
363 ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
364 errorAppendEntry(entry, anErr); // frees anErr
365 return;
366 }
367
368 if (request->flags.internal) {
369 debugs(17, 2, "calling internalStart() due to request flag");
370 internalStart(clientConn, request, entry);
371 return;
372 }
373
374 switch (request->url.getScheme()) {
375
376 case AnyP::PROTO_CACHE_OBJECT:
377 debugs(17, 2, "calling CacheManager due to request scheme " << request->url.getScheme());
378 CacheManager::GetInstance()->Start(clientConn, request, entry);
379 return;
380
381 case AnyP::PROTO_URN:
382 urnStart(request, entry);
383 return;
384
385 default:
386 FwdState::Pointer fwd = new FwdState(clientConn, entry, request, al);
387 fwd->start(fwd);
388 return;
389 }
390
391 /* NOTREACHED */
392 }
393
394 void
395 FwdState::fwdStart(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request)
396 {
397 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
398 Start(clientConn, entry, request, NULL);
399 }
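// Illustrative only (not part of the Squid sources): a client-side caller that
// already holds the client connection, the StoreEntry, and a locked HttpRequest
// would typically hand the transaction off with something like
//
//     FwdState::Start(clientConn, entry, request, accessLogEntry);
//
// or, when no AccessLogEntry is available, with the thin wrapper above:
//
//     FwdState::fwdStart(clientConn, entry, request);
//
// The variable names here are placeholders, not code from client_side.cc.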
400
401 void
402 FwdState::startConnectionOrFail()
403 {
404 debugs(17, 3, HERE << entry->url());
405
406 if (serverDestinations.size() > 0) {
407         // Ditch the error page if one was created before.
408         // A new one will be created if there is another problem.
409 delete err;
410 err = NULL;
411
412 // Update the logging information about this new server connection.
413 // Done here before anything else so the errors get logged for
414 // this server link regardless of what happens when connecting to it.
415         // If successfully connected, this top destination will become the serverConnection().
416 request->hier.note(serverDestinations[0], request->GetHost());
417 request->clearError();
418
419 connectStart();
420 } else {
421 debugs(17, 3, HERE << "Connection failed: " << entry->url());
422 if (!err) {
423 ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request);
424 fail(anErr);
425 } // else use actual error from last connection attempt
426 self = NULL; // refcounted
427 }
428 }
429
430 void
431 FwdState::fail(ErrorState * errorState)
432 {
433 debugs(17, 3, err_type_str[errorState->type] << " \"" << Http::StatusCodeString(errorState->httpStatus) << "\"\n\t" << entry->url());
434
435 delete err;
436 err = errorState;
437
438 if (!errorState->request) {
439 errorState->request = request;
440 HTTPMSGLOCK(errorState->request);
441 }
442
443 if (err->type != ERR_ZERO_SIZE_OBJECT)
444 return;
445
446 if (pconnRace == racePossible) {
447 debugs(17, 5, HERE << "pconn race happened");
448 pconnRace = raceHappened;
449 }
450
451 if (ConnStateData *pinned_connection = request->pinnedConnection()) {
452 pinned_connection->pinning.zeroReply = true;
453 flags.dont_retry = true; // we want to propagate failure to the client
454 debugs(17, 4, "zero reply on pinned connection");
455 }
456 }
457
458 /**
459 * Frees fwdState without closing FD or generating an abort
460 */
461 void
462 FwdState::unregister(Comm::ConnectionPointer &conn)
463 {
464 debugs(17, 3, HERE << entry->url() );
465 assert(serverConnection() == conn);
466 assert(Comm::IsConnOpen(conn));
467 comm_remove_close_handler(conn->fd, fwdServerClosedWrapper, this);
468 serverConn = NULL;
469 }
470
471 // Legacy method to be removed in favor of the above as soon as possible
472 void
473 FwdState::unregister(int fd)
474 {
475 debugs(17, 3, HERE << entry->url() );
476 assert(fd == serverConnection()->fd);
477 unregister(serverConn);
478 }
479
480 /**
481  * Server-side modules call FwdState::complete() when they are done
482  * downloading an object. Then, we either 1) re-forward the
483  * request somewhere else if needed, or 2) complete the StoreEntry
484  * to finish it off.
485 */
486 void
487 FwdState::complete()
488 {
489 debugs(17, 3, HERE << entry->url() << "\n\tstatus " << entry->getReply()->sline.status());
490 #if URL_CHECKSUM_DEBUG
491
492 entry->mem_obj->checkUrlChecksum();
493 #endif
494
495 logReplyStatus(n_tries, entry->getReply()->sline.status());
496
497 if (reforward()) {
498 debugs(17, 3, HERE << "re-forwarding " << entry->getReply()->sline.status() << " " << entry->url());
499
500 if (Comm::IsConnOpen(serverConn))
501 unregister(serverConn);
502
503 entry->reset();
504
505         // drop the just-tried path from the front of the selection list. try the next one.
506 serverDestinations.erase(serverDestinations.begin());
507 startConnectionOrFail();
508
509 } else {
510 if (Comm::IsConnOpen(serverConn))
511 debugs(17, 3, HERE << "server FD " << serverConnection()->fd << " not re-forwarding status " << entry->getReply()->sline.status());
512 else
513 debugs(17, 3, HERE << "server (FD closed) not re-forwarding status " << entry->getReply()->sline.status());
514 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
515 entry->complete();
516
517 if (!Comm::IsConnOpen(serverConn))
518 completed();
519
520 self = NULL; // refcounted
521 }
522 }
523
524 /**** CALLBACK WRAPPERS ************************************************************/
525
526 static void
527 fwdPeerSelectionCompleteWrapper(Comm::ConnectionList * unused, ErrorState *err, void *data)
528 {
529 FwdState *fwd = (FwdState *) data;
530 if (err)
531 fwd->fail(err);
532 fwd->startConnectionOrFail();
533 }
534
535 static void
536 fwdServerClosedWrapper(const CommCloseCbParams &params)
537 {
538 FwdState *fwd = (FwdState *)params.data;
539 fwd->serverClosed(params.fd);
540 }
541
542 void
543 fwdConnectDoneWrapper(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno, void *data)
544 {
545 FwdState *fwd = (FwdState *) data;
546 fwd->connectDone(conn, status, xerrno);
547 }
548
549 /**** PRIVATE *****************************************************************/
550
551 /*
552 * FwdState::checkRetry
553 *
554 * Return TRUE if the request SHOULD be retried. This method is
555 * called when the HTTP connection fails, or when the connection
556  * is closed before the server side has read the end of the HTTP headers.
557 */
558 bool
559 FwdState::checkRetry()
560 {
561 if (shutting_down)
562 return false;
563
564 if (!self) { // we have aborted before the server called us back
565 debugs(17, 5, HERE << "not retrying because of earlier abort");
566 // we will be destroyed when the server clears its Pointer to us
567 return false;
568 }
569
570 if (entry->store_status != STORE_PENDING)
571 return false;
572
573 if (!entry->isEmpty())
574 return false;
575
576 if (n_tries > Config.forward_max_tries)
577 return false;
578
579 if (squid_curtime - start_t > Config.Timeout.forward)
580 return false;
581
582 if (flags.dont_retry)
583 return false;
584
585 if (request->bodyNibbled())
586 return false;
587
588 // NP: not yet actually connected anywhere. retry is safe.
589 if (!flags.connected_okay)
590 return true;
591
592 if (!checkRetriable())
593 return false;
594
595 return true;
596 }
597
598 /*
599 * FwdState::checkRetriable
600 *
601 * Return TRUE if this is the kind of request that can be retried
602 * after a failure. If the request is not retriable then we don't
603 * want to risk sending it on a persistent connection. Instead we'll
604 * force it to go on a new HTTP connection.
605 */
606 bool
607 FwdState::checkRetriable()
608 {
609 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
610 // complicated] code required to protect the PUT request body from being
611 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
612 if (request->body_pipe != NULL)
613 return false;
614
615 // RFC2616 9.1 Safe and Idempotent Methods
616 return (request->method.isHttpSafe() || request->method.isIdempotent());
617 }
618
619 void
620 FwdState::serverClosed(int fd)
621 {
622 // XXX: fd is often -1 here
623 debugs(17, 2, "FD " << fd << " " << entry->url() << " after " <<
624 (fd >= 0 ? fd_table[fd].pconn.uses : -1) << " requests");
625 if (fd >= 0 && serverConnection()->fd == fd)
626 fwdPconnPool->noteUses(fd_table[fd].pconn.uses);
627 retryOrBail();
628 }
629
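/// Retries forwarding to the next destination when checkRetry() allows it;
/// otherwise stops, tells the request body sender not to expect consumption
/// and, during shutdown, attaches an ERR_SHUTTING_DOWN reply to the entry.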
630 void
631 FwdState::retryOrBail()
632 {
633 if (checkRetry()) {
634 debugs(17, 3, HERE << "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
635 // we should retry the same destination if it failed due to pconn race
636 if (pconnRace == raceHappened)
637 debugs(17, 4, HERE << "retrying the same destination");
638 else
639 serverDestinations.erase(serverDestinations.begin()); // last one failed. try another.
640 startConnectionOrFail();
641 return;
642 }
643
644 // TODO: should we call completed() here and move doneWithRetries there?
645 doneWithRetries();
646
647 if (self != NULL && !err && shutting_down) {
648 ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
649 errorAppendEntry(entry, anErr);
650 }
651
652 self = NULL; // refcounted
653 }
654
655 // If the Server quits before nibbling at the request body, the body sender
656 // will not know (so that we can retry). Call this if we will not retry. We
657 // will notify the sender so that it does not get stuck waiting for space.
658 void
659 FwdState::doneWithRetries()
660 {
661 if (request && request->body_pipe != NULL)
662 request->body_pipe->expectNoConsumption();
663 }
664
665 // called by the server that failed after calling unregister()
666 void
667 FwdState::handleUnregisteredServerEnd()
668 {
669 debugs(17, 2, HERE << "self=" << self << " err=" << err << ' ' << entry->url());
670 assert(!Comm::IsConnOpen(serverConn));
671 retryOrBail();
672 }
673
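/// Comm::ConnOpener callback. On failure, records a connect error and retries
/// or bails. On success, remembers the new server connection, starts TLS
/// negotiation for SSL peers/HTTPS/SslBump peek when built with OpenSSL, pins
/// the connection for FTP relay clients, and then dispatches the request.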
674 void
675 FwdState::connectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno)
676 {
677 if (status != Comm::OK) {
678 ErrorState *const anErr = makeConnectingError(ERR_CONNECT_FAIL);
679 anErr->xerrno = xerrno;
680 fail(anErr);
681
682 /* it might have been a timeout with a partially open link */
683 if (conn != NULL) {
684 if (conn->getPeer())
685 peerConnectFailed(conn->getPeer());
686
687 conn->close();
688 }
689 retryOrBail();
690 return;
691 }
692
693 serverConn = conn;
694 flags.connected_okay = true;
695
696 debugs(17, 3, HERE << serverConnection() << ": '" << entry->url() << "'" );
697
698 comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
699
700 if (serverConnection()->getPeer())
701 peerConnectSucceded(serverConnection()->getPeer());
702
703 #if USE_OPENSSL
704 if (!request->flags.pinned) {
705 if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl) ||
706 (!serverConnection()->getPeer() && request->url.getScheme() == AnyP::PROTO_HTTPS) ||
707 request->flags.sslPeek) {
708
709 HttpRequest::Pointer requestPointer = request;
710 AsyncCall::Pointer callback = asyncCall(17,4,
711 "FwdState::ConnectedToPeer",
712 FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
713 // Use positive timeout when less than one second is left.
714 const time_t sslNegotiationTimeout = max(static_cast<time_t>(1), timeLeft());
715 Ssl::PeerConnector *connector =
716 new Ssl::PeerConnector(requestPointer, serverConnection(), callback, sslNegotiationTimeout);
717 AsyncJob::Start(connector); // will call our callback
718 return;
719 }
720 }
721 #endif
722
723 const CbcPointer<ConnStateData> &clientConnState =
724 request->clientConnectionManager;
725 if (clientConnState.valid() && clientConnState->isFtp) {
726 // this is not an idle connection, so we do not want I/O monitoring
727 const bool monitor = false;
728 clientConnState->pinConnection(serverConnection(), request,
729 serverConnection()->getPeer(), false,
730 monitor);
731 }
732
733 dispatch();
734 }
735
736 #if USE_OPENSSL
737 void
738 FwdState::connectedToPeer(Ssl::PeerConnectorAnswer &answer)
739 {
740 if (ErrorState *error = answer.error.get()) {
741 fail(error);
742 answer.error.clear(); // preserve error for errorSendComplete()
743 self = NULL;
744 return;
745 }
746
747 dispatch();
748 }
749 #endif
750
751 void
752 FwdState::connectTimeout(int fd)
753 {
754 debugs(17, 2, "fwdConnectTimeout: FD " << fd << ": '" << entry->url() << "'" );
755 assert(serverDestinations[0] != NULL);
756 assert(fd == serverDestinations[0]->fd);
757
758 if (entry->isEmpty()) {
759 ErrorState *anErr = new ErrorState(ERR_CONNECT_FAIL, Http::scGatewayTimeout, request);
760 anErr->xerrno = ETIMEDOUT;
761 fail(anErr);
762
763 /* This marks the peer DOWN ... */
764 if (serverDestinations[0]->getPeer())
765 peerConnectFailed(serverDestinations[0]->getPeer());
766 }
767
768 if (Comm::IsConnOpen(serverDestinations[0])) {
769 serverDestinations[0]->close();
770 }
771 }
772
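/// Returns the timeout to use for the next connection attempt: the smaller of
/// the applicable connect timeout (peer connect_timeout, peer_connect, or
/// connect) and whatever remains of the overall forward timeout.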
773 time_t
774 FwdState::timeLeft() const
775 {
776 /* connection timeout */
777 int ctimeout;
778 if (serverDestinations[0]->getPeer()) {
779 ctimeout = serverDestinations[0]->getPeer()->connect_timeout > 0 ?
780 serverDestinations[0]->getPeer()->connect_timeout : Config.Timeout.peer_connect;
781 } else {
782 ctimeout = Config.Timeout.connect;
783 }
784
785 /* calculate total forwarding timeout ??? */
786 int ftimeout = Config.Timeout.forward - (squid_curtime - start_t);
787 if (ftimeout < 0)
788 ftimeout = 5;
789
790 if (ftimeout < ctimeout)
791 return (time_t)ftimeout;
792 else
793 return (time_t)ctimeout;
794 }
795
796 /**
797 * Called after forwarding path selection (via peer select) has taken place
798 * and whenever forwarding needs to attempt a new connection (routing failover).
799 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
800 */
801 void
802 FwdState::connectStart()
803 {
804 assert(serverDestinations.size() > 0);
805
806 debugs(17, 3, "fwdConnectStart: " << entry->url());
807
808 if (!request->hier.first_conn_start.tv_sec) // first attempt
809 request->hier.first_conn_start = current_time;
810
811 if (serverDestinations[0]->getPeer() && request->flags.sslBumped) {
812 debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parent proxy are not allowed");
813 ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request);
814 fail(anErr);
815 self = NULL; // refcounted
816 return;
817 }
818
819 request->flags.pinned = false; // XXX: what if the ConnStateData set this to flag existing credentials?
820 // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
821 // XXX: also, logs will now lie if pinning is broken and leads to an error message.
822 if (serverDestinations[0]->peerType == PINNED) {
823 ConnStateData *pinned_connection = request->pinnedConnection();
824 debugs(17,7, "pinned peer connection: " << pinned_connection);
825 // pinned_connection may become nil after a pconn race
826 if (pinned_connection)
827 serverConn = pinned_connection->borrowPinnedConnection(request, serverDestinations[0]->getPeer());
828 else
829 serverConn = NULL;
830 if (Comm::IsConnOpen(serverConn)) {
831 pinned_connection->stopPinnedConnectionMonitoring();
832 flags.connected_okay = true;
833 ++n_tries;
834 request->hier.note(serverConn, request->GetHost());
835 request->flags.pinned = true;
836 request->hier.note(serverConn, pinned_connection->pinning.host);
837 if (pinned_connection->pinnedAuth())
838 request->flags.auth = true;
839 comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
840
841 /* Update server side TOS and Netfilter mark on the connection. */
842 if (Ip::Qos::TheConfig.isAclTosActive()) {
843 debugs(17, 3, HERE << "setting tos for pinned connection to " << (int)serverConn->tos );
844 serverConn->tos = GetTosToServer(request);
845 Ip::Qos::setSockTos(serverConn, serverConn->tos);
846 }
847 #if SO_MARK
848 if (Ip::Qos::TheConfig.isAclNfmarkActive()) {
849 serverConn->nfmark = GetNfmarkToServer(request);
850 Ip::Qos::setSockNfmark(serverConn, serverConn->nfmark);
851 }
852 #endif
853
854 // the server may close the pinned connection before this request
855 pconnRace = racePossible;
856 dispatch();
857 return;
858 }
859 // Pinned connection failure.
860 debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
861 ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
862 fail(anErr);
863 self = NULL; // refcounted
864 return;
865 }
866
867 // Use pconn to avoid opening a new connection.
868 const char *host = NULL;
869 if (!serverDestinations[0]->getPeer())
870 host = request->GetHost();
871
872 Comm::ConnectionPointer temp;
873 // Avoid pconns after races so that the same client does not suffer twice.
874 // This does not increase the total number of connections because we just
875 // closed the connection that failed the race. And re-pinning assumes this.
876 if (pconnRace != raceHappened)
877 temp = pconnPop(serverDestinations[0], host);
878
879 const bool openedPconn = Comm::IsConnOpen(temp);
880 pconnRace = openedPconn ? racePossible : raceImpossible;
881
882     // If we found an open persistent connection to use, use it.
883 if (openedPconn) {
884 serverConn = temp;
885 flags.connected_okay = true;
886 debugs(17, 3, HERE << "reusing pconn " << serverConnection());
887 ++n_tries;
888
889 comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
890
891 /* Update server side TOS and Netfilter mark on the connection. */
892 if (Ip::Qos::TheConfig.isAclTosActive()) {
893 const tos_t tos = GetTosToServer(request);
894 Ip::Qos::setSockTos(temp, tos);
895 }
896 #if SO_MARK
897 if (Ip::Qos::TheConfig.isAclNfmarkActive()) {
898 const nfmark_t nfmark = GetNfmarkToServer(request);
899 Ip::Qos::setSockNfmark(temp, nfmark);
900 }
901 #endif
902
903 dispatch();
904 return;
905 }
906
907 // We will try to open a new connection, possibly to the same destination.
908 // We reset serverDestinations[0] in case we are using it again because
909 // ConnOpener modifies its destination argument.
910 serverDestinations[0]->local.port(0);
911 serverConn = NULL;
912
913 #if URL_CHECKSUM_DEBUG
914 entry->mem_obj->checkUrlChecksum();
915 #endif
916
917 GetMarkingsToServer(request, *serverDestinations[0]);
918
919 calls.connector = commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper, this));
920 Comm::ConnOpener *cs = new Comm::ConnOpener(serverDestinations[0], calls.connector, timeLeft());
921 if (host)
922 cs->setHost(host);
923 AsyncJob::Start(cs);
924 }
925
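/// Hands the established server connection to the protocol-specific module
/// (HTTP, FTP, gopher, whois, ...) that will actually fetch the object, after
/// noting QoS details and handling the SslBump peek special case.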
926 void
927 FwdState::dispatch()
928 {
929 debugs(17, 3, clientConn << ": Fetching " << request->method << ' ' << entry->url());
930 /*
931 * Assert that server_fd is set. This is to guarantee that fwdState
932 * is attached to something and will be deallocated when server_fd
933 * is closed.
934 */
935 assert(Comm::IsConnOpen(serverConn));
936
937 fd_note(serverConnection()->fd, entry->url());
938
939 fd_table[serverConnection()->fd].noteUse();
940
941 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
942 assert(entry->ping_status != PING_WAITING);
943
944 assert(entry->locked());
945
946 EBIT_SET(entry->flags, ENTRY_DISPATCHED);
947
948 netdbPingSite(request->GetHost());
949
950 /* Retrieves remote server TOS or MARK value, and stores it as part of the
951 * original client request FD object. It is later used to forward
952 * remote server's TOS/MARK in the response to the client in case of a MISS.
953 */
954 if (Ip::Qos::TheConfig.isHitNfmarkActive()) {
955 if (Comm::IsConnOpen(clientConn) && Comm::IsConnOpen(serverConnection())) {
956 fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
957 /* Get the netfilter mark for the connection */
958 Ip::Qos::getNfmarkFromServer(serverConnection(), clientFde);
959 }
960 }
961
962 #if _SQUID_LINUX_
963 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
964 if (Ip::Qos::TheConfig.isHitTosActive()) {
965 if (Comm::IsConnOpen(clientConn)) {
966 fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
967 /* Get the TOS value for the packet */
968 Ip::Qos::getTosFromServer(serverConnection(), clientFde);
969 }
970 }
971 #endif
972
973 #if USE_OPENSSL
974 if (request->flags.sslPeek) {
975 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
976 ConnStateData::httpsPeeked, serverConnection());
977 unregister(serverConn); // async call owns it now
978 complete(); // destroys us
979 return;
980 }
981 #endif
982
983 if (serverConnection()->getPeer() != NULL) {
984 ++ serverConnection()->getPeer()->stats.fetches;
985 request->peer_login = serverConnection()->getPeer()->login;
986 request->peer_domain = serverConnection()->getPeer()->domain;
987 httpStart(this);
988 } else {
989 assert(!request->flags.sslPeek);
990 request->peer_login = NULL;
991 request->peer_domain = NULL;
992
993 switch (request->url.getScheme()) {
994 #if USE_OPENSSL
995
996 case AnyP::PROTO_HTTPS:
997 httpStart(this);
998 break;
999 #endif
1000
1001 case AnyP::PROTO_HTTP:
1002 httpStart(this);
1003 break;
1004
1005 case AnyP::PROTO_GOPHER:
1006 gopherStart(this);
1007 break;
1008
1009 case AnyP::PROTO_FTP:
1010 if (request->clientConnectionManager->isFtp)
1011 ftpGatewayServerStart(this);
1012 else
1013 ftpStart(this);
1014 break;
1015
1016 case AnyP::PROTO_CACHE_OBJECT:
1017
1018 case AnyP::PROTO_URN:
1019 fatal_dump("Should never get here");
1020 break;
1021
1022 case AnyP::PROTO_WHOIS:
1023 whoisStart(this);
1024 break;
1025
1026 case AnyP::PROTO_WAIS: /* Not implemented */
1027
1028 default:
1029 debugs(17, DBG_IMPORTANT, "WARNING: Cannot retrieve '" << entry->url() << "'.");
1030 ErrorState *anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request);
1031 fail(anErr);
1032 // Set the dont_retry flag because this is not a transient (network) error.
1033 flags.dont_retry = true;
1034 if (Comm::IsConnOpen(serverConn)) {
1035 serverConn->close();
1036 }
1037 break;
1038 }
1039 }
1040 }
1041
1042 /*
1043 * FwdState::reforward
1044 *
1045 * returns TRUE if the transaction SHOULD be re-forwarded to the
1046 * next choice in the serverDestinations list. This method is called when
1047 * server-side communication completes normally, or experiences
1048 * some error after receiving the end of HTTP headers.
1049 */
1050 int
1051 FwdState::reforward()
1052 {
1053 StoreEntry *e = entry;
1054
1055 if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
1056 debugs(17, 3, HERE << "entry aborted");
1057 return 0;
1058 }
1059
1060 assert(e->store_status == STORE_PENDING);
1061 assert(e->mem_obj);
1062 #if URL_CHECKSUM_DEBUG
1063
1064 e->mem_obj->checkUrlChecksum();
1065 #endif
1066
1067 debugs(17, 3, HERE << e->url() << "?" );
1068
1069 if (!EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
1070 debugs(17, 3, HERE << "No, ENTRY_FWD_HDR_WAIT isn't set");
1071 return 0;
1072 }
1073
1074 if (n_tries > Config.forward_max_tries)
1075 return 0;
1076
1077 if (request->bodyNibbled())
1078 return 0;
1079
1080 if (serverDestinations.size() <= 1) {
1081 // NP: <= 1 since total count includes the recently failed one.
1082 debugs(17, 3, HERE << "No alternative forwarding paths left");
1083 return 0;
1084 }
1085
1086 const Http::StatusCode s = e->getReply()->sline.status();
1087 debugs(17, 3, HERE << "status " << s);
1088 return reforwardableStatus(s);
1089 }
1090
1091 /**
1092 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1093 * on whether this is a validation request. RFC 2616 says that we MUST reply
1094 * with "504 Gateway Timeout" if validation fails and cached reply has
1095 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
1096 */
1097 ErrorState *
1098 FwdState::makeConnectingError(const err_type type) const
1099 {
1100 return new ErrorState(type, request->flags.needValidation ?
1101 Http::scGatewayTimeout : Http::scServiceUnavailable, request);
1102 }
1103
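/// Cache manager callback: prints the FwdReplyCodes table, i.e., how many
/// replies with each status code were seen on each forwarding attempt.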
1104 static void
1105 fwdStats(StoreEntry * s)
1106 {
1107 int i;
1108 int j;
1109 storeAppendPrintf(s, "Status");
1110
1111 for (j = 1; j < MAX_FWD_STATS_IDX; ++j) {
1112 storeAppendPrintf(s, "\ttry#%d", j);
1113 }
1114
1115 storeAppendPrintf(s, "\n");
1116
1117 for (i = 0; i <= (int) Http::scInvalidHeader; ++i) {
1118 if (FwdReplyCodes[0][i] == 0)
1119 continue;
1120
1121 storeAppendPrintf(s, "%3d", i);
1122
1123 for (j = 0; j <= MAX_FWD_STATS_IDX; ++j) {
1124 storeAppendPrintf(s, "\t%d", FwdReplyCodes[j][i]);
1125 }
1126
1127 storeAppendPrintf(s, "\n");
1128 }
1129 }
1130
1131 /**** STATIC MEMBER FUNCTIONS *************************************************/
1132
1133 bool
1134 FwdState::reforwardableStatus(const Http::StatusCode s) const
1135 {
1136 switch (s) {
1137
1138 case Http::scBadGateway:
1139
1140 case Http::scGatewayTimeout:
1141 return true;
1142
1143 case Http::scForbidden:
1144
1145 case Http::scInternalServerError:
1146
1147 case Http::scNotImplemented:
1148
1149 case Http::scServiceUnavailable:
1150 return Config.retry.onerror;
1151
1152 default:
1153 return false;
1154 }
1155
1156 /* NOTREACHED */
1157 }
1158
1159 /**
1160 * Decide where details need to be gathered to correctly describe a persistent connection.
1161 * What is needed:
1162 * - the address/port details about this link
1163 * - domain name of server at other end of this link (either peer or requested host)
1164 */
1165 void
1166 FwdState::pconnPush(Comm::ConnectionPointer &conn, const char *domain)
1167 {
1168 if (conn->getPeer()) {
1169 fwdPconnPool->push(conn, NULL);
1170 } else {
1171 fwdPconnPool->push(conn, domain);
1172 }
1173 }
1174
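/// Pops an idle persistent connection for the given destination: checks the
/// shared server-side pool first and then the destination peer's standby pool.
/// May return an open, closed, or nil connection.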
1175 Comm::ConnectionPointer
1176 FwdState::pconnPop(const Comm::ConnectionPointer &dest, const char *domain)
1177 {
1178 // always call shared pool first because we need to close an idle
1179 // connection there if we have to use a standby connection.
1180 Comm::ConnectionPointer conn = fwdPconnPool->pop(dest, domain, checkRetriable());
1181 if (!Comm::IsConnOpen(conn)) {
1182 // either there was no pconn to pop or this is not a retriable xaction
1183 if (CachePeer *peer = dest->getPeer()) {
1184 if (peer->standby.pool)
1185 conn = peer->standby.pool->pop(dest, domain, true);
1186 }
1187 }
1188 return conn; // open, closed, or nil
1189 }
1190
1191 void
1192 FwdState::initModule()
1193 {
1194 RegisterWithCacheManager();
1195 }
1196
1197 void
1198 FwdState::RegisterWithCacheManager(void)
1199 {
1200 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats, 0, 1);
1201 }
1202
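/// Records the reply status code for the given attempt number in the
/// FwdReplyCodes histogram reported by the 'forward' cache manager action.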
1203 void
1204 FwdState::logReplyStatus(int tries, const Http::StatusCode status)
1205 {
1206 if (status > Http::scInvalidHeader)
1207 return;
1208
1209 assert(tries >= 0);
1210
1211 if (tries > MAX_FWD_STATS_IDX)
1212 tries = MAX_FWD_STATS_IDX;
1213
1214 ++ FwdReplyCodes[tries][status];
1215 }
1216
1217 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1218
1219 /*
1220 * DPW 2007-05-19
1221 * Formerly static, but now used by client_side_request.cc
1222 */
1223 /// Checks for a TOS value to apply depending on the ACL
1224 tos_t
1225 aclMapTOS(acl_tos * head, ACLChecklist * ch)
1226 {
1227 acl_tos *l;
1228
1229 for (l = head; l; l = l->next) {
1230 if (!l->aclList || ch->fastCheck(l->aclList) == ACCESS_ALLOWED)
1231 return l->tos;
1232 }
1233
1234 return 0;
1235 }
1236
1237 /// Checks for a netfilter mark value to apply depending on the ACL
1238 nfmark_t
1239 aclMapNfmark(acl_nfmark * head, ACLChecklist * ch)
1240 {
1241 acl_nfmark *l;
1242
1243 for (l = head; l; l = l->next) {
1244 if (!l->aclList || ch->fastCheck(l->aclList) == ACCESS_ALLOWED)
1245 return l->nfmark;
1246 }
1247
1248 return 0;
1249 }
1250
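/// Chooses the outgoing (local) address for a server-side connection: keeps an
/// already-set address, spoofs the client address for TPROXY requests when
/// allowed, and otherwise applies the first matching tcp_outgoing_address rule.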
1251 void
1252 getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn)
1253 {
1254 // skip if an outgoing address is already set.
1255 if (!conn->local.isAnyAddr()) return;
1256
1257 // ensure that at minimum the wildcard local matches remote protocol
1258 if (conn->remote.isIPv4())
1259 conn->local.setIPv4();
1260
1261 // maybe use TPROXY client address
1262 if (request && request->flags.spoofClientIp) {
1263 if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
1264 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1265 if (Config.onoff.tproxy_uses_indirect_client)
1266 conn->local = request->indirect_client_addr;
1267 else
1268 #endif
1269 conn->local = request->client_addr;
1270 // some flags need setting on the socket to use this address
1271 conn->flags |= COMM_DOBIND;
1272 conn->flags |= COMM_TRANSPARENT;
1273 return;
1274 }
1275 // else no tproxy today ...
1276 }
1277
1278 if (!Config.accessList.outgoing_address) {
1279 return; // anything will do.
1280 }
1281
1282 ACLFilledChecklist ch(NULL, request, NULL);
1283 ch.dst_peer = conn->getPeer();
1284 ch.dst_addr = conn->remote;
1285
1286 // TODO use the connection details in ACL.
1287 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1288
1289 AclAddress *l;
1290 for (l = Config.accessList.outgoing_address; l; l = l->next) {
1291
1292 /* check if the outgoing address is usable to the destination */
1293 if (conn->remote.isIPv4() != l->addr.isIPv4()) continue;
1294
1295 /* check ACLs for this outgoing address */
1296 if (!l->aclList || ch.fastCheck(l->aclList) == ACCESS_ALLOWED) {
1297 conn->local = l->addr;
1298 return;
1299 }
1300 }
1301 }
1302
1303 tos_t
1304 GetTosToServer(HttpRequest * request)
1305 {
1306 ACLFilledChecklist ch(NULL, request, NULL);
1307 return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
1308 }
1309
1310 nfmark_t
1311 GetNfmarkToServer(HttpRequest * request)
1312 {
1313 ACLFilledChecklist ch(NULL, request, NULL);
1314 return aclMapNfmark(Ip::Qos::TheConfig.nfmarkToServer, &ch);
1315 }
1316
1317 void
1318 GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
1319 {
1320 // Get the server side TOS and Netfilter mark to be set on the connection.
1321 if (Ip::Qos::TheConfig.isAclTosActive()) {
1322 conn.tos = GetTosToServer(request);
1323 debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos));
1324 }
1325
1326 #if SO_MARK && USE_LIBCAP
1327 conn.nfmark = GetNfmarkToServer(request);
1328 debugs(17, 3, "from " << conn.local << " netfilter mark " << conn.nfmark);
1329 #else
1330 conn.nfmark = 0;
1331 #endif
1332 }