1 /*
2 * DEBUG: section 17 Request Forwarding
3 * AUTHOR: Duane Wessels
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 */
32
33 #include "squid.h"
34 #include "AccessLogEntry.h"
35 #include "acl/AclAddress.h"
36 #include "acl/FilledChecklist.h"
37 #include "acl/Gadgets.h"
38 #include "anyp/PortCfg.h"
39 #include "CacheManager.h"
40 #include "CachePeer.h"
41 #include "client_side.h"
42 #include "clients/forward.h"
43 #include "comm/Connection.h"
44 #include "comm/ConnOpener.h"
45 #include "comm/Loops.h"
46 #include "CommCalls.h"
47 #include "errorpage.h"
48 #include "event.h"
49 #include "fd.h"
50 #include "fde.h"
51 #include "FwdState.h"
52 #include "globals.h"
53 #include "gopher.h"
54 #include "hier_code.h"
55 #include "http.h"
56 #include "HttpReply.h"
57 #include "HttpRequest.h"
58 #include "icmp/net_db.h"
59 #include "internal.h"
60 #include "ip/Intercept.h"
61 #include "ip/QosConfig.h"
62 #include "ip/tools.h"
63 #include "MemObject.h"
64 #include "mgr/Registration.h"
65 #include "neighbors.h"
66 #include "pconn.h"
67 #include "PeerPoolMgr.h"
68 #include "PeerSelectState.h"
69 #include "SquidConfig.h"
70 #include "SquidTime.h"
71 #include "Store.h"
72 #include "StoreClient.h"
73 #include "urn.h"
74 #include "whois.h"
75 #if USE_OPENSSL
76 #include "ssl/cert_validate_message.h"
77 #include "ssl/Config.h"
78 #include "ssl/ErrorDetail.h"
79 #include "ssl/helper.h"
80 #include "ssl/PeerConnector.h"
81 #include "ssl/ServerBump.h"
82 #include "ssl/support.h"
83 #endif
84
85 #include <cerrno>
86
87 static PSC fwdPeerSelectionCompleteWrapper;
88 static CLCB fwdServerClosedWrapper;
89 static CNCB fwdConnectDoneWrapper;
90
91 static OBJH fwdStats;
92
93 #define MAX_FWD_STATS_IDX 9
94 static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];
95
96 static PconnPool *fwdPconnPool = new PconnPool("server-side", NULL);
97 CBDATA_CLASS_INIT(FwdState);
98
99 #if USE_OPENSSL
100 class FwdStatePeerAnswerDialer: public CallDialer, public Ssl::PeerConnector::CbDialer
101 {
102 public:
103 typedef void (FwdState::*Method)(Ssl::PeerConnectorAnswer &);
104
105 FwdStatePeerAnswerDialer(Method method, FwdState *fwd):
106 method_(method), fwd_(fwd), answer_() {}
107
108 /* CallDialer API */
109 virtual bool canDial(AsyncCall &call) { return fwd_.valid(); }
110 void dial(AsyncCall &call) { ((&(*fwd_))->*method_)(answer_); }
111 virtual void print(std::ostream &os) const {
112 os << '(' << fwd_.get() << ", " << answer_ << ')';
113 }
114
115 /* Ssl::PeerConnector::CbDialer API */
116 virtual Ssl::PeerConnectorAnswer &answer() { return answer_; }
117
118 private:
119 Method method_;
120 CbcPointer<FwdState> fwd_;
121 Ssl::PeerConnectorAnswer answer_;
122 };
123 #endif
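/*
 * Illustrative sketch (mirroring the real use in FwdState::connectDone() below):
 * the dialer binds a FwdState method so that Ssl::PeerConnector can deliver its
 * answer as an async call, and canDial() quietly drops the call if the FwdState
 * is already gone.
 *
 *   AsyncCall::Pointer cb = asyncCall(17, 4, "FwdState::ConnectedToPeer",
 *       FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
 *   AsyncJob::Start(new Ssl::PeerConnector(requestPointer, serverConnection(),
 *                                          clientConn, cb, sslNegotiationTimeout));
 */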
124
125 void
126 FwdState::abort(void* d)
127 {
128 FwdState* fwd = (FwdState*)d;
129 Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
130
131 if (Comm::IsConnOpen(fwd->serverConnection())) {
132 fwd->closeServerConnection("store entry aborted");
133 } else {
134 debugs(17, 7, HERE << "store entry aborted; no connection to close");
135 }
136 fwd->serverDestinations.clear();
137 fwd->self = NULL;
138 }
139
140 void
141 FwdState::closeServerConnection(const char *reason)
142 {
143 debugs(17, 3, "because " << reason << "; " << serverConn);
144 comm_remove_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
145 fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
146 serverConn->close();
147 }
148
149 /**** PUBLIC INTERFACE ********************************************************/
150
151 FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRequest * r, const AccessLogEntryPointer &alp):
152 al(alp)
153 {
154 debugs(17, 2, HERE << "Forwarding client request " << client << ", url=" << e->url() );
155 entry = e;
156 clientConn = client;
157 request = r;
158 HTTPMSGLOCK(request);
159 pconnRace = raceImpossible;
160 start_t = squid_curtime;
161 serverDestinations.reserve(Config.forward_max_tries);
162 e->lock("FwdState");
163 EBIT_SET(e->flags, ENTRY_FWD_HDR_WAIT);
164 }
165
166 // Called once, right after object creation, when it is safe to set self
167 void FwdState::start(Pointer aSelf)
168 {
169 // Protect ourselves from being destroyed when the only Server pointing
170 // to us is gone (while we expect to talk to more Servers later).
171 // Once we set self, we are responsible for clearing it when we do not
172 // expect to talk to any servers.
173 self = aSelf; // refcounted
174
175 // We hope that either the store entry aborts or a peer is selected.
176 // Otherwise we are going to leak our object.
177
178 entry->registerAbort(FwdState::abort, this);
179
180 #if STRICT_ORIGINAL_DST
181 // Bug 3243: CVE-2009-0801
182 // Bypass of browser same-origin access control in intercepted communication
183 // To resolve this we must force DIRECT and only to the original client destination.
184 const bool isIntercepted = request && !request->flags.redirected && (request->flags.intercepted || request->flags.interceptTproxy);
185 const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
186 if (isIntercepted && useOriginalDst) {
187 selectPeerForIntercepted();
188 // 3.2 does not support re-wrapping inside CONNECT.
189 // Our only alternative is to fake destination "found" and continue with the forwarding.
190 startConnectionOrFail();
191 return;
192 }
193 #endif
194
195 // do full route options selection
196 peerSelect(&serverDestinations, request, al, entry, fwdPeerSelectionCompleteWrapper, this);
197 }
198
199 #if STRICT_ORIGINAL_DST
200 /// bypasses peerSelect() when dealing with intercepted requests
201 void
202 FwdState::selectPeerForIntercepted()
203 {
204 // use pinned connection if available
205 Comm::ConnectionPointer p;
206 if (ConnStateData *client = request->pinnedConnection()) {
207 p = client->validatePinnedConnection(request, NULL);
208 if (Comm::IsConnOpen(p)) {
209 /* duplicate peerSelectPinned() effects */
210 p->peerType = PINNED;
211 entry->ping_status = PING_DONE; /* Skip ICP */
212
213 debugs(17, 3, "reusing a pinned conn: " << *p);
214 serverDestinations.push_back(p);
215 } else {
216 debugs(17,2, "Pinned connection is not valid: " << p);
217 ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
218 fail(anErr);
219 }
220 // Either use the valid pinned connection or fail if it is invalid.
221 return;
222 }
223
224 // use client original destination as second preferred choice
225 p = new Comm::Connection();
226 p->peerType = ORIGINAL_DST;
227 p->remote = clientConn->local;
228 getOutgoingAddress(request, p);
229
230 debugs(17, 3, HERE << "using client original destination: " << *p);
231 serverDestinations.push_back(p);
232 }
233 #endif
234
235 void
236 FwdState::completed()
237 {
238 if (flags.forward_completed) {
239 debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
240 return;
241 }
242
243 flags.forward_completed = true;
244
245 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
246 debugs(17, 3, HERE << "entry aborted");
247 return ;
248 }
249
250 #if URL_CHECKSUM_DEBUG
251
252 entry->mem_obj->checkUrlChecksum();
253 #endif
254
255 if (entry->store_status == STORE_PENDING) {
256 if (entry->isEmpty()) {
257 if (!err) // we quit (e.g., fd closed) before an error or content
258 fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request));
259 assert(err);
260 errorAppendEntry(entry, err);
261 err = NULL;
262 #if USE_OPENSSL
263 if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
264 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
265 ConnStateData::httpsPeeked, Comm::ConnectionPointer(NULL));
266 }
267 #endif
268 } else {
269 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
270 entry->complete();
271 entry->releaseRequest();
272 }
273 }
274
275 if (storePendingNClients(entry) > 0)
276 assert(!EBIT_TEST(entry->flags, ENTRY_FWD_HDR_WAIT));
277
278 }
279
280 FwdState::~FwdState()
281 {
282 debugs(17, 3, HERE << "FwdState destructor starting");
283
284 if (! flags.forward_completed)
285 completed();
286
287 doneWithRetries();
288
289 HTTPMSGUNLOCK(request);
290
291 delete err;
292
293 entry->unregisterAbort();
294
295 entry->unlock("FwdState");
296
297 entry = NULL;
298
299 if (calls.connector != NULL) {
300 calls.connector->cancel("FwdState destructed");
301 calls.connector = NULL;
302 }
303
304 if (Comm::IsConnOpen(serverConn))
305 closeServerConnection("~FwdState");
306
307 serverDestinations.clear();
308
309 debugs(17, 3, HERE << "FwdState destructor done");
310 }
311
312 /**
313 * This is the entry point for client-side to start forwarding
314 * a transaction. It is a static method that may or may not
315 * allocate a FwdState.
316 */
317 void
318 FwdState::Start(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request, const AccessLogEntryPointer &al)
319 {
320 /** \note
321 * client_addr == no_addr indicates this is an "internal" request
322 * from peer_digest.c, asn.c, netdb.c, etc. and should always
323 * be allowed. Yuck, I know.
324 */
325
326 if ( Config.accessList.miss && !request->client_addr.isNoAddr() &&
327 !request->flags.internal && request->url.getScheme() != AnyP::PROTO_CACHE_OBJECT) {
328 /**
329 * Check if this host is allowed to fetch MISSES from us (miss_access).
330 * Intentionally replace the src_addr automatically selected by the checklist code:
331 * we do NOT want the indirect client address to be tested here.
332 */
333 ACLFilledChecklist ch(Config.accessList.miss, request, NULL);
334 ch.src_addr = request->client_addr;
335 if (ch.fastCheck() == ACCESS_DENIED) {
336 err_type page_id;
337 page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName, 1);
338
339 if (page_id == ERR_NONE)
340 page_id = ERR_FORWARDING_DENIED;
341
342 ErrorState *anErr = new ErrorState(page_id, Http::scForbidden, request);
343 errorAppendEntry(entry, anErr); // frees anErr
344 return;
345 }
346 }
347
348 debugs(17, 3, HERE << "'" << entry->url() << "'");
349 /*
350 * This seems like an odd place to bind mem_obj and request.
351 * Might want to assert that request is NULL at this point
352 */
353 entry->mem_obj->request = request;
354 HTTPMSGLOCK(entry->mem_obj->request);
355 #if URL_CHECKSUM_DEBUG
356
357 entry->mem_obj->checkUrlChecksum();
358 #endif
359
360 if (shutting_down) {
361 /* more yuck */
362 ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
363 errorAppendEntry(entry, anErr); // frees anErr
364 return;
365 }
366
367 if (request->flags.internal) {
368 debugs(17, 2, "calling internalStart() due to request flag");
369 internalStart(clientConn, request, entry);
370 return;
371 }
372
373 switch (request->url.getScheme()) {
374
375 case AnyP::PROTO_CACHE_OBJECT:
376 debugs(17, 2, "calling CacheManager due to request scheme " << request->url.getScheme());
377 CacheManager::GetInstance()->Start(clientConn, request, entry);
378 return;
379
380 case AnyP::PROTO_URN:
381 urnStart(request, entry);
382 return;
383
384 default:
385 FwdState::Pointer fwd = new FwdState(clientConn, entry, request, al);
386 fwd->start(fwd);
387 return;
388 }
389
390 /* NOTREACHED */
391 }
392
393 void
394 FwdState::fwdStart(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request)
395 {
396 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
397 Start(clientConn, entry, request, NULL);
398 }
399
400 void
401 FwdState::startConnectionOrFail()
402 {
403 debugs(17, 3, HERE << entry->url());
404
405 if (serverDestinations.size() > 0) {
406 // Ditch error page if it was created before.
407 // A new one will be created if there's another problem
408 delete err;
409 err = NULL;
410
411 // Update the logging information about this new server connection.
412 // Done here before anything else so the errors get logged for
413 // this server link regardless of what happens when connecting to it.
414 // If successfully connected, this top destination will become the serverConnection().
415 request->hier.note(serverDestinations[0], request->GetHost());
416 request->clearError();
417
418 connectStart();
419 } else {
420 debugs(17, 3, HERE << "Connection failed: " << entry->url());
421 if (!err) {
422 ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request);
423 fail(anErr);
424 } // else use actual error from last connection attempt
425 self = NULL; // refcounted
426 }
427 }
428
429 void
430 FwdState::fail(ErrorState * errorState)
431 {
432 debugs(17, 3, err_type_str[errorState->type] << " \"" << Http::StatusCodeString(errorState->httpStatus) << "\"\n\t" << entry->url());
433
434 delete err;
435 err = errorState;
436
437 if (!errorState->request) {
438 errorState->request = request;
439 HTTPMSGLOCK(errorState->request);
440 }
441
442 if (err->type != ERR_ZERO_SIZE_OBJECT)
443 return;
444
445 if (pconnRace == racePossible) {
446 debugs(17, 5, HERE << "pconn race happened");
447 pconnRace = raceHappened;
448 }
449
450 if (ConnStateData *pinned_connection = request->pinnedConnection()) {
451 pinned_connection->pinning.zeroReply = true;
452 flags.dont_retry = true; // we want to propagate failure to the client
453 debugs(17, 4, "zero reply on pinned connection");
454 }
455 }
456
457 /**
458 * Frees fwdState without closing FD or generating an abort
459 */
460 void
461 FwdState::unregister(Comm::ConnectionPointer &conn)
462 {
463 debugs(17, 3, HERE << entry->url() );
464 assert(serverConnection() == conn);
465 assert(Comm::IsConnOpen(conn));
466 comm_remove_close_handler(conn->fd, fwdServerClosedWrapper, this);
467 serverConn = NULL;
468 }
469
470 // Legacy method to be removed in favor of the above as soon as possible
471 void
472 FwdState::unregister(int fd)
473 {
474 debugs(17, 3, HERE << entry->url() );
475 assert(fd == serverConnection()->fd);
476 unregister(serverConn);
477 }
478
479 /**
480 * Server-side modules call FwdState::complete() when they are done
481 * downloading an object. Then, we either 1) re-forward the
482 * request somewhere else if needed, or 2) complete the StoreEntry
483 * to finish it off.
484 */
485 void
486 FwdState::complete()
487 {
488 debugs(17, 3, HERE << entry->url() << "\n\tstatus " << entry->getReply()->sline.status());
489 #if URL_CHECKSUM_DEBUG
490
491 entry->mem_obj->checkUrlChecksum();
492 #endif
493
494 logReplyStatus(n_tries, entry->getReply()->sline.status());
495
496 if (reforward()) {
497 debugs(17, 3, HERE << "re-forwarding " << entry->getReply()->sline.status() << " " << entry->url());
498
499 if (Comm::IsConnOpen(serverConn))
500 unregister(serverConn);
501
502 entry->reset();
503
504 // drop the last path off the selection list. try the next one.
505 serverDestinations.erase(serverDestinations.begin());
506 startConnectionOrFail();
507
508 } else {
509 if (Comm::IsConnOpen(serverConn))
510 debugs(17, 3, HERE << "server FD " << serverConnection()->fd << " not re-forwarding status " << entry->getReply()->sline.status());
511 else
512 debugs(17, 3, HERE << "server (FD closed) not re-forwarding status " << entry->getReply()->sline.status());
513 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
514 entry->complete();
515
516 if (!Comm::IsConnOpen(serverConn))
517 completed();
518
519 self = NULL; // refcounted
520 }
521 }
522
523 /**** CALLBACK WRAPPERS ************************************************************/
524
525 static void
526 fwdPeerSelectionCompleteWrapper(Comm::ConnectionList * unused, ErrorState *err, void *data)
527 {
528 FwdState *fwd = (FwdState *) data;
529 if (err)
530 fwd->fail(err);
531 fwd->startConnectionOrFail();
532 }
533
534 static void
535 fwdServerClosedWrapper(const CommCloseCbParams &params)
536 {
537 FwdState *fwd = (FwdState *)params.data;
538 fwd->serverClosed(params.fd);
539 }
540
541 void
542 fwdConnectDoneWrapper(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno, void *data)
543 {
544 FwdState *fwd = (FwdState *) data;
545 fwd->connectDone(conn, status, xerrno);
546 }
547
548 /**** PRIVATE *****************************************************************/
549
550 /*
551 * FwdState::checkRetry
552 *
553 * Return TRUE if the request SHOULD be retried. This method is
554 * called when the HTTP connection fails, or when the connection
555 * is closed before the server side has read the end of the HTTP headers.
556 */
557 bool
558 FwdState::checkRetry()
559 {
560 if (shutting_down)
561 return false;
562
563 if (!self) { // we have aborted before the server called us back
564 debugs(17, 5, HERE << "not retrying because of earlier abort");
565 // we will be destroyed when the server clears its Pointer to us
566 return false;
567 }
568
569 if (entry->store_status != STORE_PENDING)
570 return false;
571
572 if (!entry->isEmpty())
573 return false;
574
575 if (n_tries > Config.forward_max_tries)
576 return false;
577
578 if (squid_curtime - start_t > Config.Timeout.forward)
579 return false;
580
581 if (flags.dont_retry)
582 return false;
583
584 if (request->bodyNibbled())
585 return false;
586
587 // NP: not yet actually connected anywhere. retry is safe.
588 if (!flags.connected_okay)
589 return true;
590
591 if (!checkRetriable())
592 return false;
593
594 return true;
595 }
596
597 /*
598 * FwdState::checkRetriable
599 *
600 * Return TRUE if this is the kind of request that can be retried
601 * after a failure. If the request is not retriable then we don't
602 * want to risk sending it on a persistent connection. Instead we'll
603 * force it to go on a new HTTP connection.
604 */
605 bool
606 FwdState::checkRetriable()
607 {
608 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
609 // complicated] code required to protect the PUT request body from being
610 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
611 if (request->body_pipe != NULL)
612 return false;
613
614 // RFC2616 9.1 Safe and Idempotent Methods
615 return (request->method.isHttpSafe() || request->method.isIdempotent());
616 }
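/*
 * Worked examples of the rule above (RFC 2616 section 9.1):
 *   GET, HEAD        -> safe and idempotent -> retriable
 *   DELETE (no body) -> idempotent          -> retriable
 *   POST, CONNECT    -> neither             -> not retriable
 * Any request carrying a body (request->body_pipe set), e.g. most PUTs, is
 * already rejected by the body_pipe check at the top of checkRetriable().
 */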
617
618 void
619 FwdState::serverClosed(int fd)
620 {
621 // XXX: fd is often -1 here
622 debugs(17, 2, "FD " << fd << " " << entry->url() << " after " <<
623 (fd >= 0 ? fd_table[fd].pconn.uses : -1) << " requests");
624 if (fd >= 0 && serverConnection()->fd == fd)
625 fwdPconnPool->noteUses(fd_table[fd].pconn.uses);
626 retryOrBail();
627 }
628
629 void
630 FwdState::retryOrBail()
631 {
632 if (checkRetry()) {
633 debugs(17, 3, HERE << "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
634 // we should retry the same destination if it failed due to pconn race
635 if (pconnRace == raceHappened)
636 debugs(17, 4, HERE << "retrying the same destination");
637 else
638 serverDestinations.erase(serverDestinations.begin()); // last one failed. try another.
639 startConnectionOrFail();
640 return;
641 }
642
643 // TODO: should we call completed() here and move doneWithRetries there?
644 doneWithRetries();
645
646 if (self != NULL && !err && shutting_down) {
647 ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
648 errorAppendEntry(entry, anErr);
649 }
650
651 self = NULL; // refcounted
652 }
653
654 // If the Server quits before nibbling at the request body, the body sender
655 // will not know (so that we can retry). Call this if we will not retry. We
656 // will notify the sender so that it does not get stuck waiting for space.
657 void
658 FwdState::doneWithRetries()
659 {
660 if (request && request->body_pipe != NULL)
661 request->body_pipe->expectNoConsumption();
662 }
663
664 // called by the server that failed after calling unregister()
665 void
666 FwdState::handleUnregisteredServerEnd()
667 {
668 debugs(17, 2, HERE << "self=" << self << " err=" << err << ' ' << entry->url());
669 assert(!Comm::IsConnOpen(serverConn));
670 retryOrBail();
671 }
672
673 void
674 FwdState::connectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno)
675 {
676 if (status != Comm::OK) {
677 ErrorState *const anErr = makeConnectingError(ERR_CONNECT_FAIL);
678 anErr->xerrno = xerrno;
679 fail(anErr);
680
681 /* it might have been a timeout with a partially open link */
682 if (conn != NULL) {
683 if (conn->getPeer())
684 peerConnectFailed(conn->getPeer());
685
686 conn->close();
687 }
688 retryOrBail();
689 return;
690 }
691
692 serverConn = conn;
693 flags.connected_okay = true;
694
695 debugs(17, 3, HERE << serverConnection() << ": '" << entry->url() << "'" );
696
697 comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
698
699 if (serverConnection()->getPeer())
700 peerConnectSucceded(serverConnection()->getPeer());
701
702 #if USE_OPENSSL
703 if (!request->flags.pinned) {
704 if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl) ||
705 (!serverConnection()->getPeer() && request->url.getScheme() == AnyP::PROTO_HTTPS) ||
706 request->flags.sslPeek) {
707
708 HttpRequest::Pointer requestPointer = request;
709 AsyncCall::Pointer callback = asyncCall(17,4,
710 "FwdState::ConnectedToPeer",
711 FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
712 // Use positive timeout when less than one second is left.
713 const time_t sslNegotiationTimeout = max(static_cast<time_t>(1), timeLeft());
714 Ssl::PeerConnector *connector =
715 new Ssl::PeerConnector(requestPointer, serverConnection(), clientConn, callback, sslNegotiationTimeout);
716 AsyncJob::Start(connector); // will call our callback
717 return;
718 }
719 }
720 #endif
721
722 // should reach ConnStateData before the dispatched Client job starts
723 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
724 ConnStateData::notePeerConnection, serverConnection());
725
726 dispatch();
727 }
728
729 #if USE_OPENSSL
730 void
731 FwdState::connectedToPeer(Ssl::PeerConnectorAnswer &answer)
732 {
733 if (ErrorState *error = answer.error.get()) {
734 fail(error);
735 answer.error.clear(); // preserve error for errorSendComplete()
736 self = NULL;
737 return;
738 }
739
740 dispatch();
741 }
742 #endif
743
744 void
745 FwdState::connectTimeout(int fd)
746 {
747 debugs(17, 2, "fwdConnectTimeout: FD " << fd << ": '" << entry->url() << "'" );
748 assert(serverDestinations[0] != NULL);
749 assert(fd == serverDestinations[0]->fd);
750
751 if (entry->isEmpty()) {
752 ErrorState *anErr = new ErrorState(ERR_CONNECT_FAIL, Http::scGatewayTimeout, request);
753 anErr->xerrno = ETIMEDOUT;
754 fail(anErr);
755
756 /* This marks the peer DOWN ... */
757 if (serverDestinations[0]->getPeer())
758 peerConnectFailed(serverDestinations[0]->getPeer());
759 }
760
761 if (Comm::IsConnOpen(serverDestinations[0])) {
762 serverDestinations[0]->close();
763 }
764 }
765
766 time_t
767 FwdState::timeLeft() const
768 {
769 /* connection timeout */
770 int ctimeout;
771 if (serverDestinations[0]->getPeer()) {
772 ctimeout = serverDestinations[0]->getPeer()->connect_timeout > 0 ?
773 serverDestinations[0]->getPeer()->connect_timeout : Config.Timeout.peer_connect;
774 } else {
775 ctimeout = Config.Timeout.connect;
776 }
777
778 /* calculate total forwarding timeout ??? */
779 int ftimeout = Config.Timeout.forward - (squid_curtime - start_t);
780 if (ftimeout < 0)
781 ftimeout = 5;
782
783 if (ftimeout < ctimeout)
784 return (time_t)ftimeout;
785 else
786 return (time_t)ctimeout;
787 }
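/*
 * Worked example with hypothetical values: forward_timeout is 4 minutes, the
 * transaction started 230 seconds ago, and the selected peer has
 * connect_timeout=30 seconds:
 *   ftimeout = 240 - 230 = 10
 *   ctimeout = 30
 *   timeLeft() = min(10, 30) = 10 seconds
 * If the forwarding budget is already exhausted (ftimeout < 0), a 5-second
 * grace value is used instead of failing immediately.
 */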
788
789 /**
790 * Called after forwarding path selection (via peer select) has taken place
791 * and whenever forwarding needs to attempt a new connection (routing failover).
792 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
793 */
794 void
795 FwdState::connectStart()
796 {
797 assert(serverDestinations.size() > 0);
798
799 debugs(17, 3, "fwdConnectStart: " << entry->url());
800
801 if (!request->hier.first_conn_start.tv_sec) // first attempt
802 request->hier.first_conn_start = current_time;
803
804 if (serverDestinations[0]->getPeer() && request->flags.sslBumped) {
805 debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parent proxy are not allowed");
806 ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request);
807 fail(anErr);
808 self = NULL; // refcounted
809 return;
810 }
811
812 request->flags.pinned = false; // XXX: what if the ConnStateData set this to flag existing credentials?
813 // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
814 // XXX: also, logs will now lie if pinning is broken and leads to an error message.
815 if (serverDestinations[0]->peerType == PINNED) {
816 ConnStateData *pinned_connection = request->pinnedConnection();
817 debugs(17,7, "pinned peer connection: " << pinned_connection);
818 // pinned_connection may become nil after a pconn race
819 if (pinned_connection)
820 serverConn = pinned_connection->borrowPinnedConnection(request, serverDestinations[0]->getPeer());
821 else
822 serverConn = NULL;
823 if (Comm::IsConnOpen(serverConn)) {
824 pinned_connection->stopPinnedConnectionMonitoring();
825 flags.connected_okay = true;
826 ++n_tries;
827 request->flags.pinned = true;
828 request->hier.note(serverConn, pinned_connection->pinning.host);
829 if (pinned_connection->pinnedAuth())
830 request->flags.auth = true;
831 comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
832
833 /* Update server side TOS and Netfilter mark on the connection. */
834 if (Ip::Qos::TheConfig.isAclTosActive()) {
835 debugs(17, 3, HERE << "setting tos for pinned connection to " << (int)serverConn->tos );
836 serverConn->tos = GetTosToServer(request);
837 Ip::Qos::setSockTos(serverConn, serverConn->tos);
838 }
839 #if SO_MARK
840 if (Ip::Qos::TheConfig.isAclNfmarkActive()) {
841 serverConn->nfmark = GetNfmarkToServer(request);
842 Ip::Qos::setSockNfmark(serverConn, serverConn->nfmark);
843 }
844 #endif
845
846 // the server may close the pinned connection before this request
847 pconnRace = racePossible;
848 dispatch();
849 return;
850 }
851 // Pinned connection failure.
852 debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
853 ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
854 fail(anErr);
855 self = NULL; // refcounted
856 return;
857 }
858
859 // Use pconn to avoid opening a new connection.
860 const char *host = NULL;
861 if (!serverDestinations[0]->getPeer())
862 host = request->GetHost();
863
864 Comm::ConnectionPointer temp;
865 // Avoid pconns after races so that the same client does not suffer twice.
866 // This does not increase the total number of connections because we just
867 // closed the connection that failed the race. And re-pinning assumes this.
868 if (pconnRace != raceHappened)
869 temp = pconnPop(serverDestinations[0], host);
870
871 const bool openedPconn = Comm::IsConnOpen(temp);
872 pconnRace = openedPconn ? racePossible : raceImpossible;
873
874 // If we found an open persistent connection to use, use it.
875 if (openedPconn) {
876 serverConn = temp;
877 flags.connected_okay = true;
878 debugs(17, 3, HERE << "reusing pconn " << serverConnection());
879 ++n_tries;
880
881 comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
882
883 /* Update server side TOS and Netfilter mark on the connection. */
884 if (Ip::Qos::TheConfig.isAclTosActive()) {
885 const tos_t tos = GetTosToServer(request);
886 Ip::Qos::setSockTos(temp, tos);
887 }
888 #if SO_MARK
889 if (Ip::Qos::TheConfig.isAclNfmarkActive()) {
890 const nfmark_t nfmark = GetNfmarkToServer(request);
891 Ip::Qos::setSockNfmark(temp, nfmark);
892 }
893 #endif
894
895 dispatch();
896 return;
897 }
898
899 // We will try to open a new connection, possibly to the same destination.
900 // We reset serverDestinations[0] in case we are using it again because
901 // ConnOpener modifies its destination argument.
902 serverDestinations[0]->local.port(0);
903 serverConn = NULL;
904
905 #if URL_CHECKSUM_DEBUG
906 entry->mem_obj->checkUrlChecksum();
907 #endif
908
909 GetMarkingsToServer(request, *serverDestinations[0]);
910
911 calls.connector = commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper, this));
912 Comm::ConnOpener *cs = new Comm::ConnOpener(serverDestinations[0], calls.connector, timeLeft());
913 if (host)
914 cs->setHost(host);
915 AsyncJob::Start(cs);
916 }
917
918 void
919 FwdState::dispatch()
920 {
921 debugs(17, 3, clientConn << ": Fetching " << request->method << ' ' << entry->url());
922 /*
923 * Assert that the server connection is open. This guarantees that fwdState
924 * is attached to something and will be deallocated when the server
925 * connection is closed.
926 */
927 assert(Comm::IsConnOpen(serverConn));
928
929 fd_note(serverConnection()->fd, entry->url());
930
931 fd_table[serverConnection()->fd].noteUse();
932
933 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
934 assert(entry->ping_status != PING_WAITING);
935
936 assert(entry->locked());
937
938 EBIT_SET(entry->flags, ENTRY_DISPATCHED);
939
940 netdbPingSite(request->GetHost());
941
942 /* Retrieves remote server TOS or MARK value, and stores it as part of the
943 * original client request FD object. It is later used to forward
944 * remote server's TOS/MARK in the response to the client in case of a MISS.
945 */
946 if (Ip::Qos::TheConfig.isHitNfmarkActive()) {
947 if (Comm::IsConnOpen(clientConn) && Comm::IsConnOpen(serverConnection())) {
948 fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
949 /* Get the netfilter mark for the connection */
950 Ip::Qos::getNfmarkFromServer(serverConnection(), clientFde);
951 }
952 }
953
954 #if _SQUID_LINUX_
955 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
956 if (Ip::Qos::TheConfig.isHitTosActive()) {
957 if (Comm::IsConnOpen(clientConn)) {
958 fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
959 /* Get the TOS value for the packet */
960 Ip::Qos::getTosFromServer(serverConnection(), clientFde);
961 }
962 }
963 #endif
964
965 #if USE_OPENSSL
966 if (request->flags.sslPeek) {
967 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
968 ConnStateData::httpsPeeked, serverConnection());
969 unregister(serverConn); // async call owns it now
970 complete(); // destroys us
971 return;
972 }
973 #endif
974
975 if (serverConnection()->getPeer() != NULL) {
976 ++ serverConnection()->getPeer()->stats.fetches;
977 request->peer_login = serverConnection()->getPeer()->login;
978 request->peer_domain = serverConnection()->getPeer()->domain;
979 httpStart(this);
980 } else {
981 assert(!request->flags.sslPeek);
982 request->peer_login = NULL;
983 request->peer_domain = NULL;
984
985 switch (request->url.getScheme()) {
986 #if USE_OPENSSL
987
988 case AnyP::PROTO_HTTPS:
989 httpStart(this);
990 break;
991 #endif
992
993 case AnyP::PROTO_HTTP:
994 httpStart(this);
995 break;
996
997 case AnyP::PROTO_GOPHER:
998 gopherStart(this);
999 break;
1000
1001 case AnyP::PROTO_FTP:
1002 if (request->flags.ftpNative)
1003 Ftp::StartRelay(this);
1004 else
1005 Ftp::StartGateway(this);
1006 break;
1007
1008 case AnyP::PROTO_CACHE_OBJECT:
1009
1010 case AnyP::PROTO_URN:
1011 fatal_dump("Should never get here");
1012 break;
1013
1014 case AnyP::PROTO_WHOIS:
1015 whoisStart(this);
1016 break;
1017
1018 case AnyP::PROTO_WAIS: /* Not implemented */
1019
1020 default:
1021 debugs(17, DBG_IMPORTANT, "WARNING: Cannot retrieve '" << entry->url() << "'.");
1022 ErrorState *anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request);
1023 fail(anErr);
1024 // Set the dont_retry flag because this is not a transient (network) error.
1025 flags.dont_retry = true;
1026 if (Comm::IsConnOpen(serverConn)) {
1027 serverConn->close();
1028 }
1029 break;
1030 }
1031 }
1032 }
1033
1034 /*
1035 * FwdState::reforward
1036 *
1037 * returns TRUE if the transaction SHOULD be re-forwarded to the
1038 * next choice in the serverDestinations list. This method is called when
1039 * server-side communication completes normally, or experiences
1040 * some error after receiving the end of HTTP headers.
1041 */
1042 int
1043 FwdState::reforward()
1044 {
1045 StoreEntry *e = entry;
1046
1047 if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
1048 debugs(17, 3, HERE << "entry aborted");
1049 return 0;
1050 }
1051
1052 assert(e->store_status == STORE_PENDING);
1053 assert(e->mem_obj);
1054 #if URL_CHECKSUM_DEBUG
1055
1056 e->mem_obj->checkUrlChecksum();
1057 #endif
1058
1059 debugs(17, 3, HERE << e->url() << "?" );
1060
1061 if (!EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
1062 debugs(17, 3, HERE << "No, ENTRY_FWD_HDR_WAIT isn't set");
1063 return 0;
1064 }
1065
1066 if (n_tries > Config.forward_max_tries)
1067 return 0;
1068
1069 if (request->bodyNibbled())
1070 return 0;
1071
1072 if (serverDestinations.size() <= 1) {
1073 // NP: <= 1 since total count includes the recently failed one.
1074 debugs(17, 3, HERE << "No alternative forwarding paths left");
1075 return 0;
1076 }
1077
1078 const Http::StatusCode s = e->getReply()->sline.status();
1079 debugs(17, 3, HERE << "status " << s);
1080 return reforwardableStatus(s);
1081 }
1082
1083 /**
1084 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1085 * on whether this is a validation request. RFC 2616 says that we MUST reply
1086 * with "504 Gateway Timeout" if validation fails and cached reply has
1087 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
1088 */
1089 ErrorState *
1090 FwdState::makeConnectingError(const err_type type) const
1091 {
1092 return new ErrorState(type, request->flags.needValidation ?
1093 Http::scGatewayTimeout : Http::scServiceUnavailable, request);
1094 }
1095
1096 static void
1097 fwdStats(StoreEntry * s)
1098 {
1099 int i;
1100 int j;
1101 storeAppendPrintf(s, "Status");
1102
1103 for (j = 1; j < MAX_FWD_STATS_IDX; ++j) {
1104 storeAppendPrintf(s, "\ttry#%d", j);
1105 }
1106
1107 storeAppendPrintf(s, "\n");
1108
1109 for (i = 0; i <= (int) Http::scInvalidHeader; ++i) {
1110 if (FwdReplyCodes[0][i] == 0)
1111 continue;
1112
1113 storeAppendPrintf(s, "%3d", i);
1114
1115 for (j = 0; j <= MAX_FWD_STATS_IDX; ++j) {
1116 storeAppendPrintf(s, "\t%d", FwdReplyCodes[j][i]);
1117 }
1118
1119 storeAppendPrintf(s, "\n");
1120 }
1121 }
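/*
 * Hypothetical fragment of the cache manager "forward" report produced by
 * fwdStats() (counts are made up):
 *   Status  try#1  try#2  try#3 ...
 *   200     12345  17     2 ...
 *   503     31     9      1 ...
 * Each row is a final reply status code; each column counts replies logged
 * after that many forwarding attempts (see logReplyStatus() below).
 */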
1122
1123 /**** STATIC MEMBER FUNCTIONS *************************************************/
1124
1125 bool
1126 FwdState::reforwardableStatus(const Http::StatusCode s) const
1127 {
1128 switch (s) {
1129
1130 case Http::scBadGateway:
1131
1132 case Http::scGatewayTimeout:
1133 return true;
1134
1135 case Http::scForbidden:
1136
1137 case Http::scInternalServerError:
1138
1139 case Http::scNotImplemented:
1140
1141 case Http::scServiceUnavailable:
1142 return Config.retry.onerror;
1143
1144 default:
1145 return false;
1146 }
1147
1148 /* NOTREACHED */
1149 }
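/*
 * Example: a 504 (Gateway Timeout) from the current destination is always
 * re-forwardable to the next candidate, a 503 (Service Unavailable) only when
 * the squid.conf retry_on_error directive (Config.retry.onerror) is enabled,
 * and a 404 is never re-forwarded.
 */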
1150
1151 /**
1152 * Decide where details need to be gathered to correctly describe a persistent connection.
1153 * What is needed:
1154 * - the address/port details about this link
1155 * - domain name of server at other end of this link (either peer or requested host)
1156 */
1157 void
1158 FwdState::pconnPush(Comm::ConnectionPointer &conn, const char *domain)
1159 {
1160 if (conn->getPeer()) {
1161 fwdPconnPool->push(conn, NULL);
1162 } else {
1163 fwdPconnPool->push(conn, domain);
1164 }
1165 }
1166
1167 Comm::ConnectionPointer
1168 FwdState::pconnPop(const Comm::ConnectionPointer &dest, const char *domain)
1169 {
1170 // always call shared pool first because we need to close an idle
1171 // connection there if we have to use a standby connection.
1172 Comm::ConnectionPointer conn = fwdPconnPool->pop(dest, domain, checkRetriable());
1173 if (!Comm::IsConnOpen(conn)) {
1174 // either there was no pconn to pop or this is not a retriable xaction
1175 if (CachePeer *peer = dest->getPeer()) {
1176 if (peer->standby.pool)
1177 conn = peer->standby.pool->pop(dest, domain, true);
1178 }
1179 }
1180 return conn; // open, closed, or nil
1181 }
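/*
 * Sketch of the idle-connection round trip as used in this file (see
 * connectStart() above; server-side modules return reusable connections via
 * pconnPush()):
 *
 *   // before opening a fresh socket, try to reuse an idle one
 *   Comm::ConnectionPointer temp = pconnPop(serverDestinations[0], host);
 *   if (Comm::IsConnOpen(temp)) {
 *       serverConn = temp;          // reuse it; a pconn race is now possible
 *   }
 *   ...
 *   fwd->pconnPush(conn, domain);   // return a keep-alive connection when done
 */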
1182
1183 void
1184 FwdState::initModule()
1185 {
1186 RegisterWithCacheManager();
1187 }
1188
1189 void
1190 FwdState::RegisterWithCacheManager(void)
1191 {
1192 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats, 0, 1);
1193 }
1194
1195 void
1196 FwdState::logReplyStatus(int tries, const Http::StatusCode status)
1197 {
1198 if (status > Http::scInvalidHeader)
1199 return;
1200
1201 assert(tries >= 0);
1202
1203 if (tries > MAX_FWD_STATS_IDX)
1204 tries = MAX_FWD_STATS_IDX;
1205
1206 ++ FwdReplyCodes[tries][status];
1207 }
1208
1209 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1210
1211 /*
1212 * DPW 2007-05-19
1213 * Formerly static, but now used by client_side_request.cc
1214 */
1215 /// Checks for a TOS value to apply depending on the ACL
1216 tos_t
1217 aclMapTOS(acl_tos * head, ACLChecklist * ch)
1218 {
1219 acl_tos *l;
1220
1221 for (l = head; l; l = l->next) {
1222 if (!l->aclList || ch->fastCheck(l->aclList) == ACCESS_ALLOWED)
1223 return l->tos;
1224 }
1225
1226 return 0;
1227 }
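/*
 * Example (squid.conf style, hypothetical ACL name):
 *   acl special_service dst 10.0.1.0/24
 *   tcp_outgoing_tos 0x20 special_service
 * builds a one-node acl_tos list; aclMapTOS() returns 0x20 when the checklist
 * matches special_service, and 0 (no marking) when nothing matches.
 */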
1228
1229 /// Checks for a netfilter mark value to apply depending on the ACL
1230 nfmark_t
1231 aclMapNfmark(acl_nfmark * head, ACLChecklist * ch)
1232 {
1233 acl_nfmark *l;
1234
1235 for (l = head; l; l = l->next) {
1236 if (!l->aclList || ch->fastCheck(l->aclList) == ACCESS_ALLOWED)
1237 return l->nfmark;
1238 }
1239
1240 return 0;
1241 }
1242
1243 void
1244 getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn)
1245 {
1246 // skip if an outgoing address is already set.
1247 if (!conn->local.isAnyAddr()) return;
1248
1249 // ensure that at minimum the wildcard local matches remote protocol
1250 if (conn->remote.isIPv4())
1251 conn->local.setIPv4();
1252
1253 // maybe use TPROXY client address
1254 if (request && request->flags.spoofClientIp) {
1255 if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
1256 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1257 if (Config.onoff.tproxy_uses_indirect_client)
1258 conn->local = request->indirect_client_addr;
1259 else
1260 #endif
1261 conn->local = request->client_addr;
1262 // some flags need setting on the socket to use this address
1263 conn->flags |= COMM_DOBIND;
1264 conn->flags |= COMM_TRANSPARENT;
1265 return;
1266 }
1267 // else no tproxy today ...
1268 }
1269
1270 if (!Config.accessList.outgoing_address) {
1271 return; // anything will do.
1272 }
1273
1274 ACLFilledChecklist ch(NULL, request, NULL);
1275 ch.dst_peer = conn->getPeer();
1276 ch.dst_addr = conn->remote;
1277
1278 // TODO use the connection details in ACL.
1279 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1280
1281 AclAddress *l;
1282 for (l = Config.accessList.outgoing_address; l; l = l->next) {
1283
1284 /* check if the outgoing address is usable to the destination */
1285 if (conn->remote.isIPv4() != l->addr.isIPv4()) continue;
1286
1287 /* check ACLs for this outgoing address */
1288 if (!l->aclList || ch.fastCheck(l->aclList) == ACCESS_ALLOWED) {
1289 conn->local = l->addr;
1290 return;
1291 }
1292 }
1293 }
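/*
 * Example (squid.conf style, hypothetical names and addresses):
 *   acl lan src 10.0.0.0/8
 *   tcp_outgoing_address 192.168.1.2 lan
 * creates one AclAddress node; getOutgoingAddress() then binds the server-side
 * socket to 192.168.1.2 for matching requests, unless TPROXY client-address
 * spoofing took precedence earlier in this function.
 */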
1294
1295 tos_t
1296 GetTosToServer(HttpRequest * request)
1297 {
1298 ACLFilledChecklist ch(NULL, request, NULL);
1299 return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
1300 }
1301
1302 nfmark_t
1303 GetNfmarkToServer(HttpRequest * request)
1304 {
1305 ACLFilledChecklist ch(NULL, request, NULL);
1306 return aclMapNfmark(Ip::Qos::TheConfig.nfmarkToServer, &ch);
1307 }
1308
1309 void
1310 GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
1311 {
1312 // Get the server side TOS and Netfilter mark to be set on the connection.
1313 if (Ip::Qos::TheConfig.isAclTosActive()) {
1314 conn.tos = GetTosToServer(request);
1315 debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos));
1316 }
1317
1318 #if SO_MARK && USE_LIBCAP
1319 conn.nfmark = GetNfmarkToServer(request);
1320 debugs(17, 3, "from " << conn.local << " netfilter mark " << conn.nfmark);
1321 #else
1322 conn.nfmark = 0;
1323 #endif
1324 }