1 /*
2 * DEBUG: section 17 Request Forwarding
3 * AUTHOR: Duane Wessels
4 *
5 * SQUID Web Proxy Cache http://www.squid-cache.org/
6 * ----------------------------------------------------------
7 *
8 * Squid is the result of efforts by numerous individuals from
9 * the Internet community; see the CONTRIBUTORS file for full
10 * details. Many organizations have provided support for Squid's
11 * development; see the SPONSORS file for full details. Squid is
12 * Copyrighted (C) 2001 by the Regents of the University of
13 * California; see the COPYRIGHT file for full details. Squid
14 * incorporates software developed and/or copyrighted by other
15 * sources; see the CREDITS file for full details.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
30 *
31 */
32
33 #include "squid.h"
34 #include "AccessLogEntry.h"
35 #include "acl/AclAddress.h"
36 #include "acl/FilledChecklist.h"
37 #include "acl/Gadgets.h"
38 #include "anyp/PortCfg.h"
39 #include "CacheManager.h"
40 #include "CachePeer.h"
41 #include "client_side.h"
42 #include "comm/Connection.h"
43 #include "comm/ConnOpener.h"
44 #include "comm/Loops.h"
45 #include "CommCalls.h"
46 #include "errorpage.h"
47 #include "event.h"
48 #include "fd.h"
49 #include "fde.h"
50 #include "ftp.h"
51 #include "FwdState.h"
52 #include "globals.h"
53 #include "gopher.h"
54 #include "hier_code.h"
55 #include "http.h"
56 #include "HttpReply.h"
57 #include "HttpRequest.h"
58 #include "icmp/net_db.h"
59 #include "internal.h"
60 #include "ip/Intercept.h"
61 #include "ip/QosConfig.h"
62 #include "ip/tools.h"
63 #include "MemObject.h"
64 #include "mgr/Registration.h"
65 #include "neighbors.h"
66 #include "pconn.h"
67 #include "PeerPoolMgr.h"
68 #include "PeerSelectState.h"
69 #include "SquidConfig.h"
70 #include "SquidTime.h"
71 #include "Store.h"
72 #include "StoreClient.h"
73 #include "urn.h"
74 #include "whois.h"
75 #if USE_OPENSSL
76 #include "ssl/cert_validate_message.h"
77 #include "ssl/Config.h"
78 #include "ssl/ErrorDetail.h"
79 #include "ssl/helper.h"
80 #include "ssl/PeerConnector.h"
81 #include "ssl/ServerBump.h"
82 #include "ssl/support.h"
83 #endif
84
85 #include <cerrno>
86
87 static PSC fwdPeerSelectionCompleteWrapper;
88 static CLCB fwdServerClosedWrapper;
89 static CNCB fwdConnectDoneWrapper;
90
91 static OBJH fwdStats;
92
93 #define MAX_FWD_STATS_IDX 9
94 static int FwdReplyCodes[MAX_FWD_STATS_IDX + 1][Http::scInvalidHeader + 1];
95
96 static PconnPool *fwdPconnPool = new PconnPool("server-side", NULL);
97 CBDATA_CLASS_INIT(FwdState);
98
99 #if USE_OPENSSL
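/// Dials a FwdState callback method with the Ssl::PeerConnector answer,
/// but only while the destination FwdState object is still valid.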
100 class FwdStatePeerAnswerDialer: public CallDialer, public Ssl::PeerConnector::CbDialer
101 {
102 public:
103 typedef void (FwdState::*Method)(Ssl::PeerConnectorAnswer &);
104
105 FwdStatePeerAnswerDialer(Method method, FwdState *fwd):
106 method_(method), fwd_(fwd), answer_() {}
107
108 /* CallDialer API */
109 virtual bool canDial(AsyncCall &call) { return fwd_.valid(); }
110 void dial(AsyncCall &call) { ((&(*fwd_))->*method_)(answer_); }
111 virtual void print(std::ostream &os) const {
112 os << '(' << fwd_.get() << ", " << answer_ << ')';
113 }
114
115 /* Ssl::PeerConnector::CbDialer API */
116 virtual Ssl::PeerConnectorAnswer &answer() { return answer_; }
117
118 private:
119 Method method_;
120 CbcPointer<FwdState> fwd_;
121 Ssl::PeerConnectorAnswer answer_;
122 };
123 #endif
124
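/// StoreEntry abort callback: closes any open server connection, clears the
/// remaining destinations, and drops the self-reference.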
125 void
126 FwdState::abort(void* d)
127 {
128 FwdState* fwd = (FwdState*)d;
129 Pointer tmp = fwd; // Grab a temporary pointer to keep the object alive during our scope.
130
131 if (Comm::IsConnOpen(fwd->serverConnection())) {
132 fwd->closeServerConnection("store entry aborted");
133 } else {
134 debugs(17, 7, HERE << "store entry aborted; no connection to close");
135 }
136 fwd->serverDestinations.clear();
137 fwd->self = NULL;
138 }
139
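/// Closes the server connection after removing our close handler and noting
/// the connection's pconn use count.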
140 void
141 FwdState::closeServerConnection(const char *reason)
142 {
143 debugs(17, 3, "because " << reason << "; " << serverConn);
144 comm_remove_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
145 fwdPconnPool->noteUses(fd_table[serverConn->fd].pconn.uses);
146 serverConn->close();
147 }
148
149 /**** PUBLIC INTERFACE ********************************************************/
150
151 FwdState::FwdState(const Comm::ConnectionPointer &client, StoreEntry * e, HttpRequest * r, const AccessLogEntryPointer &alp):
152 al(alp)
153 {
154 debugs(17, 2, HERE << "Forwarding client request " << client << ", url=" << e->url() );
155 entry = e;
156 clientConn = client;
157 request = r;
158 HTTPMSGLOCK(request);
159 pconnRace = raceImpossible;
160 start_t = squid_curtime;
161 serverDestinations.reserve(Config.forward_max_tries);
162 e->lock("FwdState");
163 EBIT_SET(e->flags, ENTRY_FWD_HDR_WAIT);
164 }
165
166 // Called once, right after object creation, when it is safe to set self
167 void FwdState::start(Pointer aSelf)
168 {
169 // Protect ourselves from being destroyed when the only Server pointing
170 // to us is gone (while we expect to talk to more Servers later).
171 // Once we set self, we are responsible for clearing it when we do not
172 // expect to talk to any servers.
173 self = aSelf; // refcounted
174
175 // We hope that either the store entry aborts or peer is selected.
176 // Otherwise we are going to leak our object.
177
178 entry->registerAbort(FwdState::abort, this);
179
180 #if STRICT_ORIGINAL_DST
181 // Bug 3243: CVE 2009-0801
182 // Bypass of browser same-origin access control in intercepted communication
183 // To resolve this we must force DIRECT and only to the original client destination.
184 const bool isIntercepted = request && !request->flags.redirected && (request->flags.intercepted || request->flags.interceptTproxy);
185 const bool useOriginalDst = Config.onoff.client_dst_passthru || (request && !request->flags.hostVerified);
186 if (isIntercepted && useOriginalDst) {
187 selectPeerForIntercepted();
188         // 3.2 does not support re-wrapping inside CONNECT.
189 // our only alternative is to fake destination "found" and continue with the forwarding.
190 startConnectionOrFail();
191 return;
192 }
193 #endif
194
195 // do full route options selection
196 peerSelect(&serverDestinations, request, al, entry, fwdPeerSelectionCompleteWrapper, this);
197 }
198
199 #if STRICT_ORIGINAL_DST
200 /// bypasses peerSelect() when dealing with intercepted requests
201 void
202 FwdState::selectPeerForIntercepted()
203 {
204 // use pinned connection if available
205 Comm::ConnectionPointer p;
206 if (ConnStateData *client = request->pinnedConnection()) {
207 p = client->validatePinnedConnection(request, NULL);
208 if (Comm::IsConnOpen(p)) {
209 /* duplicate peerSelectPinned() effects */
210 p->peerType = PINNED;
211 entry->ping_status = PING_DONE; /* Skip ICP */
212
213 debugs(17, 3, "reusing a pinned conn: " << *p);
214 serverDestinations.push_back(p);
215 } else {
216 debugs(17,2, "Pinned connection is not valid: " << p);
217 ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
218 fail(anErr);
219 }
220 // Either use the valid pinned connection or fail if it is invalid.
221 return;
222 }
223
224 // use client original destination as second preferred choice
225 p = new Comm::Connection();
226 p->peerType = ORIGINAL_DST;
227 p->remote = clientConn->local;
228 getOutgoingAddress(request, p);
229
230 debugs(17, 3, HERE << "using client original destination: " << *p);
231 serverDestinations.push_back(p);
232 }
233 #endif
234
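/// Wraps up the StoreEntry when forwarding ends: appends the recorded (or a
/// generic read) error to an empty pending entry, or completes the entry.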
235 void
236 FwdState::completed()
237 {
238 if (flags.forward_completed) {
239 debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
240 return;
241 }
242
243 flags.forward_completed = true;
244
245 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
246 debugs(17, 3, HERE << "entry aborted");
247 return ;
248 }
249
250 #if URL_CHECKSUM_DEBUG
251
252 entry->mem_obj->checkUrlChecksum();
253 #endif
254
255 if (entry->store_status == STORE_PENDING) {
256 if (entry->isEmpty()) {
257 if (!err) // we quit (e.g., fd closed) before an error or content
258 fail(new ErrorState(ERR_READ_ERROR, Http::scBadGateway, request));
259 assert(err);
260 errorAppendEntry(entry, err);
261 err = NULL;
262 #if USE_OPENSSL
263 if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
264 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
265 ConnStateData::httpsPeeked, Comm::ConnectionPointer(NULL));
266 }
267 #endif
268 } else {
269 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
270 entry->complete();
271 entry->releaseRequest();
272 }
273 }
274
275 if (storePendingNClients(entry) > 0)
276 assert(!EBIT_TEST(entry->flags, ENTRY_FWD_HDR_WAIT));
277
278 }
279
280 FwdState::~FwdState()
281 {
282 debugs(17, 3, HERE << "FwdState destructor starting");
283
284 if (! flags.forward_completed)
285 completed();
286
287 doneWithRetries();
288
289 HTTPMSGUNLOCK(request);
290
291 delete err;
292
293 entry->unregisterAbort();
294
295 entry->unlock("FwdState");
296
297 entry = NULL;
298
299 if (calls.connector != NULL) {
300 calls.connector->cancel("FwdState destructed");
301 calls.connector = NULL;
302 }
303
304 if (Comm::IsConnOpen(serverConn))
305 closeServerConnection("~FwdState");
306
307 serverDestinations.clear();
308
309 debugs(17, 3, HERE << "FwdState destructor done");
310 }
311
312 /**
313 * This is the entry point for client-side to start forwarding
314 * a transaction. It is a static method that may or may not
315 * allocate a FwdState.
316 */
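// Typical call (illustrative sketch only; the argument names below are placeholders):
//   FwdState::Start(clientConn, storeEntry, request, accessLogEntry);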
317 void
318 FwdState::Start(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request, const AccessLogEntryPointer &al)
319 {
320 /** \note
321 * client_addr == no_addr indicates this is an "internal" request
322 * from peer_digest.c, asn.c, netdb.c, etc and should always
323 * be allowed. yuck, I know.
324 */
325
326 if ( Config.accessList.miss && !request->client_addr.isNoAddr() &&
327 !request->flags.internal && request->url.getScheme() != AnyP::PROTO_CACHE_OBJECT) {
328 /**
329 * Check if this host is allowed to fetch MISSES from us (miss_access).
330          * Intentionally replace the src_addr automatically selected by the checklist code:
331          * we do NOT want the indirect client address to be tested here.
332 */
333 ACLFilledChecklist ch(Config.accessList.miss, request, NULL);
334 ch.src_addr = request->client_addr;
335 if (ch.fastCheck() == ACCESS_DENIED) {
336 err_type page_id;
337 page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName, 1);
338
339 if (page_id == ERR_NONE)
340 page_id = ERR_FORWARDING_DENIED;
341
342 ErrorState *anErr = new ErrorState(page_id, Http::scForbidden, request);
343 errorAppendEntry(entry, anErr); // frees anErr
344 return;
345 }
346 }
347
348 debugs(17, 3, HERE << "'" << entry->url() << "'");
349 /*
350 * This seems like an odd place to bind mem_obj and request.
351 * Might want to assert that request is NULL at this point
352 */
353 entry->mem_obj->request = request;
354 HTTPMSGLOCK(entry->mem_obj->request);
355 #if URL_CHECKSUM_DEBUG
356
357 entry->mem_obj->checkUrlChecksum();
358 #endif
359
360 if (shutting_down) {
361 /* more yuck */
362 ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
363 errorAppendEntry(entry, anErr); // frees anErr
364 return;
365 }
366
367 if (request->flags.internal) {
368 debugs(17, 2, "calling internalStart() due to request flag");
369 internalStart(clientConn, request, entry);
370 return;
371 }
372
373 switch (request->url.getScheme()) {
374
375 case AnyP::PROTO_CACHE_OBJECT:
376 debugs(17, 2, "calling CacheManager due to request scheme " << request->url.getScheme());
377 CacheManager::GetInstance()->Start(clientConn, request, entry);
378 return;
379
380 case AnyP::PROTO_URN:
381 urnStart(request, entry);
382 return;
383
384 default:
385 FwdState::Pointer fwd = new FwdState(clientConn, entry, request, al);
386 fwd->start(fwd);
387 return;
388 }
389
390 /* NOTREACHED */
391 }
392
393 void
394 FwdState::fwdStart(const Comm::ConnectionPointer &clientConn, StoreEntry *entry, HttpRequest *request)
395 {
396 // Hides AccessLogEntry.h from code that does not supply ALE anyway.
397 Start(clientConn, entry, request, NULL);
398 }
399
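/// Attempts the next destination in serverDestinations or, when no candidates
/// remain, fails with the last recorded error (or ERR_CANNOT_FORWARD).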
400 void
401 FwdState::startConnectionOrFail()
402 {
403 debugs(17, 3, HERE << entry->url());
404
405 if (serverDestinations.size() > 0) {
406 // Ditch error page if it was created before.
407 // A new one will be created if there's another problem
408 delete err;
409 err = NULL;
410
411 // Update the logging information about this new server connection.
412 // Done here before anything else so the errors get logged for
413 // this server link regardless of what happens when connecting to it.
414         // If successfully connected, this top destination will become the serverConnection().
415 request->hier.note(serverDestinations[0], request->GetHost());
416 request->clearError();
417
418 connectStart();
419 } else {
420 debugs(17, 3, HERE << "Connection failed: " << entry->url());
421 if (!err) {
422 ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scInternalServerError, request);
423 fail(anErr);
424 } // else use actual error from last connection attempt
425 self = NULL; // refcounted
426 }
427 }
428
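/// Records the given error, replacing any earlier one. A zero-size reply may
/// indicate a pconn race; on pinned connections it also disables retries so
/// the failure reaches the client.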
429 void
430 FwdState::fail(ErrorState * errorState)
431 {
432 debugs(17, 3, err_type_str[errorState->type] << " \"" << Http::StatusCodeString(errorState->httpStatus) << "\"\n\t" << entry->url());
433
434 delete err;
435 err = errorState;
436
437 if (!errorState->request) {
438 errorState->request = request;
439 HTTPMSGLOCK(errorState->request);
440 }
441
442 if (err->type != ERR_ZERO_SIZE_OBJECT)
443 return;
444
445 if (pconnRace == racePossible) {
446 debugs(17, 5, HERE << "pconn race happened");
447 pconnRace = raceHappened;
448 }
449
450 if (ConnStateData *pinned_connection = request->pinnedConnection()) {
451 pinned_connection->pinning.zeroReply = true;
452 flags.dont_retry = true; // we want to propagate failure to the client
453 debugs(17, 4, "zero reply on pinned connection");
454 }
455 }
456
457 /**
458 * Frees fwdState without closing FD or generating an abort
459 */
460 void
461 FwdState::unregister(Comm::ConnectionPointer &conn)
462 {
463 debugs(17, 3, HERE << entry->url() );
464 assert(serverConnection() == conn);
465 assert(Comm::IsConnOpen(conn));
466 comm_remove_close_handler(conn->fd, fwdServerClosedWrapper, this);
467 serverConn = NULL;
468 }
469
470 // Legacy method to be removed in favor of the above as soon as possible
471 void
472 FwdState::unregister(int fd)
473 {
474 debugs(17, 3, HERE << entry->url() );
475 assert(fd == serverConnection()->fd);
476 unregister(serverConn);
477 }
478
479 /**
480 * server-side modules call fwdComplete() when they are done
481 * downloading an object. Then, we either 1) re-forward the
482 * request somewhere else if needed, or 2) call storeComplete()
483 * to finish it off
484 */
485 void
486 FwdState::complete()
487 {
488 debugs(17, 3, HERE << entry->url() << "\n\tstatus " << entry->getReply()->sline.status());
489 #if URL_CHECKSUM_DEBUG
490
491 entry->mem_obj->checkUrlChecksum();
492 #endif
493
494 logReplyStatus(n_tries, entry->getReply()->sline.status());
495
496 if (reforward()) {
497 debugs(17, 3, HERE << "re-forwarding " << entry->getReply()->sline.status() << " " << entry->url());
498
499 if (Comm::IsConnOpen(serverConn))
500 unregister(serverConn);
501
502 entry->reset();
503
504         // Drop the path we just tried from the selection list and try the next one.
505 serverDestinations.erase(serverDestinations.begin());
506 startConnectionOrFail();
507
508 } else {
509 if (Comm::IsConnOpen(serverConn))
510 debugs(17, 3, HERE << "server FD " << serverConnection()->fd << " not re-forwarding status " << entry->getReply()->sline.status());
511 else
512 debugs(17, 3, HERE << "server (FD closed) not re-forwarding status " << entry->getReply()->sline.status());
513 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
514 entry->complete();
515
516 if (!Comm::IsConnOpen(serverConn))
517 completed();
518
519 self = NULL; // refcounted
520 }
521 }
522
523 /**** CALLBACK WRAPPERS ************************************************************/
524
525 static void
526 fwdPeerSelectionCompleteWrapper(Comm::ConnectionList * unused, ErrorState *err, void *data)
527 {
528 FwdState *fwd = (FwdState *) data;
529 if (err)
530 fwd->fail(err);
531 fwd->startConnectionOrFail();
532 }
533
534 static void
535 fwdServerClosedWrapper(const CommCloseCbParams &params)
536 {
537 FwdState *fwd = (FwdState *)params.data;
538 fwd->serverClosed(params.fd);
539 }
540
541 void
542 fwdConnectDoneWrapper(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno, void *data)
543 {
544 FwdState *fwd = (FwdState *) data;
545 fwd->connectDone(conn, status, xerrno);
546 }
547
548 /**** PRIVATE *****************************************************************/
549
550 /*
551 * FwdState::checkRetry
552 *
553 * Return TRUE if the request SHOULD be retried. This method is
554 * called when the HTTP connection fails, or when the connection
555  * is closed before the server side has read the end of the HTTP headers.
556 */
557 bool
558 FwdState::checkRetry()
559 {
560 if (shutting_down)
561 return false;
562
563 if (!self) { // we have aborted before the server called us back
564 debugs(17, 5, HERE << "not retrying because of earlier abort");
565 // we will be destroyed when the server clears its Pointer to us
566 return false;
567 }
568
569 if (entry->store_status != STORE_PENDING)
570 return false;
571
572 if (!entry->isEmpty())
573 return false;
574
575 if (n_tries > Config.forward_max_tries)
576 return false;
577
578 if (squid_curtime - start_t > Config.Timeout.forward)
579 return false;
580
581 if (flags.dont_retry)
582 return false;
583
584 if (request->bodyNibbled())
585 return false;
586
587 // NP: not yet actually connected anywhere. retry is safe.
588 if (!flags.connected_okay)
589 return true;
590
591 if (!checkRetriable())
592 return false;
593
594 return true;
595 }
596
597 /*
598 * FwdState::checkRetriable
599 *
600 * Return TRUE if this is the kind of request that can be retried
601 * after a failure. If the request is not retriable then we don't
602 * want to risk sending it on a persistent connection. Instead we'll
603 * force it to go on a new HTTP connection.
604 */
605 bool
606 FwdState::checkRetriable()
607 {
608 // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
609 // complicated] code required to protect the PUT request body from being
610 // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
611 if (request->body_pipe != NULL)
612 return false;
613
614 // RFC2616 9.1 Safe and Idempotent Methods
615 return (request->method.isHttpSafe() || request->method.isIdempotent());
616 }
617
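/// Close handler for the server connection: notes pconn use counts and then
/// retries another destination or gives up.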
618 void
619 FwdState::serverClosed(int fd)
620 {
621 // XXX: fd is often -1 here
622 debugs(17, 2, "FD " << fd << " " << entry->url() << " after " <<
623 (fd >= 0 ? fd_table[fd].pconn.uses : -1) << " requests");
624 if (fd >= 0 && serverConnection()->fd == fd)
625 fwdPconnPool->noteUses(fd_table[fd].pconn.uses);
626 retryOrBail();
627 }
628
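/// Retries forwarding if checkRetry() permits; otherwise finishes the
/// transaction, reporting a shutdown error when shutting down and no other
/// error has been recorded.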
629 void
630 FwdState::retryOrBail()
631 {
632 if (checkRetry()) {
633 debugs(17, 3, HERE << "re-forwarding (" << n_tries << " tries, " << (squid_curtime - start_t) << " secs)");
634 // we should retry the same destination if it failed due to pconn race
635 if (pconnRace == raceHappened)
636 debugs(17, 4, HERE << "retrying the same destination");
637 else
638 serverDestinations.erase(serverDestinations.begin()); // last one failed. try another.
639 startConnectionOrFail();
640 return;
641 }
642
643 // TODO: should we call completed() here and move doneWithRetries there?
644 doneWithRetries();
645
646 if (self != NULL && !err && shutting_down) {
647 ErrorState *anErr = new ErrorState(ERR_SHUTTING_DOWN, Http::scServiceUnavailable, request);
648 errorAppendEntry(entry, anErr);
649 }
650
651 self = NULL; // refcounted
652 }
653
654 // If the Server quits before nibbling at the request body, the body sender
655 // will not know (so that we can retry). Call this if we will not retry. We
656 // will notify the sender so that it does not get stuck waiting for space.
657 void
658 FwdState::doneWithRetries()
659 {
660 if (request && request->body_pipe != NULL)
661 request->body_pipe->expectNoConsumption();
662 }
663
664 // called by the server that failed after calling unregister()
665 void
666 FwdState::handleUnregisteredServerEnd()
667 {
668 debugs(17, 2, HERE << "self=" << self << " err=" << err << ' ' << entry->url());
669 assert(!Comm::IsConnOpen(serverConn));
670 retryOrBail();
671 }
672
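/// Comm::ConnOpener callback. On failure, records the error and retries or
/// bails. On success, installs the close handler and either starts TLS
/// negotiation (peers, HTTPS, or sslPeek) or dispatches the request.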
673 void
674 FwdState::connectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int xerrno)
675 {
676 if (status != Comm::OK) {
677 ErrorState *const anErr = makeConnectingError(ERR_CONNECT_FAIL);
678 anErr->xerrno = xerrno;
679 fail(anErr);
680
681 /* it might have been a timeout with a partially open link */
682 if (conn != NULL) {
683 if (conn->getPeer())
684 peerConnectFailed(conn->getPeer());
685
686 conn->close();
687 }
688 retryOrBail();
689 return;
690 }
691
692 serverConn = conn;
693 flags.connected_okay = true;
694
695 debugs(17, 3, HERE << serverConnection() << ": '" << entry->url() << "'" );
696
697 comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
698
699 if (serverConnection()->getPeer())
700 peerConnectSucceded(serverConnection()->getPeer());
701
702 #if USE_OPENSSL
703 if (!request->flags.pinned) {
704 if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl) ||
705 (!serverConnection()->getPeer() && request->url.getScheme() == AnyP::PROTO_HTTPS) ||
706 request->flags.sslPeek) {
707
708 HttpRequest::Pointer requestPointer = request;
709 AsyncCall::Pointer callback = asyncCall(17,4,
710 "FwdState::ConnectedToPeer",
711 FwdStatePeerAnswerDialer(&FwdState::connectedToPeer, this));
712 // Use positive timeout when less than one second is left.
713 const time_t sslNegotiationTimeout = max(static_cast<time_t>(1), timeLeft());
714 Ssl::PeerConnector *connector =
715 new Ssl::PeerConnector(requestPointer, serverConnection(), callback, sslNegotiationTimeout);
716 AsyncJob::Start(connector); // will call our callback
717 return;
718 }
719 }
720 #endif
721
722 dispatch();
723 }
724
725 #if USE_OPENSSL
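/// Ssl::PeerConnector callback: dispatches the request if TLS negotiation
/// with the server succeeded; otherwise records the error and gives up.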
726 void
727 FwdState::connectedToPeer(Ssl::PeerConnectorAnswer &answer)
728 {
729 if (ErrorState *error = answer.error.get()) {
730 fail(error);
731 answer.error.clear(); // preserve error for errorSendComplete()
732 self = NULL;
733 return;
734 }
735
736 dispatch();
737 }
738 #endif
739
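/// Handles a connect timeout on the current destination: fails the
/// transaction if nothing has been received yet, then closes the half-open
/// connection.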
740 void
741 FwdState::connectTimeout(int fd)
742 {
743 debugs(17, 2, "fwdConnectTimeout: FD " << fd << ": '" << entry->url() << "'" );
744 assert(serverDestinations[0] != NULL);
745 assert(fd == serverDestinations[0]->fd);
746
747 if (entry->isEmpty()) {
748 ErrorState *anErr = new ErrorState(ERR_CONNECT_FAIL, Http::scGatewayTimeout, request);
749 anErr->xerrno = ETIMEDOUT;
750 fail(anErr);
751
752 /* This marks the peer DOWN ... */
753 if (serverDestinations[0]->getPeer())
754 peerConnectFailed(serverDestinations[0]->getPeer());
755 }
756
757 if (Comm::IsConnOpen(serverDestinations[0])) {
758 serverDestinations[0]->close();
759 }
760 }
761
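/// Returns the time allowed for the next connect attempt: the smaller of the
/// (peer) connect timeout and the remaining total forwarding timeout.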
762 time_t
763 FwdState::timeLeft() const
764 {
765 /* connection timeout */
766 int ctimeout;
767 if (serverDestinations[0]->getPeer()) {
768 ctimeout = serverDestinations[0]->getPeer()->connect_timeout > 0 ?
769 serverDestinations[0]->getPeer()->connect_timeout : Config.Timeout.peer_connect;
770 } else {
771 ctimeout = Config.Timeout.connect;
772 }
773
774 /* calculate total forwarding timeout ??? */
775 int ftimeout = Config.Timeout.forward - (squid_curtime - start_t);
776 if (ftimeout < 0)
777 ftimeout = 5;
778
779 if (ftimeout < ctimeout)
780 return (time_t)ftimeout;
781 else
782 return (time_t)ctimeout;
783 }
784
785 /**
786 * Called after forwarding path selection (via peer select) has taken place
787 * and whenever forwarding needs to attempt a new connection (routing failover).
788 * We have a vector of possible localIP->remoteIP paths now ready to start being connected.
789 */
790 void
791 FwdState::connectStart()
792 {
793 assert(serverDestinations.size() > 0);
794
795 debugs(17, 3, "fwdConnectStart: " << entry->url());
796
797 if (!request->hier.first_conn_start.tv_sec) // first attempt
798 request->hier.first_conn_start = current_time;
799
800 if (serverDestinations[0]->getPeer() && request->flags.sslBumped) {
801 debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parent proxy are not allowed");
802 ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, Http::scServiceUnavailable, request);
803 fail(anErr);
804 self = NULL; // refcounted
805 return;
806 }
807
808 request->flags.pinned = false; // XXX: what if the ConnStateData set this to flag existing credentials?
809 // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
810 // XXX: also, logs will now lie if pinning is broken and leads to an error message.
811 if (serverDestinations[0]->peerType == PINNED) {
812 ConnStateData *pinned_connection = request->pinnedConnection();
813 debugs(17,7, "pinned peer connection: " << pinned_connection);
814 // pinned_connection may become nil after a pconn race
815 if (pinned_connection)
816 serverConn = pinned_connection->validatePinnedConnection(request, serverDestinations[0]->getPeer());
817 else
818 serverConn = NULL;
819 if (Comm::IsConnOpen(serverConn)) {
820 pinned_connection->stopPinnedConnectionMonitoring();
821 flags.connected_okay = true;
822 ++n_tries;
823 request->flags.pinned = true;
824 request->hier.note(serverConn, pinned_connection->pinning.host);
825 if (pinned_connection->pinnedAuth())
826 request->flags.auth = true;
827 comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
828
829 /* Update server side TOS and Netfilter mark on the connection. */
830 if (Ip::Qos::TheConfig.isAclTosActive()) {
831 debugs(17, 3, HERE << "setting tos for pinned connection to " << (int)serverConn->tos );
832 serverConn->tos = GetTosToServer(request);
833 Ip::Qos::setSockTos(serverConn, serverConn->tos);
834 }
835 #if SO_MARK
836 if (Ip::Qos::TheConfig.isAclNfmarkActive()) {
837 serverConn->nfmark = GetNfmarkToServer(request);
838 Ip::Qos::setSockNfmark(serverConn, serverConn->nfmark);
839 }
840 #endif
841
842 // the server may close the pinned connection before this request
843 pconnRace = racePossible;
844 dispatch();
845 return;
846 }
847 // Pinned connection failure.
848 debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
849 ErrorState *anErr = new ErrorState(ERR_ZERO_SIZE_OBJECT, Http::scServiceUnavailable, request);
850 fail(anErr);
851 self = NULL; // refcounted
852 return;
853 }
854
855 // Use pconn to avoid opening a new connection.
856 const char *host = NULL;
857 if (!serverDestinations[0]->getPeer())
858 host = request->GetHost();
859
860 Comm::ConnectionPointer temp;
861 // Avoid pconns after races so that the same client does not suffer twice.
862 // This does not increase the total number of connections because we just
863 // closed the connection that failed the race. And re-pinning assumes this.
864 if (pconnRace != raceHappened)
865 temp = pconnPop(serverDestinations[0], host);
866
867 const bool openedPconn = Comm::IsConnOpen(temp);
868 pconnRace = openedPconn ? racePossible : raceImpossible;
869
870     // If we found an open persistent connection to use, use it.
871 if (openedPconn) {
872 serverConn = temp;
873 flags.connected_okay = true;
874 debugs(17, 3, HERE << "reusing pconn " << serverConnection());
875 ++n_tries;
876
877 comm_add_close_handler(serverConnection()->fd, fwdServerClosedWrapper, this);
878
879 /* Update server side TOS and Netfilter mark on the connection. */
880 if (Ip::Qos::TheConfig.isAclTosActive()) {
881 const tos_t tos = GetTosToServer(request);
882 Ip::Qos::setSockTos(temp, tos);
883 }
884 #if SO_MARK
885 if (Ip::Qos::TheConfig.isAclNfmarkActive()) {
886 const nfmark_t nfmark = GetNfmarkToServer(request);
887 Ip::Qos::setSockNfmark(temp, nfmark);
888 }
889 #endif
890
891 dispatch();
892 return;
893 }
894
895 // We will try to open a new connection, possibly to the same destination.
896 // We reset serverDestinations[0] in case we are using it again because
897 // ConnOpener modifies its destination argument.
898 serverDestinations[0]->local.port(0);
899 serverConn = NULL;
900
901 #if URL_CHECKSUM_DEBUG
902 entry->mem_obj->checkUrlChecksum();
903 #endif
904
905 GetMarkingsToServer(request, *serverDestinations[0]);
906
907 calls.connector = commCbCall(17,3, "fwdConnectDoneWrapper", CommConnectCbPtrFun(fwdConnectDoneWrapper, this));
908 Comm::ConnOpener *cs = new Comm::ConnOpener(serverDestinations[0], calls.connector, timeLeft());
909 if (host)
910 cs->setHost(host);
911 AsyncJob::Start(cs);
912 }
913
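/// Hands the established server connection to the appropriate protocol module
/// (HTTP, FTP, gopher, whois, ...) after QoS bookkeeping and peer statistics.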
914 void
915 FwdState::dispatch()
916 {
917 debugs(17, 3, clientConn << ": Fetching " << request->method << ' ' << entry->url());
918 /*
919 * Assert that server_fd is set. This is to guarantee that fwdState
920 * is attached to something and will be deallocated when server_fd
921 * is closed.
922 */
923 assert(Comm::IsConnOpen(serverConn));
924
925 fd_note(serverConnection()->fd, entry->url());
926
927 fd_table[serverConnection()->fd].noteUse();
928
929 /*assert(!EBIT_TEST(entry->flags, ENTRY_DISPATCHED)); */
930 assert(entry->ping_status != PING_WAITING);
931
932 assert(entry->locked());
933
934 EBIT_SET(entry->flags, ENTRY_DISPATCHED);
935
936 netdbPingSite(request->GetHost());
937
938 /* Retrieves remote server TOS or MARK value, and stores it as part of the
939 * original client request FD object. It is later used to forward
940 * remote server's TOS/MARK in the response to the client in case of a MISS.
941 */
942 if (Ip::Qos::TheConfig.isHitNfmarkActive()) {
943 if (Comm::IsConnOpen(clientConn) && Comm::IsConnOpen(serverConnection())) {
944 fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
945 /* Get the netfilter mark for the connection */
946 Ip::Qos::getNfmarkFromServer(serverConnection(), clientFde);
947 }
948 }
949
950 #if _SQUID_LINUX_
951 /* Bug 2537: The TOS forward part of QOS only applies to patched Linux kernels. */
952 if (Ip::Qos::TheConfig.isHitTosActive()) {
953 if (Comm::IsConnOpen(clientConn)) {
954 fde * clientFde = &fd_table[clientConn->fd]; // XXX: move the fd_table access into Ip::Qos
955 /* Get the TOS value for the packet */
956 Ip::Qos::getTosFromServer(serverConnection(), clientFde);
957 }
958 }
959 #endif
960
961 #if USE_OPENSSL
962 if (request->flags.sslPeek) {
963 CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
964 ConnStateData::httpsPeeked, serverConnection());
965 unregister(serverConn); // async call owns it now
966 complete(); // destroys us
967 return;
968 }
969 #endif
970
971 if (serverConnection()->getPeer() != NULL) {
972 ++ serverConnection()->getPeer()->stats.fetches;
973 request->peer_login = serverConnection()->getPeer()->login;
974 request->peer_domain = serverConnection()->getPeer()->domain;
975 httpStart(this);
976 } else {
977 assert(!request->flags.sslPeek);
978 request->peer_login = NULL;
979 request->peer_domain = NULL;
980
981 switch (request->url.getScheme()) {
982 #if USE_OPENSSL
983
984 case AnyP::PROTO_HTTPS:
985 httpStart(this);
986 break;
987 #endif
988
989 case AnyP::PROTO_HTTP:
990 httpStart(this);
991 break;
992
993 case AnyP::PROTO_GOPHER:
994 gopherStart(this);
995 break;
996
997 case AnyP::PROTO_FTP:
998 ftpStart(this);
999 break;
1000
1001 case AnyP::PROTO_CACHE_OBJECT:
1002
1003 case AnyP::PROTO_URN:
1004 fatal_dump("Should never get here");
1005 break;
1006
1007 case AnyP::PROTO_WHOIS:
1008 whoisStart(this);
1009 break;
1010
1011 case AnyP::PROTO_WAIS: /* Not implemented */
1012
1013 default:
1014 debugs(17, DBG_IMPORTANT, "WARNING: Cannot retrieve '" << entry->url() << "'.");
1015 ErrorState *anErr = new ErrorState(ERR_UNSUP_REQ, Http::scBadRequest, request);
1016 fail(anErr);
1017 // Set the dont_retry flag because this is not a transient (network) error.
1018 flags.dont_retry = true;
1019 if (Comm::IsConnOpen(serverConn)) {
1020 serverConn->close();
1021 }
1022 break;
1023 }
1024 }
1025 }
1026
1027 /*
1028 * FwdState::reforward
1029 *
1030 * returns TRUE if the transaction SHOULD be re-forwarded to the
1031 * next choice in the serverDestinations list. This method is called when
1032 * server-side communication completes normally, or experiences
1033 * some error after receiving the end of HTTP headers.
1034 */
1035 int
1036 FwdState::reforward()
1037 {
1038 StoreEntry *e = entry;
1039
1040 if (EBIT_TEST(e->flags, ENTRY_ABORTED)) {
1041 debugs(17, 3, HERE << "entry aborted");
1042 return 0;
1043 }
1044
1045 assert(e->store_status == STORE_PENDING);
1046 assert(e->mem_obj);
1047 #if URL_CHECKSUM_DEBUG
1048
1049 e->mem_obj->checkUrlChecksum();
1050 #endif
1051
1052 debugs(17, 3, HERE << e->url() << "?" );
1053
1054 if (!EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
1055 debugs(17, 3, HERE << "No, ENTRY_FWD_HDR_WAIT isn't set");
1056 return 0;
1057 }
1058
1059 if (n_tries > Config.forward_max_tries)
1060 return 0;
1061
1062 if (request->bodyNibbled())
1063 return 0;
1064
1065 if (serverDestinations.size() <= 1) {
1066 // NP: <= 1 since total count includes the recently failed one.
1067 debugs(17, 3, HERE << "No alternative forwarding paths left");
1068 return 0;
1069 }
1070
1071 const Http::StatusCode s = e->getReply()->sline.status();
1072 debugs(17, 3, HERE << "status " << s);
1073 return reforwardableStatus(s);
1074 }
1075
1076 /**
1077 * Create "503 Service Unavailable" or "504 Gateway Timeout" error depending
1078 * on whether this is a validation request. RFC 2616 says that we MUST reply
1079 * with "504 Gateway Timeout" if validation fails and cached reply has
1080 * proxy-revalidate, must-revalidate or s-maxage Cache-Control directive.
1081 */
1082 ErrorState *
1083 FwdState::makeConnectingError(const err_type type) const
1084 {
1085 return new ErrorState(type, request->flags.needValidation ?
1086 Http::scGatewayTimeout : Http::scServiceUnavailable, request);
1087 }
1088
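/// cache manager "forward" report: a table of reply status codes seen at each
/// forwarding attempt.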
1089 static void
1090 fwdStats(StoreEntry * s)
1091 {
1092 int i;
1093 int j;
1094 storeAppendPrintf(s, "Status");
1095
1096 for (j = 1; j < MAX_FWD_STATS_IDX; ++j) {
1097 storeAppendPrintf(s, "\ttry#%d", j);
1098 }
1099
1100 storeAppendPrintf(s, "\n");
1101
1102 for (i = 0; i <= (int) Http::scInvalidHeader; ++i) {
1103 if (FwdReplyCodes[0][i] == 0)
1104 continue;
1105
1106 storeAppendPrintf(s, "%3d", i);
1107
1108 for (j = 0; j <= MAX_FWD_STATS_IDX; ++j) {
1109 storeAppendPrintf(s, "\t%d", FwdReplyCodes[j][i]);
1110 }
1111
1112 storeAppendPrintf(s, "\n");
1113 }
1114 }
1115
1116 /**** STATIC MEMBER FUNCTIONS *************************************************/
1117
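/// Whether a reply with the given status code may be re-forwarded to the next
/// destination (some codes only when Config.retry.onerror is set).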
1118 bool
1119 FwdState::reforwardableStatus(const Http::StatusCode s) const
1120 {
1121 switch (s) {
1122
1123 case Http::scBadGateway:
1124
1125 case Http::scGatewayTimeout:
1126 return true;
1127
1128 case Http::scForbidden:
1129
1130 case Http::scInternalServerError:
1131
1132 case Http::scNotImplemented:
1133
1134 case Http::scServiceUnavailable:
1135 return Config.retry.onerror;
1136
1137 default:
1138 return false;
1139 }
1140
1141 /* NOTREACHED */
1142 }
1143
1144 /**
1145 * Decide where details need to be gathered to correctly describe a persistent connection.
1146 * What is needed:
1147 * - the address/port details about this link
1148 * - domain name of server at other end of this link (either peer or requested host)
1149 */
1150 void
1151 FwdState::pconnPush(Comm::ConnectionPointer &conn, const char *domain)
1152 {
1153 if (conn->getPeer()) {
1154 fwdPconnPool->push(conn, NULL);
1155 } else {
1156 fwdPconnPool->push(conn, domain);
1157 }
1158 }
1159
1160 Comm::ConnectionPointer
1161 FwdState::pconnPop(const Comm::ConnectionPointer &dest, const char *domain)
1162 {
1163 // always call shared pool first because we need to close an idle
1164 // connection there if we have to use a standby connection.
1165 Comm::ConnectionPointer conn = fwdPconnPool->pop(dest, domain, checkRetriable());
1166 if (!Comm::IsConnOpen(conn)) {
1167 // either there was no pconn to pop or this is not a retriable xaction
1168 if (CachePeer *peer = dest->getPeer()) {
1169 if (peer->standby.pool)
1170 conn = peer->standby.pool->pop(dest, domain, true);
1171 }
1172 }
1173 return conn; // open, closed, or nil
1174 }
1175
1176 void
1177 FwdState::initModule()
1178 {
1179 RegisterWithCacheManager();
1180 }
1181
1182 void
1183 FwdState::RegisterWithCacheManager(void)
1184 {
1185 Mgr::RegisterAction("forward", "Request Forwarding Statistics", fwdStats, 0, 1);
1186 }
1187
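/// Records the reply status code for the given attempt number in the
/// forwarding statistics table.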
1188 void
1189 FwdState::logReplyStatus(int tries, const Http::StatusCode status)
1190 {
1191 if (status > Http::scInvalidHeader)
1192 return;
1193
1194 assert(tries >= 0);
1195
1196 if (tries > MAX_FWD_STATS_IDX)
1197 tries = MAX_FWD_STATS_IDX;
1198
1199 ++ FwdReplyCodes[tries][status];
1200 }
1201
1202 /**** PRIVATE NON-MEMBER FUNCTIONS ********************************************/
1203
1204 /*
1205 * DPW 2007-05-19
1206 * Formerly static, but now used by client_side_request.cc
1207 */
1208 /// Checks for a TOS value to apply depending on the ACL
1209 tos_t
1210 aclMapTOS(acl_tos * head, ACLChecklist * ch)
1211 {
1212 acl_tos *l;
1213
1214 for (l = head; l; l = l->next) {
1215 if (!l->aclList || ch->fastCheck(l->aclList) == ACCESS_ALLOWED)
1216 return l->tos;
1217 }
1218
1219 return 0;
1220 }
1221
1222 /// Checks for a netfilter mark value to apply depending on the ACL
1223 nfmark_t
1224 aclMapNfmark(acl_nfmark * head, ACLChecklist * ch)
1225 {
1226 acl_nfmark *l;
1227
1228 for (l = head; l; l = l->next) {
1229 if (!l->aclList || ch->fastCheck(l->aclList) == ACCESS_ALLOWED)
1230 return l->nfmark;
1231 }
1232
1233 return 0;
1234 }
1235
1236 void
1237 getOutgoingAddress(HttpRequest * request, Comm::ConnectionPointer conn)
1238 {
1239 // skip if an outgoing address is already set.
1240 if (!conn->local.isAnyAddr()) return;
1241
1242 // ensure that at minimum the wildcard local matches remote protocol
1243 if (conn->remote.isIPv4())
1244 conn->local.setIPv4();
1245
1246 // maybe use TPROXY client address
1247 if (request && request->flags.spoofClientIp) {
1248 if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
1249 #if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
1250 if (Config.onoff.tproxy_uses_indirect_client)
1251 conn->local = request->indirect_client_addr;
1252 else
1253 #endif
1254 conn->local = request->client_addr;
1255 // some flags need setting on the socket to use this address
1256 conn->flags |= COMM_DOBIND;
1257 conn->flags |= COMM_TRANSPARENT;
1258 return;
1259 }
1260 // else no tproxy today ...
1261 }
1262
1263 if (!Config.accessList.outgoing_address) {
1264 return; // anything will do.
1265 }
1266
1267 ACLFilledChecklist ch(NULL, request, NULL);
1268 ch.dst_peer = conn->getPeer();
1269 ch.dst_addr = conn->remote;
1270
1271 // TODO use the connection details in ACL.
1272 // needs a bit of rework in ACLFilledChecklist to use Comm::Connection instead of ConnStateData
1273
1274 AclAddress *l;
1275 for (l = Config.accessList.outgoing_address; l; l = l->next) {
1276
1277 /* check if the outgoing address is usable to the destination */
1278 if (conn->remote.isIPv4() != l->addr.isIPv4()) continue;
1279
1280 /* check ACLs for this outgoing address */
1281 if (!l->aclList || ch.fastCheck(l->aclList) == ACCESS_ALLOWED) {
1282 conn->local = l->addr;
1283 return;
1284 }
1285 }
1286 }
1287
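/// Returns the TOS value that the server-side TOS ACLs select for this
/// request, or 0 when none match.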
1288 tos_t
1289 GetTosToServer(HttpRequest * request)
1290 {
1291 ACLFilledChecklist ch(NULL, request, NULL);
1292 return aclMapTOS(Ip::Qos::TheConfig.tosToServer, &ch);
1293 }
1294
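/// Returns the netfilter mark that the server-side mark ACLs select for this
/// request, or 0 when none match.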
1295 nfmark_t
1296 GetNfmarkToServer(HttpRequest * request)
1297 {
1298 ACLFilledChecklist ch(NULL, request, NULL);
1299 return aclMapNfmark(Ip::Qos::TheConfig.nfmarkToServer, &ch);
1300 }
1301
1302 void
1303 GetMarkingsToServer(HttpRequest * request, Comm::Connection &conn)
1304 {
1305 // Get the server side TOS and Netfilter mark to be set on the connection.
1306 if (Ip::Qos::TheConfig.isAclTosActive()) {
1307 conn.tos = GetTosToServer(request);
1308 debugs(17, 3, "from " << conn.local << " tos " << int(conn.tos));
1309 }
1310
1311 #if SO_MARK && USE_LIBCAP
1312 conn.nfmark = GetNfmarkToServer(request);
1313 debugs(17, 3, "from " << conn.local << " netfilter mark " << conn.nfmark);
1314 #else
1315 conn.nfmark = 0;
1316 #endif
1317 }