]> git.ipfire.org Git - thirdparty/squid.git/blame_incremental - src/client_side.cc
Simplify appending SBuf to String (#2108)
[thirdparty/squid.git] / src / client_side.cc
... / ...
CommitLineData
1/*
2 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9/* DEBUG: section 33 Client-side Routines */
10
11/**
12 \defgroup ClientSide Client-Side Logics
13 *
14 \section cserrors Errors and client side
15 *
16 \par Problem the first:
17 * the store entry is no longer authoritative on the
18 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
19 * of client_side_reply.c.
20 * Problem the second: resources are wasted if we delay in cleaning up.
 * Problem the third: we can't depend on a connection close to clean up.
22 *
23 \par Nice thing the first:
24 * Any step in the stream can callback with data
25 * representing an error.
26 * Nice thing the second: once you stop requesting reads from upstream,
27 * upstream can be stopped too.
28 *
29 \par Solution #1:
30 * Error has a callback mechanism to hand over a membuf
31 * with the error content. The failing node pushes that back as the
32 * reply. Can this be generalised to reduce duplicate efforts?
33 * A: Possibly. For now, only one location uses this.
34 * How to deal with pre-stream errors?
35 * Tell client_side_reply that we *want* an error page before any
36 * stream calls occur. Then we simply read as normal.
37 *
38 *
39 \section pconn_logic Persistent connection logic:
40 *
41 \par
42 * requests (httpClientRequest structs) get added to the connection
43 * list, with the current one being chr
44 *
45 \par
46 * The request is *immediately* kicked off, and data flows through
47 * to clientSocketRecipient.
48 *
49 \par
50 * If the data that arrives at clientSocketRecipient is not for the current
51 * request, clientSocketRecipient simply returns, without requesting more
52 * data, or sending it.
53 *
54 \par
55 * ConnStateData::kick() will then detect the presence of data in
 * the next ClientHttpRequest, and will send it, re-establishing the
57 * data flow.
58 */
59
60#include "squid.h"
61#include "acl/FilledChecklist.h"
62#include "anyp/Host.h"
63#include "anyp/PortCfg.h"
64#include "base/AsyncCallbacks.h"
65#include "base/Subscription.h"
66#include "base/TextException.h"
67#include "CachePeer.h"
68#include "client_db.h"
69#include "client_side.h"
70#include "client_side_reply.h"
71#include "client_side_request.h"
72#include "ClientRequestContext.h"
73#include "clientStream.h"
74#include "comm.h"
75#include "comm/Connection.h"
76#include "comm/Loops.h"
77#include "comm/Read.h"
78#include "comm/TcpAcceptor.h"
79#include "comm/Write.h"
80#include "CommCalls.h"
81#include "debug/Messages.h"
82#include "error/ExceptionErrorDetail.h"
83#include "errorpage.h"
84#include "fd.h"
85#include "fde.h"
86#include "fqdncache.h"
87#include "FwdState.h"
88#include "globals.h"
89#include "helper.h"
90#include "helper/Reply.h"
91#include "http.h"
92#include "http/one/RequestParser.h"
93#include "http/one/TeChunkedParser.h"
94#include "http/Stream.h"
95#include "HttpHdrContRange.h"
96#include "HttpHeaderTools.h"
97#include "HttpReply.h"
98#include "HttpRequest.h"
99#include "internal.h"
100#include "ipc/FdNotes.h"
101#include "ipc/StartListening.h"
102#include "log/access_log.h"
103#include "MemBuf.h"
104#include "MemObject.h"
105#include "mime_header.h"
106#include "parser/Tokenizer.h"
107#include "proxyp/Header.h"
108#include "proxyp/Parser.h"
109#include "sbuf/Stream.h"
110#include "security/Certificate.h"
111#include "security/CommunicationSecrets.h"
112#include "security/Io.h"
113#include "security/KeyLog.h"
114#include "security/NegotiationHistory.h"
115#include "servers/forward.h"
116#include "SquidConfig.h"
117#include "StatCounters.h"
118#include "StatHist.h"
119#include "Store.h"
120#include "TimeOrTag.h"
121#include "tools.h"
122
123#if USE_AUTH
124#include "auth/UserRequest.h"
125#endif
126#if USE_DELAY_POOLS
127#include "ClientInfo.h"
128#include "MessageDelayPools.h"
129#endif
130#if USE_OPENSSL
131#include "ssl/bio.h"
132#include "ssl/context_storage.h"
133#include "ssl/gadgets.h"
134#include "ssl/helper.h"
135#include "ssl/ProxyCerts.h"
136#include "ssl/ServerBump.h"
137#include "ssl/support.h"
138#endif
139
140#include <climits>
141#include <cmath>
142#include <limits>
143
144#if HAVE_SYSTEMD_SD_DAEMON_H
145#include <systemd/sd-daemon.h>
146#endif
147
148// TODO: Remove this custom dialer and simplify by creating the TcpAcceptor
149// subscription later, inside clientListenerConnectionOpened() callback, just
150// like htcpOpenPorts(), icpOpenPorts(), and snmpPortOpened() do it.
151/// dials clientListenerConnectionOpened call
class ListeningStartedDialer:
    public CallDialer,
    public WithAnswer<Ipc::StartListeningAnswer>
{
public:
    /// the callback function signature: receives the now-listening port
    /// configuration, its FD-note label, and the accept subscription
    typedef void (*Handler)(AnyP::PortCfgPointer &portCfg, const Ipc::FdNoteId note, const Subscription::Pointer &sub);
    ListeningStartedDialer(Handler aHandler, AnyP::PortCfgPointer &aPortCfg, const Ipc::FdNoteId note, const Subscription::Pointer &aSub):
        handler(aHandler), portCfg(aPortCfg), portTypeNote(note), sub(aSub) {}

    /* CallDialer API */
    void print(std::ostream &os) const override {
        os << '(' << answer_ << ", " << FdNote(portTypeNote) << " port=" << (void*)&portCfg << ')';
    }

    // NOTE(review): declared virtual without `override`; presumably matching
    // the CallDialer base interface -- confirm against CallDialer declaration
    virtual bool canDial(AsyncCall &) const { return true; }
    virtual void dial(AsyncCall &) { (handler)(portCfg, portTypeNote, sub); }

    /* WithAnswer API */
    Ipc::StartListeningAnswer &answer() override { return answer_; }

public:
    Handler handler; ///< called by dial() with the three members below

private:
    // answer_.conn (set/updated by IPC code) is portCfg.listenConn (used by us)
    Ipc::StartListeningAnswer answer_; ///< StartListening() results
    AnyP::PortCfgPointer portCfg;   ///< from HttpPortList
    Ipc::FdNoteId portTypeNote;    ///< Type of IPC socket being opened
    Subscription::Pointer sub; ///< The handler to be subscribed for this connection listener
};
182
183static void clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub);
184
185static IOACB httpAccept;
186static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength);
187
188static void clientUpdateStatHistCounters(const LogTags &logType, int svc_time);
189static void clientUpdateStatCounters(const LogTags &logType);
190static void clientUpdateHierCounters(HierarchyLogEntry *);
191static bool clientPingHasFinished(ping_data const *aPing);
192void prepareLogWithRequestDetails(HttpRequest *, const AccessLogEntryPointer &);
193static void ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn);
194
195char *skipLeadingSpace(char *aString);
196
197void
198clientUpdateStatCounters(const LogTags &logType)
199{
200 ++statCounter.client_http.requests;
201
202 if (logType.isTcpHit())
203 ++statCounter.client_http.hits;
204
205 if (logType.oldType == LOG_TCP_HIT)
206 ++statCounter.client_http.disk_hits;
207 else if (logType.oldType == LOG_TCP_MEM_HIT)
208 ++statCounter.client_http.mem_hits;
209}
210
/// Records this transaction's service time into the all-requests histogram
/// and, for well-defined LogTags categories, into the matching per-category
/// histogram (near-hit, near-miss, hit, or miss).
void
clientUpdateStatHistCounters(const LogTags &logType, int svc_time)
{
    statCounter.client_http.allSvcTime.count(svc_time);
    /**
     * The idea here is not to be complete, but to get service times
     * for only well-defined types. For example, we don't include
     * LOG_TCP_REFRESH_FAIL because its not really a cache hit
     * (we *tried* to validate it, but failed).
     */

    switch (logType.oldType) {

    case LOG_TCP_REFRESH_UNMODIFIED:
        statCounter.client_http.nearHitSvcTime.count(svc_time);
        break;

    case LOG_TCP_INM_HIT:
    case LOG_TCP_IMS_HIT:
        statCounter.client_http.nearMissSvcTime.count(svc_time);
        break;

    case LOG_TCP_HIT:

    case LOG_TCP_MEM_HIT:

    case LOG_TCP_OFFLINE_HIT:
        statCounter.client_http.hitSvcTime.count(svc_time);
        break;

    case LOG_TCP_MISS:

    case LOG_TCP_CLIENT_REFRESH_MISS:
        statCounter.client_http.missSvcTime.count(svc_time);
        break;

    default:
        /* make compiler warnings go away */
        break;
    }
}
252
253bool
254clientPingHasFinished(ping_data const *aPing)
255{
256 if (0 != aPing->stop.tv_sec && 0 != aPing->start.tv_sec)
257 return true;
258
259 return false;
260}
261
/// Updates global hierarchy statistics (cache-digest, ICP, and netdb
/// counters) according to how this transaction's peer was selected.
void
clientUpdateHierCounters(HierarchyLogEntry * someEntry)
{
    ping_data *i;

    switch (someEntry->code) {
#if USE_CACHE_DIGESTS

    case CD_PARENT_HIT:

    case CD_SIBLING_HIT:
        // peer chosen via cache digest lookup
        ++ statCounter.cd.times_used;
        break;
#endif

    case SIBLING_HIT:

    case PARENT_HIT:

    case FIRST_PARENT_MISS:

    case CLOSEST_PARENT_MISS:
        // peer chosen via ICP ping; also record ping timing/timeouts
        ++ statCounter.icp.times_used;
        i = &someEntry->ping;

        if (clientPingHasFinished(i))
            statCounter.icp.querySvcTime.count(tvSubUsec(i->start, i->stop));

        if (i->timeout)
            ++ statCounter.icp.query_timeouts;

        break;

    case CLOSEST_PARENT:

    case CLOSEST_DIRECT:
        // peer chosen via netdb RTT measurements
        ++ statCounter.netdb.times_used;

        break;

    default:
        break;
    }
}
306
/// Updates all global performance counters affected by this finished
/// transaction: request/hit tallies, error count, service-time histograms,
/// and hierarchy (peer selection) statistics.
void
ClientHttpRequest::updateCounters()
{
    clientUpdateStatCounters(loggingTags());

    if (request->error)
        ++ statCounter.client_http.errors;

    // service time is measured from the access-log start timestamp
    clientUpdateStatHistCounters(loggingTags(),
                                 tvSubMsec(al->cache.start_time, current_time));

    clientUpdateHierCounters(&request->hier);
}
320
/// Copies request-derived details (headers when log_mime_hdrs is on, method,
/// version, hierarchy info, external ACL user, ICAP timing) into the given
/// access log entry before it is logged.
void
prepareLogWithRequestDetails(HttpRequest *request, const AccessLogEntryPointer &aLogEntry)
{
    assert(request);
    assert(aLogEntry != nullptr);

    if (Config.onoff.log_mime_hdrs) {
        // pack each header set into a MemBuf and keep a malloc'ed copy
        MemBuf mb;
        mb.init();
        request->header.packInto(&mb);
        //This is the request after adaptation or redirection
        aLogEntry->headers.adapted_request = xstrdup(mb.buf);

        // the virgin request is saved to aLogEntry->request
        if (aLogEntry->request) {
            mb.reset();
            aLogEntry->request->header.packInto(&mb);
            aLogEntry->headers.request = xstrdup(mb.buf);
        }

#if USE_ADAPTATION
        const Adaptation::History::Pointer ah = request->adaptLogHistory();
        if (ah != nullptr) {
            mb.reset();
            ah->lastMeta.packInto(&mb);
            aLogEntry->adapt.last_meta = xstrdup(mb.buf);
        }
#endif

        mb.clean();
    }

#if ICAP_CLIENT
    const Adaptation::Icap::History::Pointer ih = request->icapHistory();
    if (ih != nullptr)
        ih->processingTime(aLogEntry->icap.processingTime);
#endif

    aLogEntry->http.method = request->method;
    aLogEntry->http.version = request->http_ver;
    aLogEntry->hier = request->hier;
    aLogEntry->cache.extuser = request->extacl_user.termedBuf();

    // Adapted request, if any, inherits and then collects all the stats, but
    // the virgin request gets logged instead; copy the stats to log them.
    // TODO: avoid losses by keeping these stats in a shared history object?
    if (aLogEntry->request) {
        aLogEntry->request->dnsWait = request->dnsWait;
        aLogEntry->request->error = request->error;
    }
}
372
/// Finalizes and emits the access log entry for this transaction, annotates
/// the request per the configured notes, and (subject to the
/// stats_collection ACL) updates performance counters and the client DB.
void
ClientHttpRequest::logRequest()
{
    if (!out.size && loggingTags().oldType == LOG_TAG_NONE)
        debugs(33, 5, "logging half-baked transaction: " << log_uri);

    al->icp.opcode = ICP_INVALID;
    al->url = log_uri;
    debugs(33, 9, "clientLogRequest: al.url='" << al->url << "'");

    // prefer the reply already attached to the log entry; otherwise fall
    // back to the freshest reply of the logging store entry (if any)
    const auto findReply = [this]() -> const HttpReply * {
        if (al->reply)
            return al->reply.getRaw();
        if (const auto le = loggingEntry())
            return le->hasFreshestReply();
        return nullptr;
    };
    if (const auto reply = findReply()) {
        al->http.code = reply->sline.status();
        al->http.content_type = reply->content_type.termedBuf();
    }

    debugs(33, 9, "clientLogRequest: http.code='" << al->http.code << "'");

    if (loggingEntry() && loggingEntry()->mem_obj && loggingEntry()->objectLen() >= 0)
        al->cache.objectSize = loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?

    // request/reply sizes as seen on the client connection
    al->http.clientRequestSz.header = req_sz;
    // the virgin request is saved to al->request
    if (al->request && al->request->body_pipe)
        al->http.clientRequestSz.payloadData = al->request->body_pipe->producedSize();
    al->http.clientReplySz.header = out.headers_sz;
    // XXX: calculate without payload encoding or headers !!
    al->http.clientReplySz.payloadData = out.size - out.headers_sz; // pretend its all un-encoded data for now.

    al->cache.highOffset = out.offset;

    // total transaction response time
    tvSub(al->cache.trTime, al->cache.start_time, current_time);

    if (request)
        prepareLogWithRequestDetails(request, al);

#if USE_OPENSSL && 0

    /* This is broken. Fails if the connection has been closed. Needs
     * to snarf the ssl details some place earlier..
     */
    if (getConn() != NULL)
        al->cache.ssluser = sslGetUserEmail(fd_table[getConn()->fd].ssl);

#endif

    if (request) {
        // add configured annotations (note directives) that match this
        // transaction to the request notes
        SBuf matched;
        for (auto h: Config.notes) {
            if (h->match(request, al->reply.getRaw(), al, matched)) {
                request->notes()->add(h->key(), matched);
                debugs(33, 3, h->key() << " " << matched);
            }
        }
        // The al->notes and request->notes must point to the same object.
        al->syncNotes(request);

        HTTPMSGUNLOCK(al->adapted_request);
        al->adapted_request = request;
        HTTPMSGLOCK(al->adapted_request);
    }

    ACLFilledChecklist checklist(nullptr, request);
    checklist.updateAle(al);
    // no need checklist.syncAle(): already synced
    accessLogLog(al, &checklist);

    // stats_collection ACL (when configured) decides whether this
    // transaction counts toward performance statistics
    bool updatePerformanceCounters = true;
    if (Config.accessList.stats_collection) {
        ACLFilledChecklist statsCheck(Config.accessList.stats_collection, request);
        statsCheck.updateAle(al);
        updatePerformanceCounters = statsCheck.fastCheck().allowed();
    }

    if (updatePerformanceCounters) {
        if (request)
            updateCounters();

        if (getConn() != nullptr && getConn()->clientConnection != nullptr)
            clientdbUpdate(getConn()->clientConnection->remote, loggingTags(), AnyP::PROTO_HTTP, out.size);
    }
}
461
/// Releases per-transaction resources: URI strings, range boundary, the
/// HttpRequest, and aborts any still-attached client stream.
void
ClientHttpRequest::freeResources()
{
    safe_free(uri);
    safe_free(redirect.location);
    range_iter.boundary.clean();
    clearRequest();

    // abort the stream (if any) last, after the request is cleared
    if (client_stream.tail)
        clientStreamAbort((clientStreamNode *)client_stream.tail->data, this);
}
473
474void
475httpRequestFree(void *data)
476{
477 ClientHttpRequest *http = (ClientHttpRequest *)data;
478 assert(http != nullptr);
479 delete http;
480}
481
/* This is a handler normally called by comm_close() */
/// Close handler: records the closure on the client connection (which is
/// kept around for logging/clientdb cleanup) and schedules self-destruction.
void ConnStateData::connStateClosed(const CommCloseCbParams &)
{
    if (clientConnection) {
        clientConnection->noteClosure();
        // keep closed clientConnection for logging, clientdb cleanup, etc.
    }
    deleteThis("ConnStateData::connStateClosed");
}
491
492#if USE_AUTH
/// Installs, clears, or rejects a change of the connection's authentication
/// credentials. A first-time set is accepted quietly; clearing triggers a
/// graceful stop of receiving; a credentials *change* aborts the connection
/// with a TCP RST (see the security rationale below).
/// \param aur the new credentials (or nil to erase)
/// \param by  caller description, for debugging only
void
ConnStateData::setAuth(const Auth::UserRequest::Pointer &aur, const char *by)
{
    if (auth_ == nullptr) {
        if (aur != nullptr) {
            debugs(33, 2, "Adding connection-auth to " << clientConnection << " from " << by);
            auth_ = aur;
        }
        return;
    }

    // clobbered with self-pointer
    // NP: something nasty is going on in Squid, but harmless.
    if (aur == auth_) {
        debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection << " from " << by);
        return;
    }

    /*
     * Connection-auth relies on a single set of credentials being preserved
     * for all requests on a connection once they have been setup.
     * There are several things which need to happen to preserve security
     * when connection-auth credentials change unexpectedly or are unset.
     *
     * 1) auth helper released from any active state
     *
     * They can only be reserved by a handshake process which this
     * connection can now never complete.
     * This prevents helpers hanging when their connections close.
     *
     * 2) pinning is expected to be removed and server conn closed
     *
     * The upstream link is authenticated with the same credentials.
     * Expecting the same level of consistency we should have received.
     * This prevents upstream being faced with multiple or missing
     * credentials after authentication.
     * NP: un-pin is left to the cleanup in ConnStateData::swanSong()
     * we just trigger that cleanup here via comm_reset_close() or
     * ConnStateData::stopReceiving()
     *
     * 3) the connection needs to close.
     *
     * This prevents attackers injecting requests into a connection,
     * or gateways wrongly multiplexing users into a single connection.
     *
     * When credentials are missing closure needs to follow an auth
     * challenge for best recovery by the client.
     *
     * When credentials change there is nothing we can do but abort as
     * fast as possible. Sending TCP RST instead of an HTTP response
     * is the best-case action.
     */

    // clobbered with nul-pointer
    if (aur == nullptr) {
        debugs(33, 2, "WARNING: Graceful closure on " << clientConnection << " due to connection-auth erase from " << by);
        auth_->releaseAuthServer();
        auth_ = nullptr;
        // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
        // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
        // we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
        stopReceiving("connection-auth removed");
        return;
    }

    // clobbered with alternative credentials
    if (aur != auth_) {
        debugs(33, 2, "ERROR: Closing " << clientConnection << " due to change of connection-auth from " << by);
        auth_->releaseAuthServer();
        auth_ = nullptr;
        // this is a fatal type of problem.
        // Close the connection immediately with TCP RST to abort all traffic flow
        comm_reset_close(clientConnection);
        return;
    }

    /* NOT REACHABLE */
}
571#endif
572
573void
574ConnStateData::resetReadTimeout(const time_t timeout)
575{
576 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
577 AsyncCall::Pointer callback = JobCallback(33, 5, TimeoutDialer, this, ConnStateData::requestTimeout);
578 commSetConnTimeout(clientConnection, timeout, callback);
579}
580
581void
582ConnStateData::extendLifetime()
583{
584 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
585 AsyncCall::Pointer callback = JobCallback(5, 4, TimeoutDialer, this, ConnStateData::lifetimeTimeout);
586 commSetConnTimeout(clientConnection, Config.Timeout.lifetime, callback);
587}
588
// cleans up before destructor is called
/// Final async-job cleanup: stops reading, decrements the client DB
/// connection count, terminates all in-flight transactions, logs, unpins,
/// and releases auth state. Sets flags.swanSang so the destructor can
/// verify this ran.
void
ConnStateData::swanSong()
{
    debugs(33, 2, clientConnection);

    flags.readMore = false;
    clientdbEstablished(clientConnection->remote, -1); /* decrement */

    terminateAll(ERR_NONE, LogTagsErrors());
    checkLogging();

    // XXX: Closing pinned conn is too harsh: The Client may want to continue!
    unpinConnection(true);

    Server::swanSong();

#if USE_AUTH
    // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
    setAuth(nullptr, "ConnStateData::SwanSong cleanup");
#endif

    flags.swanSang = true;
}
613
/// Async-job exception handler: lets Server log the exception and stop the
/// job, then records an ERR_GATEWAY_FAILURE error detail for this
/// connection's error reporting.
void
ConnStateData::callException(const std::exception &ex)
{
    Server::callException(ex); // logs ex and stops the job

    // use the exception's own id when available; otherwise record this
    // catch location (Here()) as the detail
    ErrorDetail::Pointer errorDetail;
    if (const auto tex = dynamic_cast<const TextException*>(&ex))
        errorDetail = new ExceptionErrorDetail(tex->id());
    else
        errorDetail = new ExceptionErrorDetail(Here().id());
    updateError(ERR_GATEWAY_FAILURE, errorDetail);
}
626
627void
628ConnStateData::updateError(const Error &error)
629{
630 if (const auto context = pipeline.front()) {
631 const auto http = context->http;
632 assert(http);
633 http->updateError(error);
634 } else {
635 bareError.update(error);
636 }
637}
638
639bool
640ConnStateData::isOpen() const
641{
642 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
643 Comm::IsConnOpen(clientConnection) &&
644 !fd_table[clientConnection->fd].closing();
645}
646
/// Destructor: sanity-checks that the connection was closed and swanSong()
/// ran, then releases the body pipe, parser, and (with OpenSSL) bump state.
ConnStateData::~ConnStateData()
{
    debugs(33, 3, clientConnection);

    if (isOpen())
        debugs(33, DBG_IMPORTANT, "ERROR: Squid BUG: ConnStateData did not close " << clientConnection);

    if (!flags.swanSang)
        debugs(33, DBG_IMPORTANT, "ERROR: Squid BUG: ConnStateData was not destroyed properly; " << clientConnection);

    if (bodyPipe != nullptr)
        stopProducingFor(bodyPipe, false);

    delete bodyParser; // TODO: pool

#if USE_OPENSSL
    delete sslServerBump;
#endif
}
666
/**
 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
 * This is the client-side persistent connection flag. We need
 * to set this relatively early in the request processing
 * to handle hacks for broken servers and clients.
 */
void
clientSetKeepaliveFlag(ClientHttpRequest * http)
{
    HttpRequest *request = http->request;

    debugs(33, 3, "http_ver = " << request->http_ver);
    debugs(33, 3, "method = " << request->method);

    // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
    // the request itself decides persistence (version + headers)
    request->flags.proxyKeepalive = request->persistent();
}
684
685int
686clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)
687{
688 if (Config.maxRequestBodySize &&
689 bodyLength > Config.maxRequestBodySize)
690 return 1; /* too large */
691
692 return 0;
693}
694
/// Whether this transaction's request asks for multiple ranges
/// (delegates to HttpRequest::multipartRangeRequest()).
bool
ClientHttpRequest::multipartRangeRequest() const
{
    return request->multipartRangeRequest();
}
700
/// Appends the terminating multipart boundary ("--boundary--") to mb,
/// prefixed with CRLF as multipart framing requires.
void
clientPackTermBound(String boundary, MemBuf *mb)
{
    mb->appendf("\r\n--" SQUIDSTRINGPH "--\r\n", SQUIDSTRINGPRINT(boundary));
    debugs(33, 6, "buf offset: " << mb->size);
}
707
/// Packs one multipart range part header into mb: the boundary line, a
/// Content-Type copied from the reply (if present), and a Content-Range
/// built from the given spec and the reply's content length.
void
clientPackRangeHdr(const HttpReplyPointer &rep, const HttpHdrRangeSpec * spec, String boundary, MemBuf * mb)
{
    HttpHeader hdr(hoReply);
    assert(rep);
    assert(spec);

    /* put boundary */
    debugs(33, 5, "appending boundary: " << boundary);
    /* rfc2046 requires to _prepend_ boundary with <crlf>! */
    mb->appendf("\r\n--" SQUIDSTRINGPH "\r\n", SQUIDSTRINGPRINT(boundary));

    /* stuff the header with required entries and pack it */

    if (rep->header.has(Http::HdrType::CONTENT_TYPE))
        hdr.putStr(Http::HdrType::CONTENT_TYPE, rep->header.getStr(Http::HdrType::CONTENT_TYPE));

    httpHeaderAddContRange(&hdr, *spec, rep->content_length);

    hdr.packInto(mb);
    hdr.clean();

    /* append <crlf> (we packed a header, not a reply) */
    mb->append("\r\n", 2);
}
733
/** returns expected content length for multi-range replies
 * note: assumes that httpHdrRangeCanonize has already been called
 * warning: assumes that HTTP headers for individual ranges at the
 * time of the actual assembly will be exactly the same as
 * the headers when clientMRangeCLen() is called */
int64_t
ClientHttpRequest::mRangeCLen() const
{
    int64_t clen = 0;
    MemBuf mb;

    assert(memObject());

    mb.init();
    HttpHdrRange::iterator pos = request->range->begin();

    while (pos != request->range->end()) {
        /* account for headers for this range */
        mb.reset();
        clientPackRangeHdr(&storeEntry()->mem().freshestReply(),
                           *pos, range_iter.boundary, &mb);
        clen += mb.size;

        /* account for range content */
        clen += (*pos)->length;

        debugs(33, 6, "clientMRangeCLen: (clen += " << mb.size << " + " << (*pos)->length << ") == " << clen);
        ++pos;
    }

    /* account for the terminating boundary */
    mb.reset();

    clientPackTermBound(range_iter.boundary, &mb);

    clen += mb.size;

    mb.clean();

    return clen;
}
775
776/**
777 * generates a "unique" boundary string for multipart responses
778 * the caller is responsible for cleaning the string */
779String
780ClientHttpRequest::rangeBoundaryStr() const
781{
782 const char *key;
783 String b(visible_appname_string);
784 b.append(":",1);
785 key = storeEntry()->getMD5Text();
786 b.append(key, strlen(key));
787 return b;
788}
789
/**
 * Write a chunk of data to a client socket. If the reply is present,
 * send the reply headers down the wire too, and clean them up when
 * finished.
 * Pre-condition:
 *   The request is one backed by a connection, not an internal request.
 *   data context is not NULL
 *   There are no more entries in the stream chain.
 */
void
clientSocketRecipient(clientStreamNode * node, ClientHttpRequest * http,
                      HttpReply * rep, StoreIOBuffer receivedData)
{
    // do not try to deliver if client already ABORTED
    if (!http->getConn() || !cbdataReferenceValid(http->getConn()) || !Comm::IsConnOpen(http->getConn()->clientConnection))
        return;

    /* Test preconditions */
    assert(node != nullptr);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    assert(node->node.next == nullptr);
    Http::StreamPointer context = dynamic_cast<Http::Stream *>(node->data.getRaw());
    assert(context != nullptr);

    /* TODO: check offset is what we asked for */

    // TODO: enforces HTTP/1 MUST on pipeline order, but is irrelevant to HTTP/2
    // only the front-of-pipeline stream may write now; others are deferred
    if (context != http->getConn()->pipeline.front())
        context->deferRecipientForLater(node, rep, receivedData);
    else if (http->getConn()->cbControlMsgSent) // 1xx to the user is pending
        context->deferRecipientForLater(node, rep, receivedData);
    else
        http->getConn()->handleReply(rep, receivedData);
}
829
/**
 * Called when a downstream node is no longer interested in
 * our data. As we are a terminal node, this means on aborts
 * only
 */
void
clientSocketDetach(clientStreamNode * node, ClientHttpRequest * http)
{
    /* Test preconditions */
    assert(node != nullptr);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    /* Set null by ContextFree */
    assert(node->node.next == nullptr);
    /* this is the assert discussed above */
    assert(nullptr == dynamic_cast<Http::Stream *>(node->data.getRaw()));
    /* We are only called when the client socket shutsdown.
     * Tell the prev pipeline member we're finished
     */
    clientStreamDetach(node, http);
}
855
/// Prepares the idle connection for the next request: labels the FD,
/// (re)arms the idle read timeout, and starts reading.
void
ConnStateData::readNextRequest()
{
    debugs(33, 5, clientConnection << " reading next req");

    fd_note(clientConnection->fd, "Idle client: Waiting for next request");
    /**
     * Set the timeout BEFORE calling readSomeData().
     */
    resetReadTimeout(clientConnection->timeLeft(idleTimeout()));

    readSomeData();
    /** Please don't do anything with the FD past here! */
}
870
/// Resumes delivery of a pipelined response that was previously deferred by
/// clientSocketRecipient() because it was not at the pipeline front.
static void
ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn)
{
    debugs(33, 2, conn->clientConnection << " Sending next");

    /** If the client stream is waiting on a socket write to occur, then */

    if (deferredRequest->flags.deferred) {
        /** NO data is allowed to have been sent. */
        assert(deferredRequest->http->out.size == 0);
        /** defer now. */
        // replay the saved clientSocketRecipient() parameters
        clientSocketRecipient(deferredRequest->deferredparams.node,
                              deferredRequest->http,
                              deferredRequest->deferredparams.rep,
                              deferredRequest->deferredparams.queuedBuffer);
    }

    /** otherwise, the request is still active in a callback somewhere,
     * and we are done
     */
}
892
/// Restarts request processing after the current response is finished:
/// either resumes a deferred pipelined request, parses buffered requests,
/// or schedules a read for the next request. Closes the connection when it
/// cannot safely continue (lost pinned server, earlier receive error).
void
ConnStateData::kick()
{
    if (!Comm::IsConnOpen(clientConnection)) {
        debugs(33, 2, clientConnection << " Connection was closed");
        return;
    }

    if (pinning.pinned && !Comm::IsConnOpen(pinning.serverConnection)) {
        debugs(33, 2, clientConnection << " Connection was pinned but server side gone. Terminating client connection");
        clientConnection->close();
        return;
    }

    /** \par
     * We are done with the response, and we are either still receiving request
     * body (early response!) or have already stopped receiving anything.
     *
     * If we are still receiving, then parseRequests() below will fail.
     * (XXX: but then we will call readNextRequest() which may succeed and
     * execute a smuggled request as we are not done with the current request).
     *
     * If we stopped because we got everything, then try the next request.
     *
     * If we stopped receiving because of an error, then close now to avoid
     * getting stuck and to prevent accidental request smuggling.
     */

    if (const char *reason = stoppedReceiving()) {
        debugs(33, 3, "closing for earlier request error: " << reason);
        clientConnection->close();
        return;
    }

    /** \par
     * Attempt to parse a request from the request buffer.
     * If we've been fed a pipelined request it may already
     * be in our read buffer.
     */

    parseRequests();

    // parseRequests() may have closed or invalidated us
    if (!isOpen())
        return;

    /** \par
     * At this point we either have a parsed request (which we've
     * kicked off the processing for) or not. If we have a deferred
     * request (parsed but deferred for pipelining processing reasons)
     * then look at processing it. If not, simply kickstart
     * another read.
     */
    Http::StreamPointer deferredRequest = pipeline.front();
    if (deferredRequest != nullptr) {
        debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded");
        ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
    } else if (flags.readMore) {
        debugs(33, 3, clientConnection << ": calling readNextRequest()");
        readNextRequest();
    } else {
        // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
        debugs(33, DBG_IMPORTANT, MYNAME << "abandoning " << clientConnection);
    }
}
957
/// Records that we must stop sending responses (due to the given error) and
/// closes the connection -- unless we still need to read more request body
/// bytes, in which case closure waits for the receiver to finish.
void
ConnStateData::stopSending(const char *error)
{
    debugs(33, 4, "sending error (" << clientConnection << "): " << error <<
           "; old receiving error: " <<
           (stoppedReceiving() ? stoppedReceiving_ : "none"));

    if (const char *oldError = stoppedSending()) {
        debugs(33, 3, "already stopped sending: " << oldError);
        return; // nothing has changed as far as this connection is concerned
    }
    stoppedSending_ = error;

    if (!stoppedReceiving()) {
        if (const int64_t expecting = mayNeedToReadMoreBody()) {
            debugs(33, 5, "must still read " << expecting <<
                   " request body bytes with " << inBuf.length() << " unused");
            return; // wait for the request receiver to finish reading
        }
    }

    clientConnection->close();
}
981
982void
983ConnStateData::afterClientWrite(size_t size)
984{
985 if (pipeline.empty())
986 return;
987
988 auto ctx = pipeline.front();
989 if (size) {
990 statCounter.client_http.kbytes_out += size;
991 if (ctx->http->loggingTags().isTcpHit())
992 statCounter.client_http.hit_kbytes_out += size;
993 }
994 ctx->writeComplete(size);
995}
996
/// Builds an error-reporting stream for a request we refuse to parse:
/// creates a ClientHttpRequest marked with the given error URI, wires up a
/// client stream for it, and returns the new Http::Stream.
/// \param uri the pseudo-URI recorded for this parse failure
Http::Stream *
ConnStateData::abortRequestParsing(const char *const uri)
{
    ClientHttpRequest *http = new ClientHttpRequest(this);
    http->req_sz = inBuf.length(); // everything buffered counts as this request
    http->setErrorUri(uri);
    auto *context = new Http::Stream(clientConnection, http);
    StoreIOBuffer tempBuffer;
    tempBuffer.data = context->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, new clientReplyContext(http), clientSocketRecipient,
                     clientSocketDetach, context, tempBuffer);
    return context;
}
1012
1013void
1014ConnStateData::startShutdown()
1015{
1016 // RegisteredRunner API callback - Squid has been shut down
1017
1018 // if connection is idle terminate it now,
1019 // otherwise wait for grace period to end
1020 if (pipeline.empty())
1021 endingShutdown();
1022}
1023
1024void
1025ConnStateData::endingShutdown()
1026{
1027 // RegisteredRunner API callback - Squid shutdown grace period is over
1028
1029 // force the client connection to close immediately
1030 // swanSong() in the close handler will cleanup.
1031 if (Comm::IsConnOpen(clientConnection))
1032 clientConnection->close();
1033}
1034
1035char *
1036skipLeadingSpace(char *aString)
1037{
1038 char *result = aString;
1039
1040 while (xisspace(*aString))
1041 ++aString;
1042
1043 return result;
1044}
1045
1046/**
1047 * 'end' defaults to NULL for backwards compatibility
1048 * remove default value if we ever get rid of NULL-terminated
1049 * request buffers.
1050 */
1051const char *
1052findTrailingHTTPVersion(const char *uriAndHTTPVersion, const char *end)
1053{
1054 if (nullptr == end) {
1055 end = uriAndHTTPVersion + strcspn(uriAndHTTPVersion, "\r\n");
1056 assert(end);
1057 }
1058
1059 for (; end > uriAndHTTPVersion; --end) {
1060 if (*end == '\n' || *end == '\r')
1061 continue;
1062
1063 if (xisspace(*end)) {
1064 if (strncasecmp(end + 1, "HTTP/", 5) == 0)
1065 return end + 1;
1066 else
1067 break;
1068 }
1069 }
1070
1071 return nullptr;
1072}
1073
/// Builds an absolute URL for a request received on an accelerator
/// (reverse-proxy) port, per the port's vhost/vport/defaultsite settings.
/// \returns a malloc'd absolute URL, or nullptr when no rewrite applies
static char *
prepareAcceleratedURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
{
    int vhost = conn->port->vhost;
    int vport = conn->port->vport;
    static char ipbuf[MAX_IPSTRLEN];

    /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */

    // XXX: re-use proper URL parser for this
    SBuf url = hp->requestUri(); // use full provided URI if we abort
    do { // use a loop so we can break out of it
        ::Parser::Tokenizer tok(url);
        if (tok.skip('/')) // origin-form URL already.
            break;

        if (conn->port->vhost)
            return nullptr; /* already in good shape */

        // skip the URI scheme
        static const CharacterSet uriScheme = CharacterSet("URI-scheme","+-.") + CharacterSet::ALPHA + CharacterSet::DIGIT;
        static const SBuf uriSchemeEnd("://");
        if (!tok.skipAll(uriScheme) || !tok.skip(uriSchemeEnd))
            break;

        // skip the authority segment
        // RFC 3986 complex nested ABNF for "authority" boils down to this:
        static const CharacterSet authority = CharacterSet("authority","-._~%:@[]!$&'()*+,;=") +
                                              CharacterSet::HEXDIG + CharacterSet::ALPHA + CharacterSet::DIGIT;
        if (!tok.skipAll(authority))
            break;

        // reduce the absolute-form URL to its path/query/fragment remainder
        static const SBuf slashUri("/");
        const SBuf t = tok.remaining();
        if (t.isEmpty())
            url = slashUri;
        else if (t[0]=='/') // looks like path
            url = t;
        else if (t[0]=='?' || t[0]=='#') { // looks like query or fragment. fix '/'
            url = slashUri;
            url.append(t);
        } // else do nothing. invalid path

    } while(false);

#if SHOULD_REJECT_UNKNOWN_URLS
    // reject URI which are not well-formed even after the processing above
    if (url.isEmpty() || url[0] != '/') {
        hp->parseStatusCode = Http::scBadRequest;
        return conn->abortRequestParsing("error:invalid-request");
    }
#endif

    // a negative vport means "use the port the client actually connected to"
    if (vport < 0)
        vport = conn->clientConnection->local.port();

    char *receivedHost = nullptr;
    if (vhost && (receivedHost = hp->getHostHeaderField())) {
        // vhost mode: rebuild the URL around the client-supplied Host header
        SBuf host(receivedHost);
        debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport);
        if (vport > 0) {
            // remove existing :port (if any), cope with IPv6+ without port
            const auto lastColonPos = host.rfind(':');
            if (lastColonPos != SBuf::npos && *host.rbegin() != ']') {
                host.chop(0, lastColonPos); // truncate until the last colon
            }
            host.appendf(":%d", vport);
        } // else nothing to alter port-wise.
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const auto url_sz = scheme.length() + host.length() + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH SQUIDSBUFPH, SQUIDSBUFPRINT(scheme), SQUIDSBUFPRINT(host), SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VHOST REWRITE: " << uri);
        return uri;
    } else if (conn->port->defaultsite /* && !vhost */) {
        // defaultsite mode: use the configured site name as the host
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn->port->defaultsite << " + vport=" << vport);
        char vportStr[32];
        vportStr[0] = '\0';
        if (vport > 0) {
            snprintf(vportStr, sizeof(vportStr),":%d",vport);
        }
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + strlen(conn->port->defaultsite) + sizeof(vportStr) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s%s" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), conn->port->defaultsite, vportStr, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << uri);
        return uri;
    } else if (vport > 0 /* && (!vhost || no Host:) */) {
        debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport);
        /* Put the local socket IP address as the hostname, with whatever vport we found */
        conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + sizeof(ipbuf) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), ipbuf, vport, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VPORT REWRITE: " << uri);
        return uri;
    }

    // no rewrite rule applied
    return nullptr;
}
1177
1178static char *
1179buildUrlFromHost(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1180{
1181 char *uri = nullptr;
1182 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1183 if (const char *host = hp->getHostHeaderField()) {
1184 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1185 const int url_sz = scheme.length() + strlen(host) + hp->requestUri().length() + 32;
1186 uri = static_cast<char *>(xcalloc(url_sz, 1));
1187 snprintf(uri, url_sz, SQUIDSBUFPH "://%s" SQUIDSBUFPH,
1188 SQUIDSBUFPRINT(scheme),
1189 host,
1190 SQUIDSBUFPRINT(hp->requestUri()));
1191 }
1192 return uri;
1193}
1194
/// Builds an absolute URL for a request received on a connection that has
/// been switched to HTTPS.
/// \returns a malloc'd URL, or nullptr when the URI is already absolute
char *
ConnStateData::prepareTlsSwitchingURL(const Http1::RequestParserPointer &hp)
{
    Must(switchedToHttps());

    // a non-empty URI not starting with '/' is not origin-form
    if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
        return nullptr; /* already in good shape */

    // prefer the client-supplied Host header when available
    char *uri = buildUrlFromHost(this, hp);
#if USE_OPENSSL
    if (!uri) {
        // no Host header: fall back to the recorded TLS connect destination
        Must(tlsConnectPort);
        Must(!tlsConnectHostOrIp.isEmpty());
        SBuf useHost;
        // prefer the TLS SNI name over the raw connect host/IP when present
        if (!tlsClientSni().isEmpty())
            useHost = tlsClientSni();
        else
            useHost = tlsConnectHostOrIp;

        const SBuf &scheme = AnyP::UriScheme(transferProtocol.protocol).image();
        const int url_sz = scheme.length() + useHost.length() + hp->requestUri().length() + 32;
        uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH ":%hu" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme),
                 SQUIDSBUFPRINT(useHost),
                 *tlsConnectPort,
                 SQUIDSBUFPRINT(hp->requestUri()));
    }
#endif
    if (uri)
        debugs(33, 5, "TLS switching host rewrite: " << uri);
    return uri;
}
1228
1229static char *
1230prepareTransparentURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1231{
1232 // TODO Must() on URI !empty when the parser supports throw. For now avoid assert().
1233 if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
1234 return nullptr; /* already in good shape */
1235
1236 char *uri = buildUrlFromHost(conn, hp);
1237 if (!uri) {
1238 /* Put the local socket IP address as the hostname. */
1239 static char ipbuf[MAX_IPSTRLEN];
1240 conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
1241 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1242 const int url_sz = sizeof(ipbuf) + hp->requestUri().length() + 32;
1243 uri = static_cast<char *>(xcalloc(url_sz, 1));
1244 snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
1245 SQUIDSBUFPRINT(scheme),
1246 ipbuf, conn->clientConnection->local.port(), SQUIDSBUFPRINT(hp->requestUri()));
1247 }
1248
1249 if (uri)
1250 debugs(33, 5, "TRANSPARENT REWRITE: " << uri);
1251 return uri;
1252}
1253
/// Parses one HTTP/1.x request head from inBuf and wires the resulting
/// transaction into the client stream.
/// \returns nullptr when more bytes are needed to finish the request line,
///          an error Stream (via abortRequestParsing) on parse failures, and
///          a fully initialized Http::Stream on success.
Http::Stream *
ConnStateData::parseHttpRequest(const Http1::RequestParserPointer &hp)
{
    /* Attempt to parse the first line; this will define where the method, url, version and header begin */
    {
        Must(hp);

        // keep a copy of the raw bytes in case on_unsupported_protocol
        // later decides to tunnel them verbatim
        if (preservingClientData_)
            preservedClientData = inBuf;

        const bool parsedOk = hp->parse(inBuf);

        // sync the buffers after parsing.
        inBuf = hp->remaining();

        if (hp->needsMoreData()) {
            debugs(33, 5, "Incomplete request, waiting for end of request line");
            return nullptr;
        }

        if (!parsedOk) {
            const bool tooBig =
                hp->parseStatusCode == Http::scRequestHeaderFieldsTooLarge ||
                hp->parseStatusCode == Http::scUriTooLong;
            auto result = abortRequestParsing(
                              tooBig ? "error:request-too-large" : "error:invalid-request");
            // assume that remaining leftovers belong to this bad request
            if (!inBuf.isEmpty())
                consumeInput(inBuf.length());
            return result;
        }
    }

    /* We know the whole request is in parser now */
    debugs(11, 2, "HTTP Client " << clientConnection);
    debugs(11, 2, "HTTP Client REQUEST:\n---------\n" <<
           hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol() << "\n" <<
           hp->mimeHeader() <<
           "\n----------");

    /* deny CONNECT via accelerated ports */
    if (hp->method() == Http::METHOD_CONNECT && port != nullptr && port->flags.accelSurrogate) {
        debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << transferProtocol << " Accelerator port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    /* HTTP/2 connection magic prefix starts with "PRI ".
     * Deny "PRI" method if used in HTTP/1.x or 0.9 versions.
     * If seen it signals a broken client or proxy has corrupted the traffic.
     */
    if (hp->method() == Http::METHOD_PRI && hp->messageProtocol() < Http::ProtocolVersion(2,0)) {
        debugs(33, DBG_IMPORTANT, "WARNING: PRI method received on " << transferProtocol << " port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    if (hp->method() == Http::METHOD_NONE) {
        debugs(33, DBG_IMPORTANT, "WARNING: Unsupported method: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:unsupported-request-method");
    }

    // Process headers after request line
    debugs(33, 3, "complete request received. " <<
           "prefix_sz = " << hp->messageHeaderSize() <<
           ", request-line-size=" << hp->firstLineSize() <<
           ", mime-header-size=" << hp->headerBlockSize() <<
           ", mime header block:\n" << hp->mimeHeader() << "\n----------");

    /* Ok, all headers are received */
    ClientHttpRequest *http = new ClientHttpRequest(this);

    http->req_sz = hp->messageHeaderSize();
    Http::Stream *result = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = result->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // assemble the client stream: reply generator upstream, socket downstream
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = result;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    /* set url */
    debugs(33,5, "Prepare absolute URL from " <<
           (transparent()?"intercept":(port->flags.accelSurrogate ? "accel":"")));
    /* Rewrite the URL in transparent or accelerator mode */
    /* NP: there are several cases to traverse here:
     * - standard mode (forward proxy)
     * - transparent mode (TPROXY)
     * - transparent mode with failures
     * - intercept mode (NAT)
     * - intercept mode with failures
     * - accelerator mode (reverse proxy)
     * - internal relative-URL
     * - mixed combos of the above with internal URL
     * - remote interception with PROXY protocol
     * - remote reverse-proxy with PROXY protocol
     */
    if (switchedToHttps()) {
        http->uri = prepareTlsSwitchingURL(hp);
    } else if (transparent()) {
        /* intercept or transparent mode, properly working with no failures */
        http->uri = prepareTransparentURL(this, hp);

    } else if (internalCheck(hp->requestUri())) { // NP: only matches relative-URI
        /* internal URL mode */
        // XXX: By prepending our name and port, we create an absolute URL
        // that may mismatch the (yet unparsed) Host header in the request.
        http->uri = xstrdup(internalLocalUri(nullptr, hp->requestUri()));

    } else if (port->flags.accelSurrogate) {
        /* accelerator mode */
        http->uri = prepareAcceleratedURL(this, hp);
        http->flags.accel = true;
    }

    if (!http->uri) {
        /* No special rewrites have been applied above, use the
         * requested url. may be rewritten later, so make extra room */
        int url_sz = hp->requestUri().length() + Config.appendDomainLen + 5;
        http->uri = (char *)xcalloc(url_sz, 1);
        SBufToCstring(http->uri, hp->requestUri());
    }

    result->flags.parsed_ok = 1;
    return result;
}
1387
1388bool
1389ConnStateData::shouldCloseOnEof() const
1390{
1391 if (pipeline.empty() && inBuf.isEmpty()) {
1392 debugs(33, 4, "yes, without active requests and unparsed input");
1393 return true;
1394 }
1395
1396 if (!Config.onoff.half_closed_clients) {
1397 debugs(33, 3, "yes, without half_closed_clients");
1398 return true;
1399 }
1400
1401 // Squid currently tries to parse (possibly again) a partially received
1402 // request after an EOF with half_closed_clients. To give that last parse in
1403 // afterClientRead() a chance, we ignore partially parsed requests here.
1404 debugs(33, 3, "no, honoring half_closed_clients");
1405 return false;
1406}
1407
/// Removes the given number of already-processed bytes from the front of
/// inBuf. byteCount must be positive and no larger than the buffered amount.
void
ConnStateData::consumeInput(const size_t byteCount)
{
    assert(byteCount > 0 && byteCount <= inBuf.length());
    inBuf.consume(byteCount);
    debugs(33, 5, "inBuf has " << inBuf.length() << " unused bytes");
}
1415
1416void
1417ConnStateData::clientAfterReadingRequests()
1418{
1419 // Were we expecting to read more request body from half-closed connection?
1420 if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection->fd)) {
1421 debugs(33, 3, "truncated body: closing half-closed " << clientConnection);
1422 clientConnection->close();
1423 return;
1424 }
1425
1426 if (flags.readMore)
1427 readSomeData();
1428}
1429
1430void
1431ConnStateData::quitAfterError(HttpRequest *request)
1432{
1433 // From HTTP p.o.v., we do not have to close after every error detected
1434 // at the client-side, but many such errors do require closure and the
1435 // client-side code is bad at handling errors so we play it safe.
1436 if (request)
1437 request->flags.proxyKeepalive = false;
1438 flags.readMore = false;
1439 debugs(33,4, "Will close after error: " << clientConnection);
1440}
1441
#if USE_OPENSSL
/// Sends a previously prepared SslBump error response, if one is pending,
/// or detects and reports a certificate/host domain mismatch.
/// \returns true when an error response was (or will be) sent to the client
bool ConnStateData::serveDelayedError(Http::Stream *context)
{
    ClientHttpRequest *http = context->http;

    if (!sslServerBump)
        return false;

    assert(sslServerBump->entry);
    // Did we create an error entry while processing CONNECT?
    if (!sslServerBump->entry->isEmpty()) {
        quitAfterError(http->request);

        // Get the saved error entry and send it to the client by replacing the
        // ClientHttpRequest store entry with it.
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert(repContext);
        // fixed typo: this message used to read "delated error"
        debugs(33, 5, "Responding with delayed error for " << http->uri);
        repContext->setReplyToStoreEntry(sslServerBump->entry, "delayed SslBump error");

        // Get error details from the fake certificate-peeking request.
        http->request->error.update(sslServerBump->request->error);
        context->pullData();
        return true;
    }

    // In bump-server-first mode, we have not necessarily seen the intended
    // server name at certificate-peeking time. Check for domain mismatch now,
    // when we can extract the intended name from the bumped HTTP request.
    if (const Security::CertPointer &srvCert = sslServerBump->serverCert) {
        HttpRequest *request = http->request;
        const auto host = request->url.parsedHost();
        if (host && Ssl::HasSubjectName(*srvCert, *host)) {
            debugs(33, 5, "certificate matches requested host: " << *host);
            return false;
        } else {
            debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
                   "does not match request target " << RawPointer(host));

            // sslproxy_cert_error may allow serving despite the mismatch
            bool allowDomainMismatch = false;
            if (Config.ssl_client.cert_error) {
                ACLFilledChecklist check(Config.ssl_client.cert_error, nullptr);
                const auto sslErrors = std::make_unique<Security::CertErrors>(Security::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH, srvCert));
                check.sslErrors = sslErrors.get();
                clientAclChecklistFill(check, http);
                allowDomainMismatch = check.fastCheck().allowed();
            }

            if (!allowDomainMismatch) {
                quitAfterError(request);

                clientStreamNode *node = context->getClientReplyContext();
                clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
                assert (repContext);

                request->hier = sslServerBump->request->hier;

                // Create an error object and fill it
                const auto err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request, http->al);
                err->src_addr = clientConnection->remote;
                const Security::ErrorDetail::Pointer errDetail = new Security::ErrorDetail(
                    SQUID_X509_V_ERR_DOMAIN_MISMATCH,
                    srvCert, nullptr);
                updateError(ERR_SECURE_CONNECT_FAIL, errDetail);
                repContext->setReplyToError(request->method, err);
                assert(context->http->out.offset == 0);
                context->pullData();
                return true;
            }
        }
    }

    return false;
}
#endif // USE_OPENSSL
1518
/// initiate tunneling if possible or return false otherwise
/// Consults on_unsupported_protocol; when it selects tunneling, replays the
/// preserved client bytes to the origin instead of sending an error reply.
bool
ConnStateData::tunnelOnError(const err_type requestError)
{
    if (!Config.accessList.on_unsupported_protocol) {
        debugs(33, 5, "disabled; send error: " << requestError);
        return false;
    }

    // without the preserved raw bytes, we cannot replay the client traffic
    if (!preservingClientData_) {
        debugs(33, 3, "may have forgotten client data; send error: " << requestError);
        return false;
    }

    ACLFilledChecklist checklist(Config.accessList.on_unsupported_protocol, nullptr);
    checklist.requestErrorType = requestError;
    fillChecklist(checklist);
    const auto &answer = checklist.fastCheck();
    // kind == 1 appears to select the "tunnel" action of
    // on_unsupported_protocol -- confirm against the directive's parser
    if (answer.allowed() && answer.kind == 1) {
        debugs(33, 3, "Request will be tunneled to server");
        const auto context = pipeline.front();
        const auto http = context ? context->http : nullptr;
        const auto request = http ? http->request : nullptr;
        if (context)
            context->finished(); // Will remove from pipeline queue
        // stop monitoring the socket; the tunnel takes over the raw bytes
        Comm::SetSelect(clientConnection->fd, COMM_SELECT_READ, nullptr, nullptr, 0);
        return initiateTunneledRequest(request, "unknown-protocol", preservedClientData);
    }
    debugs(33, 3, "denied; send error: " << requestError);
    return false;
}
1550
1551void
1552clientProcessRequestFinished(ConnStateData *conn, const HttpRequest::Pointer &request)
1553{
1554 /*
1555 * DPW 2007-05-18
1556 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
1557 * to here because calling comm_reset_close() causes http to
1558 * be freed before accessing.
1559 */
1560 if (request != nullptr && request->flags.resetTcp && Comm::IsConnOpen(conn->clientConnection)) {
1561 debugs(33, 3, "Sending TCP RST on " << conn->clientConnection);
1562 conn->flags.readMore = false;
1563 comm_reset_close(conn->clientConnection);
1564 }
1565}
1566
/// Finalizes a parsed request (connection manager, flags, sanity checks,
/// request-body plumbing) and starts the callout sequence via doCallouts().
/// Also used for FTP requests, signalled by a nil hp.
void
clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
{
    ClientHttpRequest *http = context->http;
    bool mustReplyToOptions = false;
    bool expectBody = false;

    // We already have the request parsed and checked, so we
    // only need to go through the final body/conn setup to doCallouts().
    assert(http->request);
    HttpRequest::Pointer request = http->request;

    // temporary hack to avoid splitting this huge function with sensitive code
    const bool isFtp = !hp;

    // Some blobs below are still HTTP-specific, but we would have to rewrite
    // this entire function to remove them from the FTP code path. Connection
    // setup and body_pipe preparation blobs are needed for FTP.

    request->manager(conn, http->al);

    request->flags.accelerated = http->flags.accel;
    request->flags.sslBumped=conn->switchedToHttps();
    // TODO: decouple http->flags.accel from request->flags.sslBumped
    request->flags.noDirect = (request->flags.accelerated && !request->flags.sslBumped) ?
                              !conn->port->allow_direct : 0;
    // record where the request came from (FTP, HTTPS, or plain HTTP)
    request->sources |= isFtp ? Http::Message::srcFtp :
                        ((request->flags.sslBumped || conn->port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    if (request->flags.sslBumped) {
        // bumped requests inherit credentials established at CONNECT time
        if (conn->getAuth() != nullptr)
            request->auth_user_request = conn->getAuth();
    }
#endif

    http->checkForInternalAccess();

    if (!isFtp) {
        // XXX: for non-HTTP messages instantiate a different Http::Message child type
        // for now Squid only supports HTTP requests
        const AnyP::ProtocolVersion &http_ver = hp->messageProtocol();
        assert(request->http_ver.protocol == http_ver.protocol);
        request->http_ver.major = http_ver.major;
        request->http_ver.minor = http_ver.minor;
    }

    // OPTIONS with Max-Forwards: 0 must be answered by this hop
    mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
                         (request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
    if (!urlCheckRequest(request.getRaw()) || mustReplyToOptions) {
        clientStreamNode *node = context->getClientReplyContext();
        conn->quitAfterError(request.getRaw());
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        repContext->setReplyToError(ERR_UNSUP_REQ, Http::scNotImplemented, nullptr,
                                    conn, request.getRaw(), nullptr, nullptr);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    // reject requests with invalid message framing (e.g. bad Content-Length)
    const auto frameStatus = request->checkEntityFraming();
    if (frameStatus != Http::scNone) {
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        conn->quitAfterError(request.getRaw());
        repContext->setReplyToError(ERR_INVALID_REQ, frameStatus, nullptr, conn, request.getRaw(), nullptr, nullptr);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    clientSetKeepaliveFlag(http);
    // Let tunneling code be fully responsible for CONNECT requests
    if (http->request->method == Http::METHOD_CONNECT) {
        context->mayUseConnection(true);
        conn->flags.readMore = false;
    }

#if USE_OPENSSL
    if (conn->switchedToHttps() && conn->serveDelayedError(context)) {
        clientProcessRequestFinished(conn, request);
        return;
    }
#endif

    /* Do we expect a request-body? */
    const auto chunked = request->header.chunked();
    expectBody = chunked || request->content_length > 0;
    if (!context->mayUseConnection() && expectBody) {
        request->body_pipe = conn->expectRequestBody(
                                 chunked ? -1 : request->content_length);

        /* Is it too large? */
        if (!chunked && // if chunked, we will check as we accumulate
                clientIsRequestBodyTooLargeForPolicy(request->content_length)) {
            clientStreamNode *node = context->getClientReplyContext();
            clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
            assert (repContext);
            conn->quitAfterError(request.getRaw());
            repContext->setReplyToError(ERR_TOO_BIG,
                                        Http::scContentTooLarge, nullptr,
                                        conn, http->request, nullptr, nullptr);
            assert(context->http->out.offset == 0);
            context->pullData();
            clientProcessRequestFinished(conn, request);
            return;
        }

        if (!isFtp) {
            // We may stop producing, comm_close, and/or call setReplyToError()
            // below, so quit on errors to avoid http->doCallouts()
            if (!conn->handleRequestBodyData()) {
                clientProcessRequestFinished(conn, request);
                return;
            }

            if (!request->body_pipe->productionEnded()) {
                debugs(33, 5, "need more request body");
                context->mayUseConnection(true);
                assert(conn->flags.readMore);
            }
        }
    }

    http->calloutContext = new ClientRequestContext(http);

    http->doCallouts();

    clientProcessRequestFinished(conn, request);
}
1700
/// Appends a newly parsed transaction to the pipeline, transferring any
/// stored connection-level (bare) error to it first.
void
ConnStateData::add(const Http::StreamPointer &context)
{
    debugs(33, 3, context << " to " << pipeline.count() << '/' << pipeline.nrequests);
    if (bareError) {
        // the new transaction adopts the connection-level error (once)
        debugs(33, 5, "assigning " << bareError);
        assert(context);
        assert(context->http);
        context->http->updateError(bareError);
        bareError.clear();
    }
    pipeline.add(context);
}
1714
1715int
1716ConnStateData::pipelinePrefetchMax() const
1717{
1718 // TODO: Support pipelined requests through pinned connections.
1719 if (pinning.pinned)
1720 return 0;
1721 return Config.pipeline_max_prefetch;
1722}
1723
1724/**
1725 * Limit the number of concurrent requests.
1726 * \return true when there are available position(s) in the pipeline queue for another request.
1727 * \return false when the pipeline queue is full or disabled.
1728 */
1729bool
1730ConnStateData::concurrentRequestQueueFilled() const
1731{
1732 const int existingRequestCount = pipeline.count();
1733
1734 // default to the configured pipeline size.
1735 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
1736#if USE_OPENSSL
1737 const int internalRequest = (transparent() && sslBumpMode == Ssl::bumpSplice) ? 1 : 0;
1738#else
1739 const int internalRequest = 0;
1740#endif
1741 const int concurrentRequestLimit = pipelinePrefetchMax() + 1 + internalRequest;
1742
1743 // when queue filled already we can't add more.
1744 if (existingRequestCount >= concurrentRequestLimit) {
1745 debugs(33, 3, clientConnection << " max concurrent requests reached (" << concurrentRequestLimit << ")");
1746 debugs(33, 5, clientConnection << " deferring new request until one is done");
1747 return true;
1748 }
1749
1750 return false;
1751}
1752
1753/**
1754 * Perform proxy_protocol_access ACL tests on the client which
1755 * connected to PROXY protocol port to see if we trust the
1756 * sender enough to accept their PROXY header claim.
1757 */
1758bool
1759ConnStateData::proxyProtocolValidateClient()
1760{
1761 if (!Config.accessList.proxyProtocol)
1762 return proxyProtocolError("PROXY client not permitted by default ACL");
1763
1764 ACLFilledChecklist ch(Config.accessList.proxyProtocol, nullptr);
1765 fillChecklist(ch);
1766 if (!ch.fastCheck().allowed())
1767 return proxyProtocolError("PROXY client not permitted by ACLs");
1768
1769 return true;
1770}
1771
/**
 * Perform cleanup on PROXY protocol errors.
 * If header parsing hits a fatal error terminate the connection,
 * otherwise wait for more data.
 * \param msg the fatal error to report, or nil when no error occurred
 * \returns false always, so callers can `return proxyProtocolError(...)`
 */
bool
ConnStateData::proxyProtocolError(const char *msg)
{
    if (msg) {
        // This is important to know, but maybe not so much that flooding the log is okay.
#if QUIET_PROXY_PROTOCOL
        // display the first of every 32 occurrences at level 1, the others at level 2.
        static uint8_t hide = 0;
        debugs(33, (hide++ % 32 == 0 ? DBG_IMPORTANT : 2), msg << " from " << clientConnection);
#else
        debugs(33, DBG_IMPORTANT, msg << " from " << clientConnection);
#endif
        mustStop(msg);
    }
    return false;
}
1793
1794/// Attempts to extract a PROXY protocol header from the input buffer and,
1795/// upon success, stores the parsed header in proxyProtocolHeader_.
1796/// \returns true if the header was successfully parsed
1797/// \returns false if more data is needed to parse the header or on error
1798bool
1799ConnStateData::parseProxyProtocolHeader()
1800{
1801 try {
1802 const auto parsed = ProxyProtocol::Parse(inBuf);
1803 proxyProtocolHeader_ = parsed.header;
1804 assert(bool(proxyProtocolHeader_));
1805 inBuf.consume(parsed.size);
1806 needProxyProtocolHeader_ = false;
1807 if (proxyProtocolHeader_->hasForwardedAddresses()) {
1808 clientConnection->local = proxyProtocolHeader_->destinationAddress;
1809 clientConnection->remote = proxyProtocolHeader_->sourceAddress;
1810 if ((clientConnection->flags & COMM_TRANSPARENT))
1811 clientConnection->flags ^= COMM_TRANSPARENT; // prevent TPROXY spoofing of this new IP.
1812 debugs(33, 5, "PROXY/" << proxyProtocolHeader_->version() << " upgrade: " << clientConnection);
1813 }
1814 } catch (const Parser::BinaryTokenizer::InsufficientInput &) {
1815 debugs(33, 3, "PROXY protocol: waiting for more than " << inBuf.length() << " bytes");
1816 return false;
1817 } catch (const std::exception &e) {
1818 return proxyProtocolError(e.what());
1819 }
1820 return true;
1821}
1822
1823void
1824ConnStateData::receivedFirstByte()
1825{
1826 if (receivedFirstByte_)
1827 return;
1828
1829 receivedFirstByte_ = true;
1830 resetReadTimeout(Config.Timeout.request);
1831}
1832
/// Attempt to parse one or more requests from the input buffer.
/// May close the connection.
void
ConnStateData::parseRequests()
{
    debugs(33, 5, clientConnection << ": attempting to parse");

    // Loop while we have read bytes that are not needed for producing the body
    // On errors, bodyPipe may become nil, but readMore will be cleared
    while (!inBuf.isEmpty() && !bodyPipe && flags.readMore) {

        // Prohibit concurrent requests when using a pinned to-server connection
        // because our Client classes do not support request pipelining.
        if (pinning.pinned && !pinning.readHandler) {
            debugs(33, 3, clientConnection << " waits for busy " << pinning.serverConnection);
            break;
        }

        /* Limit the number of concurrent requests */
        if (concurrentRequestQueueFilled())
            break;

        // try to parse the PROXY protocol header magic bytes
        if (needProxyProtocolHeader_) {
            if (!parseProxyProtocolHeader())
                break;

            // we have been waiting for PROXY to provide client-IP
            // for some lookups, ie rDNS
            whenClientIpKnown();

            // Done with PROXY protocol which has cleared preservingClientData_.
            // If the next protocol supports on_unsupported_protocol, then its
            // parseOneRequest() must reset preservingClientData_.
            assert(!preservingClientData_);
        }

        if (Http::StreamPointer context = parseOneRequest()) {
            debugs(33, 5, clientConnection << ": done parsing a request");
            // a successful parse earns the connection more lifetime
            extendLifetime();
            context->registerWithConn();

#if USE_OPENSSL
            if (switchedToHttps())
                parsedBumpedRequestCount++;
#endif

            processParsedRequest(context);

            if (context->mayUseConnection()) {
                debugs(33, 3, "Not parsing new requests, as this request may need the connection");
                break;
            }
        } else {
            // incomplete request: wait for more bytes (within the size cap)
            debugs(33, 5, clientConnection << ": not enough request data: " <<
                   inBuf.length() << " < " << Config.maxRequestHeaderSize);
            Must(inBuf.length() < Config.maxRequestHeaderSize);
            break;
        }
    }

    debugs(33, 7, "buffered leftovers: " << inBuf.length());

    if (isOpen() && commIsHalfClosed(clientConnection->fd)) {
        if (pipeline.empty()) {
            // we processed what we could parse, and no more data is coming
            debugs(33, 5, "closing half-closed without parsed requests: " << clientConnection);
            clientConnection->close();
        } else {
            // we parsed what we could, and no more data is coming
            debugs(33, 5, "monitoring half-closed while processing parsed requests: " << clientConnection);
            flags.readMore = false; // may already be false
        }
    }
}
1908
/// Dispatches freshly read client bytes: TLS handshake bytes (when bumping)
/// or HTTP request parsing. May close the connection as a side effect.
void
ConnStateData::afterClientRead()
{
#if USE_OPENSSL
    // while bumping, newly read bytes belong to the TLS handshake parser
    if (parsingTlsHandshake) {
        parseTlsHandshake();
        return;
    }
#endif

    /* Process next request */
    if (pipeline.empty())
        fd_note(clientConnection->fd, "Reading next request");

    parseRequests();

    // parseRequests() may have closed the connection
    if (!isOpen())
        return;

    clientAfterReadingRequests();
}
1930
1931/**
1932 * called when new request data has been read from the socket
1933 *
1934 * \retval false called comm_close or setReplyToError (the caller should bail)
1935 * \retval true we did not call comm_close or setReplyToError
1936 */
1937bool
1938ConnStateData::handleReadData()
1939{
1940 // if we are reading a body, stuff data into the body pipe
1941 if (bodyPipe != nullptr)
1942 return handleRequestBodyData();
1943 return true;
1944}
1945
/**
 * called when new request body data has been buffered in inBuf
 * may close the connection if we were closing and piped everything out
 *
 * \retval false called comm_close or setReplyToError (the caller should bail)
 * \retval true we did not call comm_close or setReplyToError
 */
bool
ConnStateData::handleRequestBodyData()
{
    assert(bodyPipe != nullptr);

    if (bodyParser) { // chunked encoding
        if (const err_type error = handleChunkedRequestBody()) {
            abortChunkedRequestBody(error);
            return false;
        }
    } else { // identity encoding
        debugs(33,5, "handling plain request body for " << clientConnection);
        // move as many buffered bytes as the pipe will accept right now
        const auto putSize = bodyPipe->putMoreData(inBuf.rawContent(), inBuf.length());
        if (putSize > 0)
            consumeInput(putSize);

        if (!bodyPipe->mayNeedMoreData()) {
            // BodyPipe will clear us automagically when we produced everything
            bodyPipe = nullptr;
        }
    }

    if (!bodyPipe) {
        debugs(33,5, "produced entire request body for " << clientConnection);

        if (const char *reason = stoppedSending()) {
            /* we've finished reading like good clients,
             * now do the close that initiateClose initiated.
             */
            debugs(33, 3, "closing for earlier sending error: " << reason);
            clientConnection->close();
            return false;
        }
    }

    return true;
}
1990
/// parses available chunked encoded body bytes, checks size, returns errors
err_type
ConnStateData::handleChunkedRequestBody()
{
    debugs(33, 7, "chunked from " << clientConnection << ": " << inBuf.length());

    try { // the parser will throw on errors

        if (inBuf.isEmpty()) // nothing to do
            return ERR_NONE;

        // check out the pipe buffer so the parser can append dechunked bytes
        BodyPipeCheckout bpc(*bodyPipe);
        bodyParser->setPayloadBuffer(&bpc.buf);
        const bool parsed = bodyParser->parse(inBuf);
        inBuf = bodyParser->remaining(); // sync buffers
        bpc.checkIn();

        // dechunk then check: the size limit applies to _dechunked_ content
        if (clientIsRequestBodyTooLargeForPolicy(bodyPipe->producedSize()))
            return ERR_TOO_BIG;

        if (parsed) {
            finishDechunkingRequest(true);
            Must(!bodyPipe);
            return ERR_NONE; // nil bodyPipe implies body end for the caller
        }

        // if chunk parser needs data, then the body pipe must need it too
        Must(!bodyParser->needsMoreData() || bodyPipe->mayNeedMoreData());

        // if parser needs more space and we can consume nothing, we will stall
        Must(!bodyParser->needsMoreSpace() || bodyPipe->buf().hasContent());
    } catch (...) { // TODO: be more specific
        // NOTE(review): this message lacks a separating space before the
        // status and, unlike the debugs() at the end of this function, does
        // not dereference status() -- confirm which form is intended
        debugs(33, 3, "malformed chunks" << bodyPipe->status());
        return ERR_INVALID_REQ;
    }

    debugs(33, 7, "need more chunked data" << *bodyPipe->status());
    return ERR_NONE;
}
2031
/// quit on errors related to chunked request body handling
void
ConnStateData::abortChunkedRequestBody(const err_type error)
{
    // tell the dechunking machinery that the body will never be completed
    finishDechunkingRequest(false);

    // XXX: The code below works if we fail during initial request parsing,
    // but if we fail when the server connection is used already, the server may send
    // us its response too, causing various assertions. How to prevent that?
#if WE_KNOW_HOW_TO_SEND_ERRORS
    // NOTE(review): this branch is compiled out; identifiers such as
    // HTTP_BAD_REQUEST and CachePeer look stale -- verify before enabling
    Http::StreamPointer context = pipeline.front();
    if (context != NULL && !context->http->out.offset) { // output nothing yet
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext*>(node->data.getRaw());
        assert(repContext);
        const Http::StatusCode scode = (error == ERR_TOO_BIG) ?
                                       Http::scContentTooLarge : HTTP_BAD_REQUEST;
        repContext->setReplyToError(error, scode,
                                    repContext->http->uri,
                                    CachePeer,
                                    repContext->http->request,
                                    inBuf, nullptr);
        context->pullData();
    } else {
        // close or otherwise we may get stuck as nobody will notice the error?
        comm_reset_close(clientConnection);
    }
#else
    debugs(33, 3, "aborting chunked request without error " << error);
    comm_reset_close(clientConnection);
#endif
    flags.readMore = false;
}
2065
2066void
2067ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer )
2068{
2069 // request reader may get stuck waiting for space if nobody consumes body
2070 if (bodyPipe != nullptr)
2071 bodyPipe->enableAutoConsumption();
2072
2073 // kids extend
2074}
2075
/** general lifetime handler for HTTP requests */
void
ConnStateData::requestTimeout(const CommTimeoutCbParams &io)
{
    if (!Comm::IsConnOpen(io.conn))
        return;

    // distinguish "never sent a byte" from "started a request but stalled"
    const err_type error = receivedFirstByte_ ? ERR_REQUEST_PARSE_TIMEOUT : ERR_REQUEST_START_TIMEOUT;
    updateError(error);
    if (tunnelOnError(error))
        return;

    /*
     * Just close the connection to not confuse browsers
     * using persistent connections. Some browsers open
     * a connection and then do not use it until much
     * later (presumably because the request triggering
     * the open has already been completed on another
     * connection)
     */
    debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
    io.conn->close();
}
2099
2100void
2101ConnStateData::lifetimeTimeout(const CommTimeoutCbParams &io)
2102{
2103 debugs(33, DBG_IMPORTANT, "WARNING: Closing client connection due to lifetime timeout" <<
2104 Debug::Extra << "connection: " << io.conn);
2105
2106 LogTagsErrors lte;
2107 lte.timedout = true;
2108 terminateAll(ERR_LIFETIME_EXP, lte);
2109}
2110
ConnStateData::ConnStateData(const MasterXaction::Pointer &xact) :
    AsyncJob("ConnStateData"), // kids overwrite
    Server(xact)
#if USE_OPENSSL
    , tlsParser(Security::HandshakeParser::fromClient)
#endif
{
    // store the details required for creating more MasterXaction objects as new requests come in
    log_addr = xact->tcpClient->remote;
    // apply the configured client_netmask to the address used for logging
    log_addr.applyClientMask(Config.Addrs.client_netmask);

    // register to receive notice of Squid signal events
    // which may affect long persisting client connections
    registerRunner();
}
2126
/// AsyncJob entry point: finish setting up the accepted client connection
/// (PMTU discovery tweaks, close handler, PROXY protocol expectations).
void
ConnStateData::start()
{
    BodyProducer::start();
    HttpControlMsgSink::start();

    // optionally disable Path MTU discovery, as configured for this port
    if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF &&
            (transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) {
#if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
        int i = IP_PMTUDISC_DONT;
        if (setsockopt(clientConnection->fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof(i)) < 0) {
            int xerrno = errno;
            debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection << " : " << xstrerr(xerrno));
        }
#else
        static bool reported = false;

        if (!reported) {
            debugs(33, DBG_IMPORTANT, "WARNING: Path MTU discovery disabling is not supported on your platform.");
            reported = true;
        }
#endif
    }

    // get notified when the client socket is closed for any reason
    typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
    AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, ConnStateData::connStateClosed);
    comm_add_close_handler(clientConnection->fd, call);

    needProxyProtocolHeader_ = port->flags.proxySurrogate;
    if (needProxyProtocolHeader_) {
        if (!proxyProtocolValidateClient()) // will close the connection on failure
            return;
    } else
        whenClientIpKnown();

    // requires needProxyProtocolHeader_ which is initialized above
    preservingClientData_ = shouldPreserveClientData();
}
2165
/// Runs once the effective client address is known (immediately at start(),
/// or after a PROXY protocol header supplied it): rDNS lookup, client-db
/// registration, and client delay pool assignment.
void
ConnStateData::whenClientIpKnown()
{
    debugs(33, 7, clientConnection->remote);
    if (Dns::ResolveClientAddressesAsap)
        fqdncache_gethostbyaddr(clientConnection->remote, FQDN_LOOKUP_IF_MISS);

    // record this client connection in the client database
    clientdbEstablished(clientConnection->remote, 1);

#if USE_DELAY_POOLS
    fd_table[clientConnection->fd].clientInfo = nullptr;

    if (!Config.onoff.client_db)
        return; // client delay pools require client_db

    const auto &pools = ClientDelayPools::Instance()->pools;
    if (pools.size()) {
        ACLFilledChecklist ch(nullptr, nullptr);
        fillChecklist(ch);
        // TODO: we check early to limit error response bandwidth but we
        // should recheck when we can honor delay_pool_uses_indirect
        for (unsigned int pool = 0; pool < pools.size(); ++pool) {

            /* pools require explicit 'allow' to assign a client into them */
            if (pools[pool]->access) {
                ch.changeAcl(pools[pool]->access);
                const auto &answer = ch.fastCheck();
                if (answer.allowed()) {

                    /* request client information from db after we did all checks
                       this will save hash lookup if client failed checks */
                    ClientInfo * cli = clientdbGetInfo(clientConnection->remote);
                    assert(cli);

                    /* put client info in FDE */
                    fd_table[clientConnection->fd].clientInfo = cli;

                    /* setup write limiter for this request */
                    const double burst = floor(0.5 +
                                               (pools[pool]->highwatermark * Config.ClientDelay.initial)/100.0);
                    cli->setWriteLimiter(pools[pool]->rate, burst, pools[pool]->highwatermark);
                    break; // a client belongs to the first pool that allows it
                } else {
                    debugs(83, 4, "Delay pool " << pool << " skipped because ACL " << answer);
                }
            }
        }
    }
#endif

    // kids must extend to actually start doing something (e.g., reading)
}
2218
/// Attempts one TLS accept (server-side handshake) step on the client
/// connection and, in OpenSSL builds, checkpoints TLS key logging.
/// \returns the Security::Accept() outcome for the caller to dispatch on
Security::IoResult
ConnStateData::acceptTls()
{
    const auto handshakeResult = Security::Accept(*clientConnection);

#if USE_OPENSSL
    // log ASAP, even if the handshake has not completed (or failed)
    const auto fd = clientConnection->fd;
    assert(fd >= 0);
    keyLogger.checkpoint(*fd_table[fd].ssl, *this);
#else
    // TODO: Support fd_table[fd].ssl dereference in other builds.
#endif

    return handshakeResult;
}
2235
2236/** Handle a new connection on an HTTP socket. */
2237void
2238httpAccept(const CommAcceptCbParams &params)
2239{
2240 Assure(params.port);
2241
2242 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2243
2244 if (params.flag != Comm::OK) {
2245 // Its possible the call was still queued when the client disconnected
2246 debugs(33, 2, params.port->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2247 return;
2248 }
2249
2250 debugs(33, 4, params.conn << ": accepted");
2251 fd_note(params.conn->fd, "client http connect");
2252 const auto xact = MasterXaction::MakePortful(params.port);
2253 xact->tcpClient = params.conn;
2254
2255 // Socket is ready, setup the connection manager to start using it
2256 auto *srv = Http::NewServer(xact);
2257 // XXX: do not abandon the MasterXaction object
2258 AsyncJob::Start(srv); // usually async-calls readSomeData()
2259}
2260
2261/// Create TLS connection structure and update fd_table
2262static bool
2263httpsCreate(const ConnStateData *connState, const Security::ContextPointer &ctx)
2264{
2265 const auto conn = connState->clientConnection;
2266 if (Security::CreateServerSession(ctx, conn, connState->port->secure, "client https start")) {
2267 debugs(33, 5, "will negotiate TLS on " << conn);
2268 return true;
2269 }
2270
2271 debugs(33, DBG_IMPORTANT, "ERROR: could not create TLS server context for " << conn);
2272 conn->close();
2273 return false;
2274}
2275
/** negotiate an SSL connection */
static void
clientNegotiateSSL(int fd, void *data)
{
    ConnStateData *conn = (ConnStateData *)data;

    // advance the handshake by one step; re-arm select on WANT_READ/WRITE
    const auto handshakeResult = conn->acceptTls();
    switch (handshakeResult.category) {
    case Security::IoResult::ioSuccess:
        break;

    case Security::IoResult::ioWantRead:
        Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_READ, clientNegotiateSSL, conn, 0);
        return;

    case Security::IoResult::ioWantWrite:
        Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, conn, 0);
        return;

    case Security::IoResult::ioError:
        debugs(83, (handshakeResult.important ? Important(62) : 2), "ERROR: Cannot accept a TLS connection" <<
               Debug::Extra << "problem: " << WithExtras(handshakeResult));
        // TODO: No ConnStateData::tunnelOnError() on this forward-proxy code
        // path because we cannot know the intended connection target?
        conn->updateError(ERR_SECURE_ACCEPT_FAIL, handshakeResult.errorDetail);
        conn->clientConnection->close();
        return;
    }

    // handshake completed; the rest of this function only logs its outcome
    Security::SessionPointer session(fd_table[fd].ssl);

#if USE_OPENSSL
    if (Security::SessionIsResumed(session)) {
        debugs(83, 2, "Session " << SSL_get_session(session.get()) <<
               " reused on FD " << fd << " (" << fd_table[fd].ipaddr <<
               ":" << (int)fd_table[fd].remote_port << ")");
    } else {
        if (Debug::Enabled(83, 4)) {
            /* Write out the SSL session details.. actually the call below, but
             * OpenSSL headers do strange typecasts confusing GCC.. */
            /* PEM_write_SSL_SESSION(DebugStream(), SSL_get_session(ssl)); */
#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
            PEM_ASN1_write(reinterpret_cast<i2d_of_void *>(i2d_SSL_SESSION),
                           PEM_STRING_SSL_SESSION, DebugStream(),
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);

#elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)

            /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
             * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
             * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
             * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
             * Because there are two possible usable cast, if you get an error here, try the other
             * commented line. */

            PEM_ASN1_write((int(*)())i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           DebugStream(),
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
            /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           DebugStream(),
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
             */
#else
            debugs(83, 4, "With " OPENSSL_VERSION_TEXT ", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source.");

#endif
            /* Note: This does not automatically fflush the log file.. */
        }

        debugs(83, 2, "New session " << SSL_get_session(session.get()) <<
               " on FD " << fd << " (" << fd_table[fd].ipaddr << ":" <<
               fd_table[fd].remote_port << ")");
    }
#else
    debugs(83, 2, "TLS session reuse not yet implemented.");
#endif

    // Connection established. Retrieve TLS connection parameters for logging.
    conn->clientConnection->tlsNegotiations()->retrieveNegotiatedInfo(session);

#if USE_OPENSSL
    // log the client certificate subject/issuer, if one was presented
    X509 *client_cert = SSL_get_peer_certificate(session.get());

    if (client_cert) {
        debugs(83, 3, "FD " << fd << " client certificate: subject: " <<
               Security::SubjectName(*client_cert));

        debugs(83, 3, "FD " << fd << " client certificate: issuer: " <<
               Security::IssuerName(*client_cert));

        X509_free(client_cert);
    } else {
        debugs(83, 5, "FD " << fd << " has no client certificate.");
    }
#else
    debugs(83, 2, "Client certificate requesting not yet implemented.");
#endif

    // If we are called, then bumped CONNECT has succeeded. Finalize it.
    if (auto xact = conn->pipeline.front()) {
        if (xact->http && xact->http->request && xact->http->request->method == Http::METHOD_CONNECT)
            xact->finished();
        // cannot proceed with encryption if requests wait for plain responses
        Must(conn->pipeline.empty());
    }
    /* careful: finished() above frees request, host, etc. */

    conn->readSomeData();
}
2388
2389/**
2390 * If Security::ContextPointer is given, starts reading the TLS handshake.
2391 * Otherwise, calls switchToHttps to generate a dynamic Security::ContextPointer.
2392 */
2393static void
2394httpsEstablish(ConnStateData *connState, const Security::ContextPointer &ctx)
2395{
2396 assert(connState);
2397 const Comm::ConnectionPointer &details = connState->clientConnection;
2398
2399 if (!ctx || !httpsCreate(connState, ctx))
2400 return;
2401
2402 connState->resetReadTimeout(Config.Timeout.request);
2403
2404 Comm::SetSelect(details->fd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0);
2405}
2406
#if USE_OPENSSL
/**
 * A callback function to use with the ACLFilledChecklist callback.
 * Records the ssl_bump ACL verdict on the connection and then continues
 * (by faking a CONNECT request) or closes the connection.
 */
static void
httpsSslBumpAccessCheckDone(Acl::Answer answer, void *data)
{
    ConnStateData *connState = (ConnStateData *) data;

    // if the connection is closed or closing, just return.
    if (!connState->isOpen())
        return;

    if (answer.allowed()) {
        // fixed: missing space between the bump mode name and "needed for"
        debugs(33, 2, "sslBump action " << Ssl::bumpMode(answer.kind) << " needed for " << connState->clientConnection);
        connState->sslBumpMode = static_cast<Ssl::BumpMode>(answer.kind);
    } else {
        debugs(33, 3, "sslBump not needed for " << connState->clientConnection);
        connState->sslBumpMode = Ssl::bumpSplice;
    }

    if (connState->sslBumpMode == Ssl::bumpTerminate) {
        connState->clientConnection->close();
        return;
    }

    // continue by faking a CONNECT request for the intercepted traffic;
    // if that is impossible, there is no way forward, so close
    if (!connState->fakeAConnectRequest("ssl-bump", connState->inBuf))
        connState->clientConnection->close();
}
#endif
2437
2438/** handle a new HTTPS connection */
2439static void
2440httpsAccept(const CommAcceptCbParams &params)
2441{
2442 Assure(params.port);
2443
2444 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2445
2446 if (params.flag != Comm::OK) {
2447 // Its possible the call was still queued when the client disconnected
2448 debugs(33, 2, "httpsAccept: " << params.port->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2449 return;
2450 }
2451
2452 const auto xact = MasterXaction::MakePortful(params.port);
2453 xact->tcpClient = params.conn;
2454
2455 debugs(33, 4, params.conn << " accepted, starting SSL negotiation.");
2456 fd_note(params.conn->fd, "client https connect");
2457
2458 // Socket is ready, setup the connection manager to start using it
2459 auto *srv = Https::NewServer(xact);
2460 // XXX: do not abandon the MasterXaction object
2461 AsyncJob::Start(srv); // usually async-calls postHttpsAccept()
2462}
2463
/// Continues https_port connection acceptance: starts the ssl_bump ACL
/// check on SslBump ports, or establishes TLS using the static context.
void
ConnStateData::postHttpsAccept()
{
    if (port->flags.tunnelSslBumping) {
#if USE_OPENSSL
        debugs(33, 5, "accept transparent connection: " << clientConnection);

        if (!Config.accessList.ssl_bump) {
            // no ssl_bump rules configured: handle as a denied check
            httpsSslBumpAccessCheckDone(ACCESS_DENIED, this);
            return;
        }

        const auto mx = MasterXaction::MakePortful(port);
        mx->tcpClient = clientConnection;
        // Create a fake HTTP request and ALE for the ssl_bump ACL check,
        // using tproxy/intercept provided destination IP and port.
        // XXX: Merge with subsequent fakeAConnectRequest(), buildFakeRequest().
        // XXX: Do this earlier (e.g., in Http[s]::One::Server constructor).
        HttpRequest *request = new HttpRequest(mx);
        static char ip[MAX_IPSTRLEN];
        assert(clientConnection->flags & (COMM_TRANSPARENT | COMM_INTERCEPTION));
        request->url.host(clientConnection->local.toStr(ip, sizeof(ip)));
        request->url.port(clientConnection->local.port());
        request->myportname = port->name;
        const AccessLogEntry::Pointer connectAle = new AccessLogEntry;
        CodeContext::Reset(connectAle);
        // TODO: Use these request/ALE when waiting for new bumped transactions.

        auto acl_checklist = ACLFilledChecklist::Make(Config.accessList.ssl_bump, request);
        fillChecklist(*acl_checklist);
        // Build a local AccessLogEntry to allow requiresAle() acls work
        acl_checklist->al = connectAle;
        acl_checklist->al->cache.start_time = current_time;
        acl_checklist->al->tcpClient = clientConnection;
        acl_checklist->al->cache.port = port;
        acl_checklist->al->cache.caddr = log_addr;
        acl_checklist->al->proxyProtocolHeader = proxyProtocolHeader_;
        acl_checklist->al->updateError(bareError);
        HTTPMSGUNLOCK(acl_checklist->al->request);
        acl_checklist->al->request = request;
        HTTPMSGLOCK(acl_checklist->al->request);
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(request, log_uri);
        // the verdict arrives asynchronously via httpsSslBumpAccessCheckDone()
        ACLFilledChecklist::NonBlockingCheck(std::move(acl_checklist), httpsSslBumpAccessCheckDone, this);
#else
        fatal("FATAL: SSL-Bump requires --with-openssl");
#endif
        return;
    } else {
        httpsEstablish(this, port->secure.staticContext);
    }
}
2518
2519#if USE_OPENSSL
2520void
2521ConnStateData::sslCrtdHandleReplyWrapper(void *data, const Helper::Reply &reply)
2522{
2523 ConnStateData * state_data = (ConnStateData *)(data);
2524 state_data->sslCrtdHandleReply(reply);
2525}
2526
/// Processes an ssl_crtd helper reply carrying a generated certificate (or
/// an error) and finishes by installing a TLS context; on any failure, a
/// nil context is passed to getSslContextDone() to trigger fallbacks.
void
ConnStateData::sslCrtdHandleReply(const Helper::Reply &reply)
{
    if (!isOpen()) {
        debugs(33, 3, "Connection gone while waiting for ssl_crtd helper reply; helper reply:" << reply);
        return;
    }

    if (reply.result == Helper::BrokenHelper) {
        debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply);
    } else if (!reply.other().hasContent()) {
        debugs(1, DBG_IMPORTANT, "\"ssl_crtd\" helper returned <NULL> reply.");
    } else {
        Ssl::CrtdMessage reply_message(Ssl::CrtdMessage::REPLY);
        if (reply_message.parse(reply.other().content(), reply.other().contentSize()) != Ssl::CrtdMessage::OK) {
            debugs(33, 5, "Reply from ssl_crtd for " << tlsConnectHostOrIp << " is incorrect");
        } else {
            if (reply.result != Helper::Okay) {
                debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply_message.getBody());
            } else {
                debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " was successfully received from ssl_crtd");
                if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
                    // peek/stare: configure the already-established session in place
                    doPeekAndSpliceStep();
                    auto ssl = fd_table[clientConnection->fd].ssl.get();
                    bool ret = Ssl::configureSSLUsingPkeyAndCertFromMemory(ssl, reply_message.getBody().c_str(), *port);
                    if (!ret)
                        debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

                    Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
                    Ssl::configureUnconfiguredSslContext(ctx, signAlgorithm, *port);
                } else {
                    // otherwise, build a fresh context from the helper output
                    // and cache it for later reuse
                    Security::ContextPointer ctx(Ssl::GenerateSslContextUsingPkeyAndCertFromMemory(reply_message.getBody().c_str(), port->secure, (signAlgorithm == Ssl::algSignTrusted)));
                    if (ctx && !sslBumpCertKey.isEmpty())
                        storeTlsContextToCache(sslBumpCertKey, ctx);
                    getSslContextDone(ctx);
                }
                return;
            }
        }
    }
    // every error path above falls through to here: no generated context
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2570
2571void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties &certProperties)
2572{
2573 certProperties.commonName = sslCommonName_.isEmpty() ? tlsConnectHostOrIp.c_str() : sslCommonName_.c_str();
2574
2575 const bool connectedOk = sslServerBump && sslServerBump->connectedOk();
2576 if (connectedOk) {
2577 if (X509 *mimicCert = sslServerBump->serverCert.get())
2578 certProperties.mimicCert.resetAndLock(mimicCert);
2579
2580 ACLFilledChecklist checklist(nullptr, sslServerBump->request.getRaw());
2581 fillChecklist(checklist);
2582
2583 for (sslproxy_cert_adapt *ca = Config.ssl_client.cert_adapt; ca != nullptr; ca = ca->next) {
2584 // If the algorithm already set, then ignore it.
2585 if ((ca->alg == Ssl::algSetCommonName && certProperties.setCommonName) ||
2586 (ca->alg == Ssl::algSetValidAfter && certProperties.setValidAfter) ||
2587 (ca->alg == Ssl::algSetValidBefore && certProperties.setValidBefore) )
2588 continue;
2589
2590 if (ca->aclList && checklist.fastCheck(ca->aclList).allowed()) {
2591 const char *alg = Ssl::CertAdaptAlgorithmStr[ca->alg];
2592 const char *param = ca->param;
2593
2594 // For parameterless CN adaptation, use hostname from the
2595 // CONNECT request.
2596 if (ca->alg == Ssl::algSetCommonName) {
2597 if (!param)
2598 param = tlsConnectHostOrIp.c_str();
2599 certProperties.commonName = param;
2600 certProperties.setCommonName = true;
2601 } else if (ca->alg == Ssl::algSetValidAfter)
2602 certProperties.setValidAfter = true;
2603 else if (ca->alg == Ssl::algSetValidBefore)
2604 certProperties.setValidBefore = true;
2605
2606 debugs(33, 5, "Matches certificate adaptation aglorithm: " <<
2607 alg << " param: " << (param ? param : "-"));
2608 }
2609 }
2610
2611 certProperties.signAlgorithm = Ssl::algSignEnd;
2612 for (sslproxy_cert_sign *sg = Config.ssl_client.cert_sign; sg != nullptr; sg = sg->next) {
2613 if (sg->aclList && checklist.fastCheck(sg->aclList).allowed()) {
2614 certProperties.signAlgorithm = (Ssl::CertSignAlgorithm)sg->alg;
2615 break;
2616 }
2617 }
2618 } else {// did not try to connect (e.g. client-first) or failed to connect
2619 // In case of an error while connecting to the secure server, use a
2620 // trusted certificate, with no mimicked fields and no adaptation
2621 // algorithms. There is nothing we can mimic, so we want to minimize the
2622 // number of warnings the user will have to see to get to the error page.
2623 // We will close the connection, so that the trust is not extended to
2624 // non-Squid content.
2625 certProperties.signAlgorithm = Ssl::algSignTrusted;
2626 }
2627
2628 assert(certProperties.signAlgorithm != Ssl::algSignEnd);
2629
2630 if (certProperties.signAlgorithm == Ssl::algSignUntrusted) {
2631 assert(port->secure.untrustedSigningCa.cert);
2632 certProperties.signWithX509.resetAndLock(port->secure.untrustedSigningCa.cert.get());
2633 certProperties.signWithPkey.resetAndLock(port->secure.untrustedSigningCa.pkey.get());
2634 } else {
2635 assert(port->secure.signingCa.cert.get());
2636 certProperties.signWithX509.resetAndLock(port->secure.signingCa.cert.get());
2637
2638 if (port->secure.signingCa.pkey)
2639 certProperties.signWithPkey.resetAndLock(port->secure.signingCa.pkey.get());
2640 }
2641 signAlgorithm = certProperties.signAlgorithm;
2642
2643 certProperties.signHash = Ssl::DefaultSignHash;
2644}
2645
2646Security::ContextPointer
2647ConnStateData::getTlsContextFromCache(const SBuf &cacheKey, const Ssl::CertificateProperties &certProperties)
2648{
2649 debugs(33, 5, "Finding SSL certificate for " << cacheKey << " in cache");
2650 const auto ssl_ctx_cache = Ssl::TheGlobalContextStorage().getLocalStorage(port->s);
2651 if (const auto ctx = ssl_ctx_cache ? ssl_ctx_cache->get(cacheKey) : nullptr) {
2652 if (Ssl::verifySslCertificate(*ctx, certProperties)) {
2653 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is valid");
2654 return *ctx;
2655 } else {
2656 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is out of date. Delete this certificate from cache");
2657 if (ssl_ctx_cache)
2658 ssl_ctx_cache->del(cacheKey);
2659 }
2660 }
2661 return Security::ContextPointer(nullptr);
2662}
2663
2664void
2665ConnStateData::storeTlsContextToCache(const SBuf &cacheKey, Security::ContextPointer &ctx)
2666{
2667 const auto ssl_ctx_cache = Ssl::TheGlobalContextStorage().getLocalStorage(port->s);
2668 if (!ssl_ctx_cache || !ssl_ctx_cache->add(cacheKey, ctx)) {
2669 // If it is not in storage delete after using. Else storage deleted it.
2670 fd_table[clientConnection->fd].dynamicTlsContext = ctx;
2671 }
2672}
2673
/// Starts producing the TLS context needed to bump this connection: reuses a
/// cached context, asks the ssl_crtd helper, or generates one in-process.
/// When certificate generation is disabled, hands a nil context to
/// getSslContextDone() so it can fall back to the static context.
void
ConnStateData::getSslContextStart()
{
    if (port->secure.generateHostCertificates) {
        Ssl::CertificateProperties certProperties;
        buildSslCertGenerationParams(certProperties);

        // Disable caching for bumpPeekAndSplice mode
        if (!(sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare))) {
            sslBumpCertKey.clear();
            Ssl::InRamCertificateDbKey(certProperties, sslBumpCertKey);
            assert(!sslBumpCertKey.isEmpty());

            // reuse a previously generated context when a valid one is cached
            Security::ContextPointer ctx(getTlsContextFromCache(sslBumpCertKey, certProperties));
            if (ctx) {
                getSslContextDone(ctx);
                return;
            }
        }

#if USE_SSL_CRTD
        try {
            debugs(33, 5, "Generating SSL certificate for " << certProperties.commonName << " using ssl_crtd.");
            Ssl::CrtdMessage request_message(Ssl::CrtdMessage::REQUEST);
            request_message.setCode(Ssl::CrtdMessage::code_new_certificate);
            request_message.composeRequest(certProperties);
            debugs(33, 5, "SSL crtd request: " << request_message.compose().c_str());
            // helper reply continues in sslCrtdHandleReply() via the wrapper
            Ssl::Helper::Submit(request_message, sslCrtdHandleReplyWrapper, this);
            return;
        } catch (const std::exception &e) {
            debugs(33, DBG_IMPORTANT, "ERROR: Failed to compose ssl_crtd " <<
                   "request for " << certProperties.commonName <<
                   " certificate: " << e.what() << "; will now block to " <<
                   "generate that certificate.");
            // fall through to do blocking in-process generation.
        }
#endif // USE_SSL_CRTD

        debugs(33, 5, "Generating SSL certificate for " << certProperties.commonName);
        if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
            // peek/stare: configure the already-established session in place
            doPeekAndSpliceStep();
            auto ssl = fd_table[clientConnection->fd].ssl.get();
            if (!Ssl::configureSSL(ssl, certProperties, *port))
                debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

            Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
            Ssl::configureUnconfiguredSslContext(ctx, certProperties.signAlgorithm, *port);
        } else {
            // generate a new context in-process and cache it for later reuse
            Security::ContextPointer dynCtx(Ssl::GenerateSslContext(certProperties, port->secure, (signAlgorithm == Ssl::algSignTrusted)));
            if (dynCtx && !sslBumpCertKey.isEmpty())
                storeTlsContextToCache(sslBumpCertKey, dynCtx);
            getSslContextDone(dynCtx);
        }
        return;
    }

    // generation disabled: getSslContextDone() will pick the static fallback
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2733
/// Installs the given (or static fallback) TLS context on the client
/// connection, feeds already-buffered client bytes to the TLS layer, and
/// starts negotiating TLS with the client. Closes the connection when no
/// usable context is available.
void
ConnStateData::getSslContextDone(Security::ContextPointer &ctx)
{
    if (port->secure.generateHostCertificates && !ctx) {
        debugs(33, 2, "Failed to generate TLS context for " << tlsConnectHostOrIp);
    }

    // If generated ssl context = nullptr, try to use static ssl context.
    if (!ctx) {
        if (!port->secure.staticContext) {
            debugs(83, DBG_IMPORTANT, "Closing " << clientConnection->remote << " as lacking TLS context");
            clientConnection->close();
            return;
        } else {
            debugs(33, 5, "Using static TLS context.");
            ctx = port->secure.staticContext;
        }
    }

    if (!httpsCreate(this, ctx))
        return;

    // bumped intercepted conns should already have Config.Timeout.request set
    // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
    // to make sure the connection does not get stuck on non-SSL clients.
    resetReadTimeout(Config.Timeout.request);

    switchedToHttps_ = true;

    // hand any bytes we already read to the TLS layer via its read BIO so the
    // handshake parser sees them before any fresh socket reads
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    inBuf.clear();
    clientNegotiateSSL(clientConnection->fd, this);
}
2770
/// Switches this connection to HTTPS handling (after a CONNECT or on an
/// intercepted connection), sets up SslBump state according to
/// bumpServerMode, and starts reading what is expected to be a TLS Client
/// Hello.
void
ConnStateData::switchToHttps(ClientHttpRequest *http, Ssl::BumpMode bumpServerMode)
{
    assert(!switchedToHttps_);
    Must(http->request);
    auto &request = http->request;

    // Depending on receivedFirstByte_, we are at the start of either an
    // established CONNECT tunnel with the client or an intercepted TCP (and
    // presumably TLS) connection from the client. Expect TLS Client Hello.
    const auto insideConnectTunnel = receivedFirstByte_;
    debugs(33, 5, (insideConnectTunnel ? "post-CONNECT " : "raw TLS ") << clientConnection);

    // remember the intended TLS destination for logging and tunneling
    tlsConnectHostOrIp = request->url.hostOrIp();
    tlsConnectPort = request->url.port();
    resetSslCommonName(request->url.host());

    // We are going to read new request
    flags.readMore = true;

    // keep version major.minor details the same.
    // but we are now performing the HTTPS handshake traffic
    transferProtocol.protocol = AnyP::PROTO_HTTPS;

    // If sslServerBump is set, then we have decided to deny CONNECT
    // and now want to switch to SSL to send the error to the client
    // without even peeking at the origin server certificate.
    if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http);
    } else if (bumpServerMode == Ssl::bumpPeek || bumpServerMode == Ssl::bumpStare) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http, nullptr, bumpServerMode);
    }

    // commSetConnTimeout() was called for this request before we switched.
    // Fix timeout to request_start_timeout
    resetReadTimeout(Config.Timeout.request_start_timeout);
    // Also reset receivedFirstByte_ flag to allow this timeout work in the case we have
    // a bumbed "connect" request on non transparent port.
    receivedFirstByte_ = false;
    // Get more data to peek at Tls
    parsingTlsHandshake = true;

    // If the protocol has changed, then reset preservingClientData_.
    // Otherwise, its value initially set in start() is still valid/fresh.
    // shouldPreserveClientData() uses parsingTlsHandshake which is reset above.
    if (insideConnectTunnel)
        preservingClientData_ = shouldPreserveClientData();

    readSomeData();
}
2823
/// Parses the buffered TLS Client Hello bytes. On success, proceeds with the
/// configured SslBump step (client-first, server-first, or peek/stare). On
/// parse failure, tries on_unsupported_protocol tunneling or closes the
/// connection. Returns early (after scheduling a read) when more handshake
/// bytes are needed.
void
ConnStateData::parseTlsHandshake()
{
    Must(parsingTlsHandshake);

    assert(!inBuf.isEmpty());
    receivedFirstByte();
    fd_note(clientConnection->fd, "Parsing TLS handshake");

    // stops being nil if we fail to parse the handshake
    ErrorDetail::Pointer parseErrorDetails;

    try {
        if (!tlsParser.parseHello(inBuf)) {
            // need more data to finish parsing
            readSomeData();
            return;
        }
    }
    catch (const TextException &ex) {
        debugs(83, 2, "exception: " << ex);
        parseErrorDetails = new ExceptionErrorDetail(ex.id());
    }
    catch (...) {
        debugs(83, 2, "exception: " << CurrentException);
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_PARSE");
        parseErrorDetails = d;
    }

    parsingTlsHandshake = false;

    // client data may be needed for splicing and for
    // tunneling unsupportedProtocol after an error
    preservedClientData = inBuf;

    // Even if the parser failed, each TLS detail should either be set
    // correctly or still be "unknown"; copying unknown detail is a no-op.
    Security::TlsDetails::Pointer const &details = tlsParser.details;
    clientConnection->tlsNegotiations()->retrieveParsedInfo(details);
    if (details && !details->serverName.isEmpty()) {
        // remember the SNI for certificate mimicking and fake CONNECTs
        resetSslCommonName(details->serverName.c_str());
        tlsClientSni_ = details->serverName;
    }

    // We should disable read/write handlers
    Comm::ResetSelect(clientConnection->fd);

    if (parseErrorDetails) {
        Http::StreamPointer context = pipeline.front();
        Must(context && context->http);
        HttpRequest::Pointer request = context->http->request;
        debugs(83, 5, "Got something other than TLS Client Hello. Cannot SslBump.");
        updateError(ERR_PROTOCOL_UNKNOWN, parseErrorDetails);
        if (!tunnelOnError(ERR_PROTOCOL_UNKNOWN))
            clientConnection->close();
        return;
    }

    if (!sslServerBump || sslServerBump->act.step1 == Ssl::bumpClientFirst) { // Either means client-first.
        getSslContextStart();
        return;
    } else if (sslServerBump->act.step1 == Ssl::bumpServerFirst) {
        debugs(83, 5, "server-first skips step2; start forwarding the request");
        sslServerBump->step = XactionStep::tlsBump3;
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        // will call httpsPeeked() with certificate and connection, eventually
        FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : nullptr);
    } else {
        Must(sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare);
        startPeekAndSplice();
    }
}
2897
2898static void
2899httpsSslBumpStep2AccessCheckDone(Acl::Answer answer, void *data)
2900{
2901 ConnStateData *connState = (ConnStateData *) data;
2902
2903 // if the connection is closed or closing, just return.
2904 if (!connState->isOpen())
2905 return;
2906
2907 debugs(33, 5, "Answer: " << answer << " kind:" << answer.kind);
2908 assert(connState->serverBump());
2909 Ssl::BumpMode bumpAction;
2910 if (answer.allowed()) {
2911 bumpAction = (Ssl::BumpMode)answer.kind;
2912 } else
2913 bumpAction = Ssl::bumpSplice;
2914
2915 connState->serverBump()->act.step2 = bumpAction;
2916 connState->sslBumpMode = bumpAction;
2917 Http::StreamPointer context = connState->pipeline.front();
2918 if (ClientHttpRequest *http = (context ? context->http : nullptr))
2919 http->al->ssl.bumpMode = bumpAction;
2920
2921 if (bumpAction == Ssl::bumpTerminate) {
2922 connState->clientConnection->close();
2923 } else if (bumpAction != Ssl::bumpSplice) {
2924 connState->startPeekAndSplice();
2925 } else if (!connState->splice())
2926 connState->clientConnection->close();
2927}
2928
2929bool
2930ConnStateData::splice()
2931{
2932 // normally we can splice here, because we just got client hello message
2933
2934 // fde::ssl/tls_read_method() probably reads from our own inBuf. If so, then
2935 // we should not lose any raw bytes when switching to raw I/O here.
2936 if (fd_table[clientConnection->fd].ssl.get())
2937 fd_table[clientConnection->fd].useDefaultIo();
2938
2939 // XXX: assuming that there was an HTTP/1.1 CONNECT to begin with...
2940 // reset the current protocol to HTTP/1.1 (was "HTTPS" for the bumping process)
2941 transferProtocol = Http::ProtocolVersion();
2942 assert(!pipeline.empty());
2943 Http::StreamPointer context = pipeline.front();
2944 Must(context);
2945 Must(context->http);
2946 ClientHttpRequest *http = context->http;
2947 HttpRequest::Pointer request = http->request;
2948 context->finished();
2949 if (transparent()) {
2950 // For transparent connections, make a new fake CONNECT request, now
2951 // with SNI as target. doCallout() checks, adaptations may need that.
2952 return fakeAConnectRequest("splice", preservedClientData);
2953 } else {
2954 // For non transparent connections make a new tunneled CONNECT, which
2955 // also sets the HttpRequest::flags::forceTunnel flag to avoid
2956 // respond with "Connection Established" to the client.
2957 // This fake CONNECT request required to allow use of SNI in
2958 // doCallout() checks and adaptations.
2959 return initiateTunneledRequest(request, "splice", preservedClientData);
2960 }
2961}
2962
/// Step2 of SSL bumping: on first entry, re-evaluates ssl_bump rules
/// asynchronously (httpsSslBumpStep2AccessCheckDone() continues the job);
/// otherwise lets the TLS library re-parse the Client Hello and, on success,
/// starts forwarding the request (step3).
void
ConnStateData::startPeekAndSplice()
{
    // This is the Step2 of the SSL bumping
    assert(sslServerBump);
    Http::StreamPointer context = pipeline.front();
    ClientHttpRequest *http = context ? context->http : nullptr;

    if (sslServerBump->at(XactionStep::tlsBump1)) {
        sslServerBump->step = XactionStep::tlsBump2;
        // Run a accessList check to check if want to splice or continue bumping

        auto acl_checklist = ACLFilledChecklist::Make(Config.accessList.ssl_bump, sslServerBump->request.getRaw());
        // these step1-only actions are not available at step2; forbid them
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpNone));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpClientFirst));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpServerFirst));
        fillChecklist(*acl_checklist);
        ACLFilledChecklist::NonBlockingCheck(std::move(acl_checklist), httpsSslBumpStep2AccessCheckDone, this);
        return;
    }

    // will call httpsPeeked() with certificate and connection, eventually
    Security::ContextPointer unConfiguredCTX(Ssl::createSSLContext(port->secure.signingCa.cert, port->secure.signingCa.pkey, port->secure));
    fd_table[clientConnection->fd].dynamicTlsContext = unConfiguredCTX;

    if (!httpsCreate(this, unConfiguredCTX))
        return;

    switchedToHttps_ = true;

    // replay the buffered Client Hello through the TLS read BIO, but hold
    // further progress until doPeekAndSpliceStep() releases it
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    bio->hold(true);

    // We have successfully parsed client Hello, but our TLS handshake parser is
    // forgiving. Now we use a TLS library to parse the same bytes, so that we
    // can honor on_unsupported_protocol if needed. If there are no errors, we
    // expect Security::Accept() to ask us to write (our) TLS server Hello. We
    // also allow an ioWantRead result in case some fancy TLS extension that
    // Squid does not yet understand requires reading post-Hello client bytes.
    const auto handshakeResult = acceptTls();
    if (!handshakeResult.wantsIo())
        return handleSslBumpHandshakeError(handshakeResult);

    // We need to reset inBuf here, to be used by incoming requests in the case
    // of SSL bump
    inBuf.clear();

    debugs(83, 5, "Peek and splice at step2 done. Start forwarding the request!!! ");
    sslServerBump->step = XactionStep::tlsBump3;
    FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : nullptr);
}
3017
/// process a problematic Security::Accept() result on the SslBump code path:
/// classifies the failure, records an error detail, and then either tunnels
/// the client bytes (per on_unsupported_protocol) or closes the connection
void
ConnStateData::handleSslBumpHandshakeError(const Security::IoResult &handshakeResult)
{
    auto errCategory = ERR_NONE;

    switch (handshakeResult.category) {
    case Security::IoResult::ioSuccess: {
        // the caller expected the handshake to want more I/O, not to finish
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_SUCCESS");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioWantRead: {
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_READ");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioWantWrite: {
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_WRITE");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioError:
        debugs(83, (handshakeResult.important ? DBG_IMPORTANT : 2), "ERROR: Cannot SslBump-accept a TLS connection" <<
               Debug::Extra << "problem: " << WithExtras(handshakeResult));
        updateError(errCategory = ERR_SECURE_ACCEPT_FAIL, handshakeResult.errorDetail);
        break;

    }

    if (!tunnelOnError(errCategory))
        clientConnection->close();
}
3054
3055void
3056ConnStateData::doPeekAndSpliceStep()
3057{
3058 auto ssl = fd_table[clientConnection->fd].ssl.get();
3059 BIO *b = SSL_get_rbio(ssl);
3060 assert(b);
3061 Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
3062
3063 debugs(33, 5, "PeekAndSplice mode, proceed with client negotiation. Current state:" << SSL_state_string_long(ssl));
3064 bio->hold(false);
3065
3066 Comm::SetSelect(clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, this, 0);
3067 switchedToHttps_ = true;
3068}
3069
3070void
3071ConnStateData::httpsPeeked(PinnedIdleContext pic)
3072{
3073 Must(sslServerBump != nullptr);
3074 Must(sslServerBump->request == pic.request);
3075 Must(pipeline.empty() || pipeline.front()->http == nullptr || pipeline.front()->http->request == pic.request.getRaw());
3076
3077 if (Comm::IsConnOpen(pic.connection)) {
3078 notePinnedConnectionBecameIdle(pic);
3079 debugs(33, 5, "bumped HTTPS server: " << tlsConnectHostOrIp);
3080 } else
3081 debugs(33, 5, "Error while bumping: " << tlsConnectHostOrIp);
3082
3083 getSslContextStart();
3084}
3085
3086#endif /* USE_OPENSSL */
3087
/// Starts tunneling client bytes by building a fake, force-tunneled CONNECT
/// request. The tunnel target is derived from the pinned server connection,
/// the cause request, the remembered TLS target, or the intercepted local
/// address -- in that order of preference.
/// \param cause the request that led to tunneling (may be nil)
/// \param reason short description for debugging output
/// \param payload already-received client bytes to forward through the tunnel
/// \returns false when no tunnel target could be computed
bool
ConnStateData::initiateTunneledRequest(HttpRequest::Pointer const &cause, const char *reason, const SBuf &payload)
{
    // fake a CONNECT request to force connState to tunnel
    SBuf connectHost;
    AnyP::Port connectPort;

    if (pinning.serverConnection != nullptr) {
        static char ip[MAX_IPSTRLEN];
        connectHost = pinning.serverConnection->remote.toStr(ip, sizeof(ip));
        if (const auto remotePort = pinning.serverConnection->remote.port())
            connectPort = remotePort;
    } else if (cause) {
        connectHost = cause->url.hostOrIp();
        connectPort = cause->url.port();
#if USE_OPENSSL
    } else if (!tlsConnectHostOrIp.isEmpty()) {
        connectHost = tlsConnectHostOrIp;
        connectPort = tlsConnectPort;
#endif
    } else if (transparent()) {
        static char ip[MAX_IPSTRLEN];
        connectHost = clientConnection->local.toStr(ip, sizeof(ip));
        connectPort = clientConnection->local.port();
    }

    if (!connectPort) {
        // Typical cases are malformed HTTP requests on http_port and malformed
        // TLS handshakes on non-bumping https_port. TODO: Discover these
        // problems earlier so that they can be classified/detailed better.
        debugs(33, 2, "Not able to compute URL, abort request tunneling for " << reason);
        // TODO: throw when NonBlockingCheck() callbacks gain job protections
        static const auto d = MakeNamedErrorDetail("TUNNEL_TARGET");
        updateError(ERR_INVALID_REQ, d);
        return false;
    }

    debugs(33, 2, "Request tunneling for " << reason);
    const auto http = buildFakeRequest(connectHost, *connectPort, payload);
    HttpRequest::Pointer request = http->request;
    request->flags.forceTunnel = true;
    http->calloutContext = new ClientRequestContext(http);
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3134
/// Builds and processes a fake CONNECT request on a transparently intercepted
/// connection so that the received bytes can be tunneled. Uses the TLS SNI as
/// the target host when available, falling back to the intercepted local
/// address.
/// \param reason short description for debugging output
/// \param payload already-received client bytes to forward through the tunnel
bool
ConnStateData::fakeAConnectRequest(const char *reason, const SBuf &payload)
{
    debugs(33, 2, "fake a CONNECT request to force connState to tunnel for " << reason);

    SBuf connectHost;
    assert(transparent());
    const unsigned short connectPort = clientConnection->local.port();

#if USE_OPENSSL
    if (!tlsClientSni_.isEmpty())
        connectHost.assign(tlsClientSni_);
    else
#endif
    {
        static char ip[MAX_IPSTRLEN];
        clientConnection->local.toHostStr(ip, sizeof(ip));
        connectHost.assign(ip);
    }

    ClientHttpRequest *http = buildFakeRequest(connectHost, connectPort, payload);

    http->calloutContext = new ClientRequestContext(http);
    HttpRequest::Pointer request = http->request;
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3163
/// Builds a fake CONNECT transaction (ClientHttpRequest, Http::Stream, and
/// HttpRequest) targeting useHost:usePort, wires it into the client stream,
/// and primes inBuf with the given payload. The caller processes the returned
/// transaction (e.g., via doCallouts()).
ClientHttpRequest *
ConnStateData::buildFakeRequest(SBuf &useHost, const AnyP::KnownPort usePort, const SBuf &payload)
{
    ClientHttpRequest *http = new ClientHttpRequest(this);
    Http::Stream *stream = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = stream->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // plumb the reply-generation and socket-delivery ends of the stream
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = stream;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    stream->flags.parsed_ok = 1; // Do we need it?
    stream->mayUseConnection(true);
    extendLifetime();
    stream->registerWithConn();

    const auto mx = MasterXaction::MakePortful(port);
    mx->tcpClient = clientConnection;
    // Setup Http::Request object. Maybe should be replaced by a call to (modified)
    // clientProcessRequest
    HttpRequest::Pointer request = new HttpRequest(mx);
    request->url.setScheme(AnyP::PROTO_AUTHORITY_FORM, nullptr);
    request->method = Http::METHOD_CONNECT;
    request->url.host(useHost.c_str());
    request->url.port(usePort);

    http->uri = SBufToCstring(request->effectiveRequestUri());
    http->initRequest(request.getRaw());

    request->manager(this, http->al);

    request->header.putStr(Http::HOST, useHost.c_str());

    // record how the (fake) request reached us for %>ru-style logging and ACLs
    request->sources |= ((switchedToHttps() || port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    if (getAuth())
        request->auth_user_request = getAuth();
#endif

    // the payload becomes the next bytes "read" from this connection
    inBuf = payload;
    flags.readMore = false;

    return http;
}
3213
3214/// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
3215static bool
3216OpenedHttpSocket(const Comm::ConnectionPointer &c, const Ipc::FdNoteId portType)
3217{
3218 if (!Comm::IsConnOpen(c)) {
3219 Must(NHttpSockets > 0); // we tried to open some
3220 --NHttpSockets; // there will be fewer sockets than planned
3221 Must(HttpSockets[NHttpSockets] < 0); // no extra fds received
3222
3223 if (!NHttpSockets) // we could not open any listen sockets at all
3224 fatalf("Unable to open %s",FdNote(portType));
3225
3226 return false;
3227 }
3228 return true;
3229}
3230
3231/// find any unused HttpSockets[] slot and store fd there or return false
3232static bool
3233AddOpenedHttpSocket(const Comm::ConnectionPointer &conn)
3234{
3235 bool found = false;
3236 for (int i = 0; i < NHttpSockets && !found; ++i) {
3237 if ((found = HttpSockets[i] < 0))
3238 HttpSockets[i] = conn->fd;
3239 }
3240 return found;
3241}
3242
/// Opens a listening socket for every configured http(s)_port, skipping ports
/// whose TLS configuration cannot be honored and capping the total at
/// MAXTCPLISTENPORTS.
static void
clientHttpConnectionsOpen(void)
{
    const auto savedContext = CodeContext::Current();
    for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
        CodeContext::Reset(s);
        const SBuf &scheme = AnyP::UriScheme(s->transport.protocol).image();

        if (MAXTCPLISTENPORTS == NHttpSockets) {
            debugs(1, DBG_IMPORTANT, "WARNING: You have too many '" << scheme << "_port' lines." <<
                   Debug::Extra << "The limit is " << MAXTCPLISTENPORTS << " HTTP ports.");
            continue;
        }

#if USE_OPENSSL
        if (s->flags.tunnelSslBumping) {
            // bumping requires ssl_bump rules and a working TLS setup
            if (!Config.accessList.ssl_bump) {
                debugs(33, DBG_IMPORTANT, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << scheme << "_port " << s->s);
                s->flags.tunnelSslBumping = false;
            }
            if (!s->secure.staticContext && !s->secure.generateHostCertificates) {
                debugs(1, DBG_IMPORTANT, "Will not bump SSL at " << scheme << "_port " << s->s << " due to TLS initialization failure.");
                s->flags.tunnelSslBumping = false;
                if (s->transport.protocol == AnyP::PROTO_HTTP)
                    s->secure.encryptTransport = false;
            }
            if (s->flags.tunnelSslBumping) {
                // Create ssl_ctx cache for this port.
                Ssl::TheGlobalContextStorage().addLocalStorage(s->s, s->secure.dynamicCertMemCacheSize);
            }
        }
#endif

        if (s->secure.encryptTransport && !s->secure.staticContext) {
            debugs(1, DBG_CRITICAL, "ERROR: Ignoring " << scheme << "_port " << s->s << " due to TLS context initialization failure.");
            continue;
        }

        const auto protocol = s->transport.protocol;
        assert(protocol == AnyP::PROTO_HTTP || protocol == AnyP::PROTO_HTTPS);
        const auto isHttps = protocol == AnyP::PROTO_HTTPS;
        using AcceptCall = CommCbFunPtrCallT<CommAcceptCbPtrFun>;
        RefCount<AcceptCall> subCall = commCbCall(5, 5, isHttps ? "httpsAccept" : "httpAccept",
                                       CommAcceptCbPtrFun(isHttps ? httpsAccept : httpAccept, CommAcceptCbParams(nullptr)));
        clientStartListeningOn(s, subCall, isHttps ? Ipc::fdnHttpsSocket : Ipc::fdnHttpSocket);
    }
    CodeContext::Reset(savedContext);
}
3291
3292void
3293clientStartListeningOn(AnyP::PortCfgPointer &port, const RefCount< CommCbFunPtrCallT<CommAcceptCbPtrFun> > &subCall, const Ipc::FdNoteId fdNote)
3294{
3295 // Fill out a Comm::Connection which IPC will open as a listener for us
3296 port->listenConn = new Comm::Connection;
3297 port->listenConn->local = port->s;
3298 port->listenConn->flags =
3299 COMM_NONBLOCKING |
3300 (port->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
3301 (port->flags.natIntercept ? COMM_INTERCEPTION : 0) |
3302 (port->workerQueues ? COMM_REUSEPORT : 0);
3303
3304 // route new connections to subCall
3305 typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
3306 Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
3307 const auto listenCall =
3308 asyncCall(33, 2, "clientListenerConnectionOpened",
3309 ListeningStartedDialer(&clientListenerConnectionOpened,
3310 port, fdNote, sub));
3311 AsyncCallback<Ipc::StartListeningAnswer> callback(listenCall);
3312 Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, port->listenConn, fdNote, callback);
3313
3314 assert(NHttpSockets < MAXTCPLISTENPORTS);
3315 HttpSockets[NHttpSockets] = -1;
3316 ++NHttpSockets;
3317}
3318
/// process clientHttpConnectionsOpen result: starts accepting connections on
/// the opened socket (or adjusts bookkeeping on failure) and, when built with
/// systemd support, reports readiness
static void
clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub)
{
    Must(s != nullptr);

    if (!OpenedHttpSocket(s->listenConn, portTypeNote))
        return; // OpenedHttpSocket() has adjusted NHttpSockets accounting

    Must(Comm::IsConnOpen(s->listenConn));

    // TCP: setup a job to handle accept() with subscribed handler
    AsyncJob::Start(new Comm::TcpAcceptor(s, FdNote(portTypeNote), sub));

    debugs(1, Important(13), "Accepting " <<
           (s->flags.natIntercept ? "NAT intercepted " : "") <<
           (s->flags.tproxyIntercept ? "TPROXY intercepted " : "") <<
           (s->flags.tunnelSslBumping ? "SSL bumped " : "") <<
           (s->flags.accelSurrogate ? "reverse-proxy " : "")
           << FdNote(portTypeNote) << " connections at "
           << s->listenConn);

    Must(AddOpenedHttpSocket(s->listenConn)); // otherwise, we have received a fd we did not ask for

#if HAVE_LIBSYSTEMD
    // When the very first port opens, tell systemd we are able to serve connections.
    // Subsequent sd_notify() calls, including calls during reconfiguration,
    // do nothing because the first call parameter is 1.
    // XXX: Send the notification only after opening all configured ports.
    if (opt_foreground || opt_no_daemon) {
        const auto result = sd_notify(1, "READY=1");
        if (result < 0) {
            debugs(1, DBG_IMPORTANT, "WARNING: failed to send start-up notification to systemd" <<
                   Debug::Extra << "sd_notify() error: " << xstrerr(-result));
        }
    }
#endif
}
3357
3358void
3359clientOpenListenSockets(void)
3360{
3361 clientHttpConnectionsOpen();
3362 Ftp::StartListening();
3363
3364 if (NHttpSockets < 1)
3365 fatal("No HTTP, HTTPS, or FTP ports configured");
3366}
3367
/// Closes all open HTTP(S) and FTP listening sockets and resets the
/// HttpSockets[] bookkeeping.
void
clientConnectionsClose()
{
    const auto savedContext = CodeContext::Current();
    for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
        CodeContext::Reset(s);
        if (s->listenConn != nullptr) {
            debugs(1, Important(14), "Closing HTTP(S) port " << s->listenConn->local);
            s->listenConn->close();
            s->listenConn = nullptr;
        }
    }
    CodeContext::Reset(savedContext);

    Ftp::StopListening();

    // TODO see if we can drop HttpSockets array entirely
    for (int i = 0; i < NHttpSockets; ++i) {
        HttpSockets[i] = -1;
    }

    NHttpSockets = 0;
}
3391
/// Evaluates whether the given store entry matches the request under the
/// entry's Vary (and X-Accelerator-Vary) rules, computing and recording the
/// request's vary key as needed.
/// \retval VARY_NONE the entry is not subject to Vary
/// \retval VARY_MATCH the request matches the stored variant
/// \retval VARY_OTHER a different variant is needed; restart the lookup
/// \retval VARY_CANCEL the variance cannot be handled; bypass the cache
int
varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
{
    SBuf vary(request->vary_headers);
    const auto &reply = entry->mem().freshestReply();
    auto has_vary = reply.header.has(Http::HdrType::VARY);
#if X_ACCELERATOR_VARY

    has_vary |=
        reply.header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY);
#endif

    if (!has_vary || entry->mem_obj->vary_headers.isEmpty()) {
        if (!vary.isEmpty()) {
            /* Oops... something odd is going on here.. */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            request->vary_headers.clear();
            return VARY_CANCEL;
        }

        if (!has_vary) {
            /* This is not a varying object */
            return VARY_NONE;
        }

        /* virtual "vary" object found. Calculate the vary key and
         * continue the search
         */
        vary = httpMakeVaryMark(request, &reply);

        if (!vary.isEmpty()) {
            request->vary_headers = vary;
            return VARY_OTHER;
        } else {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        }
    } else {
        // the entry is a concrete variant; compute the request's vary key
        // (unless already known) and compare
        if (vary.isEmpty()) {
            vary = httpMakeVaryMark(request, &reply);

            if (!vary.isEmpty())
                request->vary_headers = vary;
        }

        if (vary.isEmpty()) {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        } else if (vary.cmp(entry->mem_obj->vary_headers) == 0) {
            return VARY_MATCH;
        } else {
            /* Oops.. we have already been here and still haven't
             * found the requested variant. Bail out
             */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            return VARY_CANCEL;
        }
    }
}
3455
3456ACLFilledChecklist::MakingPointer
3457clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
3458{
3459 auto checklist = ACLFilledChecklist::Make(acl, nullptr);
3460 clientAclChecklistFill(*checklist, http);
3461 return checklist;
3462}
3463
3464void
3465clientAclChecklistFill(ACLFilledChecklist &checklist, ClientHttpRequest *http)
3466{
3467 assert(http);
3468
3469 if (!checklist.request && http->request)
3470 checklist.setRequest(http->request);
3471
3472 if (!checklist.al && http->al) {
3473 checklist.updateAle(http->al);
3474 checklist.syncAle(http->request, http->log_uri);
3475 }
3476
3477 if (const auto conn = http->getConn())
3478 checklist.setConn(conn); // may already be set
3479}
3480
3481void
3482ConnStateData::fillChecklist(ACLFilledChecklist &checklist) const
3483{
3484 const auto context = pipeline.front();
3485 if (const auto http = context ? context->http : nullptr)
3486 return clientAclChecklistFill(checklist, http); // calls checklist.setConn()
3487
3488 // no requests, but we always have connection-level details
3489 // TODO: ACL checks should not require a mutable ConnStateData. Adjust the
3490 // code that accidentally violates that principle to remove this const_cast!
3491 checklist.setConn(const_cast<ConnStateData*>(this));
3492
3493 // Set other checklist fields inside our fillConnectionLevelDetails() rather
3494 // than here because clientAclChecklistFill() code path calls that method
3495 // (via ACLFilledChecklist::setConn()) rather than calling us directly.
3496}
3497
/// Supplies connection-level details (addresses and, with OpenSSL, collected
/// certificate validation errors) to a checklist already bound to this
/// connection. Called via ACLFilledChecklist::setConn().
void
ConnStateData::fillConnectionLevelDetails(ACLFilledChecklist &checklist) const
{
    assert(checklist.conn() == this);
    assert(clientConnection);

    if (!checklist.request) { // preserve (better) addresses supplied by setRequest()
        checklist.src_addr = clientConnection->remote;
        checklist.my_addr = clientConnection->local; // TODO: or port->s?
    }

#if USE_OPENSSL
    if (!checklist.sslErrors && sslServerBump)
        checklist.sslErrors = sslServerBump->sslErrors();
#endif
}
3514
3515bool
3516ConnStateData::transparent() const
3517{
3518 return clientConnection != nullptr && (clientConnection->flags & (COMM_TRANSPARENT|COMM_INTERCEPTION));
3519}
3520
3521BodyPipe::Pointer
3522ConnStateData::expectRequestBody(int64_t size)
3523{
3524 bodyPipe = new BodyPipe(this);
3525 if (size >= 0)
3526 bodyPipe->setBodySize(size);
3527 else
3528 startDechunkingRequest();
3529 return bodyPipe;
3530}
3531
3532int64_t
3533ConnStateData::mayNeedToReadMoreBody() const
3534{
3535 if (!bodyPipe)
3536 return 0; // request without a body or read/produced all body bytes
3537
3538 if (!bodyPipe->bodySizeKnown())
3539 return -1; // probably need to read more, but we cannot be sure
3540
3541 const int64_t needToProduce = bodyPipe->unproducedSize();
3542 const int64_t haveAvailable = static_cast<int64_t>(inBuf.length());
3543
3544 if (needToProduce <= haveAvailable)
3545 return 0; // we have read what we need (but are waiting for pipe space)
3546
3547 return needToProduce - haveAvailable;
3548}
3549
3550void
3551ConnStateData::stopReceiving(const char *error)
3552{
3553 debugs(33, 4, "receiving error (" << clientConnection << "): " << error <<
3554 "; old sending error: " <<
3555 (stoppedSending() ? stoppedSending_ : "none"));
3556
3557 if (const char *oldError = stoppedReceiving()) {
3558 debugs(33, 3, "already stopped receiving: " << oldError);
3559 return; // nothing has changed as far as this connection is concerned
3560 }
3561
3562 stoppedReceiving_ = error;
3563
3564 if (const char *sendError = stoppedSending()) {
3565 debugs(33, 3, "closing because also stopped sending: " << sendError);
3566 clientConnection->close();
3567 }
3568}
3569
3570void
3571ConnStateData::expectNoForwarding()
3572{
3573 if (bodyPipe != nullptr) {
3574 debugs(33, 4, "no consumer for virgin body " << bodyPipe->status());
3575 bodyPipe->expectNoConsumption();
3576 }
3577}
3578
3579/// initialize dechunking state
3580void
3581ConnStateData::startDechunkingRequest()
3582{
3583 Must(bodyPipe != nullptr);
3584 debugs(33, 5, "start dechunking" << bodyPipe->status());
3585 assert(!bodyParser);
3586 bodyParser = new Http1::TeChunkedParser;
3587}
3588
/// put parsed content into input buffer and clean up
/// \param withSuccess whether dechunking completed without errors
void
ConnStateData::finishDechunkingRequest(bool withSuccess)
{
    debugs(33, 5, "finish dechunking: " << withSuccess);

    if (bodyPipe != nullptr) {
        debugs(33, 7, "dechunked tail: " << bodyPipe->status());
        // keep the pipe alive past stopProducingFor(), which clears bodyPipe
        BodyPipe::Pointer myPipe = bodyPipe;
        stopProducingFor(bodyPipe, withSuccess); // sets bodyPipe->bodySize()
        Must(!bodyPipe); // we rely on it being nil after we are done with body
        if (withSuccess) {
            Must(myPipe->bodySizeKnown());
            // tell the current request its now-known body length
            Http::StreamPointer context = pipeline.front();
            if (context != nullptr && context->http && context->http->request)
                context->http->request->setContentLength(myPipe->bodySize());
        }
    }

    delete bodyParser;
    bodyParser = nullptr;
}
3611
// XXX: this is an HTTP/1-only operation
/// Writes a control message (e.g., an HTTP 1xx reply) to the client and
/// arranges for HttpControlMsgSink::wroteControlMsg() to run when done.
void
ConnStateData::sendControlMsg(HttpControlMsg msg)
{
    // record the control message in the current transaction log entry
    if (const auto context = pipeline.front()) {
        if (context->http)
            context->http->al->reply = msg.reply;
    }

    if (!isOpen()) {
        debugs(33, 3, "ignoring 1xx due to earlier closure");
        return;
    }

    // HTTP/1 1xx status messages are only valid when there is a transaction to trigger them
    if (!pipeline.empty()) {
        HttpReply::Pointer rep(msg.reply);
        Must(rep);
        // remember the callback
        cbControlMsgSent = msg.cbSuccess;

        typedef CommCbMemFunT<HttpControlMsgSink, CommIoCbParams> Dialer;
        AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, HttpControlMsgSink::wroteControlMsg);

        if (!writeControlMsgAndCall(rep.getRaw(), call)) {
            // but still inform the caller (so it may resume its operation)
            doneWithControlMsg();
        }
        return;
    }

    debugs(33, 3, " closing due to missing context for 1xx");
    clientConnection->close();
}
3646
3647void
3648ConnStateData::doneWithControlMsg()
3649{
3650 HttpControlMsgSink::doneWithControlMsg();
3651
3652 if (Http::StreamPointer deferredRequest = pipeline.front()) {
3653 debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded after control msg wrote");
3654 ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
3655 }
3656}
3657
/// Our close handler called by Comm when the pinned connection is closed
void
ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
{
    // FwdState might repin a failed connection sooner than this close
    // callback is called for the failed connection.
    assert(pinning.serverConnection == io.conn);
    pinning.closeHandler = nullptr; // Comm unregisters handlers before calling
    const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
    pinning.serverConnection->noteClosure();
    unpinConnection(false); // the connection is already closing; do not close again

    if (sawZeroReply && clientConnection != nullptr) {
        debugs(33, 3, "Closing client connection on pinned zero reply.");
        clientConnection->close();
    }

}
3676
/// Pins the given still-in-use server connection to this client connection.
/// Unlike notePinnedConnectionBecameIdle(), does not start remote-closure
/// monitoring: that monitoring is for idle pinned connections only.
void
ConnStateData::pinBusyConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest::Pointer &request)
{
    pinConnection(pinServer, *request);
}
3682
/// Pins the given no-longer-busy server connection and, if the client is
/// idle, kicks request processing that may have been blocked on it.
void
ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
{
    Must(pic.connection);
    Must(pic.request);
    pinConnection(pic.connection, *pic.request);

    // monitor pinned server connection for remote-end closures.
    startPinnedConnectionMonitoring();

    if (pipeline.empty())
        kick(); // in case parseRequests() was blocked by a busy pic.connection
}
3696
3697/// Forward future client requests using the given server connection.
3698void
3699ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest &request)
3700{
3701 if (Comm::IsConnOpen(pinning.serverConnection) &&
3702 pinning.serverConnection->fd == pinServer->fd) {
3703 debugs(33, 3, "already pinned" << pinServer);
3704 return;
3705 }
3706
3707 unpinConnection(true); // closes pinned connection, if any, and resets fields
3708
3709 pinning.serverConnection = pinServer;
3710
3711 debugs(33, 3, pinning.serverConnection);
3712
3713 Must(pinning.serverConnection != nullptr);
3714
3715 const char *pinnedHost = "[unknown]";
3716 pinning.host = xstrdup(request.url.host());
3717 pinning.port = request.url.port();
3718 pinnedHost = pinning.host;
3719 pinning.pinned = true;
3720 pinning.auth = request.flags.connectionAuth;
3721 char stmp[MAX_IPSTRLEN];
3722 char desc[FD_DESC_SZ];
3723 const auto peer = pinning.peer();
3724 snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
3725 (pinning.auth || !peer) ? pinnedHost : peer->name,
3726 clientConnection->remote.toUrl(stmp,MAX_IPSTRLEN),
3727 clientConnection->fd);
3728 fd_note(pinning.serverConnection->fd, desc);
3729
3730 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3731 pinning.closeHandler = JobCallback(33, 5,
3732 Dialer, this, ConnStateData::clientPinnedConnectionClosed);
3733 // remember the pinned connection so that cb does not unpin a fresher one
3734 typedef CommCloseCbParams Params;
3735 Params &params = GetCommParams<Params>(pinning.closeHandler);
3736 params.conn = pinning.serverConnection;
3737 comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
3738}
3739
3740/// [re]start monitoring pinned connection for peer closures so that we can
3741/// propagate them to an _idle_ client pinned to that peer
3742void
3743ConnStateData::startPinnedConnectionMonitoring()
3744{
3745 if (pinning.readHandler != nullptr)
3746 return; // already monitoring
3747
3748 typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
3749 pinning.readHandler = JobCallback(33, 3,
3750 Dialer, this, ConnStateData::clientPinnedConnectionRead);
3751 Comm::Read(pinning.serverConnection, pinning.readHandler);
3752}
3753
3754void
3755ConnStateData::stopPinnedConnectionMonitoring()
3756{
3757 if (pinning.readHandler != nullptr) {
3758 Comm::ReadCancel(pinning.serverConnection->fd, pinning.readHandler);
3759 pinning.readHandler = nullptr;
3760 }
3761}
3762
#if USE_OPENSSL
/// Handles a ready-for-reading event on an idle pinned TLS connection.
/// \returns true if monitoring was restarted (so the connection survives)
bool
ConnStateData::handleIdleClientPinnedTlsRead()
{
    // A ready-for-reading connection means that the TLS server either closed
    // the connection, sent us some unexpected HTTP data, or started TLS
    // renegotiations. We should close the connection except for the last case.

    Must(pinning.serverConnection != nullptr);
    auto ssl = fd_table[pinning.serverConnection->fd].ssl.get();
    if (!ssl)
        return false; // not a TLS connection after all; let the caller close it

    // probe with a one-byte read; a renegotiation is expected to surface as
    // an SSL_ERROR_WANT_* condition (handled below) rather than as data
    char buf[1];
    const int readResult = SSL_read(ssl, buf, sizeof(buf));

    if (readResult > 0 || SSL_pending(ssl) > 0) {
        debugs(83, 2, pinning.serverConnection << " TLS application data read");
        return false; // unexpected application data on an idle connection
    }

    switch(const int error = SSL_get_error(ssl, readResult)) {
    case SSL_ERROR_WANT_WRITE:
        debugs(83, DBG_IMPORTANT, pinning.serverConnection << " TLS SSL_ERROR_WANT_WRITE request for idle pinned connection");
        [[fallthrough]]; // to restart monitoring, for now

    case SSL_ERROR_NONE:
    case SSL_ERROR_WANT_READ:
        startPinnedConnectionMonitoring();
        return true;

    default:
        debugs(83, 2, pinning.serverConnection << " TLS error: " << error);
        return false;
    }

    // not reached
    return true;
}
#endif
3803
/// Our read handler called by Comm when the server either closes an idle pinned connection or
/// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
void
ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
{
    pinning.readHandler = nullptr; // Comm unregisters handlers before calling

    if (io.flag == Comm::ERR_CLOSING)
        return; // close handler will clean up

    Must(pinning.serverConnection == io.conn);

#if USE_OPENSSL
    if (handleIdleClientPinnedTlsRead())
        return; // e.g., a TLS renegotiation; monitoring was restarted
#endif

    const bool clientIsIdle = pipeline.empty();

    debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " <<
           io.size << (clientIsIdle ? " with idle client" : ""));

    pinning.serverConnection->close();

    // If we are still sending data to the client, do not close now. When we are done sending,
    // ConnStateData::kick() checks pinning.serverConnection and will close.
    // However, if we are idle, then we must close to inform the idle client and minimize races.
    if (clientIsIdle && clientConnection != nullptr)
        clientConnection->close();
}
3834
/// Returns the pinned server connection if it is still usable for the given
/// request; otherwise unpins (closing the connection) and throws an ErrorState.
Comm::ConnectionPointer
ConnStateData::borrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
{
    debugs(33, 7, pinning.serverConnection);
    Must(request);

    // builds the error to throw, unpinning/closing the connection first
    const auto pinningError = [&](const err_type type) {
        unpinConnection(true);
        HttpRequestPointer requestPointer = request;
        return ErrorState::NewForwarding(type, requestPointer, ale);
    };

    if (!Comm::IsConnOpen(pinning.serverConnection))
        throw pinningError(ERR_ZERO_SIZE_OBJECT);

    // connection-authenticated sessions must keep talking to the same host
    if (pinning.auth && pinning.host && strcasecmp(pinning.host, request->url.host()) != 0)
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST

    if (pinning.port != request->url.port())
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST

    if (pinning.serverConnection->toGoneCachePeer())
        throw pinningError(ERR_ZERO_SIZE_OBJECT);

    if (pinning.peerAccessDenied)
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_FORWARDING_DENIED

    // the borrower owns the connection now; stop watching for remote closures
    stopPinnedConnectionMonitoring();
    return pinning.serverConnection;
}
3865
3866Comm::ConnectionPointer
3867ConnStateData::BorrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
3868{
3869 if (const auto connManager = request ? request->pinnedConnection() : nullptr)
3870 return connManager->borrowPinnedConnection(request, ale);
3871
3872 // ERR_CANNOT_FORWARD is somewhat misleading here; we can still forward, but
3873 // there is no point since the client connection is now gone
3874 HttpRequestPointer requestPointer = request;
3875 throw ErrorState::NewForwarding(ERR_CANNOT_FORWARD, requestPointer, ale);
3876}
3877
/// Forgets the pinned server connection (optionally closing it) and resets
/// most pinning state. pinning.pinned itself is deliberately preserved.
void
ConnStateData::unpinConnection(const bool andClose)
{
    debugs(33, 3, pinning.serverConnection);

    if (Comm::IsConnOpen(pinning.serverConnection)) {
        // deregister our close handler so it does not fire for this unpinning
        if (pinning.closeHandler != nullptr) {
            comm_remove_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
            pinning.closeHandler = nullptr;
        }

        stopPinnedConnectionMonitoring();

        // close the server side socket if requested
        if (andClose)
            pinning.serverConnection->close();
        pinning.serverConnection = nullptr;
    }

    safe_free(pinning.host);

    pinning.zeroReply = false;
    pinning.peerAccessDenied = false;

    /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
     * connection has gone away */
}
3905
3906void
3907ConnStateData::terminateAll(const Error &rawError, const LogTagsErrors &lte)
3908{
3909 auto error = rawError; // (cheap) copy so that we can detail
3910 // We detail even ERR_NONE: There should be no transactions left, and
3911 // detailed ERR_NONE will be unused. Otherwise, this detail helps in triage.
3912 if (error.details.empty()) {
3913 static const auto d = MakeNamedErrorDetail("WITH_CLIENT");
3914 error.details.push_back(d);
3915 }
3916
3917 debugs(33, 3, pipeline.count() << '/' << pipeline.nrequests << " after " << error);
3918
3919 if (pipeline.empty()) {
3920 bareError.update(error); // XXX: bareLogTagsErrors
3921 } else {
3922 // We terminate the current CONNECT/PUT/etc. context below, logging any
3923 // error details, but that context may leave unparsed bytes behind.
3924 // Consume them to stop checkLogging() from logging them again later.
3925 const auto intputToConsume =
3926#if USE_OPENSSL
3927 parsingTlsHandshake ? "TLS handshake" : // more specific than CONNECT
3928#endif
3929 bodyPipe ? "HTTP request body" :
3930 pipeline.back()->mayUseConnection() ? "HTTP CONNECT" :
3931 nullptr;
3932
3933 while (const auto context = pipeline.front()) {
3934 context->noteIoError(error, lte);
3935 context->finished(); // cleanup and self-deregister
3936 assert(context != pipeline.front());
3937 }
3938
3939 if (intputToConsume && !inBuf.isEmpty()) {
3940 debugs(83, 5, "forgetting client " << intputToConsume << " bytes: " << inBuf.length());
3941 inBuf.clear();
3942 }
3943 }
3944
3945 clientConnection->close();
3946}
3947
/// log the last (attempt at) transaction if nobody else did
void
ConnStateData::checkLogging()
{
    // to simplify our logic, we assume that terminateAll() has been called
    assert(pipeline.empty());

    // do not log connections that closed after a transaction (it is normal)
    // TODO: access_log needs ACLs to match received-no-bytes connections
    if (pipeline.nrequests && inBuf.isEmpty())
        return;

    /* Create a temporary ClientHttpRequest object. Its destructor will log. */
    ClientHttpRequest http(this);
    http.req_sz = inBuf.length(); // received bytes that never became a request
    // XXX: Or we died while waiting for the pinned connection to become idle.
    http.setErrorUri("error:transaction-end-before-headers");
    http.updateError(bareError);
}
3967
3968bool
3969ConnStateData::shouldPreserveClientData() const
3970{
3971 // PROXY protocol bytes are meant for us and, hence, cannot be tunneled
3972 if (needProxyProtocolHeader_)
3973 return false;
3974
3975 // If our decision here is negative, configuration changes are irrelevant.
3976 // Otherwise, clientTunnelOnError() rechecks configuration before tunneling.
3977 if (!Config.accessList.on_unsupported_protocol)
3978 return false;
3979
3980 // TODO: Figure out whether/how we can support FTP tunneling.
3981 if (port->transport.protocol == AnyP::PROTO_FTP)
3982 return false;
3983
3984#if USE_OPENSSL
3985 if (parsingTlsHandshake)
3986 return true;
3987
3988 // the 1st HTTP request on a bumped connection
3989 if (!parsedBumpedRequestCount && switchedToHttps())
3990 return true;
3991#endif
3992
3993 // the 1st HTTP(S) request on a connection to an intercepting port
3994 if (!pipeline.nrequests && transparent())
3995 return true;
3996
3997 return false;
3998}
3999
4000NotePairs::Pointer
4001ConnStateData::notes()
4002{
4003 if (!theNotes)
4004 theNotes = new NotePairs;
4005 return theNotes;
4006}
4007
4008std::ostream &
4009operator <<(std::ostream &os, const ConnStateData::PinnedIdleContext &pic)
4010{
4011 return os << pic.connection << ", request=" << pic.request;
4012}
4013
4014std::ostream &
4015operator <<(std::ostream &os, const ConnStateData::ServerConnectionContext &scc)
4016{
4017 return os << scc.conn_ << ", srv_bytes=" << scc.preReadServerBytes.length();
4018}
4019