/*
 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 33    Client-side Routines */

/**
 \defgroup ClientSide Client-Side Logics
 *
 \section cserrors Errors and client side
 *
 \par Problem the first:
 * the store entry is no longer authoritative on the
 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
 * of client_side_reply.c.
 * Problem the second: resources are wasted if we delay in cleaning up.
 * Problem the third: we can't depend on a connection close to clean up.
 *
 \par Nice thing the first:
 * Any step in the stream can callback with data
 * representing an error.
 * Nice thing the second: once you stop requesting reads from upstream,
 * upstream can be stopped too.
 *
 \par Solution #1:
 * Error has a callback mechanism to hand over a membuf
 * with the error content. The failing node pushes that back as the
 * reply. Can this be generalised to reduce duplicate efforts?
 * A: Possibly. For now, only one location uses this.
 * How to deal with pre-stream errors?
 * Tell client_side_reply that we *want* an error page before any
 * stream calls occur. Then we simply read as normal.
 *
 *
 \section pconn_logic Persistent connection logic:
 *
 \par
 * requests (httpClientRequest structs) get added to the connection
 * list, with the current one being chr
 *
 \par
 * The request is *immediately* kicked off, and data flows through
 * to clientSocketRecipient.
 *
 \par
 * If the data that arrives at clientSocketRecipient is not for the current
 * request, clientSocketRecipient simply returns, without requesting more
 * data, or sending it.
 *
 \par
 * ConnStateData::kick() will then detect the presence of data in
 * the next ClientHttpRequest, and will send it, re-establishing the
 * data flow.
 */
59
#include "squid.h"
#include "acl/FilledChecklist.h"
#include "anyp/PortCfg.h"
#include "base/AsyncCallbacks.h"
#include "base/Subscription.h"
#include "base/TextException.h"
#include "CachePeer.h"
#include "client_db.h"
#include "client_side.h"
#include "client_side_reply.h"
#include "client_side_request.h"
#include "ClientRequestContext.h"
#include "clientStream.h"
#include "comm.h"
#include "comm/Connection.h"
#include "comm/Loops.h"
#include "comm/Read.h"
#include "comm/TcpAcceptor.h"
#include "comm/Write.h"
#include "CommCalls.h"
#include "debug/Messages.h"
#include "error/ExceptionErrorDetail.h"
#include "errorpage.h"
#include "fd.h"
#include "fde.h"
#include "fqdncache.h"
#include "FwdState.h"
#include "globals.h"
#include "helper.h"
#include "helper/Reply.h"
#include "http.h"
#include "http/one/RequestParser.h"
#include "http/one/TeChunkedParser.h"
#include "http/Stream.h"
#include "HttpHdrContRange.h"
#include "HttpHeaderTools.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "ident/Config.h"
#include "ident/Ident.h"
#include "internal.h"
#include "ipc/FdNotes.h"
#include "ipc/StartListening.h"
#include "log/access_log.h"
#include "MemBuf.h"
#include "MemObject.h"
#include "mime_header.h"
#include "parser/Tokenizer.h"
#include "proxyp/Header.h"
#include "proxyp/Parser.h"
#include "sbuf/Stream.h"
#include "security/Certificate.h"
#include "security/CommunicationSecrets.h"
#include "security/Io.h"
#include "security/KeyLog.h"
#include "security/NegotiationHistory.h"
#include "servers/forward.h"
#include "SquidConfig.h"
#include "StatCounters.h"
#include "StatHist.h"
#include "Store.h"
#include "TimeOrTag.h"
#include "tools.h"

#if USE_AUTH
#include "auth/UserRequest.h"
#endif
#if USE_DELAY_POOLS
#include "ClientInfo.h"
#include "MessageDelayPools.h"
#endif
#if USE_OPENSSL
#include "ssl/bio.h"
#include "ssl/context_storage.h"
#include "ssl/gadgets.h"
#include "ssl/helper.h"
#include "ssl/ProxyCerts.h"
#include "ssl/ServerBump.h"
#include "ssl/support.h"
#endif

#include <climits>
#include <cmath>
#include <limits>

#if HAVE_SYSTEMD_SD_DAEMON_H
#include <systemd/sd-daemon.h>
#endif
148
// TODO: Remove this custom dialer and simplify by creating the TcpAcceptor
// subscription later, inside clientListenerConnectionOpened() callback, just
// like htcpOpenPorts(), icpOpenPorts(), and snmpPortOpened() do it.
/// dials clientListenerConnectionOpened call
class ListeningStartedDialer:
    public CallDialer,
    public WithAnswer<Ipc::StartListeningAnswer>
{
public:
    /// callback signature invoked when the listening socket is open
    typedef void (*Handler)(AnyP::PortCfgPointer &portCfg, const Ipc::FdNoteId note, const Subscription::Pointer &sub);
    ListeningStartedDialer(Handler aHandler, AnyP::PortCfgPointer &aPortCfg, const Ipc::FdNoteId note, const Subscription::Pointer &aSub):
        handler(aHandler), portCfg(aPortCfg), portTypeNote(note), sub(aSub) {}

    /* CallDialer API */
    void print(std::ostream &os) const override {
        os << '(' << answer_ << ", " << FdNote(portTypeNote) << " port=" << (void*)&portCfg << ')';
    }

    // NOTE(review): canDial()/dial() look like they are invoked through
    // templated AsyncCall machinery rather than CallDialer virtuals; confirm
    // before marking them `override`.
    virtual bool canDial(AsyncCall &) const { return true; }
    virtual void dial(AsyncCall &) { (handler)(portCfg, portTypeNote, sub); }

    /* WithAnswer API */
    Ipc::StartListeningAnswer &answer() override { return answer_; }

public:
    Handler handler;

private:
    // answer_.conn (set/updated by IPC code) is portCfg.listenConn (used by us)
    Ipc::StartListeningAnswer answer_; ///< StartListening() results
    AnyP::PortCfgPointer portCfg;      ///< from HttpPortList
    Ipc::FdNoteId portTypeNote;        ///< Type of IPC socket being opened
    Subscription::Pointer sub;         ///< The handler to be subscribed for this connection listener
};
183
/* forward declarations of file-local handlers and helpers */
static void clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub);

static IOACB httpAccept;
#if USE_IDENT
static IDCB clientIdentDone;
#endif
static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength);

static void clientUpdateStatHistCounters(const LogTags &logType, int svc_time);
static void clientUpdateStatCounters(const LogTags &logType);
static void clientUpdateHierCounters(HierarchyLogEntry *);
static bool clientPingHasFinished(ping_data const *aPing);
void prepareLogWithRequestDetails(HttpRequest *, const AccessLogEntryPointer &);
static void ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn);

char *skipLeadingSpace(char *aString);
200
#if USE_IDENT
/// Ident lookup callback: records the looked-up username (or a dash
/// placeholder when the lookup produced none) on the client connection.
static void
clientIdentDone(const char *ident, void *data)
{
    const auto conn = static_cast<ConnStateData *>(data);
    const char *user = ident ? ident : dash_str;
    xstrncpy(conn->clientConnection->rfc931, user, USER_IDENT_SZ);
}
#endif
209
210 void
211 clientUpdateStatCounters(const LogTags &logType)
212 {
213 ++statCounter.client_http.requests;
214
215 if (logType.isTcpHit())
216 ++statCounter.client_http.hits;
217
218 if (logType.oldType == LOG_TCP_HIT)
219 ++statCounter.client_http.disk_hits;
220 else if (logType.oldType == LOG_TCP_MEM_HIT)
221 ++statCounter.client_http.mem_hits;
222 }
223
224 void
225 clientUpdateStatHistCounters(const LogTags &logType, int svc_time)
226 {
227 statCounter.client_http.allSvcTime.count(svc_time);
228 /**
229 * The idea here is not to be complete, but to get service times
230 * for only well-defined types. For example, we don't include
231 * LOG_TCP_REFRESH_FAIL because its not really a cache hit
232 * (we *tried* to validate it, but failed).
233 */
234
235 switch (logType.oldType) {
236
237 case LOG_TCP_REFRESH_UNMODIFIED:
238 statCounter.client_http.nearHitSvcTime.count(svc_time);
239 break;
240
241 case LOG_TCP_INM_HIT:
242 case LOG_TCP_IMS_HIT:
243 statCounter.client_http.nearMissSvcTime.count(svc_time);
244 break;
245
246 case LOG_TCP_HIT:
247
248 case LOG_TCP_MEM_HIT:
249
250 case LOG_TCP_OFFLINE_HIT:
251 statCounter.client_http.hitSvcTime.count(svc_time);
252 break;
253
254 case LOG_TCP_MISS:
255
256 case LOG_TCP_CLIENT_REFRESH_MISS:
257 statCounter.client_http.missSvcTime.count(svc_time);
258 break;
259
260 default:
261 /* make compiler warnings go away */
262 break;
263 }
264 }
265
266 bool
267 clientPingHasFinished(ping_data const *aPing)
268 {
269 if (0 != aPing->stop.tv_sec && 0 != aPing->start.tv_sec)
270 return true;
271
272 return false;
273 }
274
/// Updates peer-selection statistics (cache digest, ICP, netdb) based on
/// how the transaction's next hop was chosen.
void
clientUpdateHierCounters(HierarchyLogEntry * someEntry)
{
    ping_data *i;

    switch (someEntry->code) {
#if USE_CACHE_DIGESTS

    case CD_PARENT_HIT:

    case CD_SIBLING_HIT:
        ++ statCounter.cd.times_used;
        break;
#endif

    case SIBLING_HIT:

    case PARENT_HIT:

    case FIRST_PARENT_MISS:

    case CLOSEST_PARENT_MISS:
        // ICP was consulted for these selections
        ++ statCounter.icp.times_used;
        i = &someEntry->ping;

        if (clientPingHasFinished(i))
            statCounter.icp.querySvcTime.count(tvSubUsec(i->start, i->stop));

        if (i->timeout)
            ++ statCounter.icp.query_timeouts;

        break;

    case CLOSEST_PARENT:

    case CLOSEST_DIRECT:
        // netdb measurements drove these selections
        ++ statCounter.netdb.times_used;

        break;

    default:
        break;
    }
}
319
/// Updates all transaction-completion statistics: request/hit/error tallies,
/// service-time histograms, and hierarchy (peer selection) counters.
void
ClientHttpRequest::updateCounters()
{
    clientUpdateStatCounters(loggingTags());

    if (request->error)
        ++ statCounter.client_http.errors;

    clientUpdateStatHistCounters(loggingTags(),
                                 tvSubMsec(al->cache.start_time, current_time));

    clientUpdateHierCounters(&request->hier);
}
333
/// Copies request-derived details (headers, adaptation/ICAP history, method,
/// version, hierarchy, and stats) into the access log entry before logging.
void
prepareLogWithRequestDetails(HttpRequest *request, const AccessLogEntryPointer &aLogEntry)
{
    assert(request);
    assert(aLogEntry != nullptr);

    if (Config.onoff.log_mime_hdrs) {
        MemBuf mb;
        mb.init();
        request->header.packInto(&mb);
        //This is the request after adaptation or redirection
        aLogEntry->headers.adapted_request = xstrdup(mb.buf);

        // the virgin request is saved to aLogEntry->request
        if (aLogEntry->request) {
            mb.reset();
            aLogEntry->request->header.packInto(&mb);
            aLogEntry->headers.request = xstrdup(mb.buf);
        }

#if USE_ADAPTATION
        const Adaptation::History::Pointer ah = request->adaptLogHistory();
        if (ah != nullptr) {
            mb.reset();
            ah->lastMeta.packInto(&mb);
            aLogEntry->adapt.last_meta = xstrdup(mb.buf);
        }
#endif

        mb.clean();
    }

#if ICAP_CLIENT
    const Adaptation::Icap::History::Pointer ih = request->icapHistory();
    if (ih != nullptr)
        ih->processingTime(aLogEntry->icap.processingTime);
#endif

    aLogEntry->http.method = request->method;
    aLogEntry->http.version = request->http_ver;
    aLogEntry->hier = request->hier;
    aLogEntry->cache.extuser = request->extacl_user.termedBuf();

    // Adapted request, if any, inherits and then collects all the stats, but
    // the virgin request gets logged instead; copy the stats to log them.
    // TODO: avoid losses by keeping these stats in a shared history object?
    if (aLogEntry->request) {
        aLogEntry->request->dnsWait = request->dnsWait;
        aLogEntry->request->error = request->error;
    }
}
385
/// Finalizes and emits the access log entry for this transaction, then
/// (ACL permitting) updates performance counters and the client database.
void
ClientHttpRequest::logRequest()
{
    if (!out.size && loggingTags().oldType == LOG_TAG_NONE)
        debugs(33, 5, "logging half-baked transaction: " << log_uri);

    al->icp.opcode = ICP_INVALID;
    al->url = log_uri;
    debugs(33, 9, "clientLogRequest: al.url='" << al->url << "'");

    // prefer the reply already attached to the log entry; otherwise fall
    // back to the freshest reply of the logging store entry (if any)
    const auto findReply = [this]() -> const HttpReply * {
        if (al->reply)
            return al->reply.getRaw();
        if (const auto le = loggingEntry())
            return le->hasFreshestReply();
        return nullptr;
    };
    if (const auto reply = findReply()) {
        al->http.code = reply->sline.status();
        al->http.content_type = reply->content_type.termedBuf();
    }

    debugs(33, 9, "clientLogRequest: http.code='" << al->http.code << "'");

    if (loggingEntry() && loggingEntry()->mem_obj && loggingEntry()->objectLen() >= 0)
        al->cache.objectSize = loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?

    al->http.clientRequestSz.header = req_sz;
    // the virgin request is saved to al->request
    if (al->request && al->request->body_pipe)
        al->http.clientRequestSz.payloadData = al->request->body_pipe->producedSize();
    al->http.clientReplySz.header = out.headers_sz;
    // XXX: calculate without payload encoding or headers !!
    al->http.clientReplySz.payloadData = out.size - out.headers_sz; // pretend its all un-encoded data for now.

    al->cache.highOffset = out.offset;

    tvSub(al->cache.trTime, al->cache.start_time, current_time);

    if (request)
        prepareLogWithRequestDetails(request, al);

#if USE_OPENSSL && 0

    /* This is broken. Fails if the connection has been closed. Needs
     * to snarf the ssl details some place earlier..
     */
    if (getConn() != NULL)
        al->cache.ssluser = sslGetUserEmail(fd_table[getConn()->fd].ssl);

#endif

    /* Add notes (if we have a request to annotate) */
    if (request) {
        SBuf matched;
        for (auto h: Config.notes) {
            if (h->match(request, al->reply.getRaw(), al, matched)) {
                request->notes()->add(h->key(), matched);
                debugs(33, 3, h->key() << " " << matched);
            }
        }
        // The al->notes and request->notes must point to the same object.
        al->syncNotes(request);
    }

    ACLFilledChecklist checklist(nullptr, request, nullptr);
    if (al->reply) {
        checklist.reply = al->reply.getRaw();
        HTTPMSGLOCK(checklist.reply);
    }

    if (request) {
        HTTPMSGUNLOCK(al->adapted_request);
        al->adapted_request = request;
        HTTPMSGLOCK(al->adapted_request);
    }
    // no need checklist.syncAle(): already synced
    checklist.al = al;
    accessLogLog(al, &checklist);

    // stats_collection ACL (when configured) decides whether this
    // transaction also updates performance counters and clientdb
    bool updatePerformanceCounters = true;
    if (Config.accessList.stats_collection) {
        ACLFilledChecklist statsCheck(Config.accessList.stats_collection, request, nullptr);
        statsCheck.al = al;
        if (al->reply) {
            statsCheck.reply = al->reply.getRaw();
            HTTPMSGLOCK(statsCheck.reply);
        }
        updatePerformanceCounters = statsCheck.fastCheck().allowed();
    }

    if (updatePerformanceCounters) {
        if (request)
            updateCounters();

        if (getConn() != nullptr && getConn()->clientConnection != nullptr)
            clientdbUpdate(getConn()->clientConnection->remote, loggingTags(), AnyP::PROTO_HTTP, out.size);
    }
}
485
/// Releases per-transaction resources (URIs, range state, request) and
/// aborts the downstream client stream if one is still attached.
void
ClientHttpRequest::freeResources()
{
    safe_free(uri);
    safe_free(redirect.location);
    range_iter.boundary.clean();
    clearRequest();

    if (client_stream.tail)
        clientStreamAbort((clientStreamNode *)client_stream.tail->data, this);
}
497
498 void
499 httpRequestFree(void *data)
500 {
501 ClientHttpRequest *http = (ClientHttpRequest *)data;
502 assert(http != nullptr);
503 delete http;
504 }
505
/* This is a handler normally called by comm_close() */
void ConnStateData::connStateClosed(const CommCloseCbParams &)
{
    if (clientConnection) {
        clientConnection->noteClosure();
        // keep closed clientConnection for logging, clientdb cleanup, etc.
    }
    deleteThis("ConnStateData::connStateClosed");
}
515
#if USE_AUTH
/// Establishes, validates, or erases this connection's authentication state.
/// Setting credentials on a fresh connection is routine; any later change
/// (duplicate, erase, or replacement) is treated as a security event and
/// forces some form of connection shutdown (see the long comment below).
void
ConnStateData::setAuth(const Auth::UserRequest::Pointer &aur, const char *by)
{
    if (auth_ == nullptr) {
        if (aur != nullptr) {
            debugs(33, 2, "Adding connection-auth to " << clientConnection << " from " << by);
            auth_ = aur;
        }
        return;
    }

    // clobbered with self-pointer
    // NP: something nasty is going on in Squid, but harmless.
    if (aur == auth_) {
        debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection << " from " << by);
        return;
    }

    /*
     * Connection-auth relies on a single set of credentials being preserved
     * for all requests on a connection once they have been setup.
     * There are several things which need to happen to preserve security
     * when connection-auth credentials change unexpectedly or are unset.
     *
     * 1) auth helper released from any active state
     *
     *    They can only be reserved by a handshake process which this
     *    connection can now never complete.
     *    This prevents helpers hanging when their connections close.
     *
     * 2) pinning is expected to be removed and server conn closed
     *
     *    The upstream link is authenticated with the same credentials.
     *    Expecting the same level of consistency we should have received.
     *    This prevents upstream being faced with multiple or missing
     *    credentials after authentication.
     *    NP: un-pin is left to the cleanup in ConnStateData::swanSong()
     *        we just trigger that cleanup here via comm_reset_close() or
     *        ConnStateData::stopReceiving()
     *
     * 3) the connection needs to close.
     *
     *    This prevents attackers injecting requests into a connection,
     *    or gateways wrongly multiplexing users into a single connection.
     *
     *    When credentials are missing closure needs to follow an auth
     *    challenge for best recovery by the client.
     *
     *    When credentials change there is nothing we can do but abort as
     *    fast as possible. Sending TCP RST instead of an HTTP response
     *    is the best-case action.
     */

    // clobbered with nul-pointer
    if (aur == nullptr) {
        debugs(33, 2, "WARNING: Graceful closure on " << clientConnection << " due to connection-auth erase from " << by);
        auth_->releaseAuthServer();
        auth_ = nullptr;
        // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
        // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
        //     we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
        stopReceiving("connection-auth removed");
        return;
    }

    // clobbered with alternative credentials
    if (aur != auth_) {
        debugs(33, 2, "ERROR: Closing " << clientConnection << " due to change of connection-auth from " << by);
        auth_->releaseAuthServer();
        auth_ = nullptr;
        // this is a fatal type of problem.
        // Close the connection immediately with TCP RST to abort all traffic flow
        comm_reset_close(clientConnection);
        return;
    }

    /* NOT REACHABLE */
}
#endif
596
597 void
598 ConnStateData::resetReadTimeout(const time_t timeout)
599 {
600 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
601 AsyncCall::Pointer callback = JobCallback(33, 5, TimeoutDialer, this, ConnStateData::requestTimeout);
602 commSetConnTimeout(clientConnection, timeout, callback);
603 }
604
605 void
606 ConnStateData::extendLifetime()
607 {
608 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
609 AsyncCall::Pointer callback = JobCallback(5, 4, TimeoutDialer, this, ConnStateData::lifetimeTimeout);
610 commSetConnTimeout(clientConnection, Config.Timeout.lifetime, callback);
611 }
612
// cleans up before destructor is called
void
ConnStateData::swanSong()
{
    debugs(33, 2, clientConnection);

    flags.readMore = false;
    clientdbEstablished(clientConnection->remote, -1); /* decrement */

    // abort all in-flight transactions and log what must be logged
    terminateAll(ERR_NONE, LogTagsErrors());
    checkLogging();

    // XXX: Closing pinned conn is too harsh: The Client may want to continue!
    unpinConnection(true);

    Server::swanSong();

#if USE_AUTH
    // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
    setAuth(nullptr, "ConnStateData::SwanSong cleanup");
#endif

    flags.swanSang = true;
}
637
/// AsyncJob exception handler: records the exception as a gateway failure
/// detail (after the base class logs it and stops the job).
void
ConnStateData::callException(const std::exception &ex)
{
    Server::callException(ex); // logs ex and stops the job

    ErrorDetail::Pointer errorDetail;
    if (const auto tex = dynamic_cast<const TextException*>(&ex))
        errorDetail = new ExceptionErrorDetail(tex->id());
    else
        errorDetail = new ExceptionErrorDetail(Here().id());
    updateError(ERR_GATEWAY_FAILURE, errorDetail);
}
650
651 void
652 ConnStateData::updateError(const Error &error)
653 {
654 if (const auto context = pipeline.front()) {
655 const auto http = context->http;
656 assert(http);
657 http->updateError(error);
658 } else {
659 bareError.update(error);
660 }
661 }
662
/// whether this object and its client connection descriptor are still usable
bool
ConnStateData::isOpen() const
{
    // evaluation order matters: fd_table is only indexed after the
    // connection is confirmed open
    return cbdataReferenceValid(this) && // XXX: checking "this" in a method
           Comm::IsConnOpen(clientConnection) &&
           !fd_table[clientConnection->fd].closing();
}
670
ConnStateData::~ConnStateData()
{
    debugs(33, 3, clientConnection);

    // these two conditions indicate cleanup bugs elsewhere, not here
    if (isOpen())
        debugs(33, DBG_IMPORTANT, "ERROR: Squid BUG: ConnStateData did not close " << clientConnection);

    if (!flags.swanSang)
        debugs(33, DBG_IMPORTANT, "ERROR: Squid BUG: ConnStateData was not destroyed properly; " << clientConnection);

    if (bodyPipe != nullptr)
        stopProducingFor(bodyPipe, false);

    delete bodyParser; // TODO: pool

#if USE_OPENSSL
    delete sslServerBump;
#endif
}
690
691 /**
692 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
693 * This is the client-side persistent connection flag. We need
694 * to set this relatively early in the request processing
695 * to handle hacks for broken servers and clients.
696 */
697 void
698 clientSetKeepaliveFlag(ClientHttpRequest * http)
699 {
700 HttpRequest *request = http->request;
701
702 debugs(33, 3, "http_ver = " << request->http_ver);
703 debugs(33, 3, "method = " << request->method);
704
705 // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
706 request->flags.proxyKeepalive = request->persistent();
707 }
708
709 int
710 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)
711 {
712 if (Config.maxRequestBodySize &&
713 bodyLength > Config.maxRequestBodySize)
714 return 1; /* too large */
715
716 return 0;
717 }
718
/// whether this transaction's request asks for multiple ranges
/// (delegates to the underlying HttpRequest)
bool
ClientHttpRequest::multipartRangeRequest() const
{
    return request->multipartRangeRequest();
}
724
/// appends the terminating multipart boundary ("--boundary--") to mb
void
clientPackTermBound(String boundary, MemBuf *mb)
{
    mb->appendf("\r\n--" SQUIDSTRINGPH "--\r\n", SQUIDSTRINGPRINT(boundary));
    debugs(33, 6, "buf offset: " << mb->size);
}
731
/// Packs one multipart range-part header (boundary line, Content-Type,
/// Content-Range, and the blank separator line) into mb.
void
clientPackRangeHdr(const HttpReplyPointer &rep, const HttpHdrRangeSpec * spec, String boundary, MemBuf * mb)
{
    HttpHeader hdr(hoReply);
    assert(rep);
    assert(spec);

    /* put boundary */
    debugs(33, 5, "appending boundary: " << boundary);
    /* rfc2046 requires to _prepend_ boundary with <crlf>! */
    mb->appendf("\r\n--" SQUIDSTRINGPH "\r\n", SQUIDSTRINGPRINT(boundary));

    /* stuff the header with required entries and pack it */

    if (rep->header.has(Http::HdrType::CONTENT_TYPE))
        hdr.putStr(Http::HdrType::CONTENT_TYPE, rep->header.getStr(Http::HdrType::CONTENT_TYPE));

    httpHeaderAddContRange(&hdr, *spec, rep->content_length);

    hdr.packInto(mb);
    hdr.clean();

    /* append <crlf> (we packed a header, not a reply) */
    mb->append("\r\n", 2);
}
757
/** returns expected content length for multi-range replies
 * note: assumes that httpHdrRangeCanonize has already been called
 * warning: assumes that HTTP headers for individual ranges at the
 * time of the actual assembly will be exactly the same as
 * the headers when clientMRangeCLen() is called */
int64_t
ClientHttpRequest::mRangeCLen() const
{
    int64_t clen = 0;
    MemBuf mb;

    assert(memObject());

    mb.init();
    HttpHdrRange::iterator pos = request->range->begin();

    while (pos != request->range->end()) {
        /* account for headers for this range */
        mb.reset();
        clientPackRangeHdr(&storeEntry()->mem().freshestReply(),
                           *pos, range_iter.boundary, &mb);
        clen += mb.size;

        /* account for range content */
        clen += (*pos)->length;

        debugs(33, 6, "clientMRangeCLen: (clen += " << mb.size << " + " << (*pos)->length << ") == " << clen);
        ++pos;
    }

    /* account for the terminating boundary */
    mb.reset();

    clientPackTermBound(range_iter.boundary, &mb);

    clen += mb.size;

    mb.clean();

    return clen;
}
799
800 /**
801 * generates a "unique" boundary string for multipart responses
802 * the caller is responsible for cleaning the string */
803 String
804 ClientHttpRequest::rangeBoundaryStr() const
805 {
806 const char *key;
807 String b(visible_appname_string);
808 b.append(":",1);
809 key = storeEntry()->getMD5Text();
810 b.append(key, strlen(key));
811 return b;
812 }
813
/**
 * Write a chunk of data to a client socket. If the reply is present,
 * send the reply headers down the wire too, and clean them up when
 * finished.
 * Pre-condition:
 *   The request is one backed by a connection, not an internal request.
 *   data context is not NULL
 *   There are no more entries in the stream chain.
 */
void
clientSocketRecipient(clientStreamNode * node, ClientHttpRequest * http,
                      HttpReply * rep, StoreIOBuffer receivedData)
{
    // do not try to deliver if client already ABORTED
    if (!http->getConn() || !cbdataReferenceValid(http->getConn()) || !Comm::IsConnOpen(http->getConn()->clientConnection))
        return;

    /* Test preconditions */
    assert(node != nullptr);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    assert(node->node.next == nullptr);
    Http::StreamPointer context = dynamic_cast<Http::Stream *>(node->data.getRaw());
    assert(context != nullptr);

    /* TODO: check offset is what we asked for */

    // TODO: enforces HTTP/1 MUST on pipeline order, but is irrelevant to HTTP/2
    if (context != http->getConn()->pipeline.front())
        context->deferRecipientForLater(node, rep, receivedData);
    else if (http->getConn()->cbControlMsgSent) // 1xx to the user is pending
        context->deferRecipientForLater(node, rep, receivedData);
    else
        http->getConn()->handleReply(rep, receivedData);
}
853
/**
 * Called when a downstream node is no longer interested in
 * our data. As we are a terminal node, this means on aborts
 * only
 */
void
clientSocketDetach(clientStreamNode * node, ClientHttpRequest * http)
{
    /* Test preconditions */
    assert(node != nullptr);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    /* Set null by ContextFree */
    assert(node->node.next == nullptr);
    /* this is the assert discussed above */
    assert(nullptr == dynamic_cast<Http::Stream *>(node->data.getRaw()));
    /* We are only called when the client socket shuts down.
     * Tell the prev pipeline member we're finished
     */
    clientStreamDetach(node, http);
}
879
/// Prepares the idle connection for its next request: (re)arms the
/// idle-client timeout and schedules a socket read.
void
ConnStateData::readNextRequest()
{
    debugs(33, 5, clientConnection << " reading next req");

    fd_note(clientConnection->fd, "Idle client: Waiting for next request");
    /**
     * Set the timeout BEFORE calling readSomeData().
     */
    resetReadTimeout(clientConnection->timeLeft(idleTimeout()));

    readSomeData();
    /** Please don't do anything with the FD past here! */
}
894
/// Re-delivers a response that was deferred because another request was
/// ahead of it in the pipeline (see clientSocketRecipient()).
static void
ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn)
{
    debugs(33, 2, conn->clientConnection << " Sending next");

    /** If the client stream is waiting on a socket write to occur, then */

    if (deferredRequest->flags.deferred) {
        /** NO data is allowed to have been sent. */
        assert(deferredRequest->http->out.size == 0);
        /** defer now. */
        clientSocketRecipient(deferredRequest->deferredparams.node,
                              deferredRequest->http,
                              deferredRequest->deferredparams.rep,
                              deferredRequest->deferredparams.queuedBuffer);
    }

    /** otherwise, the request is still active in a callback somewhere,
     * and we are done
     */
}
916
/// Advances the connection after a response completes: closes on error or
/// half-close, resumes a deferred pipelined response, or reads more input.
void
ConnStateData::kick()
{
    if (!Comm::IsConnOpen(clientConnection)) {
        debugs(33, 2, clientConnection << " Connection was closed");
        return;
    }

    if (pinning.pinned && !Comm::IsConnOpen(pinning.serverConnection)) {
        debugs(33, 2, clientConnection << " Connection was pinned but server side gone. Terminating client connection");
        clientConnection->close();
        return;
    }

    /** \par
     * We are done with the response, and we are either still receiving request
     * body (early response!) or have already stopped receiving anything.
     *
     * If we are still receiving, then clientParseRequest() below will fail.
     * (XXX: but then we will call readNextRequest() which may succeed and
     * execute a smuggled request as we are not done with the current request).
     *
     * If we stopped because we got everything, then try the next request.
     *
     * If we stopped receiving because of an error, then close now to avoid
     * getting stuck and to prevent accidental request smuggling.
     */

    if (const char *reason = stoppedReceiving()) {
        debugs(33, 3, "closing for earlier request error: " << reason);
        clientConnection->close();
        return;
    }

    /** \par
     * Attempt to parse a request from the request buffer.
     * If we've been fed a pipelined request it may already
     * be in our read buffer.
     *
     \par
     * This needs to fall through - if we're unlucky and parse the _last_ request
     * from our read buffer we may never re-register for another client read.
     */

    if (clientParseRequests()) {
        debugs(33, 3, clientConnection << ": parsed next request from buffer");
    }

    /** \par
     * Either we need to kick-start another read or, if we have
     * a half-closed connection, kill it after the last request.
     * This saves waiting for half-closed connections to finish being
     * half-closed _AND_ then, sometimes, spending "Timeout" time in
     * the keepalive "Waiting for next request" state.
     */
    if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
        debugs(33, 3, "half-closed client with no pending requests, closing");
        clientConnection->close();
        return;
    }

    /** \par
     * At this point we either have a parsed request (which we've
     * kicked off the processing for) or not. If we have a deferred
     * request (parsed but deferred for pipelining processing reasons)
     * then look at processing it. If not, simply kickstart
     * another read.
     */
    Http::StreamPointer deferredRequest = pipeline.front();
    if (deferredRequest != nullptr) {
        debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded");
        ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
    } else if (flags.readMore) {
        debugs(33, 3, clientConnection << ": calling readNextRequest()");
        readNextRequest();
    } else {
        // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
        debugs(33, DBG_IMPORTANT, MYNAME << "abandoning " << clientConnection);
    }
}
997
998 void
999 ConnStateData::stopSending(const char *error)
1000 {
1001 debugs(33, 4, "sending error (" << clientConnection << "): " << error <<
1002 "; old receiving error: " <<
1003 (stoppedReceiving() ? stoppedReceiving_ : "none"));
1004
1005 if (const char *oldError = stoppedSending()) {
1006 debugs(33, 3, "already stopped sending: " << oldError);
1007 return; // nothing has changed as far as this connection is concerned
1008 }
1009 stoppedSending_ = error;
1010
1011 if (!stoppedReceiving()) {
1012 if (const int64_t expecting = mayNeedToReadMoreBody()) {
1013 debugs(33, 5, "must still read " << expecting <<
1014 " request body bytes with " << inBuf.length() << " unused");
1015 return; // wait for the request receiver to finish reading
1016 }
1017 }
1018
1019 clientConnection->close();
1020 }
1021
1022 void
1023 ConnStateData::afterClientWrite(size_t size)
1024 {
1025 if (pipeline.empty())
1026 return;
1027
1028 auto ctx = pipeline.front();
1029 if (size) {
1030 statCounter.client_http.kbytes_out += size;
1031 if (ctx->http->loggingTags().isTcpHit())
1032 statCounter.client_http.hit_kbytes_out += size;
1033 }
1034 ctx->writeComplete(size);
1035 }
1036
1037 Http::Stream *
1038 ConnStateData::abortRequestParsing(const char *const uri)
1039 {
1040 ClientHttpRequest *http = new ClientHttpRequest(this);
1041 http->req_sz = inBuf.length();
1042 http->setErrorUri(uri);
1043 auto *context = new Http::Stream(clientConnection, http);
1044 StoreIOBuffer tempBuffer;
1045 tempBuffer.data = context->reqbuf;
1046 tempBuffer.length = HTTP_REQBUF_SZ;
1047 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
1048 clientReplyStatus, new clientReplyContext(http), clientSocketRecipient,
1049 clientSocketDetach, context, tempBuffer);
1050 return context;
1051 }
1052
1053 void
1054 ConnStateData::startShutdown()
1055 {
1056 // RegisteredRunner API callback - Squid has been shut down
1057
1058 // if connection is idle terminate it now,
1059 // otherwise wait for grace period to end
1060 if (pipeline.empty())
1061 endingShutdown();
1062 }
1063
1064 void
1065 ConnStateData::endingShutdown()
1066 {
1067 // RegisteredRunner API callback - Squid shutdown grace period is over
1068
1069 // force the client connection to close immediately
1070 // swanSong() in the close handler will cleanup.
1071 if (Comm::IsConnOpen(clientConnection))
1072 clientConnection->close();
1073 }
1074
1075 char *
1076 skipLeadingSpace(char *aString)
1077 {
1078 char *result = aString;
1079
1080 while (xisspace(*aString))
1081 ++aString;
1082
1083 return result;
1084 }
1085
1086 /**
1087 * 'end' defaults to NULL for backwards compatibility
1088 * remove default value if we ever get rid of NULL-terminated
1089 * request buffers.
1090 */
1091 const char *
1092 findTrailingHTTPVersion(const char *uriAndHTTPVersion, const char *end)
1093 {
1094 if (nullptr == end) {
1095 end = uriAndHTTPVersion + strcspn(uriAndHTTPVersion, "\r\n");
1096 assert(end);
1097 }
1098
1099 for (; end > uriAndHTTPVersion; --end) {
1100 if (*end == '\n' || *end == '\r')
1101 continue;
1102
1103 if (xisspace(*end)) {
1104 if (strncasecmp(end + 1, "HTTP/", 5) == 0)
1105 return end + 1;
1106 else
1107 break;
1108 }
1109 }
1110
1111 return nullptr;
1112 }
1113
/// Rewrites a request-URI received on an accelerator (reverse proxy) port
/// into an absolute URL, honoring the port's vhost/vport/defaultsite options.
/// \param conn the client connection the request arrived on
/// \param hp parser holding the already-parsed request line and headers
/// \returns a malloc()ed absolute URL, or nullptr to keep the URI unchanged
static char *
prepareAcceleratedURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
{
    int vhost = conn->port->vhost;
    int vport = conn->port->vport;
    static char ipbuf[MAX_IPSTRLEN];

    /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */

    static const SBuf cache_object("cache_object://");
    if (hp->requestUri().startsWith(cache_object))
        return nullptr; /* already in good shape */

    // XXX: re-use proper URL parser for this
    SBuf url = hp->requestUri(); // use full provided URI if we abort
    // Reduce an absolute-form URI to its path (origin-form) so the branches
    // below can prepend the correct scheme://authority for this accel port.
    do { // use a loop so we can break out of it
        ::Parser::Tokenizer tok(url);
        if (tok.skip('/')) // origin-form URL already.
            break;

        if (conn->port->vhost)
            return nullptr; /* already in good shape */

        // skip the URI scheme
        static const CharacterSet uriScheme = CharacterSet("URI-scheme","+-.") + CharacterSet::ALPHA + CharacterSet::DIGIT;
        static const SBuf uriSchemeEnd("://");
        if (!tok.skipAll(uriScheme) || !tok.skip(uriSchemeEnd))
            break;

        // skip the authority segment
        // RFC 3986 complex nested ABNF for "authority" boils down to this:
        static const CharacterSet authority = CharacterSet("authority","-._~%:@[]!$&'()*+,;=") +
                                              CharacterSet::HEXDIG + CharacterSet::ALPHA + CharacterSet::DIGIT;
        if (!tok.skipAll(authority))
            break;

        static const SBuf slashUri("/");
        const SBuf t = tok.remaining();
        if (t.isEmpty())
            url = slashUri;
        else if (t[0]=='/') // looks like path
            url = t;
        else if (t[0]=='?' || t[0]=='#') { // looks like query or fragment. fix '/'
            url = slashUri;
            url.append(t);
        } // else do nothing. invalid path

    } while(false);

#if SHOULD_REJECT_UNKNOWN_URLS
    // reject URI which are not well-formed even after the processing above
    if (url.isEmpty() || url[0] != '/') {
        hp->parseStatusCode = Http::scBadRequest;
        return conn->abortRequestParsing("error:invalid-request");
    }
#endif

    // vport < 0 means "use the port the client actually connected to"
    if (vport < 0)
        vport = conn->clientConnection->local.port();

    char *receivedHost = nullptr;
    if (vhost && (receivedHost = hp->getHostHeaderField())) {
        // vhost mode: authority comes from the client-supplied Host header
        SBuf host(receivedHost);
        debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport);
        if (vport > 0) {
            // remove existing :port (if any), cope with IPv6+ without port
            const auto lastColonPos = host.rfind(':');
            if (lastColonPos != SBuf::npos && *host.rbegin() != ']') {
                host.chop(0, lastColonPos); // truncate until the last colon
            }
            host.appendf(":%d", vport);
        } // else nothing to alter port-wise.
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const auto url_sz = scheme.length() + host.length() + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH SQUIDSBUFPH, SQUIDSBUFPRINT(scheme), SQUIDSBUFPRINT(host), SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VHOST REWRITE: " << uri);
        return uri;
    } else if (conn->port->defaultsite /* && !vhost */) {
        // defaultsite mode: authority comes from squid.conf
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn->port->defaultsite << " + vport=" << vport);
        char vportStr[32];
        vportStr[0] = '\0';
        if (vport > 0) {
            snprintf(vportStr, sizeof(vportStr),":%d",vport);
        }
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + strlen(conn->port->defaultsite) + sizeof(vportStr) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s%s" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), conn->port->defaultsite, vportStr, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << uri);
        return uri;
    } else if (vport > 0 /* && (!vhost || no Host:) */) {
        debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport);
        /* Put the local socket IP address as the hostname, with whatever vport we found */
        conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + sizeof(ipbuf) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), ipbuf, vport, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VPORT REWRITE: " << uri);
        return uri;
    }

    // no rewrite rule applied; caller keeps the URI as received
    return nullptr;
}
1221
1222 static char *
1223 buildUrlFromHost(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1224 {
1225 char *uri = nullptr;
1226 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1227 if (const char *host = hp->getHostHeaderField()) {
1228 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1229 const int url_sz = scheme.length() + strlen(host) + hp->requestUri().length() + 32;
1230 uri = static_cast<char *>(xcalloc(url_sz, 1));
1231 snprintf(uri, url_sz, SQUIDSBUFPH "://%s" SQUIDSBUFPH,
1232 SQUIDSBUFPRINT(scheme),
1233 host,
1234 SQUIDSBUFPRINT(hp->requestUri()));
1235 }
1236 return uri;
1237 }
1238
/// Builds an absolute URL for a request received inside a bumped TLS tunnel.
/// Prefers the Host header; falls back to the SNI or CONNECT target.
/// \returns a malloc()ed URL, or nullptr to keep the URI as received
char *
ConnStateData::prepareTlsSwitchingURL(const Http1::RequestParserPointer &hp)
{
    Must(switchedToHttps());

    if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
        return nullptr; /* already in good shape */

    char *uri = buildUrlFromHost(this, hp);
#if USE_OPENSSL
    if (!uri) {
        // no Host header; fall back to what the TLS handshake/CONNECT told us
        Must(tlsConnectPort);
        Must(!tlsConnectHostOrIp.isEmpty());
        SBuf useHost;
        if (!tlsClientSni().isEmpty())
            useHost = tlsClientSni();
        else
            useHost = tlsConnectHostOrIp;

        const SBuf &scheme = AnyP::UriScheme(transferProtocol.protocol).image();
        // extra bytes cover separators/port digits and rewrite headroom
        const int url_sz = scheme.length() + useHost.length() + hp->requestUri().length() + 32;
        uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH ":%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme),
                 SQUIDSBUFPRINT(useHost),
                 tlsConnectPort,
                 SQUIDSBUFPRINT(hp->requestUri()));
    }
#endif
    if (uri)
        debugs(33, 5, "TLS switching host rewrite: " << uri);
    return uri;
}
1272
1273 static char *
1274 prepareTransparentURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1275 {
1276 // TODO Must() on URI !empty when the parser supports throw. For now avoid assert().
1277 if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
1278 return nullptr; /* already in good shape */
1279
1280 char *uri = buildUrlFromHost(conn, hp);
1281 if (!uri) {
1282 /* Put the local socket IP address as the hostname. */
1283 static char ipbuf[MAX_IPSTRLEN];
1284 conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
1285 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1286 const int url_sz = sizeof(ipbuf) + hp->requestUri().length() + 32;
1287 uri = static_cast<char *>(xcalloc(url_sz, 1));
1288 snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
1289 SQUIDSBUFPRINT(scheme),
1290 ipbuf, conn->clientConnection->local.port(), SQUIDSBUFPRINT(hp->requestUri()));
1291 }
1292
1293 if (uri)
1294 debugs(33, 5, "TRANSPARENT REWRITE: " << uri);
1295 return uri;
1296 }
1297
/// Parses one HTTP request header (request line + MIME headers) from inBuf
/// using the given parser and wraps it in a new Http::Stream.
/// \returns the new stream, a dummy error stream (on parse/policy failures),
///          or nullptr when more data is needed to finish parsing
Http::Stream *
ConnStateData::parseHttpRequest(const Http1::RequestParserPointer &hp)
{
    /* Attempt to parse the first line; this will define where the method, url, version and header begin */
    {
        Must(hp);

        // keep a copy of the raw bytes in case we later tunnel them verbatim
        if (preservingClientData_)
            preservedClientData = inBuf;

        const bool parsedOk = hp->parse(inBuf);

        // sync the buffers after parsing.
        inBuf = hp->remaining();

        if (hp->needsMoreData()) {
            debugs(33, 5, "Incomplete request, waiting for end of request line");
            return nullptr;
        }

        if (!parsedOk) {
            const bool tooBig =
                hp->parseStatusCode == Http::scRequestHeaderFieldsTooLarge ||
                hp->parseStatusCode == Http::scUriTooLong;
            auto result = abortRequestParsing(
                tooBig ? "error:request-too-large" : "error:invalid-request");
            // assume that remaining leftovers belong to this bad request
            if (!inBuf.isEmpty())
                consumeInput(inBuf.length());
            return result;
        }
    }

    /* We know the whole request is in parser now */
    debugs(11, 2, "HTTP Client " << clientConnection);
    debugs(11, 2, "HTTP Client REQUEST:\n---------\n" <<
           hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol() << "\n" <<
           hp->mimeHeader() <<
           "\n----------");

    /* deny CONNECT via accelerated ports */
    if (hp->method() == Http::METHOD_CONNECT && port != nullptr && port->flags.accelSurrogate) {
        debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << transferProtocol << " Accelerator port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    /* HTTP/2 connection magic prefix starts with "PRI ".
     * Deny "PRI" method if used in HTTP/1.x or 0.9 versions.
     * If seen it signals a broken client or proxy has corrupted the traffic.
     */
    if (hp->method() == Http::METHOD_PRI && hp->messageProtocol() < Http::ProtocolVersion(2,0)) {
        debugs(33, DBG_IMPORTANT, "WARNING: PRI method received on " << transferProtocol << " port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    if (hp->method() == Http::METHOD_NONE) {
        debugs(33, DBG_IMPORTANT, "WARNING: Unsupported method: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:unsupported-request-method");
    }

    // Process headers after request line
    debugs(33, 3, "complete request received. " <<
           "prefix_sz = " << hp->messageHeaderSize() <<
           ", request-line-size=" << hp->firstLineSize() <<
           ", mime-header-size=" << hp->headerBlockSize() <<
           ", mime header block:\n" << hp->mimeHeader() << "\n----------");

    /* Ok, all headers are received */
    ClientHttpRequest *http = new ClientHttpRequest(this);

    http->req_sz = hp->messageHeaderSize();
    Http::Stream *result = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = result->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // wire this stream into the client stream pipeline (reply side + socket side)
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = result;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    /* set url */
    debugs(33,5, "Prepare absolute URL from " <<
           (transparent()?"intercept":(port->flags.accelSurrogate ? "accel":"")));
    /* Rewrite the URL in transparent or accelerator mode */
    /* NP: there are several cases to traverse here:
     *  - standard mode (forward proxy)
     *  - transparent mode (TPROXY)
     *  - transparent mode with failures
     *  - intercept mode (NAT)
     *  - intercept mode with failures
     *  - accelerator mode (reverse proxy)
     *  - internal relative-URL
     *  - mixed combos of the above with internal URL
     *  - remote interception with PROXY protocol
     *  - remote reverse-proxy with PROXY protocol
     */
    if (switchedToHttps()) {
        http->uri = prepareTlsSwitchingURL(hp);
    } else if (transparent()) {
        /* intercept or transparent mode, properly working with no failures */
        http->uri = prepareTransparentURL(this, hp);

    } else if (internalCheck(hp->requestUri())) { // NP: only matches relative-URI
        /* internal URL mode */
        /* prepend our name & port */
        http->uri = xstrdup(internalLocalUri(nullptr, hp->requestUri()));
        // We just re-wrote the URL. Must replace the Host: header.
        //  But have not parsed there yet!! flag for local-only handling.
        http->flags.internal = true;

    } else if (port->flags.accelSurrogate) {
        /* accelerator mode */
        http->uri = prepareAcceleratedURL(this, hp);
        http->flags.accel = true;
    }

    if (!http->uri) {
        /* No special rewrites have been applied above, use the
         * requested url. may be rewritten later, so make extra room */
        int url_sz = hp->requestUri().length() + Config.appendDomainLen + 5;
        http->uri = (char *)xcalloc(url_sz, 1);
        SBufToCstring(http->uri, hp->requestUri());
    }

    result->flags.parsed_ok = 1;
    return result;
}
1433
1434 bool
1435 ConnStateData::shouldCloseOnEof() const
1436 {
1437 if (pipeline.empty() && inBuf.isEmpty()) {
1438 debugs(33, 4, "yes, without active requests and unparsed input");
1439 return true;
1440 }
1441
1442 if (!Config.onoff.half_closed_clients) {
1443 debugs(33, 3, "yes, without half_closed_clients");
1444 return true;
1445 }
1446
1447 // Squid currently tries to parse (possibly again) a partially received
1448 // request after an EOF with half_closed_clients. To give that last parse in
1449 // afterClientRead() a chance, we ignore partially parsed requests here.
1450 debugs(33, 3, "no, honoring half_closed_clients");
1451 return false;
1452 }
1453
/// Discards the first byteCount bytes of buffered client input.
/// The caller guarantees 0 < byteCount <= inBuf.length().
void
ConnStateData::consumeInput(const size_t byteCount)
{
    assert(byteCount > 0 && byteCount <= inBuf.length());
    inBuf.consume(byteCount);
    debugs(33, 5, "inBuf has " << inBuf.length() << " unused bytes");
}
1461
1462 void
1463 ConnStateData::clientAfterReadingRequests()
1464 {
1465 // Were we expecting to read more request body from half-closed connection?
1466 if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection->fd)) {
1467 debugs(33, 3, "truncated body: closing half-closed " << clientConnection);
1468 clientConnection->close();
1469 return;
1470 }
1471
1472 if (flags.readMore)
1473 readSomeData();
1474 }
1475
1476 void
1477 ConnStateData::quitAfterError(HttpRequest *request)
1478 {
1479 // From HTTP p.o.v., we do not have to close after every error detected
1480 // at the client-side, but many such errors do require closure and the
1481 // client-side code is bad at handling errors so we play it safe.
1482 if (request)
1483 request->flags.proxyKeepalive = false;
1484 flags.readMore = false;
1485 debugs(33,4, "Will close after error: " << clientConnection);
1486 }
1487
#if USE_OPENSSL
/// Sends a delayed SslBump error (saved while processing CONNECT) or a
/// certificate domain-mismatch error to the client, if either applies.
/// \returns true when an error response was initiated for this stream
bool ConnStateData::serveDelayedError(Http::Stream *context)
{
    ClientHttpRequest *http = context->http;

    if (!sslServerBump)
        return false;

    assert(sslServerBump->entry);
    // Did we create an error entry while processing CONNECT?
    if (!sslServerBump->entry->isEmpty()) {
        quitAfterError(http->request);

        // Get the saved error entry and send it to the client by replacing the
        // ClientHttpRequest store entry with it.
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert(repContext);
        debugs(33, 5, "Responding with delated error for " << http->uri);
        repContext->setReplyToStoreEntry(sslServerBump->entry, "delayed SslBump error");

        // Get error details from the fake certificate-peeking request.
        http->request->error.update(sslServerBump->request->error);
        context->pullData();
        return true;
    }

    // In bump-server-first mode, we have not necessarily seen the intended
    // server name at certificate-peeking time. Check for domain mismatch now,
    // when we can extract the intended name from the bumped HTTP request.
    if (const Security::CertPointer &srvCert = sslServerBump->serverCert) {
        HttpRequest *request = http->request;
        if (!Ssl::checkX509ServerValidity(srvCert.get(), request->url.host())) {
            debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
                   "does not match domainname " << request->url.host());

            // sslproxy_cert_error may allow the admin to tolerate the mismatch
            bool allowDomainMismatch = false;
            if (Config.ssl_client.cert_error) {
                ACLFilledChecklist check(Config.ssl_client.cert_error, nullptr);
                check.sslErrors = new Security::CertErrors(Security::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH, srvCert));
                clientAclChecklistFill(check, http);
                allowDomainMismatch = check.fastCheck().allowed();
                delete check.sslErrors;
                check.sslErrors = nullptr;
            }

            if (!allowDomainMismatch) {
                quitAfterError(request);

                clientStreamNode *node = context->getClientReplyContext();
                clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
                assert (repContext);

                // preserve the hierarchy details collected by the fake CONNECT request
                request->hier = sslServerBump->request->hier;

                // Create an error object and fill it
                const auto err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request, http->al);
                err->src_addr = clientConnection->remote;
                const Security::ErrorDetail::Pointer errDetail = new Security::ErrorDetail(
                    SQUID_X509_V_ERR_DOMAIN_MISMATCH,
                    srvCert, nullptr);
                updateError(ERR_SECURE_CONNECT_FAIL, errDetail);
                repContext->setReplyToError(request->method, err);
                assert(context->http->out.offset == 0);
                context->pullData();
                return true;
            }
        }
    }

    return false;
}
#endif // USE_OPENSSL
1561
1562 /// initiate tunneling if possible or return false otherwise
1563 bool
1564 ConnStateData::tunnelOnError(const err_type requestError)
1565 {
1566 if (!Config.accessList.on_unsupported_protocol) {
1567 debugs(33, 5, "disabled; send error: " << requestError);
1568 return false;
1569 }
1570
1571 if (!preservingClientData_) {
1572 debugs(33, 3, "may have forgotten client data; send error: " << requestError);
1573 return false;
1574 }
1575
1576 ACLFilledChecklist checklist(Config.accessList.on_unsupported_protocol, nullptr);
1577 checklist.requestErrorType = requestError;
1578 fillChecklist(checklist);
1579 auto answer = checklist.fastCheck();
1580 if (answer.allowed() && answer.kind == 1) {
1581 debugs(33, 3, "Request will be tunneled to server");
1582 const auto context = pipeline.front();
1583 const auto http = context ? context->http : nullptr;
1584 const auto request = http ? http->request : nullptr;
1585 if (context)
1586 context->finished(); // Will remove from pipeline queue
1587 Comm::SetSelect(clientConnection->fd, COMM_SELECT_READ, nullptr, nullptr, 0);
1588 return initiateTunneledRequest(request, "unknown-protocol", preservedClientData);
1589 }
1590 debugs(33, 3, "denied; send error: " << requestError);
1591 return false;
1592 }
1593
1594 void
1595 clientProcessRequestFinished(ConnStateData *conn, const HttpRequest::Pointer &request)
1596 {
1597 /*
1598 * DPW 2007-05-18
1599 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
1600 * to here because calling comm_reset_close() causes http to
1601 * be freed before accessing.
1602 */
1603 if (request != nullptr && request->flags.resetTcp && Comm::IsConnOpen(conn->clientConnection)) {
1604 debugs(33, 3, "Sending TCP RST on " << conn->clientConnection);
1605 conn->flags.readMore = false;
1606 comm_reset_close(conn->clientConnection);
1607 }
1608 }
1609
/// Finishes setup of an already-parsed request (connection wiring, URL/flag
/// classification, framing validation, body pipe creation) and starts the
/// ClientRequestContext callout sequence via http->doCallouts().
/// \param conn the client connection that produced the request
/// \param hp the HTTP/1 parser, or nil for FTP-originated requests
/// \param context the stream created for this request
void
clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
{
    ClientHttpRequest *http = context->http;
    bool mustReplyToOptions = false;
    bool expectBody = false;

    // We already have the request parsed and checked, so we
    // only need to go through the final body/conn setup to doCallouts().
    assert(http->request);
    HttpRequest::Pointer request = http->request;

    // temporary hack to avoid splitting this huge function with sensitive code
    const bool isFtp = !hp;

    // Some blobs below are still HTTP-specific, but we would have to rewrite
    // this entire function to remove them from the FTP code path. Connection
    // setup and body_pipe preparation blobs are needed for FTP.

    request->manager(conn, http->al);

    // propagate connection-derived attributes into the request flags
    request->flags.accelerated = http->flags.accel;
    request->flags.sslBumped=conn->switchedToHttps();
    // TODO: decouple http->flags.accel from request->flags.sslBumped
    request->flags.noDirect = (request->flags.accelerated && !request->flags.sslBumped) ?
                              !conn->port->allow_direct : 0;
    request->sources |= isFtp ? Http::Message::srcFtp :
                        ((request->flags.sslBumped || conn->port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    if (request->flags.sslBumped) {
        // inherit the credentials established on the bumped connection
        if (conn->getAuth() != nullptr)
            request->auth_user_request = conn->getAuth();
    }
#endif

    // classify internal (icons, CGI) URLs, possibly redirecting to ourselves
    if (internalCheck(request->url.path())) {
        if (internalHostnameIs(request->url.host()) && request->url.port() == getMyPort()) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true));
            http->flags.internal = true;
        } else if (Config.onoff.global_internal_static && internalStaticCheck(request->url.path())) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (global_internal_static on)");
            request->url.setScheme(AnyP::PROTO_HTTP, "http");
            request->url.host(internalHostname());
            request->url.port(getMyPort());
            http->flags.internal = true;
            http->setLogUriToRequestUri();
        } else
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (not this proxy)");
    }

    request->flags.internal = http->flags.internal;

    if (!isFtp) {
        // XXX: for non-HTTP messages instantiate a different Http::Message child type
        // for now Squid only supports HTTP requests
        const AnyP::ProtocolVersion &http_ver = hp->messageProtocol();
        assert(request->http_ver.protocol == http_ver.protocol);
        request->http_ver.major = http_ver.major;
        request->http_ver.minor = http_ver.minor;
    }

    // RFC 7231: OPTIONS with Max-Forwards: 0 must be answered by this hop
    mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
                         (request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
    if (!urlCheckRequest(request.getRaw()) || mustReplyToOptions) {
        clientStreamNode *node = context->getClientReplyContext();
        conn->quitAfterError(request.getRaw());
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        repContext->setReplyToError(ERR_UNSUP_REQ, Http::scNotImplemented, nullptr,
                                    conn, request.getRaw(), nullptr, nullptr);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    // reject requests with conflicting/invalid message framing (smuggling defense)
    const auto frameStatus = request->checkEntityFraming();
    if (frameStatus != Http::scNone) {
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        conn->quitAfterError(request.getRaw());
        repContext->setReplyToError(ERR_INVALID_REQ, frameStatus, nullptr, conn, request.getRaw(), nullptr, nullptr);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    clientSetKeepaliveFlag(http);
    // Let tunneling code be fully responsible for CONNECT requests
    if (http->request->method == Http::METHOD_CONNECT) {
        context->mayUseConnection(true);
        conn->flags.readMore = false;
    }

#if USE_OPENSSL
    if (conn->switchedToHttps() && conn->serveDelayedError(context)) {
        clientProcessRequestFinished(conn, request);
        return;
    }
#endif

    /* Do we expect a request-body? */
    const auto chunked = request->header.chunked();
    expectBody = chunked || request->content_length > 0;
    if (!context->mayUseConnection() && expectBody) {
        request->body_pipe = conn->expectRequestBody(
                                 chunked ? -1 : request->content_length);

        /* Is it too large? */
        if (!chunked && // if chunked, we will check as we accumulate
                clientIsRequestBodyTooLargeForPolicy(request->content_length)) {
            clientStreamNode *node = context->getClientReplyContext();
            clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
            assert (repContext);
            conn->quitAfterError(request.getRaw());
            repContext->setReplyToError(ERR_TOO_BIG,
                                        Http::scContentTooLarge, nullptr,
                                        conn, http->request, nullptr, nullptr);
            assert(context->http->out.offset == 0);
            context->pullData();
            clientProcessRequestFinished(conn, request);
            return;
        }

        if (!isFtp) {
            // We may stop producing, comm_close, and/or call setReplyToError()
            // below, so quit on errors to avoid http->doCallouts()
            if (!conn->handleRequestBodyData()) {
                clientProcessRequestFinished(conn, request);
                return;
            }

            if (!request->body_pipe->productionEnded()) {
                debugs(33, 5, "need more request body");
                context->mayUseConnection(true);
                assert(conn->flags.readMore);
            }
        }
    }

    http->calloutContext = new ClientRequestContext(http);

    http->doCallouts();

    clientProcessRequestFinished(conn, request);
}
1758
/// Appends the given stream to this connection's pipeline, handing any
/// stored connection-level (bare) error over to the new request so it can
/// be logged/reported in that request's context.
void
ConnStateData::add(const Http::StreamPointer &context)
{
    debugs(33, 3, context << " to " << pipeline.count() << '/' << pipeline.nrequests);
    if (bareError) {
        // transfer ownership of the error to this request
        debugs(33, 5, "assigning " << bareError);
        assert(context);
        assert(context->http);
        context->http->updateError(bareError);
        bareError.clear();
    }
    pipeline.add(context);
}
1772
1773 int
1774 ConnStateData::pipelinePrefetchMax() const
1775 {
1776 // TODO: Support pipelined requests through pinned connections.
1777 if (pinning.pinned)
1778 return 0;
1779 return Config.pipeline_max_prefetch;
1780 }
1781
1782 /**
1783 * Limit the number of concurrent requests.
1784 * \return true when there are available position(s) in the pipeline queue for another request.
1785 * \return false when the pipeline queue is full or disabled.
1786 */
1787 bool
1788 ConnStateData::concurrentRequestQueueFilled() const
1789 {
1790 const int existingRequestCount = pipeline.count();
1791
1792 // default to the configured pipeline size.
1793 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
1794 #if USE_OPENSSL
1795 const int internalRequest = (transparent() && sslBumpMode == Ssl::bumpSplice) ? 1 : 0;
1796 #else
1797 const int internalRequest = 0;
1798 #endif
1799 const int concurrentRequestLimit = pipelinePrefetchMax() + 1 + internalRequest;
1800
1801 // when queue filled already we can't add more.
1802 if (existingRequestCount >= concurrentRequestLimit) {
1803 debugs(33, 3, clientConnection << " max concurrent requests reached (" << concurrentRequestLimit << ")");
1804 debugs(33, 5, clientConnection << " deferring new request until one is done");
1805 return true;
1806 }
1807
1808 return false;
1809 }
1810
1811 /**
1812 * Perform proxy_protocol_access ACL tests on the client which
1813 * connected to PROXY protocol port to see if we trust the
1814 * sender enough to accept their PROXY header claim.
1815 */
1816 bool
1817 ConnStateData::proxyProtocolValidateClient()
1818 {
1819 if (!Config.accessList.proxyProtocol)
1820 return proxyProtocolError("PROXY client not permitted by default ACL");
1821
1822 ACLFilledChecklist ch(Config.accessList.proxyProtocol, nullptr);
1823 fillChecklist(ch);
1824 if (!ch.fastCheck().allowed())
1825 return proxyProtocolError("PROXY client not permitted by ACLs");
1826
1827 return true;
1828 }
1829
/**
 * Perform cleanup on PROXY protocol errors.
 * If header parsing hits a fatal error terminate the connection,
 * otherwise wait for more data.
 * \param msg fatal error to log and stop on, or nil to keep waiting for data
 * \returns false, so callers can simply `return proxyProtocolError(...)`
 */
bool
ConnStateData::proxyProtocolError(const char *msg)
{
    if (msg) {
        // This is important to know, but maybe not so much that flooding the log is okay.
#if QUIET_PROXY_PROTOCOL
        // display the first of every 32 occurrences at level 1, the others at level 2.
        static uint8_t hide = 0;
        debugs(33, (hide++ % 32 == 0 ? DBG_IMPORTANT : 2), msg << " from " << clientConnection);
#else
        debugs(33, DBG_IMPORTANT, msg << " from " << clientConnection);
#endif
        mustStop(msg); // schedules termination of this connection job
    }
    return false;
}
1851
1852 /// Attempts to extract a PROXY protocol header from the input buffer and,
1853 /// upon success, stores the parsed header in proxyProtocolHeader_.
1854 /// \returns true if the header was successfully parsed
1855 /// \returns false if more data is needed to parse the header or on error
1856 bool
1857 ConnStateData::parseProxyProtocolHeader()
1858 {
1859 try {
1860 const auto parsed = ProxyProtocol::Parse(inBuf);
1861 proxyProtocolHeader_ = parsed.header;
1862 assert(bool(proxyProtocolHeader_));
1863 inBuf.consume(parsed.size);
1864 needProxyProtocolHeader_ = false;
1865 if (proxyProtocolHeader_->hasForwardedAddresses()) {
1866 clientConnection->local = proxyProtocolHeader_->destinationAddress;
1867 clientConnection->remote = proxyProtocolHeader_->sourceAddress;
1868 if ((clientConnection->flags & COMM_TRANSPARENT))
1869 clientConnection->flags ^= COMM_TRANSPARENT; // prevent TPROXY spoofing of this new IP.
1870 debugs(33, 5, "PROXY/" << proxyProtocolHeader_->version() << " upgrade: " << clientConnection);
1871 }
1872 } catch (const Parser::BinaryTokenizer::InsufficientInput &) {
1873 debugs(33, 3, "PROXY protocol: waiting for more than " << inBuf.length() << " bytes");
1874 return false;
1875 } catch (const std::exception &e) {
1876 return proxyProtocolError(e.what());
1877 }
1878 return true;
1879 }
1880
1881 void
1882 ConnStateData::receivedFirstByte()
1883 {
1884 if (receivedFirstByte_)
1885 return;
1886
1887 receivedFirstByte_ = true;
1888 resetReadTimeout(Config.Timeout.request);
1889 }
1890
/**
 * Attempt to parse one or more requests from the input buffer.
 * Returns true after completing parsing of at least one request [header]. That
 * includes cases where parsing ended with an error (e.g., a huge request).
 */
bool
ConnStateData::clientParseRequests()
{
    bool parsed_req = false;

    debugs(33, 5, clientConnection << ": attempting to parse");

    // Loop while we have read bytes that are not needed for producing the body
    // On errors, bodyPipe may become nil, but readMore will be cleared
    while (!inBuf.isEmpty() && !bodyPipe && flags.readMore) {

        // Prohibit concurrent requests when using a pinned to-server connection
        // because our Client classes do not support request pipelining.
        if (pinning.pinned && !pinning.readHandler) {
            debugs(33, 3, clientConnection << " waits for busy " << pinning.serverConnection);
            break;
        }

        /* Limit the number of concurrent requests */
        if (concurrentRequestQueueFilled())
            break;

        // try to parse the PROXY protocol header magic bytes
        if (needProxyProtocolHeader_) {
            // stop if the header is incomplete; on fatal errors,
            // parseProxyProtocolHeader() has already scheduled our stop
            if (!parseProxyProtocolHeader())
                break;

            // we have been waiting for PROXY to provide client-IP
            // for some lookups, ie rDNS and IDENT.
            whenClientIpKnown();

            // Done with PROXY protocol which has cleared preservingClientData_.
            // If the next protocol supports on_unsupported_protocol, then its
            // parseOneRequest() must reset preservingClientData_.
            assert(!preservingClientData_);
        }

        if (Http::StreamPointer context = parseOneRequest()) {
            debugs(33, 5, clientConnection << ": done parsing a request");
            // a complete header arrived; keep the connection alive longer
            extendLifetime();
            context->registerWithConn();

#if USE_OPENSSL
            if (switchedToHttps())
                parsedBumpedRequestCount++;
#endif

            processParsedRequest(context);

            parsed_req = true; // XXX: do we really need to parse everything right NOW ?

            if (context->mayUseConnection()) {
                debugs(33, 3, "Not parsing new requests, as this request may need the connection");
                break;
            }
        } else {
            // not enough buffered data for a complete request header yet;
            // a header too large for the buffer must have errored out earlier
            debugs(33, 5, clientConnection << ": not enough request data: " <<
                   inBuf.length() << " < " << Config.maxRequestHeaderSize);
            Must(inBuf.length() < Config.maxRequestHeaderSize);
            break;
        }
    }

    /* XXX where to 'finish' the parsing pass? */
    return parsed_req;
}
1962
1963 void
1964 ConnStateData::afterClientRead()
1965 {
1966 #if USE_OPENSSL
1967 if (parsingTlsHandshake) {
1968 parseTlsHandshake();
1969 return;
1970 }
1971 #endif
1972
1973 /* Process next request */
1974 if (pipeline.empty())
1975 fd_note(clientConnection->fd, "Reading next request");
1976
1977 if (!clientParseRequests()) {
1978 if (!isOpen())
1979 return;
1980 // We may get here if the client half-closed after sending a partial
1981 // request. See doClientRead() and shouldCloseOnEof().
1982 // XXX: This partially duplicates ConnStateData::kick().
1983 if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
1984 debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
1985 clientConnection->close();
1986 return;
1987 }
1988 }
1989
1990 if (!isOpen())
1991 return;
1992
1993 clientAfterReadingRequests();
1994 }
1995
1996 /**
1997 * called when new request data has been read from the socket
1998 *
1999 * \retval false called comm_close or setReplyToError (the caller should bail)
2000 * \retval true we did not call comm_close or setReplyToError
2001 */
2002 bool
2003 ConnStateData::handleReadData()
2004 {
2005 // if we are reading a body, stuff data into the body pipe
2006 if (bodyPipe != nullptr)
2007 return handleRequestBodyData();
2008 return true;
2009 }
2010
2011 /**
2012 * called when new request body data has been buffered in inBuf
2013 * may close the connection if we were closing and piped everything out
2014 *
2015 * \retval false called comm_close or setReplyToError (the caller should bail)
2016 * \retval true we did not call comm_close or setReplyToError
2017 */
2018 bool
2019 ConnStateData::handleRequestBodyData()
2020 {
2021 assert(bodyPipe != nullptr);
2022
2023 if (bodyParser) { // chunked encoding
2024 if (const err_type error = handleChunkedRequestBody()) {
2025 abortChunkedRequestBody(error);
2026 return false;
2027 }
2028 } else { // identity encoding
2029 debugs(33,5, "handling plain request body for " << clientConnection);
2030 const size_t putSize = bodyPipe->putMoreData(inBuf.c_str(), inBuf.length());
2031 if (putSize > 0)
2032 consumeInput(putSize);
2033
2034 if (!bodyPipe->mayNeedMoreData()) {
2035 // BodyPipe will clear us automagically when we produced everything
2036 bodyPipe = nullptr;
2037 }
2038 }
2039
2040 if (!bodyPipe) {
2041 debugs(33,5, "produced entire request body for " << clientConnection);
2042
2043 if (const char *reason = stoppedSending()) {
2044 /* we've finished reading like good clients,
2045 * now do the close that initiateClose initiated.
2046 */
2047 debugs(33, 3, "closing for earlier sending error: " << reason);
2048 clientConnection->close();
2049 return false;
2050 }
2051 }
2052
2053 return true;
2054 }
2055
2056 /// parses available chunked encoded body bytes, checks size, returns errors
2057 err_type
2058 ConnStateData::handleChunkedRequestBody()
2059 {
2060 debugs(33, 7, "chunked from " << clientConnection << ": " << inBuf.length());
2061
2062 try { // the parser will throw on errors
2063
2064 if (inBuf.isEmpty()) // nothing to do
2065 return ERR_NONE;
2066
2067 BodyPipeCheckout bpc(*bodyPipe);
2068 bodyParser->setPayloadBuffer(&bpc.buf);
2069 const bool parsed = bodyParser->parse(inBuf);
2070 inBuf = bodyParser->remaining(); // sync buffers
2071 bpc.checkIn();
2072
2073 // dechunk then check: the size limit applies to _dechunked_ content
2074 if (clientIsRequestBodyTooLargeForPolicy(bodyPipe->producedSize()))
2075 return ERR_TOO_BIG;
2076
2077 if (parsed) {
2078 finishDechunkingRequest(true);
2079 Must(!bodyPipe);
2080 return ERR_NONE; // nil bodyPipe implies body end for the caller
2081 }
2082
2083 // if chunk parser needs data, then the body pipe must need it too
2084 Must(!bodyParser->needsMoreData() || bodyPipe->mayNeedMoreData());
2085
2086 // if parser needs more space and we can consume nothing, we will stall
2087 Must(!bodyParser->needsMoreSpace() || bodyPipe->buf().hasContent());
2088 } catch (...) { // TODO: be more specific
2089 debugs(33, 3, "malformed chunks" << bodyPipe->status());
2090 return ERR_INVALID_REQ;
2091 }
2092
2093 debugs(33, 7, "need more chunked data" << *bodyPipe->status());
2094 return ERR_NONE;
2095 }
2096
/// quit on errors related to chunked request body handling
void
ConnStateData::abortChunkedRequestBody(const err_type error)
{
    // tell the dechunking machinery (and the body consumer) that we failed
    finishDechunkingRequest(false);

    // XXX: The code below works if we fail during initial request parsing,
    // but if we fail when the server connection is used already, the server may send
    // us its response too, causing various assertions. How to prevent that?
#if WE_KNOW_HOW_TO_SEND_ERRORS
    // NOTE(review): this permanently-disabled branch looks bit-rotted — the
    // bare CachePeer argument and HTTP_BAD_REQUEST name appear stale; revisit
    // before ever defining WE_KNOW_HOW_TO_SEND_ERRORS.
    Http::StreamPointer context = pipeline.front();
    if (context != NULL && !context->http->out.offset) { // output nothing yet
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext*>(node->data.getRaw());
        assert(repContext);
        const Http::StatusCode scode = (error == ERR_TOO_BIG) ?
                                       Http::scContentTooLarge : HTTP_BAD_REQUEST;
        repContext->setReplyToError(error, scode,
                                    repContext->http->uri,
                                    CachePeer,
                                    repContext->http->request,
                                    inBuf, NULL);
        context->pullData();
    } else {
        // close or otherwise we may get stuck as nobody will notice the error?
        comm_reset_close(clientConnection);
    }
#else
    // until we can send a proper error reply, reset the connection so that
    // the client at least notices the failure
    debugs(33, 3, "aborting chunked request without error " << error);
    comm_reset_close(clientConnection);
#endif
    flags.readMore = false;
}
2130
2131 void
2132 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer )
2133 {
2134 // request reader may get stuck waiting for space if nobody consumes body
2135 if (bodyPipe != nullptr)
2136 bodyPipe->enableAutoConsumption();
2137
2138 // kids extend
2139 }
2140
2141 /** general lifetime handler for HTTP requests */
2142 void
2143 ConnStateData::requestTimeout(const CommTimeoutCbParams &io)
2144 {
2145 if (!Comm::IsConnOpen(io.conn))
2146 return;
2147
2148 const err_type error = receivedFirstByte_ ? ERR_REQUEST_PARSE_TIMEOUT : ERR_REQUEST_START_TIMEOUT;
2149 updateError(error);
2150 if (tunnelOnError(error))
2151 return;
2152
2153 /*
2154 * Just close the connection to not confuse browsers
2155 * using persistent connections. Some browsers open
2156 * a connection and then do not use it until much
2157 * later (presumeably because the request triggering
2158 * the open has already been completed on another
2159 * connection)
2160 */
2161 debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
2162 io.conn->close();
2163 }
2164
2165 void
2166 ConnStateData::lifetimeTimeout(const CommTimeoutCbParams &io)
2167 {
2168 debugs(33, DBG_IMPORTANT, "WARNING: Closing client connection due to lifetime timeout" <<
2169 Debug::Extra << "connection: " << io.conn);
2170
2171 LogTagsErrors lte;
2172 lte.timedout = true;
2173 terminateAll(ERR_LIFETIME_EXP, lte);
2174 }
2175
/// Common initialization shared by all client-connection manager kinds;
/// kid classes refine the job further in their own constructors.
ConnStateData::ConnStateData(const MasterXaction::Pointer &xact) :
    AsyncJob("ConnStateData"), // kids overwrite
    Server(xact)
#if USE_OPENSSL
    , tlsParser(Security::HandshakeParser::fromClient)
#endif
{
    // store the details required for creating more MasterXaction objects as new requests come in
    log_addr = xact->tcpClient->remote;
    // apply the configured client_netmask to the address used for logging
    log_addr.applyClientMask(Config.Addrs.client_netmask);

    // register to receive notice of Squid signal events
    // which may affect long persisting client connections
    registerRunner();
}
2191
/// AsyncJob API: starts managing the accepted client connection —
/// tunes the socket, registers a close handler, and begins either PROXY
/// protocol validation or regular client-IP-dependent processing.
void
ConnStateData::start()
{
    BodyProducer::start();
    HttpControlMsgSink::start();

    // optionally disable Path MTU discovery on this socket, per configuration
    if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF &&
            (transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) {
#if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
        int i = IP_PMTUDISC_DONT;
        if (setsockopt(clientConnection->fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof(i)) < 0) {
            int xerrno = errno;
            debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection << " : " << xstrerr(xerrno));
        }
#else
        // warn only once per process when the platform lacks the socket option
        static bool reported = false;

        if (!reported) {
            debugs(33, DBG_IMPORTANT, "WARNING: Path MTU discovery disabling is not supported on your platform.");
            reported = true;
        }
#endif
    }

    // learn about the untimely death of our connection so we can clean up
    typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
    AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, ConnStateData::connStateClosed);
    comm_add_close_handler(clientConnection->fd, call);

    needProxyProtocolHeader_ = port->flags.proxySurrogate;
    if (needProxyProtocolHeader_) {
        if (!proxyProtocolValidateClient()) // will close the connection on failure
            return;
    } else
        whenClientIpKnown();

    // requires needProxyProtocolHeader_ which is initialized above
    preservingClientData_ = shouldPreserveClientData();
}
2230
/// Called when the (possibly PROXY-protocol-supplied) client IP is final:
/// starts address-dependent lookups (rDNS, IDENT), records the client in the
/// client database, and assigns the connection to a client delay pool.
void
ConnStateData::whenClientIpKnown()
{
    debugs(33, 7, clientConnection->remote);
    // start the reverse DNS lookup early if configured to resolve ASAP
    if (Dns::ResolveClientAddressesAsap)
        fqdncache_gethostbyaddr(clientConnection->remote, FQDN_LOOKUP_IF_MISS);

#if USE_IDENT
    // optionally start an ident lookup, subject to ident_lookup_access ACLs
    if (Ident::TheConfig.identLookup) {
        ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, nullptr, nullptr);
        fillChecklist(identChecklist);
        if (identChecklist.fastCheck().allowed())
            Ident::Start(clientConnection, clientIdentDone, this);
    }
#endif

    // record this connection in the per-client database
    clientdbEstablished(clientConnection->remote, 1);

#if USE_DELAY_POOLS
    fd_table[clientConnection->fd].clientInfo = nullptr;

    if (!Config.onoff.client_db)
        return; // client delay pools require client_db

    const auto &pools = ClientDelayPools::Instance()->pools;
    if (pools.size()) {
        ACLFilledChecklist ch(nullptr, nullptr, nullptr);
        fillChecklist(ch);
        // TODO: we check early to limit error response bandwidth but we
        // should recheck when we can honor delay_pool_uses_indirect
        for (unsigned int pool = 0; pool < pools.size(); ++pool) {

            /* pools require explicit 'allow' to assign a client into them */
            if (pools[pool]->access) {
                ch.changeAcl(pools[pool]->access);
                auto answer = ch.fastCheck();
                if (answer.allowed()) {

                    /* request client information from db after we did all checks
                       this will save hash lookup if client failed checks */
                    ClientInfo * cli = clientdbGetInfo(clientConnection->remote);
                    assert(cli);

                    /* put client info in FDE */
                    fd_table[clientConnection->fd].clientInfo = cli;

                    /* setup write limiter for this request */
                    const double burst = floor(0.5 +
                                               (pools[pool]->highwatermark * Config.ClientDelay.initial)/100.0);
                    cli->setWriteLimiter(pools[pool]->rate, burst, pools[pool]->highwatermark);
                    break; // first matching pool wins
                } else {
                    debugs(83, 4, "Delay pool " << pool << " skipped because ACL " << answer);
                }
            }
        }
    }
#endif

    // kids must extend to actually start doing something (e.g., reading)
}
2292
2293 Security::IoResult
2294 ConnStateData::acceptTls()
2295 {
2296 const auto handshakeResult = Security::Accept(*clientConnection);
2297
2298 #if USE_OPENSSL
2299 // log ASAP, even if the handshake has not completed (or failed)
2300 const auto fd = clientConnection->fd;
2301 assert(fd >= 0);
2302 keyLogger.checkpoint(*fd_table[fd].ssl, *this);
2303 #else
2304 // TODO: Support fd_table[fd].ssl dereference in other builds.
2305 #endif
2306
2307 return handshakeResult;
2308 }
2309
2310 /** Handle a new connection on an HTTP socket. */
2311 void
2312 httpAccept(const CommAcceptCbParams &params)
2313 {
2314 Assure(params.port);
2315
2316 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2317
2318 if (params.flag != Comm::OK) {
2319 // Its possible the call was still queued when the client disconnected
2320 debugs(33, 2, params.port->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2321 return;
2322 }
2323
2324 debugs(33, 4, params.conn << ": accepted");
2325 fd_note(params.conn->fd, "client http connect");
2326 const auto xact = MasterXaction::MakePortful(params.port);
2327 xact->tcpClient = params.conn;
2328
2329 // Socket is ready, setup the connection manager to start using it
2330 auto *srv = Http::NewServer(xact);
2331 // XXX: do not abandon the MasterXaction object
2332 AsyncJob::Start(srv); // usually async-calls readSomeData()
2333 }
2334
2335 /// Create TLS connection structure and update fd_table
2336 static bool
2337 httpsCreate(const ConnStateData *connState, const Security::ContextPointer &ctx)
2338 {
2339 const auto conn = connState->clientConnection;
2340 if (Security::CreateServerSession(ctx, conn, connState->port->secure, "client https start")) {
2341 debugs(33, 5, "will negotiate TLS on " << conn);
2342 return true;
2343 }
2344
2345 debugs(33, DBG_IMPORTANT, "ERROR: could not create TLS server context for " << conn);
2346 conn->close();
2347 return false;
2348 }
2349
/** negotiate an SSL connection
 * Comm read/write handler: advances the TLS handshake with the client and,
 * once it completes, logs session/certificate details, finalizes a bumped
 * CONNECT (if any), and starts reading encrypted requests.
 */
static void
clientNegotiateSSL(int fd, void *data)
{
    ConnStateData *conn = (ConnStateData *)data;

    const auto handshakeResult = conn->acceptTls();
    switch (handshakeResult.category) {
    case Security::IoResult::ioSuccess:
        break; // handshake done; continue with post-handshake work below

    case Security::IoResult::ioWantRead:
        // wait for more handshake bytes from the client, then retry
        Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_READ, clientNegotiateSSL, conn, 0);
        return;

    case Security::IoResult::ioWantWrite:
        // wait until we can write more handshake bytes, then retry
        Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, conn, 0);
        return;

    case Security::IoResult::ioError:
        debugs(83, (handshakeResult.important ? Important(62) : 2), "ERROR: " << handshakeResult.errorDescription <<
               " while accepting a TLS connection on " << conn->clientConnection << ": " << handshakeResult.errorDetail);
        // TODO: No ConnStateData::tunnelOnError() on this forward-proxy code
        // path because we cannot know the intended connection target?
        conn->updateError(ERR_SECURE_ACCEPT_FAIL, handshakeResult.errorDetail);
        conn->clientConnection->close();
        return;
    }

    Security::SessionPointer session(fd_table[fd].ssl);

#if USE_OPENSSL
    // report whether the TLS session was resumed or freshly negotiated
    if (Security::SessionIsResumed(session)) {
        debugs(83, 2, "Session " << SSL_get_session(session.get()) <<
               " reused on FD " << fd << " (" << fd_table[fd].ipaddr <<
               ":" << (int)fd_table[fd].remote_port << ")");
    } else {
        if (Debug::Enabled(83, 4)) {
            /* Write out the SSL session details.. actually the call below, but
             * OpenSSL headers do strange typecasts confusing GCC.. */
            /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
            PEM_ASN1_write(reinterpret_cast<i2d_of_void *>(i2d_SSL_SESSION),
                           PEM_STRING_SSL_SESSION, debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);

#elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)

            /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
             * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
             * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
             * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
             * Because there are two possible usable cast, if you get an error here, try the other
             * commented line. */

            PEM_ASN1_write((int(*)())i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
            /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
             */
#else
            debugs(83, 4, "With " OPENSSL_VERSION_TEXT ", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source.");

#endif
            /* Note: This does not automatically fflush the log file.. */
        }

        debugs(83, 2, "New session " << SSL_get_session(session.get()) <<
               " on FD " << fd << " (" << fd_table[fd].ipaddr << ":" <<
               fd_table[fd].remote_port << ")");
    }
#else
    debugs(83, 2, "TLS session reuse not yet implemented.");
#endif

    // Connection established. Retrieve TLS connection parameters for logging.
    conn->clientConnection->tlsNegotiations()->retrieveNegotiatedInfo(session);

#if USE_OPENSSL
    // log the client certificate details, if the client presented one
    X509 *client_cert = SSL_get_peer_certificate(session.get());

    if (client_cert) {
        debugs(83, 3, "FD " << fd << " client certificate: subject: " <<
               Security::SubjectName(*client_cert));

        debugs(83, 3, "FD " << fd << " client certificate: issuer: " <<
               Security::IssuerName(*client_cert));

        X509_free(client_cert);
    } else {
        debugs(83, 5, "FD " << fd << " has no client certificate.");
    }
#else
    debugs(83, 2, "Client certificate requesting not yet implemented.");
#endif

    // If we are called, then bumped CONNECT has succeeded. Finalize it.
    if (auto xact = conn->pipeline.front()) {
        if (xact->http && xact->http->request && xact->http->request->method == Http::METHOD_CONNECT)
            xact->finished();
        // cannot proceed with encryption if requests wait for plain responses
        Must(conn->pipeline.empty());
    }
    /* careful: finished() above frees request, host, etc. */

    conn->readSomeData();
}
2462
2463 /**
2464 * If Security::ContextPointer is given, starts reading the TLS handshake.
2465 * Otherwise, calls switchToHttps to generate a dynamic Security::ContextPointer.
2466 */
2467 static void
2468 httpsEstablish(ConnStateData *connState, const Security::ContextPointer &ctx)
2469 {
2470 assert(connState);
2471 const Comm::ConnectionPointer &details = connState->clientConnection;
2472
2473 if (!ctx || !httpsCreate(connState, ctx))
2474 return;
2475
2476 connState->resetReadTimeout(Config.Timeout.request);
2477
2478 Comm::SetSelect(details->fd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0);
2479 }
2480
2481 #if USE_OPENSSL
2482 /**
2483 * A callback function to use with the ACLFilledChecklist callback.
2484 */
2485 static void
2486 httpsSslBumpAccessCheckDone(Acl::Answer answer, void *data)
2487 {
2488 ConnStateData *connState = (ConnStateData *) data;
2489
2490 // if the connection is closed or closing, just return.
2491 if (!connState->isOpen())
2492 return;
2493
2494 if (answer.allowed()) {
2495 debugs(33, 2, "sslBump action " << Ssl::bumpMode(answer.kind) << "needed for " << connState->clientConnection);
2496 connState->sslBumpMode = static_cast<Ssl::BumpMode>(answer.kind);
2497 } else {
2498 debugs(33, 3, "sslBump not needed for " << connState->clientConnection);
2499 connState->sslBumpMode = Ssl::bumpSplice;
2500 }
2501
2502 if (connState->sslBumpMode == Ssl::bumpTerminate) {
2503 connState->clientConnection->close();
2504 return;
2505 }
2506
2507 if (!connState->fakeAConnectRequest("ssl-bump", connState->inBuf))
2508 connState->clientConnection->close();
2509 }
2510 #endif
2511
2512 /** handle a new HTTPS connection */
2513 static void
2514 httpsAccept(const CommAcceptCbParams &params)
2515 {
2516 Assure(params.port);
2517
2518 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2519
2520 if (params.flag != Comm::OK) {
2521 // Its possible the call was still queued when the client disconnected
2522 debugs(33, 2, "httpsAccept: " << params.port->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2523 return;
2524 }
2525
2526 const auto xact = MasterXaction::MakePortful(params.port);
2527 xact->tcpClient = params.conn;
2528
2529 debugs(33, 4, params.conn << " accepted, starting SSL negotiation.");
2530 fd_note(params.conn->fd, "client https connect");
2531
2532 // Socket is ready, setup the connection manager to start using it
2533 auto *srv = Https::NewServer(xact);
2534 // XXX: do not abandon the MasterXaction object
2535 AsyncJob::Start(srv); // usually async-calls postHttpsAccept()
2536 }
2537
/// Continues HTTPS connection setup after accept(): either starts the
/// ssl_bump ACL evaluation (for bumping ports) or begins a plain TLS
/// handshake with the port's static context.
void
ConnStateData::postHttpsAccept()
{
    if (port->flags.tunnelSslBumping) {
#if USE_OPENSSL
        debugs(33, 5, "accept transparent connection: " << clientConnection);

        // without an ssl_bump directive, behave as if the check was denied
        if (!Config.accessList.ssl_bump) {
            httpsSslBumpAccessCheckDone(ACCESS_DENIED, this);
            return;
        }

        const auto mx = MasterXaction::MakePortful(port);
        mx->tcpClient = clientConnection;
        // Create a fake HTTP request and ALE for the ssl_bump ACL check,
        // using tproxy/intercept provided destination IP and port.
        // XXX: Merge with subsequent fakeAConnectRequest(), buildFakeRequest().
        // XXX: Do this earlier (e.g., in Http[s]::One::Server constructor).
        HttpRequest *request = new HttpRequest(mx);
        static char ip[MAX_IPSTRLEN];
        assert(clientConnection->flags & (COMM_TRANSPARENT | COMM_INTERCEPTION));
        request->url.host(clientConnection->local.toStr(ip, sizeof(ip)));
        request->url.port(clientConnection->local.port());
        request->myportname = port->name;
        const AccessLogEntry::Pointer connectAle = new AccessLogEntry;
        CodeContext::Reset(connectAle);
        // TODO: Use these request/ALE when waiting for new bumped transactions.

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, request, nullptr);
        fillChecklist(*acl_checklist);
        // Build a local AccessLogEntry to allow requiresAle() acls work
        acl_checklist->al = connectAle;
        acl_checklist->al->cache.start_time = current_time;
        acl_checklist->al->tcpClient = clientConnection;
        acl_checklist->al->cache.port = port;
        acl_checklist->al->cache.caddr = log_addr;
        acl_checklist->al->proxyProtocolHeader = proxyProtocolHeader_;
        acl_checklist->al->updateError(bareError);
        HTTPMSGUNLOCK(acl_checklist->al->request);
        acl_checklist->al->request = request;
        HTTPMSGLOCK(acl_checklist->al->request);
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(request, log_uri);
        // asynchronous check; httpsSslBumpAccessCheckDone() resumes the job
        acl_checklist->nonBlockingCheck(httpsSslBumpAccessCheckDone, this);
#else
        fatal("FATAL: SSL-Bump requires --with-openssl");
#endif
        return;
    } else {
        httpsEstablish(this, port->secure.staticContext);
    }
}
2592
2593 #if USE_OPENSSL
2594 void
2595 ConnStateData::sslCrtdHandleReplyWrapper(void *data, const Helper::Reply &reply)
2596 {
2597 ConnStateData * state_data = (ConnStateData *)(data);
2598 state_data->sslCrtdHandleReply(reply);
2599 }
2600
/// Processes the ssl_crtd certificate-generator helper reply: on success,
/// installs or generates the TLS context for the bumped connection; on any
/// failure, continues via getSslContextDone() with a nil context.
void
ConnStateData::sslCrtdHandleReply(const Helper::Reply &reply)
{
    if (!isOpen()) {
        debugs(33, 3, "Connection gone while waiting for ssl_crtd helper reply; helper reply:" << reply);
        return;
    }

    if (reply.result == Helper::BrokenHelper) {
        debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply);
    } else if (!reply.other().hasContent()) {
        debugs(1, DBG_IMPORTANT, "\"ssl_crtd\" helper returned <NULL> reply.");
    } else {
        // parse the helper payload as a certificate-generator REPLY message
        Ssl::CrtdMessage reply_message(Ssl::CrtdMessage::REPLY);
        if (reply_message.parse(reply.other().content(), reply.other().contentSize()) != Ssl::CrtdMessage::OK) {
            debugs(33, 5, "Reply from ssl_crtd for " << tlsConnectHostOrIp << " is incorrect");
        } else {
            if (reply.result != Helper::Okay) {
                debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply_message.getBody());
            } else {
                debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " was successfully received from ssl_crtd");
                if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
                    // peek/stare path: configure the existing TLS object
                    doPeekAndSpliceStep();
                    auto ssl = fd_table[clientConnection->fd].ssl.get();
                    bool ret = Ssl::configureSSLUsingPkeyAndCertFromMemory(ssl, reply_message.getBody().c_str(), *port);
                    if (!ret)
                        debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

                    Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
                    Ssl::configureUnconfiguredSslContext(ctx, signAlgorithm, *port);
                } else {
                    // regular bump path: build a fresh context and cache it
                    Security::ContextPointer ctx(Ssl::GenerateSslContextUsingPkeyAndCertFromMemory(reply_message.getBody().c_str(), port->secure, (signAlgorithm == Ssl::algSignTrusted)));
                    if (ctx && !sslBumpCertKey.isEmpty())
                        storeTlsContextToCache(sslBumpCertKey, ctx);
                    getSslContextDone(ctx);
                }
                return;
            }
        }
    }
    // all error paths fall through here: continue without a generated context
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2644
2645 void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties &certProperties)
2646 {
2647 certProperties.commonName = sslCommonName_.isEmpty() ? tlsConnectHostOrIp.c_str() : sslCommonName_.c_str();
2648
2649 const bool connectedOk = sslServerBump && sslServerBump->connectedOk();
2650 if (connectedOk) {
2651 if (X509 *mimicCert = sslServerBump->serverCert.get())
2652 certProperties.mimicCert.resetAndLock(mimicCert);
2653
2654 ACLFilledChecklist checklist(nullptr, sslServerBump->request.getRaw());
2655 fillChecklist(checklist);
2656
2657 for (sslproxy_cert_adapt *ca = Config.ssl_client.cert_adapt; ca != nullptr; ca = ca->next) {
2658 // If the algorithm already set, then ignore it.
2659 if ((ca->alg == Ssl::algSetCommonName && certProperties.setCommonName) ||
2660 (ca->alg == Ssl::algSetValidAfter && certProperties.setValidAfter) ||
2661 (ca->alg == Ssl::algSetValidBefore && certProperties.setValidBefore) )
2662 continue;
2663
2664 if (ca->aclList && checklist.fastCheck(ca->aclList).allowed()) {
2665 const char *alg = Ssl::CertAdaptAlgorithmStr[ca->alg];
2666 const char *param = ca->param;
2667
2668 // For parameterless CN adaptation, use hostname from the
2669 // CONNECT request.
2670 if (ca->alg == Ssl::algSetCommonName) {
2671 if (!param)
2672 param = tlsConnectHostOrIp.c_str();
2673 certProperties.commonName = param;
2674 certProperties.setCommonName = true;
2675 } else if (ca->alg == Ssl::algSetValidAfter)
2676 certProperties.setValidAfter = true;
2677 else if (ca->alg == Ssl::algSetValidBefore)
2678 certProperties.setValidBefore = true;
2679
2680 debugs(33, 5, "Matches certificate adaptation aglorithm: " <<
2681 alg << " param: " << (param ? param : "-"));
2682 }
2683 }
2684
2685 certProperties.signAlgorithm = Ssl::algSignEnd;
2686 for (sslproxy_cert_sign *sg = Config.ssl_client.cert_sign; sg != nullptr; sg = sg->next) {
2687 if (sg->aclList && checklist.fastCheck(sg->aclList).allowed()) {
2688 certProperties.signAlgorithm = (Ssl::CertSignAlgorithm)sg->alg;
2689 break;
2690 }
2691 }
2692 } else {// did not try to connect (e.g. client-first) or failed to connect
2693 // In case of an error while connecting to the secure server, use a
2694 // trusted certificate, with no mimicked fields and no adaptation
2695 // algorithms. There is nothing we can mimic, so we want to minimize the
2696 // number of warnings the user will have to see to get to the error page.
2697 // We will close the connection, so that the trust is not extended to
2698 // non-Squid content.
2699 certProperties.signAlgorithm = Ssl::algSignTrusted;
2700 }
2701
2702 assert(certProperties.signAlgorithm != Ssl::algSignEnd);
2703
2704 if (certProperties.signAlgorithm == Ssl::algSignUntrusted) {
2705 assert(port->secure.untrustedSigningCa.cert);
2706 certProperties.signWithX509.resetAndLock(port->secure.untrustedSigningCa.cert.get());
2707 certProperties.signWithPkey.resetAndLock(port->secure.untrustedSigningCa.pkey.get());
2708 } else {
2709 assert(port->secure.signingCa.cert.get());
2710 certProperties.signWithX509.resetAndLock(port->secure.signingCa.cert.get());
2711
2712 if (port->secure.signingCa.pkey)
2713 certProperties.signWithPkey.resetAndLock(port->secure.signingCa.pkey.get());
2714 }
2715 signAlgorithm = certProperties.signAlgorithm;
2716
2717 certProperties.signHash = Ssl::DefaultSignHash;
2718 }
2719
2720 Security::ContextPointer
2721 ConnStateData::getTlsContextFromCache(const SBuf &cacheKey, const Ssl::CertificateProperties &certProperties)
2722 {
2723 debugs(33, 5, "Finding SSL certificate for " << cacheKey << " in cache");
2724 Ssl::LocalContextStorage * ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2725 if (const auto ctx = ssl_ctx_cache ? ssl_ctx_cache->get(cacheKey) : nullptr) {
2726 if (Ssl::verifySslCertificate(*ctx, certProperties)) {
2727 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is valid");
2728 return *ctx;
2729 } else {
2730 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is out of date. Delete this certificate from cache");
2731 if (ssl_ctx_cache)
2732 ssl_ctx_cache->del(cacheKey);
2733 }
2734 }
2735 return Security::ContextPointer(nullptr);
2736 }
2737
2738 void
2739 ConnStateData::storeTlsContextToCache(const SBuf &cacheKey, Security::ContextPointer &ctx)
2740 {
2741 Ssl::LocalContextStorage *ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2742 if (!ssl_ctx_cache || !ssl_ctx_cache->add(cacheKey, ctx)) {
2743 // If it is not in storage delete after using. Else storage deleted it.
2744 fd_table[clientConnection->fd].dynamicTlsContext = ctx;
2745 }
2746 }
2747
/// Obtains a TLS context for the bumped client connection: consults the
/// in-memory certificate cache, then (if built) asks the external ssl_crtd
/// helper, falling back to blocking in-process generation. Eventually hands
/// the resulting context (possibly nil) to getSslContextDone().
void
ConnStateData::getSslContextStart()
{
    if (port->secure.generateHostCertificates) {
        Ssl::CertificateProperties certProperties;
        buildSslCertGenerationParams(certProperties);

        // Disable caching for bumpPeekAndSplice mode
        if (!(sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare))) {
            sslBumpCertKey.clear();
            Ssl::InRamCertificateDbKey(certProperties, sslBumpCertKey);
            assert(!sslBumpCertKey.isEmpty());

            // a cache hit avoids both ssl_crtd and in-process generation
            Security::ContextPointer ctx(getTlsContextFromCache(sslBumpCertKey, certProperties));
            if (ctx) {
                getSslContextDone(ctx);
                return;
            }
        }

#if USE_SSL_CRTD
        try {
            debugs(33, 5, "Generating SSL certificate for " << certProperties.commonName << " using ssl_crtd.");
            Ssl::CrtdMessage request_message(Ssl::CrtdMessage::REQUEST);
            request_message.setCode(Ssl::CrtdMessage::code_new_certificate);
            request_message.composeRequest(certProperties);
            debugs(33, 5, "SSL crtd request: " << request_message.compose().c_str());
            // asynchronous: generation resumes in sslCrtdHandleReplyWrapper()
            Ssl::Helper::Submit(request_message, sslCrtdHandleReplyWrapper, this);
            return;
        } catch (const std::exception &e) {
            debugs(33, DBG_IMPORTANT, "ERROR: Failed to compose ssl_crtd " <<
                   "request for " << certProperties.commonName <<
                   " certificate: " << e.what() << "; will now block to " <<
                   "generate that certificate.");
            // fall through to do blocking in-process generation.
        }
#endif // USE_SSL_CRTD

        debugs(33, 5, "Generating SSL certificate for " << certProperties.commonName);
        if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
            // peek/stare: reconfigure the already-created SSL object in place
            doPeekAndSpliceStep();
            auto ssl = fd_table[clientConnection->fd].ssl.get();
            if (!Ssl::configureSSL(ssl, certProperties, *port))
                debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

            Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
            Ssl::configureUnconfiguredSslContext(ctx, certProperties.signAlgorithm, *port);
        } else {
            // client-first/server-first: generate a whole new context and cache it
            Security::ContextPointer dynCtx(Ssl::GenerateSslContext(certProperties, port->secure, (signAlgorithm == Ssl::algSignTrusted)));
            if (dynCtx && !sslBumpCertKey.isEmpty())
                storeTlsContextToCache(sslBumpCertKey, dynCtx);
            getSslContextDone(dynCtx);
        }
        return;
    }

    // certificate generation disabled for this port; proceed with a nil
    // context so getSslContextDone() may fall back to the static context
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2807
/// Finishes TLS context acquisition for a bumped connection: falls back to
/// the port's static context when generation failed, converts the client
/// connection to TLS, and starts negotiating with the client.
void
ConnStateData::getSslContextDone(Security::ContextPointer &ctx)
{
    if (port->secure.generateHostCertificates && !ctx) {
        debugs(33, 2, "Failed to generate TLS context for " << tlsConnectHostOrIp);
    }

    // If generated ssl context = NULL, try to use static ssl context.
    if (!ctx) {
        if (!port->secure.staticContext) {
            // no context at all; cannot continue with this client
            debugs(83, DBG_IMPORTANT, "Closing " << clientConnection->remote << " as lacking TLS context");
            clientConnection->close();
            return;
        } else {
            debugs(33, 5, "Using static TLS context.");
            ctx = port->secure.staticContext;
        }
    }

    if (!httpsCreate(this, ctx))
        return;

    // bumped intercepted conns should already have Config.Timeout.request set
    // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
    // to make sure the connection does not get stuck on non-SSL clients.
    resetReadTimeout(Config.Timeout.request);

    switchedToHttps_ = true;

    // hand the already-buffered client bytes to the TLS layer through our
    // ClientBio so they are not lost when the handshake starts reading
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    inBuf.clear();
    clientNegotiateSSL(clientConnection->fd, this);
}
2844
/// Converts this connection into an SslBump TLS connection: remembers the
/// CONNECT/intercept target, creates the Ssl::ServerBump state appropriate
/// for bumpServerMode, and starts reading the client TLS handshake.
/// \param http the transaction (usually a CONNECT) that triggered bumping
/// \param bumpServerMode the step1 bumping decision to act on
void
ConnStateData::switchToHttps(ClientHttpRequest *http, Ssl::BumpMode bumpServerMode)
{
    assert(!switchedToHttps_);
    Must(http->request);
    auto &request = http->request;

    // Depending on receivedFirstByte_, we are at the start of either an
    // established CONNECT tunnel with the client or an intercepted TCP (and
    // presumably TLS) connection from the client. Expect TLS Client Hello.
    const auto insideConnectTunnel = receivedFirstByte_;
    debugs(33, 5, (insideConnectTunnel ? "post-CONNECT " : "raw TLS ") << clientConnection);

    // remember the intended destination for later fake CONNECTs and logging
    tlsConnectHostOrIp = request->url.hostOrIp();
    tlsConnectPort = request->url.port();
    resetSslCommonName(request->url.host());

    // We are going to read new request
    flags.readMore = true;

    // keep version major.minor details the same.
    // but we are now performing the HTTPS handshake traffic
    transferProtocol.protocol = AnyP::PROTO_HTTPS;

    // If sslServerBump is set, then we have decided to deny CONNECT
    // and now want to switch to SSL to send the error to the client
    // without even peeking at the origin server certificate.
    if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http);
    } else if (bumpServerMode == Ssl::bumpPeek || bumpServerMode == Ssl::bumpStare) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http, nullptr, bumpServerMode);
    }

    // commSetConnTimeout() was called for this request before we switched.
    // Fix timeout to request_start_timeout
    resetReadTimeout(Config.Timeout.request_start_timeout);
    // Also reset receivedFirstByte_ flag to allow this timeout work in the case we have
    // a bumbed "connect" request on non transparent port.
    receivedFirstByte_ = false;
    // Get more data to peek at Tls
    parsingTlsHandshake = true;

    // If the protocol has changed, then reset preservingClientData_.
    // Otherwise, its value initially set in start() is still valid/fresh.
    // shouldPreserveClientData() uses parsingTlsHandshake which is reset above.
    if (insideConnectTunnel)
        preservingClientData_ = shouldPreserveClientData();

    readSomeData();
}
2897
/// Incrementally parses the buffered TLS Client Hello. On success, proceeds
/// according to the step1 bumping decision: generate a client context
/// (client-first), start forwarding (server-first), or enter bumping step2
/// (peek/stare). On parsing failure, attempts tunnelOnError() before
/// closing the connection.
void
ConnStateData::parseTlsHandshake()
{
    Must(parsingTlsHandshake);

    assert(!inBuf.isEmpty());
    receivedFirstByte();
    fd_note(clientConnection->fd, "Parsing TLS handshake");

    // stops being nil if we fail to parse the handshake
    ErrorDetail::Pointer parseErrorDetails;

    try {
        if (!tlsParser.parseHello(inBuf)) {
            // need more data to finish parsing
            readSomeData();
            return;
        }
    }
    catch (const TextException &ex) {
        debugs(83, 2, "exception: " << ex);
        parseErrorDetails = new ExceptionErrorDetail(ex.id());
    }
    catch (...) {
        debugs(83, 2, "exception: " << CurrentException);
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_PARSE");
        parseErrorDetails = d;
    }

    // parsing is over (successfully or otherwise)
    parsingTlsHandshake = false;

    // client data may be needed for splicing and for
    // tunneling unsupportedProtocol after an error
    preservedClientData = inBuf;

    // Even if the parser failed, each TLS detail should either be set
    // correctly or still be "unknown"; copying unknown detail is a no-op.
    Security::TlsDetails::Pointer const &details = tlsParser.details;
    clientConnection->tlsNegotiations()->retrieveParsedInfo(details);
    if (details && !details->serverName.isEmpty()) {
        // the SNI becomes our certificate CN and fake-CONNECT target
        resetSslCommonName(details->serverName.c_str());
        tlsClientSni_ = details->serverName;
    }

    // We should disable read/write handlers
    Comm::ResetSelect(clientConnection->fd);

    if (parseErrorDetails) {
        Http::StreamPointer context = pipeline.front();
        Must(context && context->http);
        HttpRequest::Pointer request = context->http->request;
        debugs(83, 5, "Got something other than TLS Client Hello. Cannot SslBump.");
        updateError(ERR_PROTOCOL_UNKNOWN, parseErrorDetails);
        // honor on_unsupported_protocol before giving up on this client
        if (!tunnelOnError(ERR_PROTOCOL_UNKNOWN))
            clientConnection->close();
        return;
    }

    if (!sslServerBump || sslServerBump->act.step1 == Ssl::bumpClientFirst) { // Either means client-first.
        getSslContextStart();
        return;
    } else if (sslServerBump->act.step1 == Ssl::bumpServerFirst) {
        debugs(83, 5, "server-first skips step2; start forwarding the request");
        sslServerBump->step = XactionStep::tlsBump3;
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        // will call httpsPeeked() with certificate and connection, eventually
        FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : nullptr);
    } else {
        Must(sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare);
        startPeekAndSplice();
    }
}
2971
2972 static void
2973 httpsSslBumpStep2AccessCheckDone(Acl::Answer answer, void *data)
2974 {
2975 ConnStateData *connState = (ConnStateData *) data;
2976
2977 // if the connection is closed or closing, just return.
2978 if (!connState->isOpen())
2979 return;
2980
2981 debugs(33, 5, "Answer: " << answer << " kind:" << answer.kind);
2982 assert(connState->serverBump());
2983 Ssl::BumpMode bumpAction;
2984 if (answer.allowed()) {
2985 bumpAction = (Ssl::BumpMode)answer.kind;
2986 } else
2987 bumpAction = Ssl::bumpSplice;
2988
2989 connState->serverBump()->act.step2 = bumpAction;
2990 connState->sslBumpMode = bumpAction;
2991 Http::StreamPointer context = connState->pipeline.front();
2992 if (ClientHttpRequest *http = (context ? context->http : nullptr))
2993 http->al->ssl.bumpMode = bumpAction;
2994
2995 if (bumpAction == Ssl::bumpTerminate) {
2996 connState->clientConnection->close();
2997 } else if (bumpAction != Ssl::bumpSplice) {
2998 connState->startPeekAndSplice();
2999 } else if (!connState->splice())
3000 connState->clientConnection->close();
3001 }
3002
/// Stops TLS bumping and arranges for the preserved client handshake bytes
/// to be tunneled verbatim via a fake CONNECT request (so that SNI remains
/// available to doCallouts() checks and adaptations).
/// \returns false if the fake CONNECT transaction could not be created
bool
ConnStateData::splice()
{
    // normally we can splice here, because we just got client hello message

    // fde::ssl/tls_read_method() probably reads from our own inBuf. If so, then
    // we should not lose any raw bytes when switching to raw I/O here.
    if (fd_table[clientConnection->fd].ssl.get())
        fd_table[clientConnection->fd].useDefaultIo();

    // XXX: assuming that there was an HTTP/1.1 CONNECT to begin with...
    // reset the current protocol to HTTP/1.1 (was "HTTPS" for the bumping process)
    transferProtocol = Http::ProtocolVersion();
    assert(!pipeline.empty());
    Http::StreamPointer context = pipeline.front();
    Must(context);
    Must(context->http);
    ClientHttpRequest *http = context->http;
    HttpRequest::Pointer request = http->request;
    // retire the bumping transaction before starting the tunnel one
    context->finished();
    if (transparent()) {
        // For transparent connections, make a new fake CONNECT request, now
        // with SNI as target. doCallout() checks, adaptations may need that.
        return fakeAConnectRequest("splice", preservedClientData);
    } else {
        // For non transparent connections make a new tunneled CONNECT, which
        // also sets the HttpRequest::flags::forceTunnel flag to avoid
        // respond with "Connection Established" to the client.
        // This fake CONNECT request required to allow use of SNI in
        // doCallout() checks and adaptations.
        return initiateTunneledRequest(request, "splice", preservedClientData);
    }
}
3036
/// Runs SslBump step2: first triggers the ssl_bump ACL re-check (whose
/// callback re-enters this method), then feeds the parsed Client Hello to
/// the TLS library with a held ClientBio and starts forwarding the request
/// to peek at the server side.
void
ConnStateData::startPeekAndSplice()
{
    // This is the Step2 of the SSL bumping
    assert(sslServerBump);
    Http::StreamPointer context = pipeline.front();
    ClientHttpRequest *http = context ? context->http : nullptr;

    if (sslServerBump->at(XactionStep::tlsBump1)) {
        sslServerBump->step = XactionStep::tlsBump2;
        // Run a accessList check to check if want to splice or continue bumping

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, sslServerBump->request.getRaw(), nullptr);
        // step1-only modes make no sense as step2 answers; forbid them
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpNone));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpClientFirst));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpServerFirst));
        fillChecklist(*acl_checklist);
        acl_checklist->nonBlockingCheck(httpsSslBumpStep2AccessCheckDone, this);
        return;
    }

    // will call httpsPeeked() with certificate and connection, eventually
    Security::ContextPointer unConfiguredCTX(Ssl::createSSLContext(port->secure.signingCa.cert, port->secure.signingCa.pkey, port->secure));
    fd_table[clientConnection->fd].dynamicTlsContext = unConfiguredCTX;

    if (!httpsCreate(this, unConfiguredCTX))
        return;

    switchedToHttps_ = true;

    // replay the buffered client bytes through our ClientBio, but hold the
    // handshake so that doPeekAndSpliceStep() can resume it later
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    bio->hold(true);

    // We have successfully parsed client Hello, but our TLS handshake parser is
    // forgiving. Now we use a TLS library to parse the same bytes, so that we
    // can honor on_unsupported_protocol if needed. If there are no errors, we
    // expect Security::Accept() to ask us to write (our) TLS server Hello. We
    // also allow an ioWantRead result in case some fancy TLS extension that
    // Squid does not yet understand requires reading post-Hello client bytes.
    const auto handshakeResult = acceptTls();
    if (!handshakeResult.wantsIo())
        return handleSslBumpHandshakeError(handshakeResult);

    // We need to reset inBuf here, to be used by incoming requests in the case
    // of SSL bump
    inBuf.clear();

    debugs(83, 5, "Peek and splice at step2 done. Start forwarding the request!!! ");
    sslServerBump->step = XactionStep::tlsBump3;
    FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : nullptr);
}
3091
/// process a problematic Security::Accept() result on the SslBump code path.
/// Called only for results that do not want more I/O; every such result is
/// unexpected here (we just fed the parsed Client Hello bytes) and is
/// converted into an error that tunnelOnError() may service before we close.
void
ConnStateData::handleSslBumpHandshakeError(const Security::IoResult &handshakeResult)
{
    auto errCategory = ERR_NONE;

    switch (handshakeResult.category) {
    case Security::IoResult::ioSuccess: {
        // the accept finished without us writing a server Hello: unexpected
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_SUCCESS");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioWantRead: {
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_READ");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioWantWrite: {
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_WRITE");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioError:
        debugs(83, (handshakeResult.important ? DBG_IMPORTANT : 2), "ERROR: " << handshakeResult.errorDescription <<
               " while SslBump-accepting a TLS connection on " << clientConnection << ": " << handshakeResult.errorDetail);
        updateError(errCategory = ERR_SECURE_ACCEPT_FAIL, handshakeResult.errorDetail);
        break;

    }

    // honor on_unsupported_protocol before giving up on this client
    if (!tunnelOnError(errCategory))
        clientConnection->close();
}
3128
3129 void
3130 ConnStateData::doPeekAndSpliceStep()
3131 {
3132 auto ssl = fd_table[clientConnection->fd].ssl.get();
3133 BIO *b = SSL_get_rbio(ssl);
3134 assert(b);
3135 Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
3136
3137 debugs(33, 5, "PeekAndSplice mode, proceed with client negotiation. Current state:" << SSL_state_string_long(ssl));
3138 bio->hold(false);
3139
3140 Comm::SetSelect(clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, this, 0);
3141 switchedToHttps_ = true;
3142 }
3143
3144 void
3145 ConnStateData::httpsPeeked(PinnedIdleContext pic)
3146 {
3147 Must(sslServerBump != nullptr);
3148 Must(sslServerBump->request == pic.request);
3149 Must(pipeline.empty() || pipeline.front()->http == nullptr || pipeline.front()->http->request == pic.request.getRaw());
3150
3151 if (Comm::IsConnOpen(pic.connection)) {
3152 notePinnedConnectionBecameIdle(pic);
3153 debugs(33, 5, "bumped HTTPS server: " << tlsConnectHostOrIp);
3154 } else
3155 debugs(33, 5, "Error while bumping: " << tlsConnectHostOrIp);
3156
3157 getSslContextStart();
3158 }
3159
3160 #endif /* USE_OPENSSL */
3161
/// Creates a fake CONNECT transaction (flagged forceTunnel) so that the
/// given payload can be tunneled blindly. The CONNECT target is, in order
/// of preference: the pinned server, the cause's URL, the remembered TLS
/// CONNECT target, or the intercepted local destination.
/// \param cause the request that led to tunneling (may be nil)
/// \param reason a brief description for debugging output
/// \param payload raw client bytes to be forwarded through the tunnel
/// \returns false (after updating the error state) when no target is computable
bool
ConnStateData::initiateTunneledRequest(HttpRequest::Pointer const &cause, const char *reason, const SBuf &payload)
{
    // fake a CONNECT request to force connState to tunnel
    SBuf connectHost;
    unsigned short connectPort = 0;

    if (pinning.serverConnection != nullptr) {
        static char ip[MAX_IPSTRLEN];
        connectHost = pinning.serverConnection->remote.toStr(ip, sizeof(ip));
        connectPort = pinning.serverConnection->remote.port();
    } else if (cause) {
        connectHost = cause->url.hostOrIp();
        connectPort = cause->url.port();
#if USE_OPENSSL
    } else if (!tlsConnectHostOrIp.isEmpty()) {
        // target remembered by switchToHttps()
        connectHost = tlsConnectHostOrIp;
        connectPort = tlsConnectPort;
#endif
    } else if (transparent()) {
        static char ip[MAX_IPSTRLEN];
        connectHost = clientConnection->local.toStr(ip, sizeof(ip));
        connectPort = clientConnection->local.port();
    } else {
        // Typical cases are malformed HTTP requests on http_port and malformed
        // TLS handshakes on non-bumping https_port. TODO: Discover these
        // problems earlier so that they can be classified/detailed better.
        debugs(33, 2, "Not able to compute URL, abort request tunneling for " << reason);
        // TODO: throw when nonBlockingCheck() callbacks gain job protections
        static const auto d = MakeNamedErrorDetail("TUNNEL_TARGET");
        updateError(ERR_INVALID_REQ, d);
        return false;
    }

    debugs(33, 2, "Request tunneling for " << reason);
    ClientHttpRequest *http = buildFakeRequest(connectHost, connectPort, payload);
    HttpRequest::Pointer request = http->request;
    request->flags.forceTunnel = true;
    http->calloutContext = new ClientRequestContext(http);
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3205
/// Creates a fake CONNECT transaction for a transparently intercepted
/// connection, targeting the TLS SNI (when known) or the intercepted local
/// address, so that the payload can be tunneled.
/// \param reason a brief description for debugging output
/// \param payload raw client bytes to be forwarded through the tunnel
/// \returns true (the fake transaction is always started)
bool
ConnStateData::fakeAConnectRequest(const char *reason, const SBuf &payload)
{
    debugs(33, 2, "fake a CONNECT request to force connState to tunnel for " << reason);

    SBuf connectHost;
    assert(transparent());
    const unsigned short connectPort = clientConnection->local.port();

#if USE_OPENSSL
    if (!tlsClientSni_.isEmpty())
        connectHost.assign(tlsClientSni_);
    else
#endif
    {
        // no SNI available; fall back to the intercepted destination address
        static char ip[MAX_IPSTRLEN];
        clientConnection->local.toHostStr(ip, sizeof(ip));
        connectHost.assign(ip);
    }

    ClientHttpRequest *http = buildFakeRequest(connectHost, connectPort, payload);

    http->calloutContext = new ClientRequestContext(http);
    HttpRequest::Pointer request = http->request;
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3234
/// Builds a complete fake CONNECT transaction for the given target,
/// including the client stream, the Http::Stream context, and an
/// authority-form HttpRequest. The given payload replaces inBuf, and
/// further reading from the client is disabled.
/// \param useHost the CONNECT target host (also becomes the Host header)
/// \param usePort the CONNECT target port
/// \param payload raw client bytes that become the new inBuf content
/// \returns the initialized transaction (never nil)
ClientHttpRequest *
ConnStateData::buildFakeRequest(SBuf &useHost, unsigned short usePort, const SBuf &payload)
{
    ClientHttpRequest *http = new ClientHttpRequest(this);
    Http::Stream *stream = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = stream->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = stream;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    stream->flags.parsed_ok = 1; // Do we need it?
    stream->mayUseConnection(true);
    extendLifetime();
    stream->registerWithConn();

    const auto mx = MasterXaction::MakePortful(port);
    mx->tcpClient = clientConnection;
    // Setup Http::Request object. Maybe should be replaced by a call to (modified)
    // clientProcessRequest
    HttpRequest::Pointer request = new HttpRequest(mx);
    request->url.setScheme(AnyP::PROTO_AUTHORITY_FORM, nullptr);
    request->method = Http::METHOD_CONNECT;
    request->url.host(useHost.c_str());
    request->url.port(usePort);

    http->uri = SBufToCstring(request->effectiveRequestUri());
    http->initRequest(request.getRaw());

    request->manager(this, http->al);

    request->header.putStr(Http::HOST, useHost.c_str());

    // mark where these (fake) request bytes came from
    request->sources |= ((switchedToHttps() || port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    // preserve any credentials already associated with this connection
    if (getAuth())
        request->auth_user_request = getAuth();
#endif

    // the payload becomes the input for the tunneled/spliced traffic
    inBuf = payload;
    flags.readMore = false;

    return http;
}
3284
3285 /// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
3286 static bool
3287 OpenedHttpSocket(const Comm::ConnectionPointer &c, const Ipc::FdNoteId portType)
3288 {
3289 if (!Comm::IsConnOpen(c)) {
3290 Must(NHttpSockets > 0); // we tried to open some
3291 --NHttpSockets; // there will be fewer sockets than planned
3292 Must(HttpSockets[NHttpSockets] < 0); // no extra fds received
3293
3294 if (!NHttpSockets) // we could not open any listen sockets at all
3295 fatalf("Unable to open %s",FdNote(portType));
3296
3297 return false;
3298 }
3299 return true;
3300 }
3301
3302 /// find any unused HttpSockets[] slot and store fd there or return false
3303 static bool
3304 AddOpenedHttpSocket(const Comm::ConnectionPointer &conn)
3305 {
3306 bool found = false;
3307 for (int i = 0; i < NHttpSockets && !found; ++i) {
3308 if ((found = HttpSockets[i] < 0))
3309 HttpSockets[i] = conn->fd;
3310 }
3311 return found;
3312 }
3313
/// Starts listening on every configured http(s)_port, after validating
/// (and, where necessary, disabling) each port's SslBump and TLS settings.
static void
clientHttpConnectionsOpen(void)
{
    const auto savedContext = CodeContext::Current();
    for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
        CodeContext::Reset(s);
        const SBuf &scheme = AnyP::UriScheme(s->transport.protocol).image();

        if (MAXTCPLISTENPORTS == NHttpSockets) {
            debugs(1, DBG_IMPORTANT, "WARNING: You have too many '" << scheme << "_port' lines." <<
                   Debug::Extra << "The limit is " << MAXTCPLISTENPORTS << " HTTP ports.");
            continue;
        }

#if USE_OPENSSL
        if (s->flags.tunnelSslBumping) {
            // bumping requires both an ssl_bump ruleset and a usable TLS setup
            if (!Config.accessList.ssl_bump) {
                debugs(33, DBG_IMPORTANT, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << scheme << "_port " << s->s);
                s->flags.tunnelSslBumping = false;
            }
            if (!s->secure.staticContext && !s->secure.generateHostCertificates) {
                debugs(1, DBG_IMPORTANT, "Will not bump SSL at " << scheme << "_port " << s->s << " due to TLS initialization failure.");
                s->flags.tunnelSslBumping = false;
                if (s->transport.protocol == AnyP::PROTO_HTTP)
                    s->secure.encryptTransport = false;
            }
            if (s->flags.tunnelSslBumping) {
                // Create ssl_ctx cache for this port.
                Ssl::TheGlobalContextStorage.addLocalStorage(s->s, s->secure.dynamicCertMemCacheSize);
            }
        }
#endif

        if (s->secure.encryptTransport && !s->secure.staticContext) {
            debugs(1, DBG_CRITICAL, "ERROR: Ignoring " << scheme << "_port " << s->s << " due to TLS context initialization failure.");
            continue;
        }

        // subscribe the appropriate accept handler and start listening
        const auto protocol = s->transport.protocol;
        assert(protocol == AnyP::PROTO_HTTP || protocol == AnyP::PROTO_HTTPS);
        const auto isHttps = protocol == AnyP::PROTO_HTTPS;
        using AcceptCall = CommCbFunPtrCallT<CommAcceptCbPtrFun>;
        RefCount<AcceptCall> subCall = commCbCall(5, 5, isHttps ? "httpsAccept" : "httpAccept",
                                       CommAcceptCbPtrFun(isHttps ? httpsAccept : httpAccept, CommAcceptCbParams(nullptr)));
        clientStartListeningOn(s, subCall, isHttps ? Ipc::fdnHttpsSocket : Ipc::fdnHttpSocket);
    }
    CodeContext::Reset(savedContext);
}
3362
3363 void
3364 clientStartListeningOn(AnyP::PortCfgPointer &port, const RefCount< CommCbFunPtrCallT<CommAcceptCbPtrFun> > &subCall, const Ipc::FdNoteId fdNote)
3365 {
3366 // Fill out a Comm::Connection which IPC will open as a listener for us
3367 port->listenConn = new Comm::Connection;
3368 port->listenConn->local = port->s;
3369 port->listenConn->flags =
3370 COMM_NONBLOCKING |
3371 (port->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
3372 (port->flags.natIntercept ? COMM_INTERCEPTION : 0) |
3373 (port->workerQueues ? COMM_REUSEPORT : 0);
3374
3375 // route new connections to subCall
3376 typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
3377 Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
3378 const auto listenCall =
3379 asyncCall(33, 2, "clientListenerConnectionOpened",
3380 ListeningStartedDialer(&clientListenerConnectionOpened,
3381 port, fdNote, sub));
3382 AsyncCallback<Ipc::StartListeningAnswer> callback(listenCall);
3383 Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, port->listenConn, fdNote, callback);
3384
3385 assert(NHttpSockets < MAXTCPLISTENPORTS);
3386 HttpSockets[NHttpSockets] = -1;
3387 ++NHttpSockets;
3388 }
3389
/// process clientHttpConnectionsOpen result: validates the received listener
/// descriptor, starts the TcpAcceptor job for it, and records it in
/// HttpSockets[].
static void
clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub)
{
    Must(s != nullptr);

    if (!OpenedHttpSocket(s->listenConn, portTypeNote))
        return;

    Must(Comm::IsConnOpen(s->listenConn));

    // TCP: setup a job to handle accept() with subscribed handler
    AsyncJob::Start(new Comm::TcpAcceptor(s, FdNote(portTypeNote), sub));

    debugs(1, Important(13), "Accepting " <<
           (s->flags.natIntercept ? "NAT intercepted " : "") <<
           (s->flags.tproxyIntercept ? "TPROXY intercepted " : "") <<
           (s->flags.tunnelSslBumping ? "SSL bumped " : "") <<
           (s->flags.accelSurrogate ? "reverse-proxy " : "")
           << FdNote(portTypeNote) << " connections at "
           << s->listenConn);

    Must(AddOpenedHttpSocket(s->listenConn)); // otherwise, we have received a fd we did not ask for

#if USE_SYSTEMD
    // When the very first port opens, tell systemd we are able to serve connections.
    // Subsequent sd_notify() calls, including calls during reconfiguration,
    // do nothing because the first call parameter is 1.
    // XXX: Send the notification only after opening all configured ports.
    if (opt_foreground || opt_no_daemon) {
        const auto result = sd_notify(1, "READY=1");
        if (result < 0) {
            debugs(1, DBG_IMPORTANT, "WARNING: failed to send start-up notification to systemd" <<
                   Debug::Extra << "sd_notify() error: " << xstrerr(-result));
        }
    }
#endif
}
3428
/// Opens all configured HTTP(S) and FTP listening ports; aborts Squid if no
/// listening port could be configured at all.
void
clientOpenListenSockets(void)
{
    clientHttpConnectionsOpen();
    Ftp::StartListening();

    if (NHttpSockets < 1)
        fatal("No HTTP, HTTPS, or FTP ports configured");
}
3438
3439 void
3440 clientConnectionsClose()
3441 {
3442 const auto savedContext = CodeContext::Current();
3443 for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
3444 CodeContext::Reset(s);
3445 if (s->listenConn != nullptr) {
3446 debugs(1, Important(14), "Closing HTTP(S) port " << s->listenConn->local);
3447 s->listenConn->close();
3448 s->listenConn = nullptr;
3449 }
3450 }
3451 CodeContext::Reset(savedContext);
3452
3453 Ftp::StopListening();
3454
3455 // TODO see if we can drop HttpSockets array entirely */
3456 for (int i = 0; i < NHttpSockets; ++i) {
3457 HttpSockets[i] = -1;
3458 }
3459
3460 NHttpSockets = 0;
3461 }
3462
/// Checks whether a cached entry (a Vary "marker" object or a concrete
/// variant) is usable for the current request, (re)computing the request's
/// vary key as needed.
/// \retval VARY_NONE   the entry does not vary; use it as-is
/// \retval VARY_MATCH  the entry is the variant matching this request
/// \retval VARY_OTHER  a marker object was found; continue the search using
///                     the just-computed request->vary_headers key
/// \retval VARY_CANCEL this variance cannot be handled; do not use the entry
int
varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
{
    // the vary key computed (if any) during an earlier lookup attempt
    SBuf vary(request->vary_headers);
    const auto &reply = entry->mem().freshestReply();
    auto has_vary = reply.header.has(Http::HdrType::VARY);
#if X_ACCELERATOR_VARY

    // treat the accelerator-specific variance header like standard Vary
    has_vary |=
        reply.header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY);
#endif

    if (!has_vary || entry->mem_obj->vary_headers.isEmpty()) {
        if (!vary.isEmpty()) {
            // we computed a vary key earlier, yet this (second) lookup found
            // an entry that is not a Vary object at all
            /* Oops... something odd is going on here.. */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            request->vary_headers.clear();
            return VARY_CANCEL;
        }

        if (!has_vary) {
            /* This is not a varying object */
            return VARY_NONE;
        }

        /* virtual "vary" object found. Calculate the vary key and
         * continue the search
         */
        vary = httpMakeVaryMark(request, &reply);

        if (!vary.isEmpty()) {
            request->vary_headers = vary;
            return VARY_OTHER;
        } else {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        }
    } else {
        // the entry stores a vary key; compute ours (first attempt) if needed
        if (vary.isEmpty()) {
            vary = httpMakeVaryMark(request, &reply);

            if (!vary.isEmpty())
                request->vary_headers = vary;
        }

        if (vary.isEmpty()) {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        } else if (vary.cmp(entry->mem_obj->vary_headers) == 0) {
            // the entry's stored key matches this request's key
            return VARY_MATCH;
        } else {
            /* Oops.. we have already been here and still haven't
             * found the requested variant. Bail out
             */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            return VARY_CANCEL;
        }
    }
}
3526
3527 ACLFilledChecklist *
3528 clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
3529 {
3530 const auto checklist = new ACLFilledChecklist(acl, nullptr, nullptr);
3531 clientAclChecklistFill(*checklist, http);
3532 return checklist;
3533 }
3534
3535 void
3536 clientAclChecklistFill(ACLFilledChecklist &checklist, ClientHttpRequest *http)
3537 {
3538 assert(http);
3539
3540 if (!checklist.request && http->request)
3541 checklist.setRequest(http->request);
3542
3543 if (!checklist.al && http->al) {
3544 checklist.al = http->al;
3545 checklist.syncAle(http->request, http->log_uri);
3546 if (!checklist.reply && http->al->reply) {
3547 checklist.reply = http->al->reply.getRaw();
3548 HTTPMSGLOCK(checklist.reply);
3549 }
3550 }
3551
3552 if (const auto conn = http->getConn())
3553 checklist.setConn(conn); // may already be set
3554 }
3555
3556 void
3557 ConnStateData::fillChecklist(ACLFilledChecklist &checklist) const
3558 {
3559 const auto context = pipeline.front();
3560 if (const auto http = context ? context->http : nullptr)
3561 return clientAclChecklistFill(checklist, http); // calls checklist.setConn()
3562
3563 // no requests, but we always have connection-level details
3564 // TODO: ACL checks should not require a mutable ConnStateData. Adjust the
3565 // code that accidentally violates that principle to remove this const_cast!
3566 checklist.setConn(const_cast<ConnStateData*>(this));
3567
3568 // Set other checklist fields inside our fillConnectionLevelDetails() rather
3569 // than here because clientAclChecklistFill() code path calls that method
3570 // (via ACLFilledChecklist::setConn()) rather than calling us directly.
3571 }
3572
3573 void
3574 ConnStateData::fillConnectionLevelDetails(ACLFilledChecklist &checklist) const
3575 {
3576 assert(checklist.conn() == this);
3577 assert(clientConnection);
3578
3579 if (!checklist.request) { // preserve (better) addresses supplied by setRequest()
3580 checklist.src_addr = clientConnection->remote;
3581 checklist.my_addr = clientConnection->local; // TODO: or port->s?
3582 }
3583
3584 #if USE_OPENSSL
3585 if (!checklist.sslErrors && sslServerBump)
3586 checklist.sslErrors = cbdataReference(sslServerBump->sslErrors());
3587 #endif
3588
3589 if (!checklist.rfc931[0]) // checklist creator may have supplied it already
3590 checklist.setIdent(clientConnection->rfc931);
3591
3592 }
3593
3594 bool
3595 ConnStateData::transparent() const
3596 {
3597 return clientConnection != nullptr && (clientConnection->flags & (COMM_TRANSPARENT|COMM_INTERCEPTION));
3598 }
3599
3600 BodyPipe::Pointer
3601 ConnStateData::expectRequestBody(int64_t size)
3602 {
3603 bodyPipe = new BodyPipe(this);
3604 if (size >= 0)
3605 bodyPipe->setBodySize(size);
3606 else
3607 startDechunkingRequest();
3608 return bodyPipe;
3609 }
3610
3611 int64_t
3612 ConnStateData::mayNeedToReadMoreBody() const
3613 {
3614 if (!bodyPipe)
3615 return 0; // request without a body or read/produced all body bytes
3616
3617 if (!bodyPipe->bodySizeKnown())
3618 return -1; // probably need to read more, but we cannot be sure
3619
3620 const int64_t needToProduce = bodyPipe->unproducedSize();
3621 const int64_t haveAvailable = static_cast<int64_t>(inBuf.length());
3622
3623 if (needToProduce <= haveAvailable)
3624 return 0; // we have read what we need (but are waiting for pipe space)
3625
3626 return needToProduce - haveAvailable;
3627 }
3628
3629 void
3630 ConnStateData::stopReceiving(const char *error)
3631 {
3632 debugs(33, 4, "receiving error (" << clientConnection << "): " << error <<
3633 "; old sending error: " <<
3634 (stoppedSending() ? stoppedSending_ : "none"));
3635
3636 if (const char *oldError = stoppedReceiving()) {
3637 debugs(33, 3, "already stopped receiving: " << oldError);
3638 return; // nothing has changed as far as this connection is concerned
3639 }
3640
3641 stoppedReceiving_ = error;
3642
3643 if (const char *sendError = stoppedSending()) {
3644 debugs(33, 3, "closing because also stopped sending: " << sendError);
3645 clientConnection->close();
3646 }
3647 }
3648
3649 void
3650 ConnStateData::expectNoForwarding()
3651 {
3652 if (bodyPipe != nullptr) {
3653 debugs(33, 4, "no consumer for virgin body " << bodyPipe->status());
3654 bodyPipe->expectNoConsumption();
3655 }
3656 }
3657
3658 /// initialize dechunking state
3659 void
3660 ConnStateData::startDechunkingRequest()
3661 {
3662 Must(bodyPipe != nullptr);
3663 debugs(33, 5, "start dechunking" << bodyPipe->status());
3664 assert(!bodyParser);
3665 bodyParser = new Http1::TeChunkedParser;
3666 }
3667
3668 /// put parsed content into input buffer and clean up
3669 void
3670 ConnStateData::finishDechunkingRequest(bool withSuccess)
3671 {
3672 debugs(33, 5, "finish dechunking: " << withSuccess);
3673
3674 if (bodyPipe != nullptr) {
3675 debugs(33, 7, "dechunked tail: " << bodyPipe->status());
3676 BodyPipe::Pointer myPipe = bodyPipe;
3677 stopProducingFor(bodyPipe, withSuccess); // sets bodyPipe->bodySize()
3678 Must(!bodyPipe); // we rely on it being nil after we are done with body
3679 if (withSuccess) {
3680 Must(myPipe->bodySizeKnown());
3681 Http::StreamPointer context = pipeline.front();
3682 if (context != nullptr && context->http && context->http->request)
3683 context->http->request->setContentLength(myPipe->bodySize());
3684 }
3685 }
3686
3687 delete bodyParser;
3688 bodyParser = nullptr;
3689 }
3690
3691 // XXX: this is an HTTP/1-only operation
3692 void
3693 ConnStateData::sendControlMsg(HttpControlMsg msg)
3694 {
3695 if (const auto context = pipeline.front()) {
3696 if (context->http)
3697 context->http->al->reply = msg.reply;
3698 }
3699
3700 if (!isOpen()) {
3701 debugs(33, 3, "ignoring 1xx due to earlier closure");
3702 return;
3703 }
3704
3705 // HTTP/1 1xx status messages are only valid when there is a transaction to trigger them
3706 if (!pipeline.empty()) {
3707 HttpReply::Pointer rep(msg.reply);
3708 Must(rep);
3709 // remember the callback
3710 cbControlMsgSent = msg.cbSuccess;
3711
3712 typedef CommCbMemFunT<HttpControlMsgSink, CommIoCbParams> Dialer;
3713 AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, HttpControlMsgSink::wroteControlMsg);
3714
3715 if (!writeControlMsgAndCall(rep.getRaw(), call)) {
3716 // but still inform the caller (so it may resume its operation)
3717 doneWithControlMsg();
3718 }
3719 return;
3720 }
3721
3722 debugs(33, 3, " closing due to missing context for 1xx");
3723 clientConnection->close();
3724 }
3725
3726 void
3727 ConnStateData::doneWithControlMsg()
3728 {
3729 HttpControlMsgSink::doneWithControlMsg();
3730
3731 if (Http::StreamPointer deferredRequest = pipeline.front()) {
3732 debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded after control msg wrote");
3733 ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
3734 }
3735 }
3736
3737 /// Our close handler called by Comm when the pinned connection is closed
3738 void
3739 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
3740 {
3741 // FwdState might repin a failed connection sooner than this close
3742 // callback is called for the failed connection.
3743 assert(pinning.serverConnection == io.conn);
3744 pinning.closeHandler = nullptr; // Comm unregisters handlers before calling
3745 const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
3746 pinning.serverConnection->noteClosure();
3747 unpinConnection(false);
3748
3749 if (sawZeroReply && clientConnection != nullptr) {
3750 debugs(33, 3, "Closing client connection on pinned zero reply.");
3751 clientConnection->close();
3752 }
3753
3754 }
3755
3756 void
3757 ConnStateData::pinBusyConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest::Pointer &request)
3758 {
3759 pinConnection(pinServer, *request);
3760 }
3761
3762 void
3763 ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
3764 {
3765 Must(pic.connection);
3766 Must(pic.request);
3767 pinConnection(pic.connection, *pic.request);
3768
3769 // monitor pinned server connection for remote-end closures.
3770 startPinnedConnectionMonitoring();
3771
3772 if (pipeline.empty())
3773 kick(); // in case clientParseRequests() was blocked by a busy pic.connection
3774 }
3775
3776 /// Forward future client requests using the given server connection.
3777 void
3778 ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest &request)
3779 {
3780 if (Comm::IsConnOpen(pinning.serverConnection) &&
3781 pinning.serverConnection->fd == pinServer->fd) {
3782 debugs(33, 3, "already pinned" << pinServer);
3783 return;
3784 }
3785
3786 unpinConnection(true); // closes pinned connection, if any, and resets fields
3787
3788 pinning.serverConnection = pinServer;
3789
3790 debugs(33, 3, pinning.serverConnection);
3791
3792 Must(pinning.serverConnection != nullptr);
3793
3794 const char *pinnedHost = "[unknown]";
3795 pinning.host = xstrdup(request.url.host());
3796 pinning.port = request.url.port();
3797 pinnedHost = pinning.host;
3798 pinning.pinned = true;
3799 if (CachePeer *aPeer = pinServer->getPeer())
3800 pinning.peer = cbdataReference(aPeer);
3801 pinning.auth = request.flags.connectionAuth;
3802 char stmp[MAX_IPSTRLEN];
3803 char desc[FD_DESC_SZ];
3804 snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
3805 (pinning.auth || !pinning.peer) ? pinnedHost : pinning.peer->name,
3806 clientConnection->remote.toUrl(stmp,MAX_IPSTRLEN),
3807 clientConnection->fd);
3808 fd_note(pinning.serverConnection->fd, desc);
3809
3810 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3811 pinning.closeHandler = JobCallback(33, 5,
3812 Dialer, this, ConnStateData::clientPinnedConnectionClosed);
3813 // remember the pinned connection so that cb does not unpin a fresher one
3814 typedef CommCloseCbParams Params;
3815 Params &params = GetCommParams<Params>(pinning.closeHandler);
3816 params.conn = pinning.serverConnection;
3817 comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
3818 }
3819
3820 /// [re]start monitoring pinned connection for peer closures so that we can
3821 /// propagate them to an _idle_ client pinned to that peer
3822 void
3823 ConnStateData::startPinnedConnectionMonitoring()
3824 {
3825 if (pinning.readHandler != nullptr)
3826 return; // already monitoring
3827
3828 typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
3829 pinning.readHandler = JobCallback(33, 3,
3830 Dialer, this, ConnStateData::clientPinnedConnectionRead);
3831 Comm::Read(pinning.serverConnection, pinning.readHandler);
3832 }
3833
3834 void
3835 ConnStateData::stopPinnedConnectionMonitoring()
3836 {
3837 if (pinning.readHandler != nullptr) {
3838 Comm::ReadCancel(pinning.serverConnection->fd, pinning.readHandler);
3839 pinning.readHandler = nullptr;
3840 }
3841 }
3842
#if USE_OPENSSL
/// Handles a read-ready event on an idle pinned TLS server connection.
/// \retval true  monitoring was restarted; keep the connection pinned
/// \retval false caller should treat this event as a connection closure
bool
ConnStateData::handleIdleClientPinnedTlsRead()
{
    // A ready-for-reading connection means that the TLS server either closed
    // the connection, sent us some unexpected HTTP data, or started TLS
    // renegotiations. We should close the connection except for the last case.

    Must(pinning.serverConnection != nullptr);
    auto ssl = fd_table[pinning.serverConnection->fd].ssl.get();
    if (!ssl)
        return false; // not a TLS connection; let the caller handle closure

    // probe with a one-byte TLS read; SSL_read() also advances renegotiation
    char buf[1];
    const int readResult = SSL_read(ssl, buf, sizeof(buf));

    if (readResult > 0 || SSL_pending(ssl) > 0) {
        // the server sent application data on a supposedly idle connection
        debugs(83, 2, pinning.serverConnection << " TLS application data read");
        return false;
    }

    switch(const int error = SSL_get_error(ssl, readResult)) {
    case SSL_ERROR_WANT_WRITE:
        debugs(83, DBG_IMPORTANT, pinning.serverConnection << " TLS SSL_ERROR_WANT_WRITE request for idle pinned connection");
        [[fallthrough]]; // to restart monitoring, for now

    case SSL_ERROR_NONE:
    case SSL_ERROR_WANT_READ:
        // likely a renegotiation or an incomplete record: keep waiting
        startPinnedConnectionMonitoring();
        return true;

    default:
        // a fatal TLS error or a (clean or dirty) connection closure
        debugs(83, 2, pinning.serverConnection << " TLS error: " << error);
        return false;
    }

    // not reached
    return true;
}
#endif
3883
3884 /// Our read handler called by Comm when the server either closes an idle pinned connection or
3885 /// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
3886 void
3887 ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
3888 {
3889 pinning.readHandler = nullptr; // Comm unregisters handlers before calling
3890
3891 if (io.flag == Comm::ERR_CLOSING)
3892 return; // close handler will clean up
3893
3894 Must(pinning.serverConnection == io.conn);
3895
3896 #if USE_OPENSSL
3897 if (handleIdleClientPinnedTlsRead())
3898 return;
3899 #endif
3900
3901 const bool clientIsIdle = pipeline.empty();
3902
3903 debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " <<
3904 io.size << (clientIsIdle ? " with idle client" : ""));
3905
3906 pinning.serverConnection->close();
3907
3908 // If we are still sending data to the client, do not close now. When we are done sending,
3909 // ConnStateData::kick() checks pinning.serverConnection and will close.
3910 // However, if we are idle, then we must close to inform the idle client and minimize races.
3911 if (clientIsIdle && clientConnection != nullptr)
3912 clientConnection->close();
3913 }
3914
3915 Comm::ConnectionPointer
3916 ConnStateData::borrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
3917 {
3918 debugs(33, 7, pinning.serverConnection);
3919 Must(request);
3920
3921 const auto pinningError = [&](const err_type type) {
3922 unpinConnection(true);
3923 HttpRequestPointer requestPointer = request;
3924 return ErrorState::NewForwarding(type, requestPointer, ale);
3925 };
3926
3927 if (!Comm::IsConnOpen(pinning.serverConnection))
3928 throw pinningError(ERR_ZERO_SIZE_OBJECT);
3929
3930 if (pinning.auth && pinning.host && strcasecmp(pinning.host, request->url.host()) != 0)
3931 throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST
3932
3933 if (pinning.port != request->url.port())
3934 throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST
3935
3936 if (pinning.peer && !cbdataReferenceValid(pinning.peer))
3937 throw pinningError(ERR_ZERO_SIZE_OBJECT);
3938
3939 if (pinning.peerAccessDenied)
3940 throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_FORWARDING_DENIED
3941
3942 stopPinnedConnectionMonitoring();
3943 return pinning.serverConnection;
3944 }
3945
3946 Comm::ConnectionPointer
3947 ConnStateData::BorrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
3948 {
3949 if (const auto connManager = request ? request->pinnedConnection() : nullptr)
3950 return connManager->borrowPinnedConnection(request, ale);
3951
3952 // ERR_CANNOT_FORWARD is somewhat misleading here; we can still forward, but
3953 // there is no point since the client connection is now gone
3954 HttpRequestPointer requestPointer = request;
3955 throw ErrorState::NewForwarding(ERR_CANNOT_FORWARD, requestPointer, ale);
3956 }
3957
3958 void
3959 ConnStateData::unpinConnection(const bool andClose)
3960 {
3961 debugs(33, 3, pinning.serverConnection);
3962
3963 if (pinning.peer)
3964 cbdataReferenceDone(pinning.peer);
3965
3966 if (Comm::IsConnOpen(pinning.serverConnection)) {
3967 if (pinning.closeHandler != nullptr) {
3968 comm_remove_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
3969 pinning.closeHandler = nullptr;
3970 }
3971
3972 stopPinnedConnectionMonitoring();
3973
3974 // close the server side socket if requested
3975 if (andClose)
3976 pinning.serverConnection->close();
3977 pinning.serverConnection = nullptr;
3978 }
3979
3980 safe_free(pinning.host);
3981
3982 pinning.zeroReply = false;
3983 pinning.peerAccessDenied = false;
3984
3985 /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
3986 * connection has gone away */
3987 }
3988
3989 void
3990 ConnStateData::terminateAll(const Error &rawError, const LogTagsErrors &lte)
3991 {
3992 auto error = rawError; // (cheap) copy so that we can detail
3993 // We detail even ERR_NONE: There should be no transactions left, and
3994 // detailed ERR_NONE will be unused. Otherwise, this detail helps in triage.
3995 if (!error.detail) {
3996 static const auto d = MakeNamedErrorDetail("WITH_CLIENT");
3997 error.detail = d;
3998 }
3999
4000 debugs(33, 3, pipeline.count() << '/' << pipeline.nrequests << " after " << error);
4001
4002 if (pipeline.empty()) {
4003 bareError.update(error); // XXX: bareLogTagsErrors
4004 } else {
4005 // We terminate the current CONNECT/PUT/etc. context below, logging any
4006 // error details, but that context may leave unparsed bytes behind.
4007 // Consume them to stop checkLogging() from logging them again later.
4008 const auto intputToConsume =
4009 #if USE_OPENSSL
4010 parsingTlsHandshake ? "TLS handshake" : // more specific than CONNECT
4011 #endif
4012 bodyPipe ? "HTTP request body" :
4013 pipeline.back()->mayUseConnection() ? "HTTP CONNECT" :
4014 nullptr;
4015
4016 while (const auto context = pipeline.front()) {
4017 context->noteIoError(error, lte);
4018 context->finished(); // cleanup and self-deregister
4019 assert(context != pipeline.front());
4020 }
4021
4022 if (intputToConsume && !inBuf.isEmpty()) {
4023 debugs(83, 5, "forgetting client " << intputToConsume << " bytes: " << inBuf.length());
4024 inBuf.clear();
4025 }
4026 }
4027
4028 clientConnection->close();
4029 }
4030
4031 /// log the last (attempt at) transaction if nobody else did
4032 void
4033 ConnStateData::checkLogging()
4034 {
4035 // to simplify our logic, we assume that terminateAll() has been called
4036 assert(pipeline.empty());
4037
4038 // do not log connections that closed after a transaction (it is normal)
4039 // TODO: access_log needs ACLs to match received-no-bytes connections
4040 if (pipeline.nrequests && inBuf.isEmpty())
4041 return;
4042
4043 /* Create a temporary ClientHttpRequest object. Its destructor will log. */
4044 ClientHttpRequest http(this);
4045 http.req_sz = inBuf.length();
4046 // XXX: Or we died while waiting for the pinned connection to become idle.
4047 http.setErrorUri("error:transaction-end-before-headers");
4048 http.updateError(bareError);
4049 }
4050
4051 bool
4052 ConnStateData::shouldPreserveClientData() const
4053 {
4054 // PROXY protocol bytes are meant for us and, hence, cannot be tunneled
4055 if (needProxyProtocolHeader_)
4056 return false;
4057
4058 // If our decision here is negative, configuration changes are irrelevant.
4059 // Otherwise, clientTunnelOnError() rechecks configuration before tunneling.
4060 if (!Config.accessList.on_unsupported_protocol)
4061 return false;
4062
4063 // TODO: Figure out whether/how we can support FTP tunneling.
4064 if (port->transport.protocol == AnyP::PROTO_FTP)
4065 return false;
4066
4067 #if USE_OPENSSL
4068 if (parsingTlsHandshake)
4069 return true;
4070
4071 // the 1st HTTP request on a bumped connection
4072 if (!parsedBumpedRequestCount && switchedToHttps())
4073 return true;
4074 #endif
4075
4076 // the 1st HTTP(S) request on a connection to an intercepting port
4077 if (!pipeline.nrequests && transparent())
4078 return true;
4079
4080 return false;
4081 }
4082
4083 NotePairs::Pointer
4084 ConnStateData::notes()
4085 {
4086 if (!theNotes)
4087 theNotes = new NotePairs;
4088 return theNotes;
4089 }
4090
4091 std::ostream &
4092 operator <<(std::ostream &os, const ConnStateData::PinnedIdleContext &pic)
4093 {
4094 return os << pic.connection << ", request=" << pic.request;
4095 }
4096
4097 std::ostream &
4098 operator <<(std::ostream &os, const ConnStateData::ServerConnectionContext &scc)
4099 {
4100 return os << scc.conn_ << ", srv_bytes=" << scc.preReadServerBytes.length();
4101 }
4102