]> git.ipfire.org Git - thirdparty/squid.git/blob - src/client_side.cc
3dcb8eb106569f2b6ea6da80e74546fe89de8e17
[thirdparty/squid.git] / src / client_side.cc
1 /*
2 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 33 Client-side Routines */
10
11 /**
12 \defgroup ClientSide Client-Side Logics
13 *
14 \section cserrors Errors and client side
15 *
16 \par Problem the first:
17 * the store entry is no longer authoritative on the
18 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
19 * of client_side_reply.c.
20 * Problem the second: resources are wasted if we delay in cleaning up.
 * Problem the third: we can't depend on a connection close to clean up.
22 *
23 \par Nice thing the first:
24 * Any step in the stream can callback with data
25 * representing an error.
26 * Nice thing the second: once you stop requesting reads from upstream,
27 * upstream can be stopped too.
28 *
29 \par Solution #1:
30 * Error has a callback mechanism to hand over a membuf
31 * with the error content. The failing node pushes that back as the
32 * reply. Can this be generalised to reduce duplicate efforts?
33 * A: Possibly. For now, only one location uses this.
34 * How to deal with pre-stream errors?
35 * Tell client_side_reply that we *want* an error page before any
36 * stream calls occur. Then we simply read as normal.
37 *
38 *
39 \section pconn_logic Persistent connection logic:
40 *
41 \par
42 * requests (httpClientRequest structs) get added to the connection
43 * list, with the current one being chr
44 *
45 \par
46 * The request is *immediately* kicked off, and data flows through
47 * to clientSocketRecipient.
48 *
49 \par
50 * If the data that arrives at clientSocketRecipient is not for the current
51 * request, clientSocketRecipient simply returns, without requesting more
52 * data, or sending it.
53 *
54 \par
55 * ConnStateData::kick() will then detect the presence of data in
 * the next ClientHttpRequest, and will send it, re-establishing the
57 * data flow.
58 */
59
60 #include "squid.h"
61 #include "acl/FilledChecklist.h"
62 #include "anyp/PortCfg.h"
63 #include "base/Subscription.h"
64 #include "base/TextException.h"
65 #include "CachePeer.h"
66 #include "client_db.h"
67 #include "client_side.h"
68 #include "client_side_reply.h"
69 #include "client_side_request.h"
70 #include "ClientRequestContext.h"
71 #include "clientStream.h"
72 #include "comm.h"
73 #include "comm/Connection.h"
74 #include "comm/Loops.h"
75 #include "comm/Read.h"
76 #include "comm/TcpAcceptor.h"
77 #include "comm/Write.h"
78 #include "CommCalls.h"
79 #include "debug/Messages.h"
80 #include "error/ExceptionErrorDetail.h"
81 #include "errorpage.h"
82 #include "fd.h"
83 #include "fde.h"
84 #include "fqdncache.h"
85 #include "FwdState.h"
86 #include "globals.h"
87 #include "helper.h"
88 #include "helper/Reply.h"
89 #include "http.h"
90 #include "http/one/RequestParser.h"
91 #include "http/one/TeChunkedParser.h"
92 #include "http/Stream.h"
93 #include "HttpHdrContRange.h"
94 #include "HttpHeaderTools.h"
95 #include "HttpReply.h"
96 #include "HttpRequest.h"
97 #include "ident/Config.h"
98 #include "ident/Ident.h"
99 #include "internal.h"
100 #include "ipc/FdNotes.h"
101 #include "ipc/StartListening.h"
102 #include "log/access_log.h"
103 #include "MemBuf.h"
104 #include "MemObject.h"
105 #include "mime_header.h"
106 #include "parser/Tokenizer.h"
107 #include "proxyp/Header.h"
108 #include "proxyp/Parser.h"
109 #include "sbuf/Stream.h"
110 #include "security/Certificate.h"
111 #include "security/CommunicationSecrets.h"
112 #include "security/Io.h"
113 #include "security/KeyLog.h"
114 #include "security/NegotiationHistory.h"
115 #include "servers/forward.h"
116 #include "SquidConfig.h"
117 #include "StatCounters.h"
118 #include "StatHist.h"
119 #include "Store.h"
120 #include "TimeOrTag.h"
121 #include "tools.h"
122
123 #if USE_AUTH
124 #include "auth/UserRequest.h"
125 #endif
126 #if USE_DELAY_POOLS
127 #include "ClientInfo.h"
128 #include "MessageDelayPools.h"
129 #endif
130 #if USE_OPENSSL
131 #include "ssl/bio.h"
132 #include "ssl/context_storage.h"
133 #include "ssl/gadgets.h"
134 #include "ssl/helper.h"
135 #include "ssl/ProxyCerts.h"
136 #include "ssl/ServerBump.h"
137 #include "ssl/support.h"
138 #endif
139
140 #include <climits>
141 #include <cmath>
142 #include <limits>
143
144 #if HAVE_SYSTEMD_SD_DAEMON_H
145 #include <systemd/sd-daemon.h>
146 #endif
147
148 /// dials clientListenerConnectionOpened call
149 class ListeningStartedDialer: public CallDialer, public Ipc::StartListeningCb
150 {
151 public:
152 typedef void (*Handler)(AnyP::PortCfgPointer &portCfg, const Ipc::FdNoteId note, const Subscription::Pointer &sub);
153 ListeningStartedDialer(Handler aHandler, AnyP::PortCfgPointer &aPortCfg, const Ipc::FdNoteId note, const Subscription::Pointer &aSub):
154 handler(aHandler), portCfg(aPortCfg), portTypeNote(note), sub(aSub) {}
155
156 virtual void print(std::ostream &os) const {
157 startPrint(os) <<
158 ", " << FdNote(portTypeNote) << " port=" << (void*)&portCfg << ')';
159 }
160
161 virtual bool canDial(AsyncCall &) const { return true; }
162 virtual void dial(AsyncCall &) { (handler)(portCfg, portTypeNote, sub); }
163
164 public:
165 Handler handler;
166
167 private:
168 AnyP::PortCfgPointer portCfg; ///< from HttpPortList
169 Ipc::FdNoteId portTypeNote; ///< Type of IPC socket being opened
170 Subscription::Pointer sub; ///< The handler to be subscribed for this connection listener
171 };
172
173 static void clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub);
174
175 static IOACB httpAccept;
176 #if USE_IDENT
177 static IDCB clientIdentDone;
178 #endif
179 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength);
180
181 static void clientUpdateStatHistCounters(const LogTags &logType, int svc_time);
182 static void clientUpdateStatCounters(const LogTags &logType);
183 static void clientUpdateHierCounters(HierarchyLogEntry *);
184 static bool clientPingHasFinished(ping_data const *aPing);
185 void prepareLogWithRequestDetails(HttpRequest *, const AccessLogEntryPointer &);
186 static void ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn);
187
188 char *skipLeadingSpace(char *aString);
189
190 #if USE_IDENT
191 static void
192 clientIdentDone(const char *ident, void *data)
193 {
194 ConnStateData *conn = (ConnStateData *)data;
195 xstrncpy(conn->clientConnection->rfc931, ident ? ident : dash_str, USER_IDENT_SZ);
196 }
197 #endif
198
199 void
200 clientUpdateStatCounters(const LogTags &logType)
201 {
202 ++statCounter.client_http.requests;
203
204 if (logType.isTcpHit())
205 ++statCounter.client_http.hits;
206
207 if (logType.oldType == LOG_TCP_HIT)
208 ++statCounter.client_http.disk_hits;
209 else if (logType.oldType == LOG_TCP_MEM_HIT)
210 ++statCounter.client_http.mem_hits;
211 }
212
213 void
214 clientUpdateStatHistCounters(const LogTags &logType, int svc_time)
215 {
216 statCounter.client_http.allSvcTime.count(svc_time);
217 /**
218 * The idea here is not to be complete, but to get service times
219 * for only well-defined types. For example, we don't include
220 * LOG_TCP_REFRESH_FAIL because its not really a cache hit
221 * (we *tried* to validate it, but failed).
222 */
223
224 switch (logType.oldType) {
225
226 case LOG_TCP_REFRESH_UNMODIFIED:
227 statCounter.client_http.nearHitSvcTime.count(svc_time);
228 break;
229
230 case LOG_TCP_INM_HIT:
231 case LOG_TCP_IMS_HIT:
232 statCounter.client_http.nearMissSvcTime.count(svc_time);
233 break;
234
235 case LOG_TCP_HIT:
236
237 case LOG_TCP_MEM_HIT:
238
239 case LOG_TCP_OFFLINE_HIT:
240 statCounter.client_http.hitSvcTime.count(svc_time);
241 break;
242
243 case LOG_TCP_MISS:
244
245 case LOG_TCP_CLIENT_REFRESH_MISS:
246 statCounter.client_http.missSvcTime.count(svc_time);
247 break;
248
249 default:
250 /* make compiler warnings go away */
251 break;
252 }
253 }
254
255 bool
256 clientPingHasFinished(ping_data const *aPing)
257 {
258 if (0 != aPing->stop.tv_sec && 0 != aPing->start.tv_sec)
259 return true;
260
261 return false;
262 }
263
264 void
265 clientUpdateHierCounters(HierarchyLogEntry * someEntry)
266 {
267 ping_data *i;
268
269 switch (someEntry->code) {
270 #if USE_CACHE_DIGESTS
271
272 case CD_PARENT_HIT:
273
274 case CD_SIBLING_HIT:
275 ++ statCounter.cd.times_used;
276 break;
277 #endif
278
279 case SIBLING_HIT:
280
281 case PARENT_HIT:
282
283 case FIRST_PARENT_MISS:
284
285 case CLOSEST_PARENT_MISS:
286 ++ statCounter.icp.times_used;
287 i = &someEntry->ping;
288
289 if (clientPingHasFinished(i))
290 statCounter.icp.querySvcTime.count(tvSubUsec(i->start, i->stop));
291
292 if (i->timeout)
293 ++ statCounter.icp.query_timeouts;
294
295 break;
296
297 case CLOSEST_PARENT:
298
299 case CLOSEST_DIRECT:
300 ++ statCounter.netdb.times_used;
301
302 break;
303
304 default:
305 break;
306 }
307 }
308
309 void
310 ClientHttpRequest::updateCounters()
311 {
312 clientUpdateStatCounters(loggingTags());
313
314 if (request->error)
315 ++ statCounter.client_http.errors;
316
317 clientUpdateStatHistCounters(loggingTags(),
318 tvSubMsec(al->cache.start_time, current_time));
319
320 clientUpdateHierCounters(&request->hier);
321 }
322
/// Copies request-derived details into the given access log entry:
/// MIME headers (when log_mime_hdrs is on), adaptation/ICAP history,
/// method, HTTP version, hierarchy data, and the external ACL user.
void
prepareLogWithRequestDetails(HttpRequest *request, const AccessLogEntryPointer &aLogEntry)
{
    assert(request);
    assert(aLogEntry != nullptr);

    if (Config.onoff.log_mime_hdrs) {
        MemBuf mb;
        mb.init();
        request->header.packInto(&mb);
        //This is the request after adaptation or redirection
        aLogEntry->headers.adapted_request = xstrdup(mb.buf);

        // the virgin request is saved to aLogEntry->request
        if (aLogEntry->request) {
            mb.reset();
            aLogEntry->request->header.packInto(&mb);
            aLogEntry->headers.request = xstrdup(mb.buf);
        }

#if USE_ADAPTATION
        // remember the last adaptation meta headers, if any were recorded
        const Adaptation::History::Pointer ah = request->adaptLogHistory();
        if (ah != nullptr) {
            mb.reset();
            ah->lastMeta.packInto(&mb);
            aLogEntry->adapt.last_meta = xstrdup(mb.buf);
        }
#endif

        mb.clean();
    }

#if ICAP_CLIENT
    const Adaptation::Icap::History::Pointer ih = request->icapHistory();
    if (ih != nullptr)
        ih->processingTime(aLogEntry->icap.processingTime);
#endif

    aLogEntry->http.method = request->method;
    aLogEntry->http.version = request->http_ver;
    aLogEntry->hier = request->hier;
    aLogEntry->cache.extuser = request->extacl_user.termedBuf();

    // Adapted request, if any, inherits and then collects all the stats, but
    // the virgin request gets logged instead; copy the stats to log them.
    // TODO: avoid losses by keeping these stats in a shared history object?
    if (aLogEntry->request) {
        aLogEntry->request->dnsWait = request->dnsWait;
        aLogEntry->request->error = request->error;
    }
}
374
/// Finalizes and emits the access.log record for this transaction and,
/// unless vetoed by the stats_collection ACLs, updates performance counters.
void
ClientHttpRequest::logRequest()
{
    if (!out.size && loggingTags().oldType == LOG_TAG_NONE)
        debugs(33, 5, "logging half-baked transaction: " << log_uri);

    al->icp.opcode = ICP_INVALID;
    al->url = log_uri;
    debugs(33, 9, "clientLogRequest: al.url='" << al->url << "'");

    // prefer the reply already attached to the ALE; otherwise, fall back to
    // the freshest reply of the logging store entry (if any)
    const auto findReply = [this]() -> const HttpReply * {
        if (al->reply)
            return al->reply.getRaw();
        if (const auto le = loggingEntry())
            return le->hasFreshestReply();
        return nullptr;
    };
    if (const auto reply = findReply()) {
        al->http.code = reply->sline.status();
        al->http.content_type = reply->content_type.termedBuf();
    }

    debugs(33, 9, "clientLogRequest: http.code='" << al->http.code << "'");

    if (loggingEntry() && loggingEntry()->mem_obj && loggingEntry()->objectLen() >= 0)
        al->cache.objectSize = loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?

    al->http.clientRequestSz.header = req_sz;
    // the virgin request is saved to al->request
    if (al->request && al->request->body_pipe)
        al->http.clientRequestSz.payloadData = al->request->body_pipe->producedSize();
    al->http.clientReplySz.header = out.headers_sz;
    // XXX: calculate without payload encoding or headers !!
    al->http.clientReplySz.payloadData = out.size - out.headers_sz; // pretend its all un-encoded data for now.

    al->cache.highOffset = out.offset;

    // total transaction time, from start until now
    tvSub(al->cache.trTime, al->cache.start_time, current_time);

    if (request)
        prepareLogWithRequestDetails(request, al);

#if USE_OPENSSL && 0

    /* This is broken. Fails if the connection has been closed. Needs
     * to snarf the ssl details some place earlier..
     */
    if (getConn() != NULL)
        al->cache.ssluser = sslGetUserEmail(fd_table[getConn()->fd].ssl);

#endif

    /* Add notes (if we have a request to annotate) */
    if (request) {
        SBuf matched;
        for (auto h: Config.notes) {
            if (h->match(request, al->reply.getRaw(), al, matched)) {
                request->notes()->add(h->key(), matched);
                debugs(33, 3, h->key() << " " << matched);
            }
        }
        // The al->notes and request->notes must point to the same object.
        al->syncNotes(request);
    }

    // checklist for evaluating logging-related ACLs (e.g., access_log ones)
    ACLFilledChecklist checklist(nullptr, request, nullptr);
    if (al->reply) {
        checklist.reply = al->reply.getRaw();
        HTTPMSGLOCK(checklist.reply);
    }

    if (request) {
        HTTPMSGUNLOCK(al->adapted_request);
        al->adapted_request = request;
        HTTPMSGLOCK(al->adapted_request);
    }
    // no need checklist.syncAle(): already synced
    checklist.al = al;
    accessLogLog(al, &checklist);

    // stats_collection ACLs (if configured) decide whether this transaction
    // should be counted in performance statistics
    bool updatePerformanceCounters = true;
    if (Config.accessList.stats_collection) {
        ACLFilledChecklist statsCheck(Config.accessList.stats_collection, request, nullptr);
        statsCheck.al = al;
        if (al->reply) {
            statsCheck.reply = al->reply.getRaw();
            HTTPMSGLOCK(statsCheck.reply);
        }
        updatePerformanceCounters = statsCheck.fastCheck().allowed();
    }

    if (updatePerformanceCounters) {
        if (request)
            updateCounters();

        if (getConn() != nullptr && getConn()->clientConnection != nullptr)
            clientdbUpdate(getConn()->clientConnection->remote, loggingTags(), AnyP::PROTO_HTTP, out.size);
    }
}
474
475 void
476 ClientHttpRequest::freeResources()
477 {
478 safe_free(uri);
479 safe_free(redirect.location);
480 range_iter.boundary.clean();
481 clearRequest();
482
483 if (client_stream.tail)
484 clientStreamAbort((clientStreamNode *)client_stream.tail->data, this);
485 }
486
487 void
488 httpRequestFree(void *data)
489 {
490 ClientHttpRequest *http = (ClientHttpRequest *)data;
491 assert(http != nullptr);
492 delete http;
493 }
494
495 /* This is a handler normally called by comm_close() */
496 void ConnStateData::connStateClosed(const CommCloseCbParams &)
497 {
498 if (clientConnection) {
499 clientConnection->noteClosure();
500 // keep closed clientConnection for logging, clientdb cleanup, etc.
501 }
502 deleteThis("ConnStateData::connStateClosed");
503 }
504
#if USE_AUTH
/// Sets, keeps, or clears the connection-level authentication credentials.
/// Changing or erasing previously established credentials is treated as a
/// security problem: the connection stops receiving or is reset (see below).
void
ConnStateData::setAuth(const Auth::UserRequest::Pointer &aur, const char *by)
{
    // no credentials yet: remember the new ones (if any) and return
    if (auth_ == nullptr) {
        if (aur != nullptr) {
            debugs(33, 2, "Adding connection-auth to " << clientConnection << " from " << by);
            auth_ = aur;
        }
        return;
    }

    // clobbered with self-pointer
    // NP: something nasty is going on in Squid, but harmless.
    if (aur == auth_) {
        debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection << " from " << by);
        return;
    }

    /*
     * Connection-auth relies on a single set of credentials being preserved
     * for all requests on a connection once they have been setup.
     * There are several things which need to happen to preserve security
     * when connection-auth credentials change unexpectedly or are unset.
     *
     * 1) auth helper released from any active state
     *
     * They can only be reserved by a handshake process which this
     * connection can now never complete.
     * This prevents helpers hanging when their connections close.
     *
     * 2) pinning is expected to be removed and server conn closed
     *
     * The upstream link is authenticated with the same credentials.
     * Expecting the same level of consistency we should have received.
     * This prevents upstream being faced with multiple or missing
     * credentials after authentication.
     * NP: un-pin is left to the cleanup in ConnStateData::swanSong()
     * we just trigger that cleanup here via comm_reset_close() or
     * ConnStateData::stopReceiving()
     *
     * 3) the connection needs to close.
     *
     * This prevents attackers injecting requests into a connection,
     * or gateways wrongly multiplexing users into a single connection.
     *
     * When credentials are missing closure needs to follow an auth
     * challenge for best recovery by the client.
     *
     * When credentials change there is nothing we can do but abort as
     * fast as possible. Sending TCP RST instead of an HTTP response
     * is the best-case action.
     */

    // clobbered with nul-pointer
    if (aur == nullptr) {
        debugs(33, 2, "WARNING: Graceful closure on " << clientConnection << " due to connection-auth erase from " << by);
        auth_->releaseAuthServer();
        auth_ = nullptr;
        // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
        // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
        // we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
        stopReceiving("connection-auth removed");
        return;
    }

    // clobbered with alternative credentials
    if (aur != auth_) {
        debugs(33, 2, "ERROR: Closing " << clientConnection << " due to change of connection-auth from " << by);
        auth_->releaseAuthServer();
        auth_ = nullptr;
        // this is a fatal type of problem.
        // Close the connection immediately with TCP RST to abort all traffic flow
        comm_reset_close(clientConnection);
        return;
    }

    /* NOT REACHABLE */
}
#endif
585
586 void
587 ConnStateData::resetReadTimeout(const time_t timeout)
588 {
589 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
590 AsyncCall::Pointer callback = JobCallback(33, 5, TimeoutDialer, this, ConnStateData::requestTimeout);
591 commSetConnTimeout(clientConnection, timeout, callback);
592 }
593
594 void
595 ConnStateData::extendLifetime()
596 {
597 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
598 AsyncCall::Pointer callback = JobCallback(5, 4, TimeoutDialer, this, ConnStateData::lifetimeTimeout);
599 commSetConnTimeout(clientConnection, Config.Timeout.lifetime, callback);
600 }
601
// cleans up before destructor is called
void
ConnStateData::swanSong()
{
    debugs(33, 2, clientConnection);

    flags.readMore = false;
    clientdbEstablished(clientConnection->remote, -1); /* decrement */

    // abort any in-progress transactions and log what needs logging
    terminateAll(ERR_NONE, LogTagsErrors());
    checkLogging();

    // XXX: Closing pinned conn is too harsh: The Client may want to continue!
    unpinConnection(true);

    Server::swanSong();

#if USE_AUTH
    // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
    setAuth(nullptr, "ConnStateData::SwanSong cleanup");
#endif

    // mark cleanup as done; checked by the destructor
    flags.swanSang = true;
}
626
627 void
628 ConnStateData::callException(const std::exception &ex)
629 {
630 Server::callException(ex); // logs ex and stops the job
631
632 ErrorDetail::Pointer errorDetail;
633 if (const auto tex = dynamic_cast<const TextException*>(&ex))
634 errorDetail = new ExceptionErrorDetail(tex->id());
635 else
636 errorDetail = new ExceptionErrorDetail(Here().id());
637 updateError(ERR_GATEWAY_FAILURE, errorDetail);
638 }
639
640 void
641 ConnStateData::updateError(const Error &error)
642 {
643 if (const auto context = pipeline.front()) {
644 const auto http = context->http;
645 assert(http);
646 http->updateError(error);
647 } else {
648 bareError.update(error);
649 }
650 }
651
652 bool
653 ConnStateData::isOpen() const
654 {
655 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
656 Comm::IsConnOpen(clientConnection) &&
657 !fd_table[clientConnection->fd].closing();
658 }
659
ConnStateData::~ConnStateData()
{
    debugs(33, 3, clientConnection);

    // by now, the connection should be closed and swanSong() should have run
    if (isOpen())
        debugs(33, DBG_IMPORTANT, "ERROR: Squid BUG: ConnStateData did not close " << clientConnection);

    if (!flags.swanSang)
        debugs(33, DBG_IMPORTANT, "ERROR: Squid BUG: ConnStateData was not destroyed properly; " << clientConnection);

    // detach from any request body pipe we were still feeding
    if (bodyPipe != nullptr)
        stopProducingFor(bodyPipe, false);

    delete bodyParser; // TODO: pool

#if USE_OPENSSL
    delete sslServerBump;
#endif
}
679
680 /**
681 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
682 * This is the client-side persistent connection flag. We need
683 * to set this relatively early in the request processing
684 * to handle hacks for broken servers and clients.
685 */
686 void
687 clientSetKeepaliveFlag(ClientHttpRequest * http)
688 {
689 HttpRequest *request = http->request;
690
691 debugs(33, 3, "http_ver = " << request->http_ver);
692 debugs(33, 3, "method = " << request->method);
693
694 // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
695 request->flags.proxyKeepalive = request->persistent();
696 }
697
698 int
699 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)
700 {
701 if (Config.maxRequestBodySize &&
702 bodyLength > Config.maxRequestBodySize)
703 return 1; /* too large */
704
705 return 0;
706 }
707
/// whether this transaction asks for multiple byte ranges
/// (delegates to the underlying HttpRequest)
bool
ClientHttpRequest::multipartRangeRequest() const
{
    return request->multipartRangeRequest();
}
713
/// Appends the terminating MIME multipart boundary line ("--boundary--"),
/// with its required leading CRLF, to mb.
void
clientPackTermBound(String boundary, MemBuf *mb)
{
    mb->appendf("\r\n--" SQUIDSTRINGPH "--\r\n", SQUIDSTRINGPRINT(boundary));
    debugs(33, 6, "buf offset: " << mb->size);
}
720
/// Packs one multipart/byteranges part header into mb: the boundary line,
/// the Content-Type copied from rep (if present), and a Content-Range built
/// from spec and the reply's content length.
void
clientPackRangeHdr(const HttpReplyPointer &rep, const HttpHdrRangeSpec * spec, String boundary, MemBuf * mb)
{
    HttpHeader hdr(hoReply);
    assert(rep);
    assert(spec);

    /* put boundary */
    debugs(33, 5, "appending boundary: " << boundary);
    /* rfc2046 requires to _prepend_ boundary with <crlf>! */
    mb->appendf("\r\n--" SQUIDSTRINGPH "\r\n", SQUIDSTRINGPRINT(boundary));

    /* stuff the header with required entries and pack it */

    if (rep->header.has(Http::HdrType::CONTENT_TYPE))
        hdr.putStr(Http::HdrType::CONTENT_TYPE, rep->header.getStr(Http::HdrType::CONTENT_TYPE));

    httpHeaderAddContRange(&hdr, *spec, rep->content_length);

    hdr.packInto(mb);
    hdr.clean();

    /* append <crlf> (we packed a header, not a reply) */
    mb->append("\r\n", 2);
}
746
747 /** returns expected content length for multi-range replies
748 * note: assumes that httpHdrRangeCanonize has already been called
749 * warning: assumes that HTTP headers for individual ranges at the
750 * time of the actuall assembly will be exactly the same as
751 * the headers when clientMRangeCLen() is called */
752 int64_t
753 ClientHttpRequest::mRangeCLen() const
754 {
755 int64_t clen = 0;
756 MemBuf mb;
757
758 assert(memObject());
759
760 mb.init();
761 HttpHdrRange::iterator pos = request->range->begin();
762
763 while (pos != request->range->end()) {
764 /* account for headers for this range */
765 mb.reset();
766 clientPackRangeHdr(&storeEntry()->mem().freshestReply(),
767 *pos, range_iter.boundary, &mb);
768 clen += mb.size;
769
770 /* account for range content */
771 clen += (*pos)->length;
772
773 debugs(33, 6, "clientMRangeCLen: (clen += " << mb.size << " + " << (*pos)->length << ") == " << clen);
774 ++pos;
775 }
776
777 /* account for the terminating boundary */
778 mb.reset();
779
780 clientPackTermBound(range_iter.boundary, &mb);
781
782 clen += mb.size;
783
784 mb.clean();
785
786 return clen;
787 }
788
789 /**
790 * generates a "unique" boundary string for multipart responses
791 * the caller is responsible for cleaning the string */
792 String
793 ClientHttpRequest::rangeBoundaryStr() const
794 {
795 const char *key;
796 String b(APP_FULLNAME);
797 b.append(":",1);
798 key = storeEntry()->getMD5Text();
799 b.append(key, strlen(key));
800 return b;
801 }
802
803 /**
804 * Write a chunk of data to a client socket. If the reply is present,
805 * send the reply headers down the wire too, and clean them up when
806 * finished.
807 * Pre-condition:
808 * The request is one backed by a connection, not an internal request.
809 * data context is not NULL
810 * There are no more entries in the stream chain.
811 */
812 void
813 clientSocketRecipient(clientStreamNode * node, ClientHttpRequest * http,
814 HttpReply * rep, StoreIOBuffer receivedData)
815 {
816 // do not try to deliver if client already ABORTED
817 if (!http->getConn() || !cbdataReferenceValid(http->getConn()) || !Comm::IsConnOpen(http->getConn()->clientConnection))
818 return;
819
820 /* Test preconditions */
821 assert(node != nullptr);
822 /* TODO: handle this rather than asserting
823 * - it should only ever happen if we cause an abort and
824 * the callback chain loops back to here, so we can simply return.
825 * However, that itself shouldn't happen, so it stays as an assert for now.
826 */
827 assert(cbdataReferenceValid(node));
828 assert(node->node.next == nullptr);
829 Http::StreamPointer context = dynamic_cast<Http::Stream *>(node->data.getRaw());
830 assert(context != nullptr);
831
832 /* TODO: check offset is what we asked for */
833
834 // TODO: enforces HTTP/1 MUST on pipeline order, but is irrelevant to HTTP/2
835 if (context != http->getConn()->pipeline.front())
836 context->deferRecipientForLater(node, rep, receivedData);
837 else if (http->getConn()->cbControlMsgSent) // 1xx to the user is pending
838 context->deferRecipientForLater(node, rep, receivedData);
839 else
840 http->getConn()->handleReply(rep, receivedData);
841 }
842
/**
 * Called when a downstream node is no longer interested in
 * our data. As we are a terminal node, this means on aborts
 * only
 */
void
clientSocketDetach(clientStreamNode * node, ClientHttpRequest * http)
{
    /* Test preconditions */
    assert(node != nullptr);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    /* Set null by ContextFree */
    assert(node->node.next == nullptr);
    /* this is the assert discussed above */
    assert(nullptr == dynamic_cast<Http::Stream *>(node->data.getRaw()));
    /* We are only called when the client socket shutsdown.
     * Tell the prev pipeline member we're finished
     */
    clientStreamDetach(node, http);
}
868
869 void
870 ConnStateData::readNextRequest()
871 {
872 debugs(33, 5, clientConnection << " reading next req");
873
874 fd_note(clientConnection->fd, "Idle client: Waiting for next request");
875 /**
876 * Set the timeout BEFORE calling readSomeData().
877 */
878 resetReadTimeout(clientConnection->timeLeft(idleTimeout()));
879
880 readSomeData();
881 /** Please don't do anything with the FD past here! */
882 }
883
884 static void
885 ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn)
886 {
887 debugs(33, 2, conn->clientConnection << " Sending next");
888
889 /** If the client stream is waiting on a socket write to occur, then */
890
891 if (deferredRequest->flags.deferred) {
892 /** NO data is allowed to have been sent. */
893 assert(deferredRequest->http->out.size == 0);
894 /** defer now. */
895 clientSocketRecipient(deferredRequest->deferredparams.node,
896 deferredRequest->http,
897 deferredRequest->deferredparams.rep,
898 deferredRequest->deferredparams.queuedBuffer);
899 }
900
901 /** otherwise, the request is still active in a callbacksomewhere,
902 * and we are done
903 */
904 }
905
/// Resumes this connection after finishing the current response: parses and
/// processes the next (possibly pipelined or deferred) request, schedules a
/// new read, or closes the connection, as appropriate.
void
ConnStateData::kick()
{
    if (!Comm::IsConnOpen(clientConnection)) {
        debugs(33, 2, clientConnection << " Connection was closed");
        return;
    }

    // a pinned connection is useless once its server side is gone
    if (pinning.pinned && !Comm::IsConnOpen(pinning.serverConnection)) {
        debugs(33, 2, clientConnection << " Connection was pinned but server side gone. Terminating client connection");
        clientConnection->close();
        return;
    }

    /** \par
     * We are done with the response, and we are either still receiving request
     * body (early response!) or have already stopped receiving anything.
     *
     * If we are still receiving, then clientParseRequest() below will fail.
     * (XXX: but then we will call readNextRequest() which may succeed and
     * execute a smuggled request as we are not done with the current request).
     *
     * If we stopped because we got everything, then try the next request.
     *
     * If we stopped receiving because of an error, then close now to avoid
     * getting stuck and to prevent accidental request smuggling.
     */

    if (const char *reason = stoppedReceiving()) {
        debugs(33, 3, "closing for earlier request error: " << reason);
        clientConnection->close();
        return;
    }

    /** \par
     * Attempt to parse a request from the request buffer.
     * If we've been fed a pipelined request it may already
     * be in our read buffer.
     *
     \par
     * This needs to fall through - if we're unlucky and parse the _last_ request
     * from our read buffer we may never re-register for another client read.
     */

    if (clientParseRequests()) {
        debugs(33, 3, clientConnection << ": parsed next request from buffer");
    }

    /** \par
     * Either we need to kick-start another read or, if we have
     * a half-closed connection, kill it after the last request.
     * This saves waiting for half-closed connections to finished being
     * half-closed _AND_ then, sometimes, spending "Timeout" time in
     * the keepalive "Waiting for next request" state.
     */
    if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
        debugs(33, 3, "half-closed client with no pending requests, closing");
        clientConnection->close();
        return;
    }

    /** \par
     * At this point we either have a parsed request (which we've
     * kicked off the processing for) or not. If we have a deferred
     * request (parsed but deferred for pipeling processing reasons)
     * then look at processing it. If not, simply kickstart
     * another read.
     */
    Http::StreamPointer deferredRequest = pipeline.front();
    if (deferredRequest != nullptr) {
        debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded");
        ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
    } else if (flags.readMore) {
        debugs(33, 3, clientConnection << ": calling readNextRequest()");
        readNextRequest();
    } else {
        // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
        debugs(33, DBG_IMPORTANT, MYNAME << "abandoning " << clientConnection);
    }
}
986
987 void
988 ConnStateData::stopSending(const char *error)
989 {
990 debugs(33, 4, "sending error (" << clientConnection << "): " << error <<
991 "; old receiving error: " <<
992 (stoppedReceiving() ? stoppedReceiving_ : "none"));
993
994 if (const char *oldError = stoppedSending()) {
995 debugs(33, 3, "already stopped sending: " << oldError);
996 return; // nothing has changed as far as this connection is concerned
997 }
998 stoppedSending_ = error;
999
1000 if (!stoppedReceiving()) {
1001 if (const int64_t expecting = mayNeedToReadMoreBody()) {
1002 debugs(33, 5, "must still read " << expecting <<
1003 " request body bytes with " << inBuf.length() << " unused");
1004 return; // wait for the request receiver to finish reading
1005 }
1006 }
1007
1008 clientConnection->close();
1009 }
1010
1011 void
1012 ConnStateData::afterClientWrite(size_t size)
1013 {
1014 if (pipeline.empty())
1015 return;
1016
1017 auto ctx = pipeline.front();
1018 if (size) {
1019 statCounter.client_http.kbytes_out += size;
1020 if (ctx->http->loggingTags().isTcpHit())
1021 statCounter.client_http.hit_kbytes_out += size;
1022 }
1023 ctx->writeComplete(size);
1024 }
1025
1026 Http::Stream *
1027 ConnStateData::abortRequestParsing(const char *const uri)
1028 {
1029 ClientHttpRequest *http = new ClientHttpRequest(this);
1030 http->req_sz = inBuf.length();
1031 http->setErrorUri(uri);
1032 auto *context = new Http::Stream(clientConnection, http);
1033 StoreIOBuffer tempBuffer;
1034 tempBuffer.data = context->reqbuf;
1035 tempBuffer.length = HTTP_REQBUF_SZ;
1036 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
1037 clientReplyStatus, new clientReplyContext(http), clientSocketRecipient,
1038 clientSocketDetach, context, tempBuffer);
1039 return context;
1040 }
1041
1042 void
1043 ConnStateData::startShutdown()
1044 {
1045 // RegisteredRunner API callback - Squid has been shut down
1046
1047 // if connection is idle terminate it now,
1048 // otherwise wait for grace period to end
1049 if (pipeline.empty())
1050 endingShutdown();
1051 }
1052
1053 void
1054 ConnStateData::endingShutdown()
1055 {
1056 // RegisteredRunner API callback - Squid shutdown grace period is over
1057
1058 // force the client connection to close immediately
1059 // swanSong() in the close handler will cleanup.
1060 if (Comm::IsConnOpen(clientConnection))
1061 clientConnection->close();
1062 }
1063
1064 char *
1065 skipLeadingSpace(char *aString)
1066 {
1067 char *result = aString;
1068
1069 while (xisspace(*aString))
1070 ++aString;
1071
1072 return result;
1073 }
1074
1075 /**
1076 * 'end' defaults to NULL for backwards compatibility
1077 * remove default value if we ever get rid of NULL-terminated
1078 * request buffers.
1079 */
1080 const char *
1081 findTrailingHTTPVersion(const char *uriAndHTTPVersion, const char *end)
1082 {
1083 if (nullptr == end) {
1084 end = uriAndHTTPVersion + strcspn(uriAndHTTPVersion, "\r\n");
1085 assert(end);
1086 }
1087
1088 for (; end > uriAndHTTPVersion; --end) {
1089 if (*end == '\n' || *end == '\r')
1090 continue;
1091
1092 if (xisspace(*end)) {
1093 if (strncasecmp(end + 1, "HTTP/", 5) == 0)
1094 return end + 1;
1095 else
1096 break;
1097 }
1098 }
1099
1100 return nullptr;
1101 }
1102
/// Builds an absolute URL for a request received on an accelerator
/// (reverse proxy) port, applying the port's vhost/vport/defaultsite
/// rewrite rules to the client-supplied request-URI.
/// \returns a malloc()ed absolute URI, or nullptr when no rewrite is needed
static char *
prepareAcceleratedURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
{
    int vhost = conn->port->vhost;
    int vport = conn->port->vport;
    static char ipbuf[MAX_IPSTRLEN];

    /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */

    static const SBuf cache_object("cache_object://");
    if (hp->requestUri().startsWith(cache_object))
        return nullptr; /* already in good shape */

    // XXX: re-use proper URL parser for this
    SBuf url = hp->requestUri(); // use full provided URI if we abort
    do { // use a loop so we can break out of it
        ::Parser::Tokenizer tok(url);
        if (tok.skip('/')) // origin-form URL already.
            break;

        if (conn->port->vhost)
            return nullptr; /* already in good shape */

        // skip the URI scheme
        static const CharacterSet uriScheme = CharacterSet("URI-scheme","+-.") + CharacterSet::ALPHA + CharacterSet::DIGIT;
        static const SBuf uriSchemeEnd("://");
        if (!tok.skipAll(uriScheme) || !tok.skip(uriSchemeEnd))
            break;

        // skip the authority segment
        // RFC 3986 complex nested ABNF for "authority" boils down to this:
        static const CharacterSet authority = CharacterSet("authority","-._~%:@[]!$&'()*+,;=") +
                                              CharacterSet::HEXDIG + CharacterSet::ALPHA + CharacterSet::DIGIT;
        if (!tok.skipAll(authority))
            break;

        // keep only the path (plus query/fragment), defaulting to "/"
        static const SBuf slashUri("/");
        const SBuf t = tok.remaining();
        if (t.isEmpty())
            url = slashUri;
        else if (t[0]=='/') // looks like path
            url = t;
        else if (t[0]=='?' || t[0]=='#') { // looks like query or fragment. fix '/'
            url = slashUri;
            url.append(t);
        } // else do nothing. invalid path

    } while(false);

#if SHOULD_REJECT_UNKNOWN_URLS
    // reject URI which are not well-formed even after the processing above
    // NOTE(review): abortRequestParsing() returns Http::Stream*, not the
    // char* this function returns; this disabled branch would not compile
    // if the macro were ever enabled — confirm before enabling.
    if (url.isEmpty() || url[0] != '/') {
        hp->parseStatusCode = Http::scBadRequest;
        return conn->abortRequestParsing("error:invalid-request");
    }
#endif

    // vport < 0 means "use the port the client actually connected to"
    if (vport < 0)
        vport = conn->clientConnection->local.port();

    char *receivedHost = nullptr;
    if (vhost && (receivedHost = hp->getHostHeaderField())) {
        // vhost mode: authority comes from the Host header
        SBuf host(receivedHost);
        debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport);
        if (vport > 0) {
            // remove existing :port (if any), cope with IPv6+ without port
            const auto lastColonPos = host.rfind(':');
            if (lastColonPos != SBuf::npos && *host.rbegin() != ']') {
                host.chop(0, lastColonPos); // truncate until the last colon
            }
            host.appendf(":%d", vport);
        } // else nothing to alter port-wise.
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const auto url_sz = scheme.length() + host.length() + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH SQUIDSBUFPH, SQUIDSBUFPRINT(scheme), SQUIDSBUFPRINT(host), SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VHOST REWRITE: " << uri);
        return uri;
    } else if (conn->port->defaultsite /* && !vhost */) {
        // defaultsite mode: authority comes from squid.conf
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn->port->defaultsite << " + vport=" << vport);
        char vportStr[32];
        vportStr[0] = '\0';
        if (vport > 0) {
            snprintf(vportStr, sizeof(vportStr),":%d",vport);
        }
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + strlen(conn->port->defaultsite) + sizeof(vportStr) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s%s" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), conn->port->defaultsite, vportStr, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << uri);
        return uri;
    } else if (vport > 0 /* && (!vhost || no Host:) */) {
        debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport);
        /* Put the local socket IP address as the hostname, with whatever vport we found */
        conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + sizeof(ipbuf) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), ipbuf, vport, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VPORT REWRITE: " << uri);
        return uri;
    }

    return nullptr;
}
1210
1211 static char *
1212 buildUrlFromHost(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1213 {
1214 char *uri = nullptr;
1215 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1216 if (const char *host = hp->getHostHeaderField()) {
1217 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1218 const int url_sz = scheme.length() + strlen(host) + hp->requestUri().length() + 32;
1219 uri = static_cast<char *>(xcalloc(url_sz, 1));
1220 snprintf(uri, url_sz, SQUIDSBUFPH "://%s" SQUIDSBUFPH,
1221 SQUIDSBUFPRINT(scheme),
1222 host,
1223 SQUIDSBUFPRINT(hp->requestUri()));
1224 }
1225 return uri;
1226 }
1227
/// Builds an absolute URL for a request received inside a bumped (SslBump)
/// connection whose request-URI is in origin-form.
/// \returns a malloc()ed URI, or nullptr when no rewrite is needed
char *
ConnStateData::prepareTlsSwitchingURL(const Http1::RequestParserPointer &hp)
{
    Must(switchedToHttps());

    if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
        return nullptr; /* already in good shape */

    // prefer the Host header when the client sent one
    char *uri = buildUrlFromHost(this, hp);
#if USE_OPENSSL
    if (!uri) {
        // otherwise fall back to the SNI or CONNECT authority remembered
        // when this connection switched to TLS
        Must(tlsConnectPort);
        Must(!tlsConnectHostOrIp.isEmpty());
        SBuf useHost;
        if (!tlsClientSni().isEmpty())
            useHost = tlsClientSni();
        else
            useHost = tlsConnectHostOrIp;

        const SBuf &scheme = AnyP::UriScheme(transferProtocol.protocol).image();
        const int url_sz = scheme.length() + useHost.length() + hp->requestUri().length() + 32;
        uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH ":%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme),
                 SQUIDSBUFPRINT(useHost),
                 tlsConnectPort,
                 SQUIDSBUFPRINT(hp->requestUri()));
    }
#endif
    if (uri)
        debugs(33, 5, "TLS switching host rewrite: " << uri);
    return uri;
}
1261
1262 static char *
1263 prepareTransparentURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1264 {
1265 // TODO Must() on URI !empty when the parser supports throw. For now avoid assert().
1266 if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
1267 return nullptr; /* already in good shape */
1268
1269 char *uri = buildUrlFromHost(conn, hp);
1270 if (!uri) {
1271 /* Put the local socket IP address as the hostname. */
1272 static char ipbuf[MAX_IPSTRLEN];
1273 conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
1274 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1275 const int url_sz = sizeof(ipbuf) + hp->requestUri().length() + 32;
1276 uri = static_cast<char *>(xcalloc(url_sz, 1));
1277 snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
1278 SQUIDSBUFPRINT(scheme),
1279 ipbuf, conn->clientConnection->local.port(), SQUIDSBUFPRINT(hp->requestUri()));
1280 }
1281
1282 if (uri)
1283 debugs(33, 5, "TRANSPARENT REWRITE: " << uri);
1284 return uri;
1285 }
1286
/// Parses one HTTP request header from inBuf with the given parser and, on
/// success, creates the ClientHttpRequest/Http::Stream pair plus the client
/// stream that will generate the response. Also rewrites origin-form URIs
/// into absolute form for bumped/intercepted/accelerated/internal traffic.
/// \returns nullptr when more input is needed to finish parsing; otherwise
///          a new Http::Stream, possibly one carrying an error:... pseudo-URI
Http::Stream *
ConnStateData::parseHttpRequest(const Http1::RequestParserPointer &hp)
{
    /* Attempt to parse the first line; this will define where the method, url, version and header begin */
    {
        Must(hp);

        // keep a raw copy of the input in case tunnelOnError() needs it later
        if (preservingClientData_)
            preservedClientData = inBuf;

        const bool parsedOk = hp->parse(inBuf);

        // sync the buffers after parsing.
        inBuf = hp->remaining();

        if (hp->needsMoreData()) {
            debugs(33, 5, "Incomplete request, waiting for end of request line");
            return nullptr;
        }

        if (!parsedOk) {
            const bool tooBig =
                hp->parseStatusCode == Http::scRequestHeaderFieldsTooLarge ||
                hp->parseStatusCode == Http::scUriTooLong;
            auto result = abortRequestParsing(
                              tooBig ? "error:request-too-large" : "error:invalid-request");
            // assume that remaining leftovers belong to this bad request
            if (!inBuf.isEmpty())
                consumeInput(inBuf.length());
            return result;
        }
    }

    /* We know the whole request is in parser now */
    debugs(11, 2, "HTTP Client " << clientConnection);
    debugs(11, 2, "HTTP Client REQUEST:\n---------\n" <<
           hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol() << "\n" <<
           hp->mimeHeader() <<
           "\n----------");

    /* deny CONNECT via accelerated ports */
    if (hp->method() == Http::METHOD_CONNECT && port != nullptr && port->flags.accelSurrogate) {
        debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << transferProtocol << " Accelerator port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    /* HTTP/2 connection magic prefix starts with "PRI ".
     * Deny "PRI" method if used in HTTP/1.x or 0.9 versions.
     * If seen it signals a broken client or proxy has corrupted the traffic.
     */
    if (hp->method() == Http::METHOD_PRI && hp->messageProtocol() < Http::ProtocolVersion(2,0)) {
        debugs(33, DBG_IMPORTANT, "WARNING: PRI method received on " << transferProtocol << " port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    if (hp->method() == Http::METHOD_NONE) {
        debugs(33, DBG_IMPORTANT, "WARNING: Unsupported method: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:unsupported-request-method");
    }

    // Process headers after request line
    debugs(33, 3, "complete request received. " <<
           "prefix_sz = " << hp->messageHeaderSize() <<
           ", request-line-size=" << hp->firstLineSize() <<
           ", mime-header-size=" << hp->headerBlockSize() <<
           ", mime header block:\n" << hp->mimeHeader() << "\n----------");

    /* Ok, all headers are received */
    ClientHttpRequest *http = new ClientHttpRequest(this);

    http->req_sz = hp->messageHeaderSize();
    Http::Stream *result = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = result->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // wire the reply-producing (server) end and delivery (client) end
    // of the client stream together
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = result;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    /* set url */
    debugs(33,5, "Prepare absolute URL from " <<
           (transparent()?"intercept":(port->flags.accelSurrogate ? "accel":"")));
    /* Rewrite the URL in transparent or accelerator mode */
    /* NP: there are several cases to traverse here:
     *  - standard mode (forward proxy)
     *  - transparent mode (TPROXY)
     *  - transparent mode with failures
     *  - intercept mode (NAT)
     *  - intercept mode with failures
     *  - accelerator mode (reverse proxy)
     *  - internal relative-URL
     *  - mixed combos of the above with internal URL
     *  - remote interception with PROXY protocol
     *  - remote reverse-proxy with PROXY protocol
     */
    if (switchedToHttps()) {
        http->uri = prepareTlsSwitchingURL(hp);
    } else if (transparent()) {
        /* intercept or transparent mode, properly working with no failures */
        http->uri = prepareTransparentURL(this, hp);

    } else if (internalCheck(hp->requestUri())) { // NP: only matches relative-URI
        /* internal URL mode */
        /* prepend our name & port */
        http->uri = xstrdup(internalLocalUri(nullptr, hp->requestUri()));
        // We just re-wrote the URL. Must replace the Host: header.
        //  But have not parsed there yet!! flag for local-only handling.
        http->flags.internal = true;

    } else if (port->flags.accelSurrogate) {
        /* accelerator mode */
        http->uri = prepareAcceleratedURL(this, hp);
        http->flags.accel = true;
    }

    if (!http->uri) {
        /* No special rewrites have been applied above, use the
         * requested url. may be rewritten later, so make extra room */
        int url_sz = hp->requestUri().length() + Config.appendDomainLen + 5;
        http->uri = (char *)xcalloc(url_sz, 1);
        SBufToCstring(http->uri, hp->requestUri());
    }

    result->flags.parsed_ok = 1;
    return result;
}
1422
1423 bool
1424 ConnStateData::shouldCloseOnEof() const
1425 {
1426 if (pipeline.empty() && inBuf.isEmpty()) {
1427 debugs(33, 4, "yes, without active requests and unparsed input");
1428 return true;
1429 }
1430
1431 if (!Config.onoff.half_closed_clients) {
1432 debugs(33, 3, "yes, without half_closed_clients");
1433 return true;
1434 }
1435
1436 // Squid currently tries to parse (possibly again) a partially received
1437 // request after an EOF with half_closed_clients. To give that last parse in
1438 // afterClientRead() a chance, we ignore partially parsed requests here.
1439 debugs(33, 3, "no, honoring half_closed_clients");
1440 return false;
1441 }
1442
/// Removes byteCount bytes from the start of the read buffer after they
/// have been handed off elsewhere (e.g., attributed to an aborted request).
void
ConnStateData::consumeInput(const size_t byteCount)
{
    // caller must only consume bytes that are actually buffered
    assert(byteCount > 0 && byteCount <= inBuf.length());
    inBuf.consume(byteCount);
    debugs(33, 5, "inBuf has " << inBuf.length() << " unused bytes");
}
1450
1451 void
1452 ConnStateData::clientAfterReadingRequests()
1453 {
1454 // Were we expecting to read more request body from half-closed connection?
1455 if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection->fd)) {
1456 debugs(33, 3, "truncated body: closing half-closed " << clientConnection);
1457 clientConnection->close();
1458 return;
1459 }
1460
1461 if (flags.readMore)
1462 readSomeData();
1463 }
1464
1465 void
1466 ConnStateData::quitAfterError(HttpRequest *request)
1467 {
1468 // From HTTP p.o.v., we do not have to close after every error detected
1469 // at the client-side, but many such errors do require closure and the
1470 // client-side code is bad at handling errors so we play it safe.
1471 if (request)
1472 request->flags.proxyKeepalive = false;
1473 flags.readMore = false;
1474 debugs(33,4, "Will close after error: " << clientConnection);
1475 }
1476
#if USE_OPENSSL
/// Serves a delayed SslBump error: either an error entry saved while
/// processing the CONNECT request, or a freshly detected mismatch between
/// the peeked server certificate and the bumped request's domain.
/// \returns true when an error response has been scheduled for this context
bool ConnStateData::serveDelayedError(Http::Stream *context)
{
    ClientHttpRequest *http = context->http;

    if (!sslServerBump)
        return false; // not a bumped connection; nothing was delayed

    assert(sslServerBump->entry);
    // Did we create an error entry while processing CONNECT?
    if (!sslServerBump->entry->isEmpty()) {
        quitAfterError(http->request);

        // Get the saved error entry and send it to the client by replacing the
        // ClientHttpRequest store entry with it.
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert(repContext);
        // BUGFIX: log message used to read "delated"
        debugs(33, 5, "Responding with delayed error for " << http->uri);
        repContext->setReplyToStoreEntry(sslServerBump->entry, "delayed SslBump error");

        // Get error details from the fake certificate-peeking request.
        http->request->error.update(sslServerBump->request->error);
        context->pullData();
        return true;
    }

    // In bump-server-first mode, we have not necessarily seen the intended
    // server name at certificate-peeking time. Check for domain mismatch now,
    // when we can extract the intended name from the bumped HTTP request.
    if (const Security::CertPointer &srvCert = sslServerBump->serverCert) {
        HttpRequest *request = http->request;
        if (!Ssl::checkX509ServerValidity(srvCert.get(), request->url.host())) {
            debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
                   "does not match domainname " << request->url.host());

            // the admin may allow this specific mismatch via sslproxy_cert_error
            bool allowDomainMismatch = false;
            if (Config.ssl_client.cert_error) {
                ACLFilledChecklist check(Config.ssl_client.cert_error, nullptr);
                check.sslErrors = new Security::CertErrors(Security::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH, srvCert));
                clientAclChecklistFill(check, http);
                allowDomainMismatch = check.fastCheck().allowed();
                delete check.sslErrors;
                check.sslErrors = nullptr;
            }

            if (!allowDomainMismatch) {
                quitAfterError(request);

                clientStreamNode *node = context->getClientReplyContext();
                clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
                assert (repContext);

                request->hier = sslServerBump->request->hier;

                // Create an error object and fill it
                const auto err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request, http->al);
                err->src_addr = clientConnection->remote;
                const Security::ErrorDetail::Pointer errDetail = new Security::ErrorDetail(
                    SQUID_X509_V_ERR_DOMAIN_MISMATCH,
                    srvCert, nullptr);
                updateError(ERR_SECURE_CONNECT_FAIL, errDetail);
                repContext->setReplyToError(request->method, err);
                assert(context->http->out.offset == 0);
                context->pullData();
                return true;
            }
        }
    }

    return false;
}
#endif // USE_OPENSSL
1550
1551 /// initiate tunneling if possible or return false otherwise
1552 bool
1553 ConnStateData::tunnelOnError(const err_type requestError)
1554 {
1555 if (!Config.accessList.on_unsupported_protocol) {
1556 debugs(33, 5, "disabled; send error: " << requestError);
1557 return false;
1558 }
1559
1560 if (!preservingClientData_) {
1561 debugs(33, 3, "may have forgotten client data; send error: " << requestError);
1562 return false;
1563 }
1564
1565 ACLFilledChecklist checklist(Config.accessList.on_unsupported_protocol, nullptr);
1566 checklist.requestErrorType = requestError;
1567 fillChecklist(checklist);
1568 auto answer = checklist.fastCheck();
1569 if (answer.allowed() && answer.kind == 1) {
1570 debugs(33, 3, "Request will be tunneled to server");
1571 const auto context = pipeline.front();
1572 const auto http = context ? context->http : nullptr;
1573 const auto request = http ? http->request : nullptr;
1574 if (context)
1575 context->finished(); // Will remove from pipeline queue
1576 Comm::SetSelect(clientConnection->fd, COMM_SELECT_READ, nullptr, nullptr, 0);
1577 return initiateTunneledRequest(request, "unknown-protocol", preservedClientData);
1578 }
1579 debugs(33, 3, "denied; send error: " << requestError);
1580 return false;
1581 }
1582
1583 void
1584 clientProcessRequestFinished(ConnStateData *conn, const HttpRequest::Pointer &request)
1585 {
1586 /*
1587 * DPW 2007-05-18
1588 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
1589 * to here because calling comm_reset_close() causes http to
1590 * be freed before accessing.
1591 */
1592 if (request != nullptr && request->flags.resetTcp && Comm::IsConnOpen(conn->clientConnection)) {
1593 debugs(33, 3, "Sending TCP RST on " << conn->clientConnection);
1594 conn->flags.readMore = false;
1595 comm_reset_close(conn->clientConnection);
1596 }
1597 }
1598
/// Finishes setting up an already-parsed-and-accepted request: attaches it
/// to the connection manager, applies internal-URL and method checks,
/// validates message framing, prepares the request body pipe (if any), and
/// starts the doCallouts() processing chain.
void
clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
{
    ClientHttpRequest *http = context->http;
    bool mustReplyToOptions = false;
    bool expectBody = false;

    // We already have the request parsed and checked, so we
    // only need to go through the final body/conn setup to doCallouts().
    assert(http->request);
    HttpRequest::Pointer request = http->request;

    // temporary hack to avoid splitting this huge function with sensitive code
    const bool isFtp = !hp;

    // Some blobs below are still HTTP-specific, but we would have to rewrite
    // this entire function to remove them from the FTP code path. Connection
    // setup and body_pipe preparation blobs are needed for FTP.

    request->manager(conn, http->al);

    request->flags.accelerated = http->flags.accel;
    request->flags.sslBumped=conn->switchedToHttps();
    // TODO: decouple http->flags.accel from request->flags.sslBumped
    request->flags.noDirect = (request->flags.accelerated && !request->flags.sslBumped) ?
                              !conn->port->allow_direct : 0;
    // record how this request entered Squid (FTP, HTTPS, or plain HTTP)
    request->sources |= isFtp ? Http::Message::srcFtp :
                        ((request->flags.sslBumped || conn->port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    if (request->flags.sslBumped) {
        // inherit the credentials negotiated for the bumped connection
        if (conn->getAuth() != nullptr)
            request->auth_user_request = conn->getAuth();
    }
#endif

    if (internalCheck(request->url.path())) {
        if (internalHostnameIs(request->url.host()) && request->url.port() == getMyPort()) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true));
            http->flags.internal = true;
        } else if (Config.onoff.global_internal_static && internalStaticCheck(request->url.path())) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (global_internal_static on)");
            // rewrite the authority so this proxy serves the static object
            request->url.setScheme(AnyP::PROTO_HTTP, "http");
            request->url.host(internalHostname());
            request->url.port(getMyPort());
            http->flags.internal = true;
            http->setLogUriToRequestUri();
        } else
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (not this proxy)");
    }

    request->flags.internal = http->flags.internal;

    if (!isFtp) {
        // XXX: for non-HTTP messages instantiate a different Http::Message child type
        // for now Squid only supports HTTP requests
        const AnyP::ProtocolVersion &http_ver = hp->messageProtocol();
        assert(request->http_ver.protocol == http_ver.protocol);
        request->http_ver.major = http_ver.major;
        request->http_ver.minor = http_ver.minor;
    }

    // OPTIONS with Max-Forwards: 0 must be answered by this hop (RFC 9110)
    mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
                         (request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
    if (!urlCheckRequest(request.getRaw()) || mustReplyToOptions) {
        clientStreamNode *node = context->getClientReplyContext();
        conn->quitAfterError(request.getRaw());
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        repContext->setReplyToError(ERR_UNSUP_REQ, Http::scNotImplemented, nullptr,
                                    conn, request.getRaw(), nullptr, nullptr);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    // reject requests with invalid/conflicting Content-Length framing
    const auto frameStatus = request->checkEntityFraming();
    if (frameStatus != Http::scNone) {
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        conn->quitAfterError(request.getRaw());
        repContext->setReplyToError(ERR_INVALID_REQ, frameStatus, nullptr, conn, request.getRaw(), nullptr, nullptr);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    clientSetKeepaliveFlag(http);
    // Let tunneling code be fully responsible for CONNECT requests
    if (http->request->method == Http::METHOD_CONNECT) {
        context->mayUseConnection(true);
        conn->flags.readMore = false;
    }

#if USE_OPENSSL
    if (conn->switchedToHttps() && conn->serveDelayedError(context)) {
        clientProcessRequestFinished(conn, request);
        return;
    }
#endif

    /* Do we expect a request-body? */
    const auto chunked = request->header.chunked();
    expectBody = chunked || request->content_length > 0;
    if (!context->mayUseConnection() && expectBody) {
        request->body_pipe = conn->expectRequestBody(
                                 chunked ? -1 : request->content_length);

        /* Is it too large? */
        if (!chunked && // if chunked, we will check as we accumulate
                clientIsRequestBodyTooLargeForPolicy(request->content_length)) {
            clientStreamNode *node = context->getClientReplyContext();
            clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
            assert (repContext);
            conn->quitAfterError(request.getRaw());
            repContext->setReplyToError(ERR_TOO_BIG,
                                        Http::scContentTooLarge, nullptr,
                                        conn, http->request, nullptr, nullptr);
            assert(context->http->out.offset == 0);
            context->pullData();
            clientProcessRequestFinished(conn, request);
            return;
        }

        if (!isFtp) {
            // We may stop producing, comm_close, and/or call setReplyToError()
            // below, so quit on errors to avoid http->doCallouts()
            if (!conn->handleRequestBodyData()) {
                clientProcessRequestFinished(conn, request);
                return;
            }

            if (!request->body_pipe->productionEnded()) {
                debugs(33, 5, "need more request body");
                context->mayUseConnection(true);
                assert(conn->flags.readMore);
            }
        }
    }

    http->calloutContext = new ClientRequestContext(http);

    http->doCallouts();

    clientProcessRequestFinished(conn, request);
}
1747
/// Appends the given stream to this connection's pipeline of in-progress
/// requests, transferring any pending connection-level (bare) error to the
/// new request so that it gets logged with that request.
void
ConnStateData::add(const Http::StreamPointer &context)
{
    debugs(33, 3, context << " to " << pipeline.count() << '/' << pipeline.nrequests);
    if (bareError) {
        debugs(33, 5, "assigning " << bareError);
        assert(context);
        assert(context->http);
        context->http->updateError(bareError);
        bareError.clear(); // the error now belongs to the queued request
    }
    pipeline.add(context);
}
1761
1762 int
1763 ConnStateData::pipelinePrefetchMax() const
1764 {
1765 // TODO: Support pipelined requests through pinned connections.
1766 if (pinning.pinned)
1767 return 0;
1768 return Config.pipeline_max_prefetch;
1769 }
1770
1771 /**
1772 * Limit the number of concurrent requests.
1773 * \return true when there are available position(s) in the pipeline queue for another request.
1774 * \return false when the pipeline queue is full or disabled.
1775 */
1776 bool
1777 ConnStateData::concurrentRequestQueueFilled() const
1778 {
1779 const int existingRequestCount = pipeline.count();
1780
1781 // default to the configured pipeline size.
1782 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
1783 #if USE_OPENSSL
1784 const int internalRequest = (transparent() && sslBumpMode == Ssl::bumpSplice) ? 1 : 0;
1785 #else
1786 const int internalRequest = 0;
1787 #endif
1788 const int concurrentRequestLimit = pipelinePrefetchMax() + 1 + internalRequest;
1789
1790 // when queue filled already we can't add more.
1791 if (existingRequestCount >= concurrentRequestLimit) {
1792 debugs(33, 3, clientConnection << " max concurrent requests reached (" << concurrentRequestLimit << ")");
1793 debugs(33, 5, clientConnection << " deferring new request until one is done");
1794 return true;
1795 }
1796
1797 return false;
1798 }
1799
1800 /**
1801 * Perform proxy_protocol_access ACL tests on the client which
1802 * connected to PROXY protocol port to see if we trust the
1803 * sender enough to accept their PROXY header claim.
1804 */
1805 bool
1806 ConnStateData::proxyProtocolValidateClient()
1807 {
1808 if (!Config.accessList.proxyProtocol)
1809 return proxyProtocolError("PROXY client not permitted by default ACL");
1810
1811 ACLFilledChecklist ch(Config.accessList.proxyProtocol, nullptr);
1812 fillChecklist(ch);
1813 if (!ch.fastCheck().allowed())
1814 return proxyProtocolError("PROXY client not permitted by ACLs");
1815
1816 return true;
1817 }
1818
1819 /**
1820 * Perform cleanup on PROXY protocol errors.
1821 * If header parsing hits a fatal error terminate the connection,
1822 * otherwise wait for more data.
1823 */
1824 bool
1825 ConnStateData::proxyProtocolError(const char *msg)
1826 {
1827 if (msg) {
1828 // This is important to know, but maybe not so much that flooding the log is okay.
1829 #if QUIET_PROXY_PROTOCOL
1830 // display the first of every 32 occurrences at level 1, the others at level 2.
1831 static uint8_t hide = 0;
1832 debugs(33, (hide++ % 32 == 0 ? DBG_IMPORTANT : 2), msg << " from " << clientConnection);
1833 #else
1834 debugs(33, DBG_IMPORTANT, msg << " from " << clientConnection);
1835 #endif
1836 mustStop(msg);
1837 }
1838 return false;
1839 }
1840
/// Attempts to extract a PROXY protocol header from the input buffer and,
/// upon success, stores the parsed header in proxyProtocolHeader_.
/// \returns true if the header was successfully parsed
/// \returns false if more data is needed to parse the header or on error
bool
ConnStateData::parseProxyProtocolHeader()
{
    try {
        const auto parsed = ProxyProtocol::Parse(inBuf);
        proxyProtocolHeader_ = parsed.header;
        assert(bool(proxyProtocolHeader_));
        inBuf.consume(parsed.size);
        needProxyProtocolHeader_ = false;
        if (proxyProtocolHeader_->hasForwardedAddresses()) {
            // adopt the client endpoints claimed by the (trusted) sender
            clientConnection->local = proxyProtocolHeader_->destinationAddress;
            clientConnection->remote = proxyProtocolHeader_->sourceAddress;
            if ((clientConnection->flags & COMM_TRANSPARENT))
                clientConnection->flags ^= COMM_TRANSPARENT; // prevent TPROXY spoofing of this new IP.
            debugs(33, 5, "PROXY/" << proxyProtocolHeader_->version() << " upgrade: " << clientConnection);
        }
    } catch (const Parser::BinaryTokenizer::InsufficientInput &) {
        // not an error: the header may simply not have fully arrived yet
        debugs(33, 3, "PROXY protocol: waiting for more than " << inBuf.length() << " bytes");
        return false;
    } catch (const std::exception &e) {
        // any other parsing failure is fatal for this connection
        return proxyProtocolError(e.what());
    }
    return true;
}
1869
1870 void
1871 ConnStateData::receivedFirstByte()
1872 {
1873 if (receivedFirstByte_)
1874 return;
1875
1876 receivedFirstByte_ = true;
1877 resetReadTimeout(Config.Timeout.request);
1878 }
1879
1880 /**
1881 * Attempt to parse one or more requests from the input buffer.
1882 * Returns true after completing parsing of at least one request [header]. That
1883 * includes cases where parsing ended with an error (e.g., a huge request).
1884 */
1885 bool
1886 ConnStateData::clientParseRequests()
1887 {
1888 bool parsed_req = false;
1889
1890 debugs(33, 5, clientConnection << ": attempting to parse");
1891
1892 // Loop while we have read bytes that are not needed for producing the body
1893 // On errors, bodyPipe may become nil, but readMore will be cleared
1894 while (!inBuf.isEmpty() && !bodyPipe && flags.readMore) {
1895
1896 // Prohibit concurrent requests when using a pinned to-server connection
1897 // because our Client classes do not support request pipelining.
1898 if (pinning.pinned && !pinning.readHandler) {
1899 debugs(33, 3, clientConnection << " waits for busy " << pinning.serverConnection);
1900 break;
1901 }
1902
1903 /* Limit the number of concurrent requests */
1904 if (concurrentRequestQueueFilled())
1905 break;
1906
1907 // try to parse the PROXY protocol header magic bytes
1908 if (needProxyProtocolHeader_) {
// a false return means "need more bytes" or a fatal PROXY error
1909 if (!parseProxyProtocolHeader())
1910 break;
1911
1912 // we have been waiting for PROXY to provide client-IP
1913 // for some lookups, ie rDNS and IDENT.
1914 whenClientIpKnown();
1915
1916 // Done with PROXY protocol which has cleared preservingClientData_.
1917 // If the next protocol supports on_unsupported_protocol, then its
1918 // parseOneRequest() must reset preservingClientData_.
1919 assert(!preservingClientData_);
1920 }
1921
1922 if (Http::StreamPointer context = parseOneRequest()) {
1923 debugs(33, 5, clientConnection << ": done parsing a request");
// a successfully parsed request keeps the connection alive longer
1924 extendLifetime();
1925 context->registerWithConn();
1926
1927 #if USE_OPENSSL
1928 if (switchedToHttps())
1929 parsedBumpedRequestCount++;
1930 #endif
1931
1932 processParsedRequest(context);
1933
1934 parsed_req = true; // XXX: do we really need to parse everything right NOW ?
1935
// e.g., CONNECT or protocol upgrade: the request owns the connection,
// so pipelining further requests now would be wrong
1936 if (context->mayUseConnection()) {
1937 debugs(33, 3, "Not parsing new requests, as this request may need the connection"); 
1938 break;
1939 }
1940 } else {
1941 debugs(33, 5, clientConnection << ": not enough request data: " <<
1942 inBuf.length() << " < " << Config.maxRequestHeaderSize);
// an over-limit header must have been rejected by parseOneRequest() already
1943 Must(inBuf.length() < Config.maxRequestHeaderSize);
1944 break;
1945 }
1946 }
1947
1948 /* XXX where to 'finish' the parsing pass? */
1949 return parsed_req;
1950 }
1951
/// Reacts to freshly read client bytes: continues a TLS handshake if one is
/// being parsed, otherwise tries to parse request(s) and handles half-closed
/// idle connections.
1952 void
1953 ConnStateData::afterClientRead()
1954 {
1955 #if USE_OPENSSL
// mid-handshake bytes belong to the TLS parser, not the HTTP parser
1956 if (parsingTlsHandshake) {
1957 parseTlsHandshake();
1958 return;
1959 }
1960 #endif
1961
1962 /* Process next request */
1963 if (pipeline.empty())
1964 fd_note(clientConnection->fd, "Reading next request");
1965
1966 if (!clientParseRequests()) {
// parsing (or its error handling) may have closed the connection
1967 if (!isOpen())
1968 return;
1969 // We may get here if the client half-closed after sending a partial
1970 // request. See doClientRead() and shouldCloseOnEof().
1971 // XXX: This partially duplicates ConnStateData::kick().
1972 if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
1973 debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
1974 clientConnection->close();
1975 return;
1976 }
1977 }
1978
1979 if (!isOpen())
1980 return;
1981
1982 clientAfterReadingRequests();
1983 }
1984
1985 /**
1986 * called when new request data has been read from the socket
1987 *
1988 * \retval false called comm_close or setReplyToError (the caller should bail)
1989 * \retval true we did not call comm_close or setReplyToError
1990 */
1991 bool
1992 ConnStateData::handleReadData()
1993 {
1994 // if we are reading a body, stuff data into the body pipe
1995 if (bodyPipe != nullptr)
1996 return handleRequestBodyData();
1997 return true;
1998 }
1999
2000 /**
2001 * called when new request body data has been buffered in inBuf
2002 * may close the connection if we were closing and piped everything out
2003 *
2004 * \retval false called comm_close or setReplyToError (the caller should bail)
2005 * \retval true we did not call comm_close or setReplyToError
2006 */
2007 bool
2008 ConnStateData::handleRequestBodyData()
2009 {
2010 assert(bodyPipe != nullptr);
2011
2012 if (bodyParser) { // chunked encoding
// a non-ERR_NONE code aborts the body (and usually the connection)
2013 if (const err_type error = handleChunkedRequestBody()) {
2014 abortChunkedRequestBody(error);
2015 return false;
2016 }
2017 } else { // identity encoding
2018 debugs(33,5, "handling plain request body for " << clientConnection);
// the pipe may accept fewer bytes than offered; keep the rest buffered
2019 const size_t putSize = bodyPipe->putMoreData(inBuf.c_str(), inBuf.length());
2020 if (putSize > 0)
2021 consumeInput(putSize);
2022
2023 if (!bodyPipe->mayNeedMoreData()) {
2024 // BodyPipe will clear us automagically when we produced everything
2025 bodyPipe = nullptr;
2026 }
2027 }
2028
// nil bodyPipe here means the entire body has been produced
2029 if (!bodyPipe) {
2030 debugs(33,5, "produced entire request body for " << clientConnection);
2031
2032 if (const char *reason = stoppedSending()) {
2033 /* we've finished reading like good clients,
2034 * now do the close that initiateClose initiated.
2035 */
2036 debugs(33, 3, "closing for earlier sending error: " << reason);
2037 clientConnection->close();
2038 return false;
2039 }
2040 }
2041
2042 return true;
2043 }
2044
2045 /// parses available chunked encoded body bytes, checks size, returns errors
/// \returns ERR_NONE when parsing made progress or needs more data;
/// ERR_TOO_BIG when the dechunked body exceeds policy limits;
/// ERR_INVALID_REQ on malformed chunked encoding.
2046 err_type
2047 ConnStateData::handleChunkedRequestBody()
2048 {
2049 debugs(33, 7, "chunked from " << clientConnection << ": " << inBuf.length());
2050
2051 try { // the parser will throw on errors
2052
2053 if (inBuf.isEmpty()) // nothing to do
2054 return ERR_NONE;
2055
// checkout/checkIn brackets direct writes into the pipe buffer
2056 BodyPipeCheckout bpc(*bodyPipe);
2057 bodyParser->setPayloadBuffer(&bpc.buf);
2058 const bool parsed = bodyParser->parse(inBuf);
2059 inBuf = bodyParser->remaining(); // sync buffers
2060 bpc.checkIn();
2061
2062 // dechunk then check: the size limit applies to _dechunked_ content
2063 if (clientIsRequestBodyTooLargeForPolicy(bodyPipe->producedSize()))
2064 return ERR_TOO_BIG;
2065
2066 if (parsed) {
2067 finishDechunkingRequest(true);
2068 Must(!bodyPipe);
2069 return ERR_NONE; // nil bodyPipe implies body end for the caller
2070 }
2071
2072 // if chunk parser needs data, then the body pipe must need it too
2073 Must(!bodyParser->needsMoreData() || bodyPipe->mayNeedMoreData());
2074
2075 // if parser needs more space and we can consume nothing, we will stall
2076 Must(!bodyParser->needsMoreSpace() || bodyPipe->buf().hasContent());
2077 } catch (...) { // TODO: be more specific
2078 debugs(33, 3, "malformed chunks" << bodyPipe->status());
2079 return ERR_INVALID_REQ;
2080 }
2081
// NOTE(review): the dereference below streams only the first character of
// status(); compare with the non-dereferenced use above — confirm intent
2082 debugs(33, 7, "need more chunked data" << *bodyPipe->status());
2083 return ERR_NONE;
2084 }
2085
2086 /// quit on errors related to chunked request body handling
/// Tears down dechunking state and resets the connection; the error-page
/// branch below is compiled out (see WE_KNOW_HOW_TO_SEND_ERRORS).
2087 void
2088 ConnStateData::abortChunkedRequestBody(const err_type error)
2089 {
2090 finishDechunkingRequest(false);
2091
2092 // XXX: The code below works if we fail during initial request parsing,
2093 // but if we fail when the server connection is used already, the server may send
2094 // us its response too, causing various assertions. How to prevent that?
2095 #if WE_KNOW_HOW_TO_SEND_ERRORS
2096 Http::StreamPointer context = pipeline.front();
2097 if (context != NULL && !context->http->out.offset) { // output nothing yet
2098 clientStreamNode *node = context->getClientReplyContext();
2099 clientReplyContext *repContext = dynamic_cast<clientReplyContext*>(node->data.getRaw());
2100 assert(repContext);
2101 const Http::StatusCode scode = (error == ERR_TOO_BIG) ?
2102 Http::scContentTooLarge : HTTP_BAD_REQUEST;
2103 repContext->setReplyToError(error, scode,
2104 repContext->http->uri,
2105 CachePeer,
2106 repContext->http->request,
2107 inBuf, NULL);
2108 context->pullData();
2109 } else {
2110 // close or otherwise we may get stuck as nobody will notice the error?
2111 comm_reset_close(clientConnection);
2112 }
2113 #else
// active branch: RST-close the connection instead of sending an error reply
2114 debugs(33, 3, "aborting chunked request without error " << error);
2115 comm_reset_close(clientConnection);
2116 #endif
// stop scheduling further reads on this (now doomed) connection
2117 flags.readMore = false;
2118 }
2119
/// BodyProducer API: the request body consumer is gone. Switch the pipe to
/// auto-consumption so the client-side reader does not stall on a full buffer.
2120 void
2121 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer )
2122 {
2123 // request reader may get stuck waiting for space if nobody consumes body
2124 if (bodyPipe != nullptr)
2125 bodyPipe->enableAutoConsumption();
2126
2127 // kids extend
2128 }
2129
2130 /** general lifetime handler for HTTP requests */
/// Called when the read timeout expires while waiting for (more of) a request.
/// May hand the connection to tunnelOnError() instead of closing it.
2131 void
2132 ConnStateData::requestTimeout(const CommTimeoutCbParams &io)
2133 {
2134 if (!Comm::IsConnOpen(io.conn))
2135 return;
2136
// distinguish "client sent nothing" from "client stalled mid-request"
2137 const err_type error = receivedFirstByte_ ? ERR_REQUEST_PARSE_TIMEOUT : ERR_REQUEST_START_TIMEOUT;
2138 updateError(error);
// on_unsupported_protocol may convert this error into a blind tunnel
2139 if (tunnelOnError(error))
2140 return;
2141
2142 /*
2143 * Just close the connection to not confuse browsers
2144 * using persistent connections. Some browsers open
2145 * a connection and then do not use it until much
2146 * later (presumeably because the request triggering
2147 * the open has already been completed on another
2148 * connection)
2149 */
2150 debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
2151 io.conn->close();
2152 }
2153
/// Called when the absolute connection lifetime limit expires: warns the
/// admin and terminates every transaction on this connection.
2154 void
2155 ConnStateData::lifetimeTimeout(const CommTimeoutCbParams &io)
2156 {
2157 debugs(33, DBG_IMPORTANT, "WARNING: Closing client connection due to lifetime timeout" <<
2158 Debug::Extra << "connection: " << io.conn);
2159
// mark the access-log entries of the affected transactions as timed out
2160 LogTagsErrors lte;
2161 lte.timedout = true;
2162 terminateAll(ERR_LIFETIME_EXP, lte);
2163 }
2164
/// Builds the connection-manager job for an accepted client TCP connection.
/// Kids (Http::Server, Https::Server) refine the job further.
2165 ConnStateData::ConnStateData(const MasterXaction::Pointer &xact) :
2166 AsyncJob("ConnStateData"), // kids overwrite
2167 Server(xact)
2168 #if USE_OPENSSL
2169 , tlsParser(Security::HandshakeParser::fromClient)
2170 #endif
2171 {
2172 // store the details required for creating more MasterXaction objects as new requests come in
2173 log_addr = xact->tcpClient->remote;
// honor client_netmask by masking the address used for logging
2174 log_addr.applyClientMask(Config.Addrs.client_netmask);
2175
2176 // register to receive notice of Squid signal events
2177 // which may affect long persisting client connections
2178 registerRunner();
2179 }
2180
/// AsyncJob API: finishes connection setup — disables PMTU discovery when
/// configured, installs the close handler, and either waits for a PROXY
/// protocol header or proceeds as if the client IP is already known.
2181 void
2182 ConnStateData::start()
2183 {
2184 BodyProducer::start();
2185 HttpControlMsgSink::start();
2186
2187 if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF &&
2188 (transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) {
2189 #if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
2190 int i = IP_PMTUDISC_DONT;
// best-effort: a setsockopt failure is only worth a warning
2191 if (setsockopt(clientConnection->fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof(i)) < 0) {
2192 int xerrno = errno;
2193 debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection << " : " << xstrerr(xerrno));
2194 }
2195 #else
2196 static bool reported = false;
2197
// warn only once per process, not per connection
2198 if (!reported) {
2199 debugs(33, DBG_IMPORTANT, "WARNING: Path MTU discovery disabling is not supported on your platform.");
2200 reported = true;
2201 }
2202 #endif
2203 }
2204
2205 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
2206 AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, ConnStateData::connStateClosed);
2207 comm_add_close_handler(clientConnection->fd, call);
2208
2209 needProxyProtocolHeader_ = port->flags.proxySurrogate;
2210 if (needProxyProtocolHeader_) {
// the real client IP is unknown until the PROXY header arrives, so
// IP-dependent startup (whenClientIpKnown) is deferred until then
2211 if (!proxyProtocolValidateClient()) // will close the connection on failure
2212 return;
2213 } else
2214 whenClientIpKnown();
2215
2216 // requires needProxyProtocolHeader_ which is initialized above
2217 preservingClientData_ = shouldPreserveClientData();
2218 }
2219
/// Runs once the (possibly PROXY-protocol-supplied) client IP is final:
/// starts rDNS/IDENT lookups, registers the client in the client database,
/// and assigns the connection to a matching client delay pool (if any).
2220 void
2221 ConnStateData::whenClientIpKnown()
2222 {
2223 debugs(33, 7, clientConnection->remote);
2224 if (Dns::ResolveClientAddressesAsap)
2225 fqdncache_gethostbyaddr(clientConnection->remote, FQDN_LOOKUP_IF_MISS);
2226
2227 #if USE_IDENT
// ident lookup is gated by its own ACL (ident_lookup_access)
2228 if (Ident::TheConfig.identLookup) {
2229 ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, nullptr, nullptr);
2230 fillChecklist(identChecklist);
2231 if (identChecklist.fastCheck().allowed())
2232 Ident::Start(clientConnection, clientIdentDone, this);
2233 }
2234 #endif
2235
2236 clientdbEstablished(clientConnection->remote, 1);
2237
2238 #if USE_DELAY_POOLS
2239 fd_table[clientConnection->fd].clientInfo = nullptr;
2240
2241 if (!Config.onoff.client_db)
2242 return; // client delay pools require client_db
2243
2244 const auto &pools = ClientDelayPools::Instance()->pools;
2245 if (pools.size()) {
2246 ACLFilledChecklist ch(nullptr, nullptr, nullptr);
2247 fillChecklist(ch);
2248 // TODO: we check early to limit error response bandwidth but we
2249 // should recheck when we can honor delay_pool_uses_indirect
// first matching pool wins; see the break below
2250 for (unsigned int pool = 0; pool < pools.size(); ++pool) {
2251
2252 /* pools require explicit 'allow' to assign a client into them */
2253 if (pools[pool]->access) {
2254 ch.changeAcl(pools[pool]->access);
2255 auto answer = ch.fastCheck();
2256 if (answer.allowed()) {
2257
2258 /* request client information from db after we did all checks
2259 this will save hash lookup if client failed checks */
2260 ClientInfo * cli = clientdbGetInfo(clientConnection->remote);
2261 assert(cli);
2262
2263 /* put client info in FDE */
2264 fd_table[clientConnection->fd].clientInfo = cli;
2265
2266 /* setup write limiter for this request */
2267 const double burst = floor(0.5 +
2268 (pools[pool]->highwatermark * Config.ClientDelay.initial)/100.0);
2269 cli->setWriteLimiter(pools[pool]->rate, burst, pools[pool]->highwatermark);
2270 break;
2271 } else {
2272 debugs(83, 4, "Delay pool " << pool << " skipped because ACL " << answer);
2273 }
2274 }
2275 }
2276 }
2277 #endif
2278
2279 // kids must extend to actually start doing something (e.g., reading)
2280 }
2281
/// Performs one TLS accept step on the client connection and records TLS
/// secrets via keyLogger (OpenSSL builds only).
/// \returns the Security::Accept() outcome for the caller to dispatch on
2282 Security::IoResult
2283 ConnStateData::acceptTls()
2284 {
2285 const auto handshakeResult = Security::Accept(*clientConnection);
2286
2287 #if USE_OPENSSL
2288 // log ASAP, even if the handshake has not completed (or failed)
2289 const auto fd = clientConnection->fd;
2290 assert(fd >= 0);
2291 keyLogger.checkpoint(*fd_table[fd].ssl, *this);
2292 #else
2293 // TODO: Support fd_table[fd].ssl dereference in other builds.
2294 #endif
2295
2296 return handshakeResult;
2297 }
2298
2299 /** Handle a new connection on an HTTP socket. */
/// Comm accept callback: on success, wraps the new TCP connection in a
/// MasterXaction and starts an Http::Server job to manage it.
2300 void
2301 httpAccept(const CommAcceptCbParams &params)
2302 {
2303 Assure(params.port);
2304
2305 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2306
2307 if (params.flag != Comm::OK) {
2308 // Its possible the call was still queued when the client disconnected
2309 debugs(33, 2, params.port->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2310 return;
2311 }
2312
2313 debugs(33, 4, params.conn << ": accepted");
2314 fd_note(params.conn->fd, "client http connect");
2315 const auto xact = MasterXaction::MakePortful(params.port);
2316 xact->tcpClient = params.conn;
2317
2318 // Socket is ready, setup the connection manager to start using it
2319 auto *srv = Http::NewServer(xact);
2320 // XXX: do not abandon the MasterXaction object
2321 AsyncJob::Start(srv); // usually async-calls readSomeData()
2322 }
2323
2324 /// Create TLS connection structure and update fd_table
2325 static bool
2326 httpsCreate(const ConnStateData *connState, const Security::ContextPointer &ctx)
2327 {
2328 const auto conn = connState->clientConnection;
2329 if (Security::CreateServerSession(ctx, conn, connState->port->secure, "client https start")) {
2330 debugs(33, 5, "will negotiate TLS on " << conn);
2331 return true;
2332 }
2333
2334 debugs(33, DBG_IMPORTANT, "ERROR: could not create TLS server context for " << conn);
2335 conn->close();
2336 return false;
2337 }
2338
2339 /** negotiate an SSL connection */
/// Comm read/write handler that drives the TLS accept state machine. Re-arms
/// itself for WantRead/WantWrite, closes on errors, and, once the handshake
/// completes, logs session details, finalizes a bumped CONNECT (if any), and
/// resumes reading client data.
2340 static void
2341 clientNegotiateSSL(int fd, void *data)
2342 {
2343 ConnStateData *conn = (ConnStateData *)data;
2344
2345 const auto handshakeResult = conn->acceptTls();
2346 switch (handshakeResult.category) {
2347 case Security::IoResult::ioSuccess:
2348 break;
2349
// handshake incomplete: wait for socket readiness and retry this handler
2350 case Security::IoResult::ioWantRead:
2351 Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_READ, clientNegotiateSSL, conn, 0);
2352 return;
2353
2354 case Security::IoResult::ioWantWrite:
2355 Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, conn, 0);
2356 return;
2357
2358 case Security::IoResult::ioError:
2359 debugs(83, (handshakeResult.important ? Important(62) : 2), "ERROR: " << handshakeResult.errorDescription <<
2360 " while accepting a TLS connection on " << conn->clientConnection << ": " << handshakeResult.errorDetail);
2361 // TODO: No ConnStateData::tunnelOnError() on this forward-proxy code
2362 // path because we cannot know the intended connection target?
2363 conn->updateError(ERR_SECURE_ACCEPT_FAIL, handshakeResult.errorDetail);
2364 conn->clientConnection->close();
2365 return;
2366 }
2367
2368 Security::SessionPointer session(fd_table[fd].ssl);
2369
2370 #if USE_OPENSSL
2371 if (Security::SessionIsResumed(session)) {
2372 debugs(83, 2, "Session " << SSL_get_session(session.get()) <<
2373 " reused on FD " << fd << " (" << fd_table[fd].ipaddr <<
2374 ":" << (int)fd_table[fd].remote_port << ")");
2375 } else {
2376 if (Debug::Enabled(83, 4)) {
2377 /* Write out the SSL session details.. actually the call below, but
2378 * OpenSSL headers do strange typecasts confusing GCC.. */
2379 /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
2380 #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
2381 PEM_ASN1_write(reinterpret_cast<i2d_of_void *>(i2d_SSL_SESSION),
2382 PEM_STRING_SSL_SESSION, debug_log,
2383 reinterpret_cast<char *>(SSL_get_session(session.get())),
2384 nullptr, nullptr, 0, nullptr, nullptr);
2385
2386 #elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)
2387
2388 /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
2389 * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
2390 * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
2391 * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
2392 * Because there are two possible usable cast, if you get an error here, try the other
2393 * commented line. */
2394
2395 PEM_ASN1_write((int(*)())i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
2396 debug_log,
2397 reinterpret_cast<char *>(SSL_get_session(session.get())),
2398 nullptr, nullptr, 0, nullptr, nullptr);
2399 /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
2400 debug_log,
2401 reinterpret_cast<char *>(SSL_get_session(session.get())),
2402 nullptr, nullptr, 0, nullptr, nullptr);
2403 */
2404 #else
2405 debugs(83, 4, "With " OPENSSL_VERSION_TEXT ", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source.");
2406
2407 #endif
2408 /* Note: This does not automatically fflush the log file.. */
2409 }
2410
2411 debugs(83, 2, "New session " << SSL_get_session(session.get()) <<
2412 " on FD " << fd << " (" << fd_table[fd].ipaddr << ":" <<
2413 fd_table[fd].remote_port << ")");
2414 }
2415 #else
2416 debugs(83, 2, "TLS session reuse not yet implemented.");
2417 #endif
2418
2419 // Connection established. Retrieve TLS connection parameters for logging.
2420 conn->clientConnection->tlsNegotiations()->retrieveNegotiatedInfo(session);
2421
2422 #if USE_OPENSSL
// debug-log the client certificate identity, if one was presented
2423 X509 *client_cert = SSL_get_peer_certificate(session.get());
2424
2425 if (client_cert) {
2426 debugs(83, 3, "FD " << fd << " client certificate: subject: " <<
2427 Security::SubjectName(*client_cert));
2428
2429 debugs(83, 3, "FD " << fd << " client certificate: issuer: " <<
2430 Security::IssuerName(*client_cert));
2431
// SSL_get_peer_certificate() returns a new reference; release it
2432 X509_free(client_cert);
2433 } else {
2434 debugs(83, 5, "FD " << fd << " has no client certificate.");
2435 }
2436 #else
2437 debugs(83, 2, "Client certificate requesting not yet implemented.");
2438 #endif
2439
2440 // If we are called, then bumped CONNECT has succeeded. Finalize it.
2441 if (auto xact = conn->pipeline.front()) {
2442 if (xact->http && xact->http->request && xact->http->request->method == Http::METHOD_CONNECT)
2443 xact->finished();
2444 // cannot proceed with encryption if requests wait for plain responses
2445 Must(conn->pipeline.empty());
2446 }
2447 /* careful: finished() above frees request, host, etc. */
2448
2449 conn->readSomeData();
2450 }
2451
2452 /**
2453 * If Security::ContextPointer is given, starts reading the TLS handshake.
2454 * Otherwise, calls switchToHttps to generate a dynamic Security::ContextPointer.
2455 */
2456 static void
2457 httpsEstablish(ConnStateData *connState, const Security::ContextPointer &ctx)
2458 {
2459 assert(connState);
2460 const Comm::ConnectionPointer &details = connState->clientConnection;
2461
2462 if (!ctx || !httpsCreate(connState, ctx))
2463 return;
2464
2465 connState->resetReadTimeout(Config.Timeout.request);
2466
2467 Comm::SetSelect(details->fd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0);
2468 }
2469
#if USE_OPENSSL
/**
 * A callback function to use with the ACLFilledChecklist callback.
 *
 * Records the ssl_bump verdict on the connection (defaulting to splice when
 * no rule matched), then either terminates the connection (bumpTerminate)
 * or fakes a CONNECT request so that regular request processing drives the
 * remaining bumping steps.
 *
 * \param answer ssl_bump ACL verdict; answer.kind carries the Ssl::BumpMode
 * \param data the ConnStateData that started the check
 */
static void
httpsSslBumpAccessCheckDone(Acl::Answer answer, void *data)
{
    ConnStateData *connState = (ConnStateData *) data;

    // if the connection is closed or closing, just return.
    if (!connState->isOpen())
        return;

    if (answer.allowed()) {
        // fixed missing space: was "...kind) << \"needed for \"" producing
        // fused log text such as "sslBump action peekneeded for ..."
        debugs(33, 2, "sslBump action " << Ssl::bumpMode(answer.kind) << " needed for " << connState->clientConnection);
        connState->sslBumpMode = static_cast<Ssl::BumpMode>(answer.kind);
    } else {
        debugs(33, 3, "sslBump not needed for " << connState->clientConnection);
        connState->sslBumpMode = Ssl::bumpSplice;
    }

    if (connState->sslBumpMode == Ssl::bumpTerminate) {
        connState->clientConnection->close();
        return;
    }

    // wrap the buffered TLS bytes in a fake CONNECT so existing request
    // processing can handle them; drop the connection on failure
    if (!connState->fakeAConnectRequest("ssl-bump", connState->inBuf))
        connState->clientConnection->close();
}
#endif
2500
2501 /** handle a new HTTPS connection */
/// Comm accept callback for https_port: on success, wraps the connection in
/// a MasterXaction and starts an Https::Server job.
2502 static void
2503 httpsAccept(const CommAcceptCbParams &params)
2504 {
2505 Assure(params.port);
2506
2507 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2508
2509 if (params.flag != Comm::OK) {
2510 // Its possible the call was still queued when the client disconnected
2511 debugs(33, 2, "httpsAccept: " << params.port->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2512 return;
2513 }
2514
2515 const auto xact = MasterXaction::MakePortful(params.port);
2516 xact->tcpClient = params.conn;
2517
2518 debugs(33, 4, params.conn << " accepted, starting SSL negotiation.");
2519 fd_note(params.conn->fd, "client https connect");
2520
2521 // Socket is ready, setup the connection manager to start using it
2522 auto *srv = Https::NewServer(xact);
2523 // XXX: do not abandon the MasterXaction object
2524 AsyncJob::Start(srv); // usually async-calls postHttpsAccept()
2525 }
2526
/// Continues HTTPS connection setup after accept(): for ssl_bump ports,
/// builds a fake CONNECT-style request/ALE and starts the non-blocking
/// ssl_bump ACL check; otherwise starts TLS with the static context.
2527 void
2528 ConnStateData::postHttpsAccept()
2529 {
2530 if (port->flags.tunnelSslBumping) {
2531 #if USE_OPENSSL
2532 debugs(33, 5, "accept transparent connection: " << clientConnection);
2533
// without ssl_bump rules, behave as if the check was denied (splice)
2534 if (!Config.accessList.ssl_bump) {
2535 httpsSslBumpAccessCheckDone(ACCESS_DENIED, this);
2536 return;
2537 }
2538
2539 const auto mx = MasterXaction::MakePortful(port);
2540 mx->tcpClient = clientConnection;
2541 // Create a fake HTTP request and ALE for the ssl_bump ACL check,
2542 // using tproxy/intercept provided destination IP and port.
2543 // XXX: Merge with subsequent fakeAConnectRequest(), buildFakeRequest().
2544 // XXX: Do this earlier (e.g., in Http[s]::One::Server constructor).
2545 HttpRequest *request = new HttpRequest(mx);
2546 static char ip[MAX_IPSTRLEN];
2547 assert(clientConnection->flags & (COMM_TRANSPARENT | COMM_INTERCEPTION));
2548 request->url.host(clientConnection->local.toStr(ip, sizeof(ip)));
2549 request->url.port(clientConnection->local.port());
2550 request->myportname = port->name;
2551 const AccessLogEntry::Pointer connectAle = new AccessLogEntry;
2552 CodeContext::Reset(connectAle);
2553 // TODO: Use these request/ALE when waiting for new bumped transactions.
2554
// nonBlockingCheck() below owns and eventually deletes this checklist
2555 ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, request, nullptr);
2556 fillChecklist(*acl_checklist);
2557 // Build a local AccessLogEntry to allow requiresAle() acls work
2558 acl_checklist->al = connectAle;
2559 acl_checklist->al->cache.start_time = current_time;
2560 acl_checklist->al->tcpClient = clientConnection;
2561 acl_checklist->al->cache.port = port;
2562 acl_checklist->al->cache.caddr = log_addr;
2563 acl_checklist->al->proxyProtocolHeader = proxyProtocolHeader_;
2564 acl_checklist->al->updateError(bareError);
// swap in the fake request with proper reference counting
2565 HTTPMSGUNLOCK(acl_checklist->al->request);
2566 acl_checklist->al->request = request;
2567 HTTPMSGLOCK(acl_checklist->al->request);
2568 Http::StreamPointer context = pipeline.front();
2569 ClientHttpRequest *http = context ? context->http : nullptr;
2570 const char *log_uri = http ? http->log_uri : nullptr;
2571 acl_checklist->syncAle(request, log_uri);
2572 acl_checklist->nonBlockingCheck(httpsSslBumpAccessCheckDone, this);
2573 #else
2574 fatal("FATAL: SSL-Bump requires --with-openssl");
2575 #endif
2576 return;
2577 } else {
2578 httpsEstablish(this, port->secure.staticContext);
2579 }
2580 }
2581
2582 #if USE_OPENSSL
2583 void
2584 ConnStateData::sslCrtdHandleReplyWrapper(void *data, const Helper::Reply &reply)
2585 {
2586 ConnStateData * state_data = (ConnStateData *)(data);
2587 state_data->sslCrtdHandleReply(reply);
2588 }
2589
/// Processes the ssl_crtd helper reply carrying a generated certificate.
/// On success, configures/creates the TLS context (peek/stare vs. bump
/// paths differ) and continues via getSslContextDone(); every failure path
/// falls through to getSslContextDone(nil).
2590 void
2591 ConnStateData::sslCrtdHandleReply(const Helper::Reply &reply)
2592 {
2593 if (!isOpen()) {
2594 debugs(33, 3, "Connection gone while waiting for ssl_crtd helper reply; helper reply:" << reply);
2595 return;
2596 }
2597
2598 if (reply.result == Helper::BrokenHelper) {
2599 debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply);
2600 } else if (!reply.other().hasContent()) {
2601 debugs(1, DBG_IMPORTANT, "\"ssl_crtd\" helper returned <NULL> reply.");
2602 } else {
2603 Ssl::CrtdMessage reply_message(Ssl::CrtdMessage::REPLY);
2604 if (reply_message.parse(reply.other().content(), reply.other().contentSize()) != Ssl::CrtdMessage::OK) {
2605 debugs(33, 5, "Reply from ssl_crtd for " << tlsConnectHostOrIp << " is incorrect");
2606 } else {
2607 if (reply.result != Helper::Okay) {
2608 debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply_message.getBody());
2609 } else {
2610 debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " was successfully received from ssl_crtd");
// peek/stare: install the generated cert on the existing SSL object
2611 if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
2612 doPeekAndSpliceStep();
2613 auto ssl = fd_table[clientConnection->fd].ssl.get();
2614 bool ret = Ssl::configureSSLUsingPkeyAndCertFromMemory(ssl, reply_message.getBody().c_str(), *port);
2615 if (!ret)
2616 debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");
2617
2618 Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
2619 Ssl::configureUnconfiguredSslContext(ctx, signAlgorithm, *port);
// other bump modes: build a fresh context and cache it for reuse
2620 } else {
2621 Security::ContextPointer ctx(Ssl::GenerateSslContextUsingPkeyAndCertFromMemory(reply_message.getBody().c_str(), port->secure, (signAlgorithm == Ssl::algSignTrusted)));
2622 if (ctx && !sslBumpCertKey.isEmpty())
2623 storeTlsContextToCache(sslBumpCertKey, ctx);
2624 getSslContextDone(ctx);
2625 }
2626 return;
2627 }
2628 }
2629 }
// shared failure path: continue without a generated context
2630 Security::ContextPointer nil;
2631 getSslContextDone(nil);
2632 }
2633
2634 void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties &certProperties)
2635 {
2636 certProperties.commonName = sslCommonName_.isEmpty() ? tlsConnectHostOrIp.c_str() : sslCommonName_.c_str();
2637
2638 const bool connectedOk = sslServerBump && sslServerBump->connectedOk();
2639 if (connectedOk) {
2640 if (X509 *mimicCert = sslServerBump->serverCert.get())
2641 certProperties.mimicCert.resetAndLock(mimicCert);
2642
2643 ACLFilledChecklist checklist(nullptr, sslServerBump->request.getRaw());
2644 fillChecklist(checklist);
2645
2646 for (sslproxy_cert_adapt *ca = Config.ssl_client.cert_adapt; ca != nullptr; ca = ca->next) {
2647 // If the algorithm already set, then ignore it.
2648 if ((ca->alg == Ssl::algSetCommonName && certProperties.setCommonName) ||
2649 (ca->alg == Ssl::algSetValidAfter && certProperties.setValidAfter) ||
2650 (ca->alg == Ssl::algSetValidBefore && certProperties.setValidBefore) )
2651 continue;
2652
2653 if (ca->aclList && checklist.fastCheck(ca->aclList).allowed()) {
2654 const char *alg = Ssl::CertAdaptAlgorithmStr[ca->alg];
2655 const char *param = ca->param;
2656
2657 // For parameterless CN adaptation, use hostname from the
2658 // CONNECT request.
2659 if (ca->alg == Ssl::algSetCommonName) {
2660 if (!param)
2661 param = tlsConnectHostOrIp.c_str();
2662 certProperties.commonName = param;
2663 certProperties.setCommonName = true;
2664 } else if (ca->alg == Ssl::algSetValidAfter)
2665 certProperties.setValidAfter = true;
2666 else if (ca->alg == Ssl::algSetValidBefore)
2667 certProperties.setValidBefore = true;
2668
2669 debugs(33, 5, "Matches certificate adaptation aglorithm: " <<
2670 alg << " param: " << (param ? param : "-"));
2671 }
2672 }
2673
2674 certProperties.signAlgorithm = Ssl::algSignEnd;
2675 for (sslproxy_cert_sign *sg = Config.ssl_client.cert_sign; sg != nullptr; sg = sg->next) {
2676 if (sg->aclList && checklist.fastCheck(sg->aclList).allowed()) {
2677 certProperties.signAlgorithm = (Ssl::CertSignAlgorithm)sg->alg;
2678 break;
2679 }
2680 }
2681 } else {// did not try to connect (e.g. client-first) or failed to connect
2682 // In case of an error while connecting to the secure server, use a
2683 // trusted certificate, with no mimicked fields and no adaptation
2684 // algorithms. There is nothing we can mimic, so we want to minimize the
2685 // number of warnings the user will have to see to get to the error page.
2686 // We will close the connection, so that the trust is not extended to
2687 // non-Squid content.
2688 certProperties.signAlgorithm = Ssl::algSignTrusted;
2689 }
2690
2691 assert(certProperties.signAlgorithm != Ssl::algSignEnd);
2692
2693 if (certProperties.signAlgorithm == Ssl::algSignUntrusted) {
2694 assert(port->secure.untrustedSigningCa.cert);
2695 certProperties.signWithX509.resetAndLock(port->secure.untrustedSigningCa.cert.get());
2696 certProperties.signWithPkey.resetAndLock(port->secure.untrustedSigningCa.pkey.get());
2697 } else {
2698 assert(port->secure.signingCa.cert.get());
2699 certProperties.signWithX509.resetAndLock(port->secure.signingCa.cert.get());
2700
2701 if (port->secure.signingCa.pkey)
2702 certProperties.signWithPkey.resetAndLock(port->secure.signingCa.pkey.get());
2703 }
2704 signAlgorithm = certProperties.signAlgorithm;
2705
2706 certProperties.signHash = Ssl::DefaultSignHash;
2707 }
2708
2709 Security::ContextPointer
2710 ConnStateData::getTlsContextFromCache(const SBuf &cacheKey, const Ssl::CertificateProperties &certProperties)
2711 {
2712 debugs(33, 5, "Finding SSL certificate for " << cacheKey << " in cache");
2713 Ssl::LocalContextStorage * ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2714 if (const auto ctx = ssl_ctx_cache ? ssl_ctx_cache->get(cacheKey) : nullptr) {
2715 if (Ssl::verifySslCertificate(*ctx, certProperties)) {
2716 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is valid");
2717 return *ctx;
2718 } else {
2719 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is out of date. Delete this certificate from cache");
2720 if (ssl_ctx_cache)
2721 ssl_ctx_cache->del(cacheKey);
2722 }
2723 }
2724 return Security::ContextPointer(nullptr);
2725 }
2726
2727 void
2728 ConnStateData::storeTlsContextToCache(const SBuf &cacheKey, Security::ContextPointer &ctx)
2729 {
2730 Ssl::LocalContextStorage *ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2731 if (!ssl_ctx_cache || !ssl_ctx_cache->add(cacheKey, ctx)) {
2732 // If it is not in storage delete after using. Else storage deleted it.
2733 fd_table[clientConnection->fd].dynamicTlsContext = ctx;
2734 }
2735 }
2736
/// Obtains a TLS context for bumping this client connection: reuses a cached
/// generated certificate when allowed, delegates generation to the ssl_crtd
/// helper when built with one, and otherwise generates the certificate
/// in-process. Ends by calling getSslContextDone() or, for helper-based
/// generation, by scheduling sslCrtdHandleReplyWrapper().
void
ConnStateData::getSslContextStart()
{
    if (port->secure.generateHostCertificates) {
        Ssl::CertificateProperties certProperties;
        buildSslCertGenerationParams(certProperties);

        // Disable caching for bumpPeekAndSplice mode
        if (!(sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare))) {
            sslBumpCertKey.clear();
            Ssl::InRamCertificateDbKey(certProperties, sslBumpCertKey);
            assert(!sslBumpCertKey.isEmpty());

            // reuse a cached context when one still matches certProperties
            Security::ContextPointer ctx(getTlsContextFromCache(sslBumpCertKey, certProperties));
            if (ctx) {
                getSslContextDone(ctx);
                return;
            }
        }

#if USE_SSL_CRTD
        try {
            debugs(33, 5, "Generating SSL certificate for " << certProperties.commonName << " using ssl_crtd.");
            Ssl::CrtdMessage request_message(Ssl::CrtdMessage::REQUEST);
            request_message.setCode(Ssl::CrtdMessage::code_new_certificate);
            request_message.composeRequest(certProperties);
            debugs(33, 5, "SSL crtd request: " << request_message.compose().c_str());
            // asynchronous: sslCrtdHandleReplyWrapper() handles the helper reply
            Ssl::Helper::Submit(request_message, sslCrtdHandleReplyWrapper, this);
            return;
        } catch (const std::exception &e) {
            debugs(33, DBG_IMPORTANT, "ERROR: Failed to compose ssl_crtd " <<
                   "request for " << certProperties.commonName <<
                   " certificate: " << e.what() << "; will now block to " <<
                   "generate that certificate.");
            // fall through to do blocking in-process generation.
        }
#endif // USE_SSL_CRTD

        debugs(33, 5, "Generating SSL certificate for " << certProperties.commonName);
        if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
            // peek/stare: configure the TLS objects already attached to the client fd
            doPeekAndSpliceStep();
            auto ssl = fd_table[clientConnection->fd].ssl.get();
            if (!Ssl::configureSSL(ssl, certProperties, *port))
                debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

            Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
            Ssl::configureUnconfiguredSslContext(ctx, certProperties.signAlgorithm, *port);
        } else {
            // blocking in-process generation; cache the result when we built a cache key
            Security::ContextPointer dynCtx(Ssl::GenerateSslContext(certProperties, port->secure, (signAlgorithm == Ssl::algSignTrusted)));
            if (dynCtx && !sslBumpCertKey.isEmpty())
                storeTlsContextToCache(sslBumpCertKey, dynCtx);
            getSslContextDone(dynCtx);
        }
        return;
    }

    // generation disabled: getSslContextDone() will fall back to the static
    // context (or close the connection when there is none)
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2796
2797 void
2798 ConnStateData::getSslContextDone(Security::ContextPointer &ctx)
2799 {
2800 if (port->secure.generateHostCertificates && !ctx) {
2801 debugs(33, 2, "Failed to generate TLS context for " << tlsConnectHostOrIp);
2802 }
2803
2804 // If generated ssl context = NULL, try to use static ssl context.
2805 if (!ctx) {
2806 if (!port->secure.staticContext) {
2807 debugs(83, DBG_IMPORTANT, "Closing " << clientConnection->remote << " as lacking TLS context");
2808 clientConnection->close();
2809 return;
2810 } else {
2811 debugs(33, 5, "Using static TLS context.");
2812 ctx = port->secure.staticContext;
2813 }
2814 }
2815
2816 if (!httpsCreate(this, ctx))
2817 return;
2818
2819 // bumped intercepted conns should already have Config.Timeout.request set
2820 // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
2821 // to make sure the connection does not get stuck on non-SSL clients.
2822 resetReadTimeout(Config.Timeout.request);
2823
2824 switchedToHttps_ = true;
2825
2826 auto ssl = fd_table[clientConnection->fd].ssl.get();
2827 BIO *b = SSL_get_rbio(ssl);
2828 Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
2829 bio->setReadBufData(inBuf);
2830 inBuf.clear();
2831 clientNegotiateSSL(clientConnection->fd, this);
2832 }
2833
/// Converts this connection (an established CONNECT tunnel or an intercepted
/// TCP connection) into an SslBump one: records the TLS connect target,
/// creates the Ssl::ServerBump state for the selected server bump mode, and
/// starts reading what is expected to be a TLS Client Hello.
void
ConnStateData::switchToHttps(ClientHttpRequest *http, Ssl::BumpMode bumpServerMode)
{
    assert(!switchedToHttps_);
    Must(http->request);
    auto &request = http->request;

    // Depending on receivedFirstByte_, we are at the start of either an
    // established CONNECT tunnel with the client or an intercepted TCP (and
    // presumably TLS) connection from the client. Expect TLS Client Hello.
    const auto insideConnectTunnel = receivedFirstByte_;
    debugs(33, 5, (insideConnectTunnel ? "post-CONNECT " : "raw TLS ") << clientConnection);

    // remember the intended TLS destination for later error reporting/tunneling
    tlsConnectHostOrIp = request->url.hostOrIp();
    tlsConnectPort = request->url.port();
    resetSslCommonName(request->url.host());

    // We are going to read a new request
    flags.readMore = true;

    // keep version major.minor details the same,
    // but we are now performing the HTTPS handshake traffic
    transferProtocol.protocol = AnyP::PROTO_HTTPS;

    // If sslServerBump is set, then we have decided to deny CONNECT
    // and now want to switch to SSL to send the error to the client
    // without even peeking at the origin server certificate.
    if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http);
    } else if (bumpServerMode == Ssl::bumpPeek || bumpServerMode == Ssl::bumpStare) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http, nullptr, bumpServerMode);
    }

    // commSetConnTimeout() was called for this request before we switched.
    // Fix timeout to request_start_timeout
    resetReadTimeout(Config.Timeout.request_start_timeout);
    // Also reset the receivedFirstByte_ flag to allow this timeout to work in
    // the case we have a bumped "connect" request on a non-transparent port.
    receivedFirstByte_ = false;
    // Get more data to peek at the TLS handshake
    parsingTlsHandshake = true;

    // If the protocol has changed, then reset preservingClientData_.
    // Otherwise, its value initially set in start() is still valid/fresh.
    // shouldPreserveClientData() uses parsingTlsHandshake which is reset above.
    if (insideConnectTunnel)
        preservingClientData_ = shouldPreserveClientData();

    readSomeData();
}
2886
2887 void
2888 ConnStateData::parseTlsHandshake()
2889 {
2890 Must(parsingTlsHandshake);
2891
2892 assert(!inBuf.isEmpty());
2893 receivedFirstByte();
2894 fd_note(clientConnection->fd, "Parsing TLS handshake");
2895
2896 // stops being nil if we fail to parse the handshake
2897 ErrorDetail::Pointer parseErrorDetails;
2898
2899 try {
2900 if (!tlsParser.parseHello(inBuf)) {
2901 // need more data to finish parsing
2902 readSomeData();
2903 return;
2904 }
2905 }
2906 catch (const TextException &ex) {
2907 debugs(83, 2, "exception: " << ex);
2908 parseErrorDetails = new ExceptionErrorDetail(ex.id());
2909 }
2910 catch (...) {
2911 debugs(83, 2, "exception: " << CurrentException);
2912 static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_PARSE");
2913 parseErrorDetails = d;
2914 }
2915
2916 parsingTlsHandshake = false;
2917
2918 // client data may be needed for splicing and for
2919 // tunneling unsupportedProtocol after an error
2920 preservedClientData = inBuf;
2921
2922 // Even if the parser failed, each TLS detail should either be set
2923 // correctly or still be "unknown"; copying unknown detail is a no-op.
2924 Security::TlsDetails::Pointer const &details = tlsParser.details;
2925 clientConnection->tlsNegotiations()->retrieveParsedInfo(details);
2926 if (details && !details->serverName.isEmpty()) {
2927 resetSslCommonName(details->serverName.c_str());
2928 tlsClientSni_ = details->serverName;
2929 }
2930
2931 // We should disable read/write handlers
2932 Comm::ResetSelect(clientConnection->fd);
2933
2934 if (parseErrorDetails) {
2935 Http::StreamPointer context = pipeline.front();
2936 Must(context && context->http);
2937 HttpRequest::Pointer request = context->http->request;
2938 debugs(83, 5, "Got something other than TLS Client Hello. Cannot SslBump.");
2939 updateError(ERR_PROTOCOL_UNKNOWN, parseErrorDetails);
2940 if (!tunnelOnError(ERR_PROTOCOL_UNKNOWN))
2941 clientConnection->close();
2942 return;
2943 }
2944
2945 if (!sslServerBump || sslServerBump->act.step1 == Ssl::bumpClientFirst) { // Either means client-first.
2946 getSslContextStart();
2947 return;
2948 } else if (sslServerBump->act.step1 == Ssl::bumpServerFirst) {
2949 debugs(83, 5, "server-first skips step2; start forwarding the request");
2950 sslServerBump->step = XactionStep::tlsBump3;
2951 Http::StreamPointer context = pipeline.front();
2952 ClientHttpRequest *http = context ? context->http : nullptr;
2953 // will call httpsPeeked() with certificate and connection, eventually
2954 FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : nullptr);
2955 } else {
2956 Must(sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare);
2957 startPeekAndSplice();
2958 }
2959 }
2960
2961 static void
2962 httpsSslBumpStep2AccessCheckDone(Acl::Answer answer, void *data)
2963 {
2964 ConnStateData *connState = (ConnStateData *) data;
2965
2966 // if the connection is closed or closing, just return.
2967 if (!connState->isOpen())
2968 return;
2969
2970 debugs(33, 5, "Answer: " << answer << " kind:" << answer.kind);
2971 assert(connState->serverBump());
2972 Ssl::BumpMode bumpAction;
2973 if (answer.allowed()) {
2974 bumpAction = (Ssl::BumpMode)answer.kind;
2975 } else
2976 bumpAction = Ssl::bumpSplice;
2977
2978 connState->serverBump()->act.step2 = bumpAction;
2979 connState->sslBumpMode = bumpAction;
2980 Http::StreamPointer context = connState->pipeline.front();
2981 if (ClientHttpRequest *http = (context ? context->http : nullptr))
2982 http->al->ssl.bumpMode = bumpAction;
2983
2984 if (bumpAction == Ssl::bumpTerminate) {
2985 connState->clientConnection->close();
2986 } else if (bumpAction != Ssl::bumpSplice) {
2987 connState->startPeekAndSplice();
2988 } else if (!connState->splice())
2989 connState->clientConnection->close();
2990 }
2991
2992 bool
2993 ConnStateData::splice()
2994 {
2995 // normally we can splice here, because we just got client hello message
2996
2997 // fde::ssl/tls_read_method() probably reads from our own inBuf. If so, then
2998 // we should not lose any raw bytes when switching to raw I/O here.
2999 if (fd_table[clientConnection->fd].ssl.get())
3000 fd_table[clientConnection->fd].useDefaultIo();
3001
3002 // XXX: assuming that there was an HTTP/1.1 CONNECT to begin with...
3003 // reset the current protocol to HTTP/1.1 (was "HTTPS" for the bumping process)
3004 transferProtocol = Http::ProtocolVersion();
3005 assert(!pipeline.empty());
3006 Http::StreamPointer context = pipeline.front();
3007 Must(context);
3008 Must(context->http);
3009 ClientHttpRequest *http = context->http;
3010 HttpRequest::Pointer request = http->request;
3011 context->finished();
3012 if (transparent()) {
3013 // For transparent connections, make a new fake CONNECT request, now
3014 // with SNI as target. doCallout() checks, adaptations may need that.
3015 return fakeAConnectRequest("splice", preservedClientData);
3016 } else {
3017 // For non transparent connections make a new tunneled CONNECT, which
3018 // also sets the HttpRequest::flags::forceTunnel flag to avoid
3019 // respond with "Connection Established" to the client.
3020 // This fake CONNECT request required to allow use of SNI in
3021 // doCallout() checks and adaptations.
3022 return initiateTunneledRequest(request, "splice", preservedClientData);
3023 }
3024 }
3025
/// Advances SSL bumping for the peek/stare modes: on the first pass, runs the
/// step2 ssl_bump ACL check (resuming in httpsSslBumpStep2AccessCheckDone());
/// on the second pass, lets the TLS library re-parse the client Hello and then
/// starts forwarding the request (step3).
void
ConnStateData::startPeekAndSplice()
{
    // This is the Step2 of the SSL bumping
    assert(sslServerBump);
    Http::StreamPointer context = pipeline.front();
    ClientHttpRequest *http = context ? context->http : nullptr;

    if (sslServerBump->at(XactionStep::tlsBump1)) {
        sslServerBump->step = XactionStep::tlsBump2;
        // Run an accessList check to decide whether to splice or continue bumping
        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, sslServerBump->request.getRaw(), nullptr);
        // step2 cannot select the none/client-first/server-first modes
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpNone));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpClientFirst));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpServerFirst));
        fillChecklist(*acl_checklist);
        acl_checklist->nonBlockingCheck(httpsSslBumpStep2AccessCheckDone, this);
        return;
    }

    // will call httpsPeeked() with certificate and connection, eventually
    Security::ContextPointer unConfiguredCTX(Ssl::createSSLContext(port->secure.signingCa.cert, port->secure.signingCa.pkey, port->secure));
    fd_table[clientConnection->fd].dynamicTlsContext = unConfiguredCTX;

    if (!httpsCreate(this, unConfiguredCTX))
        return;

    switchedToHttps_ = true;

    // feed the already-read client bytes to the TLS library; hold(true) pauses
    // the negotiation until doPeekAndSpliceStep() releases it via hold(false)
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    bio->hold(true);

    // We have successfully parsed client Hello, but our TLS handshake parser is
    // forgiving. Now we use a TLS library to parse the same bytes, so that we
    // can honor on_unsupported_protocol if needed. If there are no errors, we
    // expect Security::Accept() to ask us to write (our) TLS server Hello. We
    // also allow an ioWantRead result in case some fancy TLS extension that
    // Squid does not yet understand requires reading post-Hello client bytes.
    const auto handshakeResult = acceptTls();
    if (!handshakeResult.wantsIo())
        return handleSslBumpHandshakeError(handshakeResult);

    // We need to reset inBuf here, to be used by incoming requests in the case
    // of SSL bump
    inBuf.clear();

    debugs(83, 5, "Peek and splice at step2 done. Start forwarding the request!!! ");
    sslServerBump->step = XactionStep::tlsBump3;
    FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : nullptr);
}
3080
3081 /// process a problematic Security::Accept() result on the SslBump code path
3082 void
3083 ConnStateData::handleSslBumpHandshakeError(const Security::IoResult &handshakeResult)
3084 {
3085 auto errCategory = ERR_NONE;
3086
3087 switch (handshakeResult.category) {
3088 case Security::IoResult::ioSuccess: {
3089 static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_SUCCESS");
3090 updateError(errCategory = ERR_GATEWAY_FAILURE, d);
3091 break;
3092 }
3093
3094 case Security::IoResult::ioWantRead: {
3095 static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_READ");
3096 updateError(errCategory = ERR_GATEWAY_FAILURE, d);
3097 break;
3098 }
3099
3100 case Security::IoResult::ioWantWrite: {
3101 static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_WRITE");
3102 updateError(errCategory = ERR_GATEWAY_FAILURE, d);
3103 break;
3104 }
3105
3106 case Security::IoResult::ioError:
3107 debugs(83, (handshakeResult.important ? DBG_IMPORTANT : 2), "ERROR: " << handshakeResult.errorDescription <<
3108 " while SslBump-accepting a TLS connection on " << clientConnection << ": " << handshakeResult.errorDetail);
3109 updateError(errCategory = ERR_SECURE_ACCEPT_FAIL, handshakeResult.errorDetail);
3110 break;
3111
3112 }
3113
3114 if (!tunnelOnError(errCategory))
3115 clientConnection->close();
3116 }
3117
3118 void
3119 ConnStateData::doPeekAndSpliceStep()
3120 {
3121 auto ssl = fd_table[clientConnection->fd].ssl.get();
3122 BIO *b = SSL_get_rbio(ssl);
3123 assert(b);
3124 Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
3125
3126 debugs(33, 5, "PeekAndSplice mode, proceed with client negotiation. Current state:" << SSL_state_string_long(ssl));
3127 bio->hold(false);
3128
3129 Comm::SetSelect(clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, this, 0);
3130 switchedToHttps_ = true;
3131 }
3132
3133 void
3134 ConnStateData::httpsPeeked(PinnedIdleContext pic)
3135 {
3136 Must(sslServerBump != nullptr);
3137 Must(sslServerBump->request == pic.request);
3138 Must(pipeline.empty() || pipeline.front()->http == nullptr || pipeline.front()->http->request == pic.request.getRaw());
3139
3140 if (Comm::IsConnOpen(pic.connection)) {
3141 notePinnedConnectionBecameIdle(pic);
3142 debugs(33, 5, "bumped HTTPS server: " << tlsConnectHostOrIp);
3143 } else
3144 debugs(33, 5, "Error while bumping: " << tlsConnectHostOrIp);
3145
3146 getSslContextStart();
3147 }
3148
3149 #endif /* USE_OPENSSL */
3150
/// Starts tunneling client bytes via a fake forceTunnel CONNECT request whose
/// target is taken from the best available source: the pinned server
/// connection, the given cause request, the recorded TLS CONNECT target, or
/// (for transparent connections) the original TCP destination.
/// \param cause the request that caused the tunneling (may be nil)
/// \param reason brief description of why we tunnel, for debugging
/// \param payload raw client bytes to become the tunneled input
/// \returns false when no tunnel target could be computed
bool
ConnStateData::initiateTunneledRequest(HttpRequest::Pointer const &cause, const char *reason, const SBuf &payload)
{
    // fake a CONNECT request to force connState to tunnel
    SBuf connectHost;
    unsigned short connectPort = 0;

    if (pinning.serverConnection != nullptr) {
        // tunnel to the already-pinned server
        static char ip[MAX_IPSTRLEN];
        connectHost = pinning.serverConnection->remote.toStr(ip, sizeof(ip));
        connectPort = pinning.serverConnection->remote.port();
    } else if (cause) {
        connectHost = cause->url.hostOrIp();
        connectPort = cause->url.port();
#if USE_OPENSSL
    } else if (!tlsConnectHostOrIp.isEmpty()) {
        // target recorded by switchToHttps()
        connectHost = tlsConnectHostOrIp;
        connectPort = tlsConnectPort;
#endif
    } else if (transparent()) {
        // tunnel to the original TCP destination
        static char ip[MAX_IPSTRLEN];
        connectHost = clientConnection->local.toStr(ip, sizeof(ip));
        connectPort = clientConnection->local.port();
    } else {
        // Typical cases are malformed HTTP requests on http_port and malformed
        // TLS handshakes on non-bumping https_port. TODO: Discover these
        // problems earlier so that they can be classified/detailed better.
        debugs(33, 2, "Not able to compute URL, abort request tunneling for " << reason);
        // TODO: throw when nonBlockingCheck() callbacks gain job protections
        static const auto d = MakeNamedErrorDetail("TUNNEL_TARGET");
        updateError(ERR_INVALID_REQ, d);
        return false;
    }

    debugs(33, 2, "Request tunneling for " << reason);
    ClientHttpRequest *http = buildFakeRequest(connectHost, connectPort, payload);
    HttpRequest::Pointer request = http->request;
    request->flags.forceTunnel = true;
    http->calloutContext = new ClientRequestContext(http);
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3194
/// Tunnels payload by faking a CONNECT request on a transparent connection,
/// targeting the TLS client SNI when known and the original TCP destination
/// otherwise.
/// \param reason brief description of why we tunnel, for debugging
/// \param payload raw client bytes to become the tunneled input
/// \returns true (the fake request was built and its callouts were started)
bool
ConnStateData::fakeAConnectRequest(const char *reason, const SBuf &payload)
{
    debugs(33, 2, "fake a CONNECT request to force connState to tunnel for " << reason);

    SBuf connectHost;
    assert(transparent()); // the caller guarantees a transparent connection
    const unsigned short connectPort = clientConnection->local.port();

#if USE_OPENSSL
    if (!tlsClientSni_.isEmpty())
        connectHost.assign(tlsClientSni_); // prefer the client-supplied SNI
    else
#endif
    {
        // fall back to the original TCP destination address
        static char ip[MAX_IPSTRLEN];
        clientConnection->local.toHostStr(ip, sizeof(ip));
        connectHost.assign(ip);
    }

    ClientHttpRequest *http = buildFakeRequest(connectHost, connectPort, payload);

    http->calloutContext = new ClientRequestContext(http);
    HttpRequest::Pointer request = http->request;
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3223
/// Builds a fake CONNECT transaction (ClientHttpRequest, Http::Stream, and
/// HttpRequest) targeting useHost:usePort, registers its stream with this
/// connection, and seeds inBuf with the given payload.
/// \returns the new ClientHttpRequest (never nil)
ClientHttpRequest *
ConnStateData::buildFakeRequest(SBuf &useHost, unsigned short usePort, const SBuf &payload)
{
    ClientHttpRequest *http = new ClientHttpRequest(this);
    Http::Stream *stream = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = stream->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // link the reply-producing (clientReplyContext) and socket-facing
    // (Http::Stream) ends of the client stream
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = stream;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    stream->flags.parsed_ok = 1; // Do we need it?
    stream->mayUseConnection(true);
    extendLifetime();
    stream->registerWithConn();

    const auto mx = MasterXaction::MakePortful(port);
    mx->tcpClient = clientConnection;
    // Setup Http::Request object. Maybe should be replaced by a call to (modified)
    // clientProcessRequest
    HttpRequest::Pointer request = new HttpRequest(mx);
    request->url.setScheme(AnyP::PROTO_AUTHORITY_FORM, nullptr);
    request->method = Http::METHOD_CONNECT;
    request->url.host(useHost.c_str());
    request->url.port(usePort);

    http->uri = SBufToCstring(request->effectiveRequestUri());
    http->initRequest(request.getRaw());

    request->manager(this, http->al);

    request->header.putStr(Http::HOST, useHost.c_str());

    // mark whether the fake request arrived over a TLS-secured connection
    request->sources |= ((switchedToHttps() || port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    if (getAuth())
        request->auth_user_request = getAuth();
#endif

    // the payload becomes this connection's buffered input; stop reading more
    inBuf = payload;
    flags.readMore = false;

    return http;
}
3273
3274 /// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
3275 static bool
3276 OpenedHttpSocket(const Comm::ConnectionPointer &c, const Ipc::FdNoteId portType)
3277 {
3278 if (!Comm::IsConnOpen(c)) {
3279 Must(NHttpSockets > 0); // we tried to open some
3280 --NHttpSockets; // there will be fewer sockets than planned
3281 Must(HttpSockets[NHttpSockets] < 0); // no extra fds received
3282
3283 if (!NHttpSockets) // we could not open any listen sockets at all
3284 fatalf("Unable to open %s",FdNote(portType));
3285
3286 return false;
3287 }
3288 return true;
3289 }
3290
3291 /// find any unused HttpSockets[] slot and store fd there or return false
3292 static bool
3293 AddOpenedHttpSocket(const Comm::ConnectionPointer &conn)
3294 {
3295 bool found = false;
3296 for (int i = 0; i < NHttpSockets && !found; ++i) {
3297 if ((found = HttpSockets[i] < 0))
3298 HttpSockets[i] = conn->fd;
3299 }
3300 return found;
3301 }
3302
/// Starts listening on every configured http(s)_port, disabling ssl-bump
/// and/or transport encryption on ports whose TLS configuration is unusable,
/// and skipping ports beyond the MAXTCPLISTENPORTS limit.
static void
clientHttpConnectionsOpen(void)
{
    const auto savedContext = CodeContext::Current();
    for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
        CodeContext::Reset(s);
        const SBuf &scheme = AnyP::UriScheme(s->transport.protocol).image();

        if (MAXTCPLISTENPORTS == NHttpSockets) {
            debugs(1, DBG_IMPORTANT, "WARNING: You have too many '" << scheme << "_port' lines." <<
                   Debug::Extra << "The limit is " << MAXTCPLISTENPORTS << " HTTP ports.");
            continue;
        }

#if USE_OPENSSL
        if (s->flags.tunnelSslBumping) {
            // bumping requires an ssl_bump access list
            if (!Config.accessList.ssl_bump) {
                debugs(33, DBG_IMPORTANT, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << scheme << "_port " << s->s);
                s->flags.tunnelSslBumping = false;
            }
            // bumping also requires a usable TLS context (static or generated)
            if (!s->secure.staticContext && !s->secure.generateHostCertificates) {
                debugs(1, DBG_IMPORTANT, "Will not bump SSL at " << scheme << "_port " << s->s << " due to TLS initialization failure.");
                s->flags.tunnelSslBumping = false;
                if (s->transport.protocol == AnyP::PROTO_HTTP)
                    s->secure.encryptTransport = false;
            }
            if (s->flags.tunnelSslBumping) {
                // Create ssl_ctx cache for this port.
                Ssl::TheGlobalContextStorage.addLocalStorage(s->s, s->secure.dynamicCertMemCacheSize);
            }
        }
#endif

        if (s->secure.encryptTransport && !s->secure.staticContext) {
            debugs(1, DBG_CRITICAL, "ERROR: Ignoring " << scheme << "_port " << s->s << " due to TLS context initialization failure.");
            continue;
        }

        const auto protocol = s->transport.protocol;
        assert(protocol == AnyP::PROTO_HTTP || protocol == AnyP::PROTO_HTTPS);
        const auto isHttps = protocol == AnyP::PROTO_HTTPS;
        using AcceptCall = CommCbFunPtrCallT<CommAcceptCbPtrFun>;
        // accepted connections go to httpAccept()/httpsAccept()
        RefCount<AcceptCall> subCall = commCbCall(5, 5, isHttps ? "httpsAccept" : "httpAccept",
                                                 CommAcceptCbPtrFun(isHttps ? httpsAccept : httpAccept, CommAcceptCbParams(nullptr)));
        clientStartListeningOn(s, subCall, isHttps ? Ipc::fdnHttpsSocket : Ipc::fdnHttpSocket);
    }
    CodeContext::Reset(savedContext);
}
3351
3352 void
3353 clientStartListeningOn(AnyP::PortCfgPointer &port, const RefCount< CommCbFunPtrCallT<CommAcceptCbPtrFun> > &subCall, const Ipc::FdNoteId fdNote)
3354 {
3355 // Fill out a Comm::Connection which IPC will open as a listener for us
3356 port->listenConn = new Comm::Connection;
3357 port->listenConn->local = port->s;
3358 port->listenConn->flags =
3359 COMM_NONBLOCKING |
3360 (port->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
3361 (port->flags.natIntercept ? COMM_INTERCEPTION : 0) |
3362 (port->workerQueues ? COMM_REUSEPORT : 0);
3363
3364 // route new connections to subCall
3365 typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
3366 Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
3367 AsyncCall::Pointer listenCall =
3368 asyncCall(33, 2, "clientListenerConnectionOpened",
3369 ListeningStartedDialer(&clientListenerConnectionOpened,
3370 port, fdNote, sub));
3371 Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, port->listenConn, fdNote, listenCall);
3372
3373 assert(NHttpSockets < MAXTCPLISTENPORTS);
3374 HttpSockets[NHttpSockets] = -1;
3375 ++NHttpSockets;
3376 }
3377
/// process clientHttpConnectionsOpen result: start accepting connections on a
/// successfully opened listening socket (bailing out via OpenedHttpSocket()
/// bookkeeping when the open failed)
static void
clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub)
{
    Must(s != nullptr);

    if (!OpenedHttpSocket(s->listenConn, portTypeNote))
        return;

    Must(Comm::IsConnOpen(s->listenConn));

    // TCP: setup a job to handle accept() with subscribed handler
    AsyncJob::Start(new Comm::TcpAcceptor(s, FdNote(portTypeNote), sub));

    debugs(1, Important(13), "Accepting " <<
           (s->flags.natIntercept ? "NAT intercepted " : "") <<
           (s->flags.tproxyIntercept ? "TPROXY intercepted " : "") <<
           (s->flags.tunnelSslBumping ? "SSL bumped " : "") <<
           (s->flags.accelSurrogate ? "reverse-proxy " : "")
           << FdNote(portTypeNote) << " connections at "
           << s->listenConn);

    Must(AddOpenedHttpSocket(s->listenConn)); // otherwise, we have received a fd we did not ask for

#if USE_SYSTEMD
    // When the very first port opens, tell systemd we are able to serve connections.
    // Subsequent sd_notify() calls, including calls during reconfiguration,
    // do nothing because the first call parameter is 1.
    // XXX: Send the notification only after opening all configured ports.
    if (opt_foreground || opt_no_daemon) {
        const auto result = sd_notify(1, "READY=1");
        if (result < 0) {
            debugs(1, DBG_IMPORTANT, "WARNING: failed to send start-up notification to systemd" <<
                   Debug::Extra << "sd_notify() error: " << xstrerr(-result));
        }
    }
#endif
}
3416
/// Opens all configured HTTP(S) and FTP listening ports; aborts via fatal()
/// when no listening ports were set up.
void
clientOpenListenSockets(void)
{
    clientHttpConnectionsOpen();
    Ftp::StartListening();

    if (NHttpSockets < 1)
        fatal("No HTTP, HTTPS, or FTP ports configured");
}
3426
3427 void
3428 clientConnectionsClose()
3429 {
3430 const auto savedContext = CodeContext::Current();
3431 for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
3432 CodeContext::Reset(s);
3433 if (s->listenConn != nullptr) {
3434 debugs(1, Important(14), "Closing HTTP(S) port " << s->listenConn->local);
3435 s->listenConn->close();
3436 s->listenConn = nullptr;
3437 }
3438 }
3439 CodeContext::Reset(savedContext);
3440
3441 Ftp::StopListening();
3442
3443 // TODO see if we can drop HttpSockets array entirely */
3444 for (int i = 0; i < NHttpSockets; ++i) {
3445 HttpSockets[i] = -1;
3446 }
3447
3448 NHttpSockets = 0;
3449 }
3450
/// Compares the request's Vary-derived key against the stored Vary metadata of
/// the given entry, computing and recording the key as a side effect.
/// \retval VARY_NONE   the entry does not vary
/// \retval VARY_MATCH  the request matches the stored variant
/// \retval VARY_OTHER  a variant key was computed; continue the lookup with it
/// \retval VARY_CANCEL the variance cannot be handled (or state is inconsistent)
int
varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
{
    SBuf vary(request->vary_headers);
    const auto &reply = entry->mem().freshestReply();
    auto has_vary = reply.header.has(Http::HdrType::VARY);
#if X_ACCELERATOR_VARY

    has_vary |=
        reply.header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY);
#endif

    if (!has_vary || entry->mem_obj->vary_headers.isEmpty()) {
        if (!vary.isEmpty()) {
            /* Oops... something odd is going on here.. */
            // a vary key was recorded earlier, but this entry has no variance
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            request->vary_headers.clear();
            return VARY_CANCEL;
        }

        if (!has_vary) {
            /* This is not a varying object */
            return VARY_NONE;
        }

        /* virtual "vary" object found. Calculate the vary key and
         * continue the search
         */
        vary = httpMakeVaryMark(request, &reply);

        if (!vary.isEmpty()) {
            request->vary_headers = vary;
            return VARY_OTHER;
        } else {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        }
    } else {
        if (vary.isEmpty()) {
            // first pass: compute the request's vary key
            vary = httpMakeVaryMark(request, &reply);

            if (!vary.isEmpty())
                request->vary_headers = vary;
        }

        if (vary.isEmpty()) {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        } else if (vary.cmp(entry->mem_obj->vary_headers) == 0) {
            return VARY_MATCH;
        } else {
            /* Oops.. we have already been here and still haven't
             * found the requested variant. Bail out
             */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            return VARY_CANCEL;
        }
    }
}
3514
3515 ACLFilledChecklist *
3516 clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
3517 {
3518 const auto checklist = new ACLFilledChecklist(acl, nullptr, nullptr);
3519 clientAclChecklistFill(*checklist, http);
3520 return checklist;
3521 }
3522
/// Populates the given checklist with request, access-log, and connection
/// details taken from the client-side transaction, preserving any fields the
/// checklist creator has already supplied.
void
clientAclChecklistFill(ACLFilledChecklist &checklist, ClientHttpRequest *http)
{
    assert(http);

    // do not overwrite a request supplied by the checklist creator
    if (!checklist.request && http->request)
        checklist.setRequest(http->request);

    if (!checklist.al && http->al) {
        checklist.al = http->al;
        checklist.syncAle(http->request, http->log_uri);
        if (!checklist.reply && http->al->reply) {
            checklist.reply = http->al->reply.getRaw();
            HTTPMSGLOCK(checklist.reply); // checklist holds its own reference
        }
    }

    if (const auto conn = http->getConn())
        checklist.setConn(conn); // may already be set
}
3543
3544 void
3545 ConnStateData::fillChecklist(ACLFilledChecklist &checklist) const
3546 {
3547 const auto context = pipeline.front();
3548 if (const auto http = context ? context->http : nullptr)
3549 return clientAclChecklistFill(checklist, http); // calls checklist.setConn()
3550
3551 // no requests, but we always have connection-level details
3552 // TODO: ACL checks should not require a mutable ConnStateData. Adjust the
3553 // code that accidentally violates that principle to remove this const_cast!
3554 checklist.setConn(const_cast<ConnStateData*>(this));
3555
3556 // Set other checklist fields inside our fillConnectionLevelDetails() rather
3557 // than here because clientAclChecklistFill() code path calls that method
3558 // (via ACLFilledChecklist::setConn()) rather than calling us directly.
3559 }
3560
/// Fills checklist fields that depend only on the client connection itself.
/// Reached via ACLFilledChecklist::setConn() (see fillChecklist() note).
void
ConnStateData::fillConnectionLevelDetails(ACLFilledChecklist &checklist) const
{
    assert(checklist.conn() == this);
    assert(clientConnection);

    if (!checklist.request) { // preserve (better) addresses supplied by setRequest()
        checklist.src_addr = clientConnection->remote;
        checklist.my_addr = clientConnection->local; // TODO: or port->s?
    }

#if USE_OPENSSL
    // share accumulated TLS certificate validation errors, if bumping
    if (!checklist.sslErrors && sslServerBump)
        checklist.sslErrors = cbdataReference(sslServerBump->sslErrors());
#endif

    if (!checklist.rfc931[0]) // checklist creator may have supplied it already
        checklist.setIdent(clientConnection->rfc931);

}
3581
3582 bool
3583 ConnStateData::transparent() const
3584 {
3585 return clientConnection != nullptr && (clientConnection->flags & (COMM_TRANSPARENT|COMM_INTERCEPTION));
3586 }
3587
3588 BodyPipe::Pointer
3589 ConnStateData::expectRequestBody(int64_t size)
3590 {
3591 bodyPipe = new BodyPipe(this);
3592 if (size >= 0)
3593 bodyPipe->setBodySize(size);
3594 else
3595 startDechunkingRequest();
3596 return bodyPipe;
3597 }
3598
3599 int64_t
3600 ConnStateData::mayNeedToReadMoreBody() const
3601 {
3602 if (!bodyPipe)
3603 return 0; // request without a body or read/produced all body bytes
3604
3605 if (!bodyPipe->bodySizeKnown())
3606 return -1; // probably need to read more, but we cannot be sure
3607
3608 const int64_t needToProduce = bodyPipe->unproducedSize();
3609 const int64_t haveAvailable = static_cast<int64_t>(inBuf.length());
3610
3611 if (needToProduce <= haveAvailable)
3612 return 0; // we have read what we need (but are waiting for pipe space)
3613
3614 return needToProduce - haveAvailable;
3615 }
3616
3617 void
3618 ConnStateData::stopReceiving(const char *error)
3619 {
3620 debugs(33, 4, "receiving error (" << clientConnection << "): " << error <<
3621 "; old sending error: " <<
3622 (stoppedSending() ? stoppedSending_ : "none"));
3623
3624 if (const char *oldError = stoppedReceiving()) {
3625 debugs(33, 3, "already stopped receiving: " << oldError);
3626 return; // nothing has changed as far as this connection is concerned
3627 }
3628
3629 stoppedReceiving_ = error;
3630
3631 if (const char *sendError = stoppedSending()) {
3632 debugs(33, 3, "closing because also stopped sending: " << sendError);
3633 clientConnection->close();
3634 }
3635 }
3636
3637 void
3638 ConnStateData::expectNoForwarding()
3639 {
3640 if (bodyPipe != nullptr) {
3641 debugs(33, 4, "no consumer for virgin body " << bodyPipe->status());
3642 bodyPipe->expectNoConsumption();
3643 }
3644 }
3645
3646 /// initialize dechunking state
3647 void
3648 ConnStateData::startDechunkingRequest()
3649 {
3650 Must(bodyPipe != nullptr);
3651 debugs(33, 5, "start dechunking" << bodyPipe->status());
3652 assert(!bodyParser);
3653 bodyParser = new Http1::TeChunkedParser;
3654 }
3655
/// put parsed content into input buffer and clean up
/// \param withSuccess whether dechunking completed without errors
void
ConnStateData::finishDechunkingRequest(bool withSuccess)
{
    debugs(33, 5, "finish dechunking: " << withSuccess);

    if (bodyPipe != nullptr) {
        debugs(33, 7, "dechunked tail: " << bodyPipe->status());
        // keep the pipe alive: stopProducingFor() resets our bodyPipe member
        BodyPipe::Pointer myPipe = bodyPipe;
        stopProducingFor(bodyPipe, withSuccess); // sets bodyPipe->bodySize()
        Must(!bodyPipe); // we rely on it being nil after we are done with body
        if (withSuccess) {
            Must(myPipe->bodySizeKnown());
            // tell the current request its now-known body length
            Http::StreamPointer context = pipeline.front();
            if (context != nullptr && context->http && context->http->request)
                context->http->request->setContentLength(myPipe->bodySize());
        }
    }

    delete bodyParser;
    bodyParser = nullptr;
}
3678
// XXX: this is an HTTP/1-only operation
/// Sends an HTTP 1xx control message to the client (when the connection is
/// still open and a transaction exists to trigger it), remembering
/// msg.cbSuccess so that the sender can be notified after the write.
void
ConnStateData::sendControlMsg(HttpControlMsg msg)
{
    // remember the 1xx reply on the current transaction's access-log entry
    if (const auto context = pipeline.front()) {
        if (context->http)
            context->http->al->reply = msg.reply;
    }

    if (!isOpen()) {
        debugs(33, 3, "ignoring 1xx due to earlier closure");
        return;
    }

    // HTTP/1 1xx status messages are only valid when there is a transaction to trigger them
    if (!pipeline.empty()) {
        HttpReply::Pointer rep(msg.reply);
        Must(rep);
        // remember the callback
        cbControlMsgSent = msg.cbSuccess;

        typedef CommCbMemFunT<HttpControlMsgSink, CommIoCbParams> Dialer;
        AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, HttpControlMsgSink::wroteControlMsg);

        if (!writeControlMsgAndCall(rep.getRaw(), call)) {
            // but still inform the caller (so it may resume its operation)
            doneWithControlMsg();
        }
        return;
    }

    debugs(33, 3, " closing due to missing context for 1xx");
    clientConnection->close();
}
3713
3714 void
3715 ConnStateData::doneWithControlMsg()
3716 {
3717 HttpControlMsgSink::doneWithControlMsg();
3718
3719 if (Http::StreamPointer deferredRequest = pipeline.front()) {
3720 debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded after control msg wrote");
3721 ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
3722 }
3723 }
3724
/// Our close handler called by Comm when the pinned connection is closed
void
ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
{
    // FwdState might repin a failed connection sooner than this close
    // callback is called for the failed connection.
    assert(pinning.serverConnection == io.conn);
    pinning.closeHandler = nullptr; // Comm unregisters handlers before calling
    const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
    pinning.serverConnection->noteClosure();
    unpinConnection(false); // false: do not close the already-closing connection

    if (sawZeroReply && clientConnection != nullptr) {
        debugs(33, 3, "Closing client connection on pinned zero reply.");
        clientConnection->close();
    }

}
3743
/// Pins the given still-in-use server connection to this client connection.
/// Unlike notePinnedConnectionBecameIdle(), does not start remote-closure
/// monitoring because the connection is not idle.
void
ConnStateData::pinBusyConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest::Pointer &request)
{
    pinConnection(pinServer, *request);
}
3749
/// Pins the given now-idle server connection, starts watching it for
/// remote-end closures, and resumes parsing of any queued client requests.
void
ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
{
    Must(pic.connection);
    Must(pic.request);
    pinConnection(pic.connection, *pic.request);

    // monitor pinned server connection for remote-end closures.
    startPinnedConnectionMonitoring();

    if (pipeline.empty())
        kick(); // in case clientParseRequests() was blocked by a busy pic.connection
}
3763
3764 /// Forward future client requests using the given server connection.
3765 void
3766 ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest &request)
3767 {
3768 if (Comm::IsConnOpen(pinning.serverConnection) &&
3769 pinning.serverConnection->fd == pinServer->fd) {
3770 debugs(33, 3, "already pinned" << pinServer);
3771 return;
3772 }
3773
3774 unpinConnection(true); // closes pinned connection, if any, and resets fields
3775
3776 pinning.serverConnection = pinServer;
3777
3778 debugs(33, 3, pinning.serverConnection);
3779
3780 Must(pinning.serverConnection != nullptr);
3781
3782 const char *pinnedHost = "[unknown]";
3783 pinning.host = xstrdup(request.url.host());
3784 pinning.port = request.url.port();
3785 pinnedHost = pinning.host;
3786 pinning.pinned = true;
3787 if (CachePeer *aPeer = pinServer->getPeer())
3788 pinning.peer = cbdataReference(aPeer);
3789 pinning.auth = request.flags.connectionAuth;
3790 char stmp[MAX_IPSTRLEN];
3791 char desc[FD_DESC_SZ];
3792 snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
3793 (pinning.auth || !pinning.peer) ? pinnedHost : pinning.peer->name,
3794 clientConnection->remote.toUrl(stmp,MAX_IPSTRLEN),
3795 clientConnection->fd);
3796 fd_note(pinning.serverConnection->fd, desc);
3797
3798 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3799 pinning.closeHandler = JobCallback(33, 5,
3800 Dialer, this, ConnStateData::clientPinnedConnectionClosed);
3801 // remember the pinned connection so that cb does not unpin a fresher one
3802 typedef CommCloseCbParams Params;
3803 Params &params = GetCommParams<Params>(pinning.closeHandler);
3804 params.conn = pinning.serverConnection;
3805 comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
3806 }
3807
3808 /// [re]start monitoring pinned connection for peer closures so that we can
3809 /// propagate them to an _idle_ client pinned to that peer
3810 void
3811 ConnStateData::startPinnedConnectionMonitoring()
3812 {
3813 if (pinning.readHandler != nullptr)
3814 return; // already monitoring
3815
3816 typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
3817 pinning.readHandler = JobCallback(33, 3,
3818 Dialer, this, ConnStateData::clientPinnedConnectionRead);
3819 Comm::Read(pinning.serverConnection, pinning.readHandler);
3820 }
3821
3822 void
3823 ConnStateData::stopPinnedConnectionMonitoring()
3824 {
3825 if (pinning.readHandler != nullptr) {
3826 Comm::ReadCancel(pinning.serverConnection->fd, pinning.readHandler);
3827 pinning.readHandler = nullptr;
3828 }
3829 }
3830
#if USE_OPENSSL
/// Probes an idle pinned TLS connection that became ready for reading.
/// \returns true if the connection was kept pinned (monitoring restarted)
bool
ConnStateData::handleIdleClientPinnedTlsRead()
{
    // A ready-for-reading connection means that the TLS server either closed
    // the connection, sent us some unexpected HTTP data, or started TLS
    // renegotiations. We should close the connection except for the last case.

    Must(pinning.serverConnection != nullptr);
    auto ssl = fd_table[pinning.serverConnection->fd].ssl.get();
    if (!ssl)
        return false; // not a TLS connection; let the caller close it

    // one-byte probe; OpenSSL handles renegotiation inside SSL_read()
    char buf[1];
    const int readResult = SSL_read(ssl, buf, sizeof(buf));

    if (readResult > 0 || SSL_pending(ssl) > 0) {
        // unexpected application data on an idle connection
        debugs(83, 2, pinning.serverConnection << " TLS application data read");
        return false;
    }

    switch(const int error = SSL_get_error(ssl, readResult)) {
    case SSL_ERROR_WANT_WRITE:
        debugs(83, DBG_IMPORTANT, pinning.serverConnection << " TLS SSL_ERROR_WANT_WRITE request for idle pinned connection");
        // fall through to restart monitoring, for now
    case SSL_ERROR_NONE:
    case SSL_ERROR_WANT_READ:
        // renegotiation (or nothing useful yet): keep the connection pinned
        startPinnedConnectionMonitoring();
        return true;

    default:
        debugs(83, 2, pinning.serverConnection << " TLS error: " << error);
        return false;
    }

    // not reached
    return true;
}
#endif
3870
/// Our read handler called by Comm when the server either closes an idle pinned connection or
/// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
void
ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
{
    pinning.readHandler = nullptr; // Comm unregisters handlers before calling

    if (io.flag == Comm::ERR_CLOSING)
        return; // close handler will clean up

    Must(pinning.serverConnection == io.conn);

#if USE_OPENSSL
    // TLS renegotiation may keep the connection pinned (and re-monitored)
    if (handleIdleClientPinnedTlsRead())
        return;
#endif

    const bool clientIsIdle = pipeline.empty();

    debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " <<
           io.size << (clientIsIdle ? " with idle client" : ""));

    pinning.serverConnection->close();

    // If we are still sending data to the client, do not close now. When we are done sending,
    // ConnStateData::kick() checks pinning.serverConnection and will close.
    // However, if we are idle, then we must close to inform the idle client and minimize races.
    if (clientIsIdle && clientConnection != nullptr)
        clientConnection->close();
}
3901
/// Returns the pinned server connection for reuse by the given request and
/// stops idle monitoring. Unpins and throws an ErrorState when the pinned
/// connection cannot serve this request.
Comm::ConnectionPointer
ConnStateData::borrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
{
    debugs(33, 7, pinning.serverConnection);
    Must(request);

    // unpins (closing the server connection) and builds a forwarding error
    const auto pinningError = [&](const err_type type) {
        unpinConnection(true);
        HttpRequestPointer requestPointer = request;
        return ErrorState::NewForwarding(type, requestPointer, ale);
    };

    if (!Comm::IsConnOpen(pinning.serverConnection))
        throw pinningError(ERR_ZERO_SIZE_OBJECT);

    // connection-authenticated sessions must keep talking to the same host:port
    if (pinning.auth && pinning.host && strcasecmp(pinning.host, request->url.host()) != 0)
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST

    if (pinning.port != request->url.port())
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST

    // the pinned cache_peer may have been reconfigured away
    if (pinning.peer && !cbdataReferenceValid(pinning.peer))
        throw pinningError(ERR_ZERO_SIZE_OBJECT);

    if (pinning.peerAccessDenied)
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_FORWARDING_DENIED

    stopPinnedConnectionMonitoring();
    return pinning.serverConnection;
}
3932
3933 Comm::ConnectionPointer
3934 ConnStateData::BorrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
3935 {
3936 if (const auto connManager = request ? request->pinnedConnection() : nullptr)
3937 return connManager->borrowPinnedConnection(request, ale);
3938
3939 // ERR_CANNOT_FORWARD is somewhat misleading here; we can still forward, but
3940 // there is no point since the client connection is now gone
3941 HttpRequestPointer requestPointer = request;
3942 throw ErrorState::NewForwarding(ERR_CANNOT_FORWARD, requestPointer, ale);
3943 }
3944
/// Dissociates the pinned server connection (if any) from this client
/// connection, resetting most pinning state.
/// \param andClose whether to also close the server-side socket
void
ConnStateData::unpinConnection(const bool andClose)
{
    debugs(33, 3, pinning.serverConnection);

    if (pinning.peer)
        cbdataReferenceDone(pinning.peer);

    if (Comm::IsConnOpen(pinning.serverConnection)) {
        // unregister our close handler before (possibly) closing below
        if (pinning.closeHandler != nullptr) {
            comm_remove_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
            pinning.closeHandler = nullptr;
        }

        stopPinnedConnectionMonitoring();

        // close the server side socket if requested
        if (andClose)
            pinning.serverConnection->close();
        pinning.serverConnection = nullptr;
    }

    safe_free(pinning.host);

    pinning.zeroReply = false;
    pinning.peerAccessDenied = false;

    /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
     * connection has gone away */
}
3975
3976 void
3977 ConnStateData::terminateAll(const Error &rawError, const LogTagsErrors &lte)
3978 {
3979 auto error = rawError; // (cheap) copy so that we can detail
3980 // We detail even ERR_NONE: There should be no transactions left, and
3981 // detailed ERR_NONE will be unused. Otherwise, this detail helps in triage.
3982 if (!error.detail) {
3983 static const auto d = MakeNamedErrorDetail("WITH_CLIENT");
3984 error.detail = d;
3985 }
3986
3987 debugs(33, 3, pipeline.count() << '/' << pipeline.nrequests << " after " << error);
3988
3989 if (pipeline.empty()) {
3990 bareError.update(error); // XXX: bareLogTagsErrors
3991 } else {
3992 // We terminate the current CONNECT/PUT/etc. context below, logging any
3993 // error details, but that context may leave unparsed bytes behind.
3994 // Consume them to stop checkLogging() from logging them again later.
3995 const auto intputToConsume =
3996 #if USE_OPENSSL
3997 parsingTlsHandshake ? "TLS handshake" : // more specific than CONNECT
3998 #endif
3999 bodyPipe ? "HTTP request body" :
4000 pipeline.back()->mayUseConnection() ? "HTTP CONNECT" :
4001 nullptr;
4002
4003 while (const auto context = pipeline.front()) {
4004 context->noteIoError(error, lte);
4005 context->finished(); // cleanup and self-deregister
4006 assert(context != pipeline.front());
4007 }
4008
4009 if (intputToConsume && !inBuf.isEmpty()) {
4010 debugs(83, 5, "forgetting client " << intputToConsume << " bytes: " << inBuf.length());
4011 inBuf.clear();
4012 }
4013 }
4014
4015 clientConnection->close();
4016 }
4017
/// log the last (attempt at) transaction if nobody else did
void
ConnStateData::checkLogging()
{
    // to simplify our logic, we assume that terminateAll() has been called
    assert(pipeline.empty());

    // do not log connections that closed after a transaction (it is normal)
    // TODO: access_log needs ACLs to match received-no-bytes connections
    if (pipeline.nrequests && inBuf.isEmpty())
        return;

    /* Create a temporary ClientHttpRequest object. Its destructor will log. */
    ClientHttpRequest http(this);
    http.req_sz = inBuf.length(); // count any unparsed leftover bytes
    // XXX: Or we died while waiting for the pinned connection to become idle.
    http.setErrorUri("error:transaction-end-before-headers");
    http.updateError(bareError); // record the connection-level error, if any
}
4037
4038 bool
4039 ConnStateData::shouldPreserveClientData() const
4040 {
4041 // PROXY protocol bytes are meant for us and, hence, cannot be tunneled
4042 if (needProxyProtocolHeader_)
4043 return false;
4044
4045 // If our decision here is negative, configuration changes are irrelevant.
4046 // Otherwise, clientTunnelOnError() rechecks configuration before tunneling.
4047 if (!Config.accessList.on_unsupported_protocol)
4048 return false;
4049
4050 // TODO: Figure out whether/how we can support FTP tunneling.
4051 if (port->transport.protocol == AnyP::PROTO_FTP)
4052 return false;
4053
4054 #if USE_OPENSSL
4055 if (parsingTlsHandshake)
4056 return true;
4057
4058 // the 1st HTTP request on a bumped connection
4059 if (!parsedBumpedRequestCount && switchedToHttps())
4060 return true;
4061 #endif
4062
4063 // the 1st HTTP(S) request on a connection to an intercepting port
4064 if (!pipeline.nrequests && transparent())
4065 return true;
4066
4067 return false;
4068 }
4069
4070 NotePairs::Pointer
4071 ConnStateData::notes()
4072 {
4073 if (!theNotes)
4074 theNotes = new NotePairs;
4075 return theNotes;
4076 }
4077
4078 std::ostream &
4079 operator <<(std::ostream &os, const ConnStateData::PinnedIdleContext &pic)
4080 {
4081 return os << pic.connection << ", request=" << pic.request;
4082 }
4083
4084 std::ostream &
4085 operator <<(std::ostream &os, const ConnStateData::ServerConnectionContext &scc)
4086 {
4087 return os << scc.conn_ << ", srv_bytes=" << scc.preReadServerBytes.length();
4088 }
4089