]> git.ipfire.org Git - thirdparty/squid.git/blob - src/client_side.cc
Handle more Range requests (#790)
[thirdparty/squid.git] / src / client_side.cc
1 /*
2 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 33 Client-side Routines */
10
11 /**
12 \defgroup ClientSide Client-Side Logics
13 *
14 \section cserrors Errors and client side
15 *
16 \par Problem the first:
17 * the store entry is no longer authoritative on the
18 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
19 * of client_side_reply.c.
20 * Problem the second: resources are wasted if we delay in cleaning up.
21 * Problem the third we can't depend on a connection close to clean up.
22 *
23 \par Nice thing the first:
24 * Any step in the stream can callback with data
25 * representing an error.
26 * Nice thing the second: once you stop requesting reads from upstream,
27 * upstream can be stopped too.
28 *
29 \par Solution #1:
30 * Error has a callback mechanism to hand over a membuf
31 * with the error content. The failing node pushes that back as the
32 * reply. Can this be generalised to reduce duplicate efforts?
33 * A: Possibly. For now, only one location uses this.
34 * How to deal with pre-stream errors?
35 * Tell client_side_reply that we *want* an error page before any
36 * stream calls occur. Then we simply read as normal.
37 *
38 *
39 \section pconn_logic Persistent connection logic:
40 *
41 \par
42 * requests (httpClientRequest structs) get added to the connection
43 * list, with the current one being chr
44 *
45 \par
46 * The request is *immediately* kicked off, and data flows through
47 * to clientSocketRecipient.
48 *
49 \par
50 * If the data that arrives at clientSocketRecipient is not for the current
51 * request, clientSocketRecipient simply returns, without requesting more
52 * data, or sending it.
53 *
54 \par
55 * ConnStateData::kick() will then detect the presence of data in
56 * the next ClientHttpRequest, and will send it, restablishing the
57 * data flow.
58 */
59
60 #include "squid.h"
61 #include "acl/FilledChecklist.h"
62 #include "anyp/PortCfg.h"
63 #include "base/Subscription.h"
64 #include "base/TextException.h"
65 #include "CachePeer.h"
66 #include "client_db.h"
67 #include "client_side.h"
68 #include "client_side_reply.h"
69 #include "client_side_request.h"
70 #include "ClientRequestContext.h"
71 #include "clientStream.h"
72 #include "comm.h"
73 #include "comm/Connection.h"
74 #include "comm/Loops.h"
75 #include "comm/Read.h"
76 #include "comm/TcpAcceptor.h"
77 #include "comm/Write.h"
78 #include "CommCalls.h"
79 #include "error/ExceptionErrorDetail.h"
80 #include "errorpage.h"
81 #include "fd.h"
82 #include "fde.h"
83 #include "fqdncache.h"
84 #include "FwdState.h"
85 #include "globals.h"
86 #include "helper.h"
87 #include "helper/Reply.h"
88 #include "http.h"
89 #include "http/one/RequestParser.h"
90 #include "http/one/TeChunkedParser.h"
91 #include "http/Stream.h"
92 #include "HttpHdrContRange.h"
93 #include "HttpHeaderTools.h"
94 #include "HttpReply.h"
95 #include "HttpRequest.h"
96 #include "ident/Config.h"
97 #include "ident/Ident.h"
98 #include "internal.h"
99 #include "ipc/FdNotes.h"
100 #include "ipc/StartListening.h"
101 #include "log/access_log.h"
102 #include "MemBuf.h"
103 #include "MemObject.h"
104 #include "mime_header.h"
105 #include "parser/Tokenizer.h"
106 #include "profiler/Profiler.h"
107 #include "proxyp/Header.h"
108 #include "proxyp/Parser.h"
109 #include "sbuf/Stream.h"
110 #include "security/Io.h"
111 #include "security/NegotiationHistory.h"
112 #include "servers/forward.h"
113 #include "SquidConfig.h"
114 #include "SquidTime.h"
115 #include "StatCounters.h"
116 #include "StatHist.h"
117 #include "Store.h"
118 #include "TimeOrTag.h"
119 #include "tools.h"
120
121 #if USE_AUTH
122 #include "auth/UserRequest.h"
123 #endif
124 #if USE_DELAY_POOLS
125 #include "ClientInfo.h"
126 #include "MessageDelayPools.h"
127 #endif
128 #if USE_OPENSSL
129 #include "ssl/bio.h"
130 #include "ssl/context_storage.h"
131 #include "ssl/gadgets.h"
132 #include "ssl/helper.h"
133 #include "ssl/ProxyCerts.h"
134 #include "ssl/ServerBump.h"
135 #include "ssl/support.h"
136 #endif
137
138 // for tvSubUsec() which should be in SquidTime.h
139 #include "util.h"
140
141 #include <climits>
142 #include <cmath>
143 #include <limits>
144
145 #if HAVE_SYSTEMD_SD_DAEMON_H
146 #include <systemd/sd-daemon.h>
147 #endif
148
149 #if LINGERING_CLOSE
150 #define comm_close comm_lingering_close
151 #endif
152
153 /// dials clientListenerConnectionOpened call
154 class ListeningStartedDialer: public CallDialer, public Ipc::StartListeningCb
155 {
156 public:
157 typedef void (*Handler)(AnyP::PortCfgPointer &portCfg, const Ipc::FdNoteId note, const Subscription::Pointer &sub);
158 ListeningStartedDialer(Handler aHandler, AnyP::PortCfgPointer &aPortCfg, const Ipc::FdNoteId note, const Subscription::Pointer &aSub):
159 handler(aHandler), portCfg(aPortCfg), portTypeNote(note), sub(aSub) {}
160
161 virtual void print(std::ostream &os) const {
162 startPrint(os) <<
163 ", " << FdNote(portTypeNote) << " port=" << (void*)&portCfg << ')';
164 }
165
166 virtual bool canDial(AsyncCall &) const { return true; }
167 virtual void dial(AsyncCall &) { (handler)(portCfg, portTypeNote, sub); }
168
169 public:
170 Handler handler;
171
172 private:
173 AnyP::PortCfgPointer portCfg; ///< from HttpPortList
174 Ipc::FdNoteId portTypeNote; ///< Type of IPC socket being opened
175 Subscription::Pointer sub; ///< The handler to be subscribed for this connection listener
176 };
177
178 static void clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub);
179
180 static IOACB httpAccept;
181 #if USE_IDENT
182 static IDCB clientIdentDone;
183 #endif
184 static int clientIsContentLengthValid(HttpRequest * r);
185 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength);
186
187 static void clientUpdateStatHistCounters(const LogTags &logType, int svc_time);
188 static void clientUpdateStatCounters(const LogTags &logType);
189 static void clientUpdateHierCounters(HierarchyLogEntry *);
190 static bool clientPingHasFinished(ping_data const *aPing);
191 void prepareLogWithRequestDetails(HttpRequest *, AccessLogEntry::Pointer &);
192 static void ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn);
193
194 char *skipLeadingSpace(char *aString);
195
196 #if USE_IDENT
197 static void
198 clientIdentDone(const char *ident, void *data)
199 {
200 ConnStateData *conn = (ConnStateData *)data;
201 xstrncpy(conn->clientConnection->rfc931, ident ? ident : dash_str, USER_IDENT_SZ);
202 }
203 #endif
204
205 void
206 clientUpdateStatCounters(const LogTags &logType)
207 {
208 ++statCounter.client_http.requests;
209
210 if (logType.isTcpHit())
211 ++statCounter.client_http.hits;
212
213 if (logType.oldType == LOG_TCP_HIT)
214 ++statCounter.client_http.disk_hits;
215 else if (logType.oldType == LOG_TCP_MEM_HIT)
216 ++statCounter.client_http.mem_hits;
217 }
218
219 void
220 clientUpdateStatHistCounters(const LogTags &logType, int svc_time)
221 {
222 statCounter.client_http.allSvcTime.count(svc_time);
223 /**
224 * The idea here is not to be complete, but to get service times
225 * for only well-defined types. For example, we don't include
226 * LOG_TCP_REFRESH_FAIL because its not really a cache hit
227 * (we *tried* to validate it, but failed).
228 */
229
230 switch (logType.oldType) {
231
232 case LOG_TCP_REFRESH_UNMODIFIED:
233 statCounter.client_http.nearHitSvcTime.count(svc_time);
234 break;
235
236 case LOG_TCP_INM_HIT:
237 case LOG_TCP_IMS_HIT:
238 statCounter.client_http.nearMissSvcTime.count(svc_time);
239 break;
240
241 case LOG_TCP_HIT:
242
243 case LOG_TCP_MEM_HIT:
244
245 case LOG_TCP_OFFLINE_HIT:
246 statCounter.client_http.hitSvcTime.count(svc_time);
247 break;
248
249 case LOG_TCP_MISS:
250
251 case LOG_TCP_CLIENT_REFRESH_MISS:
252 statCounter.client_http.missSvcTime.count(svc_time);
253 break;
254
255 default:
256 /* make compiler warnings go away */
257 break;
258 }
259 }
260
261 bool
262 clientPingHasFinished(ping_data const *aPing)
263 {
264 if (0 != aPing->stop.tv_sec && 0 != aPing->start.tv_sec)
265 return true;
266
267 return false;
268 }
269
270 void
271 clientUpdateHierCounters(HierarchyLogEntry * someEntry)
272 {
273 ping_data *i;
274
275 switch (someEntry->code) {
276 #if USE_CACHE_DIGESTS
277
278 case CD_PARENT_HIT:
279
280 case CD_SIBLING_HIT:
281 ++ statCounter.cd.times_used;
282 break;
283 #endif
284
285 case SIBLING_HIT:
286
287 case PARENT_HIT:
288
289 case FIRST_PARENT_MISS:
290
291 case CLOSEST_PARENT_MISS:
292 ++ statCounter.icp.times_used;
293 i = &someEntry->ping;
294
295 if (clientPingHasFinished(i))
296 statCounter.icp.querySvcTime.count(tvSubUsec(i->start, i->stop));
297
298 if (i->timeout)
299 ++ statCounter.icp.query_timeouts;
300
301 break;
302
303 case CLOSEST_PARENT:
304
305 case CLOSEST_DIRECT:
306 ++ statCounter.netdb.times_used;
307
308 break;
309
310 default:
311 break;
312 }
313 }
314
315 void
316 ClientHttpRequest::updateCounters()
317 {
318 clientUpdateStatCounters(logType);
319
320 if (request->error)
321 ++ statCounter.client_http.errors;
322
323 clientUpdateStatHistCounters(logType,
324 tvSubMsec(al->cache.start_time, current_time));
325
326 clientUpdateHierCounters(&request->hier);
327 }
328
329 void
330 prepareLogWithRequestDetails(HttpRequest * request, AccessLogEntry::Pointer &aLogEntry)
331 {
332 assert(request);
333 assert(aLogEntry != NULL);
334
335 if (Config.onoff.log_mime_hdrs) {
336 MemBuf mb;
337 mb.init();
338 request->header.packInto(&mb);
339 //This is the request after adaptation or redirection
340 aLogEntry->headers.adapted_request = xstrdup(mb.buf);
341
342 // the virgin request is saved to aLogEntry->request
343 if (aLogEntry->request) {
344 mb.reset();
345 aLogEntry->request->header.packInto(&mb);
346 aLogEntry->headers.request = xstrdup(mb.buf);
347 }
348
349 #if USE_ADAPTATION
350 const Adaptation::History::Pointer ah = request->adaptLogHistory();
351 if (ah != NULL) {
352 mb.reset();
353 ah->lastMeta.packInto(&mb);
354 aLogEntry->adapt.last_meta = xstrdup(mb.buf);
355 }
356 #endif
357
358 mb.clean();
359 }
360
361 #if ICAP_CLIENT
362 const Adaptation::Icap::History::Pointer ih = request->icapHistory();
363 if (ih != NULL)
364 ih->processingTime(aLogEntry->icap.processingTime);
365 #endif
366
367 aLogEntry->http.method = request->method;
368 aLogEntry->http.version = request->http_ver;
369 aLogEntry->hier = request->hier;
370 aLogEntry->cache.extuser = request->extacl_user.termedBuf();
371
372 // Adapted request, if any, inherits and then collects all the stats, but
373 // the virgin request gets logged instead; copy the stats to log them.
374 // TODO: avoid losses by keeping these stats in a shared history object?
375 if (aLogEntry->request) {
376 aLogEntry->request->dnsWait = request->dnsWait;
377 aLogEntry->request->error = request->error;
378 }
379 }
380
381 void
382 ClientHttpRequest::logRequest()
383 {
384 if (!out.size && logType.oldType == LOG_TAG_NONE)
385 debugs(33, 5, "logging half-baked transaction: " << log_uri);
386
387 al->icp.opcode = ICP_INVALID;
388 al->url = log_uri;
389 debugs(33, 9, "clientLogRequest: al.url='" << al->url << "'");
390
391 const auto findReply = [this]() -> const HttpReply * {
392 if (al->reply)
393 return al->reply.getRaw();
394 if (const auto le = loggingEntry())
395 return le->hasFreshestReply();
396 return nullptr;
397 };
398 if (const auto reply = findReply()) {
399 al->http.code = reply->sline.status();
400 al->http.content_type = reply->content_type.termedBuf();
401 }
402
403 debugs(33, 9, "clientLogRequest: http.code='" << al->http.code << "'");
404
405 if (loggingEntry() && loggingEntry()->mem_obj && loggingEntry()->objectLen() >= 0)
406 al->cache.objectSize = loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?
407
408 al->http.clientRequestSz.header = req_sz;
409 // the virgin request is saved to al->request
410 if (al->request && al->request->body_pipe)
411 al->http.clientRequestSz.payloadData = al->request->body_pipe->producedSize();
412 al->http.clientReplySz.header = out.headers_sz;
413 // XXX: calculate without payload encoding or headers !!
414 al->http.clientReplySz.payloadData = out.size - out.headers_sz; // pretend its all un-encoded data for now.
415
416 al->cache.highOffset = out.offset;
417
418 al->cache.code = logType;
419
420 tvSub(al->cache.trTime, al->cache.start_time, current_time);
421
422 if (request)
423 prepareLogWithRequestDetails(request, al);
424
425 #if USE_OPENSSL && 0
426
427 /* This is broken. Fails if the connection has been closed. Needs
428 * to snarf the ssl details some place earlier..
429 */
430 if (getConn() != NULL)
431 al->cache.ssluser = sslGetUserEmail(fd_table[getConn()->fd].ssl);
432
433 #endif
434
435 /* Add notes (if we have a request to annotate) */
436 if (request) {
437 SBuf matched;
438 for (auto h: Config.notes) {
439 if (h->match(request, al->reply.getRaw(), al, matched)) {
440 request->notes()->add(h->key(), matched);
441 debugs(33, 3, h->key() << " " << matched);
442 }
443 }
444 // The al->notes and request->notes must point to the same object.
445 al->syncNotes(request);
446 }
447
448 ACLFilledChecklist checklist(NULL, request, NULL);
449 if (al->reply) {
450 checklist.reply = al->reply.getRaw();
451 HTTPMSGLOCK(checklist.reply);
452 }
453
454 if (request) {
455 HTTPMSGUNLOCK(al->adapted_request);
456 al->adapted_request = request;
457 HTTPMSGLOCK(al->adapted_request);
458 }
459 // no need checklist.syncAle(): already synced
460 checklist.al = al;
461 accessLogLog(al, &checklist);
462
463 bool updatePerformanceCounters = true;
464 if (Config.accessList.stats_collection) {
465 ACLFilledChecklist statsCheck(Config.accessList.stats_collection, request, NULL);
466 statsCheck.al = al;
467 if (al->reply) {
468 statsCheck.reply = al->reply.getRaw();
469 HTTPMSGLOCK(statsCheck.reply);
470 }
471 updatePerformanceCounters = statsCheck.fastCheck().allowed();
472 }
473
474 if (updatePerformanceCounters) {
475 if (request)
476 updateCounters();
477
478 if (getConn() != NULL && getConn()->clientConnection != NULL)
479 clientdbUpdate(getConn()->clientConnection->remote, logType, AnyP::PROTO_HTTP, out.size);
480 }
481 }
482
483 void
484 ClientHttpRequest::freeResources()
485 {
486 safe_free(uri);
487 safe_free(redirect.location);
488 range_iter.boundary.clean();
489 clearRequest();
490
491 if (client_stream.tail)
492 clientStreamAbort((clientStreamNode *)client_stream.tail->data, this);
493 }
494
495 void
496 httpRequestFree(void *data)
497 {
498 ClientHttpRequest *http = (ClientHttpRequest *)data;
499 assert(http != NULL);
500 delete http;
501 }
502
503 /* This is a handler normally called by comm_close() */
504 void ConnStateData::connStateClosed(const CommCloseCbParams &)
505 {
506 deleteThis("ConnStateData::connStateClosed");
507 }
508
509 #if USE_AUTH
510 void
511 ConnStateData::setAuth(const Auth::UserRequest::Pointer &aur, const char *by)
512 {
513 if (auth_ == NULL) {
514 if (aur != NULL) {
515 debugs(33, 2, "Adding connection-auth to " << clientConnection << " from " << by);
516 auth_ = aur;
517 }
518 return;
519 }
520
521 // clobered with self-pointer
522 // NP: something nasty is going on in Squid, but harmless.
523 if (aur == auth_) {
524 debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection << " from " << by);
525 return;
526 }
527
528 /*
529 * Connection-auth relies on a single set of credentials being preserved
530 * for all requests on a connection once they have been setup.
531 * There are several things which need to happen to preserve security
532 * when connection-auth credentials change unexpectedly or are unset.
533 *
534 * 1) auth helper released from any active state
535 *
536 * They can only be reserved by a handshake process which this
537 * connection can now never complete.
538 * This prevents helpers hanging when their connections close.
539 *
540 * 2) pinning is expected to be removed and server conn closed
541 *
542 * The upstream link is authenticated with the same credentials.
543 * Expecting the same level of consistency we should have received.
544 * This prevents upstream being faced with multiple or missing
545 * credentials after authentication.
546 * NP: un-pin is left to the cleanup in ConnStateData::swanSong()
547 * we just trigger that cleanup here via comm_reset_close() or
548 * ConnStateData::stopReceiving()
549 *
550 * 3) the connection needs to close.
551 *
552 * This prevents attackers injecting requests into a connection,
553 * or gateways wrongly multiplexing users into a single connection.
554 *
555 * When credentials are missing closure needs to follow an auth
556 * challenge for best recovery by the client.
557 *
558 * When credentials change there is nothing we can do but abort as
559 * fast as possible. Sending TCP RST instead of an HTTP response
560 * is the best-case action.
561 */
562
563 // clobbered with nul-pointer
564 if (aur == NULL) {
565 debugs(33, 2, "WARNING: Graceful closure on " << clientConnection << " due to connection-auth erase from " << by);
566 auth_->releaseAuthServer();
567 auth_ = NULL;
568 // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
569 // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
570 // we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
571 stopReceiving("connection-auth removed");
572 return;
573 }
574
575 // clobbered with alternative credentials
576 if (aur != auth_) {
577 debugs(33, 2, "ERROR: Closing " << clientConnection << " due to change of connection-auth from " << by);
578 auth_->releaseAuthServer();
579 auth_ = NULL;
580 // this is a fatal type of problem.
581 // Close the connection immediately with TCP RST to abort all traffic flow
582 comm_reset_close(clientConnection);
583 return;
584 }
585
586 /* NOT REACHABLE */
587 }
588 #endif
589
590 void
591 ConnStateData::resetReadTimeout(const time_t timeout)
592 {
593 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
594 AsyncCall::Pointer callback = JobCallback(33, 5, TimeoutDialer, this, ConnStateData::requestTimeout);
595 commSetConnTimeout(clientConnection, timeout, callback);
596 }
597
598 void
599 ConnStateData::extendLifetime()
600 {
601 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
602 AsyncCall::Pointer callback = JobCallback(5, 4, TimeoutDialer, this, ConnStateData::lifetimeTimeout);
603 commSetConnTimeout(clientConnection, Config.Timeout.lifetime, callback);
604 }
605
606 // cleans up before destructor is called
607 void
608 ConnStateData::swanSong()
609 {
610 debugs(33, 2, HERE << clientConnection);
611
612 flags.readMore = false;
613 clientdbEstablished(clientConnection->remote, -1); /* decrement */
614
615 terminateAll(ERR_NONE, LogTagsErrors());
616 checkLogging();
617
618 // XXX: Closing pinned conn is too harsh: The Client may want to continue!
619 unpinConnection(true);
620
621 Server::swanSong();
622
623 #if USE_AUTH
624 // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
625 setAuth(NULL, "ConnStateData::SwanSong cleanup");
626 #endif
627
628 flags.swanSang = true;
629 }
630
631 void
632 ConnStateData::callException(const std::exception &ex)
633 {
634 Server::callException(ex); // logs ex and stops the job
635
636 ErrorDetail::Pointer errorDetail;
637 if (const auto tex = dynamic_cast<const TextException*>(&ex))
638 errorDetail = new ExceptionErrorDetail(tex->id());
639 else
640 errorDetail = new ExceptionErrorDetail(Here().id());
641 updateError(ERR_GATEWAY_FAILURE, errorDetail);
642 }
643
644 void
645 ConnStateData::updateError(const Error &error)
646 {
647 if (const auto context = pipeline.front()) {
648 const auto http = context->http;
649 assert(http);
650 http->updateError(error);
651 } else {
652 bareError.update(error);
653 }
654 }
655
656 bool
657 ConnStateData::isOpen() const
658 {
659 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
660 Comm::IsConnOpen(clientConnection) &&
661 !fd_table[clientConnection->fd].closing();
662 }
663
664 ConnStateData::~ConnStateData()
665 {
666 debugs(33, 3, HERE << clientConnection);
667
668 if (isOpen())
669 debugs(33, DBG_IMPORTANT, "BUG: ConnStateData did not close " << clientConnection);
670
671 if (!flags.swanSang)
672 debugs(33, DBG_IMPORTANT, "BUG: ConnStateData was not destroyed properly; " << clientConnection);
673
674 if (bodyPipe != NULL)
675 stopProducingFor(bodyPipe, false);
676
677 delete bodyParser; // TODO: pool
678
679 #if USE_OPENSSL
680 delete sslServerBump;
681 #endif
682 }
683
684 /**
685 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
686 * This is the client-side persistent connection flag. We need
687 * to set this relatively early in the request processing
688 * to handle hacks for broken servers and clients.
689 */
690 void
691 clientSetKeepaliveFlag(ClientHttpRequest * http)
692 {
693 HttpRequest *request = http->request;
694
695 debugs(33, 3, "http_ver = " << request->http_ver);
696 debugs(33, 3, "method = " << request->method);
697
698 // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
699 request->flags.proxyKeepalive = request->persistent();
700 }
701
702 /// checks body length of non-chunked requests
703 static int
704 clientIsContentLengthValid(HttpRequest * r)
705 {
706 // No Content-Length means this request just has no body, but conflicting
707 // Content-Lengths mean a message framing error (RFC 7230 Section 3.3.3 #4).
708 if (r->header.conflictingContentLength())
709 return 0;
710
711 switch (r->method.id()) {
712
713 case Http::METHOD_GET:
714
715 case Http::METHOD_HEAD:
716 /* We do not want to see a request entity on GET/HEAD requests */
717 return (r->content_length <= 0 || Config.onoff.request_entities);
718
719 default:
720 /* For other types of requests we don't care */
721 return 1;
722 }
723
724 /* NOT REACHED */
725 }
726
727 int
728 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)
729 {
730 if (Config.maxRequestBodySize &&
731 bodyLength > Config.maxRequestBodySize)
732 return 1; /* too large */
733
734 return 0;
735 }
736
737 bool
738 ClientHttpRequest::multipartRangeRequest() const
739 {
740 return request->multipartRangeRequest();
741 }
742
743 void
744 clientPackTermBound(String boundary, MemBuf *mb)
745 {
746 mb->appendf("\r\n--" SQUIDSTRINGPH "--\r\n", SQUIDSTRINGPRINT(boundary));
747 debugs(33, 6, "buf offset: " << mb->size);
748 }
749
750 void
751 clientPackRangeHdr(const HttpReplyPointer &rep, const HttpHdrRangeSpec * spec, String boundary, MemBuf * mb)
752 {
753 HttpHeader hdr(hoReply);
754 assert(rep);
755 assert(spec);
756
757 /* put boundary */
758 debugs(33, 5, "appending boundary: " << boundary);
759 /* rfc2046 requires to _prepend_ boundary with <crlf>! */
760 mb->appendf("\r\n--" SQUIDSTRINGPH "\r\n", SQUIDSTRINGPRINT(boundary));
761
762 /* stuff the header with required entries and pack it */
763
764 if (rep->header.has(Http::HdrType::CONTENT_TYPE))
765 hdr.putStr(Http::HdrType::CONTENT_TYPE, rep->header.getStr(Http::HdrType::CONTENT_TYPE));
766
767 httpHeaderAddContRange(&hdr, *spec, rep->content_length);
768
769 hdr.packInto(mb);
770 hdr.clean();
771
772 /* append <crlf> (we packed a header, not a reply) */
773 mb->append("\r\n", 2);
774 }
775
776 /** returns expected content length for multi-range replies
777 * note: assumes that httpHdrRangeCanonize has already been called
778 * warning: assumes that HTTP headers for individual ranges at the
779 * time of the actuall assembly will be exactly the same as
780 * the headers when clientMRangeCLen() is called */
781 int64_t
782 ClientHttpRequest::mRangeCLen() const
783 {
784 int64_t clen = 0;
785 MemBuf mb;
786
787 assert(memObject());
788
789 mb.init();
790 HttpHdrRange::iterator pos = request->range->begin();
791
792 while (pos != request->range->end()) {
793 /* account for headers for this range */
794 mb.reset();
795 clientPackRangeHdr(&storeEntry()->mem().freshestReply(),
796 *pos, range_iter.boundary, &mb);
797 clen += mb.size;
798
799 /* account for range content */
800 clen += (*pos)->length;
801
802 debugs(33, 6, "clientMRangeCLen: (clen += " << mb.size << " + " << (*pos)->length << ") == " << clen);
803 ++pos;
804 }
805
806 /* account for the terminating boundary */
807 mb.reset();
808
809 clientPackTermBound(range_iter.boundary, &mb);
810
811 clen += mb.size;
812
813 mb.clean();
814
815 return clen;
816 }
817
818 /**
819 * generates a "unique" boundary string for multipart responses
820 * the caller is responsible for cleaning the string */
821 String
822 ClientHttpRequest::rangeBoundaryStr() const
823 {
824 const char *key;
825 String b(APP_FULLNAME);
826 b.append(":",1);
827 key = storeEntry()->getMD5Text();
828 b.append(key, strlen(key));
829 return b;
830 }
831
832 /**
833 * Write a chunk of data to a client socket. If the reply is present,
834 * send the reply headers down the wire too, and clean them up when
835 * finished.
836 * Pre-condition:
837 * The request is one backed by a connection, not an internal request.
838 * data context is not NULL
839 * There are no more entries in the stream chain.
840 */
841 void
842 clientSocketRecipient(clientStreamNode * node, ClientHttpRequest * http,
843 HttpReply * rep, StoreIOBuffer receivedData)
844 {
845 // do not try to deliver if client already ABORTED
846 if (!http->getConn() || !cbdataReferenceValid(http->getConn()) || !Comm::IsConnOpen(http->getConn()->clientConnection))
847 return;
848
849 /* Test preconditions */
850 assert(node != NULL);
851 PROF_start(clientSocketRecipient);
852 /* TODO: handle this rather than asserting
853 * - it should only ever happen if we cause an abort and
854 * the callback chain loops back to here, so we can simply return.
855 * However, that itself shouldn't happen, so it stays as an assert for now.
856 */
857 assert(cbdataReferenceValid(node));
858 assert(node->node.next == NULL);
859 Http::StreamPointer context = dynamic_cast<Http::Stream *>(node->data.getRaw());
860 assert(context != NULL);
861
862 /* TODO: check offset is what we asked for */
863
864 // TODO: enforces HTTP/1 MUST on pipeline order, but is irrelevant to HTTP/2
865 if (context != http->getConn()->pipeline.front())
866 context->deferRecipientForLater(node, rep, receivedData);
867 else if (http->getConn()->cbControlMsgSent) // 1xx to the user is pending
868 context->deferRecipientForLater(node, rep, receivedData);
869 else
870 http->getConn()->handleReply(rep, receivedData);
871
872 PROF_stop(clientSocketRecipient);
873 }
874
875 /**
876 * Called when a downstream node is no longer interested in
877 * our data. As we are a terminal node, this means on aborts
878 * only
879 */
880 void
881 clientSocketDetach(clientStreamNode * node, ClientHttpRequest * http)
882 {
883 /* Test preconditions */
884 assert(node != NULL);
885 /* TODO: handle this rather than asserting
886 * - it should only ever happen if we cause an abort and
887 * the callback chain loops back to here, so we can simply return.
888 * However, that itself shouldn't happen, so it stays as an assert for now.
889 */
890 assert(cbdataReferenceValid(node));
891 /* Set null by ContextFree */
892 assert(node->node.next == NULL);
893 /* this is the assert discussed above */
894 assert(NULL == dynamic_cast<Http::Stream *>(node->data.getRaw()));
895 /* We are only called when the client socket shutsdown.
896 * Tell the prev pipeline member we're finished
897 */
898 clientStreamDetach(node, http);
899 }
900
901 void
902 ConnStateData::readNextRequest()
903 {
904 debugs(33, 5, HERE << clientConnection << " reading next req");
905
906 fd_note(clientConnection->fd, "Idle client: Waiting for next request");
907 /**
908 * Set the timeout BEFORE calling readSomeData().
909 */
910 resetReadTimeout(clientConnection->timeLeft(idleTimeout()));
911
912 readSomeData();
913 /** Please don't do anything with the FD past here! */
914 }
915
916 static void
917 ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn)
918 {
919 debugs(33, 2, HERE << conn->clientConnection << " Sending next");
920
921 /** If the client stream is waiting on a socket write to occur, then */
922
923 if (deferredRequest->flags.deferred) {
924 /** NO data is allowed to have been sent. */
925 assert(deferredRequest->http->out.size == 0);
926 /** defer now. */
927 clientSocketRecipient(deferredRequest->deferredparams.node,
928 deferredRequest->http,
929 deferredRequest->deferredparams.rep,
930 deferredRequest->deferredparams.queuedBuffer);
931 }
932
933 /** otherwise, the request is still active in a callbacksomewhere,
934 * and we are done
935 */
936 }
937
938 void
939 ConnStateData::kick()
940 {
941 if (!Comm::IsConnOpen(clientConnection)) {
942 debugs(33, 2, clientConnection << " Connection was closed");
943 return;
944 }
945
946 if (pinning.pinned && !Comm::IsConnOpen(pinning.serverConnection)) {
947 debugs(33, 2, clientConnection << " Connection was pinned but server side gone. Terminating client connection");
948 clientConnection->close();
949 return;
950 }
951
952 /** \par
953 * We are done with the response, and we are either still receiving request
954 * body (early response!) or have already stopped receiving anything.
955 *
956 * If we are still receiving, then clientParseRequest() below will fail.
957 * (XXX: but then we will call readNextRequest() which may succeed and
958 * execute a smuggled request as we are not done with the current request).
959 *
960 * If we stopped because we got everything, then try the next request.
961 *
962 * If we stopped receiving because of an error, then close now to avoid
963 * getting stuck and to prevent accidental request smuggling.
964 */
965
966 if (const char *reason = stoppedReceiving()) {
967 debugs(33, 3, "closing for earlier request error: " << reason);
968 clientConnection->close();
969 return;
970 }
971
972 /** \par
973 * Attempt to parse a request from the request buffer.
974 * If we've been fed a pipelined request it may already
975 * be in our read buffer.
976 *
977 \par
978 * This needs to fall through - if we're unlucky and parse the _last_ request
979 * from our read buffer we may never re-register for another client read.
980 */
981
982 if (clientParseRequests()) {
983 debugs(33, 3, clientConnection << ": parsed next request from buffer");
984 }
985
986 /** \par
987 * Either we need to kick-start another read or, if we have
988 * a half-closed connection, kill it after the last request.
989 * This saves waiting for half-closed connections to finished being
990 * half-closed _AND_ then, sometimes, spending "Timeout" time in
991 * the keepalive "Waiting for next request" state.
992 */
993 if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
994 debugs(33, 3, "half-closed client with no pending requests, closing");
995 clientConnection->close();
996 return;
997 }
998
999 /** \par
1000 * At this point we either have a parsed request (which we've
1001 * kicked off the processing for) or not. If we have a deferred
1002 * request (parsed but deferred for pipeling processing reasons)
1003 * then look at processing it. If not, simply kickstart
1004 * another read.
1005 */
1006 Http::StreamPointer deferredRequest = pipeline.front();
1007 if (deferredRequest != nullptr) {
1008 debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded");
1009 ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
1010 } else if (flags.readMore) {
1011 debugs(33, 3, clientConnection << ": calling readNextRequest()");
1012 readNextRequest();
1013 } else {
1014 // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
1015 debugs(33, DBG_IMPORTANT, MYNAME << "abandoning " << clientConnection);
1016 }
1017 }
1018
1019 void
1020 ConnStateData::stopSending(const char *error)
1021 {
1022 debugs(33, 4, HERE << "sending error (" << clientConnection << "): " << error <<
1023 "; old receiving error: " <<
1024 (stoppedReceiving() ? stoppedReceiving_ : "none"));
1025
1026 if (const char *oldError = stoppedSending()) {
1027 debugs(33, 3, HERE << "already stopped sending: " << oldError);
1028 return; // nothing has changed as far as this connection is concerned
1029 }
1030 stoppedSending_ = error;
1031
1032 if (!stoppedReceiving()) {
1033 if (const int64_t expecting = mayNeedToReadMoreBody()) {
1034 debugs(33, 5, HERE << "must still read " << expecting <<
1035 " request body bytes with " << inBuf.length() << " unused");
1036 return; // wait for the request receiver to finish reading
1037 }
1038 }
1039
1040 clientConnection->close();
1041 }
1042
1043 void
1044 ConnStateData::afterClientWrite(size_t size)
1045 {
1046 if (pipeline.empty())
1047 return;
1048
1049 auto ctx = pipeline.front();
1050 if (size) {
1051 statCounter.client_http.kbytes_out += size;
1052 if (ctx->http->logType.isTcpHit())
1053 statCounter.client_http.hit_kbytes_out += size;
1054 }
1055 ctx->writeComplete(size);
1056 }
1057
1058 Http::Stream *
1059 ConnStateData::abortRequestParsing(const char *const uri)
1060 {
1061 ClientHttpRequest *http = new ClientHttpRequest(this);
1062 http->req_sz = inBuf.length();
1063 http->setErrorUri(uri);
1064 auto *context = new Http::Stream(clientConnection, http);
1065 StoreIOBuffer tempBuffer;
1066 tempBuffer.data = context->reqbuf;
1067 tempBuffer.length = HTTP_REQBUF_SZ;
1068 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
1069 clientReplyStatus, new clientReplyContext(http), clientSocketRecipient,
1070 clientSocketDetach, context, tempBuffer);
1071 return context;
1072 }
1073
1074 void
1075 ConnStateData::startShutdown()
1076 {
1077 // RegisteredRunner API callback - Squid has been shut down
1078
1079 // if connection is idle terminate it now,
1080 // otherwise wait for grace period to end
1081 if (pipeline.empty())
1082 endingShutdown();
1083 }
1084
1085 void
1086 ConnStateData::endingShutdown()
1087 {
1088 // RegisteredRunner API callback - Squid shutdown grace period is over
1089
1090 // force the client connection to close immediately
1091 // swanSong() in the close handler will cleanup.
1092 if (Comm::IsConnOpen(clientConnection))
1093 clientConnection->close();
1094 }
1095
1096 char *
1097 skipLeadingSpace(char *aString)
1098 {
1099 char *result = aString;
1100
1101 while (xisspace(*aString))
1102 ++aString;
1103
1104 return result;
1105 }
1106
1107 /**
1108 * 'end' defaults to NULL for backwards compatibility
1109 * remove default value if we ever get rid of NULL-terminated
1110 * request buffers.
1111 */
1112 const char *
1113 findTrailingHTTPVersion(const char *uriAndHTTPVersion, const char *end)
1114 {
1115 if (NULL == end) {
1116 end = uriAndHTTPVersion + strcspn(uriAndHTTPVersion, "\r\n");
1117 assert(end);
1118 }
1119
1120 for (; end > uriAndHTTPVersion; --end) {
1121 if (*end == '\n' || *end == '\r')
1122 continue;
1123
1124 if (xisspace(*end)) {
1125 if (strncasecmp(end + 1, "HTTP/", 5) == 0)
1126 return end + 1;
1127 else
1128 break;
1129 }
1130 }
1131
1132 return NULL;
1133 }
1134
/// Rewrites a reverse-proxy (accelerated) request URI into an absolute URL
/// using the http_port vhost/vport/defaultsite settings and, when enabled,
/// the request's Host: header.
/// \returns a malloc-ed absolute URL, or nullptr to keep the URI as provided
static char *
prepareAcceleratedURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
{
    int vhost = conn->port->vhost;
    int vport = conn->port->vport;
    static char ipbuf[MAX_IPSTRLEN]; // scratch for the VPORT fallback below

    /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */

    // cache-manager pseudo-URLs pass through untouched
    static const SBuf cache_object("cache_object://");
    if (hp->requestUri().startsWith(cache_object))
        return nullptr; /* already in good shape */

    // XXX: re-use proper URL parser for this
    SBuf url = hp->requestUri(); // use full provided URI if we abort
    do { // use a loop so we can break out of it
        ::Parser::Tokenizer tok(url);
        if (tok.skip('/')) // origin-form URL already.
            break;

        // absolute-form URI with vhost on: trust the client-provided URL
        if (conn->port->vhost)
            return nullptr; /* already in good shape */

        // skip the URI scheme
        static const CharacterSet uriScheme = CharacterSet("URI-scheme","+-.") + CharacterSet::ALPHA + CharacterSet::DIGIT;
        static const SBuf uriSchemeEnd("://");
        if (!tok.skipAll(uriScheme) || !tok.skip(uriSchemeEnd))
            break;

        // skip the authority segment
        // RFC 3986 complex nested ABNF for "authority" boils down to this:
        static const CharacterSet authority = CharacterSet("authority","-._~%:@[]!$&'()*+,;=") +
                                              CharacterSet::HEXDIG + CharacterSet::ALPHA + CharacterSet::DIGIT;
        if (!tok.skipAll(authority))
            break;

        // reduce 'url' to just the path (plus optional query/fragment)
        static const SBuf slashUri("/");
        const SBuf t = tok.remaining();
        if (t.isEmpty())
            url = slashUri;
        else if (t[0]=='/') // looks like path
            url = t;
        else if (t[0]=='?' || t[0]=='#') { // looks like query or fragment. fix '/'
            url = slashUri;
            url.append(t);
        } // else do nothing. invalid path

    } while(false);

#if SHOULD_REJECT_UNKNOWN_URLS
    // reject URI which are not well-formed even after the processing above
    // NOTE(review): abortRequestParsing() returns Http::Stream*, not char*,
    // so this normally-disabled block would not compile if enabled — confirm
    // before turning SHOULD_REJECT_UNKNOWN_URLS on.
    if (url.isEmpty() || url[0] != '/') {
        hp->parseStatusCode = Http::scBadRequest;
        return conn->abortRequestParsing("error:invalid-request");
    }
#endif

    // vport < 0 means "use the port the client actually connected to"
    if (vport < 0)
        vport = conn->clientConnection->local.port();

    char *receivedHost = nullptr;
    if (vhost && (receivedHost = hp->getHostHeaderField())) {
        // Case 1: rebuild authority from the Host: header (optionally re-porting it)
        SBuf host(receivedHost);
        debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport);
        if (vport > 0) {
            // remove existing :port (if any), cope with IPv6+ without port
            const auto lastColonPos = host.rfind(':');
            if (lastColonPos != SBuf::npos && *host.rbegin() != ']') {
                host.chop(0, lastColonPos); // truncate until the last colon
            }
            host.appendf(":%d", vport);
        } // else nothing to alter port-wise.
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const auto url_sz = scheme.length() + host.length() + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH SQUIDSBUFPH, SQUIDSBUFPRINT(scheme), SQUIDSBUFPRINT(host), SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VHOST REWRITE: " << uri);
        return uri;
    } else if (conn->port->defaultsite /* && !vhost */) {
        // Case 2: use the configured defaultsite= hostname
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn->port->defaultsite << " + vport=" << vport);
        char vportStr[32];
        vportStr[0] = '\0';
        if (vport > 0) {
            snprintf(vportStr, sizeof(vportStr),":%d",vport);
        }
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + strlen(conn->port->defaultsite) + sizeof(vportStr) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s%s" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), conn->port->defaultsite, vportStr, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << uri);
        return uri;
    } else if (vport > 0 /* && (!vhost || no Host:) */) {
        // Case 3: fall back to the local socket address as the hostname
        debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport);
        /* Put the local socket IP address as the hostname, with whatever vport we found */
        conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + sizeof(ipbuf) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), ipbuf, vport, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VPORT REWRITE: " << uri);
        return uri;
    }

    // no rewrite rule applied; caller keeps the URI as received
    return nullptr;
}
1242
1243 static char *
1244 buildUrlFromHost(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1245 {
1246 char *uri = nullptr;
1247 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1248 if (const char *host = hp->getHostHeaderField()) {
1249 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1250 const int url_sz = scheme.length() + strlen(host) + hp->requestUri().length() + 32;
1251 uri = static_cast<char *>(xcalloc(url_sz, 1));
1252 snprintf(uri, url_sz, SQUIDSBUFPH "://%s" SQUIDSBUFPH,
1253 SQUIDSBUFPRINT(scheme),
1254 host,
1255 SQUIDSBUFPRINT(hp->requestUri()));
1256 }
1257 return uri;
1258 }
1259
/// Builds an absolute URL for a request received after switching to HTTPS
/// (e.g., inside a bumped CONNECT tunnel). Prefers the Host: header; with
/// OpenSSL, falls back to the TLS SNI or the original CONNECT target.
/// \returns a malloc-ed URL, or nullptr when the URI is already absolute
char *
ConnStateData::prepareTlsSwitchingURL(const Http1::RequestParserPointer &hp)
{
    Must(switchedToHttps());

    // an absolute-form URI needs no rewriting
    if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
        return nullptr; /* already in good shape */

    char *uri = buildUrlFromHost(this, hp);
#if USE_OPENSSL
    if (!uri) {
        // no Host: header; reconstruct authority from the CONNECT/TLS context
        Must(tlsConnectPort);
        Must(!tlsConnectHostOrIp.isEmpty());
        SBuf useHost;
        if (!tlsClientSni().isEmpty())
            useHost = tlsClientSni(); // client-indicated server name wins
        else
            useHost = tlsConnectHostOrIp;

        const SBuf &scheme = AnyP::UriScheme(transferProtocol.protocol).image();
        const int url_sz = scheme.length() + useHost.length() + hp->requestUri().length() + 32;
        uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH ":%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme),
                 SQUIDSBUFPRINT(useHost),
                 tlsConnectPort,
                 SQUIDSBUFPRINT(hp->requestUri()));
    }
#endif
    if (uri)
        debugs(33, 5, "TLS switching host rewrite: " << uri);
    return uri;
}
1293
1294 static char *
1295 prepareTransparentURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1296 {
1297 // TODO Must() on URI !empty when the parser supports throw. For now avoid assert().
1298 if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
1299 return nullptr; /* already in good shape */
1300
1301 char *uri = buildUrlFromHost(conn, hp);
1302 if (!uri) {
1303 /* Put the local socket IP address as the hostname. */
1304 static char ipbuf[MAX_IPSTRLEN];
1305 conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
1306 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1307 const int url_sz = sizeof(ipbuf) + hp->requestUri().length() + 32;
1308 uri = static_cast<char *>(xcalloc(url_sz, 1));
1309 snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
1310 SQUIDSBUFPRINT(scheme),
1311 ipbuf, conn->clientConnection->local.port(), SQUIDSBUFPRINT(hp->requestUri()));
1312 }
1313
1314 if (uri)
1315 debugs(33, 5, "TRANSPARENT REWRITE: " << uri);
1316 return uri;
1317 }
1318
/// Attempts to parse one HTTP/1.x request from inBuf and to build the
/// client-stream transaction for it, rewriting origin-form URIs into
/// absolute URLs where the port mode (accel/intercept/TLS-switched) requires.
/// \param hp the request parser; must not be nil
/// \returns a new Http::Stream for the parsed (or rejected) request, or
///          NULL when more bytes are needed to complete the request line/headers
Http::Stream *
ConnStateData::parseHttpRequest(const Http1::RequestParserPointer &hp)
{
    /* Attempt to parse the first line; this will define where the method, url, version and header begin */
    {
        Must(hp);

        // keep a copy of the raw bytes in case an on_unsupported_protocol
        // tunnel needs to replay them later
        if (preservingClientData_)
            preservedClientData = inBuf;

        const bool parsedOk = hp->parse(inBuf);

        // sync the buffers after parsing.
        inBuf = hp->remaining();

        if (hp->needsMoreData()) {
            debugs(33, 5, "Incomplete request, waiting for end of request line");
            return NULL;
        }

        if (!parsedOk) {
            // distinguish oversized requests from malformed ones for the error page
            const bool tooBig =
                hp->parseStatusCode == Http::scRequestHeaderFieldsTooLarge ||
                hp->parseStatusCode == Http::scUriTooLong;
            auto result = abortRequestParsing(
                tooBig ? "error:request-too-large" : "error:invalid-request");
            // assume that remaining leftovers belong to this bad request
            if (!inBuf.isEmpty())
                consumeInput(inBuf.length());
            return result;
        }
    }

    /* We know the whole request is in parser now */
    debugs(11, 2, "HTTP Client " << clientConnection);
    debugs(11, 2, "HTTP Client REQUEST:\n---------\n" <<
           hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol() << "\n" <<
           hp->mimeHeader() <<
           "\n----------");

    /* deny CONNECT via accelerated ports */
    if (hp->method() == Http::METHOD_CONNECT && port != NULL && port->flags.accelSurrogate) {
        debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << transferProtocol << " Accelerator port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    /* RFC 7540 section 11.6 registers the method PRI as HTTP/2 specific
     * Deny "PRI" method if used in HTTP/1.x or 0.9 versions.
     * If seen it signals a broken client or proxy has corrupted the traffic.
     */
    if (hp->method() == Http::METHOD_PRI && hp->messageProtocol() < Http::ProtocolVersion(2,0)) {
        debugs(33, DBG_IMPORTANT, "WARNING: PRI method received on " << transferProtocol << " port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    if (hp->method() == Http::METHOD_NONE) {
        debugs(33, DBG_IMPORTANT, "WARNING: Unsupported method: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:unsupported-request-method");
    }

    // Process headers after request line
    debugs(33, 3, "complete request received. " <<
           "prefix_sz = " << hp->messageHeaderSize() <<
           ", request-line-size=" << hp->firstLineSize() <<
           ", mime-header-size=" << hp->headerBlockSize() <<
           ", mime header block:\n" << hp->mimeHeader() << "\n----------");

    /* Ok, all headers are received */
    ClientHttpRequest *http = new ClientHttpRequest(this);

    http->req_sz = hp->messageHeaderSize();
    Http::Stream *result = new Http::Stream(clientConnection, http);

    // wire the new transaction into the client stream machinery
    StoreIOBuffer tempBuffer;
    tempBuffer.data = result->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = result;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    /* set url */
    debugs(33,5, "Prepare absolute URL from " <<
           (transparent()?"intercept":(port->flags.accelSurrogate ? "accel":"")));
    /* Rewrite the URL in transparent or accelerator mode */
    /* NP: there are several cases to traverse here:
     *  - standard mode (forward proxy)
     *  - transparent mode (TPROXY)
     *  - transparent mode with failures
     *  - intercept mode (NAT)
     *  - intercept mode with failures
     *  - accelerator mode (reverse proxy)
     *  - internal relative-URL
     *  - mixed combos of the above with internal URL
     *  - remote interception with PROXY protocol
     *  - remote reverse-proxy with PROXY protocol
     */
    if (switchedToHttps()) {
        http->uri = prepareTlsSwitchingURL(hp);
    } else if (transparent()) {
        /* intercept or transparent mode, properly working with no failures */
        http->uri = prepareTransparentURL(this, hp);

    } else if (internalCheck(hp->requestUri())) { // NP: only matches relative-URI
        /* internal URL mode */
        /* prepend our name & port */
        http->uri = xstrdup(internalLocalUri(NULL, hp->requestUri()));
        // We just re-wrote the URL. Must replace the Host: header.
        //  But have not parsed there yet!! flag for local-only handling.
        http->flags.internal = true;

    } else if (port->flags.accelSurrogate) {
        /* accelerator mode */
        http->uri = prepareAcceleratedURL(this, hp);
        http->flags.accel = true;
    }

    // each rewrite helper above may decline (returning nil) on failure
    if (!http->uri) {
        /* No special rewrites have been applied above, use the
         * requested url. may be rewritten later, so make extra room */
        int url_sz = hp->requestUri().length() + Config.appendDomainLen + 5;
        http->uri = (char *)xcalloc(url_sz, 1);
        SBufToCstring(http->uri, hp->requestUri());
    }

    result->flags.parsed_ok = 1;
    return result;
}
1454
1455 bool
1456 ConnStateData::shouldCloseOnEof() const
1457 {
1458 if (pipeline.empty() && inBuf.isEmpty()) {
1459 debugs(33, 4, "yes, without active requests and unparsed input");
1460 return true;
1461 }
1462
1463 if (!Config.onoff.half_closed_clients) {
1464 debugs(33, 3, "yes, without half_closed_clients");
1465 return true;
1466 }
1467
1468 // Squid currently tries to parse (possibly again) a partially received
1469 // request after an EOF with half_closed_clients. To give that last parse in
1470 // afterClientRead() a chance, we ignore partially parsed requests here.
1471 debugs(33, 3, "no, honoring half_closed_clients");
1472 return false;
1473 }
1474
1475 void
1476 ConnStateData::consumeInput(const size_t byteCount)
1477 {
1478 assert(byteCount > 0 && byteCount <= inBuf.length());
1479 inBuf.consume(byteCount);
1480 debugs(33, 5, "inBuf has " << inBuf.length() << " unused bytes");
1481 }
1482
1483 void
1484 ConnStateData::clientAfterReadingRequests()
1485 {
1486 // Were we expecting to read more request body from half-closed connection?
1487 if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection->fd)) {
1488 debugs(33, 3, HERE << "truncated body: closing half-closed " << clientConnection);
1489 clientConnection->close();
1490 return;
1491 }
1492
1493 if (flags.readMore)
1494 readSomeData();
1495 }
1496
1497 void
1498 ConnStateData::quitAfterError(HttpRequest *request)
1499 {
1500 // From HTTP p.o.v., we do not have to close after every error detected
1501 // at the client-side, but many such errors do require closure and the
1502 // client-side code is bad at handling errors so we play it safe.
1503 if (request)
1504 request->flags.proxyKeepalive = false;
1505 flags.readMore = false;
1506 debugs(33,4, HERE << "Will close after error: " << clientConnection);
1507 }
1508
#if USE_OPENSSL
/// Serves an error that was prepared (or becomes detectable) during SslBump
/// CONNECT processing: either a stored error entry, or a certificate/domain
/// mismatch discovered once the bumped request reveals the intended host.
/// \returns true when an error response has been queued for the client
bool ConnStateData::serveDelayedError(Http::Stream *context)
{
    ClientHttpRequest *http = context->http;

    // only SslBump-ed connections can carry a delayed error
    if (!sslServerBump)
        return false;

    assert(sslServerBump->entry);
    // Did we create an error entry while processing CONNECT?
    if (!sslServerBump->entry->isEmpty()) {
        quitAfterError(http->request);

        // Get the saved error entry and send it to the client by replacing the
        // ClientHttpRequest store entry with it.
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert(repContext);
        // TODO: fix the "delated" typo in this runtime debug message
        debugs(33, 5, "Responding with delated error for " << http->uri);
        repContext->setReplyToStoreEntry(sslServerBump->entry, "delayed SslBump error");

        // Get error details from the fake certificate-peeking request.
        http->request->error.update(sslServerBump->request->error);
        context->pullData();
        return true;
    }

    // In bump-server-first mode, we have not necessarily seen the intended
    // server name at certificate-peeking time. Check for domain mismatch now,
    // when we can extract the intended name from the bumped HTTP request.
    if (const Security::CertPointer &srvCert = sslServerBump->serverCert) {
        HttpRequest *request = http->request;
        if (!Ssl::checkX509ServerValidity(srvCert.get(), request->url.host())) {
            debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
                   "does not match domainname " << request->url.host());

            // the sslproxy_cert_error ACL may explicitly allow the mismatch
            bool allowDomainMismatch = false;
            if (Config.ssl_client.cert_error) {
                ACLFilledChecklist check(Config.ssl_client.cert_error, request, dash_str);
                check.al = http->al;
                check.sslErrors = new Security::CertErrors(Security::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH, srvCert));
                check.syncAle(request, http->log_uri);
                allowDomainMismatch = check.fastCheck().allowed();
                delete check.sslErrors;
                check.sslErrors = NULL;
            }

            if (!allowDomainMismatch) {
                quitAfterError(request);

                clientStreamNode *node = context->getClientReplyContext();
                clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
                assert (repContext);

                // preserve the hierarchy details collected by the fake CONNECT request
                request->hier = sslServerBump->request->hier;

                // Create an error object and fill it
                const auto err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request, http->al);
                err->src_addr = clientConnection->remote;
                const Security::ErrorDetail::Pointer errDetail = new Security::ErrorDetail(
                    SQUID_X509_V_ERR_DOMAIN_MISMATCH,
                    srvCert, nullptr);
                updateError(ERR_SECURE_CONNECT_FAIL, errDetail);
                repContext->setReplyToError(request->method, err);
                assert(context->http->out.offset == 0);
                context->pullData();
                return true;
            }
        }
    }

    return false;
}
#endif // USE_OPENSSL
1583
1584 /// ConnStateData::tunnelOnError() wrapper. Reduces code changes. TODO: Remove.
1585 bool
1586 clientTunnelOnError(ConnStateData *conn, Http::StreamPointer &context, HttpRequest::Pointer &request, const HttpRequestMethod& method, err_type requestError)
1587 {
1588 assert(conn);
1589 assert(conn->pipeline.front() == context);
1590 return conn->tunnelOnError(method, requestError);
1591 }
1592
/// initiate tunneling if possible or return false otherwise
/// (consults the on_unsupported_protocol ACL and replays the preserved
/// client bytes through a blind tunnel when tunneling is selected)
bool
ConnStateData::tunnelOnError(const HttpRequestMethod &method, const err_type requestError)
{
    // tunneling on errors is an opt-in feature
    if (!Config.accessList.on_unsupported_protocol) {
        debugs(33, 5, "disabled; send error: " << requestError);
        return false;
    }

    // we can only tunnel if the original client bytes were preserved
    if (!preservingClientData_) {
        debugs(33, 3, "may have forgotten client data; send error: " << requestError);
        return false;
    }

    // the current transaction (if any) supplies request/logging details for the ACL
    const auto context = pipeline.front();
    const auto http = context ? context->http : nullptr;
    const auto request = http ? http->request : nullptr;

    ACLFilledChecklist checklist(Config.accessList.on_unsupported_protocol, request, nullptr);
    checklist.al = http ? http->al : nullptr;
    checklist.requestErrorType = requestError;
    checklist.src_addr = clientConnection->remote;
    checklist.my_addr = clientConnection->local;
    checklist.conn(this);
    const char *log_uri = http ? http->log_uri : nullptr;
    checklist.syncAle(request, log_uri);
    auto answer = checklist.fastCheck();
    // NOTE(review): presumably kind == 1 selects the "tunnel" action of
    // on_unsupported_protocol — confirm against the directive's parser
    if (answer.allowed() && answer.kind == 1) {
        debugs(33, 3, "Request will be tunneled to server");
        if (context)
            context->finished(); // Will remove from pipeline queue
        // stop monitoring the socket for reads; the tunnel takes over
        Comm::SetSelect(clientConnection->fd, COMM_SELECT_READ, NULL, NULL, 0);
        return initiateTunneledRequest(request, Http::METHOD_NONE, "unknown-protocol", preservedClientData);
    }
    debugs(33, 3, "denied; send error: " << requestError);
    return false;
}
1630
1631 void
1632 clientProcessRequestFinished(ConnStateData *conn, const HttpRequest::Pointer &request)
1633 {
1634 /*
1635 * DPW 2007-05-18
1636 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
1637 * to here because calling comm_reset_close() causes http to
1638 * be freed before accessing.
1639 */
1640 if (request != NULL && request->flags.resetTcp && Comm::IsConnOpen(conn->clientConnection)) {
1641 debugs(33, 3, HERE << "Sending TCP RST on " << conn->clientConnection);
1642 conn->flags.readMore = false;
1643 comm_reset_close(conn->clientConnection);
1644 }
1645 }
1646
/// Finishes setting up an already-parsed request (HTTP when hp is set, FTP
/// when hp is nil): connection/request flags, internal-URL detection,
/// validity checks, request-body plumbing, and finally http->doCallouts().
/// Every exit path runs clientProcessRequestFinished().
void
clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
{
    ClientHttpRequest *http = context->http;
    bool mustReplyToOptions = false;
    bool expectBody = false;

    // We already have the request parsed and checked, so we
    // only need to go through the final body/conn setup to doCallouts().
    assert(http->request);
    HttpRequest::Pointer request = http->request;

    // temporary hack to avoid splitting this huge function with sensitive code
    const bool isFtp = !hp;

    // Some blobs below are still HTTP-specific, but we would have to rewrite
    // this entire function to remove them from the FTP code path. Connection
    // setup and body_pipe preparation blobs are needed for FTP.

    request->manager(conn, http->al);

    request->flags.accelerated = http->flags.accel;
    request->flags.sslBumped=conn->switchedToHttps();
    // TODO: decouple http->flags.accel from request->flags.sslBumped
    request->flags.noDirect = (request->flags.accelerated && !request->flags.sslBumped) ?
                              !conn->port->allow_direct : 0;
    // record how this request reached us (FTP, HTTPS, or plain HTTP)
    request->sources |= isFtp ? Http::Message::srcFtp :
                        ((request->flags.sslBumped || conn->port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    // bumped requests inherit credentials established on the CONNECT
    if (request->flags.sslBumped) {
        if (conn->getAuth() != NULL)
            request->auth_user_request = conn->getAuth();
    }
#endif

    // detect requests for this proxy's own internal objects
    if (internalCheck(request->url.path())) {
        if (internalHostnameIs(request->url.host()) && request->url.port() == getMyPort()) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true));
            http->flags.internal = true;
        } else if (Config.onoff.global_internal_static && internalStaticCheck(request->url.path())) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (global_internal_static on)");
            request->url.setScheme(AnyP::PROTO_HTTP, "http");
            request->url.host(internalHostname());
            request->url.port(getMyPort());
            http->flags.internal = true;
            http->setLogUriToRequestUri();
        } else
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (not this proxy)");
    }

    request->flags.internal = http->flags.internal;

    if (!isFtp) {
        // XXX: for non-HTTP messages instantiate a different Http::Message child type
        // for now Squid only supports HTTP requests
        const AnyP::ProtocolVersion &http_ver = hp->messageProtocol();
        assert(request->http_ver.protocol == http_ver.protocol);
        request->http_ver.major = http_ver.major;
        request->http_ver.minor = http_ver.minor;
    }

    const auto unsupportedTe = request->header.unsupportedTe();

    // OPTIONS with Max-Forwards: 0 must be answered by this hop
    mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
                         (request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
    if (!urlCheckRequest(request.getRaw()) || mustReplyToOptions || unsupportedTe) {
        // not something we can forward: answer with ERR_UNSUP_REQ and close
        clientStreamNode *node = context->getClientReplyContext();
        conn->quitAfterError(request.getRaw());
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        repContext->setReplyToError(ERR_UNSUP_REQ, Http::scNotImplemented, request->method, NULL,
                                    conn, request.getRaw(), nullptr, nullptr);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    const auto chunked = request->header.chunked();
    if (!chunked && !clientIsContentLengthValid(request.getRaw())) {
        // a body-bearing request without a usable Content-Length
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        conn->quitAfterError(request.getRaw());
        repContext->setReplyToError(ERR_INVALID_REQ,
                                    Http::scLengthRequired, request->method, NULL,
                                    conn, request.getRaw(), nullptr, nullptr);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    clientSetKeepaliveFlag(http);
    // Let tunneling code be fully responsible for CONNECT requests
    if (http->request->method == Http::METHOD_CONNECT) {
        context->mayUseConnection(true);
        conn->flags.readMore = false;
    }

#if USE_OPENSSL
    // deliver any error prepared while bumping the CONNECT, instead of forwarding
    if (conn->switchedToHttps() && conn->serveDelayedError(context)) {
        clientProcessRequestFinished(conn, request);
        return;
    }
#endif

    /* Do we expect a request-body? */
    expectBody = chunked || request->content_length > 0;
    if (!context->mayUseConnection() && expectBody) {
        // -1 signals "until the chunked encoding says so"
        request->body_pipe = conn->expectRequestBody(
                                 chunked ? -1 : request->content_length);

        /* Is it too large? */
        if (!chunked && // if chunked, we will check as we accumulate
                clientIsRequestBodyTooLargeForPolicy(request->content_length)) {
            clientStreamNode *node = context->getClientReplyContext();
            clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
            assert (repContext);
            conn->quitAfterError(request.getRaw());
            repContext->setReplyToError(ERR_TOO_BIG,
                                        Http::scPayloadTooLarge, Http::METHOD_NONE, NULL,
                                        conn, http->request, nullptr, nullptr);
            assert(context->http->out.offset == 0);
            context->pullData();
            clientProcessRequestFinished(conn, request);
            return;
        }

        if (!isFtp) {
            // We may stop producing, comm_close, and/or call setReplyToError()
            // below, so quit on errors to avoid http->doCallouts()
            if (!conn->handleRequestBodyData()) {
                clientProcessRequestFinished(conn, request);
                return;
            }

            if (!request->body_pipe->productionEnded()) {
                debugs(33, 5, "need more request body");
                context->mayUseConnection(true);
                assert(conn->flags.readMore);
            }
        }
    }

    http->calloutContext = new ClientRequestContext(http);

    http->doCallouts();

    clientProcessRequestFinished(conn, request);
}
1798
1799 void
1800 ConnStateData::add(const Http::StreamPointer &context)
1801 {
1802 debugs(33, 3, context << " to " << pipeline.count() << '/' << pipeline.nrequests);
1803 if (bareError) {
1804 debugs(33, 5, "assigning " << bareError);
1805 assert(context);
1806 assert(context->http);
1807 context->http->updateError(bareError);
1808 bareError.clear();
1809 }
1810 pipeline.add(context);
1811 }
1812
1813 int
1814 ConnStateData::pipelinePrefetchMax() const
1815 {
1816 // TODO: Support pipelined requests through pinned connections.
1817 if (pinning.pinned)
1818 return 0;
1819 return Config.pipeline_max_prefetch;
1820 }
1821
1822 /**
1823 * Limit the number of concurrent requests.
1824 * \return true when there are available position(s) in the pipeline queue for another request.
1825 * \return false when the pipeline queue is full or disabled.
1826 */
1827 bool
1828 ConnStateData::concurrentRequestQueueFilled() const
1829 {
1830 const int existingRequestCount = pipeline.count();
1831
1832 // default to the configured pipeline size.
1833 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
1834 #if USE_OPENSSL
1835 const int internalRequest = (transparent() && sslBumpMode == Ssl::bumpSplice) ? 1 : 0;
1836 #else
1837 const int internalRequest = 0;
1838 #endif
1839 const int concurrentRequestLimit = pipelinePrefetchMax() + 1 + internalRequest;
1840
1841 // when queue filled already we can't add more.
1842 if (existingRequestCount >= concurrentRequestLimit) {
1843 debugs(33, 3, clientConnection << " max concurrent requests reached (" << concurrentRequestLimit << ")");
1844 debugs(33, 5, clientConnection << " deferring new request until one is done");
1845 return true;
1846 }
1847
1848 return false;
1849 }
1850
1851 /**
1852 * Perform proxy_protocol_access ACL tests on the client which
1853 * connected to PROXY protocol port to see if we trust the
1854 * sender enough to accept their PROXY header claim.
1855 */
1856 bool
1857 ConnStateData::proxyProtocolValidateClient()
1858 {
1859 if (!Config.accessList.proxyProtocol)
1860 return proxyProtocolError("PROXY client not permitted by default ACL");
1861
1862 ACLFilledChecklist ch(Config.accessList.proxyProtocol, NULL, clientConnection->rfc931);
1863 ch.src_addr = clientConnection->remote;
1864 ch.my_addr = clientConnection->local;
1865 ch.conn(this);
1866
1867 if (!ch.fastCheck().allowed())
1868 return proxyProtocolError("PROXY client not permitted by ACLs");
1869
1870 return true;
1871 }
1872
1873 /**
1874 * Perform cleanup on PROXY protocol errors.
1875 * If header parsing hits a fatal error terminate the connection,
1876 * otherwise wait for more data.
1877 */
1878 bool
1879 ConnStateData::proxyProtocolError(const char *msg)
1880 {
1881 if (msg) {
1882 // This is important to know, but maybe not so much that flooding the log is okay.
1883 #if QUIET_PROXY_PROTOCOL
1884 // display the first of every 32 occurrences at level 1, the others at level 2.
1885 static uint8_t hide = 0;
1886 debugs(33, (hide++ % 32 == 0 ? DBG_IMPORTANT : 2), msg << " from " << clientConnection);
1887 #else
1888 debugs(33, DBG_IMPORTANT, msg << " from " << clientConnection);
1889 #endif
1890 mustStop(msg);
1891 }
1892 return false;
1893 }
1894
1895 /// Attempts to extract a PROXY protocol header from the input buffer and,
1896 /// upon success, stores the parsed header in proxyProtocolHeader_.
1897 /// \returns true if the header was successfully parsed
1898 /// \returns false if more data is needed to parse the header or on error
1899 bool
1900 ConnStateData::parseProxyProtocolHeader()
1901 {
1902 try {
1903 const auto parsed = ProxyProtocol::Parse(inBuf);
1904 proxyProtocolHeader_ = parsed.header;
1905 assert(bool(proxyProtocolHeader_));
1906 inBuf.consume(parsed.size);
1907 needProxyProtocolHeader_ = false;
1908 if (proxyProtocolHeader_->hasForwardedAddresses()) {
1909 clientConnection->local = proxyProtocolHeader_->destinationAddress;
1910 clientConnection->remote = proxyProtocolHeader_->sourceAddress;
1911 if ((clientConnection->flags & COMM_TRANSPARENT))
1912 clientConnection->flags ^= COMM_TRANSPARENT; // prevent TPROXY spoofing of this new IP.
1913 debugs(33, 5, "PROXY/" << proxyProtocolHeader_->version() << " upgrade: " << clientConnection);
1914 }
1915 } catch (const Parser::BinaryTokenizer::InsufficientInput &) {
1916 debugs(33, 3, "PROXY protocol: waiting for more than " << inBuf.length() << " bytes");
1917 return false;
1918 } catch (const std::exception &e) {
1919 return proxyProtocolError(e.what());
1920 }
1921 return true;
1922 }
1923
1924 void
1925 ConnStateData::receivedFirstByte()
1926 {
1927 if (receivedFirstByte_)
1928 return;
1929
1930 receivedFirstByte_ = true;
1931 resetReadTimeout(Config.Timeout.request);
1932 }
1933
1934 /**
1935 * Attempt to parse one or more requests from the input buffer.
1936 * Returns true after completing parsing of at least one request [header]. That
1937 * includes cases where parsing ended with an error (e.g., a huge request).
1938 */
1939 bool
1940 ConnStateData::clientParseRequests()
1941 {
1942 bool parsed_req = false;
1943
1944 debugs(33, 5, HERE << clientConnection << ": attempting to parse");
1945
1946 // Loop while we have read bytes that are not needed for producing the body
1947 // On errors, bodyPipe may become nil, but readMore will be cleared
1948 while (!inBuf.isEmpty() && !bodyPipe && flags.readMore) {
1949
1950 // Prohibit concurrent requests when using a pinned to-server connection
1951 // because our Client classes do not support request pipelining.
1952 if (pinning.pinned && !pinning.readHandler) {
1953 debugs(33, 3, clientConnection << " waits for busy " << pinning.serverConnection);
1954 break;
1955 }
1956
1957 /* Limit the number of concurrent requests */
1958 if (concurrentRequestQueueFilled())
1959 break;
1960
1961 // try to parse the PROXY protocol header magic bytes
1962 if (needProxyProtocolHeader_) {
1963 if (!parseProxyProtocolHeader())
1964 break;
1965
1966 // we have been waiting for PROXY to provide client-IP
1967 // for some lookups, ie rDNS and IDENT.
1968 whenClientIpKnown();
1969
1970 // Done with PROXY protocol which has cleared preservingClientData_.
1971 // If the next protocol supports on_unsupported_protocol, then its
1972 // parseOneRequest() must reset preservingClientData_.
1973 assert(!preservingClientData_);
1974 }
1975
1976 if (Http::StreamPointer context = parseOneRequest()) {
1977 debugs(33, 5, clientConnection << ": done parsing a request");
1978 extendLifetime();
1979 context->registerWithConn();
1980
1981 #if USE_OPENSSL
1982 if (switchedToHttps())
1983 parsedBumpedRequestCount++;
1984 #endif
1985
1986 processParsedRequest(context);
1987
1988 parsed_req = true; // XXX: do we really need to parse everything right NOW ?
1989
1990 if (context->mayUseConnection()) {
1991 debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection");
1992 break;
1993 }
1994 } else {
1995 debugs(33, 5, clientConnection << ": not enough request data: " <<
1996 inBuf.length() << " < " << Config.maxRequestHeaderSize);
1997 Must(inBuf.length() < Config.maxRequestHeaderSize);
1998 break;
1999 }
2000 }
2001
2002 /* XXX where to 'finish' the parsing pass? */
2003 return parsed_req;
2004 }
2005
/// Reacts to freshly read client bytes: continues a TLS handshake in progress
/// or tries to parse buffered bytes as HTTP request(s).
void
ConnStateData::afterClientRead()
{
#if USE_OPENSSL
    if (parsingTlsHandshake) {
        parseTlsHandshake();
        return;
    }
#endif

    /* Process next request */
    if (pipeline.empty())
        fd_note(clientConnection->fd, "Reading next request");

    if (!clientParseRequests()) {
        if (!isOpen())
            return;
        // We may get here if the client half-closed after sending a partial
        // request. See doClientRead() and shouldCloseOnEof().
        // XXX: This partially duplicates ConnStateData::kick().
        if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
            debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
            clientConnection->close();
            return;
        }
    }

    if (!isOpen())
        return;

    clientAfterReadingRequests();
}
2038
2039 /**
2040 * called when new request data has been read from the socket
2041 *
2042 * \retval false called comm_close or setReplyToError (the caller should bail)
2043 * \retval true we did not call comm_close or setReplyToError
2044 */
2045 bool
2046 ConnStateData::handleReadData()
2047 {
2048 // if we are reading a body, stuff data into the body pipe
2049 if (bodyPipe != NULL)
2050 return handleRequestBodyData();
2051 return true;
2052 }
2053
2054 /**
2055 * called when new request body data has been buffered in inBuf
2056 * may close the connection if we were closing and piped everything out
2057 *
2058 * \retval false called comm_close or setReplyToError (the caller should bail)
2059 * \retval true we did not call comm_close or setReplyToError
2060 */
2061 bool
2062 ConnStateData::handleRequestBodyData()
2063 {
2064 assert(bodyPipe != NULL);
2065
2066 if (bodyParser) { // chunked encoding
2067 if (const err_type error = handleChunkedRequestBody()) {
2068 abortChunkedRequestBody(error);
2069 return false;
2070 }
2071 } else { // identity encoding
2072 debugs(33,5, HERE << "handling plain request body for " << clientConnection);
2073 const size_t putSize = bodyPipe->putMoreData(inBuf.c_str(), inBuf.length());
2074 if (putSize > 0)
2075 consumeInput(putSize);
2076
2077 if (!bodyPipe->mayNeedMoreData()) {
2078 // BodyPipe will clear us automagically when we produced everything
2079 bodyPipe = NULL;
2080 }
2081 }
2082
2083 if (!bodyPipe) {
2084 debugs(33,5, HERE << "produced entire request body for " << clientConnection);
2085
2086 if (const char *reason = stoppedSending()) {
2087 /* we've finished reading like good clients,
2088 * now do the close that initiateClose initiated.
2089 */
2090 debugs(33, 3, HERE << "closing for earlier sending error: " << reason);
2091 clientConnection->close();
2092 return false;
2093 }
2094 }
2095
2096 return true;
2097 }
2098
2099 /// parses available chunked encoded body bytes, checks size, returns errors
2100 err_type
2101 ConnStateData::handleChunkedRequestBody()
2102 {
2103 debugs(33, 7, "chunked from " << clientConnection << ": " << inBuf.length());
2104
2105 try { // the parser will throw on errors
2106
2107 if (inBuf.isEmpty()) // nothing to do
2108 return ERR_NONE;
2109
2110 BodyPipeCheckout bpc(*bodyPipe);
2111 bodyParser->setPayloadBuffer(&bpc.buf);
2112 const bool parsed = bodyParser->parse(inBuf);
2113 inBuf = bodyParser->remaining(); // sync buffers
2114 bpc.checkIn();
2115
2116 // dechunk then check: the size limit applies to _dechunked_ content
2117 if (clientIsRequestBodyTooLargeForPolicy(bodyPipe->producedSize()))
2118 return ERR_TOO_BIG;
2119
2120 if (parsed) {
2121 finishDechunkingRequest(true);
2122 Must(!bodyPipe);
2123 return ERR_NONE; // nil bodyPipe implies body end for the caller
2124 }
2125
2126 // if chunk parser needs data, then the body pipe must need it too
2127 Must(!bodyParser->needsMoreData() || bodyPipe->mayNeedMoreData());
2128
2129 // if parser needs more space and we can consume nothing, we will stall
2130 Must(!bodyParser->needsMoreSpace() || bodyPipe->buf().hasContent());
2131 } catch (...) { // TODO: be more specific
2132 debugs(33, 3, HERE << "malformed chunks" << bodyPipe->status());
2133 return ERR_INVALID_REQ;
2134 }
2135
2136 debugs(33, 7, HERE << "need more chunked data" << *bodyPipe->status());
2137 return ERR_NONE;
2138 }
2139
2140 /// quit on errors related to chunked request body handling
2141 void
2142 ConnStateData::abortChunkedRequestBody(const err_type error)
2143 {
2144 finishDechunkingRequest(false);
2145
2146 // XXX: The code below works if we fail during initial request parsing,
2147 // but if we fail when the server connection is used already, the server may send
2148 // us its response too, causing various assertions. How to prevent that?
2149 #if WE_KNOW_HOW_TO_SEND_ERRORS
2150 Http::StreamPointer context = pipeline.front();
2151 if (context != NULL && !context->http->out.offset) { // output nothing yet
2152 clientStreamNode *node = context->getClientReplyContext();
2153 clientReplyContext *repContext = dynamic_cast<clientReplyContext*>(node->data.getRaw());
2154 assert(repContext);
2155 const Http::StatusCode scode = (error == ERR_TOO_BIG) ?
2156 Http::scPayloadTooLarge : HTTP_BAD_REQUEST;
2157 repContext->setReplyToError(error, scode,
2158 repContext->http->request->method,
2159 repContext->http->uri,
2160 CachePeer,
2161 repContext->http->request,
2162 inBuf, NULL);
2163 context->pullData();
2164 } else {
2165 // close or otherwise we may get stuck as nobody will notice the error?
2166 comm_reset_close(clientConnection);
2167 }
2168 #else
2169 debugs(33, 3, HERE << "aborting chunked request without error " << error);
2170 comm_reset_close(clientConnection);
2171 #endif
2172 flags.readMore = false;
2173 }
2174
2175 void
2176 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer )
2177 {
2178 // request reader may get stuck waiting for space if nobody consumes body
2179 if (bodyPipe != NULL)
2180 bodyPipe->enableAutoConsumption();
2181
2182 // kids extend
2183 }
2184
2185 /** general lifetime handler for HTTP requests */
2186 void
2187 ConnStateData::requestTimeout(const CommTimeoutCbParams &io)
2188 {
2189 if (!Comm::IsConnOpen(io.conn))
2190 return;
2191
2192 const err_type error = receivedFirstByte_ ? ERR_REQUEST_PARSE_TIMEOUT : ERR_REQUEST_START_TIMEOUT;
2193 updateError(error);
2194 if (tunnelOnError(HttpRequestMethod(), error))
2195 return;
2196
2197 /*
2198 * Just close the connection to not confuse browsers
2199 * using persistent connections. Some browsers open
2200 * a connection and then do not use it until much
2201 * later (presumeably because the request triggering
2202 * the open has already been completed on another
2203 * connection)
2204 */
2205 debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
2206 io.conn->close();
2207 }
2208
/// Handles expiration of the whole-connection lifetime limit: logs a warning
/// and terminates all transactions on this connection.
void
ConnStateData::lifetimeTimeout(const CommTimeoutCbParams &io)
{
    debugs(33, DBG_IMPORTANT, "WARNING: Closing client connection due to lifetime timeout" <<
           Debug::Extra << "connection: " << io.conn);

    // mark the access-log record as a timeout before terminating
    LogTagsErrors lte;
    lte.timedout = true;
    terminateAll(ERR_LIFETIME_EXP, lte);
}
2219
/// Initializes state for a freshly accepted client connection.
/// \param xact the master transaction describing the accepted connection
ConnStateData::ConnStateData(const MasterXaction::Pointer &xact) :
    AsyncJob("ConnStateData"), // kids overwrite
    Server(xact)
#if USE_OPENSSL
    , tlsParser(Security::HandshakeParser::fromClient)
#endif
{
    // store the details required for creating more MasterXaction objects as new requests come in
    log_addr = xact->tcpClient->remote;
    log_addr.applyClientMask(Config.Addrs.client_netmask);

    // register to receive notice of Squid signal events
    // which may affect long persisting client connections
    registerRunner();
}
2235
/// AsyncJob API: starts servicing the accepted connection — disables path MTU
/// discovery where configured, installs the close handler, and either waits
/// for a PROXY protocol header or proceeds as if the client IP is known.
void
ConnStateData::start()
{
    BodyProducer::start();
    HttpControlMsgSink::start();

    if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF &&
            (transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) {
#if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
        int i = IP_PMTUDISC_DONT;
        if (setsockopt(clientConnection->fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof(i)) < 0) {
            int xerrno = errno;
            debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection << " : " << xstrerr(xerrno));
        }
#else
        static bool reported = false;

        if (!reported) {
            debugs(33, DBG_IMPORTANT, "NOTICE: Path MTU discovery disabling is not supported on your platform.");
            reported = true;
        }
#endif
    }

    // get notified when the client connection is closed by others
    typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
    AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, ConnStateData::connStateClosed);
    comm_add_close_handler(clientConnection->fd, call);

    needProxyProtocolHeader_ = port->flags.proxySurrogate;
    if (needProxyProtocolHeader_) {
        if (!proxyProtocolValidateClient()) // will close the connection on failure
            return;
    } else
        whenClientIpKnown();

    // requires needProxyProtocolHeader_ which is initialized above
    preservingClientData_ = shouldPreserveClientData();
}
2274
/// Performs client-IP-dependent setup: FQDN and IDENT lookups, client-db
/// registration, and (optionally) client delay pool assignment. Called when
/// the true client address is known (immediately, or after PROXY parsing).
void
ConnStateData::whenClientIpKnown()
{
    if (Config.onoff.log_fqdn)
        fqdncache_gethostbyaddr(clientConnection->remote, FQDN_LOOKUP_IF_MISS);

#if USE_IDENT
    if (Ident::TheConfig.identLookup) {
        ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, NULL, NULL);
        identChecklist.src_addr = clientConnection->remote;
        identChecklist.my_addr = clientConnection->local;
        if (identChecklist.fastCheck().allowed())
            Ident::Start(clientConnection, clientIdentDone, this);
    }
#endif

    clientdbEstablished(clientConnection->remote, 1);

#if USE_DELAY_POOLS
    fd_table[clientConnection->fd].clientInfo = NULL;

    if (!Config.onoff.client_db)
        return; // client delay pools require client_db

    const auto &pools = ClientDelayPools::Instance()->pools;
    if (pools.size()) {
        ACLFilledChecklist ch(NULL, NULL, NULL);

        // TODO: we check early to limit error response bandwidth but we
        // should recheck when we can honor delay_pool_uses_indirect
        // TODO: we should also pass the port details for myportname here.
        ch.src_addr = clientConnection->remote;
        ch.my_addr = clientConnection->local;

        for (unsigned int pool = 0; pool < pools.size(); ++pool) {

            /* pools require explicit 'allow' to assign a client into them */
            if (pools[pool]->access) {
                ch.changeAcl(pools[pool]->access);
                auto answer = ch.fastCheck();
                if (answer.allowed()) {

                    /* request client information from db after we did all checks
                       this will save hash lookup if client failed checks */
                    ClientInfo * cli = clientdbGetInfo(clientConnection->remote);
                    assert(cli);

                    /* put client info in FDE */
                    fd_table[clientConnection->fd].clientInfo = cli;

                    /* setup write limiter for this request */
                    const double burst = floor(0.5 +
                                               (pools[pool]->highwatermark * Config.ClientDelay.initial)/100.0);
                    cli->setWriteLimiter(pools[pool]->rate, burst, pools[pool]->highwatermark);
                    break;
                } else {
                    debugs(83, 4, HERE << "Delay pool " << pool << " skipped because ACL " << answer);
                }
            }
        }
    }
#endif

    // kids must extend to actually start doing something (e.g., reading)
}
2340
2341 /** Handle a new connection on an HTTP socket. */
2342 void
2343 httpAccept(const CommAcceptCbParams &params)
2344 {
2345 MasterXaction::Pointer xact = params.xaction;
2346 AnyP::PortCfgPointer s = xact->squidPort;
2347
2348 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2349
2350 if (params.flag != Comm::OK) {
2351 // Its possible the call was still queued when the client disconnected
2352 debugs(33, 2, s->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2353 return;
2354 }
2355
2356 debugs(33, 4, params.conn << ": accepted");
2357 fd_note(params.conn->fd, "client http connect");
2358
2359 if (s->tcp_keepalive.enabled)
2360 commSetTcpKeepalive(params.conn->fd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
2361
2362 ++incoming_sockets_accepted;
2363
2364 // Socket is ready, setup the connection manager to start using it
2365 auto *srv = Http::NewServer(xact);
2366 AsyncJob::Start(srv); // usually async-calls readSomeData()
2367 }
2368
2369 /// Create TLS connection structure and update fd_table
2370 static bool
2371 httpsCreate(const ConnStateData *connState, const Security::ContextPointer &ctx)
2372 {
2373 const auto conn = connState->clientConnection;
2374 if (Security::CreateServerSession(ctx, conn, connState->port->secure, "client https start")) {
2375 debugs(33, 5, "will negotiate TLS on " << conn);
2376 return true;
2377 }
2378
2379 debugs(33, DBG_IMPORTANT, "ERROR: could not create TLS server context for " << conn);
2380 conn->close();
2381 return false;
2382 }
2383
2384 /** negotiate an SSL connection */
2385 static void
2386 clientNegotiateSSL(int fd, void *data)
2387 {
2388 ConnStateData *conn = (ConnStateData *)data;
2389
2390 const auto handshakeResult = Security::Accept(*conn->clientConnection);
2391 switch (handshakeResult.category) {
2392 case Security::IoResult::ioSuccess:
2393 break;
2394
2395 case Security::IoResult::ioWantRead:
2396 Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_READ, clientNegotiateSSL, conn, 0);
2397 return;
2398
2399 case Security::IoResult::ioWantWrite:
2400 Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, conn, 0);
2401 return;
2402
2403 case Security::IoResult::ioError:
2404 debugs(83, (handshakeResult.important ? DBG_IMPORTANT : 2), "ERROR: " << handshakeResult.errorDescription <<
2405 " while accepting a TLS connection on " << conn->clientConnection << ": " << handshakeResult.errorDetail);
2406 // TODO: No ConnStateData::tunnelOnError() on this forward-proxy code
2407 // path because we cannot know the intended connection target?
2408 conn->updateError(ERR_SECURE_ACCEPT_FAIL, handshakeResult.errorDetail);
2409 conn->clientConnection->close();
2410 return;
2411 }
2412
2413 Security::SessionPointer session(fd_table[fd].ssl);
2414
2415 #if USE_OPENSSL
2416 if (Security::SessionIsResumed(session)) {
2417 debugs(83, 2, "Session " << SSL_get_session(session.get()) <<
2418 " reused on FD " << fd << " (" << fd_table[fd].ipaddr <<
2419 ":" << (int)fd_table[fd].remote_port << ")");
2420 } else {
2421 if (Debug::Enabled(83, 4)) {
2422 /* Write out the SSL session details.. actually the call below, but
2423 * OpenSSL headers do strange typecasts confusing GCC.. */
2424 /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
2425 #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
2426 PEM_ASN1_write(reinterpret_cast<i2d_of_void *>(i2d_SSL_SESSION),
2427 PEM_STRING_SSL_SESSION, debug_log,
2428 reinterpret_cast<char *>(SSL_get_session(session.get())),
2429 nullptr, nullptr, 0, nullptr, nullptr);
2430
2431 #elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)
2432
2433 /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
2434 * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
2435 * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
2436 * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
2437 * Because there are two possible usable cast, if you get an error here, try the other
2438 * commented line. */
2439
2440 PEM_ASN1_write((int(*)())i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
2441 debug_log,
2442 reinterpret_cast<char *>(SSL_get_session(session.get())),
2443 nullptr, nullptr, 0, nullptr, nullptr);
2444 /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
2445 debug_log,
2446 reinterpret_cast<char *>(SSL_get_session(session.get())),
2447 nullptr, nullptr, 0, nullptr, nullptr);
2448 */
2449 #else
2450 debugs(83, 4, "With " OPENSSL_VERSION_TEXT ", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source.");
2451
2452 #endif
2453 /* Note: This does not automatically fflush the log file.. */
2454 }
2455
2456 debugs(83, 2, "New session " << SSL_get_session(session.get()) <<
2457 " on FD " << fd << " (" << fd_table[fd].ipaddr << ":" <<
2458 fd_table[fd].remote_port << ")");
2459 }
2460 #else
2461 debugs(83, 2, "TLS session reuse not yet implemented.");
2462 #endif
2463
2464 // Connection established. Retrieve TLS connection parameters for logging.
2465 conn->clientConnection->tlsNegotiations()->retrieveNegotiatedInfo(session);
2466
2467 #if USE_OPENSSL
2468 X509 *client_cert = SSL_get_peer_certificate(session.get());
2469
2470 if (client_cert) {
2471 debugs(83, 3, "FD " << fd << " client certificate: subject: " <<
2472 X509_NAME_oneline(X509_get_subject_name(client_cert), 0, 0));
2473
2474 debugs(83, 3, "FD " << fd << " client certificate: issuer: " <<
2475 X509_NAME_oneline(X509_get_issuer_name(client_cert), 0, 0));
2476
2477 X509_free(client_cert);
2478 } else {
2479 debugs(83, 5, "FD " << fd << " has no client certificate.");
2480 }
2481 #else
2482 debugs(83, 2, "Client certificate requesting not yet implemented.");
2483 #endif
2484
2485 // If we are called, then bumped CONNECT has succeeded. Finalize it.
2486 if (auto xact = conn->pipeline.front()) {
2487 if (xact->http && xact->http->request && xact->http->request->method == Http::METHOD_CONNECT)
2488 xact->finished();
2489 // cannot proceed with encryption if requests wait for plain responses
2490 Must(conn->pipeline.empty());
2491 }
2492 /* careful: finished() above frees request, host, etc. */
2493
2494 conn->readSomeData();
2495 }
2496
2497 /**
2498 * If Security::ContextPointer is given, starts reading the TLS handshake.
2499 * Otherwise, calls switchToHttps to generate a dynamic Security::ContextPointer.
2500 */
2501 static void
2502 httpsEstablish(ConnStateData *connState, const Security::ContextPointer &ctx)
2503 {
2504 assert(connState);
2505 const Comm::ConnectionPointer &details = connState->clientConnection;
2506
2507 if (!ctx || !httpsCreate(connState, ctx))
2508 return;
2509
2510 connState->resetReadTimeout(Config.Timeout.request);
2511
2512 Comm::SetSelect(details->fd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0);
2513 }
2514
2515 #if USE_OPENSSL
2516 /**
2517 * A callback function to use with the ACLFilledChecklist callback.
2518 */
2519 static void
2520 httpsSslBumpAccessCheckDone(Acl::Answer answer, void *data)
2521 {
2522 ConnStateData *connState = (ConnStateData *) data;
2523
2524 // if the connection is closed or closing, just return.
2525 if (!connState->isOpen())
2526 return;
2527
2528 if (answer.allowed()) {
2529 debugs(33, 2, "sslBump action " << Ssl::bumpMode(answer.kind) << "needed for " << connState->clientConnection);
2530 connState->sslBumpMode = static_cast<Ssl::BumpMode>(answer.kind);
2531 } else {
2532 debugs(33, 3, "sslBump not needed for " << connState->clientConnection);
2533 connState->sslBumpMode = Ssl::bumpSplice;
2534 }
2535
2536 if (connState->sslBumpMode == Ssl::bumpTerminate) {
2537 connState->clientConnection->close();
2538 return;
2539 }
2540
2541 if (!connState->fakeAConnectRequest("ssl-bump", connState->inBuf))
2542 connState->clientConnection->close();
2543 }
2544 #endif
2545
2546 /** handle a new HTTPS connection */
2547 static void
2548 httpsAccept(const CommAcceptCbParams &params)
2549 {
2550 MasterXaction::Pointer xact = params.xaction;
2551 const AnyP::PortCfgPointer s = xact->squidPort;
2552
2553 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2554
2555 if (params.flag != Comm::OK) {
2556 // Its possible the call was still queued when the client disconnected
2557 debugs(33, 2, "httpsAccept: " << s->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2558 return;
2559 }
2560
2561 debugs(33, 4, HERE << params.conn << " accepted, starting SSL negotiation.");
2562 fd_note(params.conn->fd, "client https connect");
2563
2564 if (s->tcp_keepalive.enabled) {
2565 commSetTcpKeepalive(params.conn->fd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
2566 }
2567 ++incoming_sockets_accepted;
2568
2569 // Socket is ready, setup the connection manager to start using it
2570 auto *srv = Https::NewServer(xact);
2571 AsyncJob::Start(srv); // usually async-calls postHttpsAccept()
2572 }
2573
/// Continues HTTPS connection setup after accept(): either starts the ssl_bump
/// ACL check (building a fake CONNECT request/ALE for it) or establishes TLS
/// using the port's static context.
void
ConnStateData::postHttpsAccept()
{
    if (port->flags.tunnelSslBumping) {
#if USE_OPENSSL
        debugs(33, 5, "accept transparent connection: " << clientConnection);

        if (!Config.accessList.ssl_bump) {
            // no ssl_bump rules configured: treat as a denied (splice) answer
            httpsSslBumpAccessCheckDone(ACCESS_DENIED, this);
            return;
        }

        MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
        mx->tcpClient = clientConnection;
        // Create a fake HTTP request and ALE for the ssl_bump ACL check,
        // using tproxy/intercept provided destination IP and port.
        // XXX: Merge with subsequent fakeAConnectRequest(), buildFakeRequest().
        // XXX: Do this earlier (e.g., in Http[s]::One::Server constructor).
        HttpRequest *request = new HttpRequest(mx);
        static char ip[MAX_IPSTRLEN];
        assert(clientConnection->flags & (COMM_TRANSPARENT | COMM_INTERCEPTION));
        request->url.host(clientConnection->local.toStr(ip, sizeof(ip)));
        request->url.port(clientConnection->local.port());
        request->myportname = port->name;
        const AccessLogEntry::Pointer connectAle = new AccessLogEntry;
        CodeContext::Reset(connectAle);
        // TODO: Use these request/ALE when waiting for new bumped transactions.

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, request, NULL);
        acl_checklist->src_addr = clientConnection->remote;
        acl_checklist->my_addr = port->s;
        // Build a local AccessLogEntry to allow requiresAle() acls work
        acl_checklist->al = connectAle;
        acl_checklist->al->cache.start_time = current_time;
        acl_checklist->al->tcpClient = clientConnection;
        acl_checklist->al->cache.port = port;
        acl_checklist->al->cache.caddr = log_addr;
        acl_checklist->al->proxyProtocolHeader = proxyProtocolHeader_;
        acl_checklist->al->updateError(bareError);
        HTTPMSGUNLOCK(acl_checklist->al->request);
        acl_checklist->al->request = request;
        HTTPMSGLOCK(acl_checklist->al->request);
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(request, log_uri);
        acl_checklist->nonBlockingCheck(httpsSslBumpAccessCheckDone, this);
#else
        fatal("FATAL: SSL-Bump requires --with-openssl");
#endif
        return;
    } else {
        httpsEstablish(this, port->secure.staticContext);
    }
}
2629
2630 #if USE_OPENSSL
2631 void
2632 ConnStateData::sslCrtdHandleReplyWrapper(void *data, const Helper::Reply &reply)
2633 {
2634 ConnStateData * state_data = (ConnStateData *)(data);
2635 state_data->sslCrtdHandleReply(reply);
2636 }
2637
/// Processes an ssl_crtd helper reply carrying a generated certificate:
/// installs the certificate into the client TLS context (peek/stare or
/// bump path) or, on any failure, continues with a nil context.
void
ConnStateData::sslCrtdHandleReply(const Helper::Reply &reply)
{
    if (!isOpen()) {
        debugs(33, 3, "Connection gone while waiting for ssl_crtd helper reply; helper reply:" << reply);
        return;
    }

    if (reply.result == Helper::BrokenHelper) {
        debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply);
    } else if (!reply.other().hasContent()) {
        debugs(1, DBG_IMPORTANT, HERE << "\"ssl_crtd\" helper returned <NULL> reply.");
    } else {
        Ssl::CrtdMessage reply_message(Ssl::CrtdMessage::REPLY);
        if (reply_message.parse(reply.other().content(), reply.other().contentSize()) != Ssl::CrtdMessage::OK) {
            debugs(33, 5, "Reply from ssl_crtd for " << tlsConnectHostOrIp << " is incorrect");
        } else {
            if (reply.result != Helper::Okay) {
                debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply_message.getBody());
            } else {
                debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " was successfully received from ssl_crtd");
                if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
                    // peek/stare: configure the existing session in place
                    doPeekAndSpliceStep();
                    auto ssl = fd_table[clientConnection->fd].ssl.get();
                    bool ret = Ssl::configureSSLUsingPkeyAndCertFromMemory(ssl, reply_message.getBody().c_str(), *port);
                    if (!ret)
                        debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

                    Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
                    Ssl::configureUnconfiguredSslContext(ctx, signAlgorithm, *port);
                } else {
                    // bump: generate a fresh context and cache it for reuse
                    Security::ContextPointer ctx(Ssl::GenerateSslContextUsingPkeyAndCertFromMemory(reply_message.getBody().c_str(), port->secure, (signAlgorithm == Ssl::algSignTrusted)));
                    if (ctx && !sslBumpCertKey.isEmpty())
                        storeTlsContextToCache(sslBumpCertKey, ctx);
                    getSslContextDone(ctx);
                }
                return;
            }
        }
    }
    // all error paths fall through to here: proceed without a generated context
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2681
2682 void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties &certProperties)
2683 {
2684 certProperties.commonName = sslCommonName_.isEmpty() ? tlsConnectHostOrIp.c_str() : sslCommonName_.c_str();
2685
2686 const bool connectedOk = sslServerBump && sslServerBump->connectedOk();
2687 if (connectedOk) {
2688 if (X509 *mimicCert = sslServerBump->serverCert.get())
2689 certProperties.mimicCert.resetAndLock(mimicCert);
2690
2691 ACLFilledChecklist checklist(NULL, sslServerBump->request.getRaw(),
2692 clientConnection != NULL ? clientConnection->rfc931 : dash_str);
2693 checklist.sslErrors = cbdataReference(sslServerBump->sslErrors());
2694
2695 for (sslproxy_cert_adapt *ca = Config.ssl_client.cert_adapt; ca != NULL; ca = ca->next) {
2696 // If the algorithm already set, then ignore it.
2697 if ((ca->alg == Ssl::algSetCommonName && certProperties.setCommonName) ||
2698 (ca->alg == Ssl::algSetValidAfter && certProperties.setValidAfter) ||
2699 (ca->alg == Ssl::algSetValidBefore && certProperties.setValidBefore) )
2700 continue;
2701
2702 if (ca->aclList && checklist.fastCheck(ca->aclList).allowed()) {
2703 const char *alg = Ssl::CertAdaptAlgorithmStr[ca->alg];
2704 const char *param = ca->param;
2705
2706 // For parameterless CN adaptation, use hostname from the
2707 // CONNECT request.
2708 if (ca->alg == Ssl::algSetCommonName) {
2709 if (!param)
2710 param = tlsConnectHostOrIp.c_str();
2711 certProperties.commonName = param;
2712 certProperties.setCommonName = true;
2713 } else if (ca->alg == Ssl::algSetValidAfter)
2714 certProperties.setValidAfter = true;
2715 else if (ca->alg == Ssl::algSetValidBefore)
2716 certProperties.setValidBefore = true;
2717
2718 debugs(33, 5, HERE << "Matches certificate adaptation aglorithm: " <<
2719 alg << " param: " << (param ? param : "-"));
2720 }
2721 }
2722
2723 certProperties.signAlgorithm = Ssl::algSignEnd;
2724 for (sslproxy_cert_sign *sg = Config.ssl_client.cert_sign; sg != NULL; sg = sg->next) {
2725 if (sg->aclList && checklist.fastCheck(sg->aclList).allowed()) {
2726 certProperties.signAlgorithm = (Ssl::CertSignAlgorithm)sg->alg;
2727 break;
2728 }
2729 }
2730 } else {// did not try to connect (e.g. client-first) or failed to connect
2731 // In case of an error while connecting to the secure server, use a
2732 // trusted certificate, with no mimicked fields and no adaptation
2733 // algorithms. There is nothing we can mimic, so we want to minimize the
2734 // number of warnings the user will have to see to get to the error page.
2735 // We will close the connection, so that the trust is not extended to
2736 // non-Squid content.
2737 certProperties.signAlgorithm = Ssl::algSignTrusted;
2738 }
2739
2740 assert(certProperties.signAlgorithm != Ssl::algSignEnd);
2741
2742 if (certProperties.signAlgorithm == Ssl::algSignUntrusted) {
2743 assert(port->secure.untrustedSigningCa.cert);
2744 certProperties.signWithX509.resetAndLock(port->secure.untrustedSigningCa.cert.get());
2745 certProperties.signWithPkey.resetAndLock(port->secure.untrustedSigningCa.pkey.get());
2746 } else {
2747 assert(port->secure.signingCa.cert.get());
2748 certProperties.signWithX509.resetAndLock(port->secure.signingCa.cert.get());
2749
2750 if (port->secure.signingCa.pkey)
2751 certProperties.signWithPkey.resetAndLock(port->secure.signingCa.pkey.get());
2752 }
2753 signAlgorithm = certProperties.signAlgorithm;
2754
2755 certProperties.signHash = Ssl::DefaultSignHash;
2756 }
2757
2758 Security::ContextPointer
2759 ConnStateData::getTlsContextFromCache(const SBuf &cacheKey, const Ssl::CertificateProperties &certProperties)
2760 {
2761 debugs(33, 5, "Finding SSL certificate for " << cacheKey << " in cache");
2762 Ssl::LocalContextStorage * ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2763 if (const auto ctx = ssl_ctx_cache ? ssl_ctx_cache->get(cacheKey) : nullptr) {
2764 if (Ssl::verifySslCertificate(*ctx, certProperties)) {
2765 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is valid");
2766 return *ctx;
2767 } else {
2768 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is out of date. Delete this certificate from cache");
2769 if (ssl_ctx_cache)
2770 ssl_ctx_cache->del(cacheKey);
2771 }
2772 }
2773 return Security::ContextPointer(nullptr);
2774 }
2775
2776 void
2777 ConnStateData::storeTlsContextToCache(const SBuf &cacheKey, Security::ContextPointer &ctx)
2778 {
2779 Ssl::LocalContextStorage *ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2780 if (!ssl_ctx_cache || !ssl_ctx_cache->add(cacheKey, ctx)) {
2781 // If it is not in storage delete after using. Else storage deleted it.
2782 fd_table[clientConnection->fd].dynamicTlsContext = ctx;
2783 }
2784 }
2785
/// Begins obtaining a TLS context for bumping this client connection:
/// tries the in-memory certificate cache, then the external ssl_crtd helper
/// (when built), then blocking in-process generation. Ends by calling
/// getSslContextDone() (directly or via the helper callback).
void
ConnStateData::getSslContextStart()
{
    if (port->secure.generateHostCertificates) {
        Ssl::CertificateProperties certProperties;
        buildSslCertGenerationParams(certProperties);

        // Disable caching for bumpPeekAndSplice mode
        if (!(sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare))) {
            sslBumpCertKey.clear();
            Ssl::InRamCertificateDbKey(certProperties, sslBumpCertKey);
            assert(!sslBumpCertKey.isEmpty());

            // reuse a previously generated context matching these properties, if any
            Security::ContextPointer ctx(getTlsContextFromCache(sslBumpCertKey, certProperties));
            if (ctx) {
                getSslContextDone(ctx);
                return;
            }
        }

#if USE_SSL_CRTD
        // Prefer delegating certificate generation to the external ssl_crtd
        // helper; on any composition error, fall back to blocking in-process
        // generation below.
        try {
            debugs(33, 5, HERE << "Generating SSL certificate for " << certProperties.commonName << " using ssl_crtd.");
            Ssl::CrtdMessage request_message(Ssl::CrtdMessage::REQUEST);
            request_message.setCode(Ssl::CrtdMessage::code_new_certificate);
            request_message.composeRequest(certProperties);
            debugs(33, 5, HERE << "SSL crtd request: " << request_message.compose().c_str());
            Ssl::Helper::Submit(request_message, sslCrtdHandleReplyWrapper, this);
            return; // the helper reply callback continues certificate handling
        } catch (const std::exception &e) {
            debugs(33, DBG_IMPORTANT, "ERROR: Failed to compose ssl_crtd " <<
                   "request for " << certProperties.commonName <<
                   " certificate: " << e.what() << "; will now block to " <<
                   "generate that certificate.");
            // fall through to do blocking in-process generation.
        }
#endif // USE_SSL_CRTD

        debugs(33, 5, HERE << "Generating SSL certificate for " << certProperties.commonName);
        if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
            // peek/stare: configure the already-created per-connection SSL
            // object in place instead of building a cached context
            doPeekAndSpliceStep();
            auto ssl = fd_table[clientConnection->fd].ssl.get();
            if (!Ssl::configureSSL(ssl, certProperties, *port))
                debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

            Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
            Ssl::configureUnconfiguredSslContext(ctx, certProperties.signAlgorithm, *port);
        } else {
            // NOTE(review): this branch tests the signAlgorithm data member
            // while the branch above uses certProperties.signAlgorithm; both
            // were set by buildSslCertGenerationParams() above, so they agree.
            Security::ContextPointer dynCtx(Ssl::GenerateSslContext(certProperties, port->secure, (signAlgorithm == Ssl::algSignTrusted)));
            if (dynCtx && !sslBumpCertKey.isEmpty())
                storeTlsContextToCache(sslBumpCertKey, dynCtx);
            getSslContextDone(dynCtx);
        }
        return;
    }

    // certificate generation disabled: let getSslContextDone() fall back to
    // the port's static context (or close the connection)
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2845
/// Completes TLS context selection for a bumped connection: substitutes the
/// port's static context when no dynamic one was produced, creates the
/// server-side TLS state for the client socket, and starts negotiation.
void
ConnStateData::getSslContextDone(Security::ContextPointer &ctx)
{
    if (port->secure.generateHostCertificates && !ctx) {
        debugs(33, 2, "Failed to generate TLS context for " << tlsConnectHostOrIp);
    }

    // If generated ssl context = NULL, try to use static ssl context.
    if (!ctx) {
        if (!port->secure.staticContext) {
            // no context at all: we cannot talk TLS to this client
            debugs(83, DBG_IMPORTANT, "Closing " << clientConnection->remote << " as lacking TLS context");
            clientConnection->close();
            return;
        } else {
            debugs(33, 5, "Using static TLS context.");
            ctx = port->secure.staticContext;
        }
    }

    if (!httpsCreate(this, ctx))
        return;

    // bumped intercepted conns should already have Config.Timeout.request set
    // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
    // to make sure the connection does not get stuck on non-SSL clients.
    resetReadTimeout(Config.Timeout.request);

    switchedToHttps_ = true;

    // hand any already-buffered client bytes (e.g., the TLS Client Hello) to
    // the client-side BIO so the TLS library consumes them before the socket
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    inBuf.clear();
    clientNegotiateSSL(clientConnection->fd, this);
}
2882
/// Switches this connection from plain HTTP handling to TLS processing for
/// SSL bumping: records the CONNECT target, creates the Ssl::ServerBump state
/// appropriate for bumpServerMode, and schedules a read of the expected TLS
/// Client Hello (parsed later by parseTlsHandshake()).
void
ConnStateData::switchToHttps(ClientHttpRequest *http, Ssl::BumpMode bumpServerMode)
{
    assert(!switchedToHttps_);
    Must(http->request);
    auto &request = http->request;

    // Depending on receivedFirstByte_, we are at the start of either an
    // established CONNECT tunnel with the client or an intercepted TCP (and
    // presumably TLS) connection from the client. Expect TLS Client Hello.
    const auto insideConnectTunnel = receivedFirstByte_;
    debugs(33, 5, (insideConnectTunnel ? "post-CONNECT " : "raw TLS ") << clientConnection);

    // remember the CONNECT target; used later for logging and as the
    // fallback certificate common name
    tlsConnectHostOrIp = request->url.hostOrIp();
    tlsConnectPort = request->url.port();
    resetSslCommonName(request->url.host());

    // We are going to read new request
    flags.readMore = true;

    // keep version major.minor details the same.
    // but we are now performing the HTTPS handshake traffic
    transferProtocol.protocol = AnyP::PROTO_HTTPS;

    // If sslServerBump is set, then we have decided to deny CONNECT
    // and now want to switch to SSL to send the error to the client
    // without even peeking at the origin server certificate.
    if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http);
    } else if (bumpServerMode == Ssl::bumpPeek || bumpServerMode == Ssl::bumpStare) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http, nullptr, bumpServerMode);
    }

    // commSetConnTimeout() was called for this request before we switched.
    // Fix timeout to request_start_timeout
    resetReadTimeout(Config.Timeout.request_start_timeout);
    // Also reset receivedFirstByte_ flag to allow this timeout work in the case we have
    // a bumped "connect" request on non transparent port.
    receivedFirstByte_ = false;
    // Get more data to peek at Tls
    parsingTlsHandshake = true;

    // If the protocol has changed, then reset preservingClientData_.
    // Otherwise, its value initially set in start() is still valid/fresh.
    // shouldPreserveClientData() uses parsingTlsHandshake which is reset above.
    if (insideConnectTunnel)
        preservingClientData_ = shouldPreserveClientData();

    readSomeData();
}
2935
/// Incrementally parses the TLS handshake bytes accumulated in inBuf.
/// Reads more data if the Hello is incomplete; on parse failure attempts to
/// tunnel the unrecognized protocol; on success dispatches to the configured
/// bumping step (client-first, server-first, or peek/stare).
void
ConnStateData::parseTlsHandshake()
{
    Must(parsingTlsHandshake);

    assert(!inBuf.isEmpty());
    receivedFirstByte();
    fd_note(clientConnection->fd, "Parsing TLS handshake");

    // stops being nil if we fail to parse the handshake
    ErrorDetail::Pointer parseErrorDetails;

    try {
        if (!tlsParser.parseHello(inBuf)) {
            // need more data to finish parsing
            readSomeData();
            return;
        }
    }
    catch (const TextException &ex) {
        debugs(83, 2, "exception: " << ex);
        parseErrorDetails = new ExceptionErrorDetail(ex.id());
    }
    catch (...) {
        debugs(83, 2, "exception: " << CurrentException);
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_PARSE");
        parseErrorDetails = d;
    }

    parsingTlsHandshake = false;

    // client data may be needed for splicing and for
    // tunneling unsupportedProtocol after an error
    preservedClientData = inBuf;

    // Even if the parser failed, each TLS detail should either be set
    // correctly or still be "unknown"; copying unknown detail is a no-op.
    Security::TlsDetails::Pointer const &details = tlsParser.details;
    clientConnection->tlsNegotiations()->retrieveParsedInfo(details);
    if (details && !details->serverName.isEmpty()) {
        // the SNI overrides the CONNECT host as the certificate common name
        resetSslCommonName(details->serverName.c_str());
        tlsClientSni_ = details->serverName;
    }

    // We should disable read/write handlers
    Comm::ResetSelect(clientConnection->fd);

    if (parseErrorDetails) {
        // not a TLS Client Hello: try on_unsupported_protocol tunneling
        Http::StreamPointer context = pipeline.front();
        Must(context && context->http);
        HttpRequest::Pointer request = context->http->request;
        debugs(83, 5, "Got something other than TLS Client Hello. Cannot SslBump.");
        updateError(ERR_PROTOCOL_UNKNOWN, parseErrorDetails);
        if (!clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_PROTOCOL_UNKNOWN))
            clientConnection->close();
        return;
    }

    if (!sslServerBump || sslServerBump->act.step1 == Ssl::bumpClientFirst) { // Either means client-first.
        getSslContextStart();
        return;
    } else if (sslServerBump->act.step1 == Ssl::bumpServerFirst) {
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        // will call httpsPeeked() with certificate and connection, eventually
        FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : nullptr);
    } else {
        Must(sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare);
        startPeekAndSplice();
    }
}
3007
3008 void httpsSslBumpStep2AccessCheckDone(Acl::Answer answer, void *data)
3009 {
3010 ConnStateData *connState = (ConnStateData *) data;
3011
3012 // if the connection is closed or closing, just return.
3013 if (!connState->isOpen())
3014 return;
3015
3016 debugs(33, 5, "Answer: " << answer << " kind:" << answer.kind);
3017 assert(connState->serverBump());
3018 Ssl::BumpMode bumpAction;
3019 if (answer.allowed()) {
3020 bumpAction = (Ssl::BumpMode)answer.kind;
3021 } else
3022 bumpAction = Ssl::bumpSplice;
3023
3024 connState->serverBump()->act.step2 = bumpAction;
3025 connState->sslBumpMode = bumpAction;
3026 Http::StreamPointer context = connState->pipeline.front();
3027 if (ClientHttpRequest *http = (context ? context->http : nullptr))
3028 http->al->ssl.bumpMode = bumpAction;
3029
3030 if (bumpAction == Ssl::bumpTerminate) {
3031 connState->clientConnection->close();
3032 } else if (bumpAction != Ssl::bumpSplice) {
3033 connState->startPeekAndSplice();
3034 } else if (!connState->splice())
3035 connState->clientConnection->close();
3036 }
3037
/// Stops TLS interception on this connection and re-tunnels the preserved
/// client bytes: builds a fake CONNECT request (transparent case) or a
/// tunneled CONNECT (forward-proxy case) so access checks and adaptation can
/// still use the SNI. Returns false if the tunnel could not be initiated.
bool
ConnStateData::splice()
{
    // normally we can splice here, because we just got client hello message

    // fde::ssl/tls_read_method() probably reads from our own inBuf. If so, then
    // we should not lose any raw bytes when switching to raw I/O here.
    if (fd_table[clientConnection->fd].ssl.get())
        fd_table[clientConnection->fd].useDefaultIo();

    // XXX: assuming that there was an HTTP/1.1 CONNECT to begin with...
    // reset the current protocol to HTTP/1.1 (was "HTTPS" for the bumping process)
    transferProtocol = Http::ProtocolVersion();
    assert(!pipeline.empty());
    Http::StreamPointer context = pipeline.front();
    Must(context);
    Must(context->http);
    ClientHttpRequest *http = context->http;
    HttpRequest::Pointer request = http->request;
    // retire the CONNECT transaction before starting the tunnel transaction
    context->finished();
    if (transparent()) {
        // For transparent connections, make a new fake CONNECT request, now
        // with SNI as target. doCallout() checks, adaptations may need that.
        return fakeAConnectRequest("splice", preservedClientData);
    } else {
        // For non transparent connections make a new tunneled CONNECT, which
        // also sets the HttpRequest::flags::forceTunnel flag to avoid
        // respond with "Connection Established" to the client.
        // This fake CONNECT request required to allow use of SNI in
        // doCallout() checks and adaptations.
        return initiateTunneledRequest(request, Http::METHOD_CONNECT, "splice", preservedClientData);
    }
}
3071
/// Step2 of SSL bumping for peek/stare: runs the ssl_bump step2 ACL check on
/// first entry; after the check, starts a TLS accept with an unconfigured
/// context (holding our server Hello) and forwards the request so that
/// httpsPeeked() eventually receives the origin certificate.
void
ConnStateData::startPeekAndSplice()
{
    // This is the Step2 of the SSL bumping
    assert(sslServerBump);
    Http::StreamPointer context = pipeline.front();
    ClientHttpRequest *http = context ? context->http : nullptr;

    if (sslServerBump->at(XactionStep::tlsBump1)) {
        sslServerBump->step = XactionStep::tlsBump2;
        // Run a accessList check to check if want to splice or continue bumping

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, sslServerBump->request.getRaw(), nullptr);
        acl_checklist->al = http ? http->al : nullptr;
        //acl_checklist->src_addr = params.conn->remote;
        //acl_checklist->my_addr = s->s;
        // client-side-only bump modes make no sense at step2; forbid them
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpNone));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpClientFirst));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpServerFirst));
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(sslServerBump->request.getRaw(), log_uri);
        // continues in httpsSslBumpStep2AccessCheckDone()
        acl_checklist->nonBlockingCheck(httpsSslBumpStep2AccessCheckDone, this);
        return;
    }

    // will call httpsPeeked() with certificate and connection, eventually
    Security::ContextPointer unConfiguredCTX(Ssl::createSSLContext(port->secure.signingCa.cert, port->secure.signingCa.pkey, port->secure));
    fd_table[clientConnection->fd].dynamicTlsContext = unConfiguredCTX;

    if (!httpsCreate(this, unConfiguredCTX))
        return;

    switchedToHttps_ = true;

    // replay the already-read client bytes into the TLS BIO and hold our
    // server Hello until doPeekAndSpliceStep() releases it
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    bio->hold(true);

    // We have successfully parsed client Hello, but our TLS handshake parser is
    // forgiving. Now we use a TLS library to parse the same bytes, so that we
    // can honor on_unsupported_protocol if needed. If there are no errors, we
    // expect Security::Accept() to ask us to write (our) TLS server Hello. We
    // also allow an ioWantRead result in case some fancy TLS extension that
    // Squid does not yet understand requires reading post-Hello client bytes.
    const auto handshakeResult = Security::Accept(*clientConnection);
    if (!handshakeResult.wantsIo())
        return handleSslBumpHandshakeError(handshakeResult);

    // We need to reset inBuf here, to be used by incoming requests in the case
    // of SSL bump
    inBuf.clear();

    debugs(83, 5, "Peek and splice at step2 done. Start forwarding the request!!! ");
    FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : NULL);
}
3129
/// process a problematic Security::Accept() result on the SslBump code path:
/// classifies the outcome, records an error detail via updateError(), and
/// either tunnels the connection (on_unsupported_protocol) or closes it.
void
ConnStateData::handleSslBumpHandshakeError(const Security::IoResult &handshakeResult)
{
    auto errCategory = ERR_NONE;

    switch (handshakeResult.category) {
    // we expected the handshake to want I/O, so a completed handshake here
    // is itself unexpected
    case Security::IoResult::ioSuccess: {
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_SUCCESS");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioWantRead: {
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_READ");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioWantWrite: {
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_WRITE");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioError:
        debugs(83, (handshakeResult.important ? DBG_IMPORTANT : 2), "ERROR: " << handshakeResult.errorDescription <<
               " while SslBump-accepting a TLS connection on " << clientConnection << ": " << handshakeResult.errorDetail);
        updateError(errCategory = ERR_SECURE_ACCEPT_FAIL, handshakeResult.errorDetail);
        break;

    }

    // give on_unsupported_protocol a chance before closing the connection
    if (!tunnelOnError(HttpRequestMethod(), errCategory))
        clientConnection->close();
}
3166
3167 void
3168 ConnStateData::doPeekAndSpliceStep()
3169 {
3170 auto ssl = fd_table[clientConnection->fd].ssl.get();
3171 BIO *b = SSL_get_rbio(ssl);
3172 assert(b);
3173 Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
3174
3175 debugs(33, 5, "PeekAndSplice mode, proceed with client negotiation. Current state:" << SSL_state_string_long(ssl));
3176 bio->hold(false);
3177
3178 Comm::SetSelect(clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, this, 0);
3179 switchedToHttps_ = true;
3180 }
3181
3182 void
3183 ConnStateData::httpsPeeked(PinnedIdleContext pic)
3184 {
3185 Must(sslServerBump != NULL);
3186 Must(sslServerBump->request == pic.request);
3187 Must(pipeline.empty() || pipeline.front()->http == nullptr || pipeline.front()->http->request == pic.request.getRaw());
3188
3189 if (Comm::IsConnOpen(pic.connection)) {
3190 notePinnedConnectionBecameIdle(pic);
3191 debugs(33, 5, "bumped HTTPS server: " << tlsConnectHostOrIp);
3192 } else
3193 debugs(33, 5, "Error while bumping: " << tlsConnectHostOrIp);
3194
3195 getSslContextStart();
3196 }
3197
3198 #endif /* USE_OPENSSL */
3199
/// Builds and kicks off a fake request (usually CONNECT) that forces this
/// connection into tunnel mode, carrying the given payload as the initial
/// client bytes. The tunnel target is chosen in precedence order: pinned
/// server connection, the causing request URL, the saved TLS CONNECT target,
/// then (for transparent connections) the original destination address.
/// Returns false when no target can be computed.
bool
ConnStateData::initiateTunneledRequest(HttpRequest::Pointer const &cause, Http::MethodType const method, const char *reason, const SBuf &payload)
{
    // fake a CONNECT request to force connState to tunnel
    SBuf connectHost;
    unsigned short connectPort = 0;

    if (pinning.serverConnection != nullptr) {
        static char ip[MAX_IPSTRLEN];
        connectHost = pinning.serverConnection->remote.toStr(ip, sizeof(ip));
        connectPort = pinning.serverConnection->remote.port();
    } else if (cause) {
        connectHost = cause->url.hostOrIp();
        connectPort = cause->url.port();
#if USE_OPENSSL
    } else if (!tlsConnectHostOrIp.isEmpty()) {
        connectHost = tlsConnectHostOrIp;
        connectPort = tlsConnectPort;
#endif
    } else if (transparent()) {
        static char ip[MAX_IPSTRLEN];
        connectHost = clientConnection->local.toStr(ip, sizeof(ip));
        connectPort = clientConnection->local.port();
    } else {
        // Typical cases are malformed HTTP requests on http_port and malformed
        // TLS handshakes on non-bumping https_port. TODO: Discover these
        // problems earlier so that they can be classified/detailed better.
        debugs(33, 2, "Not able to compute URL, abort request tunneling for " << reason);
        // TODO: throw when nonBlockingCheck() callbacks gain job protections
        static const auto d = MakeNamedErrorDetail("TUNNEL_TARGET");
        updateError(ERR_INVALID_REQ, d);
        return false;
    }

    debugs(33, 2, "Request tunneling for " << reason);
    ClientHttpRequest *http = buildFakeRequest(method, connectHost, connectPort, payload);
    HttpRequest::Pointer request = http->request;
    // forceTunnel suppresses the "Connection Established" reply to the client
    request->flags.forceTunnel = true;
    http->calloutContext = new ClientRequestContext(http);
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3243
/// Builds and processes a fake CONNECT request for a transparently
/// intercepted connection, targeting the TLS SNI (when known) or the original
/// destination IP, with payload as the preserved client bytes.
/// Returns true (the fake request is always initiated).
bool
ConnStateData::fakeAConnectRequest(const char *reason, const SBuf &payload)
{
    debugs(33, 2, "fake a CONNECT request to force connState to tunnel for " << reason);

    SBuf connectHost;
    assert(transparent());
    const unsigned short connectPort = clientConnection->local.port();

#if USE_OPENSSL
    // prefer the TLS SNI so ACL checks and adaptation see the intended host
    if (!tlsClientSni_.isEmpty())
        connectHost.assign(tlsClientSni_);
    else
#endif
    {
        // fall back to the original destination IP of the intercepted connection
        static char ip[MAX_IPSTRLEN];
        clientConnection->local.toHostStr(ip, sizeof(ip));
        connectHost.assign(ip);
    }

    ClientHttpRequest *http = buildFakeRequest(Http::METHOD_CONNECT, connectHost, connectPort, payload);

    http->calloutContext = new ClientRequestContext(http);
    HttpRequest::Pointer request = http->request;
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3272
/// Creates a synthetic ClientHttpRequest (and its Http::Stream and
/// HttpRequest) for the given method/host/port, wires it into the client
/// stream pipeline, and loads payload into inBuf as the request "body".
/// Used to fake CONNECT/authority-form requests for tunneling and splicing.
ClientHttpRequest *
ConnStateData::buildFakeRequest(Http::MethodType const method, SBuf &useHost, unsigned short usePort, const SBuf &payload)
{
    ClientHttpRequest *http = new ClientHttpRequest(this);
    Http::Stream *stream = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = stream->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // standard client stream: clientReplyContext produces, stream consumes
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = stream;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    stream->flags.parsed_ok = 1; // Do we need it?
    stream->mayUseConnection(true);
    extendLifetime();
    stream->registerWithConn();

    MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
    mx->tcpClient = clientConnection;
    // Setup Http::Request object. Maybe should be replaced by a call to (modified)
    // clientProcessRequest
    HttpRequest::Pointer request = new HttpRequest(mx);
    // METHOD_NONE means an authority-form target (host:port), not an HTTP URL
    AnyP::ProtocolType proto = (method == Http::METHOD_NONE) ? AnyP::PROTO_AUTHORITY_FORM : AnyP::PROTO_HTTP;
    request->url.setScheme(proto, nullptr);
    request->method = method;
    request->url.host(useHost.c_str());
    request->url.port(usePort);

    http->uri = SBufToCstring(request->effectiveRequestUri());
    http->initRequest(request.getRaw());

    request->manager(this, http->al);

    if (proto == AnyP::PROTO_HTTP)
        request->header.putStr(Http::HOST, useHost.c_str());

    // tag the source transport so logging/ACLs can distinguish https
    request->sources |= ((switchedToHttps() || port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    // carry over any authentication established on this connection
    if (getAuth())
        request->auth_user_request = getAuth();
#endif

    // treat payload as the bytes already received for this fake request
    inBuf = payload;
    flags.readMore = false;

    return http;
}
3324
3325 /// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
3326 static bool
3327 OpenedHttpSocket(const Comm::ConnectionPointer &c, const Ipc::FdNoteId portType)
3328 {
3329 if (!Comm::IsConnOpen(c)) {
3330 Must(NHttpSockets > 0); // we tried to open some
3331 --NHttpSockets; // there will be fewer sockets than planned
3332 Must(HttpSockets[NHttpSockets] < 0); // no extra fds received
3333
3334 if (!NHttpSockets) // we could not open any listen sockets at all
3335 fatalf("Unable to open %s",FdNote(portType));
3336
3337 return false;
3338 }
3339 return true;
3340 }
3341
3342 /// find any unused HttpSockets[] slot and store fd there or return false
3343 static bool
3344 AddOpenedHttpSocket(const Comm::ConnectionPointer &conn)
3345 {
3346 bool found = false;
3347 for (int i = 0; i < NHttpSockets && !found; ++i) {
3348 if ((found = HttpSockets[i] < 0))
3349 HttpSockets[i] = conn->fd;
3350 }
3351 return found;
3352 }
3353
/// Opens one listening socket per configured http(s)_port after validating
/// each port's ssl-bump and TLS settings. Sockets are opened asynchronously
/// via IPC; clientListenerConnectionOpened() finishes the setup. Each planned
/// socket reserves an HttpSockets[] slot (initialized to -1).
static void
clientHttpConnectionsOpen(void)
{
    for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
        const SBuf &scheme = AnyP::UriScheme(s->transport.protocol).image();

        if (MAXTCPLISTENPORTS == NHttpSockets) {
            debugs(1, DBG_IMPORTANT, "WARNING: You have too many '" << scheme << "_port' lines." <<
                   Debug::Extra << "The limit is " << MAXTCPLISTENPORTS << " HTTP ports.");
            continue;
        }

#if USE_OPENSSL
        if (s->flags.tunnelSslBumping) {
            // ssl-bump needs an ssl_bump access list to decide what to bump
            if (!Config.accessList.ssl_bump) {
                debugs(33, DBG_IMPORTANT, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << scheme << "_port " << s->s);
                s->flags.tunnelSslBumping = false;
            }
            // bumping needs either a static context or certificate generation
            if (!s->secure.staticContext && !s->secure.generateHostCertificates) {
                debugs(1, DBG_IMPORTANT, "Will not bump SSL at " << scheme << "_port " << s->s << " due to TLS initialization failure.");
                s->flags.tunnelSslBumping = false;
                if (s->transport.protocol == AnyP::PROTO_HTTP)
                    s->secure.encryptTransport = false;
            }
            if (s->flags.tunnelSslBumping) {
                // Create ssl_ctx cache for this port.
                Ssl::TheGlobalContextStorage.addLocalStorage(s->s, s->secure.dynamicCertMemCacheSize);
            }
        }
#endif

        if (s->secure.encryptTransport && !s->secure.staticContext) {
            debugs(1, DBG_CRITICAL, "ERROR: Ignoring " << scheme << "_port " << s->s << " due to TLS context initialization failure.");
            continue;
        }

        // Fill out a Comm::Connection which IPC will open as a listener for us
        // then pass back when active so we can start a TcpAcceptor subscription.
        s->listenConn = new Comm::Connection;
        s->listenConn->local = s->s;

        s->listenConn->flags = COMM_NONBLOCKING | (s->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
                               (s->flags.natIntercept ? COMM_INTERCEPTION : 0) |
                               (s->workerQueues ? COMM_REUSEPORT : 0);

        typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
        if (s->transport.protocol == AnyP::PROTO_HTTP) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTP
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpAccept", CommAcceptCbPtrFun(httpAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33,2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened, s, Ipc::fdnHttpSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpSocket, listenCall);

        } else if (s->transport.protocol == AnyP::PROTO_HTTPS) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTPS
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpsAccept", CommAcceptCbPtrFun(httpsAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33, 2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened,
                                                    s, Ipc::fdnHttpsSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpsSocket, listenCall);
        }

        HttpSockets[NHttpSockets] = -1; // set in clientListenerConnectionOpened
        ++NHttpSockets;
    }
}
3424
/// Starts listening on the given port, routing accepted connections to
/// subCall. Like clientHttpConnectionsOpen() but for a single externally
/// configured port; reserves an HttpSockets[] slot for the pending socket.
void
clientStartListeningOn(AnyP::PortCfgPointer &port, const RefCount< CommCbFunPtrCallT<CommAcceptCbPtrFun> > &subCall, const Ipc::FdNoteId fdNote)
{
    // Fill out a Comm::Connection which IPC will open as a listener for us
    port->listenConn = new Comm::Connection;
    port->listenConn->local = port->s;
    port->listenConn->flags =
        COMM_NONBLOCKING |
        (port->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
        (port->flags.natIntercept ? COMM_INTERCEPTION : 0);

    // route new connections to subCall
    typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
    Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
    AsyncCall::Pointer listenCall =
        asyncCall(33, 2, "clientListenerConnectionOpened",
                  ListeningStartedDialer(&clientListenerConnectionOpened,
                                         port, fdNote, sub));
    Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, port->listenConn, fdNote, listenCall);

    assert(NHttpSockets < MAXTCPLISTENPORTS);
    HttpSockets[NHttpSockets] = -1; // filled in by clientListenerConnectionOpened
    ++NHttpSockets;
}
3449
/// process clientHttpConnectionsOpen result: validates the opened listener
/// socket, starts the TcpAcceptor job that accepts client connections, and
/// records the fd in HttpSockets[].
static void
clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub)
{
    Must(s != NULL);

    // adjusts NHttpSockets bookkeeping on failure
    if (!OpenedHttpSocket(s->listenConn, portTypeNote))
        return;

    Must(Comm::IsConnOpen(s->listenConn));

    // TCP: setup a job to handle accept() with subscribed handler
    AsyncJob::Start(new Comm::TcpAcceptor(s, FdNote(portTypeNote), sub));

    debugs(1, DBG_IMPORTANT, "Accepting " <<
           (s->flags.natIntercept ? "NAT intercepted " : "") <<
           (s->flags.tproxyIntercept ? "TPROXY intercepted " : "") <<
           (s->flags.tunnelSslBumping ? "SSL bumped " : "") <<
           (s->flags.accelSurrogate ? "reverse-proxy " : "")
           << FdNote(portTypeNote) << " connections at "
           << s->listenConn);

    Must(AddOpenedHttpSocket(s->listenConn)); // otherwise, we have received a fd we did not ask for

#if USE_SYSTEMD
    // When the very first port opens, tell systemd we are able to serve connections.
    // Subsequent sd_notify() calls, including calls during reconfiguration,
    // do nothing because the first call parameter is 1.
    // XXX: Send the notification only after opening all configured ports.
    if (opt_foreground || opt_no_daemon) {
        const auto result = sd_notify(1, "READY=1");
        if (result < 0) {
            debugs(1, DBG_IMPORTANT, "WARNING: failed to send start-up notification to systemd" <<
                   Debug::Extra << "sd_notify() error: " << xstrerr(-result));
        }
    }
#endif
}
3488
3489 void
3490 clientOpenListenSockets(void)
3491 {
3492 clientHttpConnectionsOpen();
3493 Ftp::StartListening();
3494
3495 if (NHttpSockets < 1)
3496 fatal("No HTTP, HTTPS, or FTP ports configured");
3497 }
3498
3499 void
3500 clientConnectionsClose()
3501 {
3502 for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
3503 if (s->listenConn != NULL) {
3504 debugs(1, DBG_IMPORTANT, "Closing HTTP(S) port " << s->listenConn->local);
3505 s->listenConn->close();
3506 s->listenConn = NULL;
3507 }
3508 }
3509
3510 Ftp::StopListening();
3511
3512 // TODO see if we can drop HttpSockets array entirely */
3513 for (int i = 0; i < NHttpSockets; ++i) {
3514 HttpSockets[i] = -1;
3515 }
3516
3517 NHttpSockets = 0;
3518 }
3519
/// Evaluates whether the cached entry is usable for this request under the
/// reply's Vary rules. Returns VARY_NONE (entry does not vary), VARY_MATCH
/// (request's variant key matches the stored one), VARY_OTHER (a new variant
/// key was computed; continue the store search with it), or VARY_CANCEL
/// (inconsistent Vary state; give up on variant matching).
int
varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
{
    // the variant key computed for this request on an earlier pass, if any
    SBuf vary(request->vary_headers);
    const auto &reply = entry->mem().freshestReply();
    auto has_vary = reply.header.has(Http::HdrType::VARY);
#if X_ACCELERATOR_VARY

    // the X-Accelerator-Vary extension header also makes the reply vary
    has_vary |=
        reply.header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY);
#endif

    if (!has_vary || entry->mem_obj->vary_headers.isEmpty()) {
        if (!vary.isEmpty()) {
            /* Oops... something odd is going on here.. */
            // we computed a variant key earlier, but the entry found on this
            // second pass is not a Vary object; abandon variant matching
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            request->vary_headers.clear();
            return VARY_CANCEL;
        }

        if (!has_vary) {
            /* This is not a varying object */
            return VARY_NONE;
        }

        /* virtual "vary" object found. Calculate the vary key and
         * continue the search
         */
        vary = httpMakeVaryMark(request, &reply);

        if (!vary.isEmpty()) {
            request->vary_headers = vary;
            return VARY_OTHER;
        } else {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        }
    } else {
        if (vary.isEmpty()) {
            // first pass: compute and remember this request's variant key
            vary = httpMakeVaryMark(request, &reply);

            if (!vary.isEmpty())
                request->vary_headers = vary;
        }

        if (vary.isEmpty()) {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        } else if (vary.cmp(entry->mem_obj->vary_headers) == 0) {
            return VARY_MATCH;
        } else {
            /* Oops.. we have already been here and still haven't
             * found the requested variant. Bail out
             */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            return VARY_CANCEL;
        }
    }
}
3583
3584 ACLFilledChecklist *
3585 clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
3586 {
3587 const auto checklist = new ACLFilledChecklist(acl, nullptr, nullptr);
3588 clientAclChecklistFill(*checklist, http);
3589 return checklist;
3590 }
3591
/// Fills checklist with the transaction details (request, access log entry,
/// and ident string) of the given client transaction so that ACL evaluation
/// can match against them.
void
clientAclChecklistFill(ACLFilledChecklist &checklist, ClientHttpRequest *http)
{
    checklist.setRequest(http->request);
    checklist.al = http->al;
    checklist.syncAle(http->request, http->log_uri);

    // TODO: If http->getConn is always http->request->clientConnectionManager,
    // then call setIdent() inside checklist.setRequest(). Otherwise, restore
    // USE_IDENT lost in commit 94439e4.
    ConnStateData * conn = http->getConn();
    // use the client connection's rfc931 (ident) value when the connection is
    // still valid; otherwise fall back to dash_str ("-").
    // NOTE(review): cbdataReferenceValid(conn) is evaluated before the
    // conn != nullptr check — presumably it tolerates nil pointers; confirm.
    const char *ident = (cbdataReferenceValid(conn) &&
                         conn && conn->clientConnection) ?
                        conn->clientConnection->rfc931 : dash_str;
    checklist.setIdent(ident);
}
3608
3609 bool
3610 ConnStateData::transparent() const
3611 {
3612 return clientConnection != NULL && (clientConnection->flags & (COMM_TRANSPARENT|COMM_INTERCEPTION));
3613 }
3614
3615 BodyPipe::Pointer
3616 ConnStateData::expectRequestBody(int64_t size)
3617 {
3618 bodyPipe = new BodyPipe(this);
3619 if (size >= 0)
3620 bodyPipe->setBodySize(size);
3621 else
3622 startDechunkingRequest();
3623 return bodyPipe;
3624 }
3625
3626 int64_t
3627 ConnStateData::mayNeedToReadMoreBody() const
3628 {
3629 if (!bodyPipe)
3630 return 0; // request without a body or read/produced all body bytes
3631
3632 if (!bodyPipe->bodySizeKnown())
3633 return -1; // probably need to read more, but we cannot be sure
3634
3635 const int64_t needToProduce = bodyPipe->unproducedSize();
3636 const int64_t haveAvailable = static_cast<int64_t>(inBuf.length());
3637
3638 if (needToProduce <= haveAvailable)
3639 return 0; // we have read what we need (but are waiting for pipe space)
3640
3641 return needToProduce - haveAvailable;
3642 }
3643
3644 void
3645 ConnStateData::stopReceiving(const char *error)
3646 {
3647 debugs(33, 4, HERE << "receiving error (" << clientConnection << "): " << error <<
3648 "; old sending error: " <<
3649 (stoppedSending() ? stoppedSending_ : "none"));
3650
3651 if (const char *oldError = stoppedReceiving()) {
3652 debugs(33, 3, HERE << "already stopped receiving: " << oldError);
3653 return; // nothing has changed as far as this connection is concerned
3654 }
3655
3656 stoppedReceiving_ = error;
3657
3658 if (const char *sendError = stoppedSending()) {
3659 debugs(33, 3, HERE << "closing because also stopped sending: " << sendError);
3660 clientConnection->close();
3661 }
3662 }
3663
3664 void
3665 ConnStateData::expectNoForwarding()
3666 {
3667 if (bodyPipe != NULL) {
3668 debugs(33, 4, HERE << "no consumer for virgin body " << bodyPipe->status());
3669 bodyPipe->expectNoConsumption();
3670 }
3671 }
3672
3673 /// initialize dechunking state
3674 void
3675 ConnStateData::startDechunkingRequest()
3676 {
3677 Must(bodyPipe != NULL);
3678 debugs(33, 5, HERE << "start dechunking" << bodyPipe->status());
3679 assert(!bodyParser);
3680 bodyParser = new Http1::TeChunkedParser;
3681 }
3682
/// put parsed content into input buffer and clean up
/// \param withSuccess whether dechunking completed without errors; on
/// success, the now-known body size is propagated to the current request
void
ConnStateData::finishDechunkingRequest(bool withSuccess)
{
    debugs(33, 5, HERE << "finish dechunking: " << withSuccess);

    if (bodyPipe != NULL) {
        debugs(33, 7, HERE << "dechunked tail: " << bodyPipe->status());
        // keep the pipe alive past stopProducingFor(), which resets bodyPipe
        BodyPipe::Pointer myPipe = bodyPipe;
        stopProducingFor(bodyPipe, withSuccess); // sets bodyPipe->bodySize()
        Must(!bodyPipe); // we rely on it being nil after we are done with body
        if (withSuccess) {
            Must(myPipe->bodySizeKnown());
            Http::StreamPointer context = pipeline.front();
            // tell the current request its final decoded body length
            if (context != NULL && context->http && context->http->request)
                context->http->request->setContentLength(myPipe->bodySize());
        }
    }

    delete bodyParser;
    bodyParser = NULL;
}
3705
// XXX: this is an HTTP/1-only operation
/// Sends an HTTP control message (1xx reply) to the client, remembering
/// msg.cbSuccess so the originator can be notified when the write completes.
/// Closes the connection if there is no transaction to attach the 1xx to.
void
ConnStateData::sendControlMsg(HttpControlMsg msg)
{
    // record the control-message reply in the current transaction's log entry
    if (const auto context = pipeline.front()) {
        if (context->http)
            context->http->al->reply = msg.reply;
    }

    if (!isOpen()) {
        debugs(33, 3, HERE << "ignoring 1xx due to earlier closure");
        return;
    }

    // HTTP/1 1xx status messages are only valid when there is a transaction to trigger them
    if (!pipeline.empty()) {
        HttpReply::Pointer rep(msg.reply);
        Must(rep);
        // remember the callback
        cbControlMsgSent = msg.cbSuccess;

        typedef CommCbMemFunT<HttpControlMsgSink, CommIoCbParams> Dialer;
        AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, HttpControlMsgSink::wroteControlMsg);

        if (!writeControlMsgAndCall(rep.getRaw(), call)) {
            // but still inform the caller (so it may resume its operation)
            doneWithControlMsg();
        }
        return;
    }

    debugs(33, 3, HERE << " closing due to missing context for 1xx");
    clientConnection->close();
}
3740
3741 void
3742 ConnStateData::doneWithControlMsg()
3743 {
3744 HttpControlMsgSink::doneWithControlMsg();
3745
3746 if (Http::StreamPointer deferredRequest = pipeline.front()) {
3747 debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded after control msg wrote");
3748 ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
3749 }
3750 }
3751
/// Our close handler called by Comm when the pinned connection is closed
void
ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
{
    // FwdState might repin a failed connection sooner than this close
    // callback is called for the failed connection.
    assert(pinning.serverConnection == io.conn);
    pinning.closeHandler = NULL; // Comm unregisters handlers before calling
    const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
    pinning.serverConnection->noteClosure();
    unpinConnection(false); // connection is already closing; do not close again

    // a pinned server that produced no reply means the client's transaction
    // cannot succeed either; drop the client connection too
    if (sawZeroReply && clientConnection != NULL) {
        debugs(33, 3, "Closing client connection on pinned zero reply.");
        clientConnection->close();
    }

}
3770
/// Pins the given server connection while a transaction is still using it.
/// Unlike notePinnedConnectionBecameIdle(), does not start idle monitoring.
void
ConnStateData::pinBusyConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest::Pointer &request)
{
    pinConnection(pinServer, *request);
}
3776
3777 void
3778 ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
3779 {
3780 Must(pic.connection);
3781 Must(pic.request);
3782 pinConnection(pic.connection, *pic.request);
3783
3784 // monitor pinned server connection for remote-end closures.
3785 startPinnedConnectionMonitoring();
3786
3787 if (pipeline.empty())
3788 kick(); // in case clientParseRequests() was blocked by a busy pic.connection
3789 }
3790
/// Forward future client requests using the given server connection.
void
ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest &request)
{
    // no-op if we are already pinned to this very connection (same FD)
    if (Comm::IsConnOpen(pinning.serverConnection) &&
            pinning.serverConnection->fd == pinServer->fd) {
        debugs(33, 3, "already pinned" << pinServer);
        return;
    }

    unpinConnection(true); // closes pinned connection, if any, and resets fields

    pinning.serverConnection = pinServer;

    debugs(33, 3, HERE << pinning.serverConnection);

    Must(pinning.serverConnection != NULL);

    const char *pinnedHost = "[unknown]";
    // remember the request destination so later requests can be validated
    // against it (see borrowPinnedConnection())
    pinning.host = xstrdup(request.url.host());
    pinning.port = request.url.port();
    pinnedHost = pinning.host;
    pinning.pinned = true;
    if (CachePeer *aPeer = pinServer->getPeer())
        pinning.peer = cbdataReference(aPeer); // counted reference; released in unpinConnection()
    pinning.auth = request.flags.connectionAuth;
    char stmp[MAX_IPSTRLEN];
    char desc[FD_DESC_SZ];
    // label the server FD for cache manager and debugging output
    snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
             (pinning.auth || !pinning.peer) ? pinnedHost : pinning.peer->name,
             clientConnection->remote.toUrl(stmp,MAX_IPSTRLEN),
             clientConnection->fd);
    fd_note(pinning.serverConnection->fd, desc);

    typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
    pinning.closeHandler = JobCallback(33, 5,
                                       Dialer, this, ConnStateData::clientPinnedConnectionClosed);
    // remember the pinned connection so that cb does not unpin a fresher one
    typedef CommCloseCbParams Params;
    Params &params = GetCommParams<Params>(pinning.closeHandler);
    params.conn = pinning.serverConnection;
    comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
}
3834
3835 /// [re]start monitoring pinned connection for peer closures so that we can
3836 /// propagate them to an _idle_ client pinned to that peer
3837 void
3838 ConnStateData::startPinnedConnectionMonitoring()
3839 {
3840 if (pinning.readHandler != NULL)
3841 return; // already monitoring
3842
3843 typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
3844 pinning.readHandler = JobCallback(33, 3,
3845 Dialer, this, ConnStateData::clientPinnedConnectionRead);
3846 Comm::Read(pinning.serverConnection, pinning.readHandler);
3847 }
3848
3849 void
3850 ConnStateData::stopPinnedConnectionMonitoring()
3851 {
3852 if (pinning.readHandler != NULL) {
3853 Comm::ReadCancel(pinning.serverConnection->fd, pinning.readHandler);
3854 pinning.readHandler = NULL;
3855 }
3856 }
3857
#if USE_OPENSSL
/// Probes an idle pinned TLS server connection that became ready for reading.
/// Returns true when monitoring was restarted (connection should be kept),
/// false when the caller should treat the event as a connection-ending one.
bool
ConnStateData::handleIdleClientPinnedTlsRead()
{
    // A ready-for-reading connection means that the TLS server either closed
    // the connection, sent us some unexpected HTTP data, or started TLS
    // renegotiations. We should close the connection except for the last case.

    Must(pinning.serverConnection != nullptr);
    auto ssl = fd_table[pinning.serverConnection->fd].ssl.get();
    if (!ssl)
        return false; // not a TLS connection after all

    // a one-byte probe read to learn why the socket became readable
    char buf[1];
    const int readResult = SSL_read(ssl, buf, sizeof(buf));

    // actual application data on an idle pinned connection is unexpected
    if (readResult > 0 || SSL_pending(ssl) > 0) {
        debugs(83, 2, pinning.serverConnection << " TLS application data read");
        return false;
    }

    switch(const int error = SSL_get_error(ssl, readResult)) {
    case SSL_ERROR_WANT_WRITE:
        debugs(83, DBG_IMPORTANT, pinning.serverConnection << " TLS SSL_ERROR_WANT_WRITE request for idle pinned connection");
        // fall through to restart monitoring, for now
    case SSL_ERROR_NONE:
    case SSL_ERROR_WANT_READ:
        // benign (e.g., renegotiation in progress); keep watching
        startPinnedConnectionMonitoring();
        return true;

    default:
        debugs(83, 2, pinning.serverConnection << " TLS error: " << error);
        return false;
    }

    // not reached
    return true;
}
#endif
3897
/// Our read handler called by Comm when the server either closes an idle pinned connection or
/// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
void
ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
{
    pinning.readHandler = NULL; // Comm unregisters handlers before calling

    if (io.flag == Comm::ERR_CLOSING)
        return; // close handler will clean up

    Must(pinning.serverConnection == io.conn);

#if USE_OPENSSL
    // TLS traffic on an idle pinned connection may be a benign renegotiation
    if (handleIdleClientPinnedTlsRead())
        return;
#endif

    const bool clientIsIdle = pipeline.empty();

    debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " <<
           io.size << (clientIsIdle ? " with idle client" : ""));

    pinning.serverConnection->close();

    // If we are still sending data to the client, do not close now. When we are done sending,
    // ConnStateData::kick() checks pinning.serverConnection and will close.
    // However, if we are idle, then we must close to inform the idle client and minimize races.
    if (clientIsIdle && clientConnection != NULL)
        clientConnection->close();
}
3928
/// Returns the pinned server connection for (re)use by the given request,
/// suspending idle monitoring. Throws an ErrorState (after unpinning) when
/// the pinned connection is gone or does not match the request destination.
Comm::ConnectionPointer
ConnStateData::borrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
{
    debugs(33, 7, pinning.serverConnection);
    Must(request);

    // common failure path: unpin (closing the server side) and build an error
    const auto pinningError = [&](const err_type type) {
        unpinConnection(true);
        HttpRequestPointer requestPointer = request;
        return ErrorState::NewForwarding(type, requestPointer, ale);
    };

    if (!Comm::IsConnOpen(pinning.serverConnection))
        throw pinningError(ERR_ZERO_SIZE_OBJECT);

    // connection-auth pinning is host-specific; reject mismatched hosts
    if (pinning.auth && pinning.host && strcasecmp(pinning.host, request->url.host()) != 0)
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST

    if (pinning.port != request->url.port())
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST

    // the pinned cache_peer may have been removed by reconfiguration
    if (pinning.peer && !cbdataReferenceValid(pinning.peer))
        throw pinningError(ERR_ZERO_SIZE_OBJECT);

    if (pinning.peerAccessDenied)
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_FORWARDING_DENIED

    // the connection is about to become busy; stop idle monitoring
    stopPinnedConnectionMonitoring();
    return pinning.serverConnection;
}
3959
3960 Comm::ConnectionPointer
3961 ConnStateData::BorrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
3962 {
3963 if (const auto connManager = request ? request->pinnedConnection() : nullptr)
3964 return connManager->borrowPinnedConnection(request, ale);
3965
3966 // ERR_CANNOT_FORWARD is somewhat misleading here; we can still forward, but
3967 // there is no point since the client connection is now gone
3968 HttpRequestPointer requestPointer = request;
3969 throw ErrorState::NewForwarding(ERR_CANNOT_FORWARD, requestPointer, ale);
3970 }
3971
/// Forgets the pinned server connection, releasing the peer reference and
/// the registered close/read handlers.
/// \param andClose whether to also close the server-side socket
void
ConnStateData::unpinConnection(const bool andClose)
{
    debugs(33, 3, HERE << pinning.serverConnection);

    if (pinning.peer)
        cbdataReferenceDone(pinning.peer);

    if (Comm::IsConnOpen(pinning.serverConnection)) {
        // unregister our close handler first so closing does not call it back
        if (pinning.closeHandler != NULL) {
            comm_remove_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
            pinning.closeHandler = NULL;
        }

        stopPinnedConnectionMonitoring();

        // close the server side socket if requested
        if (andClose)
            pinning.serverConnection->close();
        pinning.serverConnection = NULL;
    }

    safe_free(pinning.host);

    pinning.zeroReply = false;
    pinning.peerAccessDenied = false;

    /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
     * connection has gone away */
}
4002
4003 void
4004 ConnStateData::terminateAll(const Error &error, const LogTagsErrors &lte)
4005 {
4006 debugs(33, 3, pipeline.count() << '/' << pipeline.nrequests << " after " << error);
4007
4008 if (pipeline.empty()) {
4009 bareError.update(error); // XXX: bareLogTagsErrors
4010 } else {
4011 // We terminate the current CONNECT/PUT/etc. context below, logging any
4012 // error details, but that context may leave unparsed bytes behind.
4013 // Consume them to stop checkLogging() from logging them again later.
4014 const auto intputToConsume =
4015 #if USE_OPENSSL
4016 parsingTlsHandshake ? "TLS handshake" : // more specific than CONNECT
4017 #endif
4018 bodyPipe ? "HTTP request body" :
4019 pipeline.back()->mayUseConnection() ? "HTTP CONNECT" :
4020 nullptr;
4021
4022 while (const auto context = pipeline.front()) {
4023 context->noteIoError(error, lte);
4024 context->finished(); // cleanup and self-deregister
4025 assert(context != pipeline.front());
4026 }
4027
4028 if (intputToConsume && !inBuf.isEmpty()) {
4029 debugs(83, 5, "forgetting client " << intputToConsume << " bytes: " << inBuf.length());
4030 inBuf.clear();
4031 }
4032 }
4033
4034 clientConnection->close();
4035 }
4036
/// log the last (attempt at) transaction if nobody else did
void
ConnStateData::checkLogging()
{
    // to simplify our logic, we assume that terminateAll() has been called
    assert(pipeline.empty());

    // do not log connections that closed after a transaction (it is normal)
    // TODO: access_log needs ACLs to match received-no-bytes connections
    if (pipeline.nrequests && inBuf.isEmpty())
        return;

    /* Create a temporary ClientHttpRequest object. Its destructor will log. */
    ClientHttpRequest http(this);
    http.req_sz = inBuf.length(); // bytes received but never parsed into a request
    // XXX: Or we died while waiting for the pinned connection to become idle.
    http.setErrorUri("error:transaction-end-before-headers");
    http.updateError(bareError);
}
4056
/// Whether received (but not yet interpreted) client bytes should be kept so
/// that an on_unsupported_protocol action can later tunnel them verbatim.
bool
ConnStateData::shouldPreserveClientData() const
{
    // PROXY protocol bytes are meant for us and, hence, cannot be tunneled
    if (needProxyProtocolHeader_)
        return false;

    // If our decision here is negative, configuration changes are irrelevant.
    // Otherwise, clientTunnelOnError() rechecks configuration before tunneling.
    if (!Config.accessList.on_unsupported_protocol)
        return false;

    // TODO: Figure out whether/how we can support FTP tunneling.
    if (port->transport.protocol == AnyP::PROTO_FTP)
        return false;

#if USE_OPENSSL
    // preserve while we are still interpreting the client TLS handshake
    if (parsingTlsHandshake)
        return true;

    // the 1st HTTP request on a bumped connection
    if (!parsedBumpedRequestCount && switchedToHttps())
        return true;
#endif

    // the 1st HTTP(S) request on a connection to an intercepting port
    if (!pipeline.nrequests && transparent())
        return true;

    return false;
}
4088
4089 NotePairs::Pointer
4090 ConnStateData::notes()
4091 {
4092 if (!theNotes)
4093 theNotes = new NotePairs;
4094 return theNotes;
4095 }
4096
4097 std::ostream &
4098 operator <<(std::ostream &os, const ConnStateData::PinnedIdleContext &pic)
4099 {
4100 return os << pic.connection << ", request=" << pic.request;
4101 }
4102
4103 std::ostream &
4104 operator <<(std::ostream &os, const ConnStateData::ServerConnectionContext &scc)
4105 {
4106 return os << scc.conn_ << ", srv_bytes=" << scc.preReadServerBytes.length();
4107 }
4108