]> git.ipfire.org Git - thirdparty/squid.git/blob - src/client_side.cc
cache_log_message directive (#775)
[thirdparty/squid.git] / src / client_side.cc
1 /*
2 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 33 Client-side Routines */
10
11 /**
12 \defgroup ClientSide Client-Side Logics
13 *
14 \section cserrors Errors and client side
15 *
16 \par Problem the first:
17 * the store entry is no longer authoritative on the
18 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
19 * of client_side_reply.c.
20 * Problem the second: resources are wasted if we delay in cleaning up.
21 * Problem the third we can't depend on a connection close to clean up.
22 *
23 \par Nice thing the first:
24 * Any step in the stream can callback with data
25 * representing an error.
26 * Nice thing the second: once you stop requesting reads from upstream,
27 * upstream can be stopped too.
28 *
29 \par Solution #1:
30 * Error has a callback mechanism to hand over a membuf
31 * with the error content. The failing node pushes that back as the
32 * reply. Can this be generalised to reduce duplicate efforts?
33 * A: Possibly. For now, only one location uses this.
34 * How to deal with pre-stream errors?
35 * Tell client_side_reply that we *want* an error page before any
36 * stream calls occur. Then we simply read as normal.
37 *
38 *
39 \section pconn_logic Persistent connection logic:
40 *
41 \par
42 * requests (httpClientRequest structs) get added to the connection
43 * list, with the current one being chr
44 *
45 \par
46 * The request is *immediately* kicked off, and data flows through
47 * to clientSocketRecipient.
48 *
49 \par
50 * If the data that arrives at clientSocketRecipient is not for the current
51 * request, clientSocketRecipient simply returns, without requesting more
52 * data, or sending it.
53 *
54 \par
55 * ConnStateData::kick() will then detect the presence of data in
56 * the next ClientHttpRequest, and will send it, re-establishing the
57 * data flow.
58 */
59
60 #include "squid.h"
61 #include "acl/FilledChecklist.h"
62 #include "anyp/PortCfg.h"
63 #include "base/Subscription.h"
64 #include "base/TextException.h"
65 #include "CachePeer.h"
66 #include "client_db.h"
67 #include "client_side.h"
68 #include "client_side_reply.h"
69 #include "client_side_request.h"
70 #include "ClientRequestContext.h"
71 #include "clientStream.h"
72 #include "comm.h"
73 #include "comm/Connection.h"
74 #include "comm/Loops.h"
75 #include "comm/Read.h"
76 #include "comm/TcpAcceptor.h"
77 #include "comm/Write.h"
78 #include "CommCalls.h"
79 #include "DebugMessages.h"
80 #include "error/ExceptionErrorDetail.h"
81 #include "errorpage.h"
82 #include "fd.h"
83 #include "fde.h"
84 #include "fqdncache.h"
85 #include "FwdState.h"
86 #include "globals.h"
87 #include "helper.h"
88 #include "helper/Reply.h"
89 #include "http.h"
90 #include "http/one/RequestParser.h"
91 #include "http/one/TeChunkedParser.h"
92 #include "http/Stream.h"
93 #include "HttpHdrContRange.h"
94 #include "HttpHeaderTools.h"
95 #include "HttpReply.h"
96 #include "HttpRequest.h"
97 #include "ident/Config.h"
98 #include "ident/Ident.h"
99 #include "internal.h"
100 #include "ipc/FdNotes.h"
101 #include "ipc/StartListening.h"
102 #include "log/access_log.h"
103 #include "MemBuf.h"
104 #include "MemObject.h"
105 #include "mime_header.h"
106 #include "parser/Tokenizer.h"
107 #include "profiler/Profiler.h"
108 #include "proxyp/Header.h"
109 #include "proxyp/Parser.h"
110 #include "sbuf/Stream.h"
111 #include "security/Io.h"
112 #include "security/CommunicationSecrets.h"
113 #include "security/NegotiationHistory.h"
114 #include "security/KeyLog.h"
115 #include "servers/forward.h"
116 #include "SquidConfig.h"
117 #include "SquidTime.h"
118 #include "StatCounters.h"
119 #include "StatHist.h"
120 #include "Store.h"
121 #include "TimeOrTag.h"
122 #include "tools.h"
123
124 #if USE_AUTH
125 #include "auth/UserRequest.h"
126 #endif
127 #if USE_DELAY_POOLS
128 #include "ClientInfo.h"
129 #include "MessageDelayPools.h"
130 #endif
131 #if USE_OPENSSL
132 #include "ssl/bio.h"
133 #include "ssl/context_storage.h"
134 #include "ssl/gadgets.h"
135 #include "ssl/helper.h"
136 #include "ssl/ProxyCerts.h"
137 #include "ssl/ServerBump.h"
138 #include "ssl/support.h"
139 #endif
140
141 // for tvSubUsec() which should be in SquidTime.h
142 #include "util.h"
143
144 #include <climits>
145 #include <cmath>
146 #include <limits>
147
148 #if HAVE_SYSTEMD_SD_DAEMON_H
149 #include <systemd/sd-daemon.h>
150 #endif
151
/// dials clientListenerConnectionOpened call
class ListeningStartedDialer: public CallDialer, public Ipc::StartListeningCb
{
public:
    /// the function signature this dialer forwards to once listening starts
    typedef void (*Handler)(AnyP::PortCfgPointer &portCfg, const Ipc::FdNoteId note, const Subscription::Pointer &sub);
    ListeningStartedDialer(Handler aHandler, AnyP::PortCfgPointer &aPortCfg, const Ipc::FdNoteId note, const Subscription::Pointer &aSub):
        handler(aHandler), portCfg(aPortCfg), portTypeNote(note), sub(aSub) {}

    virtual void print(std::ostream &os) const {
        startPrint(os) <<
        ", " << FdNote(portTypeNote) << " port=" << (void*)&portCfg << ')';
    }

    // this dialer is always ready; dialing simply forwards the stored arguments
    virtual bool canDial(AsyncCall &) const { return true; }
    virtual void dial(AsyncCall &) { (handler)(portCfg, portTypeNote, sub); }

public:
    Handler handler;

private:
    AnyP::PortCfgPointer portCfg;   ///< from HttpPortList
    Ipc::FdNoteId portTypeNote;    ///< Type of IPC socket being opened
    Subscription::Pointer sub; ///< The handler to be subscribed for this connection listener
};
176
177 static void clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub);
178
179 static IOACB httpAccept;
180 #if USE_IDENT
181 static IDCB clientIdentDone;
182 #endif
183 static int clientIsContentLengthValid(HttpRequest * r);
184 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength);
185
186 static void clientUpdateStatHistCounters(const LogTags &logType, int svc_time);
187 static void clientUpdateStatCounters(const LogTags &logType);
188 static void clientUpdateHierCounters(HierarchyLogEntry *);
189 static bool clientPingHasFinished(ping_data const *aPing);
190 void prepareLogWithRequestDetails(HttpRequest *, AccessLogEntry::Pointer &);
191 static void ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn);
192
193 char *skipLeadingSpace(char *aString);
194
#if USE_IDENT
/// IDENT lookup callback: records the looked-up user identity (or a dash
/// placeholder when the lookup produced none) on the client connection.
static void
clientIdentDone(const char *ident, void *data)
{
    auto *connState = static_cast<ConnStateData *>(data);
    const char *const user = ident ? ident : dash_str;
    xstrncpy(connState->clientConnection->rfc931, user, USER_IDENT_SZ);
}
#endif
203
204 void
205 clientUpdateStatCounters(const LogTags &logType)
206 {
207 ++statCounter.client_http.requests;
208
209 if (logType.isTcpHit())
210 ++statCounter.client_http.hits;
211
212 if (logType.oldType == LOG_TCP_HIT)
213 ++statCounter.client_http.disk_hits;
214 else if (logType.oldType == LOG_TCP_MEM_HIT)
215 ++statCounter.client_http.mem_hits;
216 }
217
218 void
219 clientUpdateStatHistCounters(const LogTags &logType, int svc_time)
220 {
221 statCounter.client_http.allSvcTime.count(svc_time);
222 /**
223 * The idea here is not to be complete, but to get service times
224 * for only well-defined types. For example, we don't include
225 * LOG_TCP_REFRESH_FAIL because its not really a cache hit
226 * (we *tried* to validate it, but failed).
227 */
228
229 switch (logType.oldType) {
230
231 case LOG_TCP_REFRESH_UNMODIFIED:
232 statCounter.client_http.nearHitSvcTime.count(svc_time);
233 break;
234
235 case LOG_TCP_INM_HIT:
236 case LOG_TCP_IMS_HIT:
237 statCounter.client_http.nearMissSvcTime.count(svc_time);
238 break;
239
240 case LOG_TCP_HIT:
241
242 case LOG_TCP_MEM_HIT:
243
244 case LOG_TCP_OFFLINE_HIT:
245 statCounter.client_http.hitSvcTime.count(svc_time);
246 break;
247
248 case LOG_TCP_MISS:
249
250 case LOG_TCP_CLIENT_REFRESH_MISS:
251 statCounter.client_http.missSvcTime.count(svc_time);
252 break;
253
254 default:
255 /* make compiler warnings go away */
256 break;
257 }
258 }
259
260 bool
261 clientPingHasFinished(ping_data const *aPing)
262 {
263 if (0 != aPing->stop.tv_sec && 0 != aPing->start.tv_sec)
264 return true;
265
266 return false;
267 }
268
/// Updates hierarchy (peer-selection) statistics for one transaction, keyed
/// on how the next hop was chosen: cache digests, ICP queries, or netdb.
void
clientUpdateHierCounters(HierarchyLogEntry * someEntry)
{
    ping_data *i;

    switch (someEntry->code) {
#if USE_CACHE_DIGESTS

    // a peer was selected via cache digest lookup
    case CD_PARENT_HIT:

    case CD_SIBLING_HIT:
        ++ statCounter.cd.times_used;
        break;
#endif

    // these outcomes all imply an ICP query round took place
    case SIBLING_HIT:

    case PARENT_HIT:

    case FIRST_PARENT_MISS:

    case CLOSEST_PARENT_MISS:
        ++ statCounter.icp.times_used;
        i = &someEntry->ping;

        // record query round-trip time only for completed pings
        if (clientPingHasFinished(i))
            statCounter.icp.querySvcTime.count(tvSubUsec(i->start, i->stop));

        if (i->timeout)
            ++ statCounter.icp.query_timeouts;

        break;

    // netdb (network measurement database) based selection
    case CLOSEST_PARENT:

    case CLOSEST_DIRECT:
        ++ statCounter.netdb.times_used;

        break;

    default:
        break;
    }
}
313
/// Rolls this finished transaction into the global statistics: request/hit
/// counters, error count, service-time histograms, and hierarchy counters.
void
ClientHttpRequest::updateCounters()
{
    clientUpdateStatCounters(logType);

    if (request->error)
        ++ statCounter.client_http.errors;

    // service time measured from transaction start to now, in milliseconds
    clientUpdateStatHistCounters(logType,
                                 tvSubMsec(al->cache.start_time, current_time));

    clientUpdateHierCounters(&request->hier);
}
327
/// Copies request-derived details (headers, adaptation metadata, ICAP timing,
/// method/version, hierarchy info) into the access-log entry before logging.
void
prepareLogWithRequestDetails(HttpRequest * request, AccessLogEntry::Pointer &aLogEntry)
{
    assert(request);
    assert(aLogEntry != NULL);

    // header logging is optional and relatively expensive; gated by config
    if (Config.onoff.log_mime_hdrs) {
        MemBuf mb;
        mb.init();
        request->header.packInto(&mb);
        //This is the request after adaptation or redirection
        aLogEntry->headers.adapted_request = xstrdup(mb.buf);

        // the virgin request is saved to aLogEntry->request
        if (aLogEntry->request) {
            mb.reset();
            aLogEntry->request->header.packInto(&mb);
            aLogEntry->headers.request = xstrdup(mb.buf);
        }

#if USE_ADAPTATION
        // record the last adaptation meta headers, if any adaptation happened
        const Adaptation::History::Pointer ah = request->adaptLogHistory();
        if (ah != NULL) {
            mb.reset();
            ah->lastMeta.packInto(&mb);
            aLogEntry->adapt.last_meta = xstrdup(mb.buf);
        }
#endif

        mb.clean();
    }

#if ICAP_CLIENT
    const Adaptation::Icap::History::Pointer ih = request->icapHistory();
    if (ih != NULL)
        ih->processingTime(aLogEntry->icap.processingTime);
#endif

    aLogEntry->http.method = request->method;
    aLogEntry->http.version = request->http_ver;
    aLogEntry->hier = request->hier;
    aLogEntry->cache.extuser = request->extacl_user.termedBuf();

    // Adapted request, if any, inherits and then collects all the stats, but
    // the virgin request gets logged instead; copy the stats to log them.
    // TODO: avoid losses by keeping these stats in a shared history object?
    if (aLogEntry->request) {
        aLogEntry->request->dnsWait = request->dnsWait;
        aLogEntry->request->error = request->error;
    }
}
379
/// Finalizes and emits the access-log entry for this transaction, then
/// (policy permitting) updates performance counters and the client database.
void
ClientHttpRequest::logRequest()
{
    // a transaction with nothing sent and no log tag never produced a reply
    if (!out.size && logType.oldType == LOG_TAG_NONE)
        debugs(33, 5, "logging half-baked transaction: " << log_uri);

    al->icp.opcode = ICP_INVALID;
    al->url = log_uri;
    debugs(33, 9, "clientLogRequest: al.url='" << al->url << "'");

    // prefer a reply already attached to the log entry; otherwise fall back
    // to the freshest reply of the logging store entry (if one exists)
    const auto findReply = [this]() -> const HttpReply * {
        if (al->reply)
            return al->reply.getRaw();
        if (const auto le = loggingEntry())
            return le->hasFreshestReply();
        return nullptr;
    };
    if (const auto reply = findReply()) {
        al->http.code = reply->sline.status();
        al->http.content_type = reply->content_type.termedBuf();
    }

    debugs(33, 9, "clientLogRequest: http.code='" << al->http.code << "'");

    if (loggingEntry() && loggingEntry()->mem_obj && loggingEntry()->objectLen() >= 0)
        al->cache.objectSize = loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?

    al->http.clientRequestSz.header = req_sz;
    // the virgin request is saved to al->request
    if (al->request && al->request->body_pipe)
        al->http.clientRequestSz.payloadData = al->request->body_pipe->producedSize();
    al->http.clientReplySz.header = out.headers_sz;
    // XXX: calculate without payload encoding or headers !!
    al->http.clientReplySz.payloadData = out.size - out.headers_sz; // pretend its all un-encoded data for now.

    al->cache.highOffset = out.offset;

    al->cache.code = logType;

    // total transaction response time, from start to now
    tvSub(al->cache.trTime, al->cache.start_time, current_time);

    if (request)
        prepareLogWithRequestDetails(request, al);

#if USE_OPENSSL && 0

    /* This is broken. Fails if the connection has been closed. Needs
     * to snarf the ssl details some place earlier..
     */
    if (getConn() != NULL)
        al->cache.ssluser = sslGetUserEmail(fd_table[getConn()->fd].ssl);

#endif

    /* Add notes (if we have a request to annotate) */
    if (request) {
        SBuf matched;
        for (auto h: Config.notes) {
            if (h->match(request, al->reply.getRaw(), al, matched)) {
                request->notes()->add(h->key(), matched);
                debugs(33, 3, h->key() << " " << matched);
            }
        }
        // The al->notes and request->notes must point to the same object.
        al->syncNotes(request);
    }

    ACLFilledChecklist checklist(NULL, request, NULL);
    if (al->reply) {
        checklist.reply = al->reply.getRaw();
        HTTPMSGLOCK(checklist.reply);
    }

    if (request) {
        HTTPMSGUNLOCK(al->adapted_request);
        al->adapted_request = request;
        HTTPMSGLOCK(al->adapted_request);
    }
    // no need checklist.syncAle(): already synced
    checklist.al = al;
    accessLogLog(al, &checklist);

    // the stats_collection ACL (if configured) decides whether this
    // transaction is counted in performance stats and the client database
    bool updatePerformanceCounters = true;
    if (Config.accessList.stats_collection) {
        ACLFilledChecklist statsCheck(Config.accessList.stats_collection, request, NULL);
        statsCheck.al = al;
        if (al->reply) {
            statsCheck.reply = al->reply.getRaw();
            HTTPMSGLOCK(statsCheck.reply);
        }
        updatePerformanceCounters = statsCheck.fastCheck().allowed();
    }

    if (updatePerformanceCounters) {
        if (request)
            updateCounters();

        if (getConn() != NULL && getConn()->clientConnection != NULL)
            clientdbUpdate(getConn()->clientConnection->remote, logType, AnyP::PROTO_HTTP, out.size);
    }
}
481
/// Releases per-transaction resources: URI strings, range-iteration state,
/// the request object, and (by aborting the stream) downstream nodes.
void
ClientHttpRequest::freeResources()
{
    safe_free(uri);
    safe_free(redirect.location);
    range_iter.boundary.clean();
    clearRequest();

    // aborting the tail node tears down the whole client stream chain
    if (client_stream.tail)
        clientStreamAbort((clientStreamNode *)client_stream.tail->data, this);
}
493
494 void
495 httpRequestFree(void *data)
496 {
497 ClientHttpRequest *http = (ClientHttpRequest *)data;
498 assert(http != NULL);
499 delete http;
500 }
501
/* This is a handler normally called by comm_close() */
/// Comm close callback: schedules asynchronous destruction of this job.
void ConnStateData::connStateClosed(const CommCloseCbParams &)
{
    deleteThis("ConnStateData::connStateClosed");
}
507
#if USE_AUTH
/// Installs, replaces, or erases the connection-auth credentials, enforcing
/// the security rules described inline; \p by names the caller for logging.
void
ConnStateData::setAuth(const Auth::UserRequest::Pointer &aur, const char *by)
{
    // no credentials recorded yet: remember the offered ones (if any)
    if (auth_ == NULL) {
        if (aur != NULL) {
            debugs(33, 2, "Adding connection-auth to " << clientConnection << " from " << by);
            auth_ = aur;
        }
        return;
    }

    // clobbered with self-pointer
    // NP: something nasty is going on in Squid, but harmless.
    if (aur == auth_) {
        debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection << " from " << by);
        return;
    }

    /*
     * Connection-auth relies on a single set of credentials being preserved
     * for all requests on a connection once they have been setup.
     * There are several things which need to happen to preserve security
     * when connection-auth credentials change unexpectedly or are unset.
     *
     * 1) auth helper released from any active state
     *
     *   They can only be reserved by a handshake process which this
     *   connection can now never complete.
     *   This prevents helpers hanging when their connections close.
     *
     * 2) pinning is expected to be removed and server conn closed
     *
     *   The upstream link is authenticated with the same credentials.
     *   Expecting the same level of consistency we should have received.
     *   This prevents upstream being faced with multiple or missing
     *   credentials after authentication.
     *   NP: un-pin is left to the cleanup in ConnStateData::swanSong()
     *       we just trigger that cleanup here via comm_reset_close() or
     *       ConnStateData::stopReceiving()
     *
     * 3) the connection needs to close.
     *
     *   This prevents attackers injecting requests into a connection,
     *   or gateways wrongly multiplexing users into a single connection.
     *
     *   When credentials are missing closure needs to follow an auth
     *   challenge for best recovery by the client.
     *
     *   When credentials change there is nothing we can do but abort as
     *   fast as possible. Sending TCP RST instead of an HTTP response
     *   is the best-case action.
     */

    // clobbered with nul-pointer
    if (aur == NULL) {
        debugs(33, 2, "WARNING: Graceful closure on " << clientConnection << " due to connection-auth erase from " << by);
        auth_->releaseAuthServer();
        auth_ = NULL;
        // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
        // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
        //     we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
        stopReceiving("connection-auth removed");
        return;
    }

    // clobbered with alternative credentials
    if (aur != auth_) {
        debugs(33, 2, "ERROR: Closing " << clientConnection << " due to change of connection-auth from " << by);
        auth_->releaseAuthServer();
        auth_ = NULL;
        // this is a fatal type of problem.
        // Close the connection immediately with TCP RST to abort all traffic flow
        comm_reset_close(clientConnection);
        return;
    }

    /* NOT REACHABLE */
}
#endif
588
589 void
590 ConnStateData::resetReadTimeout(const time_t timeout)
591 {
592 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
593 AsyncCall::Pointer callback = JobCallback(33, 5, TimeoutDialer, this, ConnStateData::requestTimeout);
594 commSetConnTimeout(clientConnection, timeout, callback);
595 }
596
597 void
598 ConnStateData::extendLifetime()
599 {
600 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
601 AsyncCall::Pointer callback = JobCallback(5, 4, TimeoutDialer, this, ConnStateData::lifetimeTimeout);
602 commSetConnTimeout(clientConnection, Config.Timeout.lifetime, callback);
603 }
604
// cleans up before destructor is called
void
ConnStateData::swanSong()
{
    debugs(33, 2, HERE << clientConnection);

    flags.readMore = false;
    clientdbEstablished(clientConnection->remote, -1); /* decrement */

    // abort any transactions still in the pipeline and account for the close
    terminateAll(ERR_NONE, LogTagsErrors());
    checkLogging();

    // XXX: Closing pinned conn is too harsh: The Client may want to continue!
    unpinConnection(true);

    Server::swanSong();

#if USE_AUTH
    // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
    setAuth(NULL, "ConnStateData::SwanSong cleanup");
#endif

    // the destructor warns if this flag was never set
    flags.swanSang = true;
}
629
630 void
631 ConnStateData::callException(const std::exception &ex)
632 {
633 Server::callException(ex); // logs ex and stops the job
634
635 ErrorDetail::Pointer errorDetail;
636 if (const auto tex = dynamic_cast<const TextException*>(&ex))
637 errorDetail = new ExceptionErrorDetail(tex->id());
638 else
639 errorDetail = new ExceptionErrorDetail(Here().id());
640 updateError(ERR_GATEWAY_FAILURE, errorDetail);
641 }
642
643 void
644 ConnStateData::updateError(const Error &error)
645 {
646 if (const auto context = pipeline.front()) {
647 const auto http = context->http;
648 assert(http);
649 http->updateError(error);
650 } else {
651 bareError.update(error);
652 }
653 }
654
655 bool
656 ConnStateData::isOpen() const
657 {
658 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
659 Comm::IsConnOpen(clientConnection) &&
660 !fd_table[clientConnection->fd].closing();
661 }
662
ConnStateData::~ConnStateData()
{
    debugs(33, 3, HERE << clientConnection);

    // by now the connection should have been closed and swanSong() run;
    // warn loudly if either invariant was violated
    if (isOpen())
        debugs(33, DBG_IMPORTANT, "BUG: ConnStateData did not close " << clientConnection);

    if (!flags.swanSang)
        debugs(33, DBG_IMPORTANT, "BUG: ConnStateData was not destroyed properly; " << clientConnection);

    // detach from the request body pipe without signalling completion
    if (bodyPipe != NULL)
        stopProducingFor(bodyPipe, false);

    delete bodyParser; // TODO: pool

#if USE_OPENSSL
    delete sslServerBump;
#endif
}
682
683 /**
684 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
685 * This is the client-side persistent connection flag. We need
686 * to set this relatively early in the request processing
687 * to handle hacks for broken servers and clients.
688 */
689 void
690 clientSetKeepaliveFlag(ClientHttpRequest * http)
691 {
692 HttpRequest *request = http->request;
693
694 debugs(33, 3, "http_ver = " << request->http_ver);
695 debugs(33, 3, "method = " << request->method);
696
697 // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
698 request->flags.proxyKeepalive = request->persistent();
699 }
700
701 /// checks body length of non-chunked requests
702 static int
703 clientIsContentLengthValid(HttpRequest * r)
704 {
705 // No Content-Length means this request just has no body, but conflicting
706 // Content-Lengths mean a message framing error (RFC 7230 Section 3.3.3 #4).
707 if (r->header.conflictingContentLength())
708 return 0;
709
710 switch (r->method.id()) {
711
712 case Http::METHOD_GET:
713
714 case Http::METHOD_HEAD:
715 /* We do not want to see a request entity on GET/HEAD requests */
716 return (r->content_length <= 0 || Config.onoff.request_entities);
717
718 default:
719 /* For other types of requests we don't care */
720 return 1;
721 }
722
723 /* NOT REACHED */
724 }
725
726 int
727 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)
728 {
729 if (Config.maxRequestBodySize &&
730 bodyLength > Config.maxRequestBodySize)
731 return 1; /* too large */
732
733 return 0;
734 }
735
/// Whether this transaction's request asks for multiple ranges
/// (delegates to the HttpRequest).
bool
ClientHttpRequest::multipartRangeRequest() const
{
    return request->multipartRangeRequest();
}
741
/// Appends the terminating multipart boundary ("--boundary--") to mb,
/// closing a multipart/byteranges reply body.
void
clientPackTermBound(String boundary, MemBuf *mb)
{
    mb->appendf("\r\n--" SQUIDSTRINGPH "--\r\n", SQUIDSTRINGPRINT(boundary));
    debugs(33, 6, "buf offset: " << mb->size);
}
748
/// Appends one multipart range-part header to mb: the boundary line followed
/// by Content-Type (if present in rep) and a Content-Range for \p spec.
void
clientPackRangeHdr(const HttpReplyPointer &rep, const HttpHdrRangeSpec * spec, String boundary, MemBuf * mb)
{
    HttpHeader hdr(hoReply);
    assert(rep);
    assert(spec);

    /* put boundary */
    debugs(33, 5, "appending boundary: " << boundary);
    /* rfc2046 requires to _prepend_ boundary with <crlf>! */
    mb->appendf("\r\n--" SQUIDSTRINGPH "\r\n", SQUIDSTRINGPRINT(boundary));

    /* stuff the header with required entries and pack it */

    if (rep->header.has(Http::HdrType::CONTENT_TYPE))
        hdr.putStr(Http::HdrType::CONTENT_TYPE, rep->header.getStr(Http::HdrType::CONTENT_TYPE));

    httpHeaderAddContRange(&hdr, *spec, rep->content_length);

    hdr.packInto(mb);
    hdr.clean();

    /* append <crlf> (we packed a header, not a reply) */
    mb->append("\r\n", 2);
}
774
/** returns expected content length for multi-range replies
 * note: assumes that httpHdrRangeCanonize has already been called
 * warning: assumes that HTTP headers for individual ranges at the
 *          time of the actual assembly will be exactly the same as
 *          the headers when clientMRangeCLen() is called */
int64_t
ClientHttpRequest::mRangeCLen() const
{
    int64_t clen = 0;
    MemBuf mb;

    assert(memObject());

    mb.init();
    HttpHdrRange::iterator pos = request->range->begin();

    // sum, per range: the part header size plus the range payload length
    while (pos != request->range->end()) {
        /* account for headers for this range */
        mb.reset();
        clientPackRangeHdr(&storeEntry()->mem().freshestReply(),
                           *pos, range_iter.boundary, &mb);
        clen += mb.size;

        /* account for range content */
        clen += (*pos)->length;

        debugs(33, 6, "clientMRangeCLen: (clen += " << mb.size << " + " << (*pos)->length << ") == " << clen);
        ++pos;
    }

    /* account for the terminating boundary */
    mb.reset();

    clientPackTermBound(range_iter.boundary, &mb);

    clen += mb.size;

    mb.clean();

    return clen;
}
816
817 /**
818 * generates a "unique" boundary string for multipart responses
819 * the caller is responsible for cleaning the string */
820 String
821 ClientHttpRequest::rangeBoundaryStr() const
822 {
823 const char *key;
824 String b(APP_FULLNAME);
825 b.append(":",1);
826 key = storeEntry()->getMD5Text();
827 b.append(key, strlen(key));
828 return b;
829 }
830
/**
 * Write a chunk of data to a client socket. If the reply is present,
 * send the reply headers down the wire too, and clean them up when
 * finished.
 * Pre-condition:
 *   The request is one backed by a connection, not an internal request.
 *   data context is not NULL
 *   There are no more entries in the stream chain.
 */
void
clientSocketRecipient(clientStreamNode * node, ClientHttpRequest * http,
                      HttpReply * rep, StoreIOBuffer receivedData)
{
    // do not try to deliver if client already ABORTED
    if (!http->getConn() || !cbdataReferenceValid(http->getConn()) || !Comm::IsConnOpen(http->getConn()->clientConnection))
        return;

    /* Test preconditions */
    assert(node != NULL);
    PROF_start(clientSocketRecipient);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    assert(node->node.next == NULL);
    Http::StreamPointer context = dynamic_cast<Http::Stream *>(node->data.getRaw());
    assert(context != NULL);

    /* TODO: check offset is what we asked for */

    // TODO: enforces HTTP/1 MUST on pipeline order, but is irrelevant to HTTP/2
    // deliver now only if this context is at the front of the pipeline and
    // no 1xx control message is still pending; otherwise defer the data
    if (context != http->getConn()->pipeline.front())
        context->deferRecipientForLater(node, rep, receivedData);
    else if (http->getConn()->cbControlMsgSent) // 1xx to the user is pending
        context->deferRecipientForLater(node, rep, receivedData);
    else
        http->getConn()->handleReply(rep, receivedData);

    PROF_stop(clientSocketRecipient);
}
873
/**
 * Called when a downstream node is no longer interested in
 * our data. As we are a terminal node, this means on aborts
 * only
 */
void
clientSocketDetach(clientStreamNode * node, ClientHttpRequest * http)
{
    /* Test preconditions */
    assert(node != NULL);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    /* Set null by ContextFree */
    assert(node->node.next == NULL);
    /* this is the assert discussed above */
    assert(NULL == dynamic_cast<Http::Stream *>(node->data.getRaw()));
    /* We are only called when the client socket shutsdown.
     * Tell the prev pipeline member we're finished
     */
    clientStreamDetach(node, http);
}
899
/// Prepares to receive the next request on this (now idle) connection:
/// updates the FD note, arms the idle timeout, and schedules a read.
void
ConnStateData::readNextRequest()
{
    debugs(33, 5, HERE << clientConnection << " reading next req");

    fd_note(clientConnection->fd, "Idle client: Waiting for next request");
    /**
     * Set the timeout BEFORE calling readSomeData().
     */
    resetReadTimeout(clientConnection->timeLeft(idleTimeout()));

    readSomeData();
    /** Please don't do anything with the FD past here! */
}
914
/// Re-delivers response data that was deferred while an earlier pipelined
/// request was still being answered (see clientSocketRecipient()).
static void
ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn)
{
    debugs(33, 2, HERE << conn->clientConnection << " Sending next");

    /** If the client stream is waiting on a socket write to occur, then */

    if (deferredRequest->flags.deferred) {
        /** NO data is allowed to have been sent. */
        assert(deferredRequest->http->out.size == 0);
        /** defer now. */
        clientSocketRecipient(deferredRequest->deferredparams.node,
                              deferredRequest->http,
                              deferredRequest->deferredparams.rep,
                              deferredRequest->deferredparams.queuedBuffer);
    }

    /** otherwise, the request is still active in a callback somewhere,
     * and we are done
     */
}
936
/// Resumes connection processing after a response has been fully delivered:
/// closes dead/errored connections, parses any buffered pipelined request,
/// pushes deferred data, or schedules the next read.
void
ConnStateData::kick()
{
    if (!Comm::IsConnOpen(clientConnection)) {
        debugs(33, 2, clientConnection << " Connection was closed");
        return;
    }

    // a pinned connection is useless once its server side is gone
    if (pinning.pinned && !Comm::IsConnOpen(pinning.serverConnection)) {
        debugs(33, 2, clientConnection << " Connection was pinned but server side gone. Terminating client connection");
        clientConnection->close();
        return;
    }

    /** \par
     * We are done with the response, and we are either still receiving request
     * body (early response!) or have already stopped receiving anything.
     *
     * If we are still receiving, then clientParseRequest() below will fail.
     * (XXX: but then we will call readNextRequest() which may succeed and
     * execute a smuggled request as we are not done with the current request).
     *
     * If we stopped because we got everything, then try the next request.
     *
     * If we stopped receiving because of an error, then close now to avoid
     * getting stuck and to prevent accidental request smuggling.
     */

    if (const char *reason = stoppedReceiving()) {
        debugs(33, 3, "closing for earlier request error: " << reason);
        clientConnection->close();
        return;
    }

    /** \par
     * Attempt to parse a request from the request buffer.
     * If we've been fed a pipelined request it may already
     * be in our read buffer.
     *
     \par
     * This needs to fall through - if we're unlucky and parse the _last_ request
     * from our read buffer we may never re-register for another client read.
     */

    if (clientParseRequests()) {
        debugs(33, 3, clientConnection << ": parsed next request from buffer");
    }

    /** \par
     * Either we need to kick-start another read or, if we have
     * a half-closed connection, kill it after the last request.
     * This saves waiting for half-closed connections to finished being
     * half-closed _AND_ then, sometimes, spending "Timeout" time in
     * the keepalive "Waiting for next request" state.
     */
    if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
        debugs(33, 3, "half-closed client with no pending requests, closing");
        clientConnection->close();
        return;
    }

    /** \par
     * At this point we either have a parsed request (which we've
     * kicked off the processing for) or not. If we have a deferred
     * request (parsed but deferred for pipelining processing reasons)
     * then look at processing it. If not, simply kickstart
     * another read.
     */
    Http::StreamPointer deferredRequest = pipeline.front();
    if (deferredRequest != nullptr) {
        debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded");
        ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
    } else if (flags.readMore) {
        debugs(33, 3, clientConnection << ": calling readNextRequest()");
        readNextRequest();
    } else {
        // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
        debugs(33, DBG_IMPORTANT, MYNAME << "abandoning " << clientConnection);
    }
}
1017
1018 void
1019 ConnStateData::stopSending(const char *error)
1020 {
1021 debugs(33, 4, HERE << "sending error (" << clientConnection << "): " << error <<
1022 "; old receiving error: " <<
1023 (stoppedReceiving() ? stoppedReceiving_ : "none"));
1024
1025 if (const char *oldError = stoppedSending()) {
1026 debugs(33, 3, HERE << "already stopped sending: " << oldError);
1027 return; // nothing has changed as far as this connection is concerned
1028 }
1029 stoppedSending_ = error;
1030
1031 if (!stoppedReceiving()) {
1032 if (const int64_t expecting = mayNeedToReadMoreBody()) {
1033 debugs(33, 5, HERE << "must still read " << expecting <<
1034 " request body bytes with " << inBuf.length() << " unused");
1035 return; // wait for the request receiver to finish reading
1036 }
1037 }
1038
1039 clientConnection->close();
1040 }
1041
1042 void
1043 ConnStateData::afterClientWrite(size_t size)
1044 {
1045 if (pipeline.empty())
1046 return;
1047
1048 auto ctx = pipeline.front();
1049 if (size) {
1050 statCounter.client_http.kbytes_out += size;
1051 if (ctx->http->logType.isTcpHit())
1052 statCounter.client_http.hit_kbytes_out += size;
1053 }
1054 ctx->writeComplete(size);
1055 }
1056
1057 Http::Stream *
1058 ConnStateData::abortRequestParsing(const char *const uri)
1059 {
1060 ClientHttpRequest *http = new ClientHttpRequest(this);
1061 http->req_sz = inBuf.length();
1062 http->setErrorUri(uri);
1063 auto *context = new Http::Stream(clientConnection, http);
1064 StoreIOBuffer tempBuffer;
1065 tempBuffer.data = context->reqbuf;
1066 tempBuffer.length = HTTP_REQBUF_SZ;
1067 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
1068 clientReplyStatus, new clientReplyContext(http), clientSocketRecipient,
1069 clientSocketDetach, context, tempBuffer);
1070 return context;
1071 }
1072
/// RegisteredRunner API callback - Squid has been shut down.
/// Idle connections are terminated immediately; busy ones are left to finish
/// within the shutdown grace period (see endingShutdown()).
void
ConnStateData::startShutdown()
{
    // RegisteredRunner API callback - Squid has been shut down

    // if connection is idle terminate it now,
    // otherwise wait for grace period to end
    if (pipeline.empty())
        endingShutdown();
}
1083
/// RegisteredRunner API callback - the shutdown grace period is over.
/// Forces the client connection closed; cleanup happens in the close handler.
void
ConnStateData::endingShutdown()
{
    // RegisteredRunner API callback - Squid shutdown grace period is over

    // force the client connection to close immediately
    // swanSong() in the close handler will cleanup.
    if (Comm::IsConnOpen(clientConnection))
        clientConnection->close();
}
1094
1095 char *
1096 skipLeadingSpace(char *aString)
1097 {
1098 char *result = aString;
1099
1100 while (xisspace(*aString))
1101 ++aString;
1102
1103 return result;
1104 }
1105
/**
 * Locates the trailing "HTTP/..." protocol-version token on a request line.
 *
 * Scans backwards from 'end' toward the start of the line, skipping trailing
 * CR/LF octets, and returns a pointer to the token following the last
 * whitespace character iff that token starts with "HTTP/"
 * (case-insensitive). Returns NULL when no such token is found.
 *
 * 'end' defaults to NULL for backwards compatibility
 * remove default value if we ever get rid of NULL-terminated
 * request buffers.
 */
const char *
findTrailingHTTPVersion(const char *uriAndHTTPVersion, const char *end)
{
    if (NULL == end) {
        // no explicit end: stop at the first CR/LF (or the terminating NUL)
        end = uriAndHTTPVersion + strcspn(uriAndHTTPVersion, "\r\n");
        assert(end);
    }

    for (; end > uriAndHTTPVersion; --end) {
        if (*end == '\n' || *end == '\r')
            continue; // ignore line terminators at the tail

        if (xisspace(*end)) {
            // the candidate version token starts right after this whitespace
            if (strncasecmp(end + 1, "HTTP/", 5) == 0)
                return end + 1;
            else
                break; // last token is not an HTTP-version; give up
        }
    }

    return NULL;
}
1133
/// Rebuilds an absolute URL for a request received on an accelerator
/// (reverse-proxy) port, honoring the port's vhost/vport/defaultsite options.
/// Returns a malloc'ed URI string, or nullptr when the provided URI should be
/// used as-is (already absolute, cache_object scheme, or no rewrite rule hit).
static char *
prepareAcceleratedURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
{
    int vhost = conn->port->vhost;
    int vport = conn->port->vport;
    static char ipbuf[MAX_IPSTRLEN];

    /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */

    static const SBuf cache_object("cache_object://");
    if (hp->requestUri().startsWith(cache_object))
        return nullptr; /* already in good shape */

    // XXX: re-use proper URL parser for this
    SBuf url = hp->requestUri(); // use full provided URI if we abort
    do { // use a loop so we can break out of it
        ::Parser::Tokenizer tok(url);
        if (tok.skip('/')) // origin-form URL already.
            break;

        if (conn->port->vhost)
            return nullptr; /* already in good shape */

        // skip the URI scheme
        static const CharacterSet uriScheme = CharacterSet("URI-scheme","+-.") + CharacterSet::ALPHA + CharacterSet::DIGIT;
        static const SBuf uriSchemeEnd("://");
        if (!tok.skipAll(uriScheme) || !tok.skip(uriSchemeEnd))
            break;

        // skip the authority segment
        // RFC 3986 complex nested ABNF for "authority" boils down to this:
        static const CharacterSet authority = CharacterSet("authority","-._~%:@[]!$&'()*+,;=") +
                                              CharacterSet::HEXDIG + CharacterSet::ALPHA + CharacterSet::DIGIT;
        if (!tok.skipAll(authority))
            break;

        // whatever remains after the authority is the path (plus query/fragment)
        static const SBuf slashUri("/");
        const SBuf t = tok.remaining();
        if (t.isEmpty())
            url = slashUri;
        else if (t[0]=='/') // looks like path
            url = t;
        else if (t[0]=='?' || t[0]=='#') { // looks like query or fragment. fix '/'
            url = slashUri;
            url.append(t);
        } // else do nothing. invalid path

    } while(false);

#if SHOULD_REJECT_UNKNOWN_URLS
    // reject URI which are not well-formed even after the processing above
    if (url.isEmpty() || url[0] != '/') {
        hp->parseStatusCode = Http::scBadRequest;
        return conn->abortRequestParsing("error:invalid-request");
    }
#endif

    // vport < 0 means "use the port the client actually connected to"
    if (vport < 0)
        vport = conn->clientConnection->local.port();

    char *receivedHost = nullptr;
    if (vhost && (receivedHost = hp->getHostHeaderField())) {
        // vhost mode: take the authority from the Host header
        SBuf host(receivedHost);
        debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport);
        if (vport > 0) {
            // remove existing :port (if any), cope with IPv6+ without port
            const auto lastColonPos = host.rfind(':');
            if (lastColonPos != SBuf::npos && *host.rbegin() != ']') {
                host.chop(0, lastColonPos); // truncate until the last colon
            }
            host.appendf(":%d", vport);
        } // else nothing to alter port-wise.
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const auto url_sz = scheme.length() + host.length() + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH SQUIDSBUFPH, SQUIDSBUFPRINT(scheme), SQUIDSBUFPRINT(host), SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VHOST REWRITE: " << uri);
        return uri;
    } else if (conn->port->defaultsite /* && !vhost */) {
        // defaultsite mode: use the configured site name as the authority
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn->port->defaultsite << " + vport=" << vport);
        char vportStr[32];
        vportStr[0] = '\0';
        if (vport > 0) {
            snprintf(vportStr, sizeof(vportStr),":%d",vport);
        }
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + strlen(conn->port->defaultsite) + sizeof(vportStr) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s%s" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), conn->port->defaultsite, vportStr, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << uri);
        return uri;
    } else if (vport > 0 /* && (!vhost || no Host:) */) {
        debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport);
        /* Put the local socket IP address as the hostname, with whatever vport we found */
        conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + sizeof(ipbuf) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), ipbuf, vport, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VPORT REWRITE: " << uri);
        return uri;
    }

    return nullptr;
}
1241
1242 static char *
1243 buildUrlFromHost(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1244 {
1245 char *uri = nullptr;
1246 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1247 if (const char *host = hp->getHostHeaderField()) {
1248 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1249 const int url_sz = scheme.length() + strlen(host) + hp->requestUri().length() + 32;
1250 uri = static_cast<char *>(xcalloc(url_sz, 1));
1251 snprintf(uri, url_sz, SQUIDSBUFPH "://%s" SQUIDSBUFPH,
1252 SQUIDSBUFPRINT(scheme),
1253 host,
1254 SQUIDSBUFPRINT(hp->requestUri()));
1255 }
1256 return uri;
1257 }
1258
/// Builds an absolute URL for a request received after switching to HTTPS.
/// Origin-form URIs are completed using the Host header or, failing that
/// (OpenSSL builds only), the remembered CONNECT destination, preferring the
/// TLS client SNI over the raw CONNECT authority.
/// \returns nullptr when the request URI is already absolute (use as-is)
char *
ConnStateData::prepareTlsSwitchingURL(const Http1::RequestParserPointer &hp)
{
    Must(switchedToHttps());

    if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
        return nullptr; /* already in good shape */

    // try the Host header first
    char *uri = buildUrlFromHost(this, hp);
#if USE_OPENSSL
    if (!uri) {
        // no Host header: fall back to the intended CONNECT destination
        Must(tlsConnectPort);
        Must(!tlsConnectHostOrIp.isEmpty());
        SBuf useHost;
        if (!tlsClientSni().isEmpty())
            useHost = tlsClientSni();
        else
            useHost = tlsConnectHostOrIp;

        const SBuf &scheme = AnyP::UriScheme(transferProtocol.protocol).image();
        const int url_sz = scheme.length() + useHost.length() + hp->requestUri().length() + 32;
        uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH ":%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme),
                 SQUIDSBUFPRINT(useHost),
                 tlsConnectPort,
                 SQUIDSBUFPRINT(hp->requestUri()));
    }
#endif
    if (uri)
        debugs(33, 5, "TLS switching host rewrite: " << uri);
    return uri;
}
1292
1293 static char *
1294 prepareTransparentURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1295 {
1296 // TODO Must() on URI !empty when the parser supports throw. For now avoid assert().
1297 if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
1298 return nullptr; /* already in good shape */
1299
1300 char *uri = buildUrlFromHost(conn, hp);
1301 if (!uri) {
1302 /* Put the local socket IP address as the hostname. */
1303 static char ipbuf[MAX_IPSTRLEN];
1304 conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
1305 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1306 const int url_sz = sizeof(ipbuf) + hp->requestUri().length() + 32;
1307 uri = static_cast<char *>(xcalloc(url_sz, 1));
1308 snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
1309 SQUIDSBUFPRINT(scheme),
1310 ipbuf, conn->clientConnection->local.port(), SQUIDSBUFPRINT(hp->requestUri()));
1311 }
1312
1313 if (uri)
1314 debugs(33, 5, "TRANSPARENT REWRITE: " << uri);
1315 return uri;
1316 }
1317
/// Parses one HTTP request from inBuf and, on success, builds the
/// ClientHttpRequest/Http::Stream pair with an absolute http->uri.
/// \returns NULL when more data is needed to finish the request line/headers
/// \returns an error stream (via abortRequestParsing) on parse/method errors
/// \returns a fully initialized stream with flags.parsed_ok set otherwise
Http::Stream *
ConnStateData::parseHttpRequest(const Http1::RequestParserPointer &hp)
{
    /* Attempt to parse the first line; this will define where the method, url, version and header begin */
    {
        Must(hp);

        // keep a copy of the raw bytes in case this request ends up tunneled
        // via on_unsupported_protocol (see tunnelOnError())
        if (preservingClientData_)
            preservedClientData = inBuf;

        const bool parsedOk = hp->parse(inBuf);

        // sync the buffers after parsing.
        inBuf = hp->remaining();

        if (hp->needsMoreData()) {
            debugs(33, 5, "Incomplete request, waiting for end of request line");
            return NULL;
        }

        if (!parsedOk) {
            // distinguish oversized requests from malformed ones for the error page
            const bool tooBig =
                hp->parseStatusCode == Http::scRequestHeaderFieldsTooLarge ||
                hp->parseStatusCode == Http::scUriTooLong;
            auto result = abortRequestParsing(
                              tooBig ? "error:request-too-large" : "error:invalid-request");
            // assume that remaining leftovers belong to this bad request
            if (!inBuf.isEmpty())
                consumeInput(inBuf.length());
            return result;
        }
    }

    /* We know the whole request is in parser now */
    debugs(11, 2, "HTTP Client " << clientConnection);
    debugs(11, 2, "HTTP Client REQUEST:\n---------\n" <<
           hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol() << "\n" <<
           hp->mimeHeader() <<
           "\n----------");

    /* deny CONNECT via accelerated ports */
    if (hp->method() == Http::METHOD_CONNECT && port != NULL && port->flags.accelSurrogate) {
        debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << transferProtocol << " Accelerator port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    /* RFC 7540 section 11.6 registers the method PRI as HTTP/2 specific
     * Deny "PRI" method if used in HTTP/1.x or 0.9 versions.
     * If seen it signals a broken client or proxy has corrupted the traffic.
     */
    if (hp->method() == Http::METHOD_PRI && hp->messageProtocol() < Http::ProtocolVersion(2,0)) {
        debugs(33, DBG_IMPORTANT, "WARNING: PRI method received on " << transferProtocol << " port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    if (hp->method() == Http::METHOD_NONE) {
        debugs(33, DBG_IMPORTANT, "WARNING: Unsupported method: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:unsupported-request-method");
    }

    // Process headers after request line
    debugs(33, 3, "complete request received. " <<
           "prefix_sz = " << hp->messageHeaderSize() <<
           ", request-line-size=" << hp->firstLineSize() <<
           ", mime-header-size=" << hp->headerBlockSize() <<
           ", mime header block:\n" << hp->mimeHeader() << "\n----------");

    /* Ok, all headers are received */
    ClientHttpRequest *http = new ClientHttpRequest(this);

    http->req_sz = hp->messageHeaderSize();
    Http::Stream *result = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = result->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // wire the client stream: reply generator upstream, socket writer downstream
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = result;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    /* set url */
    debugs(33,5, "Prepare absolute URL from " <<
           (transparent()?"intercept":(port->flags.accelSurrogate ? "accel":"")));
    /* Rewrite the URL in transparent or accelerator mode */
    /* NP: there are several cases to traverse here:
     *  - standard mode (forward proxy)
     *  - transparent mode (TPROXY)
     *  - transparent mode with failures
     *  - intercept mode (NAT)
     *  - intercept mode with failures
     *  - accelerator mode (reverse proxy)
     *  - internal relative-URL
     *  - mixed combos of the above with internal URL
     *  - remote interception with PROXY protocol
     *  - remote reverse-proxy with PROXY protocol
     */
    if (switchedToHttps()) {
        http->uri = prepareTlsSwitchingURL(hp);
    } else if (transparent()) {
        /* intercept or transparent mode, properly working with no failures */
        http->uri = prepareTransparentURL(this, hp);

    } else if (internalCheck(hp->requestUri())) { // NP: only matches relative-URI
        /* internal URL mode */
        /* prepend our name & port */
        http->uri = xstrdup(internalLocalUri(NULL, hp->requestUri()));
        // We just re-wrote the URL. Must replace the Host: header.
        // But have not parsed there yet!! flag for local-only handling.
        http->flags.internal = true;

    } else if (port->flags.accelSurrogate) {
        /* accelerator mode */
        http->uri = prepareAcceleratedURL(this, hp);
        http->flags.accel = true;
    }

    if (!http->uri) {
        /* No special rewrites have been applied above, use the
         * requested url. may be rewritten later, so make extra room */
        int url_sz = hp->requestUri().length() + Config.appendDomainLen + 5;
        http->uri = (char *)xcalloc(url_sz, 1);
        SBufToCstring(http->uri, hp->requestUri());
    }

    result->flags.parsed_ok = 1;
    return result;
}
1453
/// Decides whether seeing EOF from the client should close the connection.
/// \returns false only when half_closed_clients is enabled and there is still
///          pending work (active requests or unparsed buffered input)
bool
ConnStateData::shouldCloseOnEof() const
{
    // nothing in flight and nothing left to parse: nothing keeps us open
    if (pipeline.empty() && inBuf.isEmpty()) {
        debugs(33, 4, "yes, without active requests and unparsed input");
        return true;
    }

    // the administrator disabled support for half-closed client connections
    if (!Config.onoff.half_closed_clients) {
        debugs(33, 3, "yes, without half_closed_clients");
        return true;
    }

    // Squid currently tries to parse (possibly again) a partially received
    // request after an EOF with half_closed_clients. To give that last parse in
    // afterClientRead() a chance, we ignore partially parsed requests here.
    debugs(33, 3, "no, honoring half_closed_clients");
    return false;
}
1473
/// Removes byteCount bytes from the beginning of the inBuf read buffer.
/// The caller guarantees 0 < byteCount <= inBuf.length().
void
ConnStateData::consumeInput(const size_t byteCount)
{
    assert(byteCount > 0 && byteCount <= inBuf.length());
    inBuf.consume(byteCount);
    debugs(33, 5, "inBuf has " << inBuf.length() << " unused bytes");
}
1481
/// Post-parsing housekeeping: closes a half-closed connection whose request
/// body can no longer arrive in full; otherwise schedules another read when
/// more client data is expected.
void
ConnStateData::clientAfterReadingRequests()
{
    // Were we expecting to read more request body from half-closed connection?
    if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection->fd)) {
        debugs(33, 3, HERE << "truncated body: closing half-closed " << clientConnection);
        clientConnection->close();
        return;
    }

    if (flags.readMore)
        readSomeData();
}
1495
/// Arranges for this connection to be closed after the (error) response is
/// sent: disables keep-alive on the request (if given) and stops reading.
void
ConnStateData::quitAfterError(HttpRequest *request)
{
    // From HTTP p.o.v., we do not have to close after every error detected
    // at the client-side, but many such errors do require closure and the
    // client-side code is bad at handling errors so we play it safe.
    if (request)
        request->flags.proxyKeepalive = false;
    flags.readMore = false;
    debugs(33,4, HERE << "Will close after error: " << clientConnection);
}
1507
#if USE_OPENSSL
/// Serves a previously saved SslBump error entry (or a freshly detected
/// certificate domain mismatch) instead of forwarding the bumped request.
/// \returns true when an error response has been committed to the client
bool ConnStateData::serveDelayedError(Http::Stream *context)
{
    ClientHttpRequest *http = context->http;

    if (!sslServerBump)
        return false; // not bumping this connection; nothing was delayed

    assert(sslServerBump->entry);
    // Did we create an error entry while processing CONNECT?
    if (!sslServerBump->entry->isEmpty()) {
        quitAfterError(http->request);

        // Get the saved error entry and send it to the client by replacing the
        // ClientHttpRequest store entry with it.
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert(repContext);
        // fixed log message typo: "delated" -> "delayed"
        debugs(33, 5, "Responding with delayed error for " << http->uri);
        repContext->setReplyToStoreEntry(sslServerBump->entry, "delayed SslBump error");

        // Get error details from the fake certificate-peeking request.
        http->request->error.update(sslServerBump->request->error);
        context->pullData();
        return true;
    }

    // In bump-server-first mode, we have not necessarily seen the intended
    // server name at certificate-peeking time. Check for domain mismatch now,
    // when we can extract the intended name from the bumped HTTP request.
    if (const Security::CertPointer &srvCert = sslServerBump->serverCert) {
        HttpRequest *request = http->request;
        if (!Ssl::checkX509ServerValidity(srvCert.get(), request->url.host())) {
            debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
                   "does not match domainname " << request->url.host());

            // sslproxy_cert_error may instruct us to tolerate the mismatch
            bool allowDomainMismatch = false;
            if (Config.ssl_client.cert_error) {
                ACLFilledChecklist check(Config.ssl_client.cert_error, nullptr);
                check.sslErrors = new Security::CertErrors(Security::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH, srvCert));
                clientAclChecklistFill(check, http);
                allowDomainMismatch = check.fastCheck().allowed();
                delete check.sslErrors;
                check.sslErrors = nullptr;
            }

            if (!allowDomainMismatch) {
                quitAfterError(request);

                clientStreamNode *node = context->getClientReplyContext();
                clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
                assert(repContext);

                // preserve the peeking request's forwarding details for logging
                request->hier = sslServerBump->request->hier;

                // Create an error object and fill it
                const auto err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request, http->al);
                err->src_addr = clientConnection->remote;
                const Security::ErrorDetail::Pointer errDetail = new Security::ErrorDetail(
                    SQUID_X509_V_ERR_DOMAIN_MISMATCH,
                    srvCert, nullptr);
                updateError(ERR_SECURE_CONNECT_FAIL, errDetail);
                repContext->setReplyToError(request->method, err);
                assert(context->http->out.offset == 0);
                context->pullData();
                return true;
            }
        }
    }

    return false;
}
#endif // USE_OPENSSL
1581
/// ConnStateData::tunnelOnError() wrapper. Reduces code changes. TODO: Remove.
/// \note the context parameter is only sanity-checked against the pipeline
///       head and the request parameter is unused; the actual decision and
///       work happen inside ConnStateData::tunnelOnError().
bool
clientTunnelOnError(ConnStateData *conn, Http::StreamPointer &context, HttpRequest::Pointer &request, const HttpRequestMethod& method, err_type requestError)
{
    assert(conn);
    assert(conn->pipeline.front() == context);
    return conn->tunnelOnError(method, requestError);
}
1590
/// initiate tunneling if possible or return false otherwise
/// Consults the on_unsupported_protocol ACL; when it allows tunneling, the
/// failed request context is retired and the preserved raw client bytes are
/// blindly forwarded to the server via initiateTunneledRequest().
/// \param method currently unused; the tunnel is started with METHOD_NONE
/// \param requestError the parse/processing error that triggered this attempt
bool
ConnStateData::tunnelOnError(const HttpRequestMethod &method, const err_type requestError)
{
    if (!Config.accessList.on_unsupported_protocol) {
        debugs(33, 5, "disabled; send error: " << requestError);
        return false;
    }

    // we can only tunnel if the original client bytes were preserved
    if (!preservingClientData_) {
        debugs(33, 3, "may have forgotten client data; send error: " << requestError);
        return false;
    }

    ACLFilledChecklist checklist(Config.accessList.on_unsupported_protocol, nullptr);
    checklist.requestErrorType = requestError;
    fillChecklist(checklist);
    auto answer = checklist.fastCheck();
    // kind == 1 selects the "tunnel" action of on_unsupported_protocol
    if (answer.allowed() && answer.kind == 1) {
        debugs(33, 3, "Request will be tunneled to server");
        const auto context = pipeline.front();
        const auto http = context ? context->http : nullptr;
        const auto request = http ? http->request : nullptr;
        if (context)
            context->finished(); // Will remove from pipeline queue
        // stop monitoring for reads; the tunnel takes over the descriptor
        Comm::SetSelect(clientConnection->fd, COMM_SELECT_READ, NULL, NULL, 0);
        return initiateTunneledRequest(request, Http::METHOD_NONE, "unknown-protocol", preservedClientData);
    }
    debugs(33, 3, "denied; send error: " << requestError);
    return false;
}
1622
/// Final step of clientProcessRequest(): honors request->flags.resetTcp by
/// sending a TCP RST to the client instead of a normal close.
void
clientProcessRequestFinished(ConnStateData *conn, const HttpRequest::Pointer &request)
{
    /*
     * DPW 2007-05-18
     * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
     * to here because calling comm_reset_close() causes http to
     * be freed before accessing.
     */
    if (request != NULL && request->flags.resetTcp && Comm::IsConnOpen(conn->clientConnection)) {
        debugs(33, 3, HERE << "Sending TCP RST on " << conn->clientConnection);
        conn->flags.readMore = false;
        comm_reset_close(conn->clientConnection);
    }
}
1638
/// Performs the final setup of an already parsed and checked request:
/// connection/flags wiring, internal-URL detection, request validation
/// (unsupported TE, OPTIONS, Content-Length), request body pipe setup, and
/// finally kicks off http->doCallouts(). Shared by the HTTP and FTP (hp==nil)
/// code paths. Errors are answered via the context's reply machinery.
void
clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
{
    ClientHttpRequest *http = context->http;
    bool mustReplyToOptions = false;
    bool expectBody = false;

    // We already have the request parsed and checked, so we
    // only need to go through the final body/conn setup to doCallouts().
    assert(http->request);
    HttpRequest::Pointer request = http->request;

    // temporary hack to avoid splitting this huge function with sensitive code
    const bool isFtp = !hp;

    // Some blobs below are still HTTP-specific, but we would have to rewrite
    // this entire function to remove them from the FTP code path. Connection
    // setup and body_pipe preparation blobs are needed for FTP.

    request->manager(conn, http->al);

    request->flags.accelerated = http->flags.accel;
    request->flags.sslBumped=conn->switchedToHttps();
    // TODO: decouple http->flags.accel from request->flags.sslBumped
    request->flags.noDirect = (request->flags.accelerated && !request->flags.sslBumped) ?
                              !conn->port->allow_direct : 0;
    // record where this message came from (FTP gateway, HTTPS, or plain HTTP)
    request->sources |= isFtp ? Http::Message::srcFtp :
                        ((request->flags.sslBumped || conn->port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    if (request->flags.sslBumped) {
        // inherit the credentials established on the bumped CONNECT
        if (conn->getAuth() != NULL)
            request->auth_user_request = conn->getAuth();
    }
#endif

    // detect requests for Squid's own internal objects (icons, CGI, etc.)
    if (internalCheck(request->url.path())) {
        if (internalHostnameIs(request->url.host()) && request->url.port() == getMyPort()) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true));
            http->flags.internal = true;
        } else if (Config.onoff.global_internal_static && internalStaticCheck(request->url.path())) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (global_internal_static on)");
            request->url.setScheme(AnyP::PROTO_HTTP, "http");
            request->url.host(internalHostname());
            request->url.port(getMyPort());
            http->flags.internal = true;
            http->setLogUriToRequestUri();
        } else
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (not this proxy)");
    }

    request->flags.internal = http->flags.internal;

    if (!isFtp) {
        // XXX: for non-HTTP messages instantiate a different Http::Message child type
        // for now Squid only supports HTTP requests
        const AnyP::ProtocolVersion &http_ver = hp->messageProtocol();
        assert(request->http_ver.protocol == http_ver.protocol);
        request->http_ver.major = http_ver.major;
        request->http_ver.minor = http_ver.minor;
    }

    const auto unsupportedTe = request->header.unsupportedTe();

    // RFC 7231: OPTIONS with Max-Forwards: 0 must be answered by this hop
    mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
                         (request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
    if (!urlCheckRequest(request.getRaw()) || mustReplyToOptions || unsupportedTe) {
        clientStreamNode *node = context->getClientReplyContext();
        conn->quitAfterError(request.getRaw());
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        repContext->setReplyToError(ERR_UNSUP_REQ, Http::scNotImplemented, request->method, NULL,
                                    conn, request.getRaw(), nullptr, nullptr);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    const auto chunked = request->header.chunked();
    if (!chunked && !clientIsContentLengthValid(request.getRaw())) {
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        conn->quitAfterError(request.getRaw());
        repContext->setReplyToError(ERR_INVALID_REQ,
                                    Http::scLengthRequired, request->method, NULL,
                                    conn, request.getRaw(), nullptr, nullptr);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    clientSetKeepaliveFlag(http);
    // Let tunneling code be fully responsible for CONNECT requests
    if (http->request->method == Http::METHOD_CONNECT) {
        context->mayUseConnection(true);
        conn->flags.readMore = false;
    }

#if USE_OPENSSL
    if (conn->switchedToHttps() && conn->serveDelayedError(context)) {
        clientProcessRequestFinished(conn, request);
        return;
    }
#endif

    /* Do we expect a request-body? */
    expectBody = chunked || request->content_length > 0;
    if (!context->mayUseConnection() && expectBody) {
        // -1 length means "chunked: size unknown until decoded"
        request->body_pipe = conn->expectRequestBody(
                                 chunked ? -1 : request->content_length);

        /* Is it too large? */
        if (!chunked && // if chunked, we will check as we accumulate
                clientIsRequestBodyTooLargeForPolicy(request->content_length)) {
            clientStreamNode *node = context->getClientReplyContext();
            clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
            assert (repContext);
            conn->quitAfterError(request.getRaw());
            repContext->setReplyToError(ERR_TOO_BIG,
                                        Http::scPayloadTooLarge, Http::METHOD_NONE, NULL,
                                        conn, http->request, nullptr, nullptr);
            assert(context->http->out.offset == 0);
            context->pullData();
            clientProcessRequestFinished(conn, request);
            return;
        }

        if (!isFtp) {
            // We may stop producing, comm_close, and/or call setReplyToError()
            // below, so quit on errors to avoid http->doCallouts()
            if (!conn->handleRequestBodyData()) {
                clientProcessRequestFinished(conn, request);
                return;
            }

            if (!request->body_pipe->productionEnded()) {
                debugs(33, 5, "need more request body");
                context->mayUseConnection(true);
                assert(conn->flags.readMore);
            }
        }
    }

    http->calloutContext = new ClientRequestContext(http);

    http->doCallouts();

    clientProcessRequestFinished(conn, request);
}
1790
/// Registers a newly created stream (request context) with this connection's
/// pipeline. Any connection-level ("bare") error recorded earlier is handed
/// over to the new request so it can be reported there.
void
ConnStateData::add(const Http::StreamPointer &context)
{
    debugs(33, 3, context << " to " << pipeline.count() << '/' << pipeline.nrequests);
    if (bareError) {
        debugs(33, 5, "assigning " << bareError);
        assert(context);
        assert(context->http);
        context->http->updateError(bareError);
        bareError.clear(); // the error now belongs to the request
    }
    pipeline.add(context);
}
1804
1805 int
1806 ConnStateData::pipelinePrefetchMax() const
1807 {
1808 // TODO: Support pipelined requests through pinned connections.
1809 if (pinning.pinned)
1810 return 0;
1811 return Config.pipeline_max_prefetch;
1812 }
1813
1814 /**
1815 * Limit the number of concurrent requests.
1816 * \return true when there are available position(s) in the pipeline queue for another request.
1817 * \return false when the pipeline queue is full or disabled.
1818 */
1819 bool
1820 ConnStateData::concurrentRequestQueueFilled() const
1821 {
1822 const int existingRequestCount = pipeline.count();
1823
1824 // default to the configured pipeline size.
1825 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
1826 #if USE_OPENSSL
1827 const int internalRequest = (transparent() && sslBumpMode == Ssl::bumpSplice) ? 1 : 0;
1828 #else
1829 const int internalRequest = 0;
1830 #endif
1831 const int concurrentRequestLimit = pipelinePrefetchMax() + 1 + internalRequest;
1832
1833 // when queue filled already we can't add more.
1834 if (existingRequestCount >= concurrentRequestLimit) {
1835 debugs(33, 3, clientConnection << " max concurrent requests reached (" << concurrentRequestLimit << ")");
1836 debugs(33, 5, clientConnection << " deferring new request until one is done");
1837 return true;
1838 }
1839
1840 return false;
1841 }
1842
1843 /**
1844 * Perform proxy_protocol_access ACL tests on the client which
1845 * connected to PROXY protocol port to see if we trust the
1846 * sender enough to accept their PROXY header claim.
1847 */
1848 bool
1849 ConnStateData::proxyProtocolValidateClient()
1850 {
1851 if (!Config.accessList.proxyProtocol)
1852 return proxyProtocolError("PROXY client not permitted by default ACL");
1853
1854 ACLFilledChecklist ch(Config.accessList.proxyProtocol, nullptr);
1855 fillChecklist(ch);
1856 if (!ch.fastCheck().allowed())
1857 return proxyProtocolError("PROXY client not permitted by ACLs");
1858
1859 return true;
1860 }
1861
1862 /**
1863 * Perform cleanup on PROXY protocol errors.
1864 * If header parsing hits a fatal error terminate the connection,
1865 * otherwise wait for more data.
1866 */
1867 bool
1868 ConnStateData::proxyProtocolError(const char *msg)
1869 {
1870 if (msg) {
1871 // This is important to know, but maybe not so much that flooding the log is okay.
1872 #if QUIET_PROXY_PROTOCOL
1873 // display the first of every 32 occurrences at level 1, the others at level 2.
1874 static uint8_t hide = 0;
1875 debugs(33, (hide++ % 32 == 0 ? DBG_IMPORTANT : 2), msg << " from " << clientConnection);
1876 #else
1877 debugs(33, DBG_IMPORTANT, msg << " from " << clientConnection);
1878 #endif
1879 mustStop(msg);
1880 }
1881 return false;
1882 }
1883
/// Attempts to extract a PROXY protocol header from the input buffer and,
/// upon success, stores the parsed header in proxyProtocolHeader_.
/// \returns true if the header was successfully parsed
/// \returns false if more data is needed to parse the header or on error
bool
ConnStateData::parseProxyProtocolHeader()
{
    try {
        const auto parsed = ProxyProtocol::Parse(inBuf);
        proxyProtocolHeader_ = parsed.header;
        assert(bool(proxyProtocolHeader_));
        inBuf.consume(parsed.size); // drop the header bytes; what follows is the client's own traffic
        needProxyProtocolHeader_ = false;
        if (proxyProtocolHeader_->hasForwardedAddresses()) {
            // adopt the client/destination addresses claimed by the trusted sender
            clientConnection->local = proxyProtocolHeader_->destinationAddress;
            clientConnection->remote = proxyProtocolHeader_->sourceAddress;
            if ((clientConnection->flags & COMM_TRANSPARENT))
                clientConnection->flags ^= COMM_TRANSPARENT; // prevent TPROXY spoofing of this new IP.
            debugs(33, 5, "PROXY/" << proxyProtocolHeader_->version() << " upgrade: " << clientConnection);
        }
    } catch (const Parser::BinaryTokenizer::InsufficientInput &) {
        // not an error: the (possibly valid) header simply needs more bytes
        debugs(33, 3, "PROXY protocol: waiting for more than " << inBuf.length() << " bytes");
        return false;
    } catch (const std::exception &e) {
        // malformed header; proxyProtocolError() reports it and returns false
        return proxyProtocolError(e.what());
    }
    return true;
}
1912
1913 void
1914 ConnStateData::receivedFirstByte()
1915 {
1916 if (receivedFirstByte_)
1917 return;
1918
1919 receivedFirstByte_ = true;
1920 resetReadTimeout(Config.Timeout.request);
1921 }
1922
/**
 * Attempt to parse one or more requests from the input buffer.
 * Returns true after completing parsing of at least one request [header]. That
 * includes cases where parsing ended with an error (e.g., a huge request).
 */
bool
ConnStateData::clientParseRequests()
{
    bool parsed_req = false;

    debugs(33, 5, HERE << clientConnection << ": attempting to parse");

    // Loop while we have read bytes that are not needed for producing the body
    // On errors, bodyPipe may become nil, but readMore will be cleared
    while (!inBuf.isEmpty() && !bodyPipe && flags.readMore) {

        // Prohibit concurrent requests when using a pinned to-server connection
        // because our Client classes do not support request pipelining.
        if (pinning.pinned && !pinning.readHandler) {
            debugs(33, 3, clientConnection << " waits for busy " << pinning.serverConnection);
            break;
        }

        /* Limit the number of concurrent requests */
        if (concurrentRequestQueueFilled())
            break;

        // try to parse the PROXY protocol header magic bytes
        if (needProxyProtocolHeader_) {
            if (!parseProxyProtocolHeader())
                break;

            // we have been waiting for PROXY to provide client-IP
            // for some lookups, ie rDNS and IDENT.
            whenClientIpKnown();

            // Done with PROXY protocol which has cleared preservingClientData_.
            // If the next protocol supports on_unsupported_protocol, then its
            // parseOneRequest() must reset preservingClientData_.
            assert(!preservingClientData_);
        }

        if (Http::StreamPointer context = parseOneRequest()) {
            debugs(33, 5, clientConnection << ": done parsing a request");
            extendLifetime(); // the connection is demonstrably still in use
            context->registerWithConn();

#if USE_OPENSSL
            if (switchedToHttps())
                parsedBumpedRequestCount++;
#endif

            processParsedRequest(context);

            parsed_req = true; // XXX: do we really need to parse everything right NOW ?

            if (context->mayUseConnection()) {
                debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection");
                break;
            }
        } else {
            // incomplete header: wait for more data, unless it is already
            // too large to ever fit within the configured limit
            debugs(33, 5, clientConnection << ": not enough request data: " <<
                   inBuf.length() << " < " << Config.maxRequestHeaderSize);
            Must(inBuf.length() < Config.maxRequestHeaderSize);
            break;
        }
    }

    /* XXX where to 'finish' the parsing pass? */
    return parsed_req;
}
1994
/// Handles freshly read client bytes: either continues an in-progress TLS
/// handshake or tries to parse (and start processing) buffered requests,
/// closing half-closed idle connections along the way.
void
ConnStateData::afterClientRead()
{
#if USE_OPENSSL
    if (parsingTlsHandshake) {
        parseTlsHandshake();
        return;
    }
#endif

    /* Process next request */
    if (pipeline.empty())
        fd_note(clientConnection->fd, "Reading next request");

    if (!clientParseRequests()) {
        if (!isOpen())
            return;
        // We may get here if the client half-closed after sending a partial
        // request. See doClientRead() and shouldCloseOnEof().
        // XXX: This partially duplicates ConnStateData::kick().
        if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
            debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
            clientConnection->close();
            return;
        }
    }

    // parsing (or the half-closed check) may have closed the connection
    if (!isOpen())
        return;

    clientAfterReadingRequests();
}
2027
2028 /**
2029 * called when new request data has been read from the socket
2030 *
2031 * \retval false called comm_close or setReplyToError (the caller should bail)
2032 * \retval true we did not call comm_close or setReplyToError
2033 */
2034 bool
2035 ConnStateData::handleReadData()
2036 {
2037 // if we are reading a body, stuff data into the body pipe
2038 if (bodyPipe != NULL)
2039 return handleRequestBodyData();
2040 return true;
2041 }
2042
/**
 * called when new request body data has been buffered in inBuf
 * may close the connection if we were closing and piped everything out
 *
 * \retval false called comm_close or setReplyToError (the caller should bail)
 * \retval true we did not call comm_close or setReplyToError
 */
bool
ConnStateData::handleRequestBodyData()
{
    assert(bodyPipe != NULL);

    if (bodyParser) { // chunked encoding
        if (const err_type error = handleChunkedRequestBody()) {
            abortChunkedRequestBody(error);
            return false;
        }
    } else { // identity encoding
        debugs(33,5, HERE << "handling plain request body for " << clientConnection);
        // forward as many buffered bytes as the pipe can accept right now
        const size_t putSize = bodyPipe->putMoreData(inBuf.c_str(), inBuf.length());
        if (putSize > 0)
            consumeInput(putSize);

        if (!bodyPipe->mayNeedMoreData()) {
            // BodyPipe will clear us automagically when we produced everything
            bodyPipe = NULL;
        }
    }

    if (!bodyPipe) {
        debugs(33,5, HERE << "produced entire request body for " << clientConnection);

        if (const char *reason = stoppedSending()) {
            /* we've finished reading like good clients,
             * now do the close that initiateClose initiated.
             */
            debugs(33, 3, HERE << "closing for earlier sending error: " << reason);
            clientConnection->close();
            return false;
        }
    }

    return true;
}
2087
/// parses available chunked encoded body bytes, checks size, returns errors
err_type
ConnStateData::handleChunkedRequestBody()
{
    debugs(33, 7, "chunked from " << clientConnection << ": " << inBuf.length());

    try { // the parser will throw on errors

        if (inBuf.isEmpty()) // nothing to do
            return ERR_NONE;

        // let the parser dechunk straight into the body pipe buffer
        BodyPipeCheckout bpc(*bodyPipe);
        bodyParser->setPayloadBuffer(&bpc.buf);
        const bool parsed = bodyParser->parse(inBuf);
        inBuf = bodyParser->remaining(); // sync buffers
        bpc.checkIn();

        // dechunk then check: the size limit applies to _dechunked_ content
        if (clientIsRequestBodyTooLargeForPolicy(bodyPipe->producedSize()))
            return ERR_TOO_BIG;

        if (parsed) {
            finishDechunkingRequest(true);
            Must(!bodyPipe);
            return ERR_NONE; // nil bodyPipe implies body end for the caller
        }

        // if chunk parser needs data, then the body pipe must need it too
        Must(!bodyParser->needsMoreData() || bodyPipe->mayNeedMoreData());

        // if parser needs more space and we can consume nothing, we will stall
        Must(!bodyParser->needsMoreSpace() || bodyPipe->buf().hasContent());
    } catch (...) { // TODO: be more specific
        debugs(33, 3, HERE << "malformed chunks" << bodyPipe->status());
        return ERR_INVALID_REQ;
    }

    debugs(33, 7, HERE << "need more chunked data" << *bodyPipe->status());
    return ERR_NONE;
}
2128
/// quit on errors related to chunked request body handling
void
ConnStateData::abortChunkedRequestBody(const err_type error)
{
    finishDechunkingRequest(false);

    // XXX: The code below works if we fail during initial request parsing,
    // but if we fail when the server connection is used already, the server may send
    // us its response too, causing various assertions. How to prevent that?
#if WE_KNOW_HOW_TO_SEND_ERRORS
    Http::StreamPointer context = pipeline.front();
    if (context != NULL && !context->http->out.offset) { // output nothing yet
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext*>(node->data.getRaw());
        assert(repContext);
        const Http::StatusCode scode = (error == ERR_TOO_BIG) ?
                                       Http::scPayloadTooLarge : HTTP_BAD_REQUEST;
        repContext->setReplyToError(error, scode,
                                    repContext->http->request->method,
                                    repContext->http->uri,
                                    CachePeer,
                                    repContext->http->request,
                                    inBuf, NULL);
        context->pullData();
    } else {
        // close or otherwise we may get stuck as nobody will notice the error?
        comm_reset_close(clientConnection);
    }
#else
    debugs(33, 3, HERE << "aborting chunked request without error " << error);
    comm_reset_close(clientConnection);
#endif
    flags.readMore = false; // no point in reading more of a doomed request body
}
2163
2164 void
2165 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer )
2166 {
2167 // request reader may get stuck waiting for space if nobody consumes body
2168 if (bodyPipe != NULL)
2169 bodyPipe->enableAutoConsumption();
2170
2171 // kids extend
2172 }
2173
2174 /** general lifetime handler for HTTP requests */
2175 void
2176 ConnStateData::requestTimeout(const CommTimeoutCbParams &io)
2177 {
2178 if (!Comm::IsConnOpen(io.conn))
2179 return;
2180
2181 const err_type error = receivedFirstByte_ ? ERR_REQUEST_PARSE_TIMEOUT : ERR_REQUEST_START_TIMEOUT;
2182 updateError(error);
2183 if (tunnelOnError(HttpRequestMethod(), error))
2184 return;
2185
2186 /*
2187 * Just close the connection to not confuse browsers
2188 * using persistent connections. Some browsers open
2189 * a connection and then do not use it until much
2190 * later (presumeably because the request triggering
2191 * the open has already been completed on another
2192 * connection)
2193 */
2194 debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
2195 io.conn->close();
2196 }
2197
/// Handles expiration of the absolute connection lifetime timeout by
/// terminating all transactions on this connection with ERR_LIFETIME_EXP.
void
ConnStateData::lifetimeTimeout(const CommTimeoutCbParams &io)
{
    debugs(33, DBG_IMPORTANT, "WARNING: Closing client connection due to lifetime timeout" <<
           Debug::Extra << "connection: " << io.conn);

    LogTagsErrors lte;
    lte.timedout = true; // record the timeout for access logging
    terminateAll(ERR_LIFETIME_EXP, lte);
}
2208
/// Constructs server-side state for one accepted client connection.
/// \param xact the accept transaction that produced this connection
ConnStateData::ConnStateData(const MasterXaction::Pointer &xact) :
    AsyncJob("ConnStateData"), // kids overwrite
    Server(xact)
#if USE_OPENSSL
    , tlsParser(Security::HandshakeParser::fromClient)
#endif
{
    // store the details required for creating more MasterXaction objects as new requests come in
    log_addr = xact->tcpClient->remote;
    log_addr.applyClientMask(Config.Addrs.client_netmask);

    // register to receive notice of Squid signal events
    // which may affect long persisting client connections
    registerRunner();
}
2224
/// AsyncJob API: starts servicing the accepted connection. Optionally
/// disables Path MTU discovery, installs the close handler, and either
/// waits for a PROXY protocol header or proceeds as if the client IP is
/// already known.
void
ConnStateData::start()
{
    BodyProducer::start();
    HttpControlMsgSink::start();

    // optionally disable Path MTU discovery, per port configuration
    if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF &&
            (transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) {
#if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
        int i = IP_PMTUDISC_DONT;
        if (setsockopt(clientConnection->fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof(i)) < 0) {
            int xerrno = errno;
            debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection << " : " << xstrerr(xerrno));
        }
#else
        static bool reported = false; // warn only once per process

        if (!reported) {
            debugs(33, DBG_IMPORTANT, "NOTICE: Path MTU discovery disabling is not supported on your platform.");
            reported = true;
        }
#endif
    }

    // get notified when the client connection is closed for any reason
    typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
    AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, ConnStateData::connStateClosed);
    comm_add_close_handler(clientConnection->fd, call);

    needProxyProtocolHeader_ = port->flags.proxySurrogate;
    if (needProxyProtocolHeader_) {
        if (!proxyProtocolValidateClient()) // will close the connection on failure
            return;
    } else
        whenClientIpKnown();

    // requires needProxyProtocolHeader_ which is initialized above
    preservingClientData_ = shouldPreserveClientData();
}
2263
/// Called when the client's IP address is definitely known (either at
/// connection start or after a PROXY protocol header supplied it). Starts
/// optional FQDN and IDENT lookups, registers the client in the client
/// database, and assigns the connection to a client delay pool if one's
/// ACLs allow it.
void
ConnStateData::whenClientIpKnown()
{
    if (Config.onoff.log_fqdn)
        fqdncache_gethostbyaddr(clientConnection->remote, FQDN_LOOKUP_IF_MISS);

#if USE_IDENT
    if (Ident::TheConfig.identLookup) {
        ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, NULL, NULL);
        fillChecklist(identChecklist);
        if (identChecklist.fastCheck().allowed())
            Ident::Start(clientConnection, clientIdentDone, this);
    }
#endif

    clientdbEstablished(clientConnection->remote, 1);

#if USE_DELAY_POOLS
    fd_table[clientConnection->fd].clientInfo = NULL;

    if (!Config.onoff.client_db)
        return; // client delay pools require client_db

    const auto &pools = ClientDelayPools::Instance()->pools;
    if (pools.size()) {
        ACLFilledChecklist ch(NULL, NULL, NULL);
        fillChecklist(ch);
        // TODO: we check early to limit error response bandwidth but we
        // should recheck when we can honor delay_pool_uses_indirect
        for (unsigned int pool = 0; pool < pools.size(); ++pool) {

            /* pools require explicit 'allow' to assign a client into them */
            if (pools[pool]->access) {
                ch.changeAcl(pools[pool]->access);
                auto answer = ch.fastCheck();
                if (answer.allowed()) {

                    /* request client information from db after we did all checks
                       this will save hash lookup if client failed checks */
                    ClientInfo * cli = clientdbGetInfo(clientConnection->remote);
                    assert(cli);

                    /* put client info in FDE */
                    fd_table[clientConnection->fd].clientInfo = cli;

                    /* setup write limiter for this request */
                    const double burst = floor(0.5 +
                                               (pools[pool]->highwatermark * Config.ClientDelay.initial)/100.0);
                    cli->setWriteLimiter(pools[pool]->rate, burst, pools[pool]->highwatermark);
                    break; // a client joins at most one pool
                } else {
                    debugs(83, 4, HERE << "Delay pool " << pool << " skipped because ACL " << answer);
                }
            }
        }
    }
#endif

    // kids must extend to actually start doing something (e.g., reading)
}
2324
/// Performs one TLS accept (handshake) step on the client connection.
/// \returns the Security::Accept() outcome for the caller to interpret
Security::IoResult
ConnStateData::acceptTls()
{
    const auto handshakeResult = Security::Accept(*clientConnection);

#if USE_OPENSSL
    // log ASAP, even if the handshake has not completed (or failed)
    const auto fd = clientConnection->fd;
    assert(fd >= 0);
    keyLogger.checkpoint(*fd_table[fd].ssl, *this);
#else
    // TODO: Support fd_table[fd].ssl dereference in other builds.
#endif

    return handshakeResult;
}
2341
2342 /** Handle a new connection on an HTTP socket. */
2343 void
2344 httpAccept(const CommAcceptCbParams &params)
2345 {
2346 MasterXaction::Pointer xact = params.xaction;
2347 AnyP::PortCfgPointer s = xact->squidPort;
2348
2349 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2350
2351 if (params.flag != Comm::OK) {
2352 // Its possible the call was still queued when the client disconnected
2353 debugs(33, 2, s->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2354 return;
2355 }
2356
2357 debugs(33, 4, params.conn << ": accepted");
2358 fd_note(params.conn->fd, "client http connect");
2359
2360 if (s->tcp_keepalive.enabled)
2361 commSetTcpKeepalive(params.conn->fd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
2362
2363 ++incoming_sockets_accepted;
2364
2365 // Socket is ready, setup the connection manager to start using it
2366 auto *srv = Http::NewServer(xact);
2367 AsyncJob::Start(srv); // usually async-calls readSomeData()
2368 }
2369
2370 /// Create TLS connection structure and update fd_table
2371 static bool
2372 httpsCreate(const ConnStateData *connState, const Security::ContextPointer &ctx)
2373 {
2374 const auto conn = connState->clientConnection;
2375 if (Security::CreateServerSession(ctx, conn, connState->port->secure, "client https start")) {
2376 debugs(33, 5, "will negotiate TLS on " << conn);
2377 return true;
2378 }
2379
2380 debugs(33, DBG_IMPORTANT, "ERROR: could not create TLS server context for " << conn);
2381 conn->close();
2382 return false;
2383 }
2384
/** negotiate an SSL connection */
static void
clientNegotiateSSL(int fd, void *data)
{
    ConnStateData *conn = (ConnStateData *)data;

    // advance the TLS handshake; re-arm I/O and retry later if incomplete
    const auto handshakeResult = conn->acceptTls();
    switch (handshakeResult.category) {
    case Security::IoResult::ioSuccess:
        break;

    case Security::IoResult::ioWantRead:
        Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_READ, clientNegotiateSSL, conn, 0);
        return;

    case Security::IoResult::ioWantWrite:
        Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, conn, 0);
        return;

    case Security::IoResult::ioError:
        debugs(83, (handshakeResult.important ? Important(62) : 2), "ERROR: " << handshakeResult.errorDescription <<
               " while accepting a TLS connection on " << conn->clientConnection << ": " << handshakeResult.errorDetail);
        // TODO: No ConnStateData::tunnelOnError() on this forward-proxy code
        // path because we cannot know the intended connection target?
        conn->updateError(ERR_SECURE_ACCEPT_FAIL, handshakeResult.errorDetail);
        conn->clientConnection->close();
        return;
    }

    Security::SessionPointer session(fd_table[fd].ssl);

#if USE_OPENSSL
    if (Security::SessionIsResumed(session)) {
        debugs(83, 2, "Session " << SSL_get_session(session.get()) <<
               " reused on FD " << fd << " (" << fd_table[fd].ipaddr <<
               ":" << (int)fd_table[fd].remote_port << ")");
    } else {
        if (Debug::Enabled(83, 4)) {
            /* Write out the SSL session details.. actually the call below, but
             * OpenSSL headers do strange typecasts confusing GCC.. */
            /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
            PEM_ASN1_write(reinterpret_cast<i2d_of_void *>(i2d_SSL_SESSION),
                           PEM_STRING_SSL_SESSION, debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);

#elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)

            /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
             * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
             * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
             * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
             * Because there are two possible usable cast, if you get an error here, try the other
             * commented line. */

            PEM_ASN1_write((int(*)())i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
            /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
             */
#else
            debugs(83, 4, "With " OPENSSL_VERSION_TEXT ", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source.");

#endif
            /* Note: This does not automatically fflush the log file.. */
        }

        debugs(83, 2, "New session " << SSL_get_session(session.get()) <<
               " on FD " << fd << " (" << fd_table[fd].ipaddr << ":" <<
               fd_table[fd].remote_port << ")");
    }
#else
    debugs(83, 2, "TLS session reuse not yet implemented.");
#endif

    // Connection established. Retrieve TLS connection parameters for logging.
    conn->clientConnection->tlsNegotiations()->retrieveNegotiatedInfo(session);

#if USE_OPENSSL
    // log any certificate the client presented during the handshake
    X509 *client_cert = SSL_get_peer_certificate(session.get());

    if (client_cert) {
        debugs(83, 3, "FD " << fd << " client certificate: subject: " <<
               X509_NAME_oneline(X509_get_subject_name(client_cert), 0, 0));

        debugs(83, 3, "FD " << fd << " client certificate: issuer: " <<
               X509_NAME_oneline(X509_get_issuer_name(client_cert), 0, 0));

        X509_free(client_cert);
    } else {
        debugs(83, 5, "FD " << fd << " has no client certificate.");
    }
#else
    debugs(83, 2, "Client certificate requesting not yet implemented.");
#endif

    // If we are called, then bumped CONNECT has succeeded. Finalize it.
    if (auto xact = conn->pipeline.front()) {
        if (xact->http && xact->http->request && xact->http->request->method == Http::METHOD_CONNECT)
            xact->finished();
        // cannot proceed with encryption if requests wait for plain responses
        Must(conn->pipeline.empty());
    }
    /* careful: finished() above frees request, host, etc. */

    conn->readSomeData();
}
2497
2498 /**
2499 * If Security::ContextPointer is given, starts reading the TLS handshake.
2500 * Otherwise, calls switchToHttps to generate a dynamic Security::ContextPointer.
2501 */
2502 static void
2503 httpsEstablish(ConnStateData *connState, const Security::ContextPointer &ctx)
2504 {
2505 assert(connState);
2506 const Comm::ConnectionPointer &details = connState->clientConnection;
2507
2508 if (!ctx || !httpsCreate(connState, ctx))
2509 return;
2510
2511 connState->resetReadTimeout(Config.Timeout.request);
2512
2513 Comm::SetSelect(details->fd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0);
2514 }
2515
#if USE_OPENSSL
/**
 * A callback function to use with the ACLFilledChecklist callback.
 * Applies the ssl_bump ACL verdict: records the selected bumping mode
 * (defaulting to splice on denial), terminates the connection when so
 * directed, and otherwise fakes a CONNECT request to start bumping.
 */
static void
httpsSslBumpAccessCheckDone(Acl::Answer answer, void *data)
{
    ConnStateData *connState = (ConnStateData *) data;

    // if the connection is closed or closing, just return.
    if (!connState->isOpen())
        return;

    if (answer.allowed()) {
        // fixed log message: a space was missing between the mode name and "needed"
        debugs(33, 2, "sslBump action " << Ssl::bumpMode(answer.kind) << " needed for " << connState->clientConnection);
        connState->sslBumpMode = static_cast<Ssl::BumpMode>(answer.kind);
    } else {
        debugs(33, 3, "sslBump not needed for " << connState->clientConnection);
        connState->sslBumpMode = Ssl::bumpSplice;
    }

    if (connState->sslBumpMode == Ssl::bumpTerminate) {
        connState->clientConnection->close();
        return;
    }

    // start bumping; on failure, drop the connection
    if (!connState->fakeAConnectRequest("ssl-bump", connState->inBuf))
        connState->clientConnection->close();
}
#endif
2546
2547 /** handle a new HTTPS connection */
2548 static void
2549 httpsAccept(const CommAcceptCbParams &params)
2550 {
2551 MasterXaction::Pointer xact = params.xaction;
2552 const AnyP::PortCfgPointer s = xact->squidPort;
2553
2554 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2555
2556 if (params.flag != Comm::OK) {
2557 // Its possible the call was still queued when the client disconnected
2558 debugs(33, 2, "httpsAccept: " << s->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2559 return;
2560 }
2561
2562 debugs(33, 4, HERE << params.conn << " accepted, starting SSL negotiation.");
2563 fd_note(params.conn->fd, "client https connect");
2564
2565 if (s->tcp_keepalive.enabled) {
2566 commSetTcpKeepalive(params.conn->fd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
2567 }
2568 ++incoming_sockets_accepted;
2569
2570 // Socket is ready, setup the connection manager to start using it
2571 auto *srv = Https::NewServer(xact);
2572 AsyncJob::Start(srv); // usually async-calls postHttpsAccept()
2573 }
2574
/// Continues handling a freshly accepted HTTPS connection: on ssl_bump
/// ports, builds a fake request/ALE and starts the ssl_bump ACL check;
/// otherwise begins the TLS handshake with the port's static context.
void
ConnStateData::postHttpsAccept()
{
    if (port->flags.tunnelSslBumping) {
#if USE_OPENSSL
        debugs(33, 5, "accept transparent connection: " << clientConnection);

        if (!Config.accessList.ssl_bump) {
            // no ssl_bump rules configured; deliver a denial verdict directly
            httpsSslBumpAccessCheckDone(ACCESS_DENIED, this);
            return;
        }

        MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
        mx->tcpClient = clientConnection;
        // Create a fake HTTP request and ALE for the ssl_bump ACL check,
        // using tproxy/intercept provided destination IP and port.
        // XXX: Merge with subsequent fakeAConnectRequest(), buildFakeRequest().
        // XXX: Do this earlier (e.g., in Http[s]::One::Server constructor).
        HttpRequest *request = new HttpRequest(mx);
        static char ip[MAX_IPSTRLEN];
        assert(clientConnection->flags & (COMM_TRANSPARENT | COMM_INTERCEPTION));
        request->url.host(clientConnection->local.toStr(ip, sizeof(ip)));
        request->url.port(clientConnection->local.port());
        request->myportname = port->name;
        const AccessLogEntry::Pointer connectAle = new AccessLogEntry;
        CodeContext::Reset(connectAle);
        // TODO: Use these request/ALE when waiting for new bumped transactions.

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, request, NULL);
        fillChecklist(*acl_checklist);
        // Build a local AccessLogEntry to allow requiresAle() acls work
        acl_checklist->al = connectAle;
        acl_checklist->al->cache.start_time = current_time;
        acl_checklist->al->tcpClient = clientConnection;
        acl_checklist->al->cache.port = port;
        acl_checklist->al->cache.caddr = log_addr;
        acl_checklist->al->proxyProtocolHeader = proxyProtocolHeader_;
        acl_checklist->al->updateError(bareError);
        HTTPMSGUNLOCK(acl_checklist->al->request);
        acl_checklist->al->request = request;
        HTTPMSGLOCK(acl_checklist->al->request);
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(request, log_uri);
        // httpsSslBumpAccessCheckDone() receives the eventual verdict
        acl_checklist->nonBlockingCheck(httpsSslBumpAccessCheckDone, this);
#else
        fatal("FATAL: SSL-Bump requires --with-openssl");
#endif
        return;
    } else {
        httpsEstablish(this, port->secure.staticContext);
    }
}
2629
2630 #if USE_OPENSSL
2631 void
2632 ConnStateData::sslCrtdHandleReplyWrapper(void *data, const Helper::Reply &reply)
2633 {
2634 ConnStateData * state_data = (ConnStateData *)(data);
2635 state_data->sslCrtdHandleReply(reply);
2636 }
2637
/// Handles the ssl_crtd helper response to a certificate generation request:
/// on success, installs the generated certificate/key into the client TLS
/// session or context; on any failure, proceeds with a nil context.
void
ConnStateData::sslCrtdHandleReply(const Helper::Reply &reply)
{
    if (!isOpen()) {
        debugs(33, 3, "Connection gone while waiting for ssl_crtd helper reply; helper reply:" << reply);
        return;
    }

    if (reply.result == Helper::BrokenHelper) {
        debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply);
    } else if (!reply.other().hasContent()) {
        debugs(1, DBG_IMPORTANT, HERE << "\"ssl_crtd\" helper returned <NULL> reply.");
    } else {
        Ssl::CrtdMessage reply_message(Ssl::CrtdMessage::REPLY);
        if (reply_message.parse(reply.other().content(), reply.other().contentSize()) != Ssl::CrtdMessage::OK) {
            debugs(33, 5, "Reply from ssl_crtd for " << tlsConnectHostOrIp << " is incorrect");
        } else {
            if (reply.result != Helper::Okay) {
                debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply_message.getBody());
            } else {
                debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " was successfully received from ssl_crtd");
                if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
                    // peek/stare: configure the already-established session object
                    doPeekAndSpliceStep();
                    auto ssl = fd_table[clientConnection->fd].ssl.get();
                    bool ret = Ssl::configureSSLUsingPkeyAndCertFromMemory(ssl, reply_message.getBody().c_str(), *port);
                    if (!ret)
                        debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

                    Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
                    Ssl::configureUnconfiguredSslContext(ctx, signAlgorithm, *port);
                } else {
                    // classic bump: build a fresh context, cache it, and proceed
                    Security::ContextPointer ctx(Ssl::GenerateSslContextUsingPkeyAndCertFromMemory(reply_message.getBody().c_str(), port->secure, (signAlgorithm == Ssl::algSignTrusted)));
                    if (ctx && !sslBumpCertKey.isEmpty())
                        storeTlsContextToCache(sslBumpCertKey, ctx);
                    getSslContextDone(ctx);
                }
                return;
            }
        }
    }
    // all error paths fall through to here: continue without a new context
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2681
2682 void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties &certProperties)
2683 {
2684 certProperties.commonName = sslCommonName_.isEmpty() ? tlsConnectHostOrIp.c_str() : sslCommonName_.c_str();
2685
2686 const bool connectedOk = sslServerBump && sslServerBump->connectedOk();
2687 if (connectedOk) {
2688 if (X509 *mimicCert = sslServerBump->serverCert.get())
2689 certProperties.mimicCert.resetAndLock(mimicCert);
2690
2691 ACLFilledChecklist checklist(nullptr, sslServerBump->request.getRaw());
2692 fillChecklist(checklist);
2693
2694 for (sslproxy_cert_adapt *ca = Config.ssl_client.cert_adapt; ca != NULL; ca = ca->next) {
2695 // If the algorithm already set, then ignore it.
2696 if ((ca->alg == Ssl::algSetCommonName && certProperties.setCommonName) ||
2697 (ca->alg == Ssl::algSetValidAfter && certProperties.setValidAfter) ||
2698 (ca->alg == Ssl::algSetValidBefore && certProperties.setValidBefore) )
2699 continue;
2700
2701 if (ca->aclList && checklist.fastCheck(ca->aclList).allowed()) {
2702 const char *alg = Ssl::CertAdaptAlgorithmStr[ca->alg];
2703 const char *param = ca->param;
2704
2705 // For parameterless CN adaptation, use hostname from the
2706 // CONNECT request.
2707 if (ca->alg == Ssl::algSetCommonName) {
2708 if (!param)
2709 param = tlsConnectHostOrIp.c_str();
2710 certProperties.commonName = param;
2711 certProperties.setCommonName = true;
2712 } else if (ca->alg == Ssl::algSetValidAfter)
2713 certProperties.setValidAfter = true;
2714 else if (ca->alg == Ssl::algSetValidBefore)
2715 certProperties.setValidBefore = true;
2716
2717 debugs(33, 5, HERE << "Matches certificate adaptation aglorithm: " <<
2718 alg << " param: " << (param ? param : "-"));
2719 }
2720 }
2721
2722 certProperties.signAlgorithm = Ssl::algSignEnd;
2723 for (sslproxy_cert_sign *sg = Config.ssl_client.cert_sign; sg != NULL; sg = sg->next) {
2724 if (sg->aclList && checklist.fastCheck(sg->aclList).allowed()) {
2725 certProperties.signAlgorithm = (Ssl::CertSignAlgorithm)sg->alg;
2726 break;
2727 }
2728 }
2729 } else {// did not try to connect (e.g. client-first) or failed to connect
2730 // In case of an error while connecting to the secure server, use a
2731 // trusted certificate, with no mimicked fields and no adaptation
2732 // algorithms. There is nothing we can mimic, so we want to minimize the
2733 // number of warnings the user will have to see to get to the error page.
2734 // We will close the connection, so that the trust is not extended to
2735 // non-Squid content.
2736 certProperties.signAlgorithm = Ssl::algSignTrusted;
2737 }
2738
2739 assert(certProperties.signAlgorithm != Ssl::algSignEnd);
2740
2741 if (certProperties.signAlgorithm == Ssl::algSignUntrusted) {
2742 assert(port->secure.untrustedSigningCa.cert);
2743 certProperties.signWithX509.resetAndLock(port->secure.untrustedSigningCa.cert.get());
2744 certProperties.signWithPkey.resetAndLock(port->secure.untrustedSigningCa.pkey.get());
2745 } else {
2746 assert(port->secure.signingCa.cert.get());
2747 certProperties.signWithX509.resetAndLock(port->secure.signingCa.cert.get());
2748
2749 if (port->secure.signingCa.pkey)
2750 certProperties.signWithPkey.resetAndLock(port->secure.signingCa.pkey.get());
2751 }
2752 signAlgorithm = certProperties.signAlgorithm;
2753
2754 certProperties.signHash = Ssl::DefaultSignHash;
2755 }
2756
2757 Security::ContextPointer
2758 ConnStateData::getTlsContextFromCache(const SBuf &cacheKey, const Ssl::CertificateProperties &certProperties)
2759 {
2760 debugs(33, 5, "Finding SSL certificate for " << cacheKey << " in cache");
2761 Ssl::LocalContextStorage * ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2762 if (const auto ctx = ssl_ctx_cache ? ssl_ctx_cache->get(cacheKey) : nullptr) {
2763 if (Ssl::verifySslCertificate(*ctx, certProperties)) {
2764 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is valid");
2765 return *ctx;
2766 } else {
2767 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is out of date. Delete this certificate from cache");
2768 if (ssl_ctx_cache)
2769 ssl_ctx_cache->del(cacheKey);
2770 }
2771 }
2772 return Security::ContextPointer(nullptr);
2773 }
2774
2775 void
2776 ConnStateData::storeTlsContextToCache(const SBuf &cacheKey, Security::ContextPointer &ctx)
2777 {
2778 Ssl::LocalContextStorage *ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2779 if (!ssl_ctx_cache || !ssl_ctx_cache->add(cacheKey, ctx)) {
2780 // If it is not in storage delete after using. Else storage deleted it.
2781 fd_table[clientConnection->fd].dynamicTlsContext = ctx;
2782 }
2783 }
2784
/// Obtains the client-side TLS context for an SslBump transaction: tries the
/// per-port certificate cache first (except in peek/stare modes), then the
/// ssl_crtd helper (when built with USE_SSL_CRTD), and finally blocking
/// in-process generation. Finishes by calling getSslContextDone() here or,
/// for ssl_crtd, from the helper reply handler.
void
ConnStateData::getSslContextStart()
{
    if (port->secure.generateHostCertificates) {
        Ssl::CertificateProperties certProperties;
        buildSslCertGenerationParams(certProperties);

        // Disable caching for bumpPeekAndSplice mode
        if (!(sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare))) {
            sslBumpCertKey.clear();
            Ssl::InRamCertificateDbKey(certProperties, sslBumpCertKey);
            assert(!sslBumpCertKey.isEmpty());

            // cache hit: reuse the previously generated context
            Security::ContextPointer ctx(getTlsContextFromCache(sslBumpCertKey, certProperties));
            if (ctx) {
                getSslContextDone(ctx);
                return;
            }
        }

#if USE_SSL_CRTD
        try {
            debugs(33, 5, HERE << "Generating SSL certificate for " << certProperties.commonName << " using ssl_crtd.");
            Ssl::CrtdMessage request_message(Ssl::CrtdMessage::REQUEST);
            request_message.setCode(Ssl::CrtdMessage::code_new_certificate);
            request_message.composeRequest(certProperties);
            debugs(33, 5, HERE << "SSL crtd request: " << request_message.compose().c_str());
            Ssl::Helper::Submit(request_message, sslCrtdHandleReplyWrapper, this);
            return; // sslCrtdHandleReplyWrapper continues context generation
        } catch (const std::exception &e) {
            debugs(33, DBG_IMPORTANT, "ERROR: Failed to compose ssl_crtd " <<
                   "request for " << certProperties.commonName <<
                   " certificate: " << e.what() << "; will now block to " <<
                   "generate that certificate.");
            // fall through to do blocking in-process generation.
        }
#endif // USE_SSL_CRTD

        debugs(33, 5, HERE << "Generating SSL certificate for " << certProperties.commonName);
        if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
            // peek/stare: configure the already-created TLS session in place
            doPeekAndSpliceStep();
            auto ssl = fd_table[clientConnection->fd].ssl.get();
            if (!Ssl::configureSSL(ssl, certProperties, *port))
                debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

            Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
            Ssl::configureUnconfiguredSslContext(ctx, certProperties.signAlgorithm, *port);
        } else {
            // generate a whole new context and (when possible) cache it
            Security::ContextPointer dynCtx(Ssl::GenerateSslContext(certProperties, port->secure, (signAlgorithm == Ssl::algSignTrusted)));
            if (dynCtx && !sslBumpCertKey.isEmpty())
                storeTlsContextToCache(sslBumpCertKey, dynCtx);
            getSslContextDone(dynCtx);
        }
        return;
    }

    // certificate generation is disabled; getSslContextDone() falls back to
    // the port's static context (if any)
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2844
/// Finishes client-side TLS context selection for SslBump: falls back to the
/// port's static context when dynamic generation failed (closing the
/// connection if neither is available), creates the TLS server session, and
/// starts the TLS negotiation using the client bytes already buffered in
/// inBuf.
void
ConnStateData::getSslContextDone(Security::ContextPointer &ctx)
{
    if (port->secure.generateHostCertificates && !ctx) {
        debugs(33, 2, "Failed to generate TLS context for " << tlsConnectHostOrIp);
    }

    // If generated ssl context = NULL, try to use static ssl context.
    if (!ctx) {
        if (!port->secure.staticContext) {
            debugs(83, DBG_IMPORTANT, "Closing " << clientConnection->remote << " as lacking TLS context");
            clientConnection->close();
            return;
        } else {
            debugs(33, 5, "Using static TLS context.");
            ctx = port->secure.staticContext;
        }
    }

    if (!httpsCreate(this, ctx))
        return;

    // bumped intercepted conns should already have Config.Timeout.request set
    // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
    // to make sure the connection does not get stuck on non-SSL clients.
    resetReadTimeout(Config.Timeout.request);

    switchedToHttps_ = true;

    // hand the already-read client bytes to the TLS layer, then negotiate
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    inBuf.clear();
    clientNegotiateSSL(clientConnection->fd, this);
}
2881
/// Switches this connection to HTTPS handling after a CONNECT (or for an
/// intercepted TCP connection): records the TLS target, creates the
/// Ssl::ServerBump state appropriate for bumpServerMode, resets timeouts, and
/// schedules a read of the expected TLS Client Hello.
void
ConnStateData::switchToHttps(ClientHttpRequest *http, Ssl::BumpMode bumpServerMode)
{
    assert(!switchedToHttps_);
    Must(http->request);
    auto &request = http->request;

    // Depending on receivedFirstByte_, we are at the start of either an
    // established CONNECT tunnel with the client or an intercepted TCP (and
    // presumably TLS) connection from the client. Expect TLS Client Hello.
    const auto insideConnectTunnel = receivedFirstByte_;
    debugs(33, 5, (insideConnectTunnel ? "post-CONNECT " : "raw TLS ") << clientConnection);

    tlsConnectHostOrIp = request->url.hostOrIp();
    tlsConnectPort = request->url.port();
    resetSslCommonName(request->url.host());

    // We are going to read new request
    flags.readMore = true;

    // keep version major.minor details the same.
    // but we are now performing the HTTPS handshake traffic
    transferProtocol.protocol = AnyP::PROTO_HTTPS;

    // If sslServerBump is set, then we have decided to deny CONNECT
    // and now want to switch to SSL to send the error to the client
    // without even peeking at the origin server certificate.
    if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http);
    } else if (bumpServerMode == Ssl::bumpPeek || bumpServerMode == Ssl::bumpStare) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http, nullptr, bumpServerMode);
    }

    // commSetConnTimeout() was called for this request before we switched.
    // Fix timeout to request_start_timeout
    resetReadTimeout(Config.Timeout.request_start_timeout);
    // Also reset receivedFirstByte_ flag to allow this timeout work in the case we have
    // a bumped "connect" request on a non-transparent port.
    receivedFirstByte_ = false;
    // Get more data to peek at the TLS handshake
    parsingTlsHandshake = true;

    // If the protocol has changed, then reset preservingClientData_.
    // Otherwise, its value initially set in start() is still valid/fresh.
    // shouldPreserveClientData() uses parsingTlsHandshake which is reset above.
    if (insideConnectTunnel)
        preservingClientData_ = shouldPreserveClientData();

    readSomeData();
}
2934
/// Parses the buffered TLS Client Hello of a to-be-bumped connection. Reads
/// more data while the handshake is incomplete; on a parse error, records the
/// error detail and attempts to tunnel the raw traffic instead (closing the
/// connection if tunneling is refused); on success, proceeds to the next
/// SslBump step matching the configured step1 action.
void
ConnStateData::parseTlsHandshake()
{
    Must(parsingTlsHandshake);

    assert(!inBuf.isEmpty());
    receivedFirstByte();
    fd_note(clientConnection->fd, "Parsing TLS handshake");

    // stops being nil if we fail to parse the handshake
    ErrorDetail::Pointer parseErrorDetails;

    try {
        if (!tlsParser.parseHello(inBuf)) {
            // need more data to finish parsing
            readSomeData();
            return;
        }
    }
    catch (const TextException &ex) {
        debugs(83, 2, "exception: " << ex);
        parseErrorDetails = new ExceptionErrorDetail(ex.id());
    }
    catch (...) {
        debugs(83, 2, "exception: " << CurrentException);
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_PARSE");
        parseErrorDetails = d;
    }

    parsingTlsHandshake = false;

    // client data may be needed for splicing and for
    // tunneling unsupportedProtocol after an error
    preservedClientData = inBuf;

    // Even if the parser failed, each TLS detail should either be set
    // correctly or still be "unknown"; copying unknown detail is a no-op.
    Security::TlsDetails::Pointer const &details = tlsParser.details;
    clientConnection->tlsNegotiations()->retrieveParsedInfo(details);
    if (details && !details->serverName.isEmpty()) {
        // adopt the SNI name as the certificate common name
        resetSslCommonName(details->serverName.c_str());
        tlsClientSni_ = details->serverName;
    }

    // We should disable read/write handlers
    Comm::ResetSelect(clientConnection->fd);

    if (parseErrorDetails) {
        Http::StreamPointer context = pipeline.front();
        Must(context && context->http);
        HttpRequest::Pointer request = context->http->request;
        debugs(83, 5, "Got something other than TLS Client Hello. Cannot SslBump.");
        updateError(ERR_PROTOCOL_UNKNOWN, parseErrorDetails);
        if (!clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_PROTOCOL_UNKNOWN))
            clientConnection->close();
        return;
    }

    if (!sslServerBump || sslServerBump->act.step1 == Ssl::bumpClientFirst) { // Either means client-first.
        getSslContextStart();
        return;
    } else if (sslServerBump->act.step1 == Ssl::bumpServerFirst) {
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        // will call httpsPeeked() with certificate and connection, eventually
        FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : nullptr);
    } else {
        Must(sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare);
        startPeekAndSplice();
    }
}
3006
3007 void httpsSslBumpStep2AccessCheckDone(Acl::Answer answer, void *data)
3008 {
3009 ConnStateData *connState = (ConnStateData *) data;
3010
3011 // if the connection is closed or closing, just return.
3012 if (!connState->isOpen())
3013 return;
3014
3015 debugs(33, 5, "Answer: " << answer << " kind:" << answer.kind);
3016 assert(connState->serverBump());
3017 Ssl::BumpMode bumpAction;
3018 if (answer.allowed()) {
3019 bumpAction = (Ssl::BumpMode)answer.kind;
3020 } else
3021 bumpAction = Ssl::bumpSplice;
3022
3023 connState->serverBump()->act.step2 = bumpAction;
3024 connState->sslBumpMode = bumpAction;
3025 Http::StreamPointer context = connState->pipeline.front();
3026 if (ClientHttpRequest *http = (context ? context->http : nullptr))
3027 http->al->ssl.bumpMode = bumpAction;
3028
3029 if (bumpAction == Ssl::bumpTerminate) {
3030 connState->clientConnection->close();
3031 } else if (bumpAction != Ssl::bumpSplice) {
3032 connState->startPeekAndSplice();
3033 } else if (!connState->splice())
3034 connState->clientConnection->close();
3035 }
3036
/// Stops TLS bumping and turns this connection into a TCP tunnel by creating
/// a fake CONNECT request for the (SNI-derived) destination. Returns false
/// when the fake request could not be initiated.
bool
ConnStateData::splice()
{
    // normally we can splice here, because we just got client hello message

    // fde::ssl/tls_read_method() probably reads from our own inBuf. If so, then
    // we should not lose any raw bytes when switching to raw I/O here.
    if (fd_table[clientConnection->fd].ssl.get())
        fd_table[clientConnection->fd].useDefaultIo();

    // XXX: assuming that there was an HTTP/1.1 CONNECT to begin with...
    // reset the current protocol to HTTP/1.1 (was "HTTPS" for the bumping process)
    transferProtocol = Http::ProtocolVersion();
    assert(!pipeline.empty());
    Http::StreamPointer context = pipeline.front();
    Must(context);
    Must(context->http);
    ClientHttpRequest *http = context->http;
    HttpRequest::Pointer request = http->request;
    // retire the bumping transaction before starting the tunneling one
    context->finished();
    if (transparent()) {
        // For transparent connections, make a new fake CONNECT request, now
        // with SNI as target. doCallout() checks, adaptations may need that.
        return fakeAConnectRequest("splice", preservedClientData);
    } else {
        // For non-transparent connections make a new tunneled CONNECT, which
        // also sets the HttpRequest::flags::forceTunnel flag to avoid
        // responding with "Connection Established" to the client.
        // This fake CONNECT request is required to allow use of SNI in
        // doCallout() checks and adaptations.
        return initiateTunneledRequest(request, Http::METHOD_CONNECT, "splice", preservedClientData);
    }
}
3070
/// SslBump step2 entry point: when still at step1, starts the asynchronous
/// ssl_bump step2 ACL check; otherwise creates an unconfigured TLS server
/// context and lets the TLS library re-parse the buffered Client Hello before
/// forwarding the request to peek at the server certificate.
void
ConnStateData::startPeekAndSplice()
{
    // This is the Step2 of the SSL bumping
    assert(sslServerBump);
    Http::StreamPointer context = pipeline.front();
    ClientHttpRequest *http = context ? context->http : nullptr;

    if (sslServerBump->at(XactionStep::tlsBump1)) {
        sslServerBump->step = XactionStep::tlsBump2;
        // Run an access list check to decide whether to splice or continue bumping

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, sslServerBump->request.getRaw(), nullptr);
        // disallow these ssl_bump modes as step2 answers
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpNone));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpClientFirst));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpServerFirst));
        fillChecklist(*acl_checklist);
        acl_checklist->nonBlockingCheck(httpsSslBumpStep2AccessCheckDone, this);
        return;
    }

    // will call httpsPeeked() with certificate and connection, eventually
    Security::ContextPointer unConfiguredCTX(Ssl::createSSLContext(port->secure.signingCa.cert, port->secure.signingCa.pkey, port->secure));
    fd_table[clientConnection->fd].dynamicTlsContext = unConfiguredCTX;

    if (!httpsCreate(this, unConfiguredCTX))
        return;

    switchedToHttps_ = true;

    // feed the buffered client bytes to the TLS layer and pause the BIO
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    bio->hold(true);

    // We have successfully parsed client Hello, but our TLS handshake parser is
    // forgiving. Now we use a TLS library to parse the same bytes, so that we
    // can honor on_unsupported_protocol if needed. If there are no errors, we
    // expect Security::Accept() to ask us to write (our) TLS server Hello. We
    // also allow an ioWantRead result in case some fancy TLS extension that
    // Squid does not yet understand requires reading post-Hello client bytes.
    const auto handshakeResult = acceptTls();
    if (!handshakeResult.wantsIo())
        return handleSslBumpHandshakeError(handshakeResult);

    // We need to reset inBuf here, to be used by incoming requests in the case
    // of SSL bump
    inBuf.clear();

    debugs(83, 5, "Peek and splice at step2 done. Start forwarding the request!!! ");
    FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : NULL);
}
3124
/// process a problematic Security::Accept() result on the SslBump code path:
/// record an error category/detail matching the result, then tunnel the
/// client traffic if allowed, closing the client connection otherwise.
void
ConnStateData::handleSslBumpHandshakeError(const Security::IoResult &handshakeResult)
{
    auto errCategory = ERR_NONE;

    switch (handshakeResult.category) {
    case Security::IoResult::ioSuccess: {
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_SUCCESS");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioWantRead: {
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_READ");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioWantWrite: {
        static const auto d = MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_WRITE");
        updateError(errCategory = ERR_GATEWAY_FAILURE, d);
        break;
    }

    case Security::IoResult::ioError:
        debugs(83, (handshakeResult.important ? DBG_IMPORTANT : 2), "ERROR: " << handshakeResult.errorDescription <<
               " while SslBump-accepting a TLS connection on " << clientConnection << ": " << handshakeResult.errorDetail);
        updateError(errCategory = ERR_SECURE_ACCEPT_FAIL, handshakeResult.errorDetail);
        break;

    }

    // honor on_unsupported_protocol-style tunneling if possible
    if (!tunnelOnError(HttpRequestMethod(), errCategory))
        clientConnection->close();
}
3161
3162 void
3163 ConnStateData::doPeekAndSpliceStep()
3164 {
3165 auto ssl = fd_table[clientConnection->fd].ssl.get();
3166 BIO *b = SSL_get_rbio(ssl);
3167 assert(b);
3168 Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
3169
3170 debugs(33, 5, "PeekAndSplice mode, proceed with client negotiation. Current state:" << SSL_state_string_long(ssl));
3171 bio->hold(false);
3172
3173 Comm::SetSelect(clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, this, 0);
3174 switchedToHttps_ = true;
3175 }
3176
3177 void
3178 ConnStateData::httpsPeeked(PinnedIdleContext pic)
3179 {
3180 Must(sslServerBump != NULL);
3181 Must(sslServerBump->request == pic.request);
3182 Must(pipeline.empty() || pipeline.front()->http == nullptr || pipeline.front()->http->request == pic.request.getRaw());
3183
3184 if (Comm::IsConnOpen(pic.connection)) {
3185 notePinnedConnectionBecameIdle(pic);
3186 debugs(33, 5, "bumped HTTPS server: " << tlsConnectHostOrIp);
3187 } else
3188 debugs(33, 5, "Error while bumping: " << tlsConnectHostOrIp);
3189
3190 getSslContextStart();
3191 }
3192
3193 #endif /* USE_OPENSSL */
3194
/// Initiates tunneling of the remaining client traffic by building a fake
/// request for the best-known destination, tried in order: the pinned server
/// connection, the cause request URI, the TLS CONNECT target, or the
/// intercepted local address. Returns false (recording ERR_INVALID_REQ) when
/// no destination can be derived.
bool
ConnStateData::initiateTunneledRequest(HttpRequest::Pointer const &cause, Http::MethodType const method, const char *reason, const SBuf &payload)
{
    // fake a CONNECT request to force connState to tunnel
    SBuf connectHost;
    unsigned short connectPort = 0;

    if (pinning.serverConnection != nullptr) {
        // reuse the address of the already-pinned server connection
        static char ip[MAX_IPSTRLEN];
        connectHost = pinning.serverConnection->remote.toStr(ip, sizeof(ip));
        connectPort = pinning.serverConnection->remote.port();
    } else if (cause) {
        connectHost = cause->url.hostOrIp();
        connectPort = cause->url.port();
#if USE_OPENSSL
    } else if (!tlsConnectHostOrIp.isEmpty()) {
        // target recorded when switching to HTTPS for bumping
        connectHost = tlsConnectHostOrIp;
        connectPort = tlsConnectPort;
#endif
    } else if (transparent()) {
        // use the intercepted destination address
        static char ip[MAX_IPSTRLEN];
        connectHost = clientConnection->local.toStr(ip, sizeof(ip));
        connectPort = clientConnection->local.port();
    } else {
        // Typical cases are malformed HTTP requests on http_port and malformed
        // TLS handshakes on non-bumping https_port. TODO: Discover these
        // problems earlier so that they can be classified/detailed better.
        debugs(33, 2, "Not able to compute URL, abort request tunneling for " << reason);
        // TODO: throw when nonBlockingCheck() callbacks gain job protections
        static const auto d = MakeNamedErrorDetail("TUNNEL_TARGET");
        updateError(ERR_INVALID_REQ, d);
        return false;
    }

    debugs(33, 2, "Request tunneling for " << reason);
    ClientHttpRequest *http = buildFakeRequest(method, connectHost, connectPort, payload);
    HttpRequest::Pointer request = http->request;
    request->flags.forceTunnel = true;
    http->calloutContext = new ClientRequestContext(http);
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3238
/// Builds and processes a fake CONNECT request for a transparently
/// intercepted connection, using the TLS SNI name (when available) or the
/// intercepted local IP address as the target host. Always returns true.
bool
ConnStateData::fakeAConnectRequest(const char *reason, const SBuf &payload)
{
    debugs(33, 2, "fake a CONNECT request to force connState to tunnel for " << reason);

    SBuf connectHost;
    assert(transparent());
    const unsigned short connectPort = clientConnection->local.port();

#if USE_OPENSSL
    if (!tlsClientSni_.isEmpty())
        connectHost.assign(tlsClientSni_);
    else
#endif
    {
        // no SNI (or no OpenSSL): fall back to the intercepted local address
        static char ip[MAX_IPSTRLEN];
        clientConnection->local.toHostStr(ip, sizeof(ip));
        connectHost.assign(ip);
    }

    ClientHttpRequest *http = buildFakeRequest(Http::METHOD_CONNECT, connectHost, connectPort, payload);

    http->calloutContext = new ClientRequestContext(http);
    HttpRequest::Pointer request = http->request;
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3267
/// Creates a ClientHttpRequest/Http::Stream pair carrying a synthetic request
/// (e.g. a fake CONNECT) with the given method, host, and port; wires it into
/// the client stream machinery; and installs payload as this connection's
/// input buffer. Returns the new (registered) ClientHttpRequest.
ClientHttpRequest *
ConnStateData::buildFakeRequest(Http::MethodType const method, SBuf &useHost, unsigned short usePort, const SBuf &payload)
{
    ClientHttpRequest *http = new ClientHttpRequest(this);
    Http::Stream *stream = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = stream->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = stream;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    stream->flags.parsed_ok = 1; // Do we need it?
    stream->mayUseConnection(true);
    extendLifetime();
    stream->registerWithConn();

    MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
    mx->tcpClient = clientConnection;
    // Setup Http::Request object. Maybe should be replaced by a call to (modified)
    // clientProcessRequest
    HttpRequest::Pointer request = new HttpRequest(mx);
    // METHOD_NONE requests use authority-form URLs (host:port only)
    AnyP::ProtocolType proto = (method == Http::METHOD_NONE) ? AnyP::PROTO_AUTHORITY_FORM : AnyP::PROTO_HTTP;
    request->url.setScheme(proto, nullptr);
    request->method = method;
    request->url.host(useHost.c_str());
    request->url.port(usePort);

    http->uri = SBufToCstring(request->effectiveRequestUri());
    http->initRequest(request.getRaw());

    request->manager(this, http->al);

    if (proto == AnyP::PROTO_HTTP)
        request->header.putStr(Http::HOST, useHost.c_str());

    request->sources |= ((switchedToHttps() || port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    if (getAuth())
        request->auth_user_request = getAuth();
#endif

    // the fake request "reads" the previously received raw client bytes
    inBuf = payload;
    flags.readMore = false;

    return http;
}
3319
3320 /// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
3321 static bool
3322 OpenedHttpSocket(const Comm::ConnectionPointer &c, const Ipc::FdNoteId portType)
3323 {
3324 if (!Comm::IsConnOpen(c)) {
3325 Must(NHttpSockets > 0); // we tried to open some
3326 --NHttpSockets; // there will be fewer sockets than planned
3327 Must(HttpSockets[NHttpSockets] < 0); // no extra fds received
3328
3329 if (!NHttpSockets) // we could not open any listen sockets at all
3330 fatalf("Unable to open %s",FdNote(portType));
3331
3332 return false;
3333 }
3334 return true;
3335 }
3336
3337 /// find any unused HttpSockets[] slot and store fd there or return false
3338 static bool
3339 AddOpenedHttpSocket(const Comm::ConnectionPointer &conn)
3340 {
3341 bool found = false;
3342 for (int i = 0; i < NHttpSockets && !found; ++i) {
3343 if ((found = HttpSockets[i] < 0))
3344 HttpSockets[i] = conn->fd;
3345 }
3346 return found;
3347 }
3348
/// Asks IPC to open a listening socket for every configured http(s)_port,
/// after validating each port's SslBump and TLS context configuration.
/// Actual socket activation is finished in clientListenerConnectionOpened().
static void
clientHttpConnectionsOpen(void)
{
    for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
        const SBuf &scheme = AnyP::UriScheme(s->transport.protocol).image();

        if (MAXTCPLISTENPORTS == NHttpSockets) {
            debugs(1, DBG_IMPORTANT, "WARNING: You have too many '" << scheme << "_port' lines." <<
                   Debug::Extra << "The limit is " << MAXTCPLISTENPORTS << " HTTP ports.");
            continue;
        }

#if USE_OPENSSL
        if (s->flags.tunnelSslBumping) {
            // bumping requires an ssl_bump ACL and a usable TLS configuration
            if (!Config.accessList.ssl_bump) {
                debugs(33, DBG_IMPORTANT, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << scheme << "_port " << s->s);
                s->flags.tunnelSslBumping = false;
            }
            if (!s->secure.staticContext && !s->secure.generateHostCertificates) {
                debugs(1, DBG_IMPORTANT, "Will not bump SSL at " << scheme << "_port " << s->s << " due to TLS initialization failure.");
                s->flags.tunnelSslBumping = false;
                if (s->transport.protocol == AnyP::PROTO_HTTP)
                    s->secure.encryptTransport = false;
            }
            if (s->flags.tunnelSslBumping) {
                // Create ssl_ctx cache for this port.
                Ssl::TheGlobalContextStorage.addLocalStorage(s->s, s->secure.dynamicCertMemCacheSize);
            }
        }
#endif

        if (s->secure.encryptTransport && !s->secure.staticContext) {
            debugs(1, DBG_CRITICAL, "ERROR: Ignoring " << scheme << "_port " << s->s << " due to TLS context initialization failure.");
            continue;
        }

        // Fill out a Comm::Connection which IPC will open as a listener for us
        // then pass back when active so we can start a TcpAcceptor subscription.
        s->listenConn = new Comm::Connection;
        s->listenConn->local = s->s;

        s->listenConn->flags = COMM_NONBLOCKING | (s->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
                               (s->flags.natIntercept ? COMM_INTERCEPTION : 0) |
                               (s->workerQueues ? COMM_REUSEPORT : 0);

        typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
        if (s->transport.protocol == AnyP::PROTO_HTTP) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTP
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpAccept", CommAcceptCbPtrFun(httpAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33,2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened, s, Ipc::fdnHttpSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpSocket, listenCall);

        } else if (s->transport.protocol == AnyP::PROTO_HTTPS) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTPS
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpsAccept", CommAcceptCbPtrFun(httpsAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33, 2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened,
                                                    s, Ipc::fdnHttpsSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpsSocket, listenCall);
        }

        HttpSockets[NHttpSockets] = -1; // set in clientListenerConnectionOpened
        ++NHttpSockets;
    }
}
3419
/// Asks IPC to open a listening socket on the given port, routing each
/// accepted connection to subCall, and reserves an HttpSockets[] slot for it
/// (filled in by clientListenerConnectionOpened()).
void
clientStartListeningOn(AnyP::PortCfgPointer &port, const RefCount< CommCbFunPtrCallT<CommAcceptCbPtrFun> > &subCall, const Ipc::FdNoteId fdNote)
{
    // Fill out a Comm::Connection which IPC will open as a listener for us
    port->listenConn = new Comm::Connection;
    port->listenConn->local = port->s;
    port->listenConn->flags =
        COMM_NONBLOCKING |
        (port->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
        (port->flags.natIntercept ? COMM_INTERCEPTION : 0);

    // route new connections to subCall
    typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
    Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
    AsyncCall::Pointer listenCall =
        asyncCall(33, 2, "clientListenerConnectionOpened",
                  ListeningStartedDialer(&clientListenerConnectionOpened,
                                         port, fdNote, sub));
    Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, port->listenConn, fdNote, listenCall);

    assert(NHttpSockets < MAXTCPLISTENPORTS);
    HttpSockets[NHttpSockets] = -1;
    ++NHttpSockets;
}
3444
/// process clientHttpConnectionsOpen result: validates the listening
/// connection IPC opened for us, starts a TcpAcceptor job to accept on it,
/// and records its fd in the HttpSockets[] bookkeeping.
static void
clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub)
{
    Must(s != NULL);

    if (!OpenedHttpSocket(s->listenConn, portTypeNote))
        return;

    Must(Comm::IsConnOpen(s->listenConn));

    // TCP: setup a job to handle accept() with subscribed handler
    AsyncJob::Start(new Comm::TcpAcceptor(s, FdNote(portTypeNote), sub));

    debugs(1, Important(13), "Accepting " <<
           (s->flags.natIntercept ? "NAT intercepted " : "") <<
           (s->flags.tproxyIntercept ? "TPROXY intercepted " : "") <<
           (s->flags.tunnelSslBumping ? "SSL bumped " : "") <<
           (s->flags.accelSurrogate ? "reverse-proxy " : "")
           << FdNote(portTypeNote) << " connections at "
           << s->listenConn);

    Must(AddOpenedHttpSocket(s->listenConn)); // otherwise, we have received a fd we did not ask for

#if USE_SYSTEMD
    // When the very first port opens, tell systemd we are able to serve connections.
    // Subsequent sd_notify() calls, including calls during reconfiguration,
    // do nothing because the first call parameter is 1.
    // XXX: Send the notification only after opening all configured ports.
    if (opt_foreground || opt_no_daemon) {
        const auto result = sd_notify(1, "READY=1");
        if (result < 0) {
            debugs(1, DBG_IMPORTANT, "WARNING: failed to send start-up notification to systemd" <<
                   Debug::Extra << "sd_notify() error: " << xstrerr(-result));
        }
    }
#endif
}
3483
/// Opens all configured client listening ports (HTTP(S) and FTP), aborting
/// Squid when no listening port could be configured at all.
void
clientOpenListenSockets(void)
{
    clientHttpConnectionsOpen();
    Ftp::StartListening();

    if (NHttpSockets < 1)
        fatal("No HTTP, HTTPS, or FTP ports configured");
}
3493
3494 void
3495 clientConnectionsClose()
3496 {
3497 for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
3498 if (s->listenConn != NULL) {
3499 debugs(1, Important(14), "Closing HTTP(S) port " << s->listenConn->local);
3500 s->listenConn->close();
3501 s->listenConn = NULL;
3502 }
3503 }
3504
3505 Ftp::StopListening();
3506
3507 // TODO see if we can drop HttpSockets array entirely */
3508 for (int i = 0; i < NHttpSockets; ++i) {
3509 HttpSockets[i] = -1;
3510 }
3511
3512 NHttpSockets = 0;
3513 }
3514
3515 int
3516 varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
3517 {
3518 SBuf vary(request->vary_headers);
3519 const auto &reply = entry->mem().freshestReply();
3520 auto has_vary = reply.header.has(Http::HdrType::VARY);
3521 #if X_ACCELERATOR_VARY
3522
3523 has_vary |=
3524 reply.header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY);
3525 #endif
3526
3527 if (!has_vary || entry->mem_obj->vary_headers.isEmpty()) {
3528 if (!vary.isEmpty()) {
3529 /* Oops... something odd is going on here.. */
3530 debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
3531 entry->mem_obj->urlXXX() << "' '" << vary << "'");
3532 request->vary_headers.clear();
3533 return VARY_CANCEL;
3534 }
3535
3536 if (!has_vary) {
3537 /* This is not a varying object */
3538 return VARY_NONE;
3539 }
3540
3541 /* virtual "vary" object found. Calculate the vary key and
3542 * continue the search
3543 */
3544 vary = httpMakeVaryMark(request, &reply);
3545
3546 if (!vary.isEmpty()) {
3547 request->vary_headers = vary;
3548 return VARY_OTHER;
3549 } else {
3550 /* Ouch.. we cannot handle this kind of variance */
3551 /* XXX This cannot really happen, but just to be complete */
3552 return VARY_CANCEL;
3553 }
3554 } else {
3555 if (vary.isEmpty()) {
3556 vary = httpMakeVaryMark(request, &reply);
3557
3558 if (!vary.isEmpty())
3559 request->vary_headers = vary;
3560 }
3561
3562 if (vary.isEmpty()) {
3563 /* Ouch.. we cannot handle this kind of variance */
3564 /* XXX This cannot really happen, but just to be complete */
3565 return VARY_CANCEL;
3566 } else if (vary.cmp(entry->mem_obj->vary_headers) == 0) {
3567 return VARY_MATCH;
3568 } else {
3569 /* Oops.. we have already been here and still haven't
3570 * found the requested variant. Bail out
3571 */
3572 debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
3573 entry->mem_obj->urlXXX() << "' '" << vary << "'");
3574 return VARY_CANCEL;
3575 }
3576 }
3577 }
3578
3579 ACLFilledChecklist *
3580 clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
3581 {
3582 const auto checklist = new ACLFilledChecklist(acl, nullptr, nullptr);
3583 clientAclChecklistFill(*checklist, http);
3584 return checklist;
3585 }
3586
3587 void
3588 clientAclChecklistFill(ACLFilledChecklist &checklist, ClientHttpRequest *http)
3589 {
3590 assert(http);
3591
3592 if (!checklist.request && http->request)
3593 checklist.setRequest(http->request);
3594
3595 if (!checklist.al && http->al) {
3596 checklist.al = http->al;
3597 checklist.syncAle(http->request, http->log_uri);
3598 if (!checklist.reply && http->al->reply) {
3599 checklist.reply = http->al->reply.getRaw();
3600 HTTPMSGLOCK(checklist.reply);
3601 }
3602 }
3603
3604 if (const auto conn = http->getConn())
3605 checklist.setConn(conn); // may already be set
3606 }
3607
3608 void
3609 ConnStateData::fillChecklist(ACLFilledChecklist &checklist) const
3610 {
3611 const auto context = pipeline.front();
3612 if (const auto http = context ? context->http : nullptr)
3613 return clientAclChecklistFill(checklist, http); // calls checklist.setConn()
3614
3615 // no requests, but we always have connection-level details
3616 // TODO: ACL checks should not require a mutable ConnStateData. Adjust the
3617 // code that accidentally violates that principle to remove this const_cast!
3618 checklist.setConn(const_cast<ConnStateData*>(this));
3619
3620 // Set other checklist fields inside our fillConnectionLevelDetails() rather
3621 // than here because clientAclChecklistFill() code path calls that method
3622 // (via ACLFilledChecklist::setConn()) rather than calling us directly.
3623 }
3624
3625 void
3626 ConnStateData::fillConnectionLevelDetails(ACLFilledChecklist &checklist) const
3627 {
3628 assert(checklist.conn() == this);
3629 assert(clientConnection);
3630
3631 if (!checklist.request) { // preserve (better) addresses supplied by setRequest()
3632 checklist.src_addr = clientConnection->remote;
3633 checklist.my_addr = clientConnection->local; // TODO: or port->s?
3634 }
3635
3636 #if USE_OPENSSL
3637 if (!checklist.sslErrors && sslServerBump)
3638 checklist.sslErrors = cbdataReference(sslServerBump->sslErrors());
3639 #endif
3640
3641 if (!checklist.rfc931[0]) // checklist creator may have supplied it already
3642 checklist.setIdent(clientConnection->rfc931);
3643
3644 }
3645
3646 bool
3647 ConnStateData::transparent() const
3648 {
3649 return clientConnection != NULL && (clientConnection->flags & (COMM_TRANSPARENT|COMM_INTERCEPTION));
3650 }
3651
3652 BodyPipe::Pointer
3653 ConnStateData::expectRequestBody(int64_t size)
3654 {
3655 bodyPipe = new BodyPipe(this);
3656 if (size >= 0)
3657 bodyPipe->setBodySize(size);
3658 else
3659 startDechunkingRequest();
3660 return bodyPipe;
3661 }
3662
3663 int64_t
3664 ConnStateData::mayNeedToReadMoreBody() const
3665 {
3666 if (!bodyPipe)
3667 return 0; // request without a body or read/produced all body bytes
3668
3669 if (!bodyPipe->bodySizeKnown())
3670 return -1; // probably need to read more, but we cannot be sure
3671
3672 const int64_t needToProduce = bodyPipe->unproducedSize();
3673 const int64_t haveAvailable = static_cast<int64_t>(inBuf.length());
3674
3675 if (needToProduce <= haveAvailable)
3676 return 0; // we have read what we need (but are waiting for pipe space)
3677
3678 return needToProduce - haveAvailable;
3679 }
3680
3681 void
3682 ConnStateData::stopReceiving(const char *error)
3683 {
3684 debugs(33, 4, HERE << "receiving error (" << clientConnection << "): " << error <<
3685 "; old sending error: " <<
3686 (stoppedSending() ? stoppedSending_ : "none"));
3687
3688 if (const char *oldError = stoppedReceiving()) {
3689 debugs(33, 3, HERE << "already stopped receiving: " << oldError);
3690 return; // nothing has changed as far as this connection is concerned
3691 }
3692
3693 stoppedReceiving_ = error;
3694
3695 if (const char *sendError = stoppedSending()) {
3696 debugs(33, 3, HERE << "closing because also stopped sending: " << sendError);
3697 clientConnection->close();
3698 }
3699 }
3700
3701 void
3702 ConnStateData::expectNoForwarding()
3703 {
3704 if (bodyPipe != NULL) {
3705 debugs(33, 4, HERE << "no consumer for virgin body " << bodyPipe->status());
3706 bodyPipe->expectNoConsumption();
3707 }
3708 }
3709
/// initialize dechunking state
/// (creates the Transfer-Encoding: chunked parser for the current body pipe)
void
ConnStateData::startDechunkingRequest()
{
    Must(bodyPipe != NULL);
    debugs(33, 5, HERE << "start dechunking" << bodyPipe->status());
    assert(!bodyParser); // at most one dechunking sequence at a time
    bodyParser = new Http1::TeChunkedParser;
}
3719
3720 /// put parsed content into input buffer and clean up
3721 void
3722 ConnStateData::finishDechunkingRequest(bool withSuccess)
3723 {
3724 debugs(33, 5, HERE << "finish dechunking: " << withSuccess);
3725
3726 if (bodyPipe != NULL) {
3727 debugs(33, 7, HERE << "dechunked tail: " << bodyPipe->status());
3728 BodyPipe::Pointer myPipe = bodyPipe;
3729 stopProducingFor(bodyPipe, withSuccess); // sets bodyPipe->bodySize()
3730 Must(!bodyPipe); // we rely on it being nil after we are done with body
3731 if (withSuccess) {
3732 Must(myPipe->bodySizeKnown());
3733 Http::StreamPointer context = pipeline.front();
3734 if (context != NULL && context->http && context->http->request)
3735 context->http->request->setContentLength(myPipe->bodySize());
3736 }
3737 }
3738
3739 delete bodyParser;
3740 bodyParser = NULL;
3741 }
3742
// XXX: this is an HTTP/1-only operation
/// Sends an HTTP control message (e.g., a 1xx reply) to the client and
/// remembers the success callback; closes the connection if there is no
/// transaction context to justify the message.
void
ConnStateData::sendControlMsg(HttpControlMsg msg)
{
    // record the control reply for access logging purposes
    if (const auto context = pipeline.front()) {
        if (context->http)
            context->http->al->reply = msg.reply;
    }

    if (!isOpen()) {
        debugs(33, 3, HERE << "ignoring 1xx due to earlier closure");
        return;
    }

    // HTTP/1 1xx status messages are only valid when there is a transaction to trigger them
    if (!pipeline.empty()) {
        HttpReply::Pointer rep(msg.reply);
        Must(rep);
        // remember the callback
        cbControlMsgSent = msg.cbSuccess;

        typedef CommCbMemFunT<HttpControlMsgSink, CommIoCbParams> Dialer;
        AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, HttpControlMsgSink::wroteControlMsg);

        // false here means the write could not even be started
        if (!writeControlMsgAndCall(rep.getRaw(), call)) {
            // but still inform the caller (so it may resume its operation)
            doneWithControlMsg();
        }
        return;
    }

    debugs(33, 3, HERE << " closing due to missing context for 1xx");
    clientConnection->close();
}
3777
3778 void
3779 ConnStateData::doneWithControlMsg()
3780 {
3781 HttpControlMsgSink::doneWithControlMsg();
3782
3783 if (Http::StreamPointer deferredRequest = pipeline.front()) {
3784 debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded after control msg wrote");
3785 ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
3786 }
3787 }
3788
3789 /// Our close handler called by Comm when the pinned connection is closed
3790 void
3791 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
3792 {
3793 // FwdState might repin a failed connection sooner than this close
3794 // callback is called for the failed connection.
3795 assert(pinning.serverConnection == io.conn);
3796 pinning.closeHandler = NULL; // Comm unregisters handlers before calling
3797 const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
3798 pinning.serverConnection->noteClosure();
3799 unpinConnection(false);
3800
3801 if (sawZeroReply && clientConnection != NULL) {
3802 debugs(33, 3, "Closing client connection on pinned zero reply.");
3803 clientConnection->close();
3804 }
3805
3806 }
3807
/// Pins the given server connection while it is still in use ("busy").
/// Unlike notePinnedConnectionBecameIdle(), no idle-connection monitoring
/// or pipeline kick() happens here.
void
ConnStateData::pinBusyConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest::Pointer &request)
{
    pinConnection(pinServer, *request);
}
3813
3814 void
3815 ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
3816 {
3817 Must(pic.connection);
3818 Must(pic.request);
3819 pinConnection(pic.connection, *pic.request);
3820
3821 // monitor pinned server connection for remote-end closures.
3822 startPinnedConnectionMonitoring();
3823
3824 if (pipeline.empty())
3825 kick(); // in case clientParseRequests() was blocked by a busy pic.connection
3826 }
3827
3828 /// Forward future client requests using the given server connection.
3829 void
3830 ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest &request)
3831 {
3832 if (Comm::IsConnOpen(pinning.serverConnection) &&
3833 pinning.serverConnection->fd == pinServer->fd) {
3834 debugs(33, 3, "already pinned" << pinServer);
3835 return;
3836 }
3837
3838 unpinConnection(true); // closes pinned connection, if any, and resets fields
3839
3840 pinning.serverConnection = pinServer;
3841
3842 debugs(33, 3, HERE << pinning.serverConnection);
3843
3844 Must(pinning.serverConnection != NULL);
3845
3846 const char *pinnedHost = "[unknown]";
3847 pinning.host = xstrdup(request.url.host());
3848 pinning.port = request.url.port();
3849 pinnedHost = pinning.host;
3850 pinning.pinned = true;
3851 if (CachePeer *aPeer = pinServer->getPeer())
3852 pinning.peer = cbdataReference(aPeer);
3853 pinning.auth = request.flags.connectionAuth;
3854 char stmp[MAX_IPSTRLEN];
3855 char desc[FD_DESC_SZ];
3856 snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
3857 (pinning.auth || !pinning.peer) ? pinnedHost : pinning.peer->name,
3858 clientConnection->remote.toUrl(stmp,MAX_IPSTRLEN),
3859 clientConnection->fd);
3860 fd_note(pinning.serverConnection->fd, desc);
3861
3862 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3863 pinning.closeHandler = JobCallback(33, 5,
3864 Dialer, this, ConnStateData::clientPinnedConnectionClosed);
3865 // remember the pinned connection so that cb does not unpin a fresher one
3866 typedef CommCloseCbParams Params;
3867 Params &params = GetCommParams<Params>(pinning.closeHandler);
3868 params.conn = pinning.serverConnection;
3869 comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
3870 }
3871
/// [re]start monitoring pinned connection for peer closures so that we can
/// propagate them to an _idle_ client pinned to that peer
void
ConnStateData::startPinnedConnectionMonitoring()
{
    if (pinning.readHandler != NULL)
        return; // already monitoring

    // clientPinnedConnectionRead() fires when the server closes or sends data
    typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
    pinning.readHandler = JobCallback(33, 3,
                                      Dialer, this, ConnStateData::clientPinnedConnectionRead);
    Comm::Read(pinning.serverConnection, pinning.readHandler);
}
3885
3886 void
3887 ConnStateData::stopPinnedConnectionMonitoring()
3888 {
3889 if (pinning.readHandler != NULL) {
3890 Comm::ReadCancel(pinning.serverConnection->fd, pinning.readHandler);
3891 pinning.readHandler = NULL;
3892 }
3893 }
3894
#if USE_OPENSSL
/// Probes an idle pinned TLS server connection that became ready for reading.
/// \returns true when the event was TLS-level only (e.g., renegotiation) and
/// monitoring was restarted; false when the caller should treat it as closure
/// or unexpected data and close the connection.
bool
ConnStateData::handleIdleClientPinnedTlsRead()
{
    // A ready-for-reading connection means that the TLS server either closed
    // the connection, sent us some unexpected HTTP data, or started TLS
    // renegotiations. We should close the connection except for the last case.

    Must(pinning.serverConnection != nullptr);
    auto ssl = fd_table[pinning.serverConnection->fd].ssl.get();
    if (!ssl)
        return false; // not a TLS connection; let the caller handle the bytes

    // peek at the TLS stream with a minimal one-byte read
    char buf[1];
    const int readResult = SSL_read(ssl, buf, sizeof(buf));

    if (readResult > 0 || SSL_pending(ssl) > 0) {
        // unexpected application data on an idle connection
        debugs(83, 2, pinning.serverConnection << " TLS application data read");
        return false;
    }

    switch(const int error = SSL_get_error(ssl, readResult)) {
    case SSL_ERROR_WANT_WRITE:
        debugs(83, DBG_IMPORTANT, pinning.serverConnection << " TLS SSL_ERROR_WANT_WRITE request for idle pinned connection");
        // fall through to restart monitoring, for now
    case SSL_ERROR_NONE:
    case SSL_ERROR_WANT_READ:
        // TLS-level activity only (e.g., renegotiation); keep watching
        startPinnedConnectionMonitoring();
        return true;

    default:
        debugs(83, 2, pinning.serverConnection << " TLS error: " << error);
        return false;
    }

    // not reached
    return true;
}
#endif
3934
/// Our read handler called by Comm when the server either closes an idle pinned connection or
/// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
void
ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
{
    pinning.readHandler = NULL; // Comm unregisters handlers before calling

    if (io.flag == Comm::ERR_CLOSING)
        return; // close handler will clean up

    Must(pinning.serverConnection == io.conn);

#if USE_OPENSSL
    // TLS-level traffic (e.g., a renegotiation) is not a reason to close
    if (handleIdleClientPinnedTlsRead())
        return;
#endif

    const bool clientIsIdle = pipeline.empty();

    debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " <<
           io.size << (clientIsIdle ? " with idle client" : ""));

    // the server closed or sent unexpected bytes; drop the pinned connection
    pinning.serverConnection->close();

    // If we are still sending data to the client, do not close now. When we are done sending,
    // ConnStateData::kick() checks pinning.serverConnection and will close.
    // However, if we are idle, then we must close to inform the idle client and minimize races.
    if (clientIsIdle && clientConnection != NULL)
        clientConnection->close();
}
3965
/// Validates and hands over the pinned server connection for the given
/// request. Throws an ErrorState (after unpinning) when the pinned connection
/// is closed or does not match the request.
Comm::ConnectionPointer
ConnStateData::borrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
{
    debugs(33, 7, pinning.serverConnection);
    Must(request);

    // on any validation failure: unpin and build the error to be thrown
    const auto pinningError = [&](const err_type type) {
        unpinConnection(true);
        HttpRequestPointer requestPointer = request;
        return ErrorState::NewForwarding(type, requestPointer, ale);
    };

    if (!Comm::IsConnOpen(pinning.serverConnection))
        throw pinningError(ERR_ZERO_SIZE_OBJECT);

    // connection-authenticated pinning must only serve the pinned host
    if (pinning.auth && pinning.host && strcasecmp(pinning.host, request->url.host()) != 0)
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST

    if (pinning.port != request->url.port())
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST

    // the pinned peer cbdata may have been invalidated (e.g., reconfiguration)
    if (pinning.peer && !cbdataReferenceValid(pinning.peer))
        throw pinningError(ERR_ZERO_SIZE_OBJECT);

    if (pinning.peerAccessDenied)
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_FORWARDING_DENIED

    // the borrower takes over; stop watching the connection ourselves
    stopPinnedConnectionMonitoring();
    return pinning.serverConnection;
}
3996
3997 Comm::ConnectionPointer
3998 ConnStateData::BorrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
3999 {
4000 if (const auto connManager = request ? request->pinnedConnection() : nullptr)
4001 return connManager->borrowPinnedConnection(request, ale);
4002
4003 // ERR_CANNOT_FORWARD is somewhat misleading here; we can still forward, but
4004 // there is no point since the client connection is now gone
4005 HttpRequestPointer requestPointer = request;
4006 throw ErrorState::NewForwarding(ERR_CANNOT_FORWARD, requestPointer, ale);
4007 }
4008
/// Forgets the pinned server connection, optionally closing it, and resets
/// most pinning state (pinning.pinned is deliberately preserved).
void
ConnStateData::unpinConnection(const bool andClose)
{
    debugs(33, 3, HERE << pinning.serverConnection);

    if (pinning.peer)
        cbdataReferenceDone(pinning.peer);

    if (Comm::IsConnOpen(pinning.serverConnection)) {
        // stop reacting to the pinned connection closing
        if (pinning.closeHandler != NULL) {
            comm_remove_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
            pinning.closeHandler = NULL;
        }

        stopPinnedConnectionMonitoring();

        // close the server side socket if requested
        if (andClose)
            pinning.serverConnection->close();
        pinning.serverConnection = NULL;
    }

    safe_free(pinning.host);

    pinning.zeroReply = false;
    pinning.peerAccessDenied = false;

    /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
     * connection has gone away */
}
4039
4040 void
4041 ConnStateData::terminateAll(const Error &error, const LogTagsErrors &lte)
4042 {
4043 debugs(33, 3, pipeline.count() << '/' << pipeline.nrequests << " after " << error);
4044
4045 if (pipeline.empty()) {
4046 bareError.update(error); // XXX: bareLogTagsErrors
4047 } else {
4048 // We terminate the current CONNECT/PUT/etc. context below, logging any
4049 // error details, but that context may leave unparsed bytes behind.
4050 // Consume them to stop checkLogging() from logging them again later.
4051 const auto intputToConsume =
4052 #if USE_OPENSSL
4053 parsingTlsHandshake ? "TLS handshake" : // more specific than CONNECT
4054 #endif
4055 bodyPipe ? "HTTP request body" :
4056 pipeline.back()->mayUseConnection() ? "HTTP CONNECT" :
4057 nullptr;
4058
4059 while (const auto context = pipeline.front()) {
4060 context->noteIoError(error, lte);
4061 context->finished(); // cleanup and self-deregister
4062 assert(context != pipeline.front());
4063 }
4064
4065 if (intputToConsume && !inBuf.isEmpty()) {
4066 debugs(83, 5, "forgetting client " << intputToConsume << " bytes: " << inBuf.length());
4067 inBuf.clear();
4068 }
4069 }
4070
4071 clientConnection->close();
4072 }
4073
/// log the last (attempt at) transaction if nobody else did
void
ConnStateData::checkLogging()
{
    // to simplify our logic, we assume that terminateAll() has been called
    assert(pipeline.empty());

    // do not log connections that closed after a transaction (it is normal)
    // TODO: access_log needs ACLs to match received-no-bytes connections
    if (pipeline.nrequests && inBuf.isEmpty())
        return;

    /* Create a temporary ClientHttpRequest object. Its destructor will log. */
    ClientHttpRequest http(this);
    http.req_sz = inBuf.length(); // leftover (never parsed) bytes, if any
    // XXX: Or we died while waiting for the pinned connection to become idle.
    http.setErrorUri("error:transaction-end-before-headers");
    http.updateError(bareError);
}
4093
4094 bool
4095 ConnStateData::shouldPreserveClientData() const
4096 {
4097 // PROXY protocol bytes are meant for us and, hence, cannot be tunneled
4098 if (needProxyProtocolHeader_)
4099 return false;
4100
4101 // If our decision here is negative, configuration changes are irrelevant.
4102 // Otherwise, clientTunnelOnError() rechecks configuration before tunneling.
4103 if (!Config.accessList.on_unsupported_protocol)
4104 return false;
4105
4106 // TODO: Figure out whether/how we can support FTP tunneling.
4107 if (port->transport.protocol == AnyP::PROTO_FTP)
4108 return false;
4109
4110 #if USE_OPENSSL
4111 if (parsingTlsHandshake)
4112 return true;
4113
4114 // the 1st HTTP request on a bumped connection
4115 if (!parsedBumpedRequestCount && switchedToHttps())
4116 return true;
4117 #endif
4118
4119 // the 1st HTTP(S) request on a connection to an intercepting port
4120 if (!pipeline.nrequests && transparent())
4121 return true;
4122
4123 return false;
4124 }
4125
4126 NotePairs::Pointer
4127 ConnStateData::notes()
4128 {
4129 if (!theNotes)
4130 theNotes = new NotePairs;
4131 return theNotes;
4132 }
4133
4134 std::ostream &
4135 operator <<(std::ostream &os, const ConnStateData::PinnedIdleContext &pic)
4136 {
4137 return os << pic.connection << ", request=" << pic.request;
4138 }
4139
4140 std::ostream &
4141 operator <<(std::ostream &os, const ConnStateData::ServerConnectionContext &scc)
4142 {
4143 return os << scc.conn_ << ", srv_bytes=" << scc.preReadServerBytes.length();
4144 }
4145