2 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 33 Client-side Routines */
12 \defgroup ClientSide Client-Side Logics
14 \section cserrors Errors and client side
16 \par Problem the first:
17 * the store entry is no longer authoritative on the
18 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
19 * of client_side_reply.c.
20 * Problem the second: resources are wasted if we delay in cleaning up.
21 * Problem the third: we can't depend on a connection close to clean up.
23 \par Nice thing the first:
24 * Any step in the stream can callback with data
25 * representing an error.
26 * Nice thing the second: once you stop requesting reads from upstream,
27 * upstream can be stopped too.
30 * Error has a callback mechanism to hand over a membuf
31 * with the error content. The failing node pushes that back as the
32 * reply. Can this be generalised to reduce duplicate efforts?
33 * A: Possibly. For now, only one location uses this.
34 * How to deal with pre-stream errors?
35 * Tell client_side_reply that we *want* an error page before any
36 * stream calls occur. Then we simply read as normal.
39 \section pconn_logic Persistent connection logic:
42 * requests (httpClientRequest structs) get added to the connection
43 * list, with the current one being chr
46 * The request is *immediately* kicked off, and data flows through
47 * to clientSocketRecipient.
50 * If the data that arrives at clientSocketRecipient is not for the current
51 * request, clientSocketRecipient simply returns, without requesting more
52 * data, or sending it.
55 * ConnStateData::kick() will then detect the presence of data in
56 * the next ClientHttpRequest, and will send it, re-establishing the
61 #include "acl/FilledChecklist.h"
62 #include "anyp/Host.h"
63 #include "anyp/PortCfg.h"
64 #include "base/AsyncCallbacks.h"
65 #include "base/Subscription.h"
66 #include "base/TextException.h"
67 #include "CachePeer.h"
68 #include "client_db.h"
69 #include "client_side.h"
70 #include "client_side_reply.h"
71 #include "client_side_request.h"
72 #include "ClientRequestContext.h"
73 #include "clientStream.h"
75 #include "comm/Connection.h"
76 #include "comm/Loops.h"
77 #include "comm/Read.h"
78 #include "comm/TcpAcceptor.h"
79 #include "comm/Write.h"
80 #include "CommCalls.h"
81 #include "debug/Messages.h"
82 #include "error/ExceptionErrorDetail.h"
83 #include "errorpage.h"
86 #include "fqdncache.h"
90 #include "helper/Reply.h"
92 #include "http/one/RequestParser.h"
93 #include "http/one/TeChunkedParser.h"
94 #include "http/Stream.h"
95 #include "HttpHdrContRange.h"
96 #include "HttpHeaderTools.h"
97 #include "HttpReply.h"
98 #include "HttpRequest.h"
100 #include "ipc/FdNotes.h"
101 #include "ipc/StartListening.h"
102 #include "log/access_log.h"
104 #include "MemObject.h"
105 #include "mime_header.h"
106 #include "parser/Tokenizer.h"
107 #include "proxyp/Header.h"
108 #include "proxyp/Parser.h"
109 #include "sbuf/Stream.h"
110 #include "security/Certificate.h"
111 #include "security/CommunicationSecrets.h"
112 #include "security/Io.h"
113 #include "security/KeyLog.h"
114 #include "security/NegotiationHistory.h"
115 #include "servers/forward.h"
116 #include "SquidConfig.h"
117 #include "StatCounters.h"
118 #include "StatHist.h"
120 #include "TimeOrTag.h"
124 #include "auth/UserRequest.h"
127 #include "ClientInfo.h"
128 #include "MessageDelayPools.h"
132 #include "ssl/context_storage.h"
133 #include "ssl/gadgets.h"
134 #include "ssl/helper.h"
135 #include "ssl/ProxyCerts.h"
136 #include "ssl/ServerBump.h"
137 #include "ssl/support.h"
144 #if HAVE_SYSTEMD_SD_DAEMON_H
145 #include <systemd/sd-daemon.h>
148 // TODO: Remove this custom dialer and simplify by creating the TcpAcceptor
149 // subscription later, inside clientListenerConnectionOpened() callback, just
150 // like htcpOpenPorts(), icpOpenPorts(), and snmpPortOpened() do it.
151 /// dials clientListenerConnectionOpened call
152 class ListeningStartedDialer
:
154 public WithAnswer
<Ipc::StartListeningAnswer
>
157 typedef void (*Handler
)(AnyP::PortCfgPointer
&portCfg
, const Ipc::FdNoteId note
, const Subscription::Pointer
&sub
);
158 ListeningStartedDialer(Handler aHandler
, AnyP::PortCfgPointer
&aPortCfg
, const Ipc::FdNoteId note
, const Subscription::Pointer
&aSub
):
159 handler(aHandler
), portCfg(aPortCfg
), portTypeNote(note
), sub(aSub
) {}
162 void print(std::ostream
&os
) const override
{
163 os
<< '(' << answer_
<< ", " << FdNote(portTypeNote
) << " port=" << (void*)&portCfg
<< ')';
166 virtual bool canDial(AsyncCall
&) const { return true; }
167 virtual void dial(AsyncCall
&) { (handler
)(portCfg
, portTypeNote
, sub
); }
170 Ipc::StartListeningAnswer
&answer() override
{ return answer_
; }
176 // answer_.conn (set/updated by IPC code) is portCfg.listenConn (used by us)
177 Ipc::StartListeningAnswer answer_
; ///< StartListening() results
178 AnyP::PortCfgPointer portCfg
; ///< from HttpPortList
179 Ipc::FdNoteId portTypeNote
; ///< Type of IPC socket being opened
180 Subscription::Pointer sub
; ///< The handler to be subscribed for this connection listener
183 static void clientListenerConnectionOpened(AnyP::PortCfgPointer
&s
, const Ipc::FdNoteId portTypeNote
, const Subscription::Pointer
&sub
);
185 static IOACB httpAccept
;
186 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength
);
188 static void clientUpdateStatHistCounters(const LogTags
&logType
, int svc_time
);
189 static void clientUpdateStatCounters(const LogTags
&logType
);
190 static void clientUpdateHierCounters(HierarchyLogEntry
*);
191 static bool clientPingHasFinished(ping_data
const *aPing
);
192 void prepareLogWithRequestDetails(HttpRequest
*, const AccessLogEntryPointer
&);
193 static void ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest
, ConnStateData
* conn
);
195 char *skipLeadingSpace(char *aString
);
198 clientUpdateStatCounters(const LogTags
&logType
)
200 ++statCounter
.client_http
.requests
;
202 if (logType
.isTcpHit())
203 ++statCounter
.client_http
.hits
;
205 if (logType
.oldType
== LOG_TCP_HIT
)
206 ++statCounter
.client_http
.disk_hits
;
207 else if (logType
.oldType
== LOG_TCP_MEM_HIT
)
208 ++statCounter
.client_http
.mem_hits
;
212 clientUpdateStatHistCounters(const LogTags
&logType
, int svc_time
)
214 statCounter
.client_http
.allSvcTime
.count(svc_time
);
216 * The idea here is not to be complete, but to get service times
217 * for only well-defined types. For example, we don't include
218 * LOG_TCP_REFRESH_FAIL because its not really a cache hit
219 * (we *tried* to validate it, but failed).
222 switch (logType
.oldType
) {
224 case LOG_TCP_REFRESH_UNMODIFIED
:
225 statCounter
.client_http
.nearHitSvcTime
.count(svc_time
);
228 case LOG_TCP_INM_HIT
:
229 case LOG_TCP_IMS_HIT
:
230 statCounter
.client_http
.nearMissSvcTime
.count(svc_time
);
235 case LOG_TCP_MEM_HIT
:
237 case LOG_TCP_OFFLINE_HIT
:
238 statCounter
.client_http
.hitSvcTime
.count(svc_time
);
243 case LOG_TCP_CLIENT_REFRESH_MISS
:
244 statCounter
.client_http
.missSvcTime
.count(svc_time
);
248 /* make compiler warnings go away */
254 clientPingHasFinished(ping_data
const *aPing
)
256 if (0 != aPing
->stop
.tv_sec
&& 0 != aPing
->start
.tv_sec
)
263 clientUpdateHierCounters(HierarchyLogEntry
* someEntry
)
267 switch (someEntry
->code
) {
268 #if USE_CACHE_DIGESTS
273 ++ statCounter
.cd
.times_used
;
281 case FIRST_PARENT_MISS
:
283 case CLOSEST_PARENT_MISS
:
284 ++ statCounter
.icp
.times_used
;
285 i
= &someEntry
->ping
;
287 if (clientPingHasFinished(i
))
288 statCounter
.icp
.querySvcTime
.count(tvSubUsec(i
->start
, i
->stop
));
291 ++ statCounter
.icp
.query_timeouts
;
298 ++ statCounter
.netdb
.times_used
;
308 ClientHttpRequest::updateCounters()
310 clientUpdateStatCounters(loggingTags());
313 ++ statCounter
.client_http
.errors
;
315 clientUpdateStatHistCounters(loggingTags(),
316 tvSubMsec(al
->cache
.start_time
, current_time
));
318 clientUpdateHierCounters(&request
->hier
);
322 prepareLogWithRequestDetails(HttpRequest
*request
, const AccessLogEntryPointer
&aLogEntry
)
325 assert(aLogEntry
!= nullptr);
327 if (Config
.onoff
.log_mime_hdrs
) {
330 request
->header
.packInto(&mb
);
331 //This is the request after adaptation or redirection
332 aLogEntry
->headers
.adapted_request
= xstrdup(mb
.buf
);
334 // the virgin request is saved to aLogEntry->request
335 if (aLogEntry
->request
) {
337 aLogEntry
->request
->header
.packInto(&mb
);
338 aLogEntry
->headers
.request
= xstrdup(mb
.buf
);
342 const Adaptation::History::Pointer ah
= request
->adaptLogHistory();
345 ah
->lastMeta
.packInto(&mb
);
346 aLogEntry
->adapt
.last_meta
= xstrdup(mb
.buf
);
354 const Adaptation::Icap::History::Pointer ih
= request
->icapHistory();
356 ih
->processingTime(aLogEntry
->icap
.processingTime
);
359 aLogEntry
->http
.method
= request
->method
;
360 aLogEntry
->http
.version
= request
->http_ver
;
361 aLogEntry
->hier
= request
->hier
;
362 aLogEntry
->cache
.extuser
= request
->extacl_user
.termedBuf();
364 // Adapted request, if any, inherits and then collects all the stats, but
365 // the virgin request gets logged instead; copy the stats to log them.
366 // TODO: avoid losses by keeping these stats in a shared history object?
367 if (aLogEntry
->request
) {
368 aLogEntry
->request
->dnsWait
= request
->dnsWait
;
369 aLogEntry
->request
->error
= request
->error
;
374 ClientHttpRequest::logRequest()
376 if (!out
.size
&& loggingTags().oldType
== LOG_TAG_NONE
)
377 debugs(33, 5, "logging half-baked transaction: " << log_uri
);
379 al
->icp
.opcode
= ICP_INVALID
;
381 debugs(33, 9, "clientLogRequest: al.url='" << al
->url
<< "'");
383 const auto findReply
= [this]() -> const HttpReply
* {
385 return al
->reply
.getRaw();
386 if (const auto le
= loggingEntry())
387 return le
->hasFreshestReply();
390 if (const auto reply
= findReply()) {
391 al
->http
.code
= reply
->sline
.status();
392 al
->http
.content_type
= reply
->content_type
.termedBuf();
395 debugs(33, 9, "clientLogRequest: http.code='" << al
->http
.code
<< "'");
397 if (loggingEntry() && loggingEntry()->mem_obj
&& loggingEntry()->objectLen() >= 0)
398 al
->cache
.objectSize
= loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?
400 al
->http
.clientRequestSz
.header
= req_sz
;
401 // the virgin request is saved to al->request
402 if (al
->request
&& al
->request
->body_pipe
)
403 al
->http
.clientRequestSz
.payloadData
= al
->request
->body_pipe
->producedSize();
404 al
->http
.clientReplySz
.header
= out
.headers_sz
;
405 // XXX: calculate without payload encoding or headers !!
406 al
->http
.clientReplySz
.payloadData
= out
.size
- out
.headers_sz
; // pretend its all un-encoded data for now.
408 al
->cache
.highOffset
= out
.offset
;
410 tvSub(al
->cache
.trTime
, al
->cache
.start_time
, current_time
);
413 prepareLogWithRequestDetails(request
, al
);
417 /* This is broken. Fails if the connection has been closed. Needs
418 * to snarf the ssl details some place earlier..
420 if (getConn() != NULL
)
421 al
->cache
.ssluser
= sslGetUserEmail(fd_table
[getConn()->fd
].ssl
);
427 for (auto h
: Config
.notes
) {
428 if (h
->match(request
, al
->reply
.getRaw(), al
, matched
)) {
429 request
->notes()->add(h
->key(), matched
);
430 debugs(33, 3, h
->key() << " " << matched
);
433 // The al->notes and request->notes must point to the same object.
434 al
->syncNotes(request
);
436 HTTPMSGUNLOCK(al
->adapted_request
);
437 al
->adapted_request
= request
;
438 HTTPMSGLOCK(al
->adapted_request
);
441 ACLFilledChecklist
checklist(nullptr, request
);
442 checklist
.updateAle(al
);
443 // no need checklist.syncAle(): already synced
444 accessLogLog(al
, &checklist
);
446 bool updatePerformanceCounters
= true;
447 if (Config
.accessList
.stats_collection
) {
448 ACLFilledChecklist
statsCheck(Config
.accessList
.stats_collection
, request
);
449 statsCheck
.updateAle(al
);
450 updatePerformanceCounters
= statsCheck
.fastCheck().allowed();
453 if (updatePerformanceCounters
) {
457 if (getConn() != nullptr && getConn()->clientConnection
!= nullptr)
458 clientdbUpdate(getConn()->clientConnection
->remote
, loggingTags(), AnyP::PROTO_HTTP
, out
.size
);
463 ClientHttpRequest::freeResources()
466 safe_free(redirect
.location
);
467 range_iter
.boundary
.clean();
470 if (client_stream
.tail
)
471 clientStreamAbort((clientStreamNode
*)client_stream
.tail
->data
, this);
475 httpRequestFree(void *data
)
477 ClientHttpRequest
*http
= (ClientHttpRequest
*)data
;
478 assert(http
!= nullptr);
482 /* This is a handler normally called by comm_close() */
483 void ConnStateData::connStateClosed(const CommCloseCbParams
&)
485 if (clientConnection
) {
486 clientConnection
->noteClosure();
487 // keep closed clientConnection for logging, clientdb cleanup, etc.
489 deleteThis("ConnStateData::connStateClosed");
494 ConnStateData::setAuth(const Auth::UserRequest::Pointer
&aur
, const char *by
)
496 if (auth_
== nullptr) {
497 if (aur
!= nullptr) {
498 debugs(33, 2, "Adding connection-auth to " << clientConnection
<< " from " << by
);
504 // clobered with self-pointer
505 // NP: something nasty is going on in Squid, but harmless.
507 debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection
<< " from " << by
);
512 * Connection-auth relies on a single set of credentials being preserved
513 * for all requests on a connection once they have been setup.
514 * There are several things which need to happen to preserve security
515 * when connection-auth credentials change unexpectedly or are unset.
517 * 1) auth helper released from any active state
519 * They can only be reserved by a handshake process which this
520 * connection can now never complete.
521 * This prevents helpers hanging when their connections close.
523 * 2) pinning is expected to be removed and server conn closed
525 * The upstream link is authenticated with the same credentials.
526 * Expecting the same level of consistency we should have received.
527 * This prevents upstream being faced with multiple or missing
528 * credentials after authentication.
529 * NP: un-pin is left to the cleanup in ConnStateData::swanSong()
530 * we just trigger that cleanup here via comm_reset_close() or
531 * ConnStateData::stopReceiving()
533 * 3) the connection needs to close.
535 * This prevents attackers injecting requests into a connection,
536 * or gateways wrongly multiplexing users into a single connection.
538 * When credentials are missing closure needs to follow an auth
539 * challenge for best recovery by the client.
541 * When credentials change there is nothing we can do but abort as
542 * fast as possible. Sending TCP RST instead of an HTTP response
543 * is the best-case action.
546 // clobbered with nul-pointer
547 if (aur
== nullptr) {
548 debugs(33, 2, "WARNING: Graceful closure on " << clientConnection
<< " due to connection-auth erase from " << by
);
549 auth_
->releaseAuthServer();
551 // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
552 // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
553 // we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
554 stopReceiving("connection-auth removed");
558 // clobbered with alternative credentials
560 debugs(33, 2, "ERROR: Closing " << clientConnection
<< " due to change of connection-auth from " << by
);
561 auth_
->releaseAuthServer();
563 // this is a fatal type of problem.
564 // Close the connection immediately with TCP RST to abort all traffic flow
565 comm_reset_close(clientConnection
);
574 ConnStateData::resetReadTimeout(const time_t timeout
)
576 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
577 AsyncCall::Pointer callback
= JobCallback(33, 5, TimeoutDialer
, this, ConnStateData::requestTimeout
);
578 commSetConnTimeout(clientConnection
, timeout
, callback
);
582 ConnStateData::extendLifetime()
584 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
585 AsyncCall::Pointer callback
= JobCallback(5, 4, TimeoutDialer
, this, ConnStateData::lifetimeTimeout
);
586 commSetConnTimeout(clientConnection
, Config
.Timeout
.lifetime
, callback
);
589 // cleans up before destructor is called
591 ConnStateData::swanSong()
593 debugs(33, 2, clientConnection
);
595 flags
.readMore
= false;
596 clientdbEstablished(clientConnection
->remote
, -1); /* decrement */
598 terminateAll(ERR_NONE
, LogTagsErrors());
601 // XXX: Closing pinned conn is too harsh: The Client may want to continue!
602 unpinConnection(true);
607 // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
608 setAuth(nullptr, "ConnStateData::SwanSong cleanup");
611 flags
.swanSang
= true;
615 ConnStateData::callException(const std::exception
&ex
)
617 Server::callException(ex
); // logs ex and stops the job
619 ErrorDetail::Pointer errorDetail
;
620 if (const auto tex
= dynamic_cast<const TextException
*>(&ex
))
621 errorDetail
= new ExceptionErrorDetail(tex
->id());
623 errorDetail
= new ExceptionErrorDetail(Here().id());
624 updateError(ERR_GATEWAY_FAILURE
, errorDetail
);
628 ConnStateData::updateError(const Error
&error
)
630 if (const auto context
= pipeline
.front()) {
631 const auto http
= context
->http
;
633 http
->updateError(error
);
635 bareError
.update(error
);
640 ConnStateData::isOpen() const
642 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
643 Comm::IsConnOpen(clientConnection
) &&
644 !fd_table
[clientConnection
->fd
].closing();
647 ConnStateData::~ConnStateData()
649 debugs(33, 3, clientConnection
);
652 debugs(33, DBG_IMPORTANT
, "ERROR: Squid BUG: ConnStateData did not close " << clientConnection
);
655 debugs(33, DBG_IMPORTANT
, "ERROR: Squid BUG: ConnStateData was not destroyed properly; " << clientConnection
);
657 if (bodyPipe
!= nullptr)
658 stopProducingFor(bodyPipe
, false);
660 delete bodyParser
; // TODO: pool
663 delete sslServerBump
;
668 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
669 * This is the client-side persistent connection flag. We need
670 * to set this relatively early in the request processing
671 * to handle hacks for broken servers and clients.
674 clientSetKeepaliveFlag(ClientHttpRequest
* http
)
676 HttpRequest
*request
= http
->request
;
678 debugs(33, 3, "http_ver = " << request
->http_ver
);
679 debugs(33, 3, "method = " << request
->method
);
681 // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
682 request
->flags
.proxyKeepalive
= request
->persistent();
686 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength
)
688 if (Config
.maxRequestBodySize
&&
689 bodyLength
> Config
.maxRequestBodySize
)
690 return 1; /* too large */
696 ClientHttpRequest::multipartRangeRequest() const
698 return request
->multipartRangeRequest();
702 clientPackTermBound(String boundary
, MemBuf
*mb
)
704 mb
->appendf("\r\n--" SQUIDSTRINGPH
"--\r\n", SQUIDSTRINGPRINT(boundary
));
705 debugs(33, 6, "buf offset: " << mb
->size
);
709 clientPackRangeHdr(const HttpReplyPointer
&rep
, const HttpHdrRangeSpec
* spec
, String boundary
, MemBuf
* mb
)
711 HttpHeader
hdr(hoReply
);
716 debugs(33, 5, "appending boundary: " << boundary
);
717 /* rfc2046 requires to _prepend_ boundary with <crlf>! */
718 mb
->appendf("\r\n--" SQUIDSTRINGPH
"\r\n", SQUIDSTRINGPRINT(boundary
));
720 /* stuff the header with required entries and pack it */
722 if (rep
->header
.has(Http::HdrType::CONTENT_TYPE
))
723 hdr
.putStr(Http::HdrType::CONTENT_TYPE
, rep
->header
.getStr(Http::HdrType::CONTENT_TYPE
));
725 httpHeaderAddContRange(&hdr
, *spec
, rep
->content_length
);
730 /* append <crlf> (we packed a header, not a reply) */
731 mb
->append("\r\n", 2);
734 /** returns expected content length for multi-range replies
735 * note: assumes that httpHdrRangeCanonize has already been called
736 * warning: assumes that HTTP headers for individual ranges at the
737 * time of the actuall assembly will be exactly the same as
738 * the headers when clientMRangeCLen() is called */
740 ClientHttpRequest::mRangeCLen() const
748 HttpHdrRange::iterator pos
= request
->range
->begin();
750 while (pos
!= request
->range
->end()) {
751 /* account for headers for this range */
753 clientPackRangeHdr(&storeEntry()->mem().freshestReply(),
754 *pos
, range_iter
.boundary
, &mb
);
757 /* account for range content */
758 clen
+= (*pos
)->length
;
760 debugs(33, 6, "clientMRangeCLen: (clen += " << mb
.size
<< " + " << (*pos
)->length
<< ") == " << clen
);
764 /* account for the terminating boundary */
767 clientPackTermBound(range_iter
.boundary
, &mb
);
777 * generates a "unique" boundary string for multipart responses
778 * the caller is responsible for cleaning the string */
780 ClientHttpRequest::rangeBoundaryStr() const
783 String
b(visible_appname_string
);
785 key
= storeEntry()->getMD5Text();
786 b
.append(key
, strlen(key
));
791 * Write a chunk of data to a client socket. If the reply is present,
792 * send the reply headers down the wire too, and clean them up when
795 * The request is one backed by a connection, not an internal request.
796 * data context is not NULL
797 * There are no more entries in the stream chain.
800 clientSocketRecipient(clientStreamNode
* node
, ClientHttpRequest
* http
,
801 HttpReply
* rep
, StoreIOBuffer receivedData
)
803 // do not try to deliver if client already ABORTED
804 if (!http
->getConn() || !cbdataReferenceValid(http
->getConn()) || !Comm::IsConnOpen(http
->getConn()->clientConnection
))
807 /* Test preconditions */
808 assert(node
!= nullptr);
809 /* TODO: handle this rather than asserting
810 * - it should only ever happen if we cause an abort and
811 * the callback chain loops back to here, so we can simply return.
812 * However, that itself shouldn't happen, so it stays as an assert for now.
814 assert(cbdataReferenceValid(node
));
815 assert(node
->node
.next
== nullptr);
816 Http::StreamPointer context
= dynamic_cast<Http::Stream
*>(node
->data
.getRaw());
817 assert(context
!= nullptr);
819 /* TODO: check offset is what we asked for */
821 // TODO: enforces HTTP/1 MUST on pipeline order, but is irrelevant to HTTP/2
822 if (context
!= http
->getConn()->pipeline
.front())
823 context
->deferRecipientForLater(node
, rep
, receivedData
);
824 else if (http
->getConn()->cbControlMsgSent
) // 1xx to the user is pending
825 context
->deferRecipientForLater(node
, rep
, receivedData
);
827 http
->getConn()->handleReply(rep
, receivedData
);
831 * Called when a downstream node is no longer interested in
832 * our data. As we are a terminal node, this means on aborts
836 clientSocketDetach(clientStreamNode
* node
, ClientHttpRequest
* http
)
838 /* Test preconditions */
839 assert(node
!= nullptr);
840 /* TODO: handle this rather than asserting
841 * - it should only ever happen if we cause an abort and
842 * the callback chain loops back to here, so we can simply return.
843 * However, that itself shouldn't happen, so it stays as an assert for now.
845 assert(cbdataReferenceValid(node
));
846 /* Set null by ContextFree */
847 assert(node
->node
.next
== nullptr);
848 /* this is the assert discussed above */
849 assert(nullptr == dynamic_cast<Http::Stream
*>(node
->data
.getRaw()));
850 /* We are only called when the client socket shutsdown.
851 * Tell the prev pipeline member we're finished
853 clientStreamDetach(node
, http
);
857 ConnStateData::readNextRequest()
859 debugs(33, 5, clientConnection
<< " reading next req");
861 fd_note(clientConnection
->fd
, "Idle client: Waiting for next request");
863 * Set the timeout BEFORE calling readSomeData().
865 resetReadTimeout(clientConnection
->timeLeft(idleTimeout()));
868 /** Please don't do anything with the FD past here! */
872 ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest
, ConnStateData
* conn
)
874 debugs(33, 2, conn
->clientConnection
<< " Sending next");
876 /** If the client stream is waiting on a socket write to occur, then */
878 if (deferredRequest
->flags
.deferred
) {
879 /** NO data is allowed to have been sent. */
880 assert(deferredRequest
->http
->out
.size
== 0);
882 clientSocketRecipient(deferredRequest
->deferredparams
.node
,
883 deferredRequest
->http
,
884 deferredRequest
->deferredparams
.rep
,
885 deferredRequest
->deferredparams
.queuedBuffer
);
888 /** otherwise, the request is still active in a callbacksomewhere,
894 ConnStateData::kick()
896 if (!Comm::IsConnOpen(clientConnection
)) {
897 debugs(33, 2, clientConnection
<< " Connection was closed");
901 if (pinning
.pinned
&& !Comm::IsConnOpen(pinning
.serverConnection
)) {
902 debugs(33, 2, clientConnection
<< " Connection was pinned but server side gone. Terminating client connection");
903 clientConnection
->close();
908 * We are done with the response, and we are either still receiving request
909 * body (early response!) or have already stopped receiving anything.
911 * If we are still receiving, then parseRequests() below will fail.
912 * (XXX: but then we will call readNextRequest() which may succeed and
913 * execute a smuggled request as we are not done with the current request).
915 * If we stopped because we got everything, then try the next request.
917 * If we stopped receiving because of an error, then close now to avoid
918 * getting stuck and to prevent accidental request smuggling.
921 if (const char *reason
= stoppedReceiving()) {
922 debugs(33, 3, "closing for earlier request error: " << reason
);
923 clientConnection
->close();
928 * Attempt to parse a request from the request buffer.
929 * If we've been fed a pipelined request it may already
930 * be in our read buffer.
939 * At this point we either have a parsed request (which we've
940 * kicked off the processing for) or not. If we have a deferred
941 * request (parsed but deferred for pipeling processing reasons)
942 * then look at processing it. If not, simply kickstart
945 Http::StreamPointer deferredRequest
= pipeline
.front();
946 if (deferredRequest
!= nullptr) {
947 debugs(33, 3, clientConnection
<< ": calling PushDeferredIfNeeded");
948 ClientSocketContextPushDeferredIfNeeded(deferredRequest
, this);
949 } else if (flags
.readMore
) {
950 debugs(33, 3, clientConnection
<< ": calling readNextRequest()");
953 // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
954 debugs(33, DBG_IMPORTANT
, MYNAME
<< "abandoning " << clientConnection
);
959 ConnStateData::stopSending(const char *error
)
961 debugs(33, 4, "sending error (" << clientConnection
<< "): " << error
<<
962 "; old receiving error: " <<
963 (stoppedReceiving() ? stoppedReceiving_
: "none"));
965 if (const char *oldError
= stoppedSending()) {
966 debugs(33, 3, "already stopped sending: " << oldError
);
967 return; // nothing has changed as far as this connection is concerned
969 stoppedSending_
= error
;
971 if (!stoppedReceiving()) {
972 if (const int64_t expecting
= mayNeedToReadMoreBody()) {
973 debugs(33, 5, "must still read " << expecting
<<
974 " request body bytes with " << inBuf
.length() << " unused");
975 return; // wait for the request receiver to finish reading
979 clientConnection
->close();
983 ConnStateData::afterClientWrite(size_t size
)
985 if (pipeline
.empty())
988 auto ctx
= pipeline
.front();
990 statCounter
.client_http
.kbytes_out
+= size
;
991 if (ctx
->http
->loggingTags().isTcpHit())
992 statCounter
.client_http
.hit_kbytes_out
+= size
;
994 ctx
->writeComplete(size
);
998 ConnStateData::abortRequestParsing(const char *const uri
)
1000 ClientHttpRequest
*http
= new ClientHttpRequest(this);
1001 http
->req_sz
= inBuf
.length();
1002 http
->setErrorUri(uri
);
1003 auto *context
= new Http::Stream(clientConnection
, http
);
1004 StoreIOBuffer tempBuffer
;
1005 tempBuffer
.data
= context
->reqbuf
;
1006 tempBuffer
.length
= HTTP_REQBUF_SZ
;
1007 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
1008 clientReplyStatus
, new clientReplyContext(http
), clientSocketRecipient
,
1009 clientSocketDetach
, context
, tempBuffer
);
1014 ConnStateData::startShutdown()
1016 // RegisteredRunner API callback - Squid has been shut down
1018 // if connection is idle terminate it now,
1019 // otherwise wait for grace period to end
1020 if (pipeline
.empty())
1025 ConnStateData::endingShutdown()
1027 // RegisteredRunner API callback - Squid shutdown grace period is over
1029 // force the client connection to close immediately
1030 // swanSong() in the close handler will cleanup.
1031 if (Comm::IsConnOpen(clientConnection
))
1032 clientConnection
->close();
1036 skipLeadingSpace(char *aString
)
1038 char *result
= aString
;
1040 while (xisspace(*aString
))
1047 * 'end' defaults to NULL for backwards compatibility
1048 * remove default value if we ever get rid of NULL-terminated
1052 findTrailingHTTPVersion(const char *uriAndHTTPVersion
, const char *end
)
1054 if (nullptr == end
) {
1055 end
= uriAndHTTPVersion
+ strcspn(uriAndHTTPVersion
, "\r\n");
1059 for (; end
> uriAndHTTPVersion
; --end
) {
1060 if (*end
== '\n' || *end
== '\r')
1063 if (xisspace(*end
)) {
1064 if (strncasecmp(end
+ 1, "HTTP/", 5) == 0)
1075 prepareAcceleratedURL(ConnStateData
* conn
, const Http1::RequestParserPointer
&hp
)
1077 int vhost
= conn
->port
->vhost
;
1078 int vport
= conn
->port
->vport
;
1079 static char ipbuf
[MAX_IPSTRLEN
];
1081 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1083 // XXX: re-use proper URL parser for this
1084 SBuf url
= hp
->requestUri(); // use full provided URI if we abort
1085 do { // use a loop so we can break out of it
1086 ::Parser::Tokenizer
tok(url
);
1087 if (tok
.skip('/')) // origin-form URL already.
1090 if (conn
->port
->vhost
)
1091 return nullptr; /* already in good shape */
1093 // skip the URI scheme
1094 static const CharacterSet uriScheme
= CharacterSet("URI-scheme","+-.") + CharacterSet::ALPHA
+ CharacterSet::DIGIT
;
1095 static const SBuf
uriSchemeEnd("://");
1096 if (!tok
.skipAll(uriScheme
) || !tok
.skip(uriSchemeEnd
))
1099 // skip the authority segment
1100 // RFC 3986 complex nested ABNF for "authority" boils down to this:
1101 static const CharacterSet authority
= CharacterSet("authority","-._~%:@[]!$&'()*+,;=") +
1102 CharacterSet::HEXDIG
+ CharacterSet::ALPHA
+ CharacterSet::DIGIT
;
1103 if (!tok
.skipAll(authority
))
1106 static const SBuf
slashUri("/");
1107 const SBuf t
= tok
.remaining();
1110 else if (t
[0]=='/') // looks like path
1112 else if (t
[0]=='?' || t
[0]=='#') { // looks like query or fragment. fix '/'
1115 } // else do nothing. invalid path
1119 #if SHOULD_REJECT_UNKNOWN_URLS
1120 // reject URI which are not well-formed even after the processing above
1121 if (url
.isEmpty() || url
[0] != '/') {
1122 hp
->parseStatusCode
= Http::scBadRequest
;
1123 return conn
->abortRequestParsing("error:invalid-request");
1128 vport
= conn
->clientConnection
->local
.port();
1130 char *receivedHost
= nullptr;
1131 if (vhost
&& (receivedHost
= hp
->getHostHeaderField())) {
1132 SBuf
host(receivedHost
);
1133 debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host
<< " + vport=" << vport
);
1135 // remove existing :port (if any), cope with IPv6+ without port
1136 const auto lastColonPos
= host
.rfind(':');
1137 if (lastColonPos
!= SBuf::npos
&& *host
.rbegin() != ']') {
1138 host
.chop(0, lastColonPos
); // truncate until the last colon
1140 host
.appendf(":%d", vport
);
1141 } // else nothing to alter port-wise.
1142 const SBuf
&scheme
= AnyP::UriScheme(conn
->transferProtocol
.protocol
).image();
1143 const auto url_sz
= scheme
.length() + host
.length() + url
.length() + 32;
1144 char *uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1145 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://" SQUIDSBUFPH SQUIDSBUFPH
, SQUIDSBUFPRINT(scheme
), SQUIDSBUFPRINT(host
), SQUIDSBUFPRINT(url
));
1146 debugs(33, 5, "ACCEL VHOST REWRITE: " << uri
);
1148 } else if (conn
->port
->defaultsite
/* && !vhost */) {
1149 debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn
->port
->defaultsite
<< " + vport=" << vport
);
1153 snprintf(vportStr
, sizeof(vportStr
),":%d",vport
);
1155 const SBuf
&scheme
= AnyP::UriScheme(conn
->transferProtocol
.protocol
).image();
1156 const int url_sz
= scheme
.length() + strlen(conn
->port
->defaultsite
) + sizeof(vportStr
) + url
.length() + 32;
1157 char *uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1158 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://%s%s" SQUIDSBUFPH
,
1159 SQUIDSBUFPRINT(scheme
), conn
->port
->defaultsite
, vportStr
, SQUIDSBUFPRINT(url
));
1160 debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << uri
);
1162 } else if (vport
> 0 /* && (!vhost || no Host:) */) {
1163 debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport
);
1164 /* Put the local socket IP address as the hostname, with whatever vport we found */
1165 conn
->clientConnection
->local
.toHostStr(ipbuf
,MAX_IPSTRLEN
);
1166 const SBuf
&scheme
= AnyP::UriScheme(conn
->transferProtocol
.protocol
).image();
1167 const int url_sz
= scheme
.length() + sizeof(ipbuf
) + url
.length() + 32;
1168 char *uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1169 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://%s:%d" SQUIDSBUFPH
,
1170 SQUIDSBUFPRINT(scheme
), ipbuf
, vport
, SQUIDSBUFPRINT(url
));
1171 debugs(33, 5, "ACCEL VPORT REWRITE: " << uri
);
1179 buildUrlFromHost(ConnStateData
* conn
, const Http1::RequestParserPointer
&hp
)
1181 char *uri
= nullptr;
1182 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1183 if (const char *host
= hp
->getHostHeaderField()) {
1184 const SBuf
&scheme
= AnyP::UriScheme(conn
->transferProtocol
.protocol
).image();
1185 const int url_sz
= scheme
.length() + strlen(host
) + hp
->requestUri().length() + 32;
1186 uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1187 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://%s" SQUIDSBUFPH
,
1188 SQUIDSBUFPRINT(scheme
),
1190 SQUIDSBUFPRINT(hp
->requestUri()));
// Rewrites an origin-form request URI into an absolute URL after this
// connection switched to HTTPS (SslBump), preferring the TLS SNI value as
// the authority and falling back to the original CONNECT host/IP.
// NOTE(review): extraction dropped the return type, braces, some locals
// (e.g. the declaration of useHost) and the final return — verify upstream.
1196 ConnStateData::prepareTlsSwitchingURL(const Http1::RequestParserPointer
&hp
)
1198 Must(switchedToHttps());
// A non-empty URI that does not start with '/' is already absolute.
1200 if (!hp
->requestUri().isEmpty() && hp
->requestUri()[0] != '/')
1201 return nullptr; /* already in good shape */
// First attempt: derive the authority from the Host header.
1203 char *uri
= buildUrlFromHost(this, hp
);
// Invariants recorded when the CONNECT tunnel was established.
1206 Must(tlsConnectPort
);
1207 Must(!tlsConnectHostOrIp
.isEmpty());
// Prefer the TLS client SNI over the CONNECT target when available.
1209 if (!tlsClientSni().isEmpty())
1210 useHost
= tlsClientSni();
1212 useHost
= tlsConnectHostOrIp
;
1214 const SBuf
&scheme
= AnyP::UriScheme(transferProtocol
.protocol
).image();
// +32 gives slack for "://", ":port", and the NUL terminator.
1215 const int url_sz
= scheme
.length() + useHost
.length() + hp
->requestUri().length() + 32;
1216 uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1217 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://" SQUIDSBUFPH
":%hu" SQUIDSBUFPH
,
1218 SQUIDSBUFPRINT(scheme
),
1219 SQUIDSBUFPRINT(useHost
),
// NOTE(review): original line 1220 (the %hu port argument, presumably
// *tlsConnectPort) is missing from this extraction.
1221 SQUIDSBUFPRINT(hp
->requestUri()));
1225 debugs(33, 5, "TLS switching host rewrite: " << uri
);
// Rewrites an origin-form URI into an absolute URL for intercepted
// (transparent/TPROXY) traffic: tries the Host header first, then falls
// back to the local socket IP address and port as the authority.
// NOTE(review): extraction dropped the return type, braces, and the final
// return statement — verify against upstream before editing.
1230 prepareTransparentURL(ConnStateData
* conn
, const Http1::RequestParserPointer
&hp
)
1232 // TODO Must() on URI !empty when the parser supports throw. For now avoid assert().
1233 if (!hp
->requestUri().isEmpty() && hp
->requestUri()[0] != '/')
1234 return nullptr; /* already in good shape */
// Prefer the Host header when the client sent one.
1236 char *uri
= buildUrlFromHost(conn
, hp
);
1238 /* Put the local socket IP address as the hostname. */
1239 static char ipbuf
[MAX_IPSTRLEN
];
1240 conn
->clientConnection
->local
.toHostStr(ipbuf
,MAX_IPSTRLEN
);
1241 const SBuf
&scheme
= AnyP::UriScheme(conn
->transferProtocol
.protocol
).image();
// NOTE(review): unlike the sibling rewrites, url_sz here omits
// scheme.length(); the +32 slack presumably covers it — confirm upstream.
1242 const int url_sz
= sizeof(ipbuf
) + hp
->requestUri().length() + 32;
1243 uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1244 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://%s:%d" SQUIDSBUFPH
,
1245 SQUIDSBUFPRINT(scheme
),
1246 ipbuf
, conn
->clientConnection
->local
.port(), SQUIDSBUFPRINT(hp
->requestUri()));
1250 debugs(33, 5, "TRANSPARENT REWRITE: " << uri
);
// Parses one HTTP/1.x request head from inBuf via the supplied parser,
// rejects unsupported/forbidden methods, creates the ClientHttpRequest and
// Http::Stream, wires up the client stream, and rewrites the request URI to
// an absolute form depending on the port mode (accel/intercept/SslBump).
// Returns the new Http::Stream (nullptr on incomplete input; an aborted
// stream on parse errors).
// NOTE(review): this extraction is missing many original lines (braces,
// returns, and some declarations such as `tooBig`); comments below hedge
// where behavior cannot be confirmed from the visible text.
1255 ConnStateData::parseHttpRequest(const Http1::RequestParserPointer
&hp
)
1257 /* Attempt to parse the first line; this will define where the method, url, version and header begin */
// Snapshot the raw bytes first so on_unsupported_protocol can tunnel them.
1261 if (preservingClientData_
)
1262 preservedClientData
= inBuf
;
1264 const bool parsedOk
= hp
->parse(inBuf
);
1266 // sync the buffers after parsing.
1267 inBuf
= hp
->remaining();
1269 if (hp
->needsMoreData()) {
1270 debugs(33, 5, "Incomplete request, waiting for end of request line");
// NOTE(review): lines 1271-1275 are missing here, including the start of
// the `tooBig` declaration whose initializer continues below.
1276 hp
->parseStatusCode
== Http::scRequestHeaderFieldsTooLarge
||
1277 hp
->parseStatusCode
== Http::scUriTooLong
;
1278 auto result
= abortRequestParsing(
1279 tooBig
? "error:request-too-large" : "error:invalid-request");
1280 // assume that remaining leftovers belong to this bad request
1281 if (!inBuf
.isEmpty())
1282 consumeInput(inBuf
.length());
1287 /* We know the whole request is in parser now */
1288 debugs(11, 2, "HTTP Client " << clientConnection
);
1289 debugs(11, 2, "HTTP Client REQUEST:\n---------\n" <<
1290 hp
->method() << " " << hp
->requestUri() << " " << hp
->messageProtocol() << "\n" <<
1294 /* deny CONNECT via accelerated ports */
1295 if (hp
->method() == Http::METHOD_CONNECT
&& port
!= nullptr && port
->flags
.accelSurrogate
) {
1296 debugs(33, DBG_IMPORTANT
, "WARNING: CONNECT method received on " << transferProtocol
<< " Accelerator port " << port
->s
.port());
1297 debugs(33, DBG_IMPORTANT
, "WARNING: for request: " << hp
->method() << " " << hp
->requestUri() << " " << hp
->messageProtocol());
1298 hp
->parseStatusCode
= Http::scMethodNotAllowed
;
1299 return abortRequestParsing("error:method-not-allowed");
1302 /* HTTP/2 connection magic prefix starts with "PRI ".
1303 * Deny "PRI" method if used in HTTP/1.x or 0.9 versions.
1304 * If seen it signals a broken client or proxy has corrupted the traffic.
1306 if (hp
->method() == Http::METHOD_PRI
&& hp
->messageProtocol() < Http::ProtocolVersion(2,0)) {
1307 debugs(33, DBG_IMPORTANT
, "WARNING: PRI method received on " << transferProtocol
<< " port " << port
->s
.port());
1308 debugs(33, DBG_IMPORTANT
, "WARNING: for request: " << hp
->method() << " " << hp
->requestUri() << " " << hp
->messageProtocol());
1309 hp
->parseStatusCode
= Http::scMethodNotAllowed
;
1310 return abortRequestParsing("error:method-not-allowed");
// Parser could not recognize the method token at all.
1313 if (hp
->method() == Http::METHOD_NONE
) {
1314 debugs(33, DBG_IMPORTANT
, "WARNING: Unsupported method: " << hp
->method() << " " << hp
->requestUri() << " " << hp
->messageProtocol());
1315 hp
->parseStatusCode
= Http::scMethodNotAllowed
;
1316 return abortRequestParsing("error:unsupported-request-method");
1319 // Process headers after request line
1320 debugs(33, 3, "complete request received. " <<
1321 "prefix_sz = " << hp
->messageHeaderSize() <<
1322 ", request-line-size=" << hp
->firstLineSize() <<
1323 ", mime-header-size=" << hp
->headerBlockSize() <<
1324 ", mime header block:\n" << hp
->mimeHeader() << "\n----------");
1326 /* Ok, all headers are received */
1327 ClientHttpRequest
*http
= new ClientHttpRequest(this);
1329 http
->req_sz
= hp
->messageHeaderSize();
1330 Http::Stream
*result
= new Http::Stream(clientConnection
, http
);
// Seed the stream's initial read buffer for the reply pipeline.
1332 StoreIOBuffer tempBuffer
;
1333 tempBuffer
.data
= result
->reqbuf
;
1334 tempBuffer
.length
= HTTP_REQBUF_SZ
;
// Wire the client-stream pipeline: reply context (server side) feeds the
// Http::Stream (client side).
1336 ClientStreamData newServer
= new clientReplyContext(http
);
1337 ClientStreamData newClient
= result
;
1338 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
1339 clientReplyStatus
, newServer
, clientSocketRecipient
,
1340 clientSocketDetach
, newClient
, tempBuffer
);
1343 debugs(33,5, "Prepare absolute URL from " <<
1344 (transparent()?"intercept":(port
->flags
.accelSurrogate
? "accel":"")));
1345 /* Rewrite the URL in transparent or accelerator mode */
1346 /* NP: there are several cases to traverse here:
1347 * - standard mode (forward proxy)
1348 * - transparent mode (TPROXY)
1349 * - transparent mode with failures
1350 * - intercept mode (NAT)
1351 * - intercept mode with failures
1352 * - accelerator mode (reverse proxy)
1353 * - internal relative-URL
1354 * - mixed combos of the above with internal URL
1355 * - remote interception with PROXY protocol
1356 * - remote reverse-proxy with PROXY protocol
1358 if (switchedToHttps()) {
1359 http
->uri
= prepareTlsSwitchingURL(hp
);
1360 } else if (transparent()) {
1361 /* intercept or transparent mode, properly working with no failures */
1362 http
->uri
= prepareTransparentURL(this, hp
);
1364 } else if (internalCheck(hp
->requestUri())) { // NP: only matches relative-URI
1365 /* internal URL mode */
1366 // XXX: By prepending our name and port, we create an absolute URL
1367 // that may mismatch the (yet unparsed) Host header in the request.
1368 http
->uri
= xstrdup(internalLocalUri(nullptr, hp
->requestUri()));
1370 } else if (port
->flags
.accelSurrogate
) {
1371 /* accelerator mode */
1372 http
->uri
= prepareAcceleratedURL(this, hp
);
1373 http
->flags
.accel
= true;
1377 /* No special rewrites have been applied above, use the
1378 * requested url. may be rewritten later, so make extra room */
1379 int url_sz
= hp
->requestUri().length() + Config
.appendDomainLen
+ 5;
1380 http
->uri
= (char *)xcalloc(url_sz
, 1);
1381 SBufToCstring(http
->uri
, hp
->requestUri());
1384 result
->flags
.parsed_ok
= 1;
// Decides whether an EOF on the client socket should close the connection:
// yes when idle (no queued requests, no buffered bytes) or when
// half_closed_clients support is disabled; otherwise keep monitoring.
// NOTE(review): the extraction dropped this function's return statements
// and braces — only the decision logging survives here.
1389 ConnStateData::shouldCloseOnEof() const
1391 if (pipeline
.empty() && inBuf
.isEmpty()) {
1392 debugs(33, 4, "yes, without active requests and unparsed input");
1396 if (!Config
.onoff
.half_closed_clients
) {
1397 debugs(33, 3, "yes, without half_closed_clients");
1401 // Squid currently tries to parse (possibly again) a partially received
1402 // request after an EOF with half_closed_clients. To give that last parse in
1403 // afterClientRead() a chance, we ignore partially parsed requests here.
1404 debugs(33, 3, "no, honoring half_closed_clients");
// Removes byteCount already-processed bytes from the front of inBuf.
// The caller guarantees 0 < byteCount <= inBuf.length() (asserted).
1409 ConnStateData::consumeInput(const size_t byteCount
)
1411 assert(byteCount
> 0 && byteCount
<= inBuf
.length());
1412 inBuf
.consume(byteCount
);
1413 debugs(33, 5, "inBuf has " << inBuf
.length() << " unused bytes");
// Post-read housekeeping: closes a half-closed connection whose request
// body can no longer arrive in full.
// NOTE(review): the extraction dropped this function's tail (lines
// 1423-1427), which upstream uses to schedule further reads.
1417 ConnStateData::clientAfterReadingRequests()
1419 // Were we expecting to read more request body from half-closed connection?
1420 if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection
->fd
)) {
1421 debugs(33, 3, "truncated body: closing half-closed " << clientConnection
);
1422 clientConnection
->close();
// Marks this connection for closure after the current error reply: disables
// keep-alive on the request and stops further reads.
// NOTE(review): original line 1436 is missing here; upstream guards the
// flag assignment with `if (request)` — confirm before editing (the
// parameter may legitimately be nil).
1431 ConnStateData::quitAfterError(HttpRequest
*request
)
1433 // From HTTP p.o.v., we do not have to close after every error detected
1434 // at the client-side, but many such errors do require closure and the
1435 // client-side code is bad at handling errors so we play it safe.
1437 request
->flags
.proxyKeepalive
= false;
1438 flags
.readMore
= false;
1439 debugs(33,4, "Will close after error: " << clientConnection
);
// SslBump support: serves an error prepared earlier (while peeking at the
// CONNECT/TLS handshake) instead of forwarding the bumped request, and
// validates that the peeked server certificate matches the request target.
// Returns true when an error reply was (or will be) sent via `context`.
// NOTE(review): extraction dropped braces/returns and some lines (e.g. the
// sslServerBump nil-check near line 1446 and the ErrorDetail argument near
// 1505) — verify against upstream before editing.
1443 bool ConnStateData::serveDelayedError(Http::Stream
*context
)
1445 ClientHttpRequest
*http
= context
->http
;
1450 assert(sslServerBump
->entry
);
1451 // Did we create an error entry while processing CONNECT?
1452 if (!sslServerBump
->entry
->isEmpty()) {
1453 quitAfterError(http
->request
);
1455 // Get the saved error entry and send it to the client by replacing the
1456 // ClientHttpRequest store entry with it.
1457 clientStreamNode
*node
= context
->getClientReplyContext();
1458 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1460 debugs(33, 5, "Responding with delated error for " << http
->uri
);
1461 repContext
->setReplyToStoreEntry(sslServerBump
->entry
, "delayed SslBump error");
1463 // Get error details from the fake certificate-peeking request.
1464 http
->request
->error
.update(sslServerBump
->request
->error
);
1465 context
->pullData();
1469 // In bump-server-first mode, we have not necessarily seen the intended
1470 // server name at certificate-peeking time. Check for domain mismatch now,
1471 // when we can extract the intended name from the bumped HTTP request.
1472 if (const Security::CertPointer
&srvCert
= sslServerBump
->serverCert
) {
1473 HttpRequest
*request
= http
->request
;
1474 const auto host
= request
->url
.parsedHost();
1475 if (host
&& Ssl::HasSubjectName(*srvCert
, *host
)) {
1476 debugs(33, 5, "certificate matches requested host: " << *host
);
1479 debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
1480 "does not match request target " << RawPointer(host
));
// Mismatch found: consult sslproxy_cert_error ACLs before refusing.
1482 bool allowDomainMismatch
= false;
1483 if (Config
.ssl_client
.cert_error
) {
1484 ACLFilledChecklist
check(Config
.ssl_client
.cert_error
, nullptr);
1485 const auto sslErrors
= std::make_unique
<Security::CertErrors
>(Security::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH
, srvCert
));
1486 check
.sslErrors
= sslErrors
.get();
1487 clientAclChecklistFill(check
, http
);
1488 allowDomainMismatch
= check
.fastCheck().allowed();
1491 if (!allowDomainMismatch
) {
1492 quitAfterError(request
);
1494 clientStreamNode
*node
= context
->getClientReplyContext();
1495 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1496 assert (repContext
);
// Preserve hierarchy info collected by the certificate-peeking request.
1498 request
->hier
= sslServerBump
->request
->hier
;
1500 // Create an error object and fill it
1501 const auto err
= new ErrorState(ERR_SECURE_CONNECT_FAIL
, Http::scServiceUnavailable
, request
, http
->al
);
1502 err
->src_addr
= clientConnection
->remote
;
1503 const Security::ErrorDetail::Pointer errDetail
= new Security::ErrorDetail(
1504 SQUID_X509_V_ERR_DOMAIN_MISMATCH
,
1506 updateError(ERR_SECURE_CONNECT_FAIL
, errDetail
);
1507 repContext
->setReplyToError(request
->method
, err
);
1508 assert(context
->http
->out
.offset
== 0);
1509 context
->pullData();
1517 #endif // USE_OPENSSL
1519 /// initiate tunneling if possible or return false otherwise
// Consulted after a request-level error: when on_unsupported_protocol
// allows it (answer kind 1 == "tunnel"), blindly tunnels the preserved
// client bytes to the server instead of sending an error reply.
// NOTE(review): extraction dropped braces and the `return false` paths.
1521 ConnStateData::tunnelOnError(const err_type requestError
)
1523 if (!Config
.accessList
.on_unsupported_protocol
) {
1524 debugs(33, 5, "disabled; send error: " << requestError
);
// Without preserved bytes we cannot replay the request to the server.
1528 if (!preservingClientData_
) {
1529 debugs(33, 3, "may have forgotten client data; send error: " << requestError
);
1533 ACLFilledChecklist
checklist(Config
.accessList
.on_unsupported_protocol
, nullptr);
1534 checklist
.requestErrorType
= requestError
;
1535 fillChecklist(checklist
);
1536 const auto &answer
= checklist
.fastCheck();
1537 if (answer
.allowed() && answer
.kind
== 1) {
1538 debugs(33, 3, "Request will be tunneled to server");
1539 const auto context
= pipeline
.front();
1540 const auto http
= context
? context
->http
: nullptr;
1541 const auto request
= http
? http
->request
: nullptr;
// NOTE(review): context is dereferenced below without a nil check in the
// visible text; a guard may exist on a dropped line — confirm upstream.
1543 context
->finished(); // Will remove from pipeline queue
1544 Comm::SetSelect(clientConnection
->fd
, COMM_SELECT_READ
, nullptr, nullptr, 0);
1545 return initiateTunneledRequest(request
, "unknown-protocol", preservedClientData
);
1547 debugs(33, 3, "denied; send error: " << requestError
);
// Final step of clientProcessRequest(): if the request was flagged for a
// TCP RST reply, stop reading and reset-close the client connection here
// (not in clientReplyContext) to avoid use-after-free of http.
1552 clientProcessRequestFinished(ConnStateData
*conn
, const HttpRequest::Pointer
&request
)
1556 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
1557 * to here because calling comm_reset_close() causes http to
1558 * be freed before accessing.
1560 if (request
!= nullptr && request
->flags
.resetTcp
&& Comm::IsConnOpen(conn
->clientConnection
)) {
1561 debugs(33, 3, "Sending TCP RST on " << conn
->clientConnection
);
1562 conn
->flags
.readMore
= false;
1563 comm_reset_close(conn
->clientConnection
);
// Performs final per-request setup after parsing: attaches the request to
// the connection manager, propagates accel/sslBumped/source flags, rejects
// unsupported requests and framing errors, prepares the request body pipe,
// and finally kicks off doCallouts() via a new ClientRequestContext.
// Serves both HTTP (hp set) and FTP (hp nil, isFtp below) code paths.
// NOTE(review): extraction dropped braces and `return` statements after
// the error-reply sections — each error block upstream returns early.
1568 clientProcessRequest(ConnStateData
*conn
, const Http1::RequestParserPointer
&hp
, Http::Stream
*context
)
1570 ClientHttpRequest
*http
= context
->http
;
1571 bool mustReplyToOptions
= false;
1572 bool expectBody
= false;
1574 // We already have the request parsed and checked, so we
1575 // only need to go through the final body/conn setup to doCallouts().
1576 assert(http
->request
);
1577 HttpRequest::Pointer request
= http
->request
;
1579 // temporary hack to avoid splitting this huge function with sensitive code
1580 const bool isFtp
= !hp
;
1582 // Some blobs below are still HTTP-specific, but we would have to rewrite
1583 // this entire function to remove them from the FTP code path. Connection
1584 // setup and body_pipe preparation blobs are needed for FTP.
1586 request
->manager(conn
, http
->al
);
1588 request
->flags
.accelerated
= http
->flags
.accel
;
1589 request
->flags
.sslBumped
=conn
->switchedToHttps();
1590 // TODO: decouple http->flags.accel from request->flags.sslBumped
1591 request
->flags
.noDirect
= (request
->flags
.accelerated
&& !request
->flags
.sslBumped
) ?
1592 !conn
->port
->allow_direct
: 0;
// Record where this request entered Squid (FTP, HTTPS, or plain HTTP).
1593 request
->sources
|= isFtp
? Http::Message::srcFtp
:
1594 ((request
->flags
.sslBumped
|| conn
->port
->transport
.protocol
== AnyP::PROTO_HTTPS
) ? Http::Message::srcHttps
: Http::Message::srcHttp
);
// Bumped requests inherit credentials cached on the raw connection.
1596 if (request
->flags
.sslBumped
) {
1597 if (conn
->getAuth() != nullptr)
1598 request
->auth_user_request
= conn
->getAuth();
1602 http
->checkForInternalAccess();
1605 // XXX: for non-HTTP messages instantiate a different Http::Message child type
1606 // for now Squid only supports HTTP requests
1607 const AnyP::ProtocolVersion
&http_ver
= hp
->messageProtocol();
1608 assert(request
->http_ver
.protocol
== http_ver
.protocol
);
1609 request
->http_ver
.major
= http_ver
.major
;
1610 request
->http_ver
.minor
= http_ver
.minor
;
// RFC 9110: OPTIONS with Max-Forwards: 0 must be answered by this hop.
1613 mustReplyToOptions
= (request
->method
== Http::METHOD_OPTIONS
) &&
1614 (request
->header
.getInt64(Http::HdrType::MAX_FORWARDS
) == 0);
1615 if (!urlCheckRequest(request
.getRaw()) || mustReplyToOptions
) {
1616 clientStreamNode
*node
= context
->getClientReplyContext();
1617 conn
->quitAfterError(request
.getRaw());
1618 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1619 assert (repContext
);
1620 repContext
->setReplyToError(ERR_UNSUP_REQ
, Http::scNotImplemented
, nullptr,
1621 conn
, request
.getRaw(), nullptr, nullptr);
1622 assert(context
->http
->out
.offset
== 0);
1623 context
->pullData();
1624 clientProcessRequestFinished(conn
, request
);
// Reject messages with invalid Content-Length/Transfer-Encoding framing.
1628 const auto frameStatus
= request
->checkEntityFraming();
1629 if (frameStatus
!= Http::scNone
) {
1630 clientStreamNode
*node
= context
->getClientReplyContext();
1631 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1632 assert (repContext
);
1633 conn
->quitAfterError(request
.getRaw());
1634 repContext
->setReplyToError(ERR_INVALID_REQ
, frameStatus
, nullptr, conn
, request
.getRaw(), nullptr, nullptr);
1635 assert(context
->http
->out
.offset
== 0);
1636 context
->pullData();
1637 clientProcessRequestFinished(conn
, request
);
1641 clientSetKeepaliveFlag(http
);
1642 // Let tunneling code be fully responsible for CONNECT requests
1643 if (http
->request
->method
== Http::METHOD_CONNECT
) {
1644 context
->mayUseConnection(true);
1645 conn
->flags
.readMore
= false;
// A delayed SslBump error preempts normal processing of this request.
1649 if (conn
->switchedToHttps() && conn
->serveDelayedError(context
)) {
1650 clientProcessRequestFinished(conn
, request
);
1655 /* Do we expect a request-body? */
1656 const auto chunked
= request
->header
.chunked();
1657 expectBody
= chunked
|| request
->content_length
> 0;
1658 if (!context
->mayUseConnection() && expectBody
) {
// -1 signals "unknown size" to the pipe for chunked bodies.
1659 request
->body_pipe
= conn
->expectRequestBody(
1660 chunked
? -1 : request
->content_length
);
1662 /* Is it too large? */
1663 if (!chunked
&& // if chunked, we will check as we accumulate
1664 clientIsRequestBodyTooLargeForPolicy(request
->content_length
)) {
1665 clientStreamNode
*node
= context
->getClientReplyContext();
1666 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1667 assert (repContext
);
1668 conn
->quitAfterError(request
.getRaw());
1669 repContext
->setReplyToError(ERR_TOO_BIG
,
1670 Http::scContentTooLarge
, nullptr,
1671 conn
, http
->request
, nullptr, nullptr);
1672 assert(context
->http
->out
.offset
== 0);
1673 context
->pullData();
1674 clientProcessRequestFinished(conn
, request
);
1679 // We may stop producing, comm_close, and/or call setReplyToError()
1680 // below, so quit on errors to avoid http->doCallouts()
1681 if (!conn
->handleRequestBodyData()) {
1682 clientProcessRequestFinished(conn
, request
);
// Body still incoming: keep the connection dedicated to this request.
1686 if (!request
->body_pipe
->productionEnded()) {
1687 debugs(33, 5, "need more request body");
1688 context
->mayUseConnection(true);
1689 assert(conn
->flags
.readMore
);
// Normal path: start the access-control/redirect/etc. callout sequence.
1694 http
->calloutContext
= new ClientRequestContext(http
);
1698 clientProcessRequestFinished(conn
, request
);
// Appends a newly parsed request stream to this connection's pipeline,
// first propagating any connection-level bareError to the request.
// NOTE(review): the visible text dereferences context->http after a
// bareError check whose guard line appears to be dropped — confirm that
// upstream conditions the updateError() call on bareError.
1702 ConnStateData::add(const Http::StreamPointer
&context
)
1704 debugs(33, 3, context
<< " to " << pipeline
.count() << '/' << pipeline
.nrequests
);
1706 debugs(33, 5, "assigning " << bareError
);
1708 assert(context
->http
);
1709 context
->http
->updateError(bareError
);
1712 pipeline
.add(context
);
// Returns how many additional requests may be parsed ahead of the one
// being serviced (the configured pipeline_prefetch limit).
// NOTE(review): lines 1719-1720 are missing; upstream returns 0 here for
// pinned connections before falling through to the configured value.
1716 ConnStateData::pipelinePrefetchMax() const
1718 // TODO: Support pipelined requests through pinned connections.
1721 return Config
.pipeline_max_prefetch
;
1725 * Limit the number of concurrent requests.
1726 * \return true when there are available position(s) in the pipeline queue for another request.
1727 * \return false when the pipeline queue is full or disabled.
// Note: despite the doc above, the visible code returns "filled" semantics
// (true when no more requests may be queued) — the two \return lines look
// swapped relative to the function name; confirm against upstream.
1730 ConnStateData::concurrentRequestQueueFilled() const
1732 const int existingRequestCount
= pipeline
.count();
1734 // default to the configured pipeline size.
1735 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
// Spliced intercepted connections carry one extra internal request.
1737 const int internalRequest
= (transparent() && sslBumpMode
== Ssl::bumpSplice
) ? 1 : 0;
1739 const int internalRequest
= 0;
1741 const int concurrentRequestLimit
= pipelinePrefetchMax() + 1 + internalRequest
;
1743 // when queue filled already we can't add more.
1744 if (existingRequestCount
>= concurrentRequestLimit
) {
1745 debugs(33, 3, clientConnection
<< " max concurrent requests reached (" << concurrentRequestLimit
<< ")");
1746 debugs(33, 5, clientConnection
<< " deferring new request until one is done");
1754 * Perform proxy_protocol_access ACL tests on the client which
1755 * connected to PROXY protocol port to see if we trust the
1756 * sender enough to accept their PROXY header claim.
1759 ConnStateData::proxyProtocolValidateClient()
// No ACL configured means no client is trusted (fail closed).
1761 if (!Config
.accessList
.proxyProtocol
)
1762 return proxyProtocolError("PROXY client not permitted by default ACL");
1764 ACLFilledChecklist
ch(Config
.accessList
.proxyProtocol
, nullptr);
// NOTE(review): original line 1765 is missing; upstream fills `ch` with
// connection details (fillChecklist) before the fastCheck below.
1766 if (!ch
.fastCheck().allowed())
1767 return proxyProtocolError("PROXY client not permitted by ACLs");
1773 * Perform cleanup on PROXY protocol errors.
1774 * If header parsing hits a fatal error terminate the connection,
1775 * otherwise wait for more data.
1778 ConnStateData::proxyProtocolError(const char *msg
)
// A nil msg means "not fatal yet"; the visible logging runs only for
// real errors. The connection-closing tail of this function was dropped
// by the extraction.
1781 // This is important to know, but maybe not so much that flooding the log is okay.
1782 #if QUIET_PROXY_PROTOCOL
1783 // display the first of every 32 occurrences at level 1, the others at level 2.
1784 static uint8_t hide
= 0;
1785 debugs(33, (hide
++ % 32 == 0 ? DBG_IMPORTANT
: 2), msg
<< " from " << clientConnection
);
1787 debugs(33, DBG_IMPORTANT
, msg
<< " from " << clientConnection
);
1794 /// Attempts to extract a PROXY protocol header from the input buffer and,
1795 /// upon success, stores the parsed header in proxyProtocolHeader_.
1796 /// \returns true if the header was successfully parsed
1797 /// \returns false if more data is needed to parse the header or on error
1799 ConnStateData::parseProxyProtocolHeader()
// ProxyProtocol::Parse() throws InsufficientInput when more bytes are
// needed and other exceptions on malformed headers (both handled below).
1802 const auto parsed
= ProxyProtocol::Parse(inBuf
);
1803 proxyProtocolHeader_
= parsed
.header
;
1804 assert(bool(proxyProtocolHeader_
));
1805 inBuf
.consume(parsed
.size
);
1806 needProxyProtocolHeader_
= false;
// Adopt the client addresses claimed by the trusted PROXY sender.
1807 if (proxyProtocolHeader_
->hasForwardedAddresses()) {
1808 clientConnection
->local
= proxyProtocolHeader_
->destinationAddress
;
1809 clientConnection
->remote
= proxyProtocolHeader_
->sourceAddress
;
1810 if ((clientConnection
->flags
& COMM_TRANSPARENT
))
1811 clientConnection
->flags
^= COMM_TRANSPARENT
; // prevent TPROXY spoofing of this new IP.
1812 debugs(33, 5, "PROXY/" << proxyProtocolHeader_
->version() << " upgrade: " << clientConnection
);
1814 } catch (const Parser::BinaryTokenizer::InsufficientInput
&) {
1815 debugs(33, 3, "PROXY protocol: waiting for more than " << inBuf
.length() << " bytes");
1817 } catch (const std::exception
&e
) {
1818 return proxyProtocolError(e
.what());
// Called on the first byte read from a new client: records the fact and
// switches from the connect-idle timeout to the request-parsing timeout.
// Idempotent — repeated calls are no-ops (the dropped line 1827 is the
// early return).
1824 ConnStateData::receivedFirstByte()
1826 if (receivedFirstByte_
)
1829 receivedFirstByte_
= true;
1830 resetReadTimeout(Config
.Timeout
.request
);
1833 /// Attempt to parse one or more requests from the input buffer.
1834 /// May close the connection.
1836 ConnStateData::parseRequests()
1838 debugs(33, 5, clientConnection
<< ": attempting to parse");
1840 // Loop while we have read bytes that are not needed for producing the body
1841 // On errors, bodyPipe may become nil, but readMore will be cleared
1842 while (!inBuf
.isEmpty() && !bodyPipe
&& flags
.readMore
) {
1844 // Prohibit concurrent requests when using a pinned to-server connection
1845 // because our Client classes do not support request pipelining.
1846 if (pinning
.pinned
&& !pinning
.readHandler
) {
1847 debugs(33, 3, clientConnection
<< " waits for busy " << pinning
.serverConnection
);
1851 /* Limit the number of concurrent requests */
1852 if (concurrentRequestQueueFilled())
1855 // try to parse the PROXY protocol header magic bytes
1856 if (needProxyProtocolHeader_
) {
1857 if (!parseProxyProtocolHeader())
1860 // we have been waiting for PROXY to provide client-IP
1861 // for some lookups, ie rDNS
1862 whenClientIpKnown();
1864 // Done with PROXY protocol which has cleared preservingClientData_.
1865 // If the next protocol supports on_unsupported_protocol, then its
1866 // parseOneRequest() must reset preservingClientData_.
1867 assert(!preservingClientData_
);
// One request head successfully parsed: register and start processing it.
1870 if (Http::StreamPointer context
= parseOneRequest()) {
1871 debugs(33, 5, clientConnection
<< ": done parsing a request");
1873 context
->registerWithConn();
1876 if (switchedToHttps())
1877 parsedBumpedRequestCount
++;
1880 processParsedRequest(context
);
1882 if (context
->mayUseConnection()) {
1883 debugs(33, 3, "Not parsing new requests, as this request may need the connection");
// Parse failed for lack of data: enforce the header-size ceiling.
1887 debugs(33, 5, clientConnection
<< ": not enough request data: " <<
1888 inBuf
.length() << " < " << Config
.maxRequestHeaderSize
);
1889 Must(inBuf
.length() < Config
.maxRequestHeaderSize
);
1894 debugs(33, 7, "buffered leftovers: " << inBuf
.length());
// EOF already seen (half-closed): close now if idle, else just stop reading.
1896 if (isOpen() && commIsHalfClosed(clientConnection
->fd
)) {
1897 if (pipeline
.empty()) {
1898 // we processed what we could parse, and no more data is coming
1899 debugs(33, 5, "closing half-closed without parsed requests: " << clientConnection
);
1900 clientConnection
->close();
1902 // we parsed what we could, and no more data is coming
1903 debugs(33, 5, "monitoring half-closed while processing parsed requests: " << clientConnection
);
1904 flags
.readMore
= false; // may already be false
// Dispatches freshly read client bytes: continues a TLS handshake parse if
// one is in progress, otherwise tries to parse HTTP requests.
// NOTE(review): extraction dropped several lines here (e.g. the
// parseRequests() call between lines 1921 and 1928) — verify upstream.
1910 ConnStateData::afterClientRead()
1913 if (parsingTlsHandshake
) {
1914 parseTlsHandshake();
1919 /* Process next request */
1920 if (pipeline
.empty())
1921 fd_note(clientConnection
->fd
, "Reading next request");
1928 clientAfterReadingRequests();
1932 * called when new request data has been read from the socket
1934 * \retval false called comm_close or setReplyToError (the caller should bail)
1935 * \retval true we did not call comm_close or setReplyToError
1938 ConnStateData::handleReadData()
1940 // if we are reading a body, stuff data into the body pipe
1941 if (bodyPipe
!= nullptr)
1942 return handleRequestBodyData();
// (The "no body pipe" success return was dropped by the extraction.)
1947 * called when new request body data has been buffered in inBuf
1948 * may close the connection if we were closing and piped everything out
1950 * \retval false called comm_close or setReplyToError (the caller should bail)
1951 * \retval true we did not call comm_close or setReplyToError
1954 ConnStateData::handleRequestBodyData()
1956 assert(bodyPipe
!= nullptr);
// Chunked bodies go through the dechunking parser; plain bodies are piped
// through directly.
1958 if (bodyParser
) { // chunked encoding
1959 if (const err_type error
= handleChunkedRequestBody()) {
1960 abortChunkedRequestBody(error
);
1963 } else { // identity encoding
1964 debugs(33,5, "handling plain request body for " << clientConnection
);
// putMoreData() reports how much the pipe accepted; only that much may
// be consumed from inBuf.
1965 const auto putSize
= bodyPipe
->putMoreData(inBuf
.rawContent(), inBuf
.length());
1967 consumeInput(putSize
);
1969 if (!bodyPipe
->mayNeedMoreData()) {
1970 // BodyPipe will clear us automagically when we produced everything
1976 debugs(33,5, "produced entire request body for " << clientConnection
);
1978 if (const char *reason
= stoppedSending()) {
1979 /* we've finished reading like good clients,
1980 * now do the close that initiateClose initiated.
1982 debugs(33, 3, "closing for earlier sending error: " << reason
);
1983 clientConnection
->close();
1991 /// parses available chunked encoded body bytes, checks size, returns errors
// Returns ERR_NONE on progress/success; an err_type (e.g. ERR_INVALID_REQ)
// on malformed chunks or policy violation. The parser throws on errors.
1993 ConnStateData::handleChunkedRequestBody()
1995 debugs(33, 7, "chunked from " << clientConnection
<< ": " << inBuf
.length());
1997 try { // the parser will throw on errors
1999 if (inBuf
.isEmpty()) // nothing to do
// Checkout gives the parser direct write access to the pipe's buffer;
// it is committed back when bpc goes out of scope.
2002 BodyPipeCheckout
bpc(*bodyPipe
);
2003 bodyParser
->setPayloadBuffer(&bpc
.buf
);
2004 const bool parsed
= bodyParser
->parse(inBuf
);
2005 inBuf
= bodyParser
->remaining(); // sync buffers
2008 // dechunk then check: the size limit applies to _dechunked_ content
2009 if (clientIsRequestBodyTooLargeForPolicy(bodyPipe
->producedSize()))
2013 finishDechunkingRequest(true);
2015 return ERR_NONE
; // nil bodyPipe implies body end for the caller
2018 // if chunk parser needs data, then the body pipe must need it too
2019 Must(!bodyParser
->needsMoreData() || bodyPipe
->mayNeedMoreData());
2021 // if parser needs more space and we can consume nothing, we will stall
2022 Must(!bodyParser
->needsMoreSpace() || bodyPipe
->buf().hasContent());
2023 } catch (...) { // TODO: be more specific
2024 debugs(33, 3, "malformed chunks" << bodyPipe
->status());
2025 return ERR_INVALID_REQ
;
2028 debugs(33, 7, "need more chunked data" << *bodyPipe
->status());
2032 /// quit on errors related to chunked request body handling
// Tears down dechunking state and reset-closes the client connection; the
// error-reply path below is compiled out (see WE_KNOW_HOW_TO_SEND_ERRORS).
2034 ConnStateData::abortChunkedRequestBody(const err_type error
)
2036 finishDechunkingRequest(false);
2038 // XXX: The code below works if we fail during initial request parsing,
2039 // but if we fail when the server connection is used already, the server may send
2040 // us its response too, causing various assertions. How to prevent that?
2041 #if WE_KNOW_HOW_TO_SEND_ERRORS
2042 Http::StreamPointer context
= pipeline
.front();
2043 if (context
!= NULL
&& !context
->http
->out
.offset
) { // output nothing yet
2044 clientStreamNode
*node
= context
->getClientReplyContext();
2045 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2047 const Http::StatusCode scode
= (error
== ERR_TOO_BIG
) ?
2048 Http::scContentTooLarge
: HTTP_BAD_REQUEST
;
2049 repContext
->setReplyToError(error
, scode
,
2050 repContext
->http
->uri
,
2052 repContext
->http
->request
,
2054 context
->pullData();
2056 // close or otherwise we may get stuck as nobody will notice the error?
2057 comm_reset_close(clientConnection
);
// Live (non-#if'd) path: always reset-close on chunked body errors.
2060 debugs(33, 3, "aborting chunked request without error " << error
);
2061 comm_reset_close(clientConnection
);
2063 flags
.readMore
= false;
// BodyPipe callback: the body consumer went away, so auto-drain the pipe
// to keep the request reader from stalling on a full buffer.
2067 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer
)
2069 // request reader may get stuck waiting for space if nobody consumes body
2070 if (bodyPipe
!= nullptr)
2071 bodyPipe
->enableAutoConsumption();
2076 /** general lifetime handler for HTTP requests */
// Read-timeout handler: maps the timeout to a start-vs-parse error, offers
// on_unsupported_protocol tunneling a chance, and otherwise closes the
// connection (the closing tail was dropped by the extraction).
2078 ConnStateData::requestTimeout(const CommTimeoutCbParams
&io
)
2080 if (!Comm::IsConnOpen(io
.conn
))
// Distinguish "never sent a byte" from "sent a partial request".
2083 const err_type error
= receivedFirstByte_
? ERR_REQUEST_PARSE_TIMEOUT
: ERR_REQUEST_START_TIMEOUT
;
2085 if (tunnelOnError(error
))
2089 * Just close the connection to not confuse browsers
2090 * using persistent connections. Some browsers open
2091 * a connection and then do not use it until much
2092 * later (presumeably because the request triggering
2093 * the open has already been completed on another
2096 debugs(33, 3, "requestTimeout: FD " << io
.fd
<< ": lifetime is expired.");
// Comm timeout callback: the absolute connection lifetime (client_lifetime)
// expired. Logs a warning and terminates all transactions on the connection,
// tagging them as timed out for access logging.
2101 ConnStateData::lifetimeTimeout(const CommTimeoutCbParams
&io
)
2103 debugs(33, DBG_IMPORTANT
, "WARNING: Closing client connection due to lifetime timeout" <<
2104 Debug::Extra
<< "connection: " << io
.conn
);
// lte declaration elided from this view; timedout marks log tags accordingly
2107 lte
.timedout
= true;
2108 terminateAll(ERR_LIFETIME_EXP
, lte
);
// Constructor: records the client TCP details from the accepting
// MasterXaction so that later requests on this connection can create
// their own MasterXaction objects. (Initializer list partially elided.)
2111 ConnStateData::ConnStateData(const MasterXaction::Pointer
&xact
) :
2112 AsyncJob("ConnStateData"), // kids overwrite
2115 , tlsParser(Security::HandshakeParser::fromClient
)
2118 // store the details required for creating more MasterXaction objects as new requests come in
2119 log_addr
= xact
->tcpClient
->remote
;
// apply client_netmask so logged addresses can be anonymized per squid.conf
2120 log_addr
.applyClientMask(Config
.Addrs
.client_netmask
);
2122 // register to receive notice of Squid signal events
2123 // which may affect long persisting client connections
// AsyncJob entry point: one-time connection setup. Disables Path MTU
// discovery where configured, installs the close handler, and either starts
// PROXY protocol parsing or proceeds directly to whenClientIpKnown().
2128 ConnStateData::start()
2130 BodyProducer::start();
2131 HttpControlMsgSink::start();
// honor disable_pmtu_discovery: off/always/transparent
2133 if (port
->disable_pmtu_discovery
!= DISABLE_PMTU_OFF
&&
2134 (transparent() || port
->disable_pmtu_discovery
== DISABLE_PMTU_ALWAYS
)) {
2135 #if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
2136 int i
= IP_PMTUDISC_DONT
;
2137 if (setsockopt(clientConnection
->fd
, SOL_IP
, IP_MTU_DISCOVER
, &i
, sizeof(i
)) < 0) {
2139 debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection
<< " : " << xstrerr(xerrno
));
// warn only once per process on platforms lacking IP_MTU_DISCOVER
2142 static bool reported
= false;
2145 debugs(33, DBG_IMPORTANT
, "WARNING: Path MTU discovery disabling is not supported on your platform.");
// get notified when our socket closes so the job can clean up
2151 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
2152 AsyncCall::Pointer call
= JobCallback(33, 5, Dialer
, this, ConnStateData::connStateClosed
);
2153 comm_add_close_handler(clientConnection
->fd
, call
);
// require_proxy_header ports must read a PROXY protocol header first
2155 needProxyProtocolHeader_
= port
->flags
.proxySurrogate
;
2156 if (needProxyProtocolHeader_
) {
2157 if (!proxyProtocolValidateClient()) // will close the connection on failure
2160 whenClientIpKnown();
2162 // requires needProxyProtocolHeader_ which is initialized above
2163 preservingClientData_
= shouldPreserveClientData();
// Runs once the (possibly PROXY-protocol-supplied) client IP is final:
// kicks off reverse DNS if configured, registers the client in the client
// database, and assigns the connection to a matching client delay pool.
2167 ConnStateData::whenClientIpKnown()
2169 debugs(33, 7, clientConnection
->remote
);
2170 if (Dns::ResolveClientAddressesAsap
)
2171 fqdncache_gethostbyaddr(clientConnection
->remote
, FQDN_LOOKUP_IF_MISS
);
// count this connection in the per-client database
2173 clientdbEstablished(clientConnection
->remote
, 1);
// start with no delay-pool client info attached to this FD
2176 fd_table
[clientConnection
->fd
].clientInfo
= nullptr;
2178 if (!Config
.onoff
.client_db
)
2179 return; // client delay pools require client_db
2181 const auto &pools
= ClientDelayPools::Instance()->pools
;
2183 ACLFilledChecklist
ch(nullptr, nullptr);
2185 // TODO: we check early to limit error response bandwidth but we
2186 // should recheck when we can honor delay_pool_uses_indirect
// first matching pool wins; a pool without an ACL never matches
2187 for (unsigned int pool
= 0; pool
< pools
.size(); ++pool
) {
2189 /* pools require explicit 'allow' to assign a client into them */
2190 if (pools
[pool
]->access
) {
2191 ch
.changeAcl(pools
[pool
]->access
);
2192 const auto &answer
= ch
.fastCheck();
2193 if (answer
.allowed()) {
2195 /* request client information from db after we did all checks
2196 this will save hash lookup if client failed checks */
2197 ClientInfo
* cli
= clientdbGetInfo(clientConnection
->remote
);
2200 /* put client info in FDE */
2201 fd_table
[clientConnection
->fd
].clientInfo
= cli
;
2203 /* setup write limiter for this request */
// initial bucket level is a percentage (ClientDelay.initial) of the maximum
2204 const double burst
= floor(0.5 +
2205 (pools
[pool
]->highwatermark
* Config
.ClientDelay
.initial
)/100.0);
2206 cli
->setWriteLimiter(pools
[pool
]->rate
, burst
, pools
[pool
]->highwatermark
);
2209 debugs(83, 4, "Delay pool " << pool
<< " skipped because ACL " << answer
);
2216 // kids must extend to actually start doing something (e.g., reading)
// Performs one TLS accept step on the client connection and checkpoints
// the TLS key log (OpenSSL builds) regardless of handshake completion.
// Returns the Security::Accept() result for the caller to dispatch on.
2220 ConnStateData::acceptTls()
2222 const auto handshakeResult
= Security::Accept(*clientConnection
);
2225 // log ASAP, even if the handshake has not completed (or failed)
2226 const auto fd
= clientConnection
->fd
;
2228 keyLogger
.checkpoint(*fd_table
[fd
].ssl
, *this);
2230 // TODO: Support fd_table[fd].ssl dereference in other builds.
2233 return handshakeResult
;
// Comm accept callback for plain HTTP listening ports: validates the accept
// result, then creates and starts an Http::Server job for the new client.
2236 /** Handle a new connection on an HTTP socket. */
2238 httpAccept(const CommAcceptCbParams
&params
)
2240 Assure(params
.port
);
2242 // NP: it is possible the port was reconfigured when the call or accept() was queued.
// a failed accept (e.g., client already disconnected) is only logged
2244 if (params
.flag
!= Comm::OK
) {
2245 // Its possible the call was still queued when the client disconnected
2246 debugs(33, 2, params
.port
->listenConn
<< ": accept failure: " << xstrerr(params
.xerrno
));
2250 debugs(33, 4, params
.conn
<< ": accepted");
2251 fd_note(params
.conn
->fd
, "client http connect");
// the MasterXaction ties this client socket to the receiving port
2252 const auto xact
= MasterXaction::MakePortful(params
.port
);
2253 xact
->tcpClient
= params
.conn
;
2255 // Socket is ready, setup the connection manager to start using it
2256 auto *srv
= Http::NewServer(xact
);
2257 // XXX: do not abandon the MasterXaction object
2258 AsyncJob::Start(srv
); // usually async-calls readSomeData()
// Creates the server-side TLS session object for an accepted HTTPS client
// and records it in fd_table. Success/failure return paths are elided from
// this view; the error branch logs at DBG_IMPORTANT.
2261 /// Create TLS connection structure and update fd_table
2263 httpsCreate(const ConnStateData
*connState
, const Security::ContextPointer
&ctx
)
2265 const auto conn
= connState
->clientConnection
;
2266 if (Security::CreateServerSession(ctx
, conn
, connState
->port
->secure
, "client https start")) {
2267 debugs(33, 5, "will negotiate TLS on " << conn
);
2271 debugs(33, DBG_IMPORTANT
, "ERROR: could not create TLS server context for " << conn
);
// Comm read/write handler driving the TLS handshake with the client.
// Re-schedules itself while the handshake wants more I/O; on error, records
// the failure and closes; on success, logs session details, captures any
// client certificate, finalizes a bumped CONNECT, and starts reading HTTP.
2276 /** negotiate an SSL connection */
2278 clientNegotiateSSL(int fd
, void *data
)
2280 ConnStateData
*conn
= (ConnStateData
*)data
;
2282 const auto handshakeResult
= conn
->acceptTls();
2283 switch (handshakeResult
.category
) {
2284 case Security::IoResult::ioSuccess
:
// handshake incomplete: wait for the socket to become readable/writable
2287 case Security::IoResult::ioWantRead
:
2288 Comm::SetSelect(conn
->clientConnection
->fd
, COMM_SELECT_READ
, clientNegotiateSSL
, conn
, 0);
2291 case Security::IoResult::ioWantWrite
:
2292 Comm::SetSelect(conn
->clientConnection
->fd
, COMM_SELECT_WRITE
, clientNegotiateSSL
, conn
, 0);
2295 case Security::IoResult::ioError
:
2296 debugs(83, (handshakeResult
.important
? Important(62) : 2), "ERROR: Cannot accept a TLS connection" <<
2297 Debug::Extra
<< "problem: " << WithExtras(handshakeResult
));
2298 // TODO: No ConnStateData::tunnelOnError() on this forward-proxy code
2299 // path because we cannot know the intended connection target?
2300 conn
->updateError(ERR_SECURE_ACCEPT_FAIL
, handshakeResult
.errorDetail
);
2301 conn
->clientConnection
->close();
// handshake done: inspect the negotiated session
2305 Security::SessionPointer
session(fd_table
[fd
].ssl
);
2308 if (Security::SessionIsResumed(session
)) {
2309 debugs(83, 2, "Session " << SSL_get_session(session
.get()) <<
2310 " reused on FD " << fd
<< " (" << fd_table
[fd
].ipaddr
<<
2311 ":" << (int)fd_table
[fd
].remote_port
<< ")");
// at debug level 83,4+ dump the full session in PEM form
2313 if (Debug::Enabled(83, 4)) {
2314 /* Write out the SSL session details.. actually the call below, but
2315 * OpenSSL headers do strange typecasts confusing GCC.. */
2316 /* PEM_write_SSL_SESSION(DebugStream(), SSL_get_session(ssl)); */
2317 #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
2318 PEM_ASN1_write(reinterpret_cast<i2d_of_void
*>(i2d_SSL_SESSION
),
2319 PEM_STRING_SSL_SESSION
, DebugStream(),
2320 reinterpret_cast<char *>(SSL_get_session(session
.get())),
2321 nullptr, nullptr, 0, nullptr, nullptr);
2323 #elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)
2325 /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
2326 * This is caused by an unpredictable gcc behaviour on a cast of the first argument
2327 * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
2328 * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
2329 * Because there are two possible usable cast, if you get an error here, try the other
2330 * commented line. */
2332 PEM_ASN1_write((int(*)())i2d_SSL_SESSION
, PEM_STRING_SSL_SESSION
,
2334 reinterpret_cast<char *>(SSL_get_session(session
.get())),
2335 nullptr, nullptr, 0, nullptr, nullptr);
2336 /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
2338 reinterpret_cast<char *>(SSL_get_session(session.get())),
2339 nullptr, nullptr, 0, nullptr, nullptr);
2342 debugs(83, 4, "With " OPENSSL_VERSION_TEXT
", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source.");
2345 /* Note: This does not automatically fflush the log file.. */
// not a resumed session: just log the new-session pointer and peer
2348 debugs(83, 2, "New session " << SSL_get_session(session
.get()) <<
2349 " on FD " << fd
<< " (" << fd_table
[fd
].ipaddr
<< ":" <<
2350 fd_table
[fd
].remote_port
<< ")");
2353 debugs(83, 2, "TLS session reuse not yet implemented.");
2356 // Connection established. Retrieve TLS connection parameters for logging.
2357 conn
->clientConnection
->tlsNegotiations()->retrieveNegotiatedInfo(session
);
// SSL_get_peer_certificate() returns an owned X509; freed below
2360 X509
*client_cert
= SSL_get_peer_certificate(session
.get());
2363 debugs(83, 3, "FD " << fd
<< " client certificate: subject: " <<
2364 Security::SubjectName(*client_cert
));
2366 debugs(83, 3, "FD " << fd
<< " client certificate: issuer: " <<
2367 Security::IssuerName(*client_cert
));
2369 X509_free(client_cert
);
2371 debugs(83, 5, "FD " << fd
<< " has no client certificate.");
2374 debugs(83, 2, "Client certificate requesting not yet implemented.");
2377 // If we are called, then bumped CONNECT has succeeded. Finalize it.
2378 if (auto xact
= conn
->pipeline
.front()) {
2379 if (xact
->http
&& xact
->http
->request
&& xact
->http
->request
->method
== Http::METHOD_CONNECT
)
2381 // cannot proceed with encryption if requests wait for plain responses
2382 Must(conn
->pipeline
.empty());
2384 /* careful: finished() above frees request, host, etc. */
// start reading the (now encrypted) HTTP request stream
2386 conn
->readSomeData();
// Begins TLS service on a freshly accepted HTTPS client connection using
// the given (static) context, then arms the request timeout and schedules
// clientNegotiateSSL() as the read handler.
2390 * If Security::ContextPointer is given, starts reading the TLS handshake.
2391 * Otherwise, calls switchToHttps to generate a dynamic Security::ContextPointer.
2394 httpsEstablish(ConnStateData
*connState
, const Security::ContextPointer
&ctx
)
2397 const Comm::ConnectionPointer
&details
= connState
->clientConnection
;
// bail out if there is no context or the TLS session cannot be created
2399 if (!ctx
|| !httpsCreate(connState
, ctx
))
2402 connState
->resetReadTimeout(Config
.Timeout
.request
);
2404 Comm::SetSelect(details
->fd
, COMM_SELECT_READ
, clientNegotiateSSL
, connState
, 0);
// ssl_bump ACL callback for intercepted HTTPS: records the chosen bump mode
// (splice when denied), terminates when so directed, and otherwise fakes a
// CONNECT request so normal request processing can continue.
2409 * A callback function to use with the ACLFilledChecklist callback.
2412 httpsSslBumpAccessCheckDone(Acl::Answer answer
, void *data
)
2414 ConnStateData
*connState
= (ConnStateData
*) data
;
2416 // if the connection is closed or closing, just return.
2417 if (!connState
->isOpen())
2420 if (answer
.allowed()) {
2421 debugs(33, 2, "sslBump action " << Ssl::bumpMode(answer
.kind
) << "needed for " << connState
->clientConnection
);
2422 connState
->sslBumpMode
= static_cast<Ssl::BumpMode
>(answer
.kind
);
// denied (or no ssl_bump rules matched): do not bump, just splice
2424 debugs(33, 3, "sslBump not needed for " << connState
->clientConnection
);
2425 connState
->sslBumpMode
= Ssl::bumpSplice
;
2428 if (connState
->sslBumpMode
== Ssl::bumpTerminate
) {
2429 connState
->clientConnection
->close();
// synthesize a CONNECT so the bump proceeds through the regular pipeline
2433 if (!connState
->fakeAConnectRequest("ssl-bump", connState
->inBuf
))
2434 connState
->clientConnection
->close();
// Comm accept callback for HTTPS listening ports: validates the accept
// result, then creates and starts an Https::Server job for the new client.
2438 /** handle a new HTTPS connection */
2440 httpsAccept(const CommAcceptCbParams
&params
)
2442 Assure(params
.port
);
2444 // NP: it is possible the port was reconfigured when the call or accept() was queued.
// a failed accept (e.g., client already disconnected) is only logged
2446 if (params
.flag
!= Comm::OK
) {
2447 // Its possible the call was still queued when the client disconnected
2448 debugs(33, 2, "httpsAccept: " << params
.port
->listenConn
<< ": accept failure: " << xstrerr(params
.xerrno
));
// the MasterXaction ties this client socket to the receiving port
2452 const auto xact
= MasterXaction::MakePortful(params
.port
);
2453 xact
->tcpClient
= params
.conn
;
2455 debugs(33, 4, params
.conn
<< " accepted, starting SSL negotiation.");
2456 fd_note(params
.conn
->fd
, "client https connect");
2458 // Socket is ready, setup the connection manager to start using it
2459 auto *srv
= Https::NewServer(xact
);
2460 // XXX: do not abandon the MasterXaction object
2461 AsyncJob::Start(srv
); // usually async-calls postHttpsAccept()
// Continues HTTPS connection setup after accept. For ssl_bump ports, builds
// a fake CONNECT-like request and ALE from the intercepted destination and
// starts a non-blocking ssl_bump ACL check; otherwise negotiates TLS
// immediately with the port's static context.
2465 ConnStateData::postHttpsAccept()
2467 if (port
->flags
.tunnelSslBumping
) {
2469 debugs(33, 5, "accept transparent connection: " << clientConnection
);
// without ssl_bump rules, treat as denied (splice) right away
2471 if (!Config
.accessList
.ssl_bump
) {
2472 httpsSslBumpAccessCheckDone(ACCESS_DENIED
, this);
2476 const auto mx
= MasterXaction::MakePortful(port
);
2477 mx
->tcpClient
= clientConnection
;
2478 // Create a fake HTTP request and ALE for the ssl_bump ACL check,
2479 // using tproxy/intercept provided destination IP and port.
2480 // XXX: Merge with subsequent fakeAConnectRequest(), buildFakeRequest().
2481 // XXX: Do this earlier (e.g., in Http[s]::One::Server constructor).
2482 HttpRequest
*request
= new HttpRequest(mx
);
2483 static char ip
[MAX_IPSTRLEN
];
// this path is only valid for tproxy/intercepted connections
2484 assert(clientConnection
->flags
& (COMM_TRANSPARENT
| COMM_INTERCEPTION
));
2485 request
->url
.host(clientConnection
->local
.toStr(ip
, sizeof(ip
)));
2486 request
->url
.port(clientConnection
->local
.port());
2487 request
->myportname
= port
->name
;
2488 const AccessLogEntry::Pointer connectAle
= new AccessLogEntry
;
2489 CodeContext::Reset(connectAle
);
2490 // TODO: Use these request/ALE when waiting for new bumped transactions.
2492 auto acl_checklist
= ACLFilledChecklist::Make(Config
.accessList
.ssl_bump
, request
);
2493 fillChecklist(*acl_checklist
);
2494 // Build a local AccessLogEntry to allow requiresAle() acls work
2495 acl_checklist
->al
= connectAle
;
2496 acl_checklist
->al
->cache
.start_time
= current_time
;
2497 acl_checklist
->al
->tcpClient
= clientConnection
;
2498 acl_checklist
->al
->cache
.port
= port
;
2499 acl_checklist
->al
->cache
.caddr
= log_addr
;
2500 acl_checklist
->al
->proxyProtocolHeader
= proxyProtocolHeader_
;
2501 acl_checklist
->al
->updateError(bareError
);
// swap the ALE's request pointer while keeping refcounts balanced
2502 HTTPMSGUNLOCK(acl_checklist
->al
->request
);
2503 acl_checklist
->al
->request
= request
;
2504 HTTPMSGLOCK(acl_checklist
->al
->request
);
2505 Http::StreamPointer context
= pipeline
.front();
2506 ClientHttpRequest
*http
= context
? context
->http
: nullptr;
2507 const char *log_uri
= http
? http
->log_uri
: nullptr;
2508 acl_checklist
->syncAle(request
, log_uri
);
// answer arrives asynchronously in httpsSslBumpAccessCheckDone()
2509 ACLFilledChecklist::NonBlockingCheck(std::move(acl_checklist
), httpsSslBumpAccessCheckDone
, this);
2511 fatal("FATAL: SSL-Bump requires --with-openssl");
// non-bumping HTTPS port: use the static TLS context immediately
2515 httpsEstablish(this, port
->secure
.staticContext
);
2521 ConnStateData::sslCrtdHandleReplyWrapper(void *data
, const Helper::Reply
&reply
)
2523 ConnStateData
* state_data
= (ConnStateData
*)(data
);
2524 state_data
->sslCrtdHandleReply(reply
);
// Processes the ssl_crtd helper's reply to our certificate-generation
// request. On success, installs the generated certificate/key: directly on
// the SSL object for peek/stare bumping, or as a new context (cached when
// possible). On any failure, falls through to getSslContextDone(nil).
2528 ConnStateData::sslCrtdHandleReply(const Helper::Reply
&reply
)
2531 debugs(33, 3, "Connection gone while waiting for ssl_crtd helper reply; helper reply:" << reply
);
2535 if (reply
.result
== Helper::BrokenHelper
) {
2536 debugs(33, 5, "Certificate for " << tlsConnectHostOrIp
<< " cannot be generated. ssl_crtd response: " << reply
);
2537 } else if (!reply
.other().hasContent()) {
2538 debugs(1, DBG_IMPORTANT
, "\"ssl_crtd\" helper returned <NULL> reply.");
// parse the helper payload into a structured reply message
2540 Ssl::CrtdMessage
reply_message(Ssl::CrtdMessage::REPLY
);
2541 if (reply_message
.parse(reply
.other().content(), reply
.other().contentSize()) != Ssl::CrtdMessage::OK
) {
2542 debugs(33, 5, "Reply from ssl_crtd for " << tlsConnectHostOrIp
<< " is incorrect");
2544 if (reply
.result
!= Helper::Okay
) {
2545 debugs(33, 5, "Certificate for " << tlsConnectHostOrIp
<< " cannot be generated. ssl_crtd response: " << reply_message
.getBody());
2547 debugs(33, 5, "Certificate for " << tlsConnectHostOrIp
<< " was successfully received from ssl_crtd");
// peek/stare: configure the existing per-connection SSL object in place
2548 if (sslServerBump
&& (sslServerBump
->act
.step1
== Ssl::bumpPeek
|| sslServerBump
->act
.step1
== Ssl::bumpStare
)) {
2549 doPeekAndSpliceStep();
2550 auto ssl
= fd_table
[clientConnection
->fd
].ssl
.get();
2551 bool ret
= Ssl::configureSSLUsingPkeyAndCertFromMemory(ssl
, reply_message
.getBody().c_str(), *port
);
2553 debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");
2555 Security::ContextPointer
ctx(Security::GetFrom(fd_table
[clientConnection
->fd
].ssl
));
2556 Ssl::configureUnconfiguredSslContext(ctx
, signAlgorithm
, *port
);
// other bump modes: build a fresh context and cache it under sslBumpCertKey
2558 Security::ContextPointer
ctx(Ssl::GenerateSslContextUsingPkeyAndCertFromMemory(reply_message
.getBody().c_str(), port
->secure
, (signAlgorithm
== Ssl::algSignTrusted
)));
2559 if (ctx
&& !sslBumpCertKey
.isEmpty())
2560 storeTlsContextToCache(sslBumpCertKey
, ctx
);
2561 getSslContextDone(ctx
);
// failure path: continue with a nil context (may fall back to static context)
2567 Security::ContextPointer nil
;
2568 getSslContextDone(nil
);
// Fills certProperties for generating a fake/mimic server certificate:
// picks the common name, mimics the real server certificate when the
// server connection succeeded, applies sslproxy_cert_adapt adaptations and
// sslproxy_cert_sign signing algorithm selection, and chooses the signing
// CA certificate/key accordingly.
2571 void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties
&certProperties
)
// prefer an SNI-derived common name; fall back to the CONNECT host/IP
2573 certProperties
.commonName
= sslCommonName_
.isEmpty() ? tlsConnectHostOrIp
.c_str() : sslCommonName_
.c_str();
2575 const bool connectedOk
= sslServerBump
&& sslServerBump
->connectedOk();
// mimic the real server certificate only if we actually saw one
2577 if (X509
*mimicCert
= sslServerBump
->serverCert
.get())
2578 certProperties
.mimicCert
.resetAndLock(mimicCert
);
2580 ACLFilledChecklist
checklist(nullptr, sslServerBump
->request
.getRaw());
2581 fillChecklist(checklist
);
// apply each configured sslproxy_cert_adapt rule at most once per algorithm
2583 for (sslproxy_cert_adapt
*ca
= Config
.ssl_client
.cert_adapt
; ca
!= nullptr; ca
= ca
->next
) {
2584 // If the algorithm already set, then ignore it.
2585 if ((ca
->alg
== Ssl::algSetCommonName
&& certProperties
.setCommonName
) ||
2586 (ca
->alg
== Ssl::algSetValidAfter
&& certProperties
.setValidAfter
) ||
2587 (ca
->alg
== Ssl::algSetValidBefore
&& certProperties
.setValidBefore
) )
2590 if (ca
->aclList
&& checklist
.fastCheck(ca
->aclList
).allowed()) {
2591 const char *alg
= Ssl::CertAdaptAlgorithmStr
[ca
->alg
];
2592 const char *param
= ca
->param
;
2594 // For parameterless CN adaptation, use hostname from the
2596 if (ca
->alg
== Ssl::algSetCommonName
) {
2598 param
= tlsConnectHostOrIp
.c_str();
2599 certProperties
.commonName
= param
;
2600 certProperties
.setCommonName
= true;
2601 } else if (ca
->alg
== Ssl::algSetValidAfter
)
2602 certProperties
.setValidAfter
= true;
2603 else if (ca
->alg
== Ssl::algSetValidBefore
)
2604 certProperties
.setValidBefore
= true;
// NOTE(review): "aglorithm" typo below is in a runtime log string; left as-is here
2606 debugs(33, 5, "Matches certificate adaptation aglorithm: " <<
2607 alg
<< " param: " << (param
? param
: "-"));
// pick the signing algorithm from the first matching sslproxy_cert_sign rule
2611 certProperties
.signAlgorithm
= Ssl::algSignEnd
;
2612 for (sslproxy_cert_sign
*sg
= Config
.ssl_client
.cert_sign
; sg
!= nullptr; sg
= sg
->next
) {
2613 if (sg
->aclList
&& checklist
.fastCheck(sg
->aclList
).allowed()) {
2614 certProperties
.signAlgorithm
= (Ssl::CertSignAlgorithm
)sg
->alg
;
2618 } else {// did not try to connect (e.g. client-first) or failed to connect
2619 // In case of an error while connecting to the secure server, use a
2620 // trusted certificate, with no mimicked fields and no adaptation
2621 // algorithms. There is nothing we can mimic, so we want to minimize the
2622 // number of warnings the user will have to see to get to the error page.
2623 // We will close the connection, so that the trust is not extended to
2624 // non-Squid content.
2625 certProperties
.signAlgorithm
= Ssl::algSignTrusted
;
2628 assert(certProperties
.signAlgorithm
!= Ssl::algSignEnd
);
// untrusted signing uses the dedicated untrusted CA; everything else the main CA
2630 if (certProperties
.signAlgorithm
== Ssl::algSignUntrusted
) {
2631 assert(port
->secure
.untrustedSigningCa
.cert
);
2632 certProperties
.signWithX509
.resetAndLock(port
->secure
.untrustedSigningCa
.cert
.get());
2633 certProperties
.signWithPkey
.resetAndLock(port
->secure
.untrustedSigningCa
.pkey
.get());
2635 assert(port
->secure
.signingCa
.cert
.get());
2636 certProperties
.signWithX509
.resetAndLock(port
->secure
.signingCa
.cert
.get());
2638 if (port
->secure
.signingCa
.pkey
)
2639 certProperties
.signWithPkey
.resetAndLock(port
->secure
.signingCa
.pkey
.get());
// remember the chosen algorithm for later context configuration
2641 signAlgorithm
= certProperties
.signAlgorithm
;
2643 certProperties
.signHash
= Ssl::DefaultSignHash
;
// Looks up a previously generated TLS context in this port's certificate
// cache. A stale hit (failing verifySslCertificate) is evicted. Returns a
// nil ContextPointer when there is no (valid) cached entry.
2646 Security::ContextPointer
2647 ConnStateData::getTlsContextFromCache(const SBuf
&cacheKey
, const Ssl::CertificateProperties
&certProperties
)
2649 debugs(33, 5, "Finding SSL certificate for " << cacheKey
<< " in cache");
2650 const auto ssl_ctx_cache
= Ssl::TheGlobalContextStorage().getLocalStorage(port
->s
);
2651 if (const auto ctx
= ssl_ctx_cache
? ssl_ctx_cache
->get(cacheKey
) : nullptr) {
// a cached context is only usable if it still matches the wanted properties
2652 if (Ssl::verifySslCertificate(*ctx
, certProperties
)) {
2653 debugs(33, 5, "Cached SSL certificate for " << certProperties
.commonName
<< " is valid");
2656 debugs(33, 5, "Cached SSL certificate for " << certProperties
.commonName
<< " is out of date. Delete this certificate from cache");
2658 ssl_ctx_cache
->del(cacheKey
);
2661 return Security::ContextPointer(nullptr);
2665 ConnStateData::storeTlsContextToCache(const SBuf
&cacheKey
, Security::ContextPointer
&ctx
)
2667 const auto ssl_ctx_cache
= Ssl::TheGlobalContextStorage().getLocalStorage(port
->s
);
2668 if (!ssl_ctx_cache
|| !ssl_ctx_cache
->add(cacheKey
, ctx
)) {
2669 // If it is not in storage delete after using. Else storage deleted it.
2670 fd_table
[clientConnection
->fd
].dynamicTlsContext
= ctx
;
// Starts obtaining a TLS context for bumping this connection: computes the
// certificate properties, tries the in-RAM cache (disabled for peek/stare),
// then asks the ssl_crtd helper if built, and finally falls back to
// in-process certificate generation. Ends by calling getSslContextDone().
2675 ConnStateData::getSslContextStart()
2677 if (port
->secure
.generateHostCertificates
) {
2678 Ssl::CertificateProperties certProperties
;
2679 buildSslCertGenerationParams(certProperties
);
2681 // Disable caching for bumpPeekAndSplice mode
2682 if (!(sslServerBump
&& (sslServerBump
->act
.step1
== Ssl::bumpPeek
|| sslServerBump
->act
.step1
== Ssl::bumpStare
))) {
2683 sslBumpCertKey
.clear();
2684 Ssl::InRamCertificateDbKey(certProperties
, sslBumpCertKey
);
2685 assert(!sslBumpCertKey
.isEmpty());
// cache hit short-circuits generation entirely
2687 Security::ContextPointer
ctx(getTlsContextFromCache(sslBumpCertKey
, certProperties
));
2689 getSslContextDone(ctx
);
// ssl_crtd path: compose and submit an async generation request
2696 debugs(33, 5, "Generating SSL certificate for " << certProperties
.commonName
<< " using ssl_crtd.");
2697 Ssl::CrtdMessage
request_message(Ssl::CrtdMessage::REQUEST
);
2698 request_message
.setCode(Ssl::CrtdMessage::code_new_certificate
);
2699 request_message
.composeRequest(certProperties
);
2700 debugs(33, 5, "SSL crtd request: " << request_message
.compose().c_str());
// reply arrives asynchronously via sslCrtdHandleReplyWrapper()
2701 Ssl::Helper::Submit(request_message
, sslCrtdHandleReplyWrapper
, this);
2703 } catch (const std::exception
&e
) {
2704 debugs(33, DBG_IMPORTANT
, "ERROR: Failed to compose ssl_crtd " <<
2705 "request for " << certProperties
.commonName
<<
2706 " certificate: " << e
.what() << "; will now block to " <<
2707 "generate that certificate.");
2708 // fall through to do blocking in-process generation.
2710 #endif // USE_SSL_CRTD
// in-process (blocking) certificate generation fallback
2712 debugs(33, 5, "Generating SSL certificate for " << certProperties
.commonName
);
2713 if (sslServerBump
&& (sslServerBump
->act
.step1
== Ssl::bumpPeek
|| sslServerBump
->act
.step1
== Ssl::bumpStare
)) {
2714 doPeekAndSpliceStep();
2715 auto ssl
= fd_table
[clientConnection
->fd
].ssl
.get();
2716 if (!Ssl::configureSSL(ssl
, certProperties
, *port
))
2717 debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");
2719 Security::ContextPointer
ctx(Security::GetFrom(fd_table
[clientConnection
->fd
].ssl
));
2720 Ssl::configureUnconfiguredSslContext(ctx
, certProperties
.signAlgorithm
, *port
);
// non-peek/stare: generate a standalone context and cache it when keyed
2722 Security::ContextPointer
dynCtx(Ssl::GenerateSslContext(certProperties
, port
->secure
, (signAlgorithm
== Ssl::algSignTrusted
)));
2723 if (dynCtx
&& !sslBumpCertKey
.isEmpty())
2724 storeTlsContextToCache(sslBumpCertKey
, dynCtx
);
2725 getSslContextDone(dynCtx
);
// generateHostCertificates off: proceed with a nil context (static fallback)
2730 Security::ContextPointer nil
;
2731 getSslContextDone(nil
);
// Final step of TLS context acquisition for bumping: falls back to the
// port's static context when dynamic generation failed (closing if none),
// creates the TLS session, primes the client BIO with already-read bytes,
// and starts the TLS handshake with the client.
2735 ConnStateData::getSslContextDone(Security::ContextPointer
&ctx
)
2737 if (port
->secure
.generateHostCertificates
&& !ctx
) {
2738 debugs(33, 2, "Failed to generate TLS context for " << tlsConnectHostOrIp
);
2741 // If generated ssl context = nullptr, try to use static ssl context.
2743 if (!port
->secure
.staticContext
) {
2744 debugs(83, DBG_IMPORTANT
, "Closing " << clientConnection
->remote
<< " as lacking TLS context");
2745 clientConnection
->close();
2748 debugs(33, 5, "Using static TLS context.");
2749 ctx
= port
->secure
.staticContext
;
// give up if the TLS session cannot be created with this context
2753 if (!httpsCreate(this, ctx
))
2756 // bumped intercepted conns should already have Config.Timeout.request set
2757 // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
2758 // to make sure the connection does not get stuck on non-SSL clients.
2759 resetReadTimeout(Config
.Timeout
.request
);
2761 switchedToHttps_
= true;
// feed the already-buffered client bytes (TLS Hello) into the read BIO
2763 auto ssl
= fd_table
[clientConnection
->fd
].ssl
.get();
2764 BIO
*b
= SSL_get_rbio(ssl
);
2765 Ssl::ClientBio
*bio
= static_cast<Ssl::ClientBio
*>(BIO_get_data(b
));
2766 bio
->setReadBufData(inBuf
);
// kick off the handshake state machine
2768 clientNegotiateSSL(clientConnection
->fd
, this);
// Converts this connection from plain HTTP (a bumped CONNECT or an
// intercepted TCP connection) into an HTTPS connection: records the
// intended TLS destination, creates the ServerBump state for the chosen
// bump mode, and arms parsing of the client's TLS handshake.
2772 ConnStateData::switchToHttps(ClientHttpRequest
*http
, Ssl::BumpMode bumpServerMode
)
2774 assert(!switchedToHttps_
);
2775 Must(http
->request
);
2776 auto &request
= http
->request
;
2778 // Depending on receivedFirstByte_, we are at the start of either an
2779 // established CONNECT tunnel with the client or an intercepted TCP (and
2780 // presumably TLS) connection from the client. Expect TLS Client Hello.
2781 const auto insideConnectTunnel
= receivedFirstByte_
;
2782 debugs(33, 5, (insideConnectTunnel
? "post-CONNECT " : "raw TLS ") << clientConnection
);
// remember where the client wanted to go, for certificate generation/logging
2784 tlsConnectHostOrIp
= request
->url
.hostOrIp();
2785 tlsConnectPort
= request
->url
.port();
2786 resetSslCommonName(request
->url
.host());
2788 // We are going to read new request
2789 flags
.readMore
= true;
2791 // keep version major.minor details the same.
2792 // but we are now performing the HTTPS handshake traffic
2793 transferProtocol
.protocol
= AnyP::PROTO_HTTPS
;
2795 // If sslServerBump is set, then we have decided to deny CONNECT
2796 // and now want to switch to SSL to send the error to the client
2797 // without even peeking at the origin server certificate.
2798 if (bumpServerMode
== Ssl::bumpServerFirst
&& !sslServerBump
) {
2799 request
->flags
.sslPeek
= true;
2800 sslServerBump
= new Ssl::ServerBump(http
);
2801 } else if (bumpServerMode
== Ssl::bumpPeek
|| bumpServerMode
== Ssl::bumpStare
) {
2802 request
->flags
.sslPeek
= true;
2803 sslServerBump
= new Ssl::ServerBump(http
, nullptr, bumpServerMode
);
2806 // commSetConnTimeout() was called for this request before we switched.
2807 // Fix timeout to request_start_timeout
2808 resetReadTimeout(Config
.Timeout
.request_start_timeout
);
2809 // Also reset receivedFirstByte_ flag to allow this timeout work in the case we have
2810 // a bumped "connect" request on non transparent port.
2811 receivedFirstByte_
= false;
2812 // Get more data to peek at TLS
2813 parsingTlsHandshake
= true;
2815 // If the protocol has changed, then reset preservingClientData_.
2816 // Otherwise, its value initially set in start() is still valid/fresh.
2817 // shouldPreserveClientData() uses parsingTlsHandshake which is reset above.
2818 if (insideConnectTunnel
)
2819 preservingClientData_
= shouldPreserveClientData();
// Parses the buffered client bytes as a TLS Client Hello. On parse errors,
// records the detail and either tunnels (on_unsupported_protocol) or
// closes. On success, harvests the SNI and dispatches to the appropriate
// bump step: client-first context generation, server-first forwarding, or
// peek/stare splicing.
2825 ConnStateData::parseTlsHandshake()
2827 Must(parsingTlsHandshake
);
2829 assert(!inBuf
.isEmpty());
2830 receivedFirstByte();
2831 fd_note(clientConnection
->fd
, "Parsing TLS handshake");
2833 // stops being nil if we fail to parse the handshake
2834 ErrorDetail::Pointer parseErrorDetails
;
2837 if (!tlsParser
.parseHello(inBuf
)) {
2838 // need more data to finish parsing
// parse failures are reported as exceptions; capture a detail for logging
2843 catch (const TextException
&ex
) {
2844 debugs(83, 2, "exception: " << ex
);
2845 parseErrorDetails
= new ExceptionErrorDetail(ex
.id());
2848 debugs(83, 2, "exception: " << CurrentException
);
2849 static const auto d
= MakeNamedErrorDetail("TLS_ACCEPT_PARSE");
2850 parseErrorDetails
= d
;
2853 parsingTlsHandshake
= false;
2855 // client data may be needed for splicing and for
2856 // tunneling unsupportedProtocol after an error
2857 preservedClientData
= inBuf
;
2859 // Even if the parser failed, each TLS detail should either be set
2860 // correctly or still be "unknown"; copying unknown detail is a no-op.
2861 Security::TlsDetails::Pointer
const &details
= tlsParser
.details
;
2862 clientConnection
->tlsNegotiations()->retrieveParsedInfo(details
);
// the SNI (when present) becomes the certificate common name
2863 if (details
&& !details
->serverName
.isEmpty()) {
2864 resetSslCommonName(details
->serverName
.c_str());
2865 tlsClientSni_
= details
->serverName
;
2868 // We should disable read/write handlers
2869 Comm::ResetSelect(clientConnection
->fd
);
// parse failed: try tunneling the raw bytes, otherwise close
2871 if (parseErrorDetails
) {
2872 Http::StreamPointer context
= pipeline
.front();
2873 Must(context
&& context
->http
);
2874 HttpRequest::Pointer request
= context
->http
->request
;
2875 debugs(83, 5, "Got something other than TLS Client Hello. Cannot SslBump.");
2876 updateError(ERR_PROTOCOL_UNKNOWN
, parseErrorDetails
);
2877 if (!tunnelOnError(ERR_PROTOCOL_UNKNOWN
))
2878 clientConnection
->close();
// dispatch on the step1 bump decision
2882 if (!sslServerBump
|| sslServerBump
->act
.step1
== Ssl::bumpClientFirst
) { // Either means client-first.
2883 getSslContextStart();
2885 } else if (sslServerBump
->act
.step1
== Ssl::bumpServerFirst
) {
2886 debugs(83, 5, "server-first skips step2; start forwarding the request");
2887 sslServerBump
->step
= XactionStep::tlsBump3
;
2888 Http::StreamPointer context
= pipeline
.front();
2889 ClientHttpRequest
*http
= context
? context
->http
: nullptr;
2890 // will call httpsPeeked() with certificate and connection, eventually
2891 FwdState::Start(clientConnection
, sslServerBump
->entry
, sslServerBump
->request
.getRaw(), http
? http
->al
: nullptr);
2893 Must(sslServerBump
->act
.step1
== Ssl::bumpPeek
|| sslServerBump
->act
.step1
== Ssl::bumpStare
);
2894 startPeekAndSplice();
/// ACL callback for the SslBump step-2 ssl_bump check: translates the ACL
/// answer into a Ssl::BumpMode, records it on the server-bump state, the
/// connection, and the access log entry, and then acts on it (terminate,
/// continue bumping, or splice).
2899 httpsSslBumpStep2AccessCheckDone(Acl::Answer answer
, void *data
)
// data is the ConnStateData that scheduled the non-blocking check.
2901 ConnStateData
*connState
= (ConnStateData
*) data
;
2903 // if the connection is closed or closing, just return.
2904 if (!connState
->isOpen())
2907 debugs(33, 5, "Answer: " << answer
<< " kind:" << answer
.kind
);
2908 assert(connState
->serverBump());
2909 Ssl::BumpMode bumpAction
;
2910 if (answer
.allowed()) {
// The matched ssl_bump rule's action is carried in answer.kind.
2911 bumpAction
= (Ssl::BumpMode
)answer
.kind
;
// Denied (or no match): default to splicing the connection.
2913 bumpAction
= Ssl::bumpSplice
;
2915 connState
->serverBump()->act
.step2
= bumpAction
;
2916 connState
->sslBumpMode
= bumpAction
;
2917 Http::StreamPointer context
= connState
->pipeline
.front();
2918 if (ClientHttpRequest
*http
= (context
? context
->http
: nullptr))
2919 http
->al
->ssl
.bumpMode
= bumpAction
;
2921 if (bumpAction
== Ssl::bumpTerminate
) {
2922 connState
->clientConnection
->close();
2923 } else if (bumpAction
!= Ssl::bumpSplice
) {
// Any non-splice, non-terminate action continues the bumping steps.
2924 connState
->startPeekAndSplice();
2925 } else if (!connState
->splice())
2926 connState
->clientConnection
->close();
/// Stops TLS bumping and switches to blind tunneling of the preserved client
/// bytes: reverts the FD to raw I/O, resets the transfer protocol to plain
/// HTTP, finishes the current stream context, and creates a fake CONNECT
/// request (transparent or tunneled variant) to drive the tunnel.
2930 ConnStateData::splice()
2932 // normally we can splice here, because we just got client hello message
2934 // fde::ssl/tls_read_method() probably reads from our own inBuf. If so, then
2935 // we should not lose any raw bytes when switching to raw I/O here.
2936 if (fd_table
[clientConnection
->fd
].ssl
.get())
2937 fd_table
[clientConnection
->fd
].useDefaultIo();
2939 // XXX: assuming that there was an HTTP/1.1 CONNECT to begin with...
2940 // reset the current protocol to HTTP/1.1 (was "HTTPS" for the bumping process)
2941 transferProtocol
= Http::ProtocolVersion();
2942 assert(!pipeline
.empty());
2943 Http::StreamPointer context
= pipeline
.front();
2945 Must(context
->http
);
2946 ClientHttpRequest
*http
= context
->http
;
2947 HttpRequest::Pointer request
= http
->request
;
// The CONNECT (or fake CONNECT) that triggered bumping is done now.
2948 context
->finished();
2949 if (transparent()) {
2950 // For transparent connections, make a new fake CONNECT request, now
2951 // with SNI as target. doCallout() checks, adaptations may need that.
2952 return fakeAConnectRequest("splice", preservedClientData
);
2954 // For non transparent connections make a new tunneled CONNECT, which
2955 // also sets the HttpRequest::flags::forceTunnel flag to avoid
2956 // respond with "Connection Established" to the client.
2957 // This fake CONNECT request required to allow use of SNI in
2958 // doCallout() checks and adaptations.
2959 return initiateTunneledRequest(request
, "splice", preservedClientData
);
/// SslBump step 2 driver. On first entry (still at tlsBump1) it schedules the
/// non-blocking ssl_bump ACL re-check (with client-first-style actions
/// banned). On re-entry it builds a temporary TLS server context, replays
/// the buffered ClientHello through the TLS library via a ClientBio, and on
/// success advances to step 3 and starts forwarding.
2964 ConnStateData::startPeekAndSplice()
2966 // This is the Step2 of the SSL bumping
2967 assert(sslServerBump
);
2968 Http::StreamPointer context
= pipeline
.front();
2969 ClientHttpRequest
*http
= context
? context
->http
: nullptr;
2971 if (sslServerBump
->at(XactionStep::tlsBump1
)) {
2972 sslServerBump
->step
= XactionStep::tlsBump2
;
2973 // Run a accessList check to check if want to splice or continue bumping
2975 auto acl_checklist
= ACLFilledChecklist::Make(Config
.accessList
.ssl_bump
, sslServerBump
->request
.getRaw());
// These step-1-only actions make no sense at step 2; forbid them so the
// check can only yield peek/stare/splice/bump/terminate.
2976 acl_checklist
->banAction(Acl::Answer(ACCESS_ALLOWED
, Ssl::bumpNone
));
2977 acl_checklist
->banAction(Acl::Answer(ACCESS_ALLOWED
, Ssl::bumpClientFirst
));
2978 acl_checklist
->banAction(Acl::Answer(ACCESS_ALLOWED
, Ssl::bumpServerFirst
));
2979 fillChecklist(*acl_checklist
);
2980 ACLFilledChecklist::NonBlockingCheck(std::move(acl_checklist
), httpsSslBumpStep2AccessCheckDone
, this);
2984 // will call httpsPeeked() with certificate and connection, eventually
// Temporary (not yet mimicking) server context signed by the configured CA.
2985 Security::ContextPointer
unConfiguredCTX(Ssl::createSSLContext(port
->secure
.signingCa
.cert
, port
->secure
.signingCa
.pkey
, port
->secure
));
2986 fd_table
[clientConnection
->fd
].dynamicTlsContext
= unConfiguredCTX
;
2988 if (!httpsCreate(this, unConfiguredCTX
))
2991 switchedToHttps_
= true;
// Feed the already-buffered ClientHello bytes to the TLS library through
// our custom read BIO, instead of reading them from the socket again.
2993 auto ssl
= fd_table
[clientConnection
->fd
].ssl
.get();
2994 BIO
*b
= SSL_get_rbio(ssl
);
2995 Ssl::ClientBio
*bio
= static_cast<Ssl::ClientBio
*>(BIO_get_data(b
));
2996 bio
->setReadBufData(inBuf
);
2999 // We have successfully parsed client Hello, but our TLS handshake parser is
3000 // forgiving. Now we use a TLS library to parse the same bytes, so that we
3001 // can honor on_unsupported_protocol if needed. If there are no errors, we
3002 // expect Security::Accept() to ask us to write (our) TLS server Hello. We
3003 // also allow an ioWantRead result in case some fancy TLS extension that
3004 // Squid does not yet understand requires reading post-Hello client bytes.
3005 const auto handshakeResult
= acceptTls();
3006 if (!handshakeResult
.wantsIo())
3007 return handleSslBumpHandshakeError(handshakeResult
);
3009 // We need to reset inBuf here, to be used by incoming requests in the case
3013 debugs(83, 5, "Peek and splice at step2 done. Start forwarding the request!!! ")
;
3014 sslServerBump
->step
= XactionStep::tlsBump3
;
3015 FwdState::Start(clientConnection
, sslServerBump
->entry
, sslServerBump
->request
.getRaw(), http
? http
->al
: nullptr);
3018 /// process a problematic Security::Accept() result on the SslBump code path
/// Maps each unexpected Security::IoResult category to an err_type plus a
/// named ErrorDetail, records it via updateError(), and then either tunnels
/// the connection (on_unsupported_protocol) or closes it.
3020 ConnStateData::handleSslBumpHandshakeError(const Security::IoResult
&handshakeResult
)
3022 auto errCategory
= ERR_NONE
;
3024 switch (handshakeResult
.category
) {
// Success is "problematic" here because the caller only routes non-IO
// results to this method; a completed handshake was not expected yet.
3025 case Security::IoResult::ioSuccess
: {
3026 static const auto d
= MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_SUCCESS");
3027 updateError(errCategory
= ERR_GATEWAY_FAILURE
, d
);
3031 case Security::IoResult::ioWantRead
: {
3032 static const auto d
= MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_READ");
3033 updateError(errCategory
= ERR_GATEWAY_FAILURE
, d
);
3037 case Security::IoResult::ioWantWrite
: {
3038 static const auto d
= MakeNamedErrorDetail("TLS_ACCEPT_UNEXPECTED_WRITE");
3039 updateError(errCategory
= ERR_GATEWAY_FAILURE
, d
);
3043 case Security::IoResult::ioError
:
3044 debugs(83, (handshakeResult
.important
? DBG_IMPORTANT
: 2), "ERROR: Cannot SslBump-accept a TLS connection" <<
3045 Debug::Extra
<< "problem: " << WithExtras(handshakeResult
));
3046 updateError(errCategory
= ERR_SECURE_ACCEPT_FAIL
, handshakeResult
.errorDetail
);
3051 if (!tunnelOnError(errCategory
))
3052 clientConnection
->close();
/// Resumes the paused client-side TLS negotiation in peek-and-splice mode:
/// logs the current OpenSSL handshake state and re-arms the write-ready
/// handler so clientNegotiateSSL() continues the handshake.
3056 ConnStateData::doPeekAndSpliceStep()
3058 auto ssl
= fd_table
[clientConnection
->fd
].ssl
.get();
3059 BIO
*b
= SSL_get_rbio(ssl
);
// bio is fetched for its side effects on the hold state — assumption based
// on visible code only; the relevant call may be on a line missing here.
3061 Ssl::ClientBio
*bio
= static_cast<Ssl::ClientBio
*>(BIO_get_data(b
));
3063 debugs(33, 5, "PeekAndSplice mode, proceed with client negotiation. Current state:" << SSL_state_string_long(ssl
));
3066 Comm::SetSelect(clientConnection
->fd
, COMM_SELECT_WRITE
, clientNegotiateSSL
, this, 0);
3067 switchedToHttps_
= true;
/// FwdState callback delivering the result of connecting to (and peeking at)
/// the origin server during bumping: on success the server connection is
/// pinned as idle; either way, generating the client-facing TLS context
/// continues via getSslContextStart().
3071 ConnStateData::httpsPeeked(PinnedIdleContext pic
)
3073 Must(sslServerBump
!= nullptr);
3074 Must(sslServerBump
->request
== pic
.request
);
3075 Must(pipeline
.empty() || pipeline
.front()->http
== nullptr || pipeline
.front()->http
->request
== pic
.request
.getRaw());
3077 if (Comm::IsConnOpen(pic
.connection
)) {
3078 notePinnedConnectionBecameIdle(pic
);
3079 debugs(33, 5, "bumped HTTPS server: " << tlsConnectHostOrIp
);
3081 debugs(33, 5, "Error while bumping: " << tlsConnectHostOrIp
);
3083 getSslContextStart();
3086 #endif /* USE_OPENSSL */
/// Builds a fake CONNECT request (with forceTunnel set) so that this client
/// connection is tunneled instead of parsed further. The tunnel target is
/// derived, in priority order, from the pinned server connection, the cause
/// request URL, the earlier TLS connect target, or (for intercepted
/// connections) the original destination address.
3089 ConnStateData::initiateTunneledRequest(HttpRequest::Pointer
const &cause
, const char *reason
, const SBuf
&payload
)
3091 // fake a CONNECT request to force connState to tunnel
3093 AnyP::Port connectPort
;
3095 if (pinning
.serverConnection
!= nullptr) {
// Prefer the already-pinned server as the tunnel destination.
3096 static char ip
[MAX_IPSTRLEN
];
3097 connectHost
= pinning
.serverConnection
->remote
.toStr(ip
, sizeof(ip
));
3098 if (const auto remotePort
= pinning
.serverConnection
->remote
.port())
3099 connectPort
= remotePort
;
// Otherwise fall back to the cause request's URL authority.
3101 connectHost
= cause
->url
.hostOrIp();
3102 connectPort
= cause
->url
.port();
3104 } else if (!tlsConnectHostOrIp
.isEmpty()) {
// Reuse the target recorded when this connection switched to HTTPS.
3105 connectHost
= tlsConnectHostOrIp
;
3106 connectPort
= tlsConnectPort
;
3108 } else if (transparent()) {
// Intercepted traffic: tunnel to the original destination address.
3109 static char ip
[MAX_IPSTRLEN
];
3110 connectHost
= clientConnection
->local
.toStr(ip
, sizeof(ip
));
3111 connectPort
= clientConnection
->local
.port();
3115 // Typical cases are malformed HTTP requests on http_port and malformed
3116 // TLS handshakes on non-bumping https_port. TODO: Discover these
3117 // problems earlier so that they can be classified/detailed better.
3118 debugs(33, 2, "Not able to compute URL, abort request tunneling for " << reason
);
3119 // TODO: throw when NonBlockingCheck() callbacks gain job protections
3120 static const auto d
= MakeNamedErrorDetail("TUNNEL_TARGET");
3121 updateError(ERR_INVALID_REQ
, d
);
3125 debugs(33, 2, "Request tunneling for " << reason
);
3126 const auto http
= buildFakeRequest(connectHost
, *connectPort
, payload
);
3127 HttpRequest::Pointer request
= http
->request
;
// forceTunnel suppresses the "Connection Established" reply to the client.
3128 request
->flags
.forceTunnel
= true;
3129 http
->calloutContext
= new ClientRequestContext(http
);
3131 clientProcessRequestFinished(this, request
);
/// For transparently intercepted connections only: fabricates a CONNECT
/// request targeting the TLS SNI name (if known) or the intercepted
/// destination address, so callouts/adaptation can run before tunneling.
3136 ConnStateData::fakeAConnectRequest(const char *reason
, const SBuf
&payload
)
3138 debugs(33, 2, "fake a CONNECT request to force connState to tunnel for " << reason
);
3141 assert(transparent());
// The intercepted original destination port becomes the CONNECT port.
3142 const unsigned short connectPort
= clientConnection
->local
.port();
3145 if (!tlsClientSni_
.isEmpty())
3146 connectHost
.assign(tlsClientSni_
);
// No SNI available: use the original destination IP as the host.
3150 static char ip
[MAX_IPSTRLEN
];
3151 clientConnection
->local
.toHostStr(ip
, sizeof(ip
));
3152 connectHost
.assign(ip
);
3155 ClientHttpRequest
*http
= buildFakeRequest(connectHost
, connectPort
, payload
);
3157 http
->calloutContext
= new ClientRequestContext(http
);
3158 HttpRequest::Pointer request
= http
->request
;
3160 clientProcessRequestFinished(this, request
);
/// Constructs a synthetic CONNECT ClientHttpRequest targeting useHost:usePort
/// and wires it into a new client stream (reply context + stream recipient),
/// mirroring what the normal request-parsing path would have built. The
/// payload parameter's use is not visible in this extract — presumably it
/// seeds the request body/tunnel data; confirm against the full source.
3165 ConnStateData::buildFakeRequest(SBuf
&useHost
, const AnyP::KnownPort usePort
, const SBuf
&payload
)
3167 ClientHttpRequest
*http
= new ClientHttpRequest(this);
3168 Http::Stream
*stream
= new Http::Stream(clientConnection
, http
);
// Initial read buffer for the client stream, as in normal request setup.
3170 StoreIOBuffer tempBuffer
;
3171 tempBuffer
.data
= stream
->reqbuf
;
3172 tempBuffer
.length
= HTTP_REQBUF_SZ
;
3174 ClientStreamData newServer
= new clientReplyContext(http
);
3175 ClientStreamData newClient
= stream
;
3176 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
3177 clientReplyStatus
, newServer
, clientSocketRecipient
,
3178 clientSocketDetach
, newClient
, tempBuffer
);
3180 stream
->flags
.parsed_ok
= 1; // Do we need it?
// CONNECT-style requests own the connection until the tunnel ends.
3181 stream
->mayUseConnection(true);
3183 stream
->registerWithConn();
3185 const auto mx
= MasterXaction::MakePortful(port
);
3186 mx
->tcpClient
= clientConnection
;
3187 // Setup Http::Request object. Maybe should be replaced by a call to (modified)
3188 // clientProcessRequest
3189 HttpRequest::Pointer request
= new HttpRequest(mx
);
3190 request
->url
.setScheme(AnyP::PROTO_AUTHORITY_FORM
, nullptr);
3191 request
->method
= Http::METHOD_CONNECT
;
3192 request
->url
.host(useHost
.c_str());
3193 request
->url
.port(usePort
);
3195 http
->uri
= SBufToCstring(request
->effectiveRequestUri());
3196 http
->initRequest(request
.getRaw());
3198 request
->manager(this, http
->al
);
3200 request
->header
.putStr(Http::HOST
, useHost
.c_str());
// Tag the message source as https when the port (or a bump switch) is TLS.
3202 request
->sources
|= ((switchedToHttps() || port
->transport
.protocol
== AnyP::PROTO_HTTPS
) ? Http::Message::srcHttps
: Http::Message::srcHttp
);
3205 request
->auth_user_request
= getAuth();
// The fake request replaces normal parsing; stop reading more requests.
3209 flags
.readMore
= false;
3214 /// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
/// On open failure, decrements the planned-socket count and aborts Squid if
/// no listening socket could be opened at all.
3216 OpenedHttpSocket(const Comm::ConnectionPointer
&c
, const Ipc::FdNoteId portType
)
3218 if (!Comm::IsConnOpen(c
)) {
3219 Must(NHttpSockets
> 0); // we tried to open some
3220 --NHttpSockets
; // there will be fewer sockets than planned
3221 Must(HttpSockets
[NHttpSockets
] < 0); // no extra fds received
3223 if (!NHttpSockets
) // we could not open any listen sockets at all
3224 fatalf("Unable to open %s",FdNote(portType
));
3231 /// find any unused HttpSockets[] slot and store fd there or return false
3233 AddOpenedHttpSocket(const Comm::ConnectionPointer
&conn
)
// Scan for the first reserved (negative) slot and claim it for this fd.
3236 for (int i
= 0; i
< NHttpSockets
&& !found
; ++i
) {
3237 if ((found
= HttpSockets
[i
] < 0))
3238 HttpSockets
[i
] = conn
->fd
;
/// Walks the configured http(s)_port list, validates each port's SslBump and
/// TLS settings (disabling or skipping misconfigured ports with warnings),
/// and starts an async listener for each usable port, preserving and
/// restoring the current CodeContext around the loop.
3244 clientHttpConnectionsOpen(void)
3246 const auto savedContext
= CodeContext::Current();
3247 for (AnyP::PortCfgPointer s
= HttpPortList
; s
!= nullptr; s
= s
->next
) {
3248 CodeContext::Reset(s
);
3249 const SBuf
&scheme
= AnyP::UriScheme(s
->transport
.protocol
).image();
// Hard cap on the number of TCP listening ports.
3251 if (MAXTCPLISTENPORTS
== NHttpSockets
) {
3252 debugs(1, DBG_IMPORTANT
, "WARNING: You have too many '" << scheme
<< "_port' lines." <<
3253 Debug::Extra
<< "The limit is " << MAXTCPLISTENPORTS
<< " HTTP ports.");
3258 if (s
->flags
.tunnelSslBumping
) {
// ssl-bump on a port is useless without ssl_bump rules; disable it.
3259 if (!Config
.accessList
.ssl_bump
) {
3260 debugs(33, DBG_IMPORTANT
, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << scheme
<< "_port " << s
->s
);
3261 s
->flags
.tunnelSslBumping
= false;
// Bumping needs either a static TLS context or on-the-fly cert generation.
3263 if (!s
->secure
.staticContext
&& !s
->secure
.generateHostCertificates
) {
3264 debugs(1, DBG_IMPORTANT
, "Will not bump SSL at " << scheme
<< "_port " << s
->s
<< " due to TLS initialization failure.");
3265 s
->flags
.tunnelSslBumping
= false;
3266 if (s
->transport
.protocol
== AnyP::PROTO_HTTP
)
3267 s
->secure
.encryptTransport
= false;
3269 if (s
->flags
.tunnelSslBumping
) {
3270 // Create ssl_ctx cache for this port.
3271 Ssl::TheGlobalContextStorage().addLocalStorage(s
->s
, s
->secure
.dynamicCertMemCacheSize
);
// An encrypted port without a usable TLS context cannot be served at all.
3276 if (s
->secure
.encryptTransport
&& !s
->secure
.staticContext
) {
3277 debugs(1, DBG_CRITICAL
, "ERROR: Ignoring " << scheme
<< "_port " << s
->s
<< " due to TLS context initialization failure.");
3281 const auto protocol
= s
->transport
.protocol
;
3282 assert(protocol
== AnyP::PROTO_HTTP
|| protocol
== AnyP::PROTO_HTTPS
)
;
3283 const auto isHttps
= protocol
== AnyP::PROTO_HTTPS
;
// Subscribe the protocol-appropriate accept handler for this port.
3284 using AcceptCall
= CommCbFunPtrCallT
<CommAcceptCbPtrFun
>;
3285 RefCount
<AcceptCall
> subCall
= commCbCall(5, 5, isHttps
? "httpsAccept" : "httpAccept",
3286 CommAcceptCbPtrFun(isHttps
? httpsAccept
: httpAccept
, CommAcceptCbParams(nullptr)));
3287 clientStartListeningOn(s
, subCall
, isHttps
? Ipc::fdnHttpsSocket
: Ipc::fdnHttpSocket
);
3289 CodeContext::Reset(savedContext
);
/// Prepares a listening Comm::Connection for the given port (setting
/// TPROXY/interception/SO_REUSEPORT flags as configured), asks the IPC layer
/// to open it, routes accepted connections to subCall, and reserves an
/// HttpSockets[] slot for the pending fd.
3293 clientStartListeningOn(AnyP::PortCfgPointer
&port
, const RefCount
< CommCbFunPtrCallT
<CommAcceptCbPtrFun
> > &subCall
, const Ipc::FdNoteId fdNote
)
3295 // Fill out a Comm::Connection which IPC will open as a listener for us
3296 port
->listenConn
= new Comm::Connection
;
3297 port
->listenConn
->local
= port
->s
;
3298 port
->listenConn
->flags
=
3300 (port
->flags
.tproxyIntercept
? COMM_TRANSPARENT
: 0) |
3301 (port
->flags
.natIntercept
? COMM_INTERCEPTION
: 0) |
3302 (port
->workerQueues
? COMM_REUSEPORT
: 0);
3304 // route new connections to subCall
3305 typedef CommCbFunPtrCallT
<CommAcceptCbPtrFun
> AcceptCall
;
3306 Subscription::Pointer sub
= new CallSubscription
<AcceptCall
>(subCall
);
// clientListenerConnectionOpened() finishes setup once the fd arrives.
3307 const auto listenCall
=
3308 asyncCall(33, 2, "clientListenerConnectionOpened",
3309 ListeningStartedDialer(&clientListenerConnectionOpened
,
3310 port
, fdNote
, sub
));
3311 AsyncCallback
<Ipc::StartListeningAnswer
> callback(listenCall
);
3312 Ipc::StartListening(SOCK_STREAM
, IPPROTO_TCP
, port
->listenConn
, fdNote
, callback
);
// Reserve a slot now; -1 marks "opening in progress" for AddOpenedHttpSocket().
3314 assert(NHttpSockets
< MAXTCPLISTENPORTS
);
3315 HttpSockets
[NHttpSockets
] = -1;
3319 /// process clientHttpConnectionsOpen result
/// Validates the newly opened listening socket, starts a TcpAcceptor job for
/// it, logs what kind of traffic the port accepts, records the fd in
/// HttpSockets[], and (in foreground modes) notifies systemd of readiness.
3321 clientListenerConnectionOpened(AnyP::PortCfgPointer
&s
, const Ipc::FdNoteId portTypeNote
, const Subscription::Pointer
&sub
)
3325 if (!OpenedHttpSocket(s
->listenConn
, portTypeNote
))
3328 Must(Comm::IsConnOpen(s
->listenConn
));
3330 // TCP: setup a job to handle accept() with subscribed handler
3331 AsyncJob::Start(new Comm::TcpAcceptor(s
, FdNote(portTypeNote
), sub
));
3333 debugs(1, Important(13), "Accepting " <<
3334 (s
->flags
.natIntercept
? "NAT intercepted " : "") <<
3335 (s
->flags
.tproxyIntercept
? "TPROXY intercepted " : "") <<
3336 (s
->flags
.tunnelSslBumping
? "SSL bumped " : "") <<
3337 (s
->flags
.accelSurrogate
? "reverse-proxy " : "")
3338 << FdNote(portTypeNote
) << " connections at "
3341 Must(AddOpenedHttpSocket(s
->listenConn
)); // otherwise, we have received a fd we did not ask for
3344 // When the very first port opens, tell systemd we are able to serve connections.
3345 // Subsequent sd_notify() calls, including calls during reconfiguration,
3346 // do nothing because the first call parameter is 1.
3347 // XXX: Send the notification only after opening all configured ports.
3348 if (opt_foreground
|| opt_no_daemon
) {
3349 const auto result
= sd_notify(1, "READY=1");
3351 debugs(1, DBG_IMPORTANT
, "WARNING: failed to send start-up notification to systemd" <<
3352 Debug::Extra
<< "sd_notify() error: " << xstrerr(-result
));
/// Opens all configured client-facing listening ports (HTTP/HTTPS and FTP)
/// and aborts startup if none could be configured.
3359 clientOpenListenSockets(void)
3361 clientHttpConnectionsOpen();
3362 Ftp::StartListening();
3364 if (NHttpSockets
< 1)
3365 fatal("No HTTP, HTTPS, or FTP ports configured");
/// Closes every open HTTP(S) listening connection, stops FTP listening, and
/// resets the HttpSockets[] bookkeeping array; restores the CodeContext that
/// was current before iterating the port list.
3369 clientConnectionsClose()
3371 const auto savedContext
= CodeContext::Current();
3372 for (AnyP::PortCfgPointer s
= HttpPortList
; s
!= nullptr; s
= s
->next
) {
3373 CodeContext::Reset(s
);
3374 if (s
->listenConn
!= nullptr) {
3375 debugs(1, Important(14), "Closing HTTP(S) port " << s
->listenConn
->local
);
3376 s
->listenConn
->close();
3377 s
->listenConn
= nullptr;
3380 CodeContext::Reset(savedContext
);
3382 Ftp::StopListening();
3384 // TODO see if we can drop HttpSockets array entirely */
3385 for (int i
= 0; i
< NHttpSockets
; ++i
) {
3386 HttpSockets
[i
] = -1;
/// Compares the request's computed Vary key against the cached entry's Vary
/// metadata to decide whether the cached variant matches this request.
/// Handles first-pass (compute and store the vary mark) and second-pass
/// (detect mismatch loops) lookups; also honors the X-Accelerator-Vary
/// extension header when compiled in.
3393 varyEvaluateMatch(StoreEntry
* entry
, HttpRequest
* request
)
// vary holds the key computed on a previous pass, if any.
3395 SBuf
vary(request
->vary_headers
);
3396 const auto &reply
= entry
->mem().freshestReply();
3397 auto has_vary
= reply
.header
.has(Http::HdrType::VARY
);
3398 #if X_ACCELERATOR_VARY
3401 reply
.header
.has(Http::HdrType::HDR_X_ACCELERATOR_VARY
);
3404 if (!has_vary
|| entry
->mem_obj
->vary_headers
.isEmpty()) {
3405 if (!vary
.isEmpty()) {
3406 /* Oops... something odd is going on here.. */
3407 debugs(33, DBG_IMPORTANT
, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
3408 entry
->mem_obj
->urlXXX() << "' '" << vary
<< "'");
3409 request
->vary_headers
.clear();
3414 /* This is not a varying object */
3418 /* virtual "vary" object found. Calculate the vary key and
3419 * continue the search
3421 vary
= httpMakeVaryMark(request
, &reply
);
3423 if (!vary
.isEmpty()) {
3424 request
->vary_headers
= vary
;
3427 /* Ouch.. we cannot handle this kind of variance */
3428 /* XXX This cannot really happen, but just to be complete */
3432 if (vary
.isEmpty()) {
// No key yet on this pass: compute and remember it for the retry.
3433 vary
= httpMakeVaryMark(request
, &reply
);
3435 if (!vary
.isEmpty())
3436 request
->vary_headers
= vary
;
3439 if (vary
.isEmpty()) {
3440 /* Ouch.. we cannot handle this kind of variance */
3441 /* XXX This cannot really happen, but just to be complete */
3443 } else if (vary
.cmp(entry
->mem_obj
->vary_headers
) == 0) {
3446 /* Oops.. we have already been here and still haven't
3447 * found the requested variant. Bail out
3449 debugs(33, DBG_IMPORTANT
, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
3450 entry
->mem_obj
->urlXXX() << "' '" << vary
<< "'");
/// Creates an ACLFilledChecklist for the given access list and populates it
/// with request/connection details from the given client transaction.
3456 ACLFilledChecklist::MakingPointer
3457 clientAclChecklistCreate(const acl_access
* acl
, ClientHttpRequest
* http
)
3459 auto checklist
= ACLFilledChecklist::Make(acl
, nullptr);
3460 clientAclChecklistFill(*checklist
, http
);
/// Copies request, access-log entry, and client-connection details from the
/// transaction into the checklist, without overwriting fields already set.
3465 clientAclChecklistFill(ACLFilledChecklist
&checklist
, ClientHttpRequest
*http
)
3469 if (!checklist
.request
&& http
->request
)
3470 checklist
.setRequest(http
->request
);
3472 if (!checklist
.al
&& http
->al
) {
3473 checklist
.updateAle(http
->al
);
// Keep the ALE's request/URI fields in sync with the checklist request.
3474 checklist
.syncAle(http
->request
, http
->log_uri
);
3477 if (const auto conn
= http
->getConn())
3478 checklist
.setConn(conn
); // may already be set
/// Populates the checklist from this connection: uses the current
/// transaction's details when a request is being processed, otherwise
/// supplies connection-level details only (via setConn()).
3482 ConnStateData::fillChecklist(ACLFilledChecklist
&checklist
) const
3484 const auto context
= pipeline
.front();
3485 if (const auto http
= context
? context
->http
: nullptr)
3486 return clientAclChecklistFill(checklist
, http
); // calls checklist.setConn()
3488 // no requests, but we always have connection-level details
3489 // TODO: ACL checks should not require a mutable ConnStateData. Adjust the
3490 // code that accidentally violates that principle to remove this const_cast!
3491 checklist
.setConn(const_cast<ConnStateData
*>(this));
3493 // Set other checklist fields inside our fillConnectionLevelDetails() rather
3494 // than here because clientAclChecklistFill() code path calls that method
3495 // (via ACLFilledChecklist::setConn()) rather than calling us directly.
/// Supplies connection-scoped checklist details: client/local addresses
/// (unless a request already provided better ones) and any TLS validation
/// errors collected during server bumping.
3499 ConnStateData::fillConnectionLevelDetails(ACLFilledChecklist
&checklist
) const
3501 assert(checklist
.conn() == this);
3502 assert(clientConnection
);
3504 if (!checklist
.request
) { // preserve (better) addresses supplied by setRequest()
3505 checklist
.src_addr
= clientConnection
->remote
;
3506 checklist
.my_addr
= clientConnection
->local
; // TODO: or port->s?
3510 if (!checklist
.sslErrors
&& sslServerBump
)
3511 checklist
.sslErrors
= sslServerBump
->sslErrors();
/// Whether this client connection was intercepted (TPROXY or NAT), i.e. the
/// client did not knowingly connect to Squid.
3516 ConnStateData::transparent() const
3518 return clientConnection
!= nullptr && (clientConnection
->flags
& (COMM_TRANSPARENT
|COMM_INTERCEPTION
));
/// Prepares to receive a request body of the given size: creates the
/// producing BodyPipe and, where applicable, initializes dechunking state.
/// (Branching between the fixed-size and chunked paths is not fully visible
/// in this extract.)
3522 ConnStateData::expectRequestBody(int64_t size
)
3524 bodyPipe
= new BodyPipe(this);
3526 bodyPipe
->setBodySize(size
);
3528 startDechunkingRequest();
/// Estimates how many more request-body bytes must still be read from the
/// client: 0 when nothing more is needed, -1 when the body size is unknown,
/// otherwise the outstanding byte count beyond what inBuf already holds.
3533 ConnStateData::mayNeedToReadMoreBody() const
3536 return 0; // request without a body or read/produced all body bytes
3538 if (!bodyPipe
->bodySizeKnown())
3539 return -1; // probably need to read more, but we cannot be sure
3541 const int64_t needToProduce
= bodyPipe
->unproducedSize();
3542 const int64_t haveAvailable
= static_cast<int64_t>(inBuf
.length());
3544 if (needToProduce
<= haveAvailable
)
3545 return 0; // we have read what we need (but are waiting for pipe space)
3547 return needToProduce
- haveAvailable
;
/// Marks this connection as no longer receiving client data (recording the
/// reason), and closes the connection if sending has already stopped too.
3551 ConnStateData::stopReceiving(const char *error
)
3553 debugs(33, 4, "receiving error (" << clientConnection
<< "): " << error
<<
3554 "; old sending error: " <<
3555 (stoppedSending() ? stoppedSending_
: "none"));
3557 if (const char *oldError
= stoppedReceiving()) {
3558 debugs(33, 3, "already stopped receiving: " << oldError
);
3559 return; // nothing has changed as far as this connection is concerned
3562 stoppedReceiving_
= error
;
3564 if (const char *sendError
= stoppedSending()) {
3565 debugs(33, 3, "closing because also stopped sending: " << sendError
);
3566 clientConnection
->close();
/// Tells the request body pipe (if any) that its content will not be
/// consumed, so buffered body bytes can be discarded.
3571 ConnStateData::expectNoForwarding()
3573 if (bodyPipe
!= nullptr) {
3574 debugs(33, 4, "no consumer for virgin body " << bodyPipe
->status());
3575 bodyPipe
->expectNoConsumption();
3579 /// initialize dechunking state
3581 ConnStateData::startDechunkingRequest()
3583 Must(bodyPipe
!= nullptr);
3584 debugs(33, 5, "start dechunking" << bodyPipe
->status());
3585 assert(!bodyParser
);
// Fresh Transfer-Encoding: chunked parser for this request body.
3586 bodyParser
= new Http1::TeChunkedParser
;
3589 /// put parsed content into input buffer and clean up
/// Ends body production on the pipe (success or failure), propagates the
/// now-known body size to the current request as Content-Length, and
/// discards the chunked-encoding parser.
3591 ConnStateData::finishDechunkingRequest(bool withSuccess
)
3593 debugs(33, 5, "finish dechunking: " << withSuccess
);
3595 if (bodyPipe
!= nullptr) {
3596 debugs(33, 7, "dechunked tail: " << bodyPipe
->status());
// Keep a local ref: stopProducingFor() clears the bodyPipe member.
3597 BodyPipe::Pointer myPipe
= bodyPipe
;
3598 stopProducingFor(bodyPipe
, withSuccess
); // sets bodyPipe->bodySize()
3599 Must(!bodyPipe
); // we rely on it being nil after we are done with body
3601 Must(myPipe
->bodySizeKnown());
3602 Http::StreamPointer context
= pipeline
.front();
3603 if (context
!= nullptr && context
->http
&& context
->http
->request
)
3604 context
->http
->request
->setContentLength(myPipe
->bodySize());
3609 bodyParser
= nullptr;
3612 // XXX: this is an HTTP/1-only operation
/// Delivers an HTTP/1 1xx control message to the client: records the reply
/// in the access log entry, writes it with a completion callback, and closes
/// or skips when there is no transaction context to attach it to.
3614 ConnStateData::sendControlMsg(HttpControlMsg msg
)
3616 if (const auto context
= pipeline
.front()) {
3618 context
->http
->al
->reply
= msg
.reply
;
3622 debugs(33, 3, "ignoring 1xx due to earlier closure");
3626 // HTTP/1 1xx status messages are only valid when there is a transaction to trigger them
3627 if (!pipeline
.empty()) {
3628 HttpReply::Pointer
rep(msg
.reply
);
3630 // remember the callback
3631 cbControlMsgSent
= msg
.cbSuccess
;
// wroteControlMsg() fires when the 1xx write completes.
3633 typedef CommCbMemFunT
<HttpControlMsgSink
, CommIoCbParams
> Dialer
;
3634 AsyncCall::Pointer call
= JobCallback(33, 5, Dialer
, this, HttpControlMsgSink::wroteControlMsg
);
3636 if (!writeControlMsgAndCall(rep
.getRaw(), call
)) {
3637 // but still inform the caller (so it may resume its operation)
3638 doneWithControlMsg();
3643 debugs(33, 3, " closing due to missing context for 1xx");
3644 clientConnection
->close();
/// Completes control-message handling (notifying the waiting caller via the
/// base class) and resumes any request that was deferred while the 1xx
/// message was being written.
3648 ConnStateData::doneWithControlMsg()
3650 HttpControlMsgSink::doneWithControlMsg();
3652 if (Http::StreamPointer deferredRequest
= pipeline
.front()) {
3653 debugs(33, 3, clientConnection
<< ": calling PushDeferredIfNeeded after control msg wrote");
3654 ClientSocketContextPushDeferredIfNeeded(deferredRequest
, this);
3658 /// Our close handler called by Comm when the pinned connection is closed
3660 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams
&io
)
3662 // FwdState might repin a failed connection sooner than this close
3663 // callback is called for the failed connection.
3664 assert(pinning
.serverConnection
== io
.conn
);
3665 pinning
.closeHandler
= nullptr; // Comm unregisters handlers before calling
3666 const bool sawZeroReply
= pinning
.zeroReply
; // reset when unpinning
3667 pinning
.serverConnection
->noteClosure();
3668 unpinConnection(false);
// A server that closed without sending anything: drop the client too so it
// can retry elsewhere.
3670 if (sawZeroReply
&& clientConnection
!= nullptr) {
3671 debugs(33, 3, "Closing client connection on pinned zero reply.");
3672 clientConnection
->close();
/// Pins a server connection that is still in use by a transaction (no idle
/// monitoring is started, unlike notePinnedConnectionBecameIdle()).
3678 ConnStateData::pinBusyConnection(const Comm::ConnectionPointer
&pinServer
, const HttpRequest::Pointer
&request
)
3680 pinConnection(pinServer
, *request
);
/// Pins a now-idle server connection and starts monitoring it for remote
/// closures; kicks request parsing in case it was blocked on this pinning.
3684 ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic
)
3686 Must(pic
.connection
);
3688 pinConnection(pic
.connection
, *pic
.request
);
3690 // monitor pinned server connection for remote-end closures.
3691 startPinnedConnectionMonitoring();
3693 if (pipeline
.empty())
3694 kick(); // in case parseRequests() was blocked by a busy pic.connection
3697 /// Forward future client requests using the given server connection.
/// Records the pinned server connection together with the request's host,
/// port, and connection-auth flag (used later to validate reuse), labels the
/// server fd for cachemgr, and installs a close handler so the pinning is
/// undone when the server connection dies.
3699 ConnStateData::pinConnection(const Comm::ConnectionPointer
&pinServer
, const HttpRequest
&request
)
3701 if (Comm::IsConnOpen(pinning
.serverConnection
) &&
3702 pinning
.serverConnection
->fd
== pinServer
->fd
) {
3703 debugs(33, 3, "already pinned" << pinServer
);
3707 unpinConnection(true); // closes pinned connection, if any, and resets fields
3709 pinning
.serverConnection
= pinServer
;
3711 debugs(33, 3, pinning
.serverConnection
);
3713 Must(pinning
.serverConnection
!= nullptr);
3715 const char *pinnedHost
= "[unknown]";
3716 pinning
.host
= xstrdup(request
.url
.host());
3717 pinning
.port
= request
.url
.port();
3718 pinnedHost
= pinning
.host
;
3719 pinning
.pinned
= true;
3720 pinning
.auth
= request
.flags
.connectionAuth
;
// Build a human-readable fd note describing this pinned connection.
3721 char stmp
[MAX_IPSTRLEN
];
3722 char desc
[FD_DESC_SZ
];
3723 const auto peer
= pinning
.peer();
3724 snprintf(desc
, FD_DESC_SZ
, "%s pinned connection for %s (%d)",
3725 (pinning
.auth
|| !peer
) ? pinnedHost
: peer
->name
,
3726 clientConnection
->remote
.toUrl(stmp
,MAX_IPSTRLEN
),
3727 clientConnection
->fd
);
3728 fd_note(pinning
.serverConnection
->fd
, desc
);
3730 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
3731 pinning
.closeHandler
= JobCallback(33, 5,
3732 Dialer
, this, ConnStateData::clientPinnedConnectionClosed
);
3733 // remember the pinned connection so that cb does not unpin a fresher one
3734 typedef CommCloseCbParams Params
;
3735 Params
&params
= GetCommParams
<Params
>(pinning
.closeHandler
);
3736 params
.conn
= pinning
.serverConnection
;
3737 comm_add_close_handler(pinning
.serverConnection
->fd
, pinning
.closeHandler
);
3740 /// [re]start monitoring pinned connection for peer closures so that we can
3741 /// propagate them to an _idle_ client pinned to that peer
3743 ConnStateData::startPinnedConnectionMonitoring()
3745 if (pinning
.readHandler
!= nullptr)
3746 return; // already monitoring
// Any read activity on the idle pinned connection wakes
// clientPinnedConnectionRead().
3748 typedef CommCbMemFunT
<ConnStateData
, CommIoCbParams
> Dialer
;
3749 pinning
.readHandler
= JobCallback(33, 3,
3750 Dialer
, this, ConnStateData::clientPinnedConnectionRead
);
3751 Comm::Read(pinning
.serverConnection
, pinning
.readHandler
);
/// Cancels the idle-pinned-connection read monitor installed by
/// startPinnedConnectionMonitoring(), if any.
3755 ConnStateData::stopPinnedConnectionMonitoring()
3757 if (pinning
.readHandler
!= nullptr) {
3758 Comm::ReadCancel(pinning
.serverConnection
->fd
, pinning
.readHandler
);
3759 pinning
.readHandler
= nullptr;
/// Probes an idle pinned TLS server connection that became readable, using a
/// non-destructive SSL_read(): distinguishes unexpected application data
/// (close), benign renegotiation traffic (keep monitoring), and TLS errors.
3765 ConnStateData::handleIdleClientPinnedTlsRead()
3767 // A ready-for-reading connection means that the TLS server either closed
3768 // the connection, sent us some unexpected HTTP data, or started TLS
3769 // renegotiations. We should close the connection except for the last case.
3771 Must(pinning
.serverConnection
!= nullptr);
3772 auto ssl
= fd_table
[pinning
.serverConnection
->fd
].ssl
.get();
3777 const int readResult
= SSL_read(ssl
, buf
, sizeof(buf
));
// Any decrypted application data on an idle pinned connection is abnormal.
3779 if (readResult
> 0 || SSL_pending(ssl
) > 0) {
3780 debugs(83, 2, pinning
.serverConnection
<< " TLS application data read");
3784 switch(const int error
= SSL_get_error(ssl
, readResult
)) {
3785 case SSL_ERROR_WANT_WRITE
:
3786 debugs(83, DBG_IMPORTANT
, pinning
.serverConnection
<< " TLS SSL_ERROR_WANT_WRITE request for idle pinned connection");
3787 [[fallthrough
]]; // to restart monitoring, for now
3789 case SSL_ERROR_NONE
:
3790 case SSL_ERROR_WANT_READ
:
// Probably renegotiation noise; keep watching the connection.
3791 startPinnedConnectionMonitoring();
3795 debugs(83, 2, pinning
.serverConnection
<< " TLS error: " << error
);
3804 /// Our read handler called by Comm when the server either closes an idle pinned connection or
3805 /// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
3807 ConnStateData::clientPinnedConnectionRead(const CommIoCbParams
&io
)
3809 pinning
.readHandler
= nullptr; // Comm unregisters handlers before calling
3811 if (io
.flag
== Comm::ERR_CLOSING
)
3812 return; // close handler will clean up
3814 Must(pinning
.serverConnection
== io
.conn
);
// TLS connections get special renegotiation-aware handling first.
3817 if (handleIdleClientPinnedTlsRead())
3821 const bool clientIsIdle
= pipeline
.empty();
3823 debugs(33, 3, "idle pinned " << pinning
.serverConnection
<< " read " <<
3824 io
.size
<< (clientIsIdle
? " with idle client" : ""));
3826 pinning
.serverConnection
->close();
3828 // If we are still sending data to the client, do not close now. When we are done sending,
3829 // ConnStateData::kick() checks pinning.serverConnection and will close.
3830 // However, if we are idle, then we must close to inform the idle client and minimize races.
3831 if (clientIsIdle
&& clientConnection
!= nullptr)
3832 clientConnection
->close();
3835 Comm::ConnectionPointer
3836 ConnStateData::borrowPinnedConnection(HttpRequest
*request
, const AccessLogEntryPointer
&ale
)
3838 debugs(33, 7, pinning
.serverConnection
);
3841 const auto pinningError
= [&](const err_type type
) {
3842 unpinConnection(true);
3843 HttpRequestPointer requestPointer
= request
;
3844 return ErrorState::NewForwarding(type
, requestPointer
, ale
);
3847 if (!Comm::IsConnOpen(pinning
.serverConnection
))
3848 throw pinningError(ERR_ZERO_SIZE_OBJECT
);
3850 if (pinning
.auth
&& pinning
.host
&& strcasecmp(pinning
.host
, request
->url
.host()) != 0)
3851 throw pinningError(ERR_CANNOT_FORWARD
); // or generalize ERR_CONFLICT_HOST
3853 if (pinning
.port
!= request
->url
.port())
3854 throw pinningError(ERR_CANNOT_FORWARD
); // or generalize ERR_CONFLICT_HOST
3856 if (pinning
.serverConnection
->toGoneCachePeer())
3857 throw pinningError(ERR_ZERO_SIZE_OBJECT
);
3859 if (pinning
.peerAccessDenied
)
3860 throw pinningError(ERR_CANNOT_FORWARD
); // or generalize ERR_FORWARDING_DENIED
3862 stopPinnedConnectionMonitoring();
3863 return pinning
.serverConnection
;
3866 Comm::ConnectionPointer
3867 ConnStateData::BorrowPinnedConnection(HttpRequest
*request
, const AccessLogEntryPointer
&ale
)
3869 if (const auto connManager
= request
? request
->pinnedConnection() : nullptr)
3870 return connManager
->borrowPinnedConnection(request
, ale
);
3872 // ERR_CANNOT_FORWARD is somewhat misleading here; we can still forward, but
3873 // there is no point since the client connection is now gone
3874 HttpRequestPointer requestPointer
= request
;
3875 throw ErrorState::NewForwarding(ERR_CANNOT_FORWARD
, requestPointer
, ale
);
3879 ConnStateData::unpinConnection(const bool andClose
)
3881 debugs(33, 3, pinning
.serverConnection
);
3883 if (Comm::IsConnOpen(pinning
.serverConnection
)) {
3884 if (pinning
.closeHandler
!= nullptr) {
3885 comm_remove_close_handler(pinning
.serverConnection
->fd
, pinning
.closeHandler
);
3886 pinning
.closeHandler
= nullptr;
3889 stopPinnedConnectionMonitoring();
3891 // close the server side socket if requested
3893 pinning
.serverConnection
->close();
3894 pinning
.serverConnection
= nullptr;
3897 safe_free(pinning
.host
);
3899 pinning
.zeroReply
= false;
3900 pinning
.peerAccessDenied
= false;
3902 /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
3903 * connection has gone away */
3907 ConnStateData::terminateAll(const Error
&rawError
, const LogTagsErrors
<e
)
3909 auto error
= rawError
; // (cheap) copy so that we can detail
3910 // We detail even ERR_NONE: There should be no transactions left, and
3911 // detailed ERR_NONE will be unused. Otherwise, this detail helps in triage.
3912 if (error
.details
.empty()) {
3913 static const auto d
= MakeNamedErrorDetail("WITH_CLIENT");
3914 error
.details
.push_back(d
);
3917 debugs(33, 3, pipeline
.count() << '/' << pipeline
.nrequests
<< " after " << error
);
3919 if (pipeline
.empty()) {
3920 bareError
.update(error
); // XXX: bareLogTagsErrors
3922 // We terminate the current CONNECT/PUT/etc. context below, logging any
3923 // error details, but that context may leave unparsed bytes behind.
3924 // Consume them to stop checkLogging() from logging them again later.
3925 const auto intputToConsume
=
3927 parsingTlsHandshake
? "TLS handshake" : // more specific than CONNECT
3929 bodyPipe
? "HTTP request body" :
3930 pipeline
.back()->mayUseConnection() ? "HTTP CONNECT" :
3933 while (const auto context
= pipeline
.front()) {
3934 context
->noteIoError(error
, lte
);
3935 context
->finished(); // cleanup and self-deregister
3936 assert(context
!= pipeline
.front());
3939 if (intputToConsume
&& !inBuf
.isEmpty()) {
3940 debugs(83, 5, "forgetting client " << intputToConsume
<< " bytes: " << inBuf
.length());
3945 clientConnection
->close();
3948 /// log the last (attempt at) transaction if nobody else did
3950 ConnStateData::checkLogging()
3952 // to simplify our logic, we assume that terminateAll() has been called
3953 assert(pipeline
.empty());
3955 // do not log connections that closed after a transaction (it is normal)
3956 // TODO: access_log needs ACLs to match received-no-bytes connections
3957 if (pipeline
.nrequests
&& inBuf
.isEmpty())
3960 /* Create a temporary ClientHttpRequest object. Its destructor will log. */
3961 ClientHttpRequest
http(this);
3962 http
.req_sz
= inBuf
.length();
3963 // XXX: Or we died while waiting for the pinned connection to become idle.
3964 http
.setErrorUri("error:transaction-end-before-headers");
3965 http
.updateError(bareError
);
3969 ConnStateData::shouldPreserveClientData() const
3971 // PROXY protocol bytes are meant for us and, hence, cannot be tunneled
3972 if (needProxyProtocolHeader_
)
3975 // If our decision here is negative, configuration changes are irrelevant.
3976 // Otherwise, clientTunnelOnError() rechecks configuration before tunneling.
3977 if (!Config
.accessList
.on_unsupported_protocol
)
3980 // TODO: Figure out whether/how we can support FTP tunneling.
3981 if (port
->transport
.protocol
== AnyP::PROTO_FTP
)
3985 if (parsingTlsHandshake
)
3988 // the 1st HTTP request on a bumped connection
3989 if (!parsedBumpedRequestCount
&& switchedToHttps())
3993 // the 1st HTTP(S) request on a connection to an intercepting port
3994 if (!pipeline
.nrequests
&& transparent())
4001 ConnStateData::notes()
4004 theNotes
= new NotePairs
;
4009 operator <<(std::ostream
&os
, const ConnStateData::PinnedIdleContext
&pic
)
4011 return os
<< pic
.connection
<< ", request=" << pic
.request
;
4015 operator <<(std::ostream
&os
, const ConnStateData::ServerConnectionContext
&scc
)
4017 return os
<< scc
.conn_
<< ", srv_bytes=" << scc
.preReadServerBytes
.length();