2 * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 33 Client-side Routines */
12 \defgroup ClientSide Client-Side Logics
14 \section cserrors Errors and client side
16 \par Problem the first:
17 * the store entry is no longer authoritative on the
18 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
19 * of client_side_reply.c.
20 * Problem the second: resources are wasted if we delay in cleaning up.
21 * Problem the third: we can't depend on a connection close to clean up.
23 \par Nice thing the first:
24 * Any step in the stream can callback with data
25 * representing an error.
26 * Nice thing the second: once you stop requesting reads from upstream,
27 * upstream can be stopped too.
30 * Error has a callback mechanism to hand over a membuf
31 * with the error content. The failing node pushes that back as the
32 * reply. Can this be generalised to reduce duplicate efforts?
33 * A: Possibly. For now, only one location uses this.
34 * How to deal with pre-stream errors?
35 * Tell client_side_reply that we *want* an error page before any
36 * stream calls occur. Then we simply read as normal.
39 \section pconn_logic Persistent connection logic:
42 * requests (httpClientRequest structs) get added to the connection
43 * list, with the current one being chr
46 * The request is *immediately* kicked off, and data flows through
47 * to clientSocketRecipient.
50 * If the data that arrives at clientSocketRecipient is not for the current
51 * request, clientSocketRecipient simply returns, without requesting more
52 * data, or sending it.
55 * ConnStateData::kick() will then detect the presence of data in
56 * the next ClientHttpRequest, and will send it, re-establishing the
61 #include "acl/FilledChecklist.h"
62 #include "anyp/PortCfg.h"
63 #include "base/Subscription.h"
64 #include "base/TextException.h"
65 #include "CachePeer.h"
66 #include "client_db.h"
67 #include "client_side.h"
68 #include "client_side_reply.h"
69 #include "client_side_request.h"
70 #include "ClientRequestContext.h"
71 #include "clientStream.h"
73 #include "comm/Connection.h"
74 #include "comm/Loops.h"
75 #include "comm/Read.h"
76 #include "comm/TcpAcceptor.h"
77 #include "comm/Write.h"
78 #include "CommCalls.h"
79 #include "errorpage.h"
82 #include "fqdncache.h"
86 #include "helper/Reply.h"
88 #include "http/one/RequestParser.h"
89 #include "http/one/TeChunkedParser.h"
90 #include "http/Stream.h"
91 #include "HttpHdrContRange.h"
92 #include "HttpHeaderTools.h"
93 #include "HttpReply.h"
94 #include "HttpRequest.h"
95 #include "ident/Config.h"
96 #include "ident/Ident.h"
98 #include "ipc/FdNotes.h"
99 #include "ipc/StartListening.h"
100 #include "log/access_log.h"
102 #include "MemObject.h"
103 #include "mime_header.h"
104 #include "parser/Tokenizer.h"
105 #include "profiler/Profiler.h"
106 #include "proxyp/Header.h"
107 #include "proxyp/Parser.h"
108 #include "security/NegotiationHistory.h"
109 #include "servers/forward.h"
110 #include "SquidConfig.h"
111 #include "SquidTime.h"
112 #include "StatCounters.h"
113 #include "StatHist.h"
115 #include "TimeOrTag.h"
119 #include "auth/UserRequest.h"
122 #include "ClientInfo.h"
123 #include "MessageDelayPools.h"
127 #include "ssl/context_storage.h"
128 #include "ssl/gadgets.h"
129 #include "ssl/helper.h"
130 #include "ssl/ProxyCerts.h"
131 #include "ssl/ServerBump.h"
132 #include "ssl/support.h"
135 // for tvSubUsec() which should be in SquidTime.h
143 #define comm_close comm_lingering_close
146 /// dials clientListenerConnectionOpened call
147 class ListeningStartedDialer
: public CallDialer
, public Ipc::StartListeningCb
150 typedef void (*Handler
)(AnyP::PortCfgPointer
&portCfg
, const Ipc::FdNoteId note
, const Subscription::Pointer
&sub
);
151 ListeningStartedDialer(Handler aHandler
, AnyP::PortCfgPointer
&aPortCfg
, const Ipc::FdNoteId note
, const Subscription::Pointer
&aSub
):
152 handler(aHandler
), portCfg(aPortCfg
), portTypeNote(note
), sub(aSub
) {}
154 virtual void print(std::ostream
&os
) const {
156 ", " << FdNote(portTypeNote
) << " port=" << (void*)&portCfg
<< ')';
159 virtual bool canDial(AsyncCall
&) const { return true; }
160 virtual void dial(AsyncCall
&) { (handler
)(portCfg
, portTypeNote
, sub
); }
166 AnyP::PortCfgPointer portCfg
; ///< from HttpPortList
167 Ipc::FdNoteId portTypeNote
; ///< Type of IPC socket being opened
168 Subscription::Pointer sub
; ///< The handler to be subscribed for this connetion listener
171 static void clientListenerConnectionOpened(AnyP::PortCfgPointer
&s
, const Ipc::FdNoteId portTypeNote
, const Subscription::Pointer
&sub
);
173 static IOACB httpAccept
;
174 static CTCB clientLifetimeTimeout
;
176 static IDCB clientIdentDone
;
178 static int clientIsContentLengthValid(HttpRequest
* r
);
179 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength
);
181 static void clientUpdateStatHistCounters(const LogTags
&logType
, int svc_time
);
182 static void clientUpdateStatCounters(const LogTags
&logType
);
183 static void clientUpdateHierCounters(HierarchyLogEntry
*);
184 static bool clientPingHasFinished(ping_data
const *aPing
);
185 void prepareLogWithRequestDetails(HttpRequest
*, AccessLogEntry::Pointer
&);
186 static void ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest
, ConnStateData
* conn
);
188 char *skipLeadingSpace(char *aString
);
#if USE_IDENT
/// ident lookup callback: records the (possibly absent) ident answer
/// in the client connection's rfc931 field
static void
clientIdentDone(const char *ident, void *data)
{
    ConnStateData *conn = (ConnStateData *)data;
    // dash_str substitutes for a missing ident answer
    xstrncpy(conn->clientConnection->rfc931, ident ? ident : dash_str, USER_IDENT_SZ);
}
#endif
200 clientUpdateStatCounters(const LogTags
&logType
)
202 ++statCounter
.client_http
.requests
;
204 if (logType
.isTcpHit())
205 ++statCounter
.client_http
.hits
;
207 if (logType
.oldType
== LOG_TCP_HIT
)
208 ++statCounter
.client_http
.disk_hits
;
209 else if (logType
.oldType
== LOG_TCP_MEM_HIT
)
210 ++statCounter
.client_http
.mem_hits
;
214 clientUpdateStatHistCounters(const LogTags
&logType
, int svc_time
)
216 statCounter
.client_http
.allSvcTime
.count(svc_time
);
218 * The idea here is not to be complete, but to get service times
219 * for only well-defined types. For example, we don't include
220 * LOG_TCP_REFRESH_FAIL because its not really a cache hit
221 * (we *tried* to validate it, but failed).
224 switch (logType
.oldType
) {
226 case LOG_TCP_REFRESH_UNMODIFIED
:
227 statCounter
.client_http
.nearHitSvcTime
.count(svc_time
);
230 case LOG_TCP_INM_HIT
:
231 case LOG_TCP_IMS_HIT
:
232 statCounter
.client_http
.nearMissSvcTime
.count(svc_time
);
237 case LOG_TCP_MEM_HIT
:
239 case LOG_TCP_OFFLINE_HIT
:
240 statCounter
.client_http
.hitSvcTime
.count(svc_time
);
245 case LOG_TCP_CLIENT_REFRESH_MISS
:
246 statCounter
.client_http
.missSvcTime
.count(svc_time
);
250 /* make compiler warnings go away */
256 clientPingHasFinished(ping_data
const *aPing
)
258 if (0 != aPing
->stop
.tv_sec
&& 0 != aPing
->start
.tv_sec
)
265 clientUpdateHierCounters(HierarchyLogEntry
* someEntry
)
269 switch (someEntry
->code
) {
270 #if USE_CACHE_DIGESTS
275 ++ statCounter
.cd
.times_used
;
283 case FIRST_PARENT_MISS
:
285 case CLOSEST_PARENT_MISS
:
286 ++ statCounter
.icp
.times_used
;
287 i
= &someEntry
->ping
;
289 if (clientPingHasFinished(i
))
290 statCounter
.icp
.querySvcTime
.count(tvSubUsec(i
->start
, i
->stop
));
293 ++ statCounter
.icp
.query_timeouts
;
300 ++ statCounter
.netdb
.times_used
;
310 ClientHttpRequest::updateCounters()
312 clientUpdateStatCounters(logType
);
314 if (request
->errType
!= ERR_NONE
)
315 ++ statCounter
.client_http
.errors
;
317 clientUpdateStatHistCounters(logType
,
318 tvSubMsec(al
->cache
.start_time
, current_time
));
320 clientUpdateHierCounters(&request
->hier
);
324 prepareLogWithRequestDetails(HttpRequest
* request
, AccessLogEntry::Pointer
&aLogEntry
)
327 assert(aLogEntry
!= NULL
);
329 if (Config
.onoff
.log_mime_hdrs
) {
332 request
->header
.packInto(&mb
);
333 //This is the request after adaptation or redirection
334 aLogEntry
->headers
.adapted_request
= xstrdup(mb
.buf
);
336 // the virgin request is saved to aLogEntry->request
337 if (aLogEntry
->request
) {
339 aLogEntry
->request
->header
.packInto(&mb
);
340 aLogEntry
->headers
.request
= xstrdup(mb
.buf
);
344 const Adaptation::History::Pointer ah
= request
->adaptLogHistory();
347 ah
->lastMeta
.packInto(&mb
);
348 aLogEntry
->adapt
.last_meta
= xstrdup(mb
.buf
);
356 const Adaptation::Icap::History::Pointer ih
= request
->icapHistory();
358 ih
->processingTime(aLogEntry
->icap
.processingTime
);
361 aLogEntry
->http
.method
= request
->method
;
362 aLogEntry
->http
.version
= request
->http_ver
;
363 aLogEntry
->hier
= request
->hier
;
364 aLogEntry
->cache
.extuser
= request
->extacl_user
.termedBuf();
366 // Adapted request, if any, inherits and then collects all the stats, but
367 // the virgin request gets logged instead; copy the stats to log them.
368 // TODO: avoid losses by keeping these stats in a shared history object?
369 if (aLogEntry
->request
) {
370 aLogEntry
->request
->dnsWait
= request
->dnsWait
;
371 aLogEntry
->request
->errType
= request
->errType
;
372 aLogEntry
->request
->errDetail
= request
->errDetail
;
377 ClientHttpRequest::logRequest()
379 if (!out
.size
&& logType
.oldType
== LOG_TAG_NONE
)
380 debugs(33, 5, "logging half-baked transaction: " << log_uri
);
382 al
->icp
.opcode
= ICP_INVALID
;
384 debugs(33, 9, "clientLogRequest: al.url='" << al
->url
<< "'");
387 al
->http
.code
= al
->reply
->sline
.status();
388 al
->http
.content_type
= al
->reply
->content_type
.termedBuf();
389 } else if (loggingEntry() && loggingEntry()->mem_obj
) {
390 al
->http
.code
= loggingEntry()->mem_obj
->getReply()->sline
.status();
391 al
->http
.content_type
= loggingEntry()->mem_obj
->getReply()->content_type
.termedBuf();
394 debugs(33, 9, "clientLogRequest: http.code='" << al
->http
.code
<< "'");
396 if (loggingEntry() && loggingEntry()->mem_obj
&& loggingEntry()->objectLen() >= 0)
397 al
->cache
.objectSize
= loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?
399 al
->http
.clientRequestSz
.header
= req_sz
;
400 // the virgin request is saved to al->request
401 if (al
->request
&& al
->request
->body_pipe
)
402 al
->http
.clientRequestSz
.payloadData
= al
->request
->body_pipe
->producedSize();
403 al
->http
.clientReplySz
.header
= out
.headers_sz
;
404 // XXX: calculate without payload encoding or headers !!
405 al
->http
.clientReplySz
.payloadData
= out
.size
- out
.headers_sz
; // pretend its all un-encoded data for now.
407 al
->cache
.highOffset
= out
.offset
;
409 al
->cache
.code
= logType
;
411 tvSub(al
->cache
.trTime
, al
->cache
.start_time
, current_time
);
414 prepareLogWithRequestDetails(request
, al
);
418 /* This is broken. Fails if the connection has been closed. Needs
419 * to snarf the ssl details some place earlier..
421 if (getConn() != NULL
)
422 al
->cache
.ssluser
= sslGetUserEmail(fd_table
[getConn()->fd
].ssl
);
426 /* Add notes (if we have a request to annotate) */
429 for (auto h
: Config
.notes
) {
430 if (h
->match(request
, al
->reply
, NULL
, matched
)) {
431 request
->notes()->add(h
->key(), matched
);
432 debugs(33, 3, h
->key() << " " << matched
);
435 // The al->notes and request->notes must point to the same object.
436 al
->syncNotes(request
);
439 ACLFilledChecklist
checklist(NULL
, request
, NULL
);
441 checklist
.reply
= al
->reply
;
442 HTTPMSGLOCK(checklist
.reply
);
446 HTTPMSGUNLOCK(al
->adapted_request
);
447 al
->adapted_request
= request
;
448 HTTPMSGLOCK(al
->adapted_request
);
450 // no need checklist.syncAle(): already synced
452 accessLogLog(al
, &checklist
);
454 bool updatePerformanceCounters
= true;
455 if (Config
.accessList
.stats_collection
) {
456 ACLFilledChecklist
statsCheck(Config
.accessList
.stats_collection
, request
, NULL
);
459 statsCheck
.reply
= al
->reply
;
460 HTTPMSGLOCK(statsCheck
.reply
);
462 updatePerformanceCounters
= statsCheck
.fastCheck().allowed();
465 if (updatePerformanceCounters
) {
469 if (getConn() != NULL
&& getConn()->clientConnection
!= NULL
)
470 clientdbUpdate(getConn()->clientConnection
->remote
, logType
, AnyP::PROTO_HTTP
, out
.size
);
475 ClientHttpRequest::freeResources()
478 safe_free(redirect
.location
);
479 range_iter
.boundary
.clean();
482 if (client_stream
.tail
)
483 clientStreamAbort((clientStreamNode
*)client_stream
.tail
->data
, this);
487 httpRequestFree(void *data
)
489 ClientHttpRequest
*http
= (ClientHttpRequest
*)data
;
490 assert(http
!= NULL
);
494 /* This is a handler normally called by comm_close() */
495 void ConnStateData::connStateClosed(const CommCloseCbParams
&)
497 deleteThis("ConnStateData::connStateClosed");
#if USE_AUTH
/// installs, confirms, or revokes this connection's auth credentials;
/// unexpected credential changes/erasure force connection closure
void
ConnStateData::setAuth(const Auth::UserRequest::Pointer &aur, const char *by)
{
    if (auth_ == NULL) {
        if (aur != NULL) {
            debugs(33, 2, "Adding connection-auth to " << clientConnection << " from " << by);
            auth_ = aur;
        }
        return;
    }

    // clobbered with self-pointer
    // NP: something nasty is going on in Squid, but harmless.
    if (aur == auth_) {
        debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection << " from " << by);
        return;
    }

    /*
     * Connection-auth relies on a single set of credentials being preserved
     * for all requests on a connection once they have been setup.
     * There are several things which need to happen to preserve security
     * when connection-auth credentials change unexpectedly or are unset.
     *
     * 1) auth helper released from any active state
     *
     * They can only be reserved by a handshake process which this
     * connection can now never complete.
     * This prevents helpers hanging when their connections close.
     *
     * 2) pinning is expected to be removed and server conn closed
     *
     * The upstream link is authenticated with the same credentials.
     * Expecting the same level of consistency we should have received.
     * This prevents upstream being faced with multiple or missing
     * credentials after authentication.
     * NP: un-pin is left to the cleanup in ConnStateData::swanSong()
     *     we just trigger that cleanup here via comm_reset_close() or
     *     ConnStateData::stopReceiving()
     *
     * 3) the connection needs to close.
     *
     * This prevents attackers injecting requests into a connection,
     * or gateways wrongly multiplexing users into a single connection.
     *
     * When credentials are missing closure needs to follow an auth
     * challenge for best recovery by the client.
     *
     * When credentials change there is nothing we can do but abort as
     * fast as possible. Sending TCP RST instead of an HTTP response
     * is the best-case action.
     */

    // clobbered with nul-pointer
    if (aur == NULL) {
        debugs(33, 2, "WARNING: Graceful closure on " << clientConnection << " due to connection-auth erase from " << by);
        auth_->releaseAuthServer();
        auth_ = NULL;
        // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
        // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
        //     we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
        stopReceiving("connection-auth removed");
        return;
    }

    // clobbered with alternative credentials
    if (aur != auth_) {
        debugs(33, 2, "ERROR: Closing " << clientConnection << " due to change of connection-auth from " << by);
        auth_->releaseAuthServer();
        auth_ = NULL;
        // this is a fatal type of problem.
        // Close the connection immediately with TCP RST to abort all traffic flow
        comm_reset_close(clientConnection);
        return;
    }

    /* NOT REACHABLE */
}
#endif
581 // cleans up before destructor is called
583 ConnStateData::swanSong()
585 debugs(33, 2, HERE
<< clientConnection
);
588 flags
.readMore
= false;
589 clientdbEstablished(clientConnection
->remote
, -1); /* decrement */
590 pipeline
.terminateAll(0);
592 // XXX: Closing pinned conn is too harsh: The Client may want to continue!
593 unpinConnection(true);
595 Server::swanSong(); // closes the client connection
598 // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
599 setAuth(NULL
, "ConnStateData::SwanSong cleanup");
602 flags
.swanSang
= true;
606 ConnStateData::isOpen() const
608 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
609 Comm::IsConnOpen(clientConnection
) &&
610 !fd_table
[clientConnection
->fd
].closing();
613 ConnStateData::~ConnStateData()
615 debugs(33, 3, HERE
<< clientConnection
);
618 debugs(33, DBG_IMPORTANT
, "BUG: ConnStateData did not close " << clientConnection
);
621 debugs(33, DBG_IMPORTANT
, "BUG: ConnStateData was not destroyed properly; " << clientConnection
);
623 if (bodyPipe
!= NULL
)
624 stopProducingFor(bodyPipe
, false);
626 delete bodyParser
; // TODO: pool
629 delete sslServerBump
;
634 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
635 * This is the client-side persistent connection flag. We need
636 * to set this relatively early in the request processing
637 * to handle hacks for broken servers and clients.
640 clientSetKeepaliveFlag(ClientHttpRequest
* http
)
642 HttpRequest
*request
= http
->request
;
644 debugs(33, 3, "http_ver = " << request
->http_ver
);
645 debugs(33, 3, "method = " << request
->method
);
647 // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
648 request
->flags
.proxyKeepalive
= request
->persistent();
651 /// checks body length of non-chunked requests
653 clientIsContentLengthValid(HttpRequest
* r
)
655 // No Content-Length means this request just has no body, but conflicting
656 // Content-Lengths mean a message framing error (RFC 7230 Section 3.3.3 #4).
657 if (r
->header
.conflictingContentLength())
660 switch (r
->method
.id()) {
662 case Http::METHOD_GET
:
664 case Http::METHOD_HEAD
:
665 /* We do not want to see a request entity on GET/HEAD requests */
666 return (r
->content_length
<= 0 || Config
.onoff
.request_entities
);
669 /* For other types of requests we don't care */
677 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength
)
679 if (Config
.maxRequestBodySize
&&
680 bodyLength
> Config
.maxRequestBodySize
)
681 return 1; /* too large */
687 ClientHttpRequest::multipartRangeRequest() const
689 return request
->multipartRangeRequest();
693 clientPackTermBound(String boundary
, MemBuf
*mb
)
695 mb
->appendf("\r\n--" SQUIDSTRINGPH
"--\r\n", SQUIDSTRINGPRINT(boundary
));
696 debugs(33, 6, "buf offset: " << mb
->size
);
700 clientPackRangeHdr(const HttpReplyPointer
&rep
, const HttpHdrRangeSpec
* spec
, String boundary
, MemBuf
* mb
)
702 HttpHeader
hdr(hoReply
);
707 debugs(33, 5, "appending boundary: " << boundary
);
708 /* rfc2046 requires to _prepend_ boundary with <crlf>! */
709 mb
->appendf("\r\n--" SQUIDSTRINGPH
"\r\n", SQUIDSTRINGPRINT(boundary
));
711 /* stuff the header with required entries and pack it */
713 if (rep
->header
.has(Http::HdrType::CONTENT_TYPE
))
714 hdr
.putStr(Http::HdrType::CONTENT_TYPE
, rep
->header
.getStr(Http::HdrType::CONTENT_TYPE
));
716 httpHeaderAddContRange(&hdr
, *spec
, rep
->content_length
);
721 /* append <crlf> (we packed a header, not a reply) */
722 mb
->append("\r\n", 2);
725 /** returns expected content length for multi-range replies
726 * note: assumes that httpHdrRangeCanonize has already been called
727 * warning: assumes that HTTP headers for individual ranges at the
728 * time of the actuall assembly will be exactly the same as
729 * the headers when clientMRangeCLen() is called */
731 ClientHttpRequest::mRangeCLen()
739 HttpHdrRange::iterator pos
= request
->range
->begin();
741 while (pos
!= request
->range
->end()) {
742 /* account for headers for this range */
744 clientPackRangeHdr(memObject()->getReply(),
745 *pos
, range_iter
.boundary
, &mb
);
748 /* account for range content */
749 clen
+= (*pos
)->length
;
751 debugs(33, 6, "clientMRangeCLen: (clen += " << mb
.size
<< " + " << (*pos
)->length
<< ") == " << clen
);
755 /* account for the terminating boundary */
758 clientPackTermBound(range_iter
.boundary
, &mb
);
768 * generates a "unique" boundary string for multipart responses
769 * the caller is responsible for cleaning the string */
771 ClientHttpRequest::rangeBoundaryStr() const
774 String
b(APP_FULLNAME
);
776 key
= storeEntry()->getMD5Text();
777 b
.append(key
, strlen(key
));
782 * Write a chunk of data to a client socket. If the reply is present,
783 * send the reply headers down the wire too, and clean them up when
786 * The request is one backed by a connection, not an internal request.
787 * data context is not NULL
788 * There are no more entries in the stream chain.
791 clientSocketRecipient(clientStreamNode
* node
, ClientHttpRequest
* http
,
792 HttpReply
* rep
, StoreIOBuffer receivedData
)
794 // do not try to deliver if client already ABORTED
795 if (!http
->getConn() || !cbdataReferenceValid(http
->getConn()) || !Comm::IsConnOpen(http
->getConn()->clientConnection
))
798 /* Test preconditions */
799 assert(node
!= NULL
);
800 PROF_start(clientSocketRecipient
);
801 /* TODO: handle this rather than asserting
802 * - it should only ever happen if we cause an abort and
803 * the callback chain loops back to here, so we can simply return.
804 * However, that itself shouldn't happen, so it stays as an assert for now.
806 assert(cbdataReferenceValid(node
));
807 assert(node
->node
.next
== NULL
);
808 Http::StreamPointer context
= dynamic_cast<Http::Stream
*>(node
->data
.getRaw());
809 assert(context
!= NULL
);
811 /* TODO: check offset is what we asked for */
813 // TODO: enforces HTTP/1 MUST on pipeline order, but is irrelevant to HTTP/2
814 if (context
!= http
->getConn()->pipeline
.front())
815 context
->deferRecipientForLater(node
, rep
, receivedData
);
816 else if (http
->getConn()->cbControlMsgSent
) // 1xx to the user is pending
817 context
->deferRecipientForLater(node
, rep
, receivedData
);
819 http
->getConn()->handleReply(rep
, receivedData
);
821 PROF_stop(clientSocketRecipient
);
825 * Called when a downstream node is no longer interested in
826 * our data. As we are a terminal node, this means on aborts
830 clientSocketDetach(clientStreamNode
* node
, ClientHttpRequest
* http
)
832 /* Test preconditions */
833 assert(node
!= NULL
);
834 /* TODO: handle this rather than asserting
835 * - it should only ever happen if we cause an abort and
836 * the callback chain loops back to here, so we can simply return.
837 * However, that itself shouldn't happen, so it stays as an assert for now.
839 assert(cbdataReferenceValid(node
));
840 /* Set null by ContextFree */
841 assert(node
->node
.next
== NULL
);
842 /* this is the assert discussed above */
843 assert(NULL
== dynamic_cast<Http::Stream
*>(node
->data
.getRaw()));
844 /* We are only called when the client socket shutsdown.
845 * Tell the prev pipeline member we're finished
847 clientStreamDetach(node
, http
);
851 ConnStateData::readNextRequest()
853 debugs(33, 5, HERE
<< clientConnection
<< " reading next req");
855 fd_note(clientConnection
->fd
, "Idle client: Waiting for next request");
857 * Set the timeout BEFORE calling readSomeData().
859 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
860 AsyncCall::Pointer timeoutCall
= JobCallback(33, 5,
861 TimeoutDialer
, this, ConnStateData::requestTimeout
);
862 commSetConnTimeout(clientConnection
, clientConnection
->timeLeft(idleTimeout()), timeoutCall
);
865 /** Please don't do anything with the FD past here! */
869 ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest
, ConnStateData
* conn
)
871 debugs(33, 2, HERE
<< conn
->clientConnection
<< " Sending next");
873 /** If the client stream is waiting on a socket write to occur, then */
875 if (deferredRequest
->flags
.deferred
) {
876 /** NO data is allowed to have been sent. */
877 assert(deferredRequest
->http
->out
.size
== 0);
879 clientSocketRecipient(deferredRequest
->deferredparams
.node
,
880 deferredRequest
->http
,
881 deferredRequest
->deferredparams
.rep
,
882 deferredRequest
->deferredparams
.queuedBuffer
);
885 /** otherwise, the request is still active in a callbacksomewhere,
891 ConnStateData::kick()
893 if (!Comm::IsConnOpen(clientConnection
)) {
894 debugs(33, 2, clientConnection
<< " Connection was closed");
898 if (pinning
.pinned
&& !Comm::IsConnOpen(pinning
.serverConnection
)) {
899 debugs(33, 2, clientConnection
<< " Connection was pinned but server side gone. Terminating client connection");
900 clientConnection
->close();
905 * We are done with the response, and we are either still receiving request
906 * body (early response!) or have already stopped receiving anything.
908 * If we are still receiving, then clientParseRequest() below will fail.
909 * (XXX: but then we will call readNextRequest() which may succeed and
910 * execute a smuggled request as we are not done with the current request).
912 * If we stopped because we got everything, then try the next request.
914 * If we stopped receiving because of an error, then close now to avoid
915 * getting stuck and to prevent accidental request smuggling.
918 if (const char *reason
= stoppedReceiving()) {
919 debugs(33, 3, "closing for earlier request error: " << reason
);
920 clientConnection
->close();
925 * Attempt to parse a request from the request buffer.
926 * If we've been fed a pipelined request it may already
927 * be in our read buffer.
930 * This needs to fall through - if we're unlucky and parse the _last_ request
931 * from our read buffer we may never re-register for another client read.
934 if (clientParseRequests()) {
935 debugs(33, 3, clientConnection
<< ": parsed next request from buffer");
939 * Either we need to kick-start another read or, if we have
940 * a half-closed connection, kill it after the last request.
941 * This saves waiting for half-closed connections to finished being
942 * half-closed _AND_ then, sometimes, spending "Timeout" time in
943 * the keepalive "Waiting for next request" state.
945 if (commIsHalfClosed(clientConnection
->fd
) && pipeline
.empty()) {
946 debugs(33, 3, "half-closed client with no pending requests, closing");
947 clientConnection
->close();
952 * At this point we either have a parsed request (which we've
953 * kicked off the processing for) or not. If we have a deferred
954 * request (parsed but deferred for pipeling processing reasons)
955 * then look at processing it. If not, simply kickstart
958 Http::StreamPointer deferredRequest
= pipeline
.front();
959 if (deferredRequest
!= nullptr) {
960 debugs(33, 3, clientConnection
<< ": calling PushDeferredIfNeeded");
961 ClientSocketContextPushDeferredIfNeeded(deferredRequest
, this);
962 } else if (flags
.readMore
) {
963 debugs(33, 3, clientConnection
<< ": calling readNextRequest()");
966 // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
967 debugs(33, DBG_IMPORTANT
, MYNAME
<< "abandoning " << clientConnection
);
972 ConnStateData::stopSending(const char *error
)
974 debugs(33, 4, HERE
<< "sending error (" << clientConnection
<< "): " << error
<<
975 "; old receiving error: " <<
976 (stoppedReceiving() ? stoppedReceiving_
: "none"));
978 if (const char *oldError
= stoppedSending()) {
979 debugs(33, 3, HERE
<< "already stopped sending: " << oldError
);
980 return; // nothing has changed as far as this connection is concerned
982 stoppedSending_
= error
;
984 if (!stoppedReceiving()) {
985 if (const int64_t expecting
= mayNeedToReadMoreBody()) {
986 debugs(33, 5, HERE
<< "must still read " << expecting
<<
987 " request body bytes with " << inBuf
.length() << " unused");
988 return; // wait for the request receiver to finish reading
992 clientConnection
->close();
996 ConnStateData::afterClientWrite(size_t size
)
998 if (pipeline
.empty())
1001 auto ctx
= pipeline
.front();
1003 statCounter
.client_http
.kbytes_out
+= size
;
1004 if (ctx
->http
->logType
.isTcpHit())
1005 statCounter
.client_http
.hit_kbytes_out
+= size
;
1007 ctx
->writeComplete(size
);
1011 ConnStateData::abortRequestParsing(const char *const uri
)
1013 ClientHttpRequest
*http
= new ClientHttpRequest(this);
1014 http
->req_sz
= inBuf
.length();
1015 http
->setErrorUri(uri
);
1016 auto *context
= new Http::Stream(clientConnection
, http
);
1017 StoreIOBuffer tempBuffer
;
1018 tempBuffer
.data
= context
->reqbuf
;
1019 tempBuffer
.length
= HTTP_REQBUF_SZ
;
1020 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
1021 clientReplyStatus
, new clientReplyContext(http
), clientSocketRecipient
,
1022 clientSocketDetach
, context
, tempBuffer
);
1027 ConnStateData::startShutdown()
1029 // RegisteredRunner API callback - Squid has been shut down
1031 // if connection is idle terminate it now,
1032 // otherwise wait for grace period to end
1033 if (pipeline
.empty())
1038 ConnStateData::endingShutdown()
1040 // RegisteredRunner API callback - Squid shutdown grace period is over
1042 // force the client connection to close immediately
1043 // swanSong() in the close handler will cleanup.
1044 if (Comm::IsConnOpen(clientConnection
))
1045 clientConnection
->close();
/* Returns a pointer past any leading whitespace in aString.
 * NOTE(review): the extraction of this chunk is missing the function's
 * return-type line, the loop body, and the return statement; the visible
 * text tests *aString while `result` still holds the original pointer —
 * confirm against upstream which pointer is advanced and which is returned
 * before touching this function. */
1049 skipLeadingSpace(char *aString
)
1051 char *result
= aString
;
/* presumably the (missing) loop body advances the scan pointer — TODO confirm */
1053 while (xisspace(*aString
))
1060 * 'end' defaults to NULL for backwards compatibility
1061 * remove default value if we ever get rid of NULL-terminated
1065 findTrailingHTTPVersion(const char *uriAndHTTPVersion
, const char *end
)
1068 end
= uriAndHTTPVersion
+ strcspn(uriAndHTTPVersion
, "\r\n");
1072 for (; end
> uriAndHTTPVersion
; --end
) {
1073 if (*end
== '\n' || *end
== '\r')
1076 if (xisspace(*end
)) {
1077 if (strncasecmp(end
+ 1, "HTTP/", 5) == 0)
1088 prepareAcceleratedURL(ConnStateData
* conn
, const Http1::RequestParserPointer
&hp
)
1090 int vhost
= conn
->port
->vhost
;
1091 int vport
= conn
->port
->vport
;
1092 static char ipbuf
[MAX_IPSTRLEN
];
1094 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1096 static const SBuf
cache_object("cache_object://");
1097 if (hp
->requestUri().startsWith(cache_object
))
1098 return nullptr; /* already in good shape */
1100 // XXX: re-use proper URL parser for this
1101 SBuf url
= hp
->requestUri(); // use full provided URI if we abort
1102 do { // use a loop so we can break out of it
1103 ::Parser::Tokenizer
tok(url
);
1104 if (tok
.skip('/')) // origin-form URL already.
1107 if (conn
->port
->vhost
)
1108 return nullptr; /* already in good shape */
1110 // skip the URI scheme
1111 static const CharacterSet uriScheme
= CharacterSet("URI-scheme","+-.") + CharacterSet::ALPHA
+ CharacterSet::DIGIT
;
1112 static const SBuf
uriSchemeEnd("://");
1113 if (!tok
.skipAll(uriScheme
) || !tok
.skip(uriSchemeEnd
))
1116 // skip the authority segment
1117 // RFC 3986 complex nested ABNF for "authority" boils down to this:
1118 static const CharacterSet authority
= CharacterSet("authority","-._~%:@[]!$&'()*+,;=") +
1119 CharacterSet::HEXDIG
+ CharacterSet::ALPHA
+ CharacterSet::DIGIT
;
1120 if (!tok
.skipAll(authority
))
1123 static const SBuf
slashUri("/");
1124 const SBuf t
= tok
.remaining();
1127 else if (t
[0]=='/') // looks like path
1129 else if (t
[0]=='?' || t
[0]=='#') { // looks like query or fragment. fix '/'
1132 } // else do nothing. invalid path
1136 #if SHOULD_REJECT_UNKNOWN_URLS
1137 // reject URI which are not well-formed even after the processing above
1138 if (url
.isEmpty() || url
[0] != '/') {
1139 hp
->parseStatusCode
= Http::scBadRequest
;
1140 return conn
->abortRequestParsing("error:invalid-request");
1145 vport
= conn
->clientConnection
->local
.port();
1148 if (vhost
&& (host
= hp
->getHeaderField("Host"))) {
1149 debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host
<< " + vport=" << vport
);
1154 if (host
[strlen(host
) - 1] != ']' && (t
= strrchr(host
,':')) != nullptr) {
1155 strncpy(thost
, host
, (t
-host
));
1156 snprintf(thost
+(t
-host
), sizeof(thost
)-(t
-host
), ":%d", vport
);
1159 snprintf(thost
, sizeof(thost
), "%s:%d",host
, vport
);
1162 } // else nothing to alter port-wise.
1163 const SBuf
&scheme
= AnyP::UriScheme(conn
->transferProtocol
.protocol
).image();
1164 const int url_sz
= scheme
.length() + strlen(host
) + url
.length() + 32;
1165 char *uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1166 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://%s" SQUIDSBUFPH
, SQUIDSBUFPRINT(scheme
), host
, SQUIDSBUFPRINT(url
));
1167 debugs(33, 5, "ACCEL VHOST REWRITE: " << uri
);
1169 } else if (conn
->port
->defaultsite
/* && !vhost */) {
1170 debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn
->port
->defaultsite
<< " + vport=" << vport
);
1174 snprintf(vportStr
, sizeof(vportStr
),":%d",vport
);
1176 const SBuf
&scheme
= AnyP::UriScheme(conn
->transferProtocol
.protocol
).image();
1177 const int url_sz
= scheme
.length() + strlen(conn
->port
->defaultsite
) + sizeof(vportStr
) + url
.length() + 32;
1178 char *uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1179 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://%s%s" SQUIDSBUFPH
,
1180 SQUIDSBUFPRINT(scheme
), conn
->port
->defaultsite
, vportStr
, SQUIDSBUFPRINT(url
));
1181 debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << uri
);
1183 } else if (vport
> 0 /* && (!vhost || no Host:) */) {
1184 debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport
);
1185 /* Put the local socket IP address as the hostname, with whatever vport we found */
1186 conn
->clientConnection
->local
.toHostStr(ipbuf
,MAX_IPSTRLEN
);
1187 const SBuf
&scheme
= AnyP::UriScheme(conn
->transferProtocol
.protocol
).image();
1188 const int url_sz
= scheme
.length() + sizeof(ipbuf
) + url
.length() + 32;
1189 char *uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1190 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://%s:%d" SQUIDSBUFPH
,
1191 SQUIDSBUFPRINT(scheme
), ipbuf
, vport
, SQUIDSBUFPRINT(url
));
1192 debugs(33, 5, "ACCEL VPORT REWRITE: " << uri
);
1200 buildUrlFromHost(ConnStateData
* conn
, const Http1::RequestParserPointer
&hp
)
1202 char *uri
= nullptr;
1203 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1204 if (const char *host
= hp
->getHeaderField("Host")) {
1205 const SBuf
&scheme
= AnyP::UriScheme(conn
->transferProtocol
.protocol
).image();
1206 const int url_sz
= scheme
.length() + strlen(host
) + hp
->requestUri().length() + 32;
1207 uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1208 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://%s" SQUIDSBUFPH
,
1209 SQUIDSBUFPRINT(scheme
),
1211 SQUIDSBUFPRINT(hp
->requestUri()));
1217 ConnStateData::prepareTlsSwitchingURL(const Http1::RequestParserPointer
&hp
)
1219 Must(switchedToHttps());
1221 if (!hp
->requestUri().isEmpty() && hp
->requestUri()[0] != '/')
1222 return nullptr; /* already in good shape */
1224 char *uri
= buildUrlFromHost(this, hp
);
1227 Must(tlsConnectPort
);
1228 Must(sslConnectHostOrIp
.size());
1230 if (!tlsClientSni().isEmpty())
1231 useHost
= tlsClientSni();
1233 useHost
.assign(sslConnectHostOrIp
.rawBuf(), sslConnectHostOrIp
.size());
1235 const SBuf
&scheme
= AnyP::UriScheme(transferProtocol
.protocol
).image();
1236 const int url_sz
= scheme
.length() + useHost
.length() + hp
->requestUri().length() + 32;
1237 uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1238 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://" SQUIDSBUFPH
":%d" SQUIDSBUFPH
,
1239 SQUIDSBUFPRINT(scheme
),
1240 SQUIDSBUFPRINT(useHost
),
1242 SQUIDSBUFPRINT(hp
->requestUri()));
1246 debugs(33, 5, "TLS switching host rewrite: " << uri
);
1251 prepareTransparentURL(ConnStateData
* conn
, const Http1::RequestParserPointer
&hp
)
1253 // TODO Must() on URI !empty when the parser supports throw. For now avoid assert().
1254 if (!hp
->requestUri().isEmpty() && hp
->requestUri()[0] != '/')
1255 return nullptr; /* already in good shape */
1257 char *uri
= buildUrlFromHost(conn
, hp
);
1259 /* Put the local socket IP address as the hostname. */
1260 static char ipbuf
[MAX_IPSTRLEN
];
1261 conn
->clientConnection
->local
.toHostStr(ipbuf
,MAX_IPSTRLEN
);
1262 const SBuf
&scheme
= AnyP::UriScheme(conn
->transferProtocol
.protocol
).image();
1263 const int url_sz
= sizeof(ipbuf
) + hp
->requestUri().length() + 32;
1264 uri
= static_cast<char *>(xcalloc(url_sz
, 1));
1265 snprintf(uri
, url_sz
, SQUIDSBUFPH
"://%s:%d" SQUIDSBUFPH
,
1266 SQUIDSBUFPRINT(scheme
),
1267 ipbuf
, conn
->clientConnection
->local
.port(), SQUIDSBUFPRINT(hp
->requestUri()));
1271 debugs(33, 5, "TRANSPARENT REWRITE: " << uri
);
1275 /** Parse an HTTP request
1277 * \note Sets result->flags.parsed_ok to 0 if failed to parse the request,
1278 * to 1 if the request was correctly parsed.
1279 * \param[in] csd a ConnStateData. The caller must make sure it is not null
1280 * \param[in] hp an Http1::RequestParser
1281 * \param[out] method_p will be set as a side-effect of the parsing.
1282 * Pointed-to value will be set to Http::METHOD_NONE in case of
1284 * \param[out] http_ver will be set as a side-effect of the parsing
1285 * \return NULL on incomplete requests,
1286 * a Http::Stream on success or failure.
1289 parseHttpRequest(ConnStateData
*csd
, const Http1::RequestParserPointer
&hp
)
1291 /* Attempt to parse the first line; this will define where the method, url, version and header begin */
1293 const bool parsedOk
= hp
->parse(csd
->inBuf
);
1295 // sync the buffers after parsing.
1296 csd
->inBuf
= hp
->remaining();
1298 if (hp
->needsMoreData()) {
1299 debugs(33, 5, "Incomplete request, waiting for end of request line");
1303 if (csd
->mayTunnelUnsupportedProto()) {
1304 csd
->preservedClientData
= hp
->parsed();
1305 csd
->preservedClientData
.append(csd
->inBuf
);
1310 hp
->parseStatusCode
== Http::scRequestHeaderFieldsTooLarge
||
1311 hp
->parseStatusCode
== Http::scUriTooLong
;
1312 auto result
= csd
->abortRequestParsing(
1313 tooBig
? "error:request-too-large" : "error:invalid-request");
1314 // assume that remaining leftovers belong to this bad request
1315 if (!csd
->inBuf
.isEmpty())
1316 csd
->consumeInput(csd
->inBuf
.length());
1321 /* We know the whole request is in parser now */
1322 debugs(11, 2, "HTTP Client " << csd
->clientConnection
);
1323 debugs(11, 2, "HTTP Client REQUEST:\n---------\n" <<
1324 hp
->method() << " " << hp
->requestUri() << " " << hp
->messageProtocol() << "\n" <<
1328 /* deny CONNECT via accelerated ports */
1329 if (hp
->method() == Http::METHOD_CONNECT
&& csd
->port
!= NULL
&& csd
->port
->flags
.accelSurrogate
) {
1330 debugs(33, DBG_IMPORTANT
, "WARNING: CONNECT method received on " << csd
->transferProtocol
<< " Accelerator port " << csd
->port
->s
.port());
1331 debugs(33, DBG_IMPORTANT
, "WARNING: for request: " << hp
->method() << " " << hp
->requestUri() << " " << hp
->messageProtocol());
1332 hp
->parseStatusCode
= Http::scMethodNotAllowed
;
1333 return csd
->abortRequestParsing("error:method-not-allowed");
1336 /* RFC 7540 section 11.6 registers the method PRI as HTTP/2 specific
1337 * Deny "PRI" method if used in HTTP/1.x or 0.9 versions.
1338 * If seen it signals a broken client or proxy has corrupted the traffic.
1340 if (hp
->method() == Http::METHOD_PRI
&& hp
->messageProtocol() < Http::ProtocolVersion(2,0)) {
1341 debugs(33, DBG_IMPORTANT
, "WARNING: PRI method received on " << csd
->transferProtocol
<< " port " << csd
->port
->s
.port());
1342 debugs(33, DBG_IMPORTANT
, "WARNING: for request: " << hp
->method() << " " << hp
->requestUri() << " " << hp
->messageProtocol());
1343 hp
->parseStatusCode
= Http::scMethodNotAllowed
;
1344 return csd
->abortRequestParsing("error:method-not-allowed");
1347 if (hp
->method() == Http::METHOD_NONE
) {
1348 debugs(33, DBG_IMPORTANT
, "WARNING: Unsupported method: " << hp
->method() << " " << hp
->requestUri() << " " << hp
->messageProtocol());
1349 hp
->parseStatusCode
= Http::scMethodNotAllowed
;
1350 return csd
->abortRequestParsing("error:unsupported-request-method");
1353 // Process headers after request line
1354 debugs(33, 3, "complete request received. " <<
1355 "prefix_sz = " << hp
->messageHeaderSize() <<
1356 ", request-line-size=" << hp
->firstLineSize() <<
1357 ", mime-header-size=" << hp
->headerBlockSize() <<
1358 ", mime header block:\n" << hp
->mimeHeader() << "\n----------");
1360 /* Ok, all headers are received */
1361 ClientHttpRequest
*http
= new ClientHttpRequest(csd
);
1363 http
->req_sz
= hp
->messageHeaderSize();
1364 Http::Stream
*result
= new Http::Stream(csd
->clientConnection
, http
);
1366 StoreIOBuffer tempBuffer
;
1367 tempBuffer
.data
= result
->reqbuf
;
1368 tempBuffer
.length
= HTTP_REQBUF_SZ
;
1370 ClientStreamData newServer
= new clientReplyContext(http
);
1371 ClientStreamData newClient
= result
;
1372 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
1373 clientReplyStatus
, newServer
, clientSocketRecipient
,
1374 clientSocketDetach
, newClient
, tempBuffer
);
1377 debugs(33,5, "Prepare absolute URL from " <<
1378 (csd
->transparent()?"intercept":(csd
->port
->flags
.accelSurrogate
? "accel":"")));
1379 /* Rewrite the URL in transparent or accelerator mode */
1380 /* NP: there are several cases to traverse here:
1381 * - standard mode (forward proxy)
1382 * - transparent mode (TPROXY)
1383 * - transparent mode with failures
1384 * - intercept mode (NAT)
1385 * - intercept mode with failures
1386 * - accelerator mode (reverse proxy)
1387 * - internal relative-URL
1388 * - mixed combos of the above with internal URL
1389 * - remote interception with PROXY protocol
1390 * - remote reverse-proxy with PROXY protocol
1392 if (csd
->switchedToHttps()) {
1393 http
->uri
= csd
->prepareTlsSwitchingURL(hp
);
1394 } else if (csd
->transparent()) {
1395 /* intercept or transparent mode, properly working with no failures */
1396 http
->uri
= prepareTransparentURL(csd
, hp
);
1398 } else if (internalCheck(hp
->requestUri())) { // NP: only matches relative-URI
1399 /* internal URL mode */
1400 /* prepend our name & port */
1401 http
->uri
= xstrdup(internalLocalUri(NULL
, hp
->requestUri()));
1402 // We just re-wrote the URL. Must replace the Host: header.
1403 // But have not parsed there yet!! flag for local-only handling.
1404 http
->flags
.internal
= true;
1406 } else if (csd
->port
->flags
.accelSurrogate
) {
1407 /* accelerator mode */
1408 http
->uri
= prepareAcceleratedURL(csd
, hp
);
1409 http
->flags
.accel
= true;
1413 /* No special rewrites have been applied above, use the
1414 * requested url. may be rewritten later, so make extra room */
1415 int url_sz
= hp
->requestUri().length() + Config
.appendDomainLen
+ 5;
1416 http
->uri
= (char *)xcalloc(url_sz
, 1);
1417 SBufToCstring(http
->uri
, hp
->requestUri());
1420 result
->flags
.parsed_ok
= 1;
// NOTE(review): this region was extracted with interior lines elided; the
// fragments below are preserved verbatim. Visible logic: when both the
// request pipeline and inBuf are empty the closure is merely logged;
// otherwise, when half_closed_clients is disabled, every queued request is
// terminated via pipeline.terminateAll(0). The elided lines (braces and any
// surrounding control flow) could not be recovered from this view.
1425 ConnStateData::connFinishedWithConn(int size
)
1428 if (pipeline
.empty() && inBuf
.isEmpty()) {
1429 /* no current or pending requests */
1430 debugs(33, 4, HERE
<< clientConnection
<< " closed");
// admin-configured policy check: half_closed_clients off means abort now
1432 } else if (!Config
.onoff
.half_closed_clients
) {
1433 /* admin doesn't want to support half-closed client sockets */
1434 debugs(33, 3, HERE
<< clientConnection
<< " aborted (half_closed_clients disabled)");
1435 pipeline
.terminateAll(0);
1444 ConnStateData::consumeInput(const size_t byteCount
)
1446 assert(byteCount
> 0 && byteCount
<= inBuf
.length());
1447 inBuf
.consume(byteCount
);
1448 debugs(33, 5, "inBuf has " << inBuf
.length() << " unused bytes");
1452 ConnStateData::clientAfterReadingRequests()
1454 // Were we expecting to read more request body from half-closed connection?
1455 if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection
->fd
)) {
1456 debugs(33, 3, HERE
<< "truncated body: closing half-closed " << clientConnection
);
1457 clientConnection
->close();
1466 ConnStateData::quitAfterError(HttpRequest
*request
)
1468 // From HTTP p.o.v., we do not have to close after every error detected
1469 // at the client-side, but many such errors do require closure and the
1470 // client-side code is bad at handling errors so we play it safe.
1472 request
->flags
.proxyKeepalive
= false;
1473 flags
.readMore
= false;
1474 debugs(33,4, HERE
<< "Will close after error: " << clientConnection
);
1478 bool ConnStateData::serveDelayedError(Http::Stream
*context
)
1480 ClientHttpRequest
*http
= context
->http
;
1485 assert(sslServerBump
->entry
);
1486 // Did we create an error entry while processing CONNECT?
1487 if (!sslServerBump
->entry
->isEmpty()) {
1488 quitAfterError(http
->request
);
1490 // Get the saved error entry and send it to the client by replacing the
1491 // ClientHttpRequest store entry with it.
1492 clientStreamNode
*node
= context
->getClientReplyContext();
1493 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1495 debugs(33, 5, "Responding with delated error for " << http
->uri
);
1496 repContext
->setReplyToStoreEntry(sslServerBump
->entry
, "delayed SslBump error");
1498 // Get error details from the fake certificate-peeking request.
1499 http
->request
->detailError(sslServerBump
->request
->errType
, sslServerBump
->request
->errDetail
);
1500 context
->pullData();
1504 // In bump-server-first mode, we have not necessarily seen the intended
1505 // server name at certificate-peeking time. Check for domain mismatch now,
1506 // when we can extract the intended name from the bumped HTTP request.
1507 if (const Security::CertPointer
&srvCert
= sslServerBump
->serverCert
) {
1508 HttpRequest
*request
= http
->request
;
1509 if (!Ssl::checkX509ServerValidity(srvCert
.get(), request
->url
.host())) {
1510 debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
1511 "does not match domainname " << request
->url
.host());
1513 bool allowDomainMismatch
= false;
1514 if (Config
.ssl_client
.cert_error
) {
1515 ACLFilledChecklist
check(Config
.ssl_client
.cert_error
, request
, dash_str
);
1516 check
.al
= http
->al
;
1517 check
.sslErrors
= new Security::CertErrors(Security::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH
, srvCert
));
1518 check
.syncAle(request
, http
->log_uri
);
1519 allowDomainMismatch
= check
.fastCheck().allowed();
1520 delete check
.sslErrors
;
1521 check
.sslErrors
= NULL
;
1524 if (!allowDomainMismatch
) {
1525 quitAfterError(request
);
1527 clientStreamNode
*node
= context
->getClientReplyContext();
1528 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1529 assert (repContext
);
1531 request
->hier
= sslServerBump
->request
->hier
;
1533 // Create an error object and fill it
1534 ErrorState
*err
= new ErrorState(ERR_SECURE_CONNECT_FAIL
, Http::scServiceUnavailable
, request
);
1535 err
->src_addr
= clientConnection
->remote
;
1536 Ssl::ErrorDetail
*errDetail
= new Ssl::ErrorDetail(
1537 SQUID_X509_V_ERR_DOMAIN_MISMATCH
,
1538 srvCert
.get(), nullptr);
1539 err
->detail
= errDetail
;
1540 repContext
->setReplyToError(request
->method
, err
);
1541 assert(context
->http
->out
.offset
== 0);
1542 context
->pullData();
1550 #endif // USE_OPENSSL
1553 * Check on_unsupported_protocol checklist and return true if tunnel mode selected
1554 * or false otherwise
1557 clientTunnelOnError(ConnStateData
*conn
, Http::StreamPointer
&context
, HttpRequest::Pointer
&request
, const HttpRequestMethod
& method
, err_type requestError
)
1559 if (conn
->mayTunnelUnsupportedProto()) {
1560 ACLFilledChecklist
checklist(Config
.accessList
.on_unsupported_protocol
, request
.getRaw(), nullptr);
1561 checklist
.al
= (context
&& context
->http
) ? context
->http
->al
: nullptr;
1562 checklist
.requestErrorType
= requestError
;
1563 checklist
.src_addr
= conn
->clientConnection
->remote
;
1564 checklist
.my_addr
= conn
->clientConnection
->local
;
1565 checklist
.conn(conn
);
1566 ClientHttpRequest
*http
= context
? context
->http
: nullptr;
1567 const char *log_uri
= http
? http
->log_uri
: nullptr;
1568 checklist
.syncAle(request
.getRaw(), log_uri
);
1569 allow_t answer
= checklist
.fastCheck();
1570 if (answer
.allowed() && answer
.kind
== 1) {
1571 debugs(33, 3, "Request will be tunneled to server");
1573 assert(conn
->pipeline
.front() == context
); // XXX: still assumes HTTP/1 semantics
1574 context
->finished(); // Will remove from conn->pipeline queue
1576 Comm::SetSelect(conn
->clientConnection
->fd
, COMM_SELECT_READ
, NULL
, NULL
, 0);
1577 return conn
->initiateTunneledRequest(request
, Http::METHOD_NONE
, "unknown-protocol", conn
->preservedClientData
);
1579 debugs(33, 3, "Continue with returning the error: " << requestError
);
1587 clientProcessRequestFinished(ConnStateData
*conn
, const HttpRequest::Pointer
&request
)
1591 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
1592 * to here because calling comm_reset_close() causes http to
1593 * be freed before accessing.
1595 if (request
!= NULL
&& request
->flags
.resetTcp
&& Comm::IsConnOpen(conn
->clientConnection
)) {
1596 debugs(33, 3, HERE
<< "Sending TCP RST on " << conn
->clientConnection
);
1597 conn
->flags
.readMore
= false;
1598 comm_reset_close(conn
->clientConnection
);
1603 clientProcessRequest(ConnStateData
*conn
, const Http1::RequestParserPointer
&hp
, Http::Stream
*context
)
1605 ClientHttpRequest
*http
= context
->http
;
1606 bool chunked
= false;
1607 bool mustReplyToOptions
= false;
1608 bool unsupportedTe
= false;
1609 bool expectBody
= false;
1611 // We already have the request parsed and checked, so we
1612 // only need to go through the final body/conn setup to doCallouts().
1613 assert(http
->request
);
1614 HttpRequest::Pointer request
= http
->request
;
1616 // temporary hack to avoid splitting this huge function with sensitive code
1617 const bool isFtp
= !hp
;
1619 // Some blobs below are still HTTP-specific, but we would have to rewrite
1620 // this entire function to remove them from the FTP code path. Connection
1621 // setup and body_pipe preparation blobs are needed for FTP.
1623 request
->manager(conn
, http
->al
);
1625 request
->flags
.accelerated
= http
->flags
.accel
;
1626 request
->flags
.sslBumped
=conn
->switchedToHttps();
1627 // TODO: decouple http->flags.accel from request->flags.sslBumped
1628 request
->flags
.noDirect
= (request
->flags
.accelerated
&& !request
->flags
.sslBumped
) ?
1629 !conn
->port
->allow_direct
: 0;
1630 request
->sources
|= isFtp
? Http::Message::srcFtp
:
1631 ((request
->flags
.sslBumped
|| conn
->port
->transport
.protocol
== AnyP::PROTO_HTTPS
) ? Http::Message::srcHttps
: Http::Message::srcHttp
);
1633 if (request
->flags
.sslBumped
) {
1634 if (conn
->getAuth() != NULL
)
1635 request
->auth_user_request
= conn
->getAuth();
1639 if (internalCheck(request
->url
.path())) {
1640 if (internalHostnameIs(request
->url
.host()) && request
->url
.port() == getMyPort()) {
1641 debugs(33, 2, "internal URL found: " << request
->url
.getScheme() << "://" << request
->url
.authority(true));
1642 http
->flags
.internal
= true;
1643 } else if (Config
.onoff
.global_internal_static
&& internalStaticCheck(request
->url
.path())) {
1644 debugs(33, 2, "internal URL found: " << request
->url
.getScheme() << "://" << request
->url
.authority(true) << " (global_internal_static on)");
1645 request
->url
.setScheme(AnyP::PROTO_HTTP
, "http");
1646 request
->url
.host(internalHostname());
1647 request
->url
.port(getMyPort());
1648 http
->flags
.internal
= true;
1649 http
->setLogUriToRequestUri();
1651 debugs(33, 2, "internal URL found: " << request
->url
.getScheme() << "://" << request
->url
.authority(true) << " (not this proxy)");
1654 request
->flags
.internal
= http
->flags
.internal
;
1657 // XXX: for non-HTTP messages instantiate a different Http::Message child type
1658 // for now Squid only supports HTTP requests
1659 const AnyP::ProtocolVersion
&http_ver
= hp
->messageProtocol();
1660 assert(request
->http_ver
.protocol
== http_ver
.protocol
);
1661 request
->http_ver
.major
= http_ver
.major
;
1662 request
->http_ver
.minor
= http_ver
.minor
;
1665 if (request
->header
.chunked()) {
1667 } else if (request
->header
.has(Http::HdrType::TRANSFER_ENCODING
)) {
1668 const String te
= request
->header
.getList(Http::HdrType::TRANSFER_ENCODING
);
1669 // HTTP/1.1 requires chunking to be the last encoding if there is one
1670 unsupportedTe
= te
.size() && te
!= "identity";
1671 } // else implied identity coding
1673 mustReplyToOptions
= (request
->method
== Http::METHOD_OPTIONS
) &&
1674 (request
->header
.getInt64(Http::HdrType::MAX_FORWARDS
) == 0);
1675 if (!urlCheckRequest(request
.getRaw()) || mustReplyToOptions
|| unsupportedTe
) {
1676 clientStreamNode
*node
= context
->getClientReplyContext();
1677 conn
->quitAfterError(request
.getRaw());
1678 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1679 assert (repContext
);
1680 repContext
->setReplyToError(ERR_UNSUP_REQ
, Http::scNotImplemented
, request
->method
, NULL
,
1681 conn
->clientConnection
->remote
, request
.getRaw(), NULL
, NULL
);
1682 assert(context
->http
->out
.offset
== 0);
1683 context
->pullData();
1684 clientProcessRequestFinished(conn
, request
);
1688 if (!chunked
&& !clientIsContentLengthValid(request
.getRaw())) {
1689 clientStreamNode
*node
= context
->getClientReplyContext();
1690 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1691 assert (repContext
);
1692 conn
->quitAfterError(request
.getRaw());
1693 repContext
->setReplyToError(ERR_INVALID_REQ
,
1694 Http::scLengthRequired
, request
->method
, NULL
,
1695 conn
->clientConnection
->remote
, request
.getRaw(), NULL
, NULL
);
1696 assert(context
->http
->out
.offset
== 0);
1697 context
->pullData();
1698 clientProcessRequestFinished(conn
, request
);
1702 clientSetKeepaliveFlag(http
);
1703 // Let tunneling code be fully responsible for CONNECT requests
1704 if (http
->request
->method
== Http::METHOD_CONNECT
) {
1705 context
->mayUseConnection(true);
1706 conn
->flags
.readMore
= false;
1710 if (conn
->switchedToHttps() && conn
->serveDelayedError(context
)) {
1711 clientProcessRequestFinished(conn
, request
);
1716 /* Do we expect a request-body? */
1717 expectBody
= chunked
|| request
->content_length
> 0;
1718 if (!context
->mayUseConnection() && expectBody
) {
1719 request
->body_pipe
= conn
->expectRequestBody(
1720 chunked
? -1 : request
->content_length
);
1722 /* Is it too large? */
1723 if (!chunked
&& // if chunked, we will check as we accumulate
1724 clientIsRequestBodyTooLargeForPolicy(request
->content_length
)) {
1725 clientStreamNode
*node
= context
->getClientReplyContext();
1726 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1727 assert (repContext
);
1728 conn
->quitAfterError(request
.getRaw());
1729 repContext
->setReplyToError(ERR_TOO_BIG
,
1730 Http::scPayloadTooLarge
, Http::METHOD_NONE
, NULL
,
1731 conn
->clientConnection
->remote
, http
->request
, NULL
, NULL
);
1732 assert(context
->http
->out
.offset
== 0);
1733 context
->pullData();
1734 clientProcessRequestFinished(conn
, request
);
1739 // We may stop producing, comm_close, and/or call setReplyToError()
1740 // below, so quit on errors to avoid http->doCallouts()
1741 if (!conn
->handleRequestBodyData()) {
1742 clientProcessRequestFinished(conn
, request
);
1746 if (!request
->body_pipe
->productionEnded()) {
1747 debugs(33, 5, "need more request body");
1748 context
->mayUseConnection(true);
1749 assert(conn
->flags
.readMore
);
1754 http
->calloutContext
= new ClientRequestContext(http
);
1758 clientProcessRequestFinished(conn
, request
);
1762 ConnStateData::pipelinePrefetchMax() const
1764 // TODO: Support pipelined requests through pinned connections.
1767 return Config
.pipeline_max_prefetch
;
1771 * Limit the number of concurrent requests.
1772 * \return true when there are available position(s) in the pipeline queue for another request.
1773 * \return false when the pipeline queue is full or disabled.
1776 ConnStateData::concurrentRequestQueueFilled() const
1778 const int existingRequestCount
= pipeline
.count();
1780 // default to the configured pipeline size.
1781 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
1783 const int internalRequest
= (transparent() && sslBumpMode
== Ssl::bumpSplice
) ? 1 : 0;
1785 const int internalRequest
= 0;
1787 const int concurrentRequestLimit
= pipelinePrefetchMax() + 1 + internalRequest
;
1789 // when queue filled already we cant add more.
1790 if (existingRequestCount
>= concurrentRequestLimit
) {
1791 debugs(33, 3, clientConnection
<< " max concurrent requests reached (" << concurrentRequestLimit
<< ")");
1792 debugs(33, 5, clientConnection
<< " deferring new request until one is done");
1800 * Perform proxy_protocol_access ACL tests on the client which
1801 * connected to PROXY protocol port to see if we trust the
1802 * sender enough to accept their PROXY header claim.
1805 ConnStateData::proxyProtocolValidateClient()
1807 if (!Config
.accessList
.proxyProtocol
)
1808 return proxyProtocolError("PROXY client not permitted by default ACL");
1810 ACLFilledChecklist
ch(Config
.accessList
.proxyProtocol
, NULL
, clientConnection
->rfc931
);
1811 ch
.src_addr
= clientConnection
->remote
;
1812 ch
.my_addr
= clientConnection
->local
;
1815 if (!ch
.fastCheck().allowed())
1816 return proxyProtocolError("PROXY client not permitted by ACLs");
1822 * Perform cleanup on PROXY protocol errors.
1823 * If header parsing hits a fatal error terminate the connection,
1824 * otherwise wait for more data.
1827 ConnStateData::proxyProtocolError(const char *msg
)
1830 // This is important to know, but maybe not so much that flooding the log is okay.
1831 #if QUIET_PROXY_PROTOCOL
1832 // display the first of every 32 occurances at level 1, the others at level 2.
1833 static uint8_t hide
= 0;
1834 debugs(33, (hide
++ % 32 == 0 ? DBG_IMPORTANT
: 2), msg
<< " from " << clientConnection
);
1836 debugs(33, DBG_IMPORTANT
, msg
<< " from " << clientConnection
);
1843 /// Attempts to extract a PROXY protocol header from the input buffer and,
1844 /// upon success, stores the parsed header in proxyProtocolHeader_.
1845 /// \returns true if the header was successfully parsed
1846 /// \returns false if more data is needed to parse the header or on error
1848 ConnStateData::parseProxyProtocolHeader()
1851 const auto parsed
= ProxyProtocol::Parse(inBuf
);
1852 proxyProtocolHeader_
= parsed
.header
;
1853 assert(bool(proxyProtocolHeader_
));
1854 inBuf
.consume(parsed
.size
);
1855 needProxyProtocolHeader_
= false;
1856 if (proxyProtocolHeader_
->hasForwardedAddresses()) {
1857 clientConnection
->local
= proxyProtocolHeader_
->destinationAddress
;
1858 clientConnection
->remote
= proxyProtocolHeader_
->sourceAddress
;
1859 if ((clientConnection
->flags
& COMM_TRANSPARENT
))
1860 clientConnection
->flags
^= COMM_TRANSPARENT
; // prevent TPROXY spoofing of this new IP.
1861 debugs(33, 5, "PROXY/" << proxyProtocolHeader_
->version() << " upgrade: " << clientConnection
);
1863 } catch (const Parser::BinaryTokenizer::InsufficientInput
&) {
1864 debugs(33, 3, "PROXY protocol: waiting for more than " << inBuf
.length() << " bytes");
1866 } catch (const std::exception
&e
) {
1867 return proxyProtocolError(e
.what());
1873 ConnStateData::receivedFirstByte()
1875 if (receivedFirstByte_
)
1878 receivedFirstByte_
= true;
1879 // Set timeout to Config.Timeout.request
1880 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
1881 AsyncCall::Pointer timeoutCall
= JobCallback(33, 5,
1882 TimeoutDialer
, this, ConnStateData::requestTimeout
);
1883 commSetConnTimeout(clientConnection
, Config
.Timeout
.request
, timeoutCall
);
1887 * Attempt to parse one or more requests from the input buffer.
1888 * Returns true after completing parsing of at least one request [header]. That
1889 * includes cases where parsing ended with an error (e.g., a huge request).
1892 ConnStateData::clientParseRequests()
1894 bool parsed_req
= false;
1896 debugs(33, 5, HERE
<< clientConnection
<< ": attempting to parse");
1898 // Loop while we have read bytes that are not needed for producing the body
1899 // On errors, bodyPipe may become nil, but readMore will be cleared
1900 while (!inBuf
.isEmpty() && !bodyPipe
&& flags
.readMore
) {
1902 // Prohibit concurrent requests when using a pinned to-server connection
1903 // because our Client classes do not support request pipelining.
1904 if (pinning
.pinned
&& !pinning
.readHandler
) {
1905 debugs(33, 3, clientConnection
<< " waits for busy " << pinning
.serverConnection
);
1909 /* Limit the number of concurrent requests */
1910 if (concurrentRequestQueueFilled())
1913 // try to parse the PROXY protocol header magic bytes
1914 if (needProxyProtocolHeader_
) {
1915 if (!parseProxyProtocolHeader())
1918 // we have been waiting for PROXY to provide client-IP
1919 // for some lookups, ie rDNS and IDENT.
1920 whenClientIpKnown();
1923 if (Http::StreamPointer context
= parseOneRequest()) {
1924 debugs(33, 5, clientConnection
<< ": done parsing a request");
1926 AsyncCall::Pointer timeoutCall
= commCbCall(5, 4, "clientLifetimeTimeout",
1927 CommTimeoutCbPtrFun(clientLifetimeTimeout
, context
->http
));
1928 commSetConnTimeout(clientConnection
, Config
.Timeout
.lifetime
, timeoutCall
);
1930 context
->registerWithConn();
1932 processParsedRequest(context
);
1934 parsed_req
= true; // XXX: do we really need to parse everything right NOW ?
1936 if (context
->mayUseConnection()) {
1937 debugs(33, 3, HERE
<< "Not parsing new requests, as this request may need the connection");
1941 debugs(33, 5, clientConnection
<< ": not enough request data: " <<
1942 inBuf
.length() << " < " << Config
.maxRequestHeaderSize
);
1943 Must(inBuf
.length() < Config
.maxRequestHeaderSize
);
1948 /* XXX where to 'finish' the parsing pass? */
1953 ConnStateData::afterClientRead()
1956 if (parsingTlsHandshake
) {
1957 parseTlsHandshake();
1962 /* Process next request */
1963 if (pipeline
.empty())
1964 fd_note(clientConnection
->fd
, "Reading next request");
1966 if (!clientParseRequests()) {
1970 * If the client here is half closed and we failed
1971 * to parse a request, close the connection.
1972 * The above check with connFinishedWithConn() only
1973 * succeeds _if_ the buffer is empty which it won't
1974 * be if we have an incomplete request.
1975 * XXX: This duplicates ConnStateData::kick
1977 if (pipeline
.empty() && commIsHalfClosed(clientConnection
->fd
)) {
1978 debugs(33, 5, clientConnection
<< ": half-closed connection, no completed request parsed, connection closing.");
1979 clientConnection
->close();
1987 clientAfterReadingRequests();
1991 * called when new request data has been read from the socket
1993 * \retval false called comm_close or setReplyToError (the caller should bail)
1994 * \retval true we did not call comm_close or setReplyToError
1997 ConnStateData::handleReadData()
1999 // if we are reading a body, stuff data into the body pipe
2000 if (bodyPipe
!= NULL
)
2001 return handleRequestBodyData();
2006 * called when new request body data has been buffered in inBuf
2007 * may close the connection if we were closing and piped everything out
2009 * \retval false called comm_close or setReplyToError (the caller should bail)
2010 * \retval true we did not call comm_close or setReplyToError
2013 ConnStateData::handleRequestBodyData()
2015 assert(bodyPipe
!= NULL
);
2017 if (bodyParser
) { // chunked encoding
2018 if (const err_type error
= handleChunkedRequestBody()) {
2019 abortChunkedRequestBody(error
);
2022 } else { // identity encoding
2023 debugs(33,5, HERE
<< "handling plain request body for " << clientConnection
);
2024 const size_t putSize
= bodyPipe
->putMoreData(inBuf
.c_str(), inBuf
.length());
2026 consumeInput(putSize
);
2028 if (!bodyPipe
->mayNeedMoreData()) {
2029 // BodyPipe will clear us automagically when we produced everything
2035 debugs(33,5, HERE
<< "produced entire request body for " << clientConnection
);
2037 if (const char *reason
= stoppedSending()) {
2038 /* we've finished reading like good clients,
2039 * now do the close that initiateClose initiated.
2041 debugs(33, 3, HERE
<< "closing for earlier sending error: " << reason
);
2042 clientConnection
->close();
2050 /// parses available chunked encoded body bytes, checks size, returns errors
2052 ConnStateData::handleChunkedRequestBody()
2054 debugs(33, 7, "chunked from " << clientConnection
<< ": " << inBuf
.length());
2056 try { // the parser will throw on errors
2058 if (inBuf
.isEmpty()) // nothing to do
2061 BodyPipeCheckout
bpc(*bodyPipe
);
2062 bodyParser
->setPayloadBuffer(&bpc
.buf
);
2063 const bool parsed
= bodyParser
->parse(inBuf
);
2064 inBuf
= bodyParser
->remaining(); // sync buffers
2067 // dechunk then check: the size limit applies to _dechunked_ content
2068 if (clientIsRequestBodyTooLargeForPolicy(bodyPipe
->producedSize()))
2072 finishDechunkingRequest(true);
2074 return ERR_NONE
; // nil bodyPipe implies body end for the caller
2077 // if chunk parser needs data, then the body pipe must need it too
2078 Must(!bodyParser
->needsMoreData() || bodyPipe
->mayNeedMoreData());
2080 // if parser needs more space and we can consume nothing, we will stall
2081 Must(!bodyParser
->needsMoreSpace() || bodyPipe
->buf().hasContent());
2082 } catch (...) { // TODO: be more specific
2083 debugs(33, 3, HERE
<< "malformed chunks" << bodyPipe
->status());
2084 return ERR_INVALID_REQ
;
2087 debugs(33, 7, HERE
<< "need more chunked data" << *bodyPipe
->status());
2091 /// quit on errors related to chunked request body handling
2093 ConnStateData::abortChunkedRequestBody(const err_type error
)
2095 finishDechunkingRequest(false);
2097 // XXX: The code below works if we fail during initial request parsing,
2098 // but if we fail when the server connection is used already, the server may send
2099 // us its response too, causing various assertions. How to prevent that?
2100 #if WE_KNOW_HOW_TO_SEND_ERRORS
2101 Http::StreamPointer context
= pipeline
.front();
2102 if (context
!= NULL
&& !context
->http
->out
.offset
) { // output nothing yet
2103 clientStreamNode
*node
= context
->getClientReplyContext();
2104 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
2106 const Http::StatusCode scode
= (error
== ERR_TOO_BIG
) ?
2107 Http::scPayloadTooLarge
: HTTP_BAD_REQUEST
;
2108 repContext
->setReplyToError(error
, scode
,
2109 repContext
->http
->request
->method
,
2110 repContext
->http
->uri
,
2112 repContext
->http
->request
,
2114 context
->pullData();
2116 // close or otherwise we may get stuck as nobody will notice the error?
2117 comm_reset_close(clientConnection
);
2120 debugs(33, 3, HERE
<< "aborting chunked request without error " << error
);
2121 comm_reset_close(clientConnection
);
2123 flags
.readMore
= false;
2127 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer
)
2129 // request reader may get stuck waiting for space if nobody consumes body
2130 if (bodyPipe
!= NULL
)
2131 bodyPipe
->enableAutoConsumption();
2136 /** general lifetime handler for HTTP requests */
2138 ConnStateData::requestTimeout(const CommTimeoutCbParams
&io
)
2140 if (!Comm::IsConnOpen(io
.conn
))
2143 if (mayTunnelUnsupportedProto() && !receivedFirstByte_
) {
2144 Http::StreamPointer context
= pipeline
.front();
2145 Must(context
&& context
->http
);
2146 HttpRequest::Pointer request
= context
->http
->request
;
2147 if (clientTunnelOnError(this, context
, request
, HttpRequestMethod(), ERR_REQUEST_START_TIMEOUT
))
2151 * Just close the connection to not confuse browsers
2152 * using persistent connections. Some browsers open
2153 * a connection and then do not use it until much
2154 * later (presumeably because the request triggering
2155 * the open has already been completed on another
2158 debugs(33, 3, "requestTimeout: FD " << io
.fd
<< ": lifetime is expired.");
2163 clientLifetimeTimeout(const CommTimeoutCbParams
&io
)
2165 ClientHttpRequest
*http
= static_cast<ClientHttpRequest
*>(io
.data
);
2166 debugs(33, DBG_IMPORTANT
, "WARNING: Closing client connection due to lifetime timeout");
2167 debugs(33, DBG_IMPORTANT
, "\t" << http
->uri
);
2168 http
->logType
.err
.timedout
= true;
2169 if (Comm::IsConnOpen(io
.conn
))
2173 ConnStateData::ConnStateData(const MasterXaction::Pointer
&xact
) :
2174 AsyncJob("ConnStateData"), // kids overwrite
2176 bodyParser(nullptr),
2178 sslBumpMode(Ssl::bumpEnd
),
2180 needProxyProtocolHeader_(false),
2182 switchedToHttps_(false),
2183 parsingTlsHandshake(false),
2185 sslServerBump(NULL
),
2186 signAlgorithm(Ssl::algSignTrusted
),
2188 stoppedSending_(NULL
),
2189 stoppedReceiving_(NULL
)
2191 flags
.readMore
= true; // kids may overwrite
2192 flags
.swanSang
= false;
2194 pinning
.host
= NULL
;
2196 pinning
.pinned
= false;
2197 pinning
.auth
= false;
2198 pinning
.zeroReply
= false;
2199 pinning
.peer
= NULL
;
2201 // store the details required for creating more MasterXaction objects as new requests come in
2202 log_addr
= xact
->tcpClient
->remote
;
2203 log_addr
.applyClientMask(Config
.Addrs
.client_netmask
);
2205 // register to receive notice of Squid signal events
2206 // which may affect long persisting client connections
2211 ConnStateData::start()
2213 BodyProducer::start();
2214 HttpControlMsgSink::start();
2216 if (port
->disable_pmtu_discovery
!= DISABLE_PMTU_OFF
&&
2217 (transparent() || port
->disable_pmtu_discovery
== DISABLE_PMTU_ALWAYS
)) {
2218 #if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
2219 int i
= IP_PMTUDISC_DONT
;
2220 if (setsockopt(clientConnection
->fd
, SOL_IP
, IP_MTU_DISCOVER
, &i
, sizeof(i
)) < 0) {
2222 debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection
<< " : " << xstrerr(xerrno
));
2225 static bool reported
= false;
2228 debugs(33, DBG_IMPORTANT
, "NOTICE: Path MTU discovery disabling is not supported on your platform.");
2234 typedef CommCbMemFunT
<ConnStateData
, CommCloseCbParams
> Dialer
;
2235 AsyncCall::Pointer call
= JobCallback(33, 5, Dialer
, this, ConnStateData::connStateClosed
);
2236 comm_add_close_handler(clientConnection
->fd
, call
);
2238 needProxyProtocolHeader_
= port
->flags
.proxySurrogate
;
2239 if (needProxyProtocolHeader_
) {
2240 if (!proxyProtocolValidateClient()) // will close the connection on failure
2243 whenClientIpKnown();
2248 ConnStateData::whenClientIpKnown()
2250 if (Config
.onoff
.log_fqdn
)
2251 fqdncache_gethostbyaddr(clientConnection
->remote
, FQDN_LOOKUP_IF_MISS
);
2254 if (Ident::TheConfig
.identLookup
) {
2255 ACLFilledChecklist
identChecklist(Ident::TheConfig
.identLookup
, NULL
, NULL
);
2256 identChecklist
.src_addr
= clientConnection
->remote
;
2257 identChecklist
.my_addr
= clientConnection
->local
;
2258 if (identChecklist
.fastCheck().allowed())
2259 Ident::Start(clientConnection
, clientIdentDone
, this);
2263 clientdbEstablished(clientConnection
->remote
, 1);
2266 fd_table
[clientConnection
->fd
].clientInfo
= NULL
;
2268 if (!Config
.onoff
.client_db
)
2269 return; // client delay pools require client_db
2271 const auto &pools
= ClientDelayPools::Instance()->pools
;
2273 ACLFilledChecklist
ch(NULL
, NULL
, NULL
);
2275 // TODO: we check early to limit error response bandwith but we
2276 // should recheck when we can honor delay_pool_uses_indirect
2277 // TODO: we should also pass the port details for myportname here.
2278 ch
.src_addr
= clientConnection
->remote
;
2279 ch
.my_addr
= clientConnection
->local
;
2281 for (unsigned int pool
= 0; pool
< pools
.size(); ++pool
) {
2283 /* pools require explicit 'allow' to assign a client into them */
2284 if (pools
[pool
]->access
) {
2285 ch
.changeAcl(pools
[pool
]->access
);
2286 allow_t answer
= ch
.fastCheck();
2287 if (answer
.allowed()) {
2289 /* request client information from db after we did all checks
2290 this will save hash lookup if client failed checks */
2291 ClientInfo
* cli
= clientdbGetInfo(clientConnection
->remote
);
2294 /* put client info in FDE */
2295 fd_table
[clientConnection
->fd
].clientInfo
= cli
;
2297 /* setup write limiter for this request */
2298 const double burst
= floor(0.5 +
2299 (pools
[pool
]->highwatermark
* Config
.ClientDelay
.initial
)/100.0);
2300 cli
->setWriteLimiter(pools
[pool
]->rate
, burst
, pools
[pool
]->highwatermark
);
2303 debugs(83, 4, HERE
<< "Delay pool " << pool
<< " skipped because ACL " << answer
);
2310 // kids must extend to actually start doing something (e.g., reading)
2313 /** Handle a new connection on an HTTP socket. */
2315 httpAccept(const CommAcceptCbParams
¶ms
)
2317 MasterXaction::Pointer xact
= params
.xaction
;
2318 AnyP::PortCfgPointer s
= xact
->squidPort
;
2320 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2322 if (params
.flag
!= Comm::OK
) {
2323 // Its possible the call was still queued when the client disconnected
2324 debugs(33, 2, s
->listenConn
<< ": accept failure: " << xstrerr(params
.xerrno
));
2328 debugs(33, 4, params
.conn
<< ": accepted");
2329 fd_note(params
.conn
->fd
, "client http connect");
2331 if (s
->tcp_keepalive
.enabled
)
2332 commSetTcpKeepalive(params
.conn
->fd
, s
->tcp_keepalive
.idle
, s
->tcp_keepalive
.interval
, s
->tcp_keepalive
.timeout
);
2334 ++incoming_sockets_accepted
;
2336 // Socket is ready, setup the connection manager to start using it
2337 auto *srv
= Http::NewServer(xact
);
2338 AsyncJob::Start(srv
); // usually async-calls readSomeData()
2341 /// Create TLS connection structure and update fd_table
2343 httpsCreate(const Comm::ConnectionPointer
&conn
, const Security::ContextPointer
&ctx
)
2345 if (Security::CreateServerSession(ctx
, conn
, "client https start")) {
2346 debugs(33, 5, "will negotiate TLS on " << conn
);
2350 debugs(33, DBG_IMPORTANT
, "ERROR: could not create TLS server context for " << conn
);
2357 * \retval 1 on success
2358 * \retval 0 when needs more data
2359 * \retval -1 on error
2362 tlsAttemptHandshake(ConnStateData
*conn
, PF
*callback
)
2364 // TODO: maybe throw instead of returning -1
2365 // see https://github.com/squid-cache/squid/pull/81#discussion_r153053278
2366 int fd
= conn
->clientConnection
->fd
;
2367 auto session
= fd_table
[fd
].ssl
.get();
2372 const auto ret
= SSL_accept(session
);
2376 const int xerrno
= errno
;
2377 const auto ssl_error
= SSL_get_error(session
, ret
);
2379 switch (ssl_error
) {
2381 case SSL_ERROR_WANT_READ
:
2382 Comm::SetSelect(fd
, COMM_SELECT_READ
, callback
, (callback
? conn
: nullptr), 0);
2385 case SSL_ERROR_WANT_WRITE
:
2386 Comm::SetSelect(fd
, COMM_SELECT_WRITE
, callback
, (callback
? conn
: nullptr), 0);
2389 case SSL_ERROR_SYSCALL
:
2391 debugs(83, 2, "Error negotiating SSL connection on FD " << fd
<< ": Aborted by client: " << ssl_error
);
2393 debugs(83, (xerrno
== ECONNRESET
) ? 1 : 2, "Error negotiating SSL connection on FD " << fd
<< ": " <<
2394 (xerrno
== 0 ? Security::ErrorString(ssl_error
) : xstrerr(xerrno
)));
2398 case SSL_ERROR_ZERO_RETURN
:
2399 debugs(83, DBG_IMPORTANT
, "Error negotiating SSL connection on FD " << fd
<< ": Closed by client");
2403 debugs(83, DBG_IMPORTANT
, "Error negotiating SSL connection on FD " <<
2404 fd
<< ": " << Security::ErrorString(ssl_error
) <<
2405 " (" << ssl_error
<< "/" << ret
<< ")");
2410 const auto x
= gnutls_handshake(session
);
2411 if (x
== GNUTLS_E_SUCCESS
)
2414 if (gnutls_error_is_fatal(x
)) {
2415 debugs(83, 2, "Error negotiating TLS on " << conn
->clientConnection
<< ": Aborted by client: " << Security::ErrorString(x
));
2417 } else if (x
== GNUTLS_E_INTERRUPTED
|| x
== GNUTLS_E_AGAIN
) {
2418 const auto ioAction
= (gnutls_record_get_direction(session
)==0 ? COMM_SELECT_READ
: COMM_SELECT_WRITE
);
2419 Comm::SetSelect(fd
, ioAction
, callback
, (callback
? conn
: nullptr), 0);
2424 // Performing TLS handshake should never be reachable without a TLS/SSL library.
2425 (void)session
; // avoid compiler and static analysis complaints
2426 fatal("FATAL: HTTPS not supported by this Squid.");
2432 /** negotiate an SSL connection */
2434 clientNegotiateSSL(int fd
, void *data
)
2436 ConnStateData
*conn
= (ConnStateData
*)data
;
2438 const int ret
= tlsAttemptHandshake(conn
, clientNegotiateSSL
);
2440 if (ret
< 0) // An error
2441 conn
->clientConnection
->close();
2445 Security::SessionPointer
session(fd_table
[fd
].ssl
);
2448 if (Security::SessionIsResumed(session
)) {
2449 debugs(83, 2, "Session " << SSL_get_session(session
.get()) <<
2450 " reused on FD " << fd
<< " (" << fd_table
[fd
].ipaddr
<<
2451 ":" << (int)fd_table
[fd
].remote_port
<< ")");
2453 if (Debug::Enabled(83, 4)) {
2454 /* Write out the SSL session details.. actually the call below, but
2455 * OpenSSL headers do strange typecasts confusing GCC.. */
2456 /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
2457 #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
2458 PEM_ASN1_write(reinterpret_cast<i2d_of_void
*>(i2d_SSL_SESSION
),
2459 PEM_STRING_SSL_SESSION
, debug_log
,
2460 reinterpret_cast<char *>(SSL_get_session(session
.get())),
2461 nullptr, nullptr, 0, nullptr, nullptr);
2463 #elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)
2465 /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
2466 * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
2467 * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
2468 * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
2469 * Because there are two possible usable cast, if you get an error here, try the other
2470 * commented line. */
2472 PEM_ASN1_write((int(*)())i2d_SSL_SESSION
, PEM_STRING_SSL_SESSION
,
2474 reinterpret_cast<char *>(SSL_get_session(session
.get())),
2475 nullptr, nullptr, 0, nullptr, nullptr);
2476 /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
2478 reinterpret_cast<char *>(SSL_get_session(session.get())),
2479 nullptr, nullptr, 0, nullptr, nullptr);
2482 debugs(83, 4, "With " OPENSSL_VERSION_TEXT
", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source.");
2485 /* Note: This does not automatically fflush the log file.. */
2488 debugs(83, 2, "New session " << SSL_get_session(session
.get()) <<
2489 " on FD " << fd
<< " (" << fd_table
[fd
].ipaddr
<< ":" <<
2490 fd_table
[fd
].remote_port
<< ")");
2493 debugs(83, 2, "TLS session reuse not yet implemented.");
2496 // Connection established. Retrieve TLS connection parameters for logging.
2497 conn
->clientConnection
->tlsNegotiations()->retrieveNegotiatedInfo(session
);
2500 X509
*client_cert
= SSL_get_peer_certificate(session
.get());
2503 debugs(83, 3, "FD " << fd
<< " client certificate: subject: " <<
2504 X509_NAME_oneline(X509_get_subject_name(client_cert
), 0, 0));
2506 debugs(83, 3, "FD " << fd
<< " client certificate: issuer: " <<
2507 X509_NAME_oneline(X509_get_issuer_name(client_cert
), 0, 0));
2509 X509_free(client_cert
);
2511 debugs(83, 5, "FD " << fd
<< " has no client certificate.");
2514 debugs(83, 2, "Client certificate requesting not yet implemented.");
2517 conn
->readSomeData();
2521 * If Security::ContextPointer is given, starts reading the TLS handshake.
2522 * Otherwise, calls switchToHttps to generate a dynamic Security::ContextPointer.
2525 httpsEstablish(ConnStateData
*connState
, const Security::ContextPointer
&ctx
)
2528 const Comm::ConnectionPointer
&details
= connState
->clientConnection
;
2530 if (!ctx
|| !httpsCreate(details
, ctx
))
2533 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
2534 AsyncCall::Pointer timeoutCall
= JobCallback(33, 5, TimeoutDialer
,
2535 connState
, ConnStateData::requestTimeout
);
2536 commSetConnTimeout(details
, Config
.Timeout
.request
, timeoutCall
);
2538 Comm::SetSelect(details
->fd
, COMM_SELECT_READ
, clientNegotiateSSL
, connState
, 0);
2543 * A callback function to use with the ACLFilledChecklist callback.
2546 httpsSslBumpAccessCheckDone(allow_t answer
, void *data
)
2548 ConnStateData
*connState
= (ConnStateData
*) data
;
2550 // if the connection is closed or closing, just return.
2551 if (!connState
->isOpen())
2554 if (answer
.allowed()) {
2555 debugs(33, 2, "sslBump action " << Ssl::bumpMode(answer
.kind
) << "needed for " << connState
->clientConnection
);
2556 connState
->sslBumpMode
= static_cast<Ssl::BumpMode
>(answer
.kind
);
2558 debugs(33, 3, "sslBump not needed for " << connState
->clientConnection
);
2559 connState
->sslBumpMode
= Ssl::bumpSplice
;
2562 if (connState
->sslBumpMode
== Ssl::bumpTerminate
) {
2563 connState
->clientConnection
->close();
2567 if (!connState
->fakeAConnectRequest("ssl-bump", connState
->inBuf
))
2568 connState
->clientConnection
->close();
2572 /** handle a new HTTPS connection */
2574 httpsAccept(const CommAcceptCbParams
¶ms
)
2576 MasterXaction::Pointer xact
= params
.xaction
;
2577 const AnyP::PortCfgPointer s
= xact
->squidPort
;
2579 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2581 if (params
.flag
!= Comm::OK
) {
2582 // Its possible the call was still queued when the client disconnected
2583 debugs(33, 2, "httpsAccept: " << s
->listenConn
<< ": accept failure: " << xstrerr(params
.xerrno
));
2587 debugs(33, 4, HERE
<< params
.conn
<< " accepted, starting SSL negotiation.");
2588 fd_note(params
.conn
->fd
, "client https connect");
2590 if (s
->tcp_keepalive
.enabled
) {
2591 commSetTcpKeepalive(params
.conn
->fd
, s
->tcp_keepalive
.idle
, s
->tcp_keepalive
.interval
, s
->tcp_keepalive
.timeout
);
2593 ++incoming_sockets_accepted
;
2595 // Socket is ready, setup the connection manager to start using it
2596 auto *srv
= Https::NewServer(xact
);
2597 AsyncJob::Start(srv
); // usually async-calls postHttpsAccept()
2601 ConnStateData::postHttpsAccept()
2603 if (port
->flags
.tunnelSslBumping
) {
2605 debugs(33, 5, "accept transparent connection: " << clientConnection
);
2607 if (!Config
.accessList
.ssl_bump
) {
2608 httpsSslBumpAccessCheckDone(ACCESS_DENIED
, this);
2612 MasterXaction::Pointer mx
= new MasterXaction(XactionInitiator::initClient
);
2613 mx
->tcpClient
= clientConnection
;
2614 // Create a fake HTTP request for ssl_bump ACL check,
2615 // using tproxy/intercept provided destination IP and port.
2616 HttpRequest
*request
= new HttpRequest(mx
);
2617 static char ip
[MAX_IPSTRLEN
];
2618 assert(clientConnection
->flags
& (COMM_TRANSPARENT
| COMM_INTERCEPTION
));
2619 request
->url
.host(clientConnection
->local
.toStr(ip
, sizeof(ip
)));
2620 request
->url
.port(clientConnection
->local
.port());
2621 request
->myportname
= port
->name
;
2623 ACLFilledChecklist
*acl_checklist
= new ACLFilledChecklist(Config
.accessList
.ssl_bump
, request
, NULL
);
2624 acl_checklist
->src_addr
= clientConnection
->remote
;
2625 acl_checklist
->my_addr
= port
->s
;
2626 // Build a local AccessLogEntry to allow requiresAle() acls work
2627 acl_checklist
->al
= new AccessLogEntry
;
2628 acl_checklist
->al
->cache
.start_time
= current_time
;
2629 acl_checklist
->al
->tcpClient
= clientConnection
;
2630 acl_checklist
->al
->cache
.port
= port
;
2631 acl_checklist
->al
->cache
.caddr
= log_addr
;
2632 acl_checklist
->al
->proxyProtocolHeader
= proxyProtocolHeader_
;
2633 HTTPMSGUNLOCK(acl_checklist
->al
->request
);
2634 acl_checklist
->al
->request
= request
;
2635 HTTPMSGLOCK(acl_checklist
->al
->request
);
2636 Http::StreamPointer context
= pipeline
.front();
2637 ClientHttpRequest
*http
= context
? context
->http
: nullptr;
2638 const char *log_uri
= http
? http
->log_uri
: nullptr;
2639 acl_checklist
->syncAle(request
, log_uri
);
2640 acl_checklist
->nonBlockingCheck(httpsSslBumpAccessCheckDone
, this);
2642 fatal("FATAL: SSL-Bump requires --with-openssl");
2646 httpsEstablish(this, port
->secure
.staticContext
);
2652 ConnStateData::sslCrtdHandleReplyWrapper(void *data
, const Helper::Reply
&reply
)
2654 ConnStateData
* state_data
= (ConnStateData
*)(data
);
2655 state_data
->sslCrtdHandleReply(reply
);
2659 ConnStateData::sslCrtdHandleReply(const Helper::Reply
&reply
)
2662 debugs(33, 3, "Connection gone while waiting for ssl_crtd helper reply; helper reply:" << reply
);
2666 if (reply
.result
== Helper::BrokenHelper
) {
2667 debugs(33, 5, HERE
<< "Certificate for " << sslConnectHostOrIp
<< " cannot be generated. ssl_crtd response: " << reply
);
2668 } else if (!reply
.other().hasContent()) {
2669 debugs(1, DBG_IMPORTANT
, HERE
<< "\"ssl_crtd\" helper returned <NULL> reply.");
2671 Ssl::CrtdMessage
reply_message(Ssl::CrtdMessage::REPLY
);
2672 if (reply_message
.parse(reply
.other().content(), reply
.other().contentSize()) != Ssl::CrtdMessage::OK
) {
2673 debugs(33, 5, HERE
<< "Reply from ssl_crtd for " << sslConnectHostOrIp
<< " is incorrect");
2675 if (reply
.result
!= Helper::Okay
) {
2676 debugs(33, 5, HERE
<< "Certificate for " << sslConnectHostOrIp
<< " cannot be generated. ssl_crtd response: " << reply_message
.getBody());
2678 debugs(33, 5, HERE
<< "Certificate for " << sslConnectHostOrIp
<< " was successfully recieved from ssl_crtd");
2679 if (sslServerBump
&& (sslServerBump
->act
.step1
== Ssl::bumpPeek
|| sslServerBump
->act
.step1
== Ssl::bumpStare
)) {
2680 doPeekAndSpliceStep();
2681 auto ssl
= fd_table
[clientConnection
->fd
].ssl
.get();
2682 bool ret
= Ssl::configureSSLUsingPkeyAndCertFromMemory(ssl
, reply_message
.getBody().c_str(), *port
);
2684 debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");
2686 Security::ContextPointer
ctx(Security::GetFrom(fd_table
[clientConnection
->fd
].ssl
));
2687 Ssl::configureUnconfiguredSslContext(ctx
, signAlgorithm
, *port
);
2689 Security::ContextPointer
ctx(Ssl::GenerateSslContextUsingPkeyAndCertFromMemory(reply_message
.getBody().c_str(), port
->secure
, (signAlgorithm
== Ssl::algSignTrusted
)));
2690 if (ctx
&& !sslBumpCertKey
.isEmpty())
2691 storeTlsContextToCache(sslBumpCertKey
, ctx
);
2692 getSslContextDone(ctx
);
2698 Security::ContextPointer nil
;
2699 getSslContextDone(nil
);
2702 void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties
&certProperties
)
2704 certProperties
.commonName
= sslCommonName_
.isEmpty() ? sslConnectHostOrIp
.termedBuf() : sslCommonName_
.c_str();
2706 const bool connectedOk
= sslServerBump
&& sslServerBump
->connectedOk();
2708 if (X509
*mimicCert
= sslServerBump
->serverCert
.get())
2709 certProperties
.mimicCert
.resetAndLock(mimicCert
);
2711 ACLFilledChecklist
checklist(NULL
, sslServerBump
->request
.getRaw(),
2712 clientConnection
!= NULL
? clientConnection
->rfc931
: dash_str
);
2713 checklist
.sslErrors
= cbdataReference(sslServerBump
->sslErrors());
2715 for (sslproxy_cert_adapt
*ca
= Config
.ssl_client
.cert_adapt
; ca
!= NULL
; ca
= ca
->next
) {
2716 // If the algorithm already set, then ignore it.
2717 if ((ca
->alg
== Ssl::algSetCommonName
&& certProperties
.setCommonName
) ||
2718 (ca
->alg
== Ssl::algSetValidAfter
&& certProperties
.setValidAfter
) ||
2719 (ca
->alg
== Ssl::algSetValidBefore
&& certProperties
.setValidBefore
) )
2722 if (ca
->aclList
&& checklist
.fastCheck(ca
->aclList
).allowed()) {
2723 const char *alg
= Ssl::CertAdaptAlgorithmStr
[ca
->alg
];
2724 const char *param
= ca
->param
;
2726 // For parameterless CN adaptation, use hostname from the
2728 if (ca
->alg
== Ssl::algSetCommonName
) {
2730 param
= sslConnectHostOrIp
.termedBuf();
2731 certProperties
.commonName
= param
;
2732 certProperties
.setCommonName
= true;
2733 } else if (ca
->alg
== Ssl::algSetValidAfter
)
2734 certProperties
.setValidAfter
= true;
2735 else if (ca
->alg
== Ssl::algSetValidBefore
)
2736 certProperties
.setValidBefore
= true;
2738 debugs(33, 5, HERE
<< "Matches certificate adaptation aglorithm: " <<
2739 alg
<< " param: " << (param
? param
: "-"));
2743 certProperties
.signAlgorithm
= Ssl::algSignEnd
;
2744 for (sslproxy_cert_sign
*sg
= Config
.ssl_client
.cert_sign
; sg
!= NULL
; sg
= sg
->next
) {
2745 if (sg
->aclList
&& checklist
.fastCheck(sg
->aclList
).allowed()) {
2746 certProperties
.signAlgorithm
= (Ssl::CertSignAlgorithm
)sg
->alg
;
2750 } else {// did not try to connect (e.g. client-first) or failed to connect
2751 // In case of an error while connecting to the secure server, use a
2752 // trusted certificate, with no mimicked fields and no adaptation
2753 // algorithms. There is nothing we can mimic, so we want to minimize the
2754 // number of warnings the user will have to see to get to the error page.
2755 // We will close the connection, so that the trust is not extended to
2756 // non-Squid content.
2757 certProperties
.signAlgorithm
= Ssl::algSignTrusted
;
2760 assert(certProperties
.signAlgorithm
!= Ssl::algSignEnd
);
2762 if (certProperties
.signAlgorithm
== Ssl::algSignUntrusted
) {
2763 assert(port
->secure
.untrustedSigningCa
.cert
);
2764 certProperties
.signWithX509
.resetAndLock(port
->secure
.untrustedSigningCa
.cert
.get());
2765 certProperties
.signWithPkey
.resetAndLock(port
->secure
.untrustedSigningCa
.pkey
.get());
2767 assert(port
->secure
.signingCa
.cert
.get());
2768 certProperties
.signWithX509
.resetAndLock(port
->secure
.signingCa
.cert
.get());
2770 if (port
->secure
.signingCa
.pkey
)
2771 certProperties
.signWithPkey
.resetAndLock(port
->secure
.signingCa
.pkey
.get());
2773 signAlgorithm
= certProperties
.signAlgorithm
;
2775 certProperties
.signHash
= Ssl::DefaultSignHash
;
2778 Security::ContextPointer
2779 ConnStateData::getTlsContextFromCache(const SBuf
&cacheKey
, const Ssl::CertificateProperties
&certProperties
)
2781 debugs(33, 5, "Finding SSL certificate for " << cacheKey
<< " in cache");
2782 Ssl::LocalContextStorage
* ssl_ctx_cache
= Ssl::TheGlobalContextStorage
.getLocalStorage(port
->s
);
2783 if (Security::ContextPointer
*ctx
= ssl_ctx_cache
? ssl_ctx_cache
->get(cacheKey
) : nullptr) {
2784 if (Ssl::verifySslCertificate(*ctx
, certProperties
)) {
2785 debugs(33, 5, "Cached SSL certificate for " << certProperties
.commonName
<< " is valid");
2788 debugs(33, 5, "Cached SSL certificate for " << certProperties
.commonName
<< " is out of date. Delete this certificate from cache");
2790 ssl_ctx_cache
->del(cacheKey
);
2793 return Security::ContextPointer(nullptr);
2797 ConnStateData::storeTlsContextToCache(const SBuf
&cacheKey
, Security::ContextPointer
&ctx
)
2799 Ssl::LocalContextStorage
*ssl_ctx_cache
= Ssl::TheGlobalContextStorage
.getLocalStorage(port
->s
);
2800 if (!ssl_ctx_cache
|| !ssl_ctx_cache
->add(cacheKey
, new Security::ContextPointer(ctx
))) {
2801 // If it is not in storage delete after using. Else storage deleted it.
2802 fd_table
[clientConnection
->fd
].dynamicTlsContext
= ctx
;
2807 ConnStateData::getSslContextStart()
2809 // If we are called, then CONNECT has succeeded. Finalize it.
2810 if (auto xact
= pipeline
.front()) {
2811 if (xact
->http
&& xact
->http
->request
&& xact
->http
->request
->method
== Http::METHOD_CONNECT
)
2813 // cannot proceed with encryption if requests wait for plain responses
2814 Must(pipeline
.empty());
2816 /* careful: finished() above frees request, host, etc. */
2818 if (port
->secure
.generateHostCertificates
) {
2819 Ssl::CertificateProperties certProperties
;
2820 buildSslCertGenerationParams(certProperties
);
2822 // Disable caching for bumpPeekAndSplice mode
2823 if (!(sslServerBump
&& (sslServerBump
->act
.step1
== Ssl::bumpPeek
|| sslServerBump
->act
.step1
== Ssl::bumpStare
))) {
2824 sslBumpCertKey
.clear();
2825 Ssl::InRamCertificateDbKey(certProperties
, sslBumpCertKey
);
2826 assert(!sslBumpCertKey
.isEmpty());
2828 Security::ContextPointer
ctx(getTlsContextFromCache(sslBumpCertKey
, certProperties
));
2830 getSslContextDone(ctx
);
2837 debugs(33, 5, HERE
<< "Generating SSL certificate for " << certProperties
.commonName
<< " using ssl_crtd.");
2838 Ssl::CrtdMessage
request_message(Ssl::CrtdMessage::REQUEST
);
2839 request_message
.setCode(Ssl::CrtdMessage::code_new_certificate
);
2840 request_message
.composeRequest(certProperties
);
2841 debugs(33, 5, HERE
<< "SSL crtd request: " << request_message
.compose().c_str());
2842 Ssl::Helper::Submit(request_message
, sslCrtdHandleReplyWrapper
, this);
2844 } catch (const std::exception
&e
) {
2845 debugs(33, DBG_IMPORTANT
, "ERROR: Failed to compose ssl_crtd " <<
2846 "request for " << certProperties
.commonName
<<
2847 " certificate: " << e
.what() << "; will now block to " <<
2848 "generate that certificate.");
2849 // fall through to do blocking in-process generation.
2851 #endif // USE_SSL_CRTD
2853 debugs(33, 5, HERE
<< "Generating SSL certificate for " << certProperties
.commonName
);
2854 if (sslServerBump
&& (sslServerBump
->act
.step1
== Ssl::bumpPeek
|| sslServerBump
->act
.step1
== Ssl::bumpStare
)) {
2855 doPeekAndSpliceStep();
2856 auto ssl
= fd_table
[clientConnection
->fd
].ssl
.get();
2857 if (!Ssl::configureSSL(ssl
, certProperties
, *port
))
2858 debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");
2860 Security::ContextPointer
ctx(Security::GetFrom(fd_table
[clientConnection
->fd
].ssl
));
2861 Ssl::configureUnconfiguredSslContext(ctx
, certProperties
.signAlgorithm
, *port
);
2863 Security::ContextPointer
dynCtx(Ssl::GenerateSslContext(certProperties
, port
->secure
, (signAlgorithm
== Ssl::algSignTrusted
)));
2864 if (dynCtx
&& !sslBumpCertKey
.isEmpty())
2865 storeTlsContextToCache(sslBumpCertKey
, dynCtx
);
2866 getSslContextDone(dynCtx
);
2871 Security::ContextPointer nil
;
2872 getSslContextDone(nil
);
2876 ConnStateData::getSslContextDone(Security::ContextPointer
&ctx
)
2878 if (port
->secure
.generateHostCertificates
&& !ctx
) {
2879 debugs(33, 2, "Failed to generate TLS context for " << sslConnectHostOrIp
);
2882 // If generated ssl context = NULL, try to use static ssl context.
2884 if (!port
->secure
.staticContext
) {
2885 debugs(83, DBG_IMPORTANT
, "Closing " << clientConnection
->remote
<< " as lacking TLS context");
2886 clientConnection
->close();
2889 debugs(33, 5, "Using static TLS context.");
2890 ctx
= port
->secure
.staticContext
;
2894 if (!httpsCreate(clientConnection
, ctx
))
2897 // bumped intercepted conns should already have Config.Timeout.request set
2898 // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
2899 // to make sure the connection does not get stuck on non-SSL clients.
2900 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
2901 AsyncCall::Pointer timeoutCall
= JobCallback(33, 5, TimeoutDialer
,
2902 this, ConnStateData::requestTimeout
);
2903 commSetConnTimeout(clientConnection
, Config
.Timeout
.request
, timeoutCall
);
2905 switchedToHttps_
= true;
2907 auto ssl
= fd_table
[clientConnection
->fd
].ssl
.get();
2908 BIO
*b
= SSL_get_rbio(ssl
);
2909 Ssl::ClientBio
*bio
= static_cast<Ssl::ClientBio
*>(BIO_get_data(b
));
2910 bio
->setReadBufData(inBuf
);
2912 clientNegotiateSSL(clientConnection
->fd
, this);
2916 ConnStateData::switchToHttps(HttpRequest
*request
, Ssl::BumpMode bumpServerMode
)
2918 assert(!switchedToHttps_
);
2920 sslConnectHostOrIp
= request
->url
.host();
2921 tlsConnectPort
= request
->url
.port();
2922 resetSslCommonName(request
->url
.host());
2924 // We are going to read new request
2925 flags
.readMore
= true;
2926 debugs(33, 5, HERE
<< "converting " << clientConnection
<< " to SSL");
2928 // keep version major.minor details the same.
2929 // but we are now performing the HTTPS handshake traffic
2930 transferProtocol
.protocol
= AnyP::PROTO_HTTPS
;
2932 // If sslServerBump is set, then we have decided to deny CONNECT
2933 // and now want to switch to SSL to send the error to the client
2934 // without even peeking at the origin server certificate.
2935 if (bumpServerMode
== Ssl::bumpServerFirst
&& !sslServerBump
) {
2936 request
->flags
.sslPeek
= true;
2937 sslServerBump
= new Ssl::ServerBump(request
);
2938 } else if (bumpServerMode
== Ssl::bumpPeek
|| bumpServerMode
== Ssl::bumpStare
) {
2939 request
->flags
.sslPeek
= true;
2940 sslServerBump
= new Ssl::ServerBump(request
, NULL
, bumpServerMode
);
2943 // commSetConnTimeout() was called for this request before we switched.
2944 // Fix timeout to request_start_timeout
2945 typedef CommCbMemFunT
<ConnStateData
, CommTimeoutCbParams
> TimeoutDialer
;
2946 AsyncCall::Pointer timeoutCall
= JobCallback(33, 5,
2947 TimeoutDialer
, this, ConnStateData::requestTimeout
);
2948 commSetConnTimeout(clientConnection
, Config
.Timeout
.request_start_timeout
, timeoutCall
);
2949 // Also reset receivedFirstByte_ flag to allow this timeout work in the case we have
2950 // a bumbed "connect" request on non transparent port.
2951 receivedFirstByte_
= false;
2952 // Get more data to peek at Tls
2953 parsingTlsHandshake
= true;
/// Parses the buffered TLS ClientHello (during SslBump) and dispatches to the
/// configured first bumping step: client-first, server-first, or peek/stare.
/// On a non-TLS greeting, attempts to tunnel the raw bytes instead.
// NOTE(review): structural lines (braces, try block, returns) were elided in
// this paste and reconstructed from context — verify against upstream.
void
ConnStateData::parseTlsHandshake()
{
    Must(parsingTlsHandshake);

    assert(!inBuf.isEmpty());
    receivedFirstByte(); // we have client bytes; cancel the first-byte timeout logic
    fd_note(clientConnection->fd, "Parsing TLS handshake");

    bool unsupportedProtocol = false;
    try {
        if (!tlsParser.parseHello(inBuf)) {
            // need more data to finish parsing
            readSomeData();
            return;
        }
    }
    catch (const std::exception &ex) {
        // parser rejected the input: not a TLS handshake at all
        debugs(83, 2, "error on FD " << clientConnection->fd << ": " << ex.what());
        unsupportedProtocol = true;
    }

    parsingTlsHandshake = false;

    // client data may be needed for splicing and for
    // tunneling unsupportedProtocol after an error
    preservedClientData = inBuf;

    // Even if the parser failed, each TLS detail should either be set
    // correctly or still be "unknown"; copying unknown detail is a no-op.
    Security::TlsDetails::Pointer const &details = tlsParser.details;
    clientConnection->tlsNegotiations()->retrieveParsedInfo(details);
    if (details && !details->serverName.isEmpty()) {
        // remember SNI for certificate generation and ACL checks
        resetSslCommonName(details->serverName.c_str());
        tlsClientSni_ = details->serverName;
    }

    // We should disable read/write handlers
    Comm::ResetSelect(clientConnection->fd);

    if (unsupportedProtocol) {
        Http::StreamPointer context = pipeline.front();
        Must(context && context->http);
        HttpRequest::Pointer request = context->http->request;
        debugs(83, 5, "Got something other than TLS Client Hello. Cannot SslBump.");
        sslBumpMode = Ssl::bumpSplice; // blind tunnel is the only remaining option
        context->http->al->ssl.bumpMode = Ssl::bumpSplice;
        if (!clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_PROTOCOL_UNKNOWN))
            clientConnection->close();
        return;
    }

    if (!sslServerBump || sslServerBump->act.step1 == Ssl::bumpClientFirst) { // Either means client-first.
        getSslContextStart();
        return;
    } else if (sslServerBump->act.step1 == Ssl::bumpServerFirst) {
        // will call httpsPeeked() with certificate and connection, eventually
        FwdState::fwdStart(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw());
    } else {
        Must(sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare);
        startPeekAndSplice();
    }
}
/// Callback for the step2 ssl_bump ACL check: records the chosen bump action
/// on the connection/transaction and either terminates, continues bumping,
/// or splices the client and server connections.
// NOTE(review): elided braces/returns reconstructed from context.
void
httpsSslBumpStep2AccessCheckDone(allow_t answer, void *data)
{
    ConnStateData *connState = (ConnStateData *) data;

    // if the connection is closed or closing, just return.
    if (!connState->isOpen())
        return;

    debugs(33, 5, "Answer: " << answer << " kind:" << answer.kind);
    assert(connState->serverBump());
    Ssl::BumpMode bumpAction;
    if (answer.allowed()) {
        // ACL matched: its kind encodes the configured bump mode
        bumpAction = (Ssl::BumpMode)answer.kind;
    } else
        bumpAction = Ssl::bumpSplice; // denied => do not decrypt

    connState->serverBump()->act.step2 = bumpAction;
    connState->sslBumpMode = bumpAction;
    Http::StreamPointer context = connState->pipeline.front();
    if (ClientHttpRequest *http = (context ? context->http : nullptr))
        http->al->ssl.bumpMode = bumpAction; // for access logging

    if (bumpAction == Ssl::bumpTerminate) {
        connState->clientConnection->close();
    } else if (bumpAction != Ssl::bumpSplice) {
        connState->startPeekAndSplice();
    } else if (!connState->splice())
        connState->clientConnection->close();
}
/// Stops TLS interception and converts the connection into a blind tunnel by
/// faking a CONNECT request carrying the preserved client bytes.
/// Returns the result of starting that fake request.
// NOTE(review): elided braces/else reconstructed from context.
bool
ConnStateData::splice()
{
    // normally we can splice here, because we just got client hello message

    if (fd_table[clientConnection->fd].ssl.get()) {
        // Restore default read methods
        fd_table[clientConnection->fd].read_method = &default_read_method;
        fd_table[clientConnection->fd].write_method = &default_write_method;
    }

    // XXX: assuming that there was an HTTP/1.1 CONNECT to begin with...
    // reset the current protocol to HTTP/1.1 (was "HTTPS" for the bumping process)
    transferProtocol = Http::ProtocolVersion();
    assert(!pipeline.empty());
    Http::StreamPointer context = pipeline.front();
    Must(context->http);
    ClientHttpRequest *http = context->http;
    HttpRequest::Pointer request = http->request;
    context->finished(); // the bumping CONNECT transaction is over
    if (transparent()) {
        // For transparent connections, make a new fake CONNECT request, now
        // with SNI as target. doCallout() checks, adaptations may need that.
        return fakeAConnectRequest("splice", preservedClientData);
    } else {
        // For non transparent connections make a new tunneled CONNECT, which
        // also sets the HttpRequest::flags::forceTunnel flag to avoid
        // respond with "Connection Established" to the client.
        // This fake CONNECT request required to allow use of SNI in
        // doCallout() checks and adaptations.
        return initiateTunneledRequest(request, Http::METHOD_CONNECT, "splice", preservedClientData);
    }
}
/// SslBump step2 driver. On the first call (still at step1) it runs the
/// ssl_bump ACL asynchronously and returns; once at step2, it creates an
/// unconfigured TLS context, feeds the saved ClientHello to OpenSSL to detect
/// early accept failures, and starts forwarding to the origin server.
// NOTE(review): a few elided lines (inBuf handling near the end, early
// returns) reconstructed from context — verify against upstream.
void
ConnStateData::startPeekAndSplice()
{
    // This is the Step2 of the SSL bumping
    assert(sslServerBump);
    Http::StreamPointer context = pipeline.front();
    ClientHttpRequest *http = context ? context->http : nullptr;

    if (sslServerBump->step == Ssl::bumpStep1) {
        sslServerBump->step = Ssl::bumpStep2;
        // Run an accessList check to check if want to splice or continue bumping

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, sslServerBump->request.getRaw(), nullptr);
        acl_checklist->al = http ? http->al : nullptr;
        //acl_checklist->src_addr = params.conn->remote;
        //acl_checklist->my_addr = s->s;
        // client-first modes no longer make sense at step2; ban them
        acl_checklist->banAction(allow_t(ACCESS_ALLOWED, Ssl::bumpNone));
        acl_checklist->banAction(allow_t(ACCESS_ALLOWED, Ssl::bumpClientFirst));
        acl_checklist->banAction(allow_t(ACCESS_ALLOWED, Ssl::bumpServerFirst));
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(sslServerBump->request.getRaw(), log_uri);
        acl_checklist->nonBlockingCheck(httpsSslBumpStep2AccessCheckDone, this);
        return; // httpsSslBumpStep2AccessCheckDone() continues the job
    }

    // will call httpsPeeked() with certificate and connection, eventually
    Security::ContextPointer unConfiguredCTX(Ssl::createSSLContext(port->secure.signingCa.cert, port->secure.signingCa.pkey, port->secure));
    fd_table[clientConnection->fd].dynamicTlsContext = unConfiguredCTX;

    if (!httpsCreate(clientConnection, unConfiguredCTX))
        return;

    switchedToHttps_ = true;

    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    // replay the already-received ClientHello bytes through the client BIO
    bio->setReadBufData(inBuf);

    // Here squid should have all of the client hello message so the
    // tlsAttemptHandshake() should return 0.
    // This block exist only to force openSSL parse client hello and detect
    // ERR_SECURE_ACCEPT_FAIL error, which should be checked and splice if required.
    if (tlsAttemptHandshake(this, nullptr) < 0) {
        debugs(83, 2, "TLS handshake failed.");
        HttpRequest::Pointer request(http ? http->request : nullptr);
        if (!clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_SECURE_ACCEPT_FAIL))
            clientConnection->close();
        return;
    }

    // We need to reset inBuf here, to be used by incoming requests in the case
    // the connection is bumped (bytes were handed to the BIO above)
    inBuf.clear(); // NOTE(review): reconstructed; elided in paste

    debugs(83, 5, "Peek and splice at step2 done. Start forwarding the request!!! ");
    FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : NULL);
}
/// Resumes the paused client TLS negotiation after the peeking step:
/// releases the held client BIO and re-arms clientNegotiateSSL.
// NOTE(review): bio->hold(false) and assert lines were elided in this paste
// and reconstructed from context — verify against upstream.
void
ConnStateData::doPeekAndSpliceStep()
{
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    assert(b);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));

    debugs(33, 5, "PeekAndSplice mode, proceed with client negotiation. Currrent state:" << SSL_state_string_long(ssl));
    bio->hold(false); // stop holding back handshake bytes

    Comm::SetSelect(clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, this, 0);
    switchedToHttps_ = true;
}
/// Called (during server-first bumping) once the origin server certificate has
/// been peeked at; parks the server connection as pinned-idle on success, then
/// proceeds to generate the client-facing TLS context either way.
// NOTE(review): elided brace/else lines reconstructed from context.
void
ConnStateData::httpsPeeked(PinnedIdleContext pic)
{
    Must(sslServerBump != NULL);
    Must(sslServerBump->request == pic.request);
    Must(pipeline.empty() || pipeline.front()->http == nullptr || pipeline.front()->http->request == pic.request.getRaw());

    if (Comm::IsConnOpen(pic.connection)) {
        notePinnedConnectionBecameIdle(pic);
        debugs(33, 5, HERE << "bumped HTTPS server: " << sslConnectHostOrIp);
    } else
        debugs(33, 5, HERE << "Error while bumping: " << sslConnectHostOrIp);

    getSslContextStart();
}
3177 #endif /* USE_OPENSSL */
/// Builds and launches a fake tunneled request (method is usually CONNECT) so
/// that unsupported or spliced traffic can flow through the tunnel code path.
/// \param cause   the request that triggered tunneling (may supply host:port)
/// \param reason  short label for debugging only
/// \param payload client bytes to replay into the tunnel
/// \returns false when no destination host:port could be computed
// NOTE(review): elided lines (connectHost declaration, doCallouts, returns)
// reconstructed from context — verify against upstream.
bool
ConnStateData::initiateTunneledRequest(HttpRequest::Pointer const &cause, Http::MethodType const method, const char *reason, const SBuf &payload)
{
    // fake a CONNECT request to force connState to tunnel
    SBuf connectHost;
    unsigned short connectPort = 0;

    if (pinning.serverConnection != nullptr) {
        // already pinned: tunnel to the pinned server address
        static char ip[MAX_IPSTRLEN];
        pinning.serverConnection->remote.toHostStr(ip, sizeof(ip));
        connectHost.assign(ip);
        connectPort = pinning.serverConnection->remote.port();
    } else if (cause && cause->method == Http::METHOD_CONNECT) {
        // We are inside a (not fully established) CONNECT request
        connectHost = cause->url.host();
        connectPort = cause->url.port();
    } else {
        debugs(33, 2, "Not able to compute URL, abort request tunneling for " << reason);
        return false;
    }

    debugs(33, 2, "Request tunneling for " << reason);
    ClientHttpRequest *http = buildFakeRequest(method, connectHost, connectPort, payload);
    HttpRequest::Pointer request = http->request;
    request->flags.forceTunnel = true; // skip "Connection Established" reply
    http->calloutContext = new ClientRequestContext(http);
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
/// For transparently intercepted connections: fabricates a CONNECT request
/// targeting the TLS SNI (when known) or the intercepted local IP, so that
/// callouts/adaptation run before the traffic is tunneled.
// NOTE(review): the SNI-vs-IP branch (#if USE_OPENSSL guard, else block) was
// elided in this paste and reconstructed from context — verify upstream.
bool
ConnStateData::fakeAConnectRequest(const char *reason, const SBuf &payload)
{
    debugs(33, 2, "fake a CONNECT request to force connState to tunnel for " << reason);

    SBuf connectHost;
    assert(transparent());
    const unsigned short connectPort = clientConnection->local.port();

#if USE_OPENSSL
    if (!tlsClientSni_.isEmpty())
        connectHost.assign(tlsClientSni_); // prefer the client-announced name
    else
#endif
    {
        // fall back on the intercepted destination address
        static char ip[MAX_IPSTRLEN];
        clientConnection->local.toHostStr(ip, sizeof(ip));
        connectHost.assign(ip);
    }

    ClientHttpRequest *http = buildFakeRequest(Http::METHOD_CONNECT, connectHost, connectPort, payload);

    http->calloutContext = new ClientRequestContext(http);
    HttpRequest::Pointer request = http->request;
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
/// Creates a synthetic ClientHttpRequest/Http::Stream pair targeting
/// useHost:usePort, wires it into the client stream machinery, and primes the
/// connection input buffer with the given payload. Used by the fake-CONNECT
/// tunneling paths; the caller finishes setup (callouts etc.).
// NOTE(review): elided lines (#if USE_AUTH guard, inBuf = payload, return)
// reconstructed from context — verify against upstream.
ClientHttpRequest *
ConnStateData::buildFakeRequest(Http::MethodType const method, SBuf &useHost, unsigned short usePort, const SBuf &payload)
{
    ClientHttpRequest *http = new ClientHttpRequest(this);
    Http::Stream *stream = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = stream->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = stream;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    http->uri = SBufToCstring(useHost);
    stream->flags.parsed_ok = 1; // Do we need it?
    stream->mayUseConnection(true);

    // fake requests are bounded by the client lifetime, not request_timeout
    AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "clientLifetimeTimeout",
                                     CommTimeoutCbPtrFun(clientLifetimeTimeout, stream->http));
    commSetConnTimeout(clientConnection, Config.Timeout.lifetime, timeoutCall);

    stream->registerWithConn();

    MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
    mx->tcpClient = clientConnection;
    // Setup Http::Request object. Maybe should be replaced by a call to (modified)
    // clientProcessRequest
    HttpRequest::Pointer request = new HttpRequest(mx);
    AnyP::ProtocolType proto = (method == Http::METHOD_NONE) ? AnyP::PROTO_AUTHORITY_FORM : AnyP::PROTO_HTTP;
    request->url.setScheme(proto, nullptr);
    request->method = method;
    request->url.host(useHost.c_str());
    request->url.port(usePort);
    http->initRequest(request.getRaw());

    request->manager(this, http->al);

    if (proto == AnyP::PROTO_HTTP)
        request->header.putStr(Http::HOST, useHost.c_str());

    request->sources |= ((switchedToHttps() || port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    if (getAuth())
        request->auth_user_request = getAuth();
#endif

    inBuf = payload; // replay preserved client bytes as the request body/stream
    flags.readMore = false;

    return http;
}
/// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
/// \returns false (after bookkeeping) when the listener failed to open
// NOTE(review): elided returns/braces reconstructed from context.
static bool
OpenedHttpSocket(const Comm::ConnectionPointer &c, const Ipc::FdNoteId portType)
{
    if (!Comm::IsConnOpen(c)) {
        Must(NHttpSockets > 0); // we tried to open some
        --NHttpSockets; // there will be fewer sockets than planned
        Must(HttpSockets[NHttpSockets] < 0); // no extra fds received

        if (!NHttpSockets) // we could not open any listen sockets at all
            fatalf("Unable to open %s",FdNote(portType));

        return false;
    }
    return true;
}
/// find any unused HttpSockets[] slot and store fd there or return false
// NOTE(review): found declaration and return reconstructed (elided in paste).
static bool
AddOpenedHttpSocket(const Comm::ConnectionPointer &conn)
{
    bool found = false;
    // negative entries are free slots reserved by clientHttpConnectionsOpen()
    for (int i = 0; i < NHttpSockets && !found; ++i) {
        if ((found = HttpSockets[i] < 0))
            HttpSockets[i] = conn->fd;
    }
    return found;
}
/// Walks the configured http(s)_port list, validates per-port SslBump/TLS
/// settings, and asks IPC to open one listener per usable port. Each opened
/// listener later triggers clientListenerConnectionOpened().
// NOTE(review): elided continue statements, #if USE_OPENSSL guards, and the
// NHttpSockets increment were reconstructed from context — verify upstream.
static void
clientHttpConnectionsOpen(void)
{
    for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
        const SBuf &scheme = AnyP::UriScheme(s->transport.protocol).image();

        if (MAXTCPLISTENPORTS == NHttpSockets) {
            debugs(1, DBG_IMPORTANT, "WARNING: You have too many '" << scheme << "_port' lines.");
            debugs(1, DBG_IMPORTANT, "         The limit is " << MAXTCPLISTENPORTS << " HTTP ports.");
            continue;
        }

#if USE_OPENSSL
        if (s->flags.tunnelSslBumping) {
            if (!Config.accessList.ssl_bump) {
                debugs(33, DBG_IMPORTANT, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << scheme << "_port " << s->s);
                s->flags.tunnelSslBumping = false;
            }
            if (!s->secure.staticContext && !s->secure.generateHostCertificates) {
                debugs(1, DBG_IMPORTANT, "Will not bump SSL at " << scheme << "_port " << s->s << " due to TLS initialization failure.");
                s->flags.tunnelSslBumping = false;
                if (s->transport.protocol == AnyP::PROTO_HTTP)
                    s->secure.encryptTransport = false;
            }
            if (s->flags.tunnelSslBumping) {
                // Create ssl_ctx cache for this port.
                Ssl::TheGlobalContextStorage.addLocalStorage(s->s, s->secure.dynamicCertMemCacheSize);
            }
        }
#endif

        if (s->secure.encryptTransport && !s->secure.staticContext) {
            debugs(1, DBG_CRITICAL, "ERROR: Ignoring " << scheme << "_port " << s->s << " due to TLS context initialization failure.");
            continue;
        }

        // Fill out a Comm::Connection which IPC will open as a listener for us
        // then pass back when active so we can start a TcpAcceptor subscription.
        s->listenConn = new Comm::Connection;
        s->listenConn->local = s->s;

        s->listenConn->flags = COMM_NONBLOCKING | (s->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
                               (s->flags.natIntercept ? COMM_INTERCEPTION : 0);

        typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
        if (s->transport.protocol == AnyP::PROTO_HTTP) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTP
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpAccept", CommAcceptCbPtrFun(httpAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33,2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened, s, Ipc::fdnHttpSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpSocket, listenCall);
        } else if (s->transport.protocol == AnyP::PROTO_HTTPS) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTPS
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpsAccept", CommAcceptCbPtrFun(httpsAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33, 2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened,
                                                    s, Ipc::fdnHttpsSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpsSocket, listenCall);
        }

        HttpSockets[NHttpSockets] = -1; // set in clientListenerConnectionOpened
        ++NHttpSockets;
    }
}
/// Opens one listener for the given port configuration and subscribes the
/// supplied accept callback; mirrors the per-port body of
/// clientHttpConnectionsOpen() for callers outside this file.
// NOTE(review): COMM_NONBLOCKING term and NHttpSockets increment were elided
// in this paste and reconstructed from context.
void
clientStartListeningOn(AnyP::PortCfgPointer &port, const RefCount< CommCbFunPtrCallT<CommAcceptCbPtrFun> > &subCall, const Ipc::FdNoteId fdNote)
{
    // Fill out a Comm::Connection which IPC will open as a listener for us
    port->listenConn = new Comm::Connection;
    port->listenConn->local = port->s;
    port->listenConn->flags =
        COMM_NONBLOCKING |
        (port->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
        (port->flags.natIntercept ? COMM_INTERCEPTION : 0);

    // route new connections to subCall
    typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
    Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
    AsyncCall::Pointer listenCall =
        asyncCall(33, 2, "clientListenerConnectionOpened",
                  ListeningStartedDialer(&clientListenerConnectionOpened,
                                         port, fdNote, sub));
    Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, port->listenConn, fdNote, listenCall);

    assert(NHttpSockets < MAXTCPLISTENPORTS);
    HttpSockets[NHttpSockets] = -1; // reserved; filled in clientListenerConnectionOpened
    ++NHttpSockets;
}
/// process clientHttpConnectionsOpen result
/// Starts a TcpAcceptor job on the successfully opened listener and records
/// its fd in HttpSockets[].
// NOTE(review): elided guard/return lines reconstructed from context.
static void
clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub)
{
    Must(s != NULL);

    if (!OpenedHttpSocket(s->listenConn, portTypeNote))
        return; // failure already accounted for

    Must(Comm::IsConnOpen(s->listenConn));

    // TCP: setup a job to handle accept() with subscribed handler
    AsyncJob::Start(new Comm::TcpAcceptor(s, FdNote(portTypeNote), sub));

    debugs(1, DBG_IMPORTANT, "Accepting " <<
           (s->flags.natIntercept ? "NAT intercepted " : "") <<
           (s->flags.tproxyIntercept ? "TPROXY intercepted " : "") <<
           (s->flags.tunnelSslBumping ? "SSL bumped " : "") <<
           (s->flags.accelSurrogate ? "reverse-proxy " : "")
           << FdNote(portTypeNote) << " connections at "
           << s->listenConn);

    Must(AddOpenedHttpSocket(s->listenConn)); // otherwise, we have received a fd we did not ask for
}
/// Opens all configured client-facing listeners (HTTP/HTTPS and FTP);
/// fatal if nothing could be opened at all.
void
clientOpenListenSockets(void)
{
    clientHttpConnectionsOpen();
    Ftp::StartListening();

    if (NHttpSockets < 1)
        fatal("No HTTP, HTTPS, or FTP ports configured");
}
/// Closes every open client listener (HTTP/HTTPS and FTP) and resets the
/// HttpSockets bookkeeping.
// NOTE(review): NHttpSockets reset was elided in this paste and reconstructed
// from context — verify against upstream.
void
clientConnectionsClose()
{
    for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
        if (s->listenConn != NULL) {
            debugs(1, DBG_IMPORTANT, "Closing HTTP(S) port " << s->listenConn->local);
            s->listenConn->close();
            s->listenConn = NULL;
        }
    }

    Ftp::StopListening();

    // TODO see if we can drop HttpSockets array entirely */
    for (int i = 0; i < NHttpSockets; ++i) {
        HttpSockets[i] = -1;
    }

    NHttpSockets = 0;
}
/// Compares the request's computed Vary key against the cached (virtual)
/// Vary object to decide whether the stored variant may be used.
/// \retval VARY_NONE   entry does not vary
/// \retval VARY_MATCH  request matches the stored variant key
/// \retval VARY_OTHER  a new variant key was computed; search continues
/// \retval VARY_CANCEL inconsistent/unhandleable variance
// NOTE(review): the VARY_* return statements and branch braces were elided in
// this paste and reconstructed from context — verify against upstream.
int
varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
{
    SBuf vary(request->vary_headers);
    int has_vary = entry->getReply()->header.has(Http::HdrType::VARY);
#if X_ACCELERATOR_VARY
    has_vary |=
        entry->getReply()->header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY);
#endif

    if (!has_vary || entry->mem_obj->vary_headers.isEmpty()) {
        if (!vary.isEmpty()) {
            /* Oops... something odd is going on here.. */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            request->vary_headers.clear();
            return VARY_CANCEL;
        }

        if (!has_vary) {
            /* This is not a varying object */
            return VARY_NONE;
        }

        /* virtual "vary" object found. Calculate the vary key and
         * continue the search
         */
        vary = httpMakeVaryMark(request, entry->getReply());

        if (!vary.isEmpty()) {
            request->vary_headers = vary;
            return VARY_OTHER;
        } else {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        }
    } else {
        if (vary.isEmpty()) {
            vary = httpMakeVaryMark(request, entry->getReply());

            if (!vary.isEmpty())
                request->vary_headers = vary;
        }

        if (vary.isEmpty()) {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        } else if (vary.cmp(entry->mem_obj->vary_headers) == 0) {
            return VARY_MATCH;
        } else {
            /* Oops.. we have already been here and still haven't
             * found the requested variant. Bail out
             */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            return VARY_CANCEL;
        }
    }
}
/// Allocates an ACLFilledChecklist for the given ACL and fills it from the
/// client transaction. Caller owns (and must delete) the returned checklist.
ACLFilledChecklist *
clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
{
    const auto checklist = new ACLFilledChecklist(acl, nullptr, nullptr);
    clientAclChecklistFill(*checklist, http);
    return checklist;
}
/// Populates an ACL checklist with request, access-log entry, and ident
/// details taken from the given client transaction.
void
clientAclChecklistFill(ACLFilledChecklist &checklist, ClientHttpRequest *http)
{
    checklist.setRequest(http->request);
    checklist.al = http->al;
    checklist.syncAle(http->request, http->log_uri);

    // TODO: If http->getConn is always http->request->clientConnectionManager,
    // then call setIdent() inside checklist.setRequest(). Otherwise, restore
    // USE_IDENT lost in commit 94439e4.
    ConnStateData * conn = http->getConn();
    // fall back to dash_str when the connection (or its ident) is gone
    const char *ident = (cbdataReferenceValid(conn) &&
                         conn && conn->clientConnection) ?
                        conn->clientConnection->rfc931 : dash_str;
    checklist.setIdent(ident);
}
/// Whether this client connection was intercepted (TPROXY or NAT) rather than
/// explicitly configured to use the proxy.
bool
ConnStateData::transparent() const
{
    return clientConnection != NULL && (clientConnection->flags & (COMM_TRANSPARENT|COMM_INTERCEPTION));
}
/// Creates the BodyPipe used to deliver the expected request body; a known
/// size configures the pipe directly while a negative size means chunked
/// encoding and starts the dechunking parser.
// NOTE(review): the size>=0/else branch and return were elided in this paste
// and reconstructed from context — verify against upstream.
BodyPipe::Pointer
ConnStateData::expectRequestBody(int64_t size)
{
    bodyPipe = new BodyPipe(this);
    if (size >= 0)
        bodyPipe->setBodySize(size);
    else
        startDechunkingRequest();
    return bodyPipe;
}
/// Estimates how many more request-body bytes must still be read from the
/// client: 0 when none are needed, -1 when unknown (size not yet known).
// NOTE(review): the leading !bodyPipe guard was elided in this paste and
// reconstructed from context.
int64_t
ConnStateData::mayNeedToReadMoreBody() const
{
    if (!bodyPipe)
        return 0; // request without a body or read/produced all body bytes

    if (!bodyPipe->bodySizeKnown())
        return -1; // probably need to read more, but we cannot be sure

    const int64_t needToProduce = bodyPipe->unproducedSize();
    const int64_t haveAvailable = static_cast<int64_t>(inBuf.length());

    if (needToProduce <= haveAvailable)
        return 0; // we have read what we need (but are waiting for pipe space)

    return needToProduce - haveAvailable;
}
/// Records that we will no longer read from the client (with the reason);
/// closes the connection if sending had already stopped too.
void
ConnStateData::stopReceiving(const char *error)
{
    debugs(33, 4, HERE << "receiving error (" << clientConnection << "): " << error <<
           "; old sending error: " <<
           (stoppedSending() ? stoppedSending_ : "none"));

    if (const char *oldError = stoppedReceiving()) {
        debugs(33, 3, HERE << "already stopped receiving: " << oldError);
        return; // nothing has changed as far as this connection is concerned
    }

    stoppedReceiving_ = error;

    if (const char *sendError = stoppedSending()) {
        debugs(33, 3, HERE << "closing because also stopped sending: " << sendError);
        clientConnection->close();
    }
}
/// Tells the body pipe (if any) that its content will not be consumed,
/// so the producer can discard rather than buffer it.
void
ConnStateData::expectNoForwarding()
{
    if (bodyPipe != NULL) {
        debugs(33, 4, HERE << "no consumer for virgin body " << bodyPipe->status());
        bodyPipe->expectNoConsumption();
    }
}
/// initialize dechunking state
void
ConnStateData::startDechunkingRequest()
{
    Must(bodyPipe != NULL);
    debugs(33, 5, HERE << "start dechunking" << bodyPipe->status());
    assert(!bodyParser); // only one dechunking session per request body
    bodyParser = new Http1::TeChunkedParser;
}
/// put parsed content into input buffer and clean up
// NOTE(review): the withSuccess guard and bodyParser deletion were elided in
// this paste and reconstructed from context — verify against upstream.
void
ConnStateData::finishDechunkingRequest(bool withSuccess)
{
    debugs(33, 5, HERE << "finish dechunking: " << withSuccess);

    if (bodyPipe != NULL) {
        debugs(33, 7, HERE << "dechunked tail: " << bodyPipe->status());
        BodyPipe::Pointer myPipe = bodyPipe;
        stopProducingFor(bodyPipe, withSuccess); // sets bodyPipe->bodySize()
        Must(!bodyPipe); // we rely on it being nil after we are done with body
        if (withSuccess) {
            Must(myPipe->bodySizeKnown());
            Http::StreamPointer context = pipeline.front();
            if (context != NULL && context->http && context->http->request)
                context->http->request->setContentLength(myPipe->bodySize());
        }
    }

    delete bodyParser;
    bodyParser = NULL;
}
// XXX: this is an HTTP/1-only operation
/// Delivers a 1xx control message to the client (if the connection is still
/// open and has an active transaction), remembering the success callback.
// NOTE(review): the isOpen() guard and early returns were elided in this
// paste and reconstructed from context — verify against upstream.
void
ConnStateData::sendControlMsg(HttpControlMsg msg)
{
    if (!isOpen()) {
        debugs(33, 3, HERE << "ignoring 1xx due to earlier closure");
        return;
    }

    // HTTP/1 1xx status messages are only valid when there is a transaction to trigger them
    if (!pipeline.empty()) {
        HttpReply::Pointer rep(msg.reply);
        Must(rep);
        // remember the callback
        cbControlMsgSent = msg.cbSuccess;

        typedef CommCbMemFunT<HttpControlMsgSink, CommIoCbParams> Dialer;
        AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, HttpControlMsgSink::wroteControlMsg);

        if (!writeControlMsgAndCall(rep.getRaw(), call)) {
            // but still inform the caller (so it may resume its operation)
            doneWithControlMsg();
        }
        return;
    }

    debugs(33, 3, HERE << " closing due to missing context for 1xx");
    clientConnection->close();
}
/// Finishes a control-message write and resumes any deferred request that was
/// waiting behind it.
void
ConnStateData::doneWithControlMsg()
{
    HttpControlMsgSink::doneWithControlMsg();

    if (Http::StreamPointer deferredRequest = pipeline.front()) {
        debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded after control msg wrote");
        ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
    }
}
/// Our close handler called by Comm when the pinned connection is closed
void
ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
{
    // FwdState might repin a failed connection sooner than this close
    // callback is called for the failed connection.
    assert(pinning.serverConnection == io.conn);
    pinning.closeHandler = NULL; // Comm unregisters handlers before calling
    const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
    pinning.serverConnection->noteClosure();
    unpinConnection(false); // do not close again; Comm is already closing it

    if (sawZeroReply && clientConnection != NULL) {
        debugs(33, 3, "Closing client connection on pinned zero reply.");
        clientConnection->close();
    }
}
/// Pins a server connection that is still serving a request (so it is not
/// monitored for idle closures yet, unlike notePinnedConnectionBecameIdle()).
void
ConnStateData::pinBusyConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest::Pointer &request)
{
    pinConnection(pinServer, *request);
}
/// Pins a now-idle server connection and starts watching it for remote
/// closures; kicks the pipeline in case a request was waiting for it.
// NOTE(review): a Must(pic.request) line appears to have been elided in this
// paste and was reconstructed — verify against upstream.
void
ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
{
    Must(pic.connection);
    Must(pic.request);
    pinConnection(pic.connection, *pic.request);

    // monitor pinned server connection for remote-end closures.
    startPinnedConnectionMonitoring();

    if (pipeline.empty())
        kick(); // in case clientParseRequests() was blocked by a busy pic.connection
}
/// Forward future client requests using the given server connection.
// NOTE(review): elided early-return/braces reconstructed from context.
void
ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest &request)
{
    if (Comm::IsConnOpen(pinning.serverConnection) &&
            pinning.serverConnection->fd == pinServer->fd) {
        debugs(33, 3, "already pinned" << pinServer);
        return;
    }

    unpinConnection(true); // closes pinned connection, if any, and resets fields

    pinning.serverConnection = pinServer;

    debugs(33, 3, HERE << pinning.serverConnection);

    Must(pinning.serverConnection != NULL);

    const char *pinnedHost = "[unknown]";
    pinning.host = xstrdup(request.url.host());
    pinning.port = request.url.port();
    pinnedHost = pinning.host;
    pinning.pinned = true;
    if (CachePeer *aPeer = pinServer->getPeer())
        pinning.peer = cbdataReference(aPeer); // weak reference; validated on use
    pinning.auth = request.flags.connectionAuth;
    char stmp[MAX_IPSTRLEN];
    char desc[FD_DESC_SZ];
    snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
             (pinning.auth || !pinning.peer) ? pinnedHost : pinning.peer->name,
             clientConnection->remote.toUrl(stmp,MAX_IPSTRLEN),
             clientConnection->fd);
    fd_note(pinning.serverConnection->fd, desc);

    typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
    pinning.closeHandler = JobCallback(33, 5,
                                       Dialer, this, ConnStateData::clientPinnedConnectionClosed);
    // remember the pinned connection so that cb does not unpin a fresher one
    typedef CommCloseCbParams Params;
    Params &params = GetCommParams<Params>(pinning.closeHandler);
    params.conn = pinning.serverConnection;
    comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
}
/// [re]start monitoring pinned connection for peer closures so that we can
/// propagate them to an _idle_ client pinned to that peer
void
ConnStateData::startPinnedConnectionMonitoring()
{
    if (pinning.readHandler != NULL)
        return; // already monitoring

    typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
    pinning.readHandler = JobCallback(33, 3,
                                      Dialer, this, ConnStateData::clientPinnedConnectionRead);
    Comm::Read(pinning.serverConnection, pinning.readHandler);
}
/// Cancels the idle-pinned-connection read monitor, if one is active.
void
ConnStateData::stopPinnedConnectionMonitoring()
{
    if (pinning.readHandler != NULL) {
        Comm::ReadCancel(pinning.serverConnection->fd, pinning.readHandler);
        pinning.readHandler = NULL;
    }
}
/// Probes an idle pinned TLS server connection that became readable.
/// \returns true when monitoring was restarted (benign TLS activity such as
/// renegotiation) and false when the caller should treat the event as
/// data/closure and close the pinned connection.
// NOTE(review): the ssl-null guard, buf declaration, and return statements
// were elided in this paste and reconstructed from context — verify upstream.
bool
ConnStateData::handleIdleClientPinnedTlsRead()
{
    // A ready-for-reading connection means that the TLS server either closed
    // the connection, sent us some unexpected HTTP data, or started TLS
    // renegotiations. We should close the connection except for the last case.

    Must(pinning.serverConnection != nullptr);
    auto ssl = fd_table[pinning.serverConnection->fd].ssl.get();
    if (!ssl)
        return false; // not a TLS connection; nothing TLS-specific to handle

    char buf[1];
    const int readResult = SSL_read(ssl, buf, sizeof(buf));

    if (readResult > 0 || SSL_pending(ssl) > 0) {
        debugs(83, 2, pinning.serverConnection << " TLS application data read");
        return false;
    }

    switch(const int error = SSL_get_error(ssl, readResult)) {
    case SSL_ERROR_WANT_WRITE:
        debugs(83, DBG_IMPORTANT, pinning.serverConnection << " TLS SSL_ERROR_WANT_WRITE request for idle pinned connection");
    // fall through to restart monitoring, for now
    case SSL_ERROR_NONE:
    case SSL_ERROR_WANT_READ:
        startPinnedConnectionMonitoring();
        return true;

    default:
        debugs(83, 2, pinning.serverConnection << " TLS error: " << error);
        return false;
    }

    // not reached
    return true;
}
/// Our read handler called by Comm when the server either closes an idle pinned connection or
/// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
// NOTE(review): the #if USE_OPENSSL guard around handleIdleClientPinnedTlsRead
// was elided in this paste and reconstructed — verify against upstream.
void
ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
{
    pinning.readHandler = NULL; // Comm unregisters handlers before calling

    if (io.flag == Comm::ERR_CLOSING)
        return; // close handler will clean up

    Must(pinning.serverConnection == io.conn);

#if USE_OPENSSL
    if (handleIdleClientPinnedTlsRead())
        return; // benign TLS activity; monitoring restarted
#endif

    const bool clientIsIdle = pipeline.empty();

    debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " <<
           io.size << (clientIsIdle ? " with idle client" : ""));

    pinning.serverConnection->close();

    // If we are still sending data to the client, do not close now. When we are done sending,
    // ConnStateData::kick() checks pinning.serverConnection and will close.
    // However, if we are idle, then we must close to inform the idle client and minimize races.
    if (clientIsIdle && clientConnection != NULL)
        clientConnection->close();
}
/// Checks that the pinned server connection is still usable for the given
/// request/peer; unpins (and closes) it when any check fails.
/// \returns the (possibly now nil/closed) pinned connection.
// NOTE(review): the per-branch invalidation bookkeeping between the visible
// conditions was elided in this paste and reconstructed — verify upstream.
const Comm::ConnectionPointer
ConnStateData::validatePinnedConnection(HttpRequest *request, const CachePeer *aPeer)
{
    debugs(33, 7, HERE << pinning.serverConnection);

    bool valid = true;
    if (!Comm::IsConnOpen(pinning.serverConnection))
        valid = false;
    else if (pinning.auth && pinning.host && request && strcasecmp(pinning.host, request->url.host()) != 0)
        valid = false; // connection-auth pinning is host-specific
    else if (request && pinning.port != request->url.port())
        valid = false;
    else if (pinning.peer && !cbdataReferenceValid(pinning.peer))
        valid = false; // the peer was reconfigured away
    else if (aPeer != pinning.peer)
        valid = false;

    if (!valid) {
        /* The pinning info is not safe, remove any pinning info */
        unpinConnection(true);
    }

    return pinning.serverConnection;
}
/// Validates and hands the pinned server connection to the caller, pausing
/// the idle-read monitor while the caller uses it.
Comm::ConnectionPointer
ConnStateData::borrowPinnedConnection(HttpRequest *request, const CachePeer *aPeer)
{
    debugs(33, 7, pinning.serverConnection);
    if (validatePinnedConnection(request, aPeer) != NULL)
        stopPinnedConnectionMonitoring();

    return pinning.serverConnection; // closed if validation failed
}
/// Clears all pinning state, unregistering the close handler and optionally
/// closing the pinned server connection.
/// \param andClose whether to close the server-side socket as well
// NOTE(review): elided guards (pinning.peer check, andClose check) were
// reconstructed from context — verify against upstream.
void
ConnStateData::unpinConnection(const bool andClose)
{
    debugs(33, 3, HERE << pinning.serverConnection);

    if (pinning.peer)
        cbdataReferenceDone(pinning.peer);

    if (Comm::IsConnOpen(pinning.serverConnection)) {
        if (pinning.closeHandler != NULL) {
            comm_remove_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
            pinning.closeHandler = NULL;
        }

        stopPinnedConnectionMonitoring();

        // close the server side socket if requested
        if (andClose)
            pinning.serverConnection->close();
        pinning.serverConnection = NULL;
    }

    safe_free(pinning.host);

    pinning.zeroReply = false;

    /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
     * connection has gone away */
}
/// Ensures that a connection dying between transactions still produces an
/// access-log record (via a temporary ClientHttpRequest whose destructor logs)
/// when unlogged client bytes were received.
// NOTE(review): elided early returns reconstructed from context.
void
ConnStateData::checkLogging()
{
    // if we are parsing request body, its request is responsible for logging
    if (bodyPipe)
        return;

    // a request currently using this connection is responsible for logging
    if (!pipeline.empty() && pipeline.back()->mayUseConnection())
        return;

    /* Either we are waiting for the very first transaction, or
     * we are done with the Nth transaction and are waiting for N+1st.
     * XXX: We assume that if anything was added to inBuf, then it could
     * only be consumed by actions already covered by the above checks.
     */

    // do not log connections that closed after a transaction (it is normal)
    // TODO: access_log needs ACLs to match received-no-bytes connections
    if (pipeline.nrequests && inBuf.isEmpty())
        return;

    /* Create a temporary ClientHttpRequest object. Its destructor will log. */
    ClientHttpRequest http(this);
    http.req_sz = inBuf.length();
    // XXX: Or we died while waiting for the pinned connection to become idle.
    http.setErrorUri("error:transaction-end-before-headers");
}
/// Whether on_unsupported_protocol is configured and this connection is in a
/// state (intercepted bumping port, or server-first bump with a pinned
/// server) where unsupported-protocol traffic may be tunneled.
// NOTE(review): the #if USE_OPENSSL guard lines were elided in this paste and
// reconstructed from context — verify against upstream.
bool
ConnStateData::mayTunnelUnsupportedProto()
{
    return Config.accessList.on_unsupported_protocol
#if USE_OPENSSL
           &&
           ((port->flags.isIntercepted() && port->flags.tunnelSslBumping)
            || (serverBump() && pinning.serverConnection))
#endif
           ;
}
/// Lazily creates and returns the connection's annotation (note) collection.
// NOTE(review): the guard and return were elided in this paste and
// reconstructed from context — verify against upstream.
NotePairs::Pointer
ConnStateData::notes()
{
    if (!theNotes)
        theNotes = new NotePairs;
    return theNotes;
}
3992 operator <<(std::ostream
&os
, const ConnStateData::PinnedIdleContext
&pic
)
3994 return os
<< pic
.connection
<< ", request=" << pic
.request
;