]> git.ipfire.org Git - thirdparty/squid.git/blob - src/client_side.cc
SslBump: Disable OpenSSL TLSv1.3 support for older TLS traffic (#588)
[thirdparty/squid.git] / src / client_side.cc
1 /*
2 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 33 Client-side Routines */
10
11 /**
12 \defgroup ClientSide Client-Side Logics
13 *
14 \section cserrors Errors and client side
15 *
16 \par Problem the first:
17 * the store entry is no longer authoritative on the
18 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
19 * of client_side_reply.c.
20 * Problem the second: resources are wasted if we delay in cleaning up.
21 * Problem the third we can't depend on a connection close to clean up.
22 *
23 \par Nice thing the first:
24 * Any step in the stream can callback with data
25 * representing an error.
26 * Nice thing the second: once you stop requesting reads from upstream,
27 * upstream can be stopped too.
28 *
29 \par Solution #1:
30 * Error has a callback mechanism to hand over a membuf
31 * with the error content. The failing node pushes that back as the
32 * reply. Can this be generalised to reduce duplicate efforts?
33 * A: Possibly. For now, only one location uses this.
34 * How to deal with pre-stream errors?
35 * Tell client_side_reply that we *want* an error page before any
36 * stream calls occur. Then we simply read as normal.
37 *
38 *
39 \section pconn_logic Persistent connection logic:
40 *
41 \par
42 * requests (httpClientRequest structs) get added to the connection
43 * list, with the current one being chr
44 *
45 \par
46 * The request is *immediately* kicked off, and data flows through
47 * to clientSocketRecipient.
48 *
49 \par
50 * If the data that arrives at clientSocketRecipient is not for the current
51 * request, clientSocketRecipient simply returns, without requesting more
52 * data, or sending it.
53 *
54 \par
55 * ConnStateData::kick() will then detect the presence of data in
56  * the next ClientHttpRequest, and will send it, re-establishing the
57 * data flow.
58 */
59
60 #include "squid.h"
61 #include "acl/FilledChecklist.h"
62 #include "anyp/PortCfg.h"
63 #include "base/Subscription.h"
64 #include "base/TextException.h"
65 #include "CachePeer.h"
66 #include "client_db.h"
67 #include "client_side.h"
68 #include "client_side_reply.h"
69 #include "client_side_request.h"
70 #include "ClientRequestContext.h"
71 #include "clientStream.h"
72 #include "comm.h"
73 #include "comm/Connection.h"
74 #include "comm/Loops.h"
75 #include "comm/Read.h"
76 #include "comm/TcpAcceptor.h"
77 #include "comm/Write.h"
78 #include "CommCalls.h"
79 #include "errorpage.h"
80 #include "fd.h"
81 #include "fde.h"
82 #include "fqdncache.h"
83 #include "FwdState.h"
84 #include "globals.h"
85 #include "helper.h"
86 #include "helper/Reply.h"
87 #include "http.h"
88 #include "http/one/RequestParser.h"
89 #include "http/one/TeChunkedParser.h"
90 #include "http/Stream.h"
91 #include "HttpHdrContRange.h"
92 #include "HttpHeaderTools.h"
93 #include "HttpReply.h"
94 #include "HttpRequest.h"
95 #include "ident/Config.h"
96 #include "ident/Ident.h"
97 #include "internal.h"
98 #include "ipc/FdNotes.h"
99 #include "ipc/StartListening.h"
100 #include "log/access_log.h"
101 #include "MemBuf.h"
102 #include "MemObject.h"
103 #include "mime_header.h"
104 #include "parser/Tokenizer.h"
105 #include "profiler/Profiler.h"
106 #include "proxyp/Header.h"
107 #include "proxyp/Parser.h"
108 #include "security/NegotiationHistory.h"
109 #include "servers/forward.h"
110 #include "SquidConfig.h"
111 #include "SquidTime.h"
112 #include "StatCounters.h"
113 #include "StatHist.h"
114 #include "Store.h"
115 #include "TimeOrTag.h"
116 #include "tools.h"
117
118 #if USE_AUTH
119 #include "auth/UserRequest.h"
120 #endif
121 #if USE_DELAY_POOLS
122 #include "ClientInfo.h"
123 #include "MessageDelayPools.h"
124 #endif
125 #if USE_OPENSSL
126 #include "ssl/bio.h"
127 #include "ssl/context_storage.h"
128 #include "ssl/gadgets.h"
129 #include "ssl/helper.h"
130 #include "ssl/ProxyCerts.h"
131 #include "ssl/ServerBump.h"
132 #include "ssl/support.h"
133 #endif
134
135 // for tvSubUsec() which should be in SquidTime.h
136 #include "util.h"
137
138 #include <climits>
139 #include <cmath>
140 #include <limits>
141
142 #if HAVE_SYSTEMD_SD_DAEMON_H
143 #include <systemd/sd-daemon.h>
144 #endif
145
146 #if LINGERING_CLOSE
147 #define comm_close comm_lingering_close
148 #endif
149
150 /// dials clientListenerConnectionOpened call
151 class ListeningStartedDialer: public CallDialer, public Ipc::StartListeningCb
152 {
153 public:
154 typedef void (*Handler)(AnyP::PortCfgPointer &portCfg, const Ipc::FdNoteId note, const Subscription::Pointer &sub);
155 ListeningStartedDialer(Handler aHandler, AnyP::PortCfgPointer &aPortCfg, const Ipc::FdNoteId note, const Subscription::Pointer &aSub):
156 handler(aHandler), portCfg(aPortCfg), portTypeNote(note), sub(aSub) {}
157
158 virtual void print(std::ostream &os) const {
159 startPrint(os) <<
160 ", " << FdNote(portTypeNote) << " port=" << (void*)&portCfg << ')';
161 }
162
163 virtual bool canDial(AsyncCall &) const { return true; }
164 virtual void dial(AsyncCall &) { (handler)(portCfg, portTypeNote, sub); }
165
166 public:
167 Handler handler;
168
169 private:
170 AnyP::PortCfgPointer portCfg; ///< from HttpPortList
171 Ipc::FdNoteId portTypeNote; ///< Type of IPC socket being opened
172 Subscription::Pointer sub; ///< The handler to be subscribed for this connection listener
173 };
174
175 static void clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub);
176
177 static IOACB httpAccept;
178 static CTCB clientLifetimeTimeout;
179 #if USE_IDENT
180 static IDCB clientIdentDone;
181 #endif
182 static int clientIsContentLengthValid(HttpRequest * r);
183 static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength);
184
185 static void clientUpdateStatHistCounters(const LogTags &logType, int svc_time);
186 static void clientUpdateStatCounters(const LogTags &logType);
187 static void clientUpdateHierCounters(HierarchyLogEntry *);
188 static bool clientPingHasFinished(ping_data const *aPing);
189 void prepareLogWithRequestDetails(HttpRequest *, AccessLogEntry::Pointer &);
190 static void ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn);
191
192 char *skipLeadingSpace(char *aString);
193
#if USE_IDENT
/// Ident lookup callback: records the looked-up user name (or a dash when
/// the lookup produced none) on the client connection.
static void
clientIdentDone(const char *ident, void *data)
{
    auto *conn = static_cast<ConnStateData *>(data);
    const char *user = ident ? ident : dash_str;
    xstrncpy(conn->clientConnection->rfc931, user, USER_IDENT_SZ);
}
#endif
202
203 void
204 clientUpdateStatCounters(const LogTags &logType)
205 {
206 ++statCounter.client_http.requests;
207
208 if (logType.isTcpHit())
209 ++statCounter.client_http.hits;
210
211 if (logType.oldType == LOG_TCP_HIT)
212 ++statCounter.client_http.disk_hits;
213 else if (logType.oldType == LOG_TCP_MEM_HIT)
214 ++statCounter.client_http.mem_hits;
215 }
216
/// records one transaction's service time in the LogTags-appropriate
/// client_http service-time histogram (plus the all-requests histogram)
void
clientUpdateStatHistCounters(const LogTags &logType, int svc_time)
{
    statCounter.client_http.allSvcTime.count(svc_time);
    /**
     * The idea here is not to be complete, but to get service times
     * for only well-defined types. For example, we don't include
     * LOG_TCP_REFRESH_FAIL because it's not really a cache hit
     * (we *tried* to validate it, but failed).
     */

    switch (logType.oldType) {

    case LOG_TCP_REFRESH_UNMODIFIED:
        statCounter.client_http.nearHitSvcTime.count(svc_time);
        break;

    case LOG_TCP_INM_HIT:
    case LOG_TCP_IMS_HIT:
        statCounter.client_http.nearMissSvcTime.count(svc_time);
        break;

    case LOG_TCP_HIT:
    case LOG_TCP_MEM_HIT:
    case LOG_TCP_OFFLINE_HIT:
        statCounter.client_http.hitSvcTime.count(svc_time);
        break;

    case LOG_TCP_MISS:
    case LOG_TCP_CLIENT_REFRESH_MISS:
        statCounter.client_http.missSvcTime.count(svc_time);
        break;

    default:
        /* make compiler warnings go away */
        break;
    }
}
258
259 bool
260 clientPingHasFinished(ping_data const *aPing)
261 {
262 if (0 != aPing->stop.tv_sec && 0 != aPing->start.tv_sec)
263 return true;
264
265 return false;
266 }
267
/// updates ICP, cache-digest, and netdb usage counters according to
/// how this request was (or was not) forwarded within the hierarchy
void
clientUpdateHierCounters(HierarchyLogEntry * someEntry)
{
    ping_data *i;

    switch (someEntry->code) {
#if USE_CACHE_DIGESTS

    case CD_PARENT_HIT:

    case CD_SIBLING_HIT:
        ++ statCounter.cd.times_used;
        break;
#endif

    case SIBLING_HIT:

    case PARENT_HIT:

    case FIRST_PARENT_MISS:

    case CLOSEST_PARENT_MISS:
        ++ statCounter.icp.times_used;
        i = &someEntry->ping;

        // only count query service time for pings that actually completed
        if (clientPingHasFinished(i))
            statCounter.icp.querySvcTime.count(tvSubUsec(i->start, i->stop));

        if (i->timeout)
            ++ statCounter.icp.query_timeouts;

        break;

    case CLOSEST_PARENT:

    case CLOSEST_DIRECT:
        ++ statCounter.netdb.times_used;

        break;

    default:
        break;
    }
}
312
/// updates the global transaction statistics (hit/error counters,
/// service-time histograms, hierarchy counters) for this request
void
ClientHttpRequest::updateCounters()
{
    clientUpdateStatCounters(logType);

    if (request->errType != ERR_NONE)
        ++ statCounter.client_http.errors;

    // service time is measured from the transaction start to now
    clientUpdateStatHistCounters(logType,
                                 tvSubMsec(al->cache.start_time, current_time));

    clientUpdateHierCounters(&request->hier);
}
326
/// copies request-derived details (MIME headers, adaptation metadata,
/// method, version, hierarchy info, external ACL user) into the
/// access-log entry prior to logging
void
prepareLogWithRequestDetails(HttpRequest * request, AccessLogEntry::Pointer &aLogEntry)
{
    assert(request);
    assert(aLogEntry != NULL);

    if (Config.onoff.log_mime_hdrs) {
        MemBuf mb;
        mb.init();
        request->header.packInto(&mb);
        // This is the request after adaptation or redirection
        aLogEntry->headers.adapted_request = xstrdup(mb.buf);

        // the virgin request is saved to aLogEntry->request
        if (aLogEntry->request) {
            mb.reset();
            aLogEntry->request->header.packInto(&mb);
            aLogEntry->headers.request = xstrdup(mb.buf);
        }

#if USE_ADAPTATION
        // record the last adaptation meta headers, if any were produced
        const Adaptation::History::Pointer ah = request->adaptLogHistory();
        if (ah != NULL) {
            mb.reset();
            ah->lastMeta.packInto(&mb);
            aLogEntry->adapt.last_meta = xstrdup(mb.buf);
        }
#endif

        mb.clean();
    }

#if ICAP_CLIENT
    const Adaptation::Icap::History::Pointer ih = request->icapHistory();
    if (ih != NULL)
        ih->processingTime(aLogEntry->icap.processingTime);
#endif

    aLogEntry->http.method = request->method;
    aLogEntry->http.version = request->http_ver;
    aLogEntry->hier = request->hier;
    aLogEntry->cache.extuser = request->extacl_user.termedBuf();

    // Adapted request, if any, inherits and then collects all the stats, but
    // the virgin request gets logged instead; copy the stats to log them.
    // TODO: avoid losses by keeping these stats in a shared history object?
    if (aLogEntry->request) {
        aLogEntry->request->dnsWait = request->dnsWait;
        aLogEntry->request->errType = request->errType;
        aLogEntry->request->errDetail = request->errDetail;
    }
}
379
/// Assembles the access-log entry for this (possibly half-finished)
/// transaction, runs the access_log and stats_collection ACL checks,
/// writes the access-log record, and updates performance counters
/// when the stats_collection check allows it.
void
ClientHttpRequest::logRequest()
{
    if (!out.size && logType.oldType == LOG_TAG_NONE)
        debugs(33, 5, "logging half-baked transaction: " << log_uri);

    al->icp.opcode = ICP_INVALID;
    al->url = log_uri;
    debugs(33, 9, "clientLogRequest: al.url='" << al->url << "'");

    // prefer the reply already recorded on the ALE; otherwise fall back
    // to the freshest reply of the store entry used for logging
    const auto findReply = [this]() -> const HttpReply * {
        if (al->reply)
            return al->reply.getRaw();
        if (const auto le = loggingEntry())
            return le->hasFreshestReply();
        return nullptr;
    };
    if (const auto reply = findReply()) {
        al->http.code = reply->sline.status();
        al->http.content_type = reply->content_type.termedBuf();
    }

    debugs(33, 9, "clientLogRequest: http.code='" << al->http.code << "'");

    if (loggingEntry() && loggingEntry()->mem_obj && loggingEntry()->objectLen() >= 0)
        al->cache.objectSize = loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?

    al->http.clientRequestSz.header = req_sz;
    // the virgin request is saved to al->request
    if (al->request && al->request->body_pipe)
        al->http.clientRequestSz.payloadData = al->request->body_pipe->producedSize();
    al->http.clientReplySz.header = out.headers_sz;
    // XXX: calculate without payload encoding or headers !!
    al->http.clientReplySz.payloadData = out.size - out.headers_sz; // pretend its all un-encoded data for now.

    al->cache.highOffset = out.offset;

    al->cache.code = logType;

    // transaction response time: start of transaction to now
    tvSub(al->cache.trTime, al->cache.start_time, current_time);

    if (request)
        prepareLogWithRequestDetails(request, al);

#if USE_OPENSSL && 0

    /* This is broken. Fails if the connection has been closed. Needs
     * to snarf the ssl details some place earlier..
     */
    if (getConn() != NULL)
        al->cache.ssluser = sslGetUserEmail(fd_table[getConn()->fd].ssl);

#endif

    /* Add notes (if we have a request to annotate) */
    if (request) {
        SBuf matched;
        for (auto h: Config.notes) {
            if (h->match(request, al->reply.getRaw(), al, matched)) {
                request->notes()->add(h->key(), matched);
                debugs(33, 3, h->key() << " " << matched);
            }
        }
        // The al->notes and request->notes must point to the same object.
        al->syncNotes(request);
    }

    ACLFilledChecklist checklist(NULL, request, NULL);
    if (al->reply) {
        checklist.reply = al->reply.getRaw();
        HTTPMSGLOCK(checklist.reply);
    }

    if (request) {
        HTTPMSGUNLOCK(al->adapted_request);
        al->adapted_request = request;
        HTTPMSGLOCK(al->adapted_request);
    }
    // no need checklist.syncAle(): already synced
    checklist.al = al;
    accessLogLog(al, &checklist);

    // honor the stats_collection ACL: skip counter updates when denied
    bool updatePerformanceCounters = true;
    if (Config.accessList.stats_collection) {
        ACLFilledChecklist statsCheck(Config.accessList.stats_collection, request, NULL);
        statsCheck.al = al;
        if (al->reply) {
            statsCheck.reply = al->reply.getRaw();
            HTTPMSGLOCK(statsCheck.reply);
        }
        updatePerformanceCounters = statsCheck.fastCheck().allowed();
    }

    if (updatePerformanceCounters) {
        if (request)
            updateCounters();

        if (getConn() != NULL && getConn()->clientConnection != NULL)
            clientdbUpdate(getConn()->clientConnection->remote, logType, AnyP::PROTO_HTTP, out.size);
    }
}
481
/// Releases memory owned by this transaction: URI strings, redirect
/// location, range-iteration state, and the request itself; then aborts
/// the attached client stream, if any.
void
ClientHttpRequest::freeResources()
{
    safe_free(uri);
    safe_free(redirect.location);
    range_iter.boundary.clean();
    clearRequest();

    // tell the downstream client stream to abort, if one is still attached
    if (client_stream.tail)
        clientStreamAbort((clientStreamNode *)client_stream.tail->data, this);
}
493
494 void
495 httpRequestFree(void *data)
496 {
497 ClientHttpRequest *http = (ClientHttpRequest *)data;
498 assert(http != NULL);
499 delete http;
500 }
501
/* This is a handler normally called by comm_close() */
void ConnStateData::connStateClosed(const CommCloseCbParams &)
{
    // triggers AsyncJob teardown: swanSong() and, eventually, the destructor
    deleteThis("ConnStateData::connStateClosed");
}
507
#if USE_AUTH
/// Associates (or disassociates) connection-level authentication
/// credentials with this client connection. A change or removal of
/// previously-set credentials is treated as a security event: receiving
/// stops or the connection is reset. \param by identifies the caller
/// for debugging.
void
ConnStateData::setAuth(const Auth::UserRequest::Pointer &aur, const char *by)
{
    // first-time setup: just remember the credentials (if any were given)
    if (auth_ == NULL) {
        if (aur != NULL) {
            debugs(33, 2, "Adding connection-auth to " << clientConnection << " from " << by);
            auth_ = aur;
        }
        return;
    }

    // clobbered with self-pointer
    // NP: something nasty is going on in Squid, but harmless.
    if (aur == auth_) {
        debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection << " from " << by);
        return;
    }

    /*
     * Connection-auth relies on a single set of credentials being preserved
     * for all requests on a connection once they have been setup.
     * There are several things which need to happen to preserve security
     * when connection-auth credentials change unexpectedly or are unset.
     *
     * 1) auth helper released from any active state
     *
     *    They can only be reserved by a handshake process which this
     *    connection can now never complete.
     *    This prevents helpers hanging when their connections close.
     *
     * 2) pinning is expected to be removed and server conn closed
     *
     *    The upstream link is authenticated with the same credentials.
     *    Expecting the same level of consistency we should have received.
     *    This prevents upstream being faced with multiple or missing
     *    credentials after authentication.
     *    NP: un-pin is left to the cleanup in ConnStateData::swanSong()
     *        we just trigger that cleanup here via comm_reset_close() or
     *        ConnStateData::stopReceiving()
     *
     * 3) the connection needs to close.
     *
     *    This prevents attackers injecting requests into a connection,
     *    or gateways wrongly multiplexing users into a single connection.
     *
     *    When credentials are missing closure needs to follow an auth
     *    challenge for best recovery by the client.
     *
     *    When credentials change there is nothing we can do but abort as
     *    fast as possible. Sending TCP RST instead of an HTTP response
     *    is the best-case action.
     */

    // clobbered with nul-pointer
    if (aur == NULL) {
        debugs(33, 2, "WARNING: Graceful closure on " << clientConnection << " due to connection-auth erase from " << by);
        auth_->releaseAuthServer();
        auth_ = NULL;
        // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
        // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
        //     we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
        stopReceiving("connection-auth removed");
        return;
    }

    // clobbered with alternative credentials
    if (aur != auth_) {
        debugs(33, 2, "ERROR: Closing " << clientConnection << " due to change of connection-auth from " << by);
        auth_->releaseAuthServer();
        auth_ = NULL;
        // this is a fatal type of problem.
        // Close the connection immediately with TCP RST to abort all traffic flow
        comm_reset_close(clientConnection);
        return;
    }

    /* NOT REACHABLE */
}
#endif
588
// cleans up before destructor is called
void
ConnStateData::swanSong()
{
    debugs(33, 2, HERE << clientConnection);
    checkLogging(); // log even half-baked transactions before we forget them

    flags.readMore = false;
    clientdbEstablished(clientConnection->remote, -1); /* decrement */
    pipeline.terminateAll(0);

    // XXX: Closing pinned conn is too harsh: The Client may want to continue!
    unpinConnection(true);

    Server::swanSong(); // closes the client connection

#if USE_AUTH
    // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
    setAuth(NULL, "ConnStateData::SwanSong cleanup");
#endif

    flags.swanSang = true; // the destructor checks this flag to detect improper teardown
}
612
613 bool
614 ConnStateData::isOpen() const
615 {
616 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
617 Comm::IsConnOpen(clientConnection) &&
618 !fd_table[clientConnection->fd].closing();
619 }
620
ConnStateData::~ConnStateData()
{
    debugs(33, 3, HERE << clientConnection);

    // the connection should have been closed by swanSong() already
    if (isOpen())
        debugs(33, DBG_IMPORTANT, "BUG: ConnStateData did not close " << clientConnection);

    // swanSong() sets flags.swanSang; its absence indicates improper teardown
    if (!flags.swanSang)
        debugs(33, DBG_IMPORTANT, "BUG: ConnStateData was not destroyed properly; " << clientConnection);

    // stop feeding any still-attached request body consumer
    if (bodyPipe != NULL)
        stopProducingFor(bodyPipe, false);

    delete bodyParser; // TODO: pool

#if USE_OPENSSL
    delete sslServerBump;
#endif
}
640
641 /**
642 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
643 * This is the client-side persistent connection flag. We need
644 * to set this relatively early in the request processing
645 * to handle hacks for broken servers and clients.
646 */
647 void
648 clientSetKeepaliveFlag(ClientHttpRequest * http)
649 {
650 HttpRequest *request = http->request;
651
652 debugs(33, 3, "http_ver = " << request->http_ver);
653 debugs(33, 3, "method = " << request->method);
654
655 // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
656 request->flags.proxyKeepalive = request->persistent();
657 }
658
659 /// checks body length of non-chunked requests
660 static int
661 clientIsContentLengthValid(HttpRequest * r)
662 {
663 // No Content-Length means this request just has no body, but conflicting
664 // Content-Lengths mean a message framing error (RFC 7230 Section 3.3.3 #4).
665 if (r->header.conflictingContentLength())
666 return 0;
667
668 switch (r->method.id()) {
669
670 case Http::METHOD_GET:
671
672 case Http::METHOD_HEAD:
673 /* We do not want to see a request entity on GET/HEAD requests */
674 return (r->content_length <= 0 || Config.onoff.request_entities);
675
676 default:
677 /* For other types of requests we don't care */
678 return 1;
679 }
680
681 /* NOT REACHED */
682 }
683
684 int
685 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)
686 {
687 if (Config.maxRequestBodySize &&
688 bodyLength > Config.maxRequestBodySize)
689 return 1; /* too large */
690
691 return 0;
692 }
693
/// whether this transaction asks for multiple byte ranges
/// (delegates to HttpRequest::multipartRangeRequest())
bool
ClientHttpRequest::multipartRangeRequest() const
{
    return request->multipartRangeRequest();
}
699
/// appends the terminating multipart boundary line ("--boundary--") to mb
void
clientPackTermBound(String boundary, MemBuf *mb)
{
    mb->appendf("\r\n--" SQUIDSTRINGPH "--\r\n", SQUIDSTRINGPRINT(boundary));
    debugs(33, 6, "buf offset: " << mb->size);
}
706
/// packs the per-range multipart boundary and part headers
/// (Content-Type, Content-Range) for one range spec into mb
void
clientPackRangeHdr(const HttpReplyPointer &rep, const HttpHdrRangeSpec * spec, String boundary, MemBuf * mb)
{
    HttpHeader hdr(hoReply);
    assert(rep);
    assert(spec);

    /* put boundary */
    debugs(33, 5, "appending boundary: " << boundary);
    /* rfc2046 requires to _prepend_ boundary with <crlf>! */
    mb->appendf("\r\n--" SQUIDSTRINGPH "\r\n", SQUIDSTRINGPRINT(boundary));

    /* stuff the header with required entries and pack it */

    if (rep->header.has(Http::HdrType::CONTENT_TYPE))
        hdr.putStr(Http::HdrType::CONTENT_TYPE, rep->header.getStr(Http::HdrType::CONTENT_TYPE));

    httpHeaderAddContRange(&hdr, *spec, rep->content_length);

    hdr.packInto(mb);
    hdr.clean();

    /* append <crlf> (we packed a header, not a reply) */
    mb->append("\r\n", 2);
}
732
/** returns expected content length for multi-range replies
 * note: assumes that httpHdrRangeCanonize has already been called
 * warning: assumes that HTTP headers for individual ranges at the
 * time of the actual assembly will be exactly the same as
 * the headers when clientMRangeCLen() is called */
int
ClientHttpRequest::mRangeCLen()
{
    // NOTE(review): clen is int64_t but the return type is int; a very
    // large multi-range reply could overflow — confirm callers' limits.
    int64_t clen = 0;
    MemBuf mb;

    assert(memObject());

    mb.init();
    HttpHdrRange::iterator pos = request->range->begin();

    while (pos != request->range->end()) {
        /* account for headers for this range */
        mb.reset();
        clientPackRangeHdr(&storeEntry()->mem().freshestReply(),
                           *pos, range_iter.boundary, &mb);
        clen += mb.size;

        /* account for range content */
        clen += (*pos)->length;

        debugs(33, 6, "clientMRangeCLen: (clen += " << mb.size << " + " << (*pos)->length << ") == " << clen);
        ++pos;
    }

    /* account for the terminating boundary */
    mb.reset();

    clientPackTermBound(range_iter.boundary, &mb);

    clen += mb.size;

    mb.clean();

    return clen;
}
774
775 /**
776 * generates a "unique" boundary string for multipart responses
777 * the caller is responsible for cleaning the string */
778 String
779 ClientHttpRequest::rangeBoundaryStr() const
780 {
781 const char *key;
782 String b(APP_FULLNAME);
783 b.append(":",1);
784 key = storeEntry()->getMD5Text();
785 b.append(key, strlen(key));
786 return b;
787 }
788
/**
 * Write a chunk of data to a client socket. If the reply is present,
 * send the reply headers down the wire too, and clean them up when
 * finished.
 * Pre-condition:
 *   The request is one backed by a connection, not an internal request.
 *   data context is not NULL
 *   There are no more entries in the stream chain.
 */
void
clientSocketRecipient(clientStreamNode * node, ClientHttpRequest * http,
                      HttpReply * rep, StoreIOBuffer receivedData)
{
    // do not try to deliver if client already ABORTED
    if (!http->getConn() || !cbdataReferenceValid(http->getConn()) || !Comm::IsConnOpen(http->getConn()->clientConnection))
        return;

    /* Test preconditions */
    assert(node != NULL);
    PROF_start(clientSocketRecipient);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     *   the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    assert(node->node.next == NULL);
    Http::StreamPointer context = dynamic_cast<Http::Stream *>(node->data.getRaw());
    assert(context != NULL);

    /* TODO: check offset is what we asked for */

    // TODO: enforces HTTP/1 MUST on pipeline order, but is irrelevant to HTTP/2
    if (context != http->getConn()->pipeline.front())
        context->deferRecipientForLater(node, rep, receivedData); // not this response's turn yet
    else if (http->getConn()->cbControlMsgSent) // 1xx to the user is pending
        context->deferRecipientForLater(node, rep, receivedData);
    else
        http->getConn()->handleReply(rep, receivedData);

    PROF_stop(clientSocketRecipient);
}
831
/**
 * Called when a downstream node is no longer interested in
 * our data. As we are a terminal node, this means on aborts
 * only
 */
void
clientSocketDetach(clientStreamNode * node, ClientHttpRequest * http)
{
    /* Test preconditions */
    assert(node != NULL);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     *   the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    /* Set null by ContextFree */
    assert(node->node.next == NULL);
    /* this is the assert discussed above */
    assert(NULL == dynamic_cast<Http::Stream *>(node->data.getRaw()));
    /* We are only called when the client socket shuts down.
     * Tell the prev pipeline member we're finished
     */
    clientStreamDetach(node, http);
}
857
/// arms the idle-client timeout and schedules a read for the next
/// request on this (now idle) persistent connection
void
ConnStateData::readNextRequest()
{
    debugs(33, 5, HERE << clientConnection << " reading next req");

    fd_note(clientConnection->fd, "Idle client: Waiting for next request");
    /**
     * Set the timeout BEFORE calling readSomeData().
     */
    typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(33, 5,
                                     TimeoutDialer, this, ConnStateData::requestTimeout);
    commSetConnTimeout(clientConnection, clientConnection->timeLeft(idleTimeout()), timeoutCall);

    readSomeData();
    /** Please don't do anything with the FD past here! */
}
875
/// re-delivers a response that was deferred while an earlier pipelined
/// response was still being written to the client
static void
ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn)
{
    debugs(33, 2, HERE << conn->clientConnection << " Sending next");

    /** If the client stream is waiting on a socket write to occur, then */

    if (deferredRequest->flags.deferred) {
        /** NO data is allowed to have been sent. */
        assert(deferredRequest->http->out.size == 0);
        /** defer now. */
        clientSocketRecipient(deferredRequest->deferredparams.node,
                              deferredRequest->http,
                              deferredRequest->deferredparams.rep,
                              deferredRequest->deferredparams.queuedBuffer);
    }

    /** otherwise, the request is still active in a callback somewhere,
     * and we are done
     */
}
897
/// Restarts request processing after finishing a response: closes the
/// connection on earlier errors or lost pinned server, parses any
/// pipelined request already buffered, pushes a deferred response, or
/// schedules the next read.
void
ConnStateData::kick()
{
    if (!Comm::IsConnOpen(clientConnection)) {
        debugs(33, 2, clientConnection << " Connection was closed");
        return;
    }

    if (pinning.pinned && !Comm::IsConnOpen(pinning.serverConnection)) {
        debugs(33, 2, clientConnection << " Connection was pinned but server side gone. Terminating client connection");
        clientConnection->close();
        return;
    }

    /** \par
     * We are done with the response, and we are either still receiving request
     * body (early response!) or have already stopped receiving anything.
     *
     * If we are still receiving, then clientParseRequest() below will fail.
     * (XXX: but then we will call readNextRequest() which may succeed and
     * execute a smuggled request as we are not done with the current request).
     *
     * If we stopped because we got everything, then try the next request.
     *
     * If we stopped receiving because of an error, then close now to avoid
     * getting stuck and to prevent accidental request smuggling.
     */

    if (const char *reason = stoppedReceiving()) {
        debugs(33, 3, "closing for earlier request error: " << reason);
        clientConnection->close();
        return;
    }

    /** \par
     * Attempt to parse a request from the request buffer.
     * If we've been fed a pipelined request it may already
     * be in our read buffer.
     *
     \par
     * This needs to fall through - if we're unlucky and parse the _last_ request
     * from our read buffer we may never re-register for another client read.
     */

    if (clientParseRequests()) {
        debugs(33, 3, clientConnection << ": parsed next request from buffer");
    }

    /** \par
     * Either we need to kick-start another read or, if we have
     * a half-closed connection, kill it after the last request.
     * This saves waiting for half-closed connections to finished being
     * half-closed _AND_ then, sometimes, spending "Timeout" time in
     * the keepalive "Waiting for next request" state.
     */
    if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
        debugs(33, 3, "half-closed client with no pending requests, closing");
        clientConnection->close();
        return;
    }

    /** \par
     * At this point we either have a parsed request (which we've
     * kicked off the processing for) or not. If we have a deferred
     * request (parsed but deferred for pipelining processing reasons)
     * then look at processing it. If not, simply kickstart
     * another read.
     */
    Http::StreamPointer deferredRequest = pipeline.front();
    if (deferredRequest != nullptr) {
        debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded");
        ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
    } else if (flags.readMore) {
        debugs(33, 3, clientConnection << ": calling readNextRequest()");
        readNextRequest();
    } else {
        // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
        debugs(33, DBG_IMPORTANT, MYNAME << "abandoning " << clientConnection);
    }
}
978
/// Notes that we must stop sending responses (due to the given error)
/// and closes the connection — unless we still need to drain the
/// remainder of a request body first.
void
ConnStateData::stopSending(const char *error)
{
    debugs(33, 4, HERE << "sending error (" << clientConnection << "): " << error <<
           "; old receiving error: " <<
           (stoppedReceiving() ? stoppedReceiving_ : "none"));

    if (const char *oldError = stoppedSending()) {
        debugs(33, 3, HERE << "already stopped sending: " << oldError);
        return; // nothing has changed as far as this connection is concerned
    }
    stoppedSending_ = error;

    if (!stoppedReceiving()) {
        if (const int64_t expecting = mayNeedToReadMoreBody()) {
            debugs(33, 5, HERE << "must still read " << expecting <<
                   " request body bytes with " << inBuf.length() << " unused");
            return; // wait for the request receiver to finish reading
        }
    }

    clientConnection->close();
}
1002
1003 void
1004 ConnStateData::afterClientWrite(size_t size)
1005 {
1006 if (pipeline.empty())
1007 return;
1008
1009 auto ctx = pipeline.front();
1010 if (size) {
1011 statCounter.client_http.kbytes_out += size;
1012 if (ctx->http->logType.isTcpHit())
1013 statCounter.client_http.hit_kbytes_out += size;
1014 }
1015 ctx->writeComplete(size);
1016 }
1017
1018 Http::Stream *
1019 ConnStateData::abortRequestParsing(const char *const uri)
1020 {
1021 ClientHttpRequest *http = new ClientHttpRequest(this);
1022 http->req_sz = inBuf.length();
1023 http->setErrorUri(uri);
1024 auto *context = new Http::Stream(clientConnection, http);
1025 StoreIOBuffer tempBuffer;
1026 tempBuffer.data = context->reqbuf;
1027 tempBuffer.length = HTTP_REQBUF_SZ;
1028 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
1029 clientReplyStatus, new clientReplyContext(http), clientSocketRecipient,
1030 clientSocketDetach, context, tempBuffer);
1031 return context;
1032 }
1033
1034 void
1035 ConnStateData::startShutdown()
1036 {
1037 // RegisteredRunner API callback - Squid has been shut down
1038
1039 // if connection is idle terminate it now,
1040 // otherwise wait for grace period to end
1041 if (pipeline.empty())
1042 endingShutdown();
1043 }
1044
1045 void
1046 ConnStateData::endingShutdown()
1047 {
1048 // RegisteredRunner API callback - Squid shutdown grace period is over
1049
1050 // force the client connection to close immediately
1051 // swanSong() in the close handler will cleanup.
1052 if (Comm::IsConnOpen(clientConnection))
1053 clientConnection->close();
1054 }
1055
1056 char *
1057 skipLeadingSpace(char *aString)
1058 {
1059 char *result = aString;
1060
1061 while (xisspace(*aString))
1062 ++aString;
1063
1064 return result;
1065 }
1066
1067 /**
1068 * 'end' defaults to NULL for backwards compatibility
1069 * remove default value if we ever get rid of NULL-terminated
1070 * request buffers.
1071 */
1072 const char *
1073 findTrailingHTTPVersion(const char *uriAndHTTPVersion, const char *end)
1074 {
1075 if (NULL == end) {
1076 end = uriAndHTTPVersion + strcspn(uriAndHTTPVersion, "\r\n");
1077 assert(end);
1078 }
1079
1080 for (; end > uriAndHTTPVersion; --end) {
1081 if (*end == '\n' || *end == '\r')
1082 continue;
1083
1084 if (xisspace(*end)) {
1085 if (strncasecmp(end + 1, "HTTP/", 5) == 0)
1086 return end + 1;
1087 else
1088 break;
1089 }
1090 }
1091
1092 return NULL;
1093 }
1094
/// Builds an absolute URL for a request received on a reverse-proxy
/// (accelerator) port, applying vhost/vport/defaultsite rewrites.
/// \returns a malloced URL string, or nullptr to keep the client-given URI
static char *
prepareAcceleratedURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
{
    int vhost = conn->port->vhost;
    int vport = conn->port->vport;
    static char ipbuf[MAX_IPSTRLEN]; // scratch buffer for the local-IP rewrite below

    /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */

    // cache_object:// URIs are already absolute; leave them alone
    static const SBuf cache_object("cache_object://");
    if (hp->requestUri().startsWith(cache_object))
        return nullptr; /* already in good shape */

    // XXX: re-use proper URL parser for this
    SBuf url = hp->requestUri(); // use full provided URI if we abort
    do { // use a loop so we can break out of it
        ::Parser::Tokenizer tok(url);
        if (tok.skip('/')) // origin-form URL already.
            break;

        // NOTE(review): re-reads conn->port->vhost directly; same value as
        // the vhost local cached above
        if (conn->port->vhost)
            return nullptr; /* already in good shape */

        // skip the URI scheme
        static const CharacterSet uriScheme = CharacterSet("URI-scheme","+-.") + CharacterSet::ALPHA + CharacterSet::DIGIT;
        static const SBuf uriSchemeEnd("://");
        if (!tok.skipAll(uriScheme) || !tok.skip(uriSchemeEnd))
            break;

        // skip the authority segment
        // RFC 3986 complex nested ABNF for "authority" boils down to this:
        static const CharacterSet authority = CharacterSet("authority","-._~%:@[]!$&'()*+,;=") +
                                              CharacterSet::HEXDIG + CharacterSet::ALPHA + CharacterSet::DIGIT;
        if (!tok.skipAll(authority))
            break;

        // reduce the remainder to an origin-form path, adding '/' if needed
        static const SBuf slashUri("/");
        const SBuf t = tok.remaining();
        if (t.isEmpty())
            url = slashUri;
        else if (t[0]=='/') // looks like path
            url = t;
        else if (t[0]=='?' || t[0]=='#') { // looks like query or fragment. fix '/'
            url = slashUri;
            url.append(t);
        } // else do nothing. invalid path

    } while(false);

#if SHOULD_REJECT_UNKNOWN_URLS
    // reject URI which are not well-formed even after the processing above
    // NOTE(review): dead code unless SHOULD_REJECT_UNKNOWN_URLS is defined;
    // abortRequestParsing() returns Http::Stream*, not char* — verify before enabling
    if (url.isEmpty() || url[0] != '/') {
        hp->parseStatusCode = Http::scBadRequest;
        return conn->abortRequestParsing("error:invalid-request");
    }
#endif

    // a negative vport means "use the port the client actually connected to"
    if (vport < 0)
        vport = conn->clientConnection->local.port();

    char *receivedHost = nullptr;
    if (vhost && (receivedHost = hp->getHostHeaderField())) {
        // vhost mode: rebuild the URL around the client-supplied Host header
        SBuf host(receivedHost);
        debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport);
        if (vport > 0) {
            // remove existing :port (if any), cope with IPv6+ without port
            const auto lastColonPos = host.rfind(':');
            if (lastColonPos != SBuf::npos && *host.rbegin() != ']') {
                host.chop(0, lastColonPos); // truncate until the last colon
            }
            host.appendf(":%d", vport);
        } // else nothing to alter port-wise.
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const auto url_sz = scheme.length() + host.length() + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH SQUIDSBUFPH, SQUIDSBUFPRINT(scheme), SQUIDSBUFPRINT(host), SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VHOST REWRITE: " << uri);
        return uri;
    } else if (conn->port->defaultsite /* && !vhost */) {
        // defaultsite mode: use the configured site name (plus explicit vport)
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn->port->defaultsite << " + vport=" << vport);
        char vportStr[32];
        vportStr[0] = '\0';
        if (vport > 0) {
            snprintf(vportStr, sizeof(vportStr),":%d",vport);
        }
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + strlen(conn->port->defaultsite) + sizeof(vportStr) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s%s" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), conn->port->defaultsite, vportStr, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << uri);
        return uri;
    } else if (vport > 0 /* && (!vhost || no Host:) */) {
        debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport);
        /* Put the local socket IP address as the hostname, with whatever vport we found */
        conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + sizeof(ipbuf) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), ipbuf, vport, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VPORT REWRITE: " << uri);
        return uri;
    }

    // no rewrite rule applied: caller keeps the original request URI
    return nullptr;
}
1202
1203 static char *
1204 buildUrlFromHost(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1205 {
1206 char *uri = nullptr;
1207 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1208 if (const char *host = hp->getHostHeaderField()) {
1209 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1210 const int url_sz = scheme.length() + strlen(host) + hp->requestUri().length() + 32;
1211 uri = static_cast<char *>(xcalloc(url_sz, 1));
1212 snprintf(uri, url_sz, SQUIDSBUFPH "://%s" SQUIDSBUFPH,
1213 SQUIDSBUFPRINT(scheme),
1214 host,
1215 SQUIDSBUFPRINT(hp->requestUri()));
1216 }
1217 return uri;
1218 }
1219
/// Builds an absolute URL for a request received inside a bumped CONNECT
/// tunnel, using the Host header, TLS SNI, or the original CONNECT target.
/// \returns a malloced URL string, or nullptr if the URI is already absolute
char *
ConnStateData::prepareTlsSwitchingURL(const Http1::RequestParserPointer &hp)
{
    Must(switchedToHttps());

    if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
        return nullptr; /* already in good shape */

    // prefer the Host header contents, when present
    char *uri = buildUrlFromHost(this, hp);
#if USE_OPENSSL
    if (!uri) {
        // fall back to the SNI name or the CONNECT request target
        Must(tlsConnectPort);
        Must(!tlsConnectHostOrIp.isEmpty());
        SBuf useHost;
        if (!tlsClientSni().isEmpty())
            useHost = tlsClientSni();
        else
            useHost = tlsConnectHostOrIp;

        const SBuf &scheme = AnyP::UriScheme(transferProtocol.protocol).image();
        // +32 covers the "://", ":port" decorations and the terminating NUL
        const int url_sz = scheme.length() + useHost.length() + hp->requestUri().length() + 32;
        uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH ":%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme),
                 SQUIDSBUFPRINT(useHost),
                 tlsConnectPort,
                 SQUIDSBUFPRINT(hp->requestUri()));
    }
#endif
    if (uri)
        debugs(33, 5, "TLS switching host rewrite: " << uri);
    return uri;
}
1253
1254 static char *
1255 prepareTransparentURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1256 {
1257 // TODO Must() on URI !empty when the parser supports throw. For now avoid assert().
1258 if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
1259 return nullptr; /* already in good shape */
1260
1261 char *uri = buildUrlFromHost(conn, hp);
1262 if (!uri) {
1263 /* Put the local socket IP address as the hostname. */
1264 static char ipbuf[MAX_IPSTRLEN];
1265 conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
1266 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1267 const int url_sz = sizeof(ipbuf) + hp->requestUri().length() + 32;
1268 uri = static_cast<char *>(xcalloc(url_sz, 1));
1269 snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
1270 SQUIDSBUFPRINT(scheme),
1271 ipbuf, conn->clientConnection->local.port(), SQUIDSBUFPRINT(hp->requestUri()));
1272 }
1273
1274 if (uri)
1275 debugs(33, 5, "TRANSPARENT REWRITE: " << uri);
1276 return uri;
1277 }
1278
/// Parses one HTTP request from inBuf using the given parser and, on success,
/// builds the paired Http::Stream/ClientHttpRequest with an absolute URL.
/// \returns NULL when more data is needed; otherwise a stream, possibly an
/// error-carrying one produced by abortRequestParsing()
Http::Stream *
ConnStateData::parseHttpRequest(const Http1::RequestParserPointer &hp)
{
    /* Attempt to parse the first line; this will define where the method, url, version and header begin */
    {
        Must(hp);

        // keep a raw copy in case we later decide to tunnel these bytes
        if (preservingClientData_)
            preservedClientData = inBuf;

        const bool parsedOk = hp->parse(inBuf);

        // sync the buffers after parsing.
        inBuf = hp->remaining();

        if (hp->needsMoreData()) {
            debugs(33, 5, "Incomplete request, waiting for end of request line");
            return NULL;
        }

        if (!parsedOk) {
            // pick an error pseudo-URI that distinguishes size violations
            const bool tooBig =
                hp->parseStatusCode == Http::scRequestHeaderFieldsTooLarge ||
                hp->parseStatusCode == Http::scUriTooLong;
            auto result = abortRequestParsing(
                              tooBig ? "error:request-too-large" : "error:invalid-request");
            // assume that remaining leftovers belong to this bad request
            if (!inBuf.isEmpty())
                consumeInput(inBuf.length());
            return result;
        }
    }

    /* We know the whole request is in parser now */
    debugs(11, 2, "HTTP Client " << clientConnection);
    debugs(11, 2, "HTTP Client REQUEST:\n---------\n" <<
           hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol() << "\n" <<
           hp->mimeHeader() <<
           "\n----------");

    /* deny CONNECT via accelerated ports */
    if (hp->method() == Http::METHOD_CONNECT && port != NULL && port->flags.accelSurrogate) {
        debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << transferProtocol << " Accelerator port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    /* RFC 7540 section 11.6 registers the method PRI as HTTP/2 specific
     * Deny "PRI" method if used in HTTP/1.x or 0.9 versions.
     * If seen it signals a broken client or proxy has corrupted the traffic.
     */
    if (hp->method() == Http::METHOD_PRI && hp->messageProtocol() < Http::ProtocolVersion(2,0)) {
        debugs(33, DBG_IMPORTANT, "WARNING: PRI method received on " << transferProtocol << " port " << port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:method-not-allowed");
    }

    // reject methods the parser could not map to a known token
    if (hp->method() == Http::METHOD_NONE) {
        debugs(33, DBG_IMPORTANT, "WARNING: Unsupported method: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return abortRequestParsing("error:unsupported-request-method");
    }

    // Process headers after request line
    debugs(33, 3, "complete request received. " <<
           "prefix_sz = " << hp->messageHeaderSize() <<
           ", request-line-size=" << hp->firstLineSize() <<
           ", mime-header-size=" << hp->headerBlockSize() <<
           ", mime header block:\n" << hp->mimeHeader() << "\n----------");

    /* Ok, all headers are received */
    ClientHttpRequest *http = new ClientHttpRequest(this);

    http->req_sz = hp->messageHeaderSize();
    Http::Stream *result = new Http::Stream(clientConnection, http);

    // buffer the reply stream will write into
    StoreIOBuffer tempBuffer;
    tempBuffer.data = result->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // wire the reply-producing and socket-writing ends of the client stream
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = result;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    /* set url */
    debugs(33,5, "Prepare absolute URL from " <<
           (transparent()?"intercept":(port->flags.accelSurrogate ? "accel":"")));
    /* Rewrite the URL in transparent or accelerator mode */
    /* NP: there are several cases to traverse here:
     *  - standard mode (forward proxy)
     *  - transparent mode (TPROXY)
     *  - transparent mode with failures
     *  - intercept mode (NAT)
     *  - intercept mode with failures
     *  - accelerator mode (reverse proxy)
     *  - internal relative-URL
     *  - mixed combos of the above with internal URL
     *  - remote interception with PROXY protocol
     *  - remote reverse-proxy with PROXY protocol
     */
    if (switchedToHttps()) {
        http->uri = prepareTlsSwitchingURL(hp);
    } else if (transparent()) {
        /* intercept or transparent mode, properly working with no failures */
        http->uri = prepareTransparentURL(this, hp);

    } else if (internalCheck(hp->requestUri())) { // NP: only matches relative-URI
        /* internal URL mode */
        /* prepend our name & port */
        http->uri = xstrdup(internalLocalUri(NULL, hp->requestUri()));
        // We just re-wrote the URL. Must replace the Host: header.
        // But have not parsed there yet!! flag for local-only handling.
        http->flags.internal = true;

    } else if (port->flags.accelSurrogate) {
        /* accelerator mode */
        http->uri = prepareAcceleratedURL(this, hp);
        http->flags.accel = true;
    }

    if (!http->uri) {
        /* No special rewrites have been applied above, use the
         * requested url. may be rewritten later, so make extra room */
        int url_sz = hp->requestUri().length() + Config.appendDomainLen + 5;
        http->uri = (char *)xcalloc(url_sz, 1);
        SBufToCstring(http->uri, hp->requestUri());
    }

    result->flags.parsed_ok = 1;
    return result;
}
1414
1415 bool
1416 ConnStateData::connFinishedWithConn(int size)
1417 {
1418 if (size == 0) {
1419 if (pipeline.empty() && inBuf.isEmpty()) {
1420 /* no current or pending requests */
1421 debugs(33, 4, HERE << clientConnection << " closed");
1422 return true;
1423 } else if (!Config.onoff.half_closed_clients) {
1424 /* admin doesn't want to support half-closed client sockets */
1425 debugs(33, 3, HERE << clientConnection << " aborted (half_closed_clients disabled)");
1426 pipeline.terminateAll(0);
1427 return true;
1428 }
1429 }
1430
1431 return false;
1432 }
1433
/// Removes byteCount bytes from the beginning of the input buffer.
/// \param byteCount must be positive and no larger than inBuf.length()
void
ConnStateData::consumeInput(const size_t byteCount)
{
    assert(byteCount > 0 && byteCount <= inBuf.length());
    inBuf.consume(byteCount);
    debugs(33, 5, "inBuf has " << inBuf.length() << " unused bytes");
}
1441
1442 void
1443 ConnStateData::clientAfterReadingRequests()
1444 {
1445 // Were we expecting to read more request body from half-closed connection?
1446 if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection->fd)) {
1447 debugs(33, 3, HERE << "truncated body: closing half-closed " << clientConnection);
1448 clientConnection->close();
1449 return;
1450 }
1451
1452 if (flags.readMore)
1453 readSomeData();
1454 }
1455
1456 void
1457 ConnStateData::quitAfterError(HttpRequest *request)
1458 {
1459 // From HTTP p.o.v., we do not have to close after every error detected
1460 // at the client-side, but many such errors do require closure and the
1461 // client-side code is bad at handling errors so we play it safe.
1462 if (request)
1463 request->flags.proxyKeepalive = false;
1464 flags.readMore = false;
1465 debugs(33,4, HERE << "Will close after error: " << clientConnection);
1466 }
1467
#if USE_OPENSSL
/// Serves an SslBump error that was detected (and delayed) while processing
/// CONNECT, or detects a server-certificate domain mismatch now that the
/// bumped request reveals the intended host name.
/// \retval true an error response was initiated; the caller must stop
/// \retval false regular request processing may continue
bool ConnStateData::serveDelayedError(Http::Stream *context)
{
    ClientHttpRequest *http = context->http;

    if (!sslServerBump)
        return false;

    assert(sslServerBump->entry);
    // Did we create an error entry while processing CONNECT?
    if (!sslServerBump->entry->isEmpty()) {
        quitAfterError(http->request);

        // Get the saved error entry and send it to the client by replacing the
        // ClientHttpRequest store entry with it.
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert(repContext);
        // fixed typo in the debug message: "delated" -> "delayed"
        debugs(33, 5, "Responding with delayed error for " << http->uri);
        repContext->setReplyToStoreEntry(sslServerBump->entry, "delayed SslBump error");

        // Get error details from the fake certificate-peeking request.
        http->request->detailError(sslServerBump->request->errType, sslServerBump->request->errDetail);
        context->pullData();
        return true;
    }

    // In bump-server-first mode, we have not necessarily seen the intended
    // server name at certificate-peeking time. Check for domain mismatch now,
    // when we can extract the intended name from the bumped HTTP request.
    if (const Security::CertPointer &srvCert = sslServerBump->serverCert) {
        HttpRequest *request = http->request;
        if (!Ssl::checkX509ServerValidity(srvCert.get(), request->url.host())) {
            debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
                   "does not match domainname " << request->url.host());

            // sslproxy_cert_error may excuse the mismatch
            bool allowDomainMismatch = false;
            if (Config.ssl_client.cert_error) {
                ACLFilledChecklist check(Config.ssl_client.cert_error, request, dash_str);
                check.al = http->al;
                check.sslErrors = new Security::CertErrors(Security::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH, srvCert));
                check.syncAle(request, http->log_uri);
                allowDomainMismatch = check.fastCheck().allowed();
                delete check.sslErrors;
                check.sslErrors = NULL;
            }

            if (!allowDomainMismatch) {
                quitAfterError(request);

                clientStreamNode *node = context->getClientReplyContext();
                clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
                assert (repContext);

                request->hier = sslServerBump->request->hier;

                // Create an error object and fill it
                const auto err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request, http->al);
                err->src_addr = clientConnection->remote;
                Ssl::ErrorDetail *errDetail = new Ssl::ErrorDetail(
                    SQUID_X509_V_ERR_DOMAIN_MISMATCH,
                    srvCert.get(), nullptr);
                err->detail = errDetail;
                repContext->setReplyToError(request->method, err);
                assert(context->http->out.offset == 0);
                context->pullData();
                return true;
            }
        }
    }

    return false;
}
#endif // USE_OPENSSL
1542
/// ConnStateData::tunnelOnError() wrapper. Reduces code changes. TODO: Remove.
/// \param context must be the front of conn's pipeline (asserted below)
/// \note the request parameter is accepted for interface compatibility only;
///       it is not used here
bool
clientTunnelOnError(ConnStateData *conn, Http::StreamPointer &context, HttpRequest::Pointer &request, const HttpRequestMethod& method, err_type requestError)
{
    assert(conn);
    assert(conn->pipeline.front() == context);
    return conn->tunnelOnError(method, requestError);
}
1551
/// initiate tunneling if possible or return false otherwise
/// Consults on_unsupported_protocol ACLs and, when they say "tunnel",
/// hands the preserved raw client bytes to a blind TCP tunnel.
/// \note the method parameter is currently unused; the tunnel is started
///       with Http::METHOD_NONE
bool
ConnStateData::tunnelOnError(const HttpRequestMethod &method, const err_type requestError)
{
    // tunneling is an opt-in feature controlled by on_unsupported_protocol
    if (!Config.accessList.on_unsupported_protocol) {
        debugs(33, 5, "disabled; send error: " << requestError);
        return false;
    }

    // we can only tunnel if we still hold a copy of the raw client bytes
    if (!preservingClientData_) {
        debugs(33, 3, "may have forgotten client data; send error: " << requestError);
        return false;
    }

    // the current transaction, if any, supplies ACL matching context
    const auto context = pipeline.front();
    const auto http = context ? context->http : nullptr;
    const auto request = http ? http->request : nullptr;

    ACLFilledChecklist checklist(Config.accessList.on_unsupported_protocol, request, nullptr);
    checklist.al = http ? http->al : nullptr;
    checklist.requestErrorType = requestError;
    checklist.src_addr = clientConnection->remote;
    checklist.my_addr = clientConnection->local;
    checklist.conn(this);
    const char *log_uri = http ? http->log_uri : nullptr;
    checklist.syncAle(request, log_uri);
    auto answer = checklist.fastCheck();
    if (answer.allowed() && answer.kind == 1) { // matched a "tunnel" rule
        debugs(33, 3, "Request will be tunneled to server");
        if (context)
            context->finished(); // Will remove from pipeline queue
        // stop monitoring reads on this socket; the tunnel takes over
        Comm::SetSelect(clientConnection->fd, COMM_SELECT_READ, NULL, NULL, 0);
        return initiateTunneledRequest(request, Http::METHOD_NONE, "unknown-protocol", preservedClientData);
    }
    debugs(33, 3, "denied; send error: " << requestError);
    return false;
}
1589
1590 void
1591 clientProcessRequestFinished(ConnStateData *conn, const HttpRequest::Pointer &request)
1592 {
1593 /*
1594 * DPW 2007-05-18
1595 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
1596 * to here because calling comm_reset_close() causes http to
1597 * be freed before accessing.
1598 */
1599 if (request != NULL && request->flags.resetTcp && Comm::IsConnOpen(conn->clientConnection)) {
1600 debugs(33, 3, HERE << "Sending TCP RST on " << conn->clientConnection);
1601 conn->flags.readMore = false;
1602 comm_reset_close(conn->clientConnection);
1603 }
1604 }
1605
/// Finalizes the setup of an already-parsed request: attaches it to the
/// connection, validates message framing, prepares request-body plumbing,
/// and kicks off http->doCallouts() processing. Shared by HTTP and FTP paths
/// (FTP passes a nil parser, see isFtp below).
void
clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
{
    ClientHttpRequest *http = context->http;
    bool chunked = false;
    bool mustReplyToOptions = false;
    bool unsupportedTe = false;
    bool expectBody = false;

    // We already have the request parsed and checked, so we
    // only need to go through the final body/conn setup to doCallouts().
    assert(http->request);
    HttpRequest::Pointer request = http->request;

    // temporary hack to avoid splitting this huge function with sensitive code
    const bool isFtp = !hp;

    // Some blobs below are still HTTP-specific, but we would have to rewrite
    // this entire function to remove them from the FTP code path. Connection
    // setup and body_pipe preparation blobs are needed for FTP.

    request->manager(conn, http->al);

    request->flags.accelerated = http->flags.accel;
    request->flags.sslBumped=conn->switchedToHttps();
    // TODO: decouple http->flags.accel from request->flags.sslBumped
    request->flags.noDirect = (request->flags.accelerated && !request->flags.sslBumped) ?
                              !conn->port->allow_direct : 0;
    // record which protocol family delivered this request
    request->sources |= isFtp ? Http::Message::srcFtp :
                        ((request->flags.sslBumped || conn->port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    // bumped requests inherit credentials established on the connection
    if (request->flags.sslBumped) {
        if (conn->getAuth() != NULL)
            request->auth_user_request = conn->getAuth();
    }
#endif

    // detect requests addressed to this proxy's internal objects
    if (internalCheck(request->url.path())) {
        if (internalHostnameIs(request->url.host()) && request->url.port() == getMyPort()) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true));
            http->flags.internal = true;
        } else if (Config.onoff.global_internal_static && internalStaticCheck(request->url.path())) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (global_internal_static on)");
            request->url.setScheme(AnyP::PROTO_HTTP, "http");
            request->url.host(internalHostname());
            request->url.port(getMyPort());
            http->flags.internal = true;
            http->setLogUriToRequestUri();
        } else
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (not this proxy)");
    }

    request->flags.internal = http->flags.internal;

    if (!isFtp) {
        // XXX: for non-HTTP messages instantiate a different Http::Message child type
        // for now Squid only supports HTTP requests
        const AnyP::ProtocolVersion &http_ver = hp->messageProtocol();
        assert(request->http_ver.protocol == http_ver.protocol);
        request->http_ver.major = http_ver.major;
        request->http_ver.minor = http_ver.minor;
    }

    // classify the request-body framing
    if (request->header.chunked()) {
        chunked = true;
    } else if (request->header.has(Http::HdrType::TRANSFER_ENCODING)) {
        const String te = request->header.getList(Http::HdrType::TRANSFER_ENCODING);
        // HTTP/1.1 requires chunking to be the last encoding if there is one
        unsupportedTe = te.size() && te != "identity";
    } // else implied identity coding

    mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
                         (request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
    if (!urlCheckRequest(request.getRaw()) || mustReplyToOptions || unsupportedTe) {
        clientStreamNode *node = context->getClientReplyContext();
        conn->quitAfterError(request.getRaw());
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        repContext->setReplyToError(ERR_UNSUP_REQ, Http::scNotImplemented, request->method, NULL,
                                    conn->clientConnection->remote, request.getRaw(), NULL, NULL);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    // non-chunked requests must carry a valid Content-Length (if any body)
    if (!chunked && !clientIsContentLengthValid(request.getRaw())) {
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        conn->quitAfterError(request.getRaw());
        repContext->setReplyToError(ERR_INVALID_REQ,
                                    Http::scLengthRequired, request->method, NULL,
                                    conn->clientConnection->remote, request.getRaw(), NULL, NULL);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    clientSetKeepaliveFlag(http);
    // Let tunneling code be fully responsible for CONNECT requests
    if (http->request->method == Http::METHOD_CONNECT) {
        context->mayUseConnection(true);
        conn->flags.readMore = false;
    }

#if USE_OPENSSL
    // a bumped connection may owe the client a previously delayed error
    if (conn->switchedToHttps() && conn->serveDelayedError(context)) {
        clientProcessRequestFinished(conn, request);
        return;
    }
#endif

    /* Do we expect a request-body? */
    expectBody = chunked || request->content_length > 0;
    if (!context->mayUseConnection() && expectBody) {
        // -1 signals "length unknown" for chunked bodies
        request->body_pipe = conn->expectRequestBody(
                                 chunked ? -1 : request->content_length);

        /* Is it too large? */
        if (!chunked && // if chunked, we will check as we accumulate
                clientIsRequestBodyTooLargeForPolicy(request->content_length)) {
            clientStreamNode *node = context->getClientReplyContext();
            clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
            assert (repContext);
            conn->quitAfterError(request.getRaw());
            repContext->setReplyToError(ERR_TOO_BIG,
                                        Http::scPayloadTooLarge, Http::METHOD_NONE, NULL,
                                        conn->clientConnection->remote, http->request, NULL, NULL);
            assert(context->http->out.offset == 0);
            context->pullData();
            clientProcessRequestFinished(conn, request);
            return;
        }

        if (!isFtp) {
            // We may stop producing, comm_close, and/or call setReplyToError()
            // below, so quit on errors to avoid http->doCallouts()
            if (!conn->handleRequestBodyData()) {
                clientProcessRequestFinished(conn, request);
                return;
            }

            if (!request->body_pipe->productionEnded()) {
                debugs(33, 5, "need more request body");
                context->mayUseConnection(true);
                assert(conn->flags.readMore);
            }
        }
    }

    http->calloutContext = new ClientRequestContext(http);

    http->doCallouts();

    clientProcessRequestFinished(conn, request);
}
1764
1765 int
1766 ConnStateData::pipelinePrefetchMax() const
1767 {
1768 // TODO: Support pipelined requests through pinned connections.
1769 if (pinning.pinned)
1770 return 0;
1771 return Config.pipeline_max_prefetch;
1772 }
1773
1774 /**
1775 * Limit the number of concurrent requests.
1776 * \return true when there are available position(s) in the pipeline queue for another request.
1777 * \return false when the pipeline queue is full or disabled.
1778 */
1779 bool
1780 ConnStateData::concurrentRequestQueueFilled() const
1781 {
1782 const int existingRequestCount = pipeline.count();
1783
1784 // default to the configured pipeline size.
1785 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
1786 #if USE_OPENSSL
1787 const int internalRequest = (transparent() && sslBumpMode == Ssl::bumpSplice) ? 1 : 0;
1788 #else
1789 const int internalRequest = 0;
1790 #endif
1791 const int concurrentRequestLimit = pipelinePrefetchMax() + 1 + internalRequest;
1792
1793 // when queue filled already we can't add more.
1794 if (existingRequestCount >= concurrentRequestLimit) {
1795 debugs(33, 3, clientConnection << " max concurrent requests reached (" << concurrentRequestLimit << ")");
1796 debugs(33, 5, clientConnection << " deferring new request until one is done");
1797 return true;
1798 }
1799
1800 return false;
1801 }
1802
1803 /**
1804 * Perform proxy_protocol_access ACL tests on the client which
1805 * connected to PROXY protocol port to see if we trust the
1806 * sender enough to accept their PROXY header claim.
1807 */
1808 bool
1809 ConnStateData::proxyProtocolValidateClient()
1810 {
1811 if (!Config.accessList.proxyProtocol)
1812 return proxyProtocolError("PROXY client not permitted by default ACL");
1813
1814 ACLFilledChecklist ch(Config.accessList.proxyProtocol, NULL, clientConnection->rfc931);
1815 ch.src_addr = clientConnection->remote;
1816 ch.my_addr = clientConnection->local;
1817 ch.conn(this);
1818
1819 if (!ch.fastCheck().allowed())
1820 return proxyProtocolError("PROXY client not permitted by ACLs");
1821
1822 return true;
1823 }
1824
/**
 * Perform cleanup on PROXY protocol errors.
 * If header parsing hits a fatal error terminate the connection,
 * otherwise wait for more data.
 * \param msg fatal error description, or nil to keep waiting silently
 * \returns false, so callers can "return proxyProtocolError(...)"
 */
bool
ConnStateData::proxyProtocolError(const char *msg)
{
    if (msg) {
        // This is important to know, but maybe not so much that flooding the log is okay.
#if QUIET_PROXY_PROTOCOL
        // display the first of every 32 occurrences at level 1, the others at level 2.
        static uint8_t hide = 0;
        debugs(33, (hide++ % 32 == 0 ? DBG_IMPORTANT : 2), msg << " from " << clientConnection);
#else
        debugs(33, DBG_IMPORTANT, msg << " from " << clientConnection);
#endif
        mustStop(msg); // schedule connection termination
    }
    return false;
}
1846
1847 /// Attempts to extract a PROXY protocol header from the input buffer and,
1848 /// upon success, stores the parsed header in proxyProtocolHeader_.
1849 /// \returns true if the header was successfully parsed
1850 /// \returns false if more data is needed to parse the header or on error
1851 bool
1852 ConnStateData::parseProxyProtocolHeader()
1853 {
1854 try {
1855 const auto parsed = ProxyProtocol::Parse(inBuf);
1856 proxyProtocolHeader_ = parsed.header;
1857 assert(bool(proxyProtocolHeader_));
1858 inBuf.consume(parsed.size);
1859 needProxyProtocolHeader_ = false;
1860 if (proxyProtocolHeader_->hasForwardedAddresses()) {
1861 clientConnection->local = proxyProtocolHeader_->destinationAddress;
1862 clientConnection->remote = proxyProtocolHeader_->sourceAddress;
1863 if ((clientConnection->flags & COMM_TRANSPARENT))
1864 clientConnection->flags ^= COMM_TRANSPARENT; // prevent TPROXY spoofing of this new IP.
1865 debugs(33, 5, "PROXY/" << proxyProtocolHeader_->version() << " upgrade: " << clientConnection);
1866 }
1867 } catch (const Parser::BinaryTokenizer::InsufficientInput &) {
1868 debugs(33, 3, "PROXY protocol: waiting for more than " << inBuf.length() << " bytes");
1869 return false;
1870 } catch (const std::exception &e) {
1871 return proxyProtocolError(e.what());
1872 }
1873 return true;
1874 }
1875
1876 void
1877 ConnStateData::receivedFirstByte()
1878 {
1879 if (receivedFirstByte_)
1880 return;
1881
1882 receivedFirstByte_ = true;
1883 // Set timeout to Config.Timeout.request
1884 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
1885 AsyncCall::Pointer timeoutCall = JobCallback(33, 5,
1886 TimeoutDialer, this, ConnStateData::requestTimeout);
1887 commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall);
1888 }
1889
/**
 * Attempt to parse one or more requests from the input buffer.
 * Returns true after completing parsing of at least one request [header]. That
 * includes cases where parsing ended with an error (e.g., a huge request).
 */
bool
ConnStateData::clientParseRequests()
{
    bool parsed_req = false;

    debugs(33, 5, HERE << clientConnection << ": attempting to parse");

    // Loop while we have read bytes that are not needed for producing the body
    // On errors, bodyPipe may become nil, but readMore will be cleared
    while (!inBuf.isEmpty() && !bodyPipe && flags.readMore) {

        // Prohibit concurrent requests when using a pinned to-server connection
        // because our Client classes do not support request pipelining.
        if (pinning.pinned && !pinning.readHandler) {
            debugs(33, 3, clientConnection << " waits for busy " << pinning.serverConnection);
            break;
        }

        /* Limit the number of concurrent requests */
        if (concurrentRequestQueueFilled())
            break;

        // try to parse the PROXY protocol header magic bytes
        if (needProxyProtocolHeader_) {
            if (!parseProxyProtocolHeader())
                break;

            // we have been waiting for PROXY to provide client-IP
            // for some lookups, ie rDNS and IDENT.
            whenClientIpKnown();

            // Done with PROXY protocol which has cleared preservingClientData_.
            // If the next protocol supports on_unsupported_protocol, then its
            // parseOneRequest() must reset preservingClientData_.
            assert(!preservingClientData_);
        }

        if (Http::StreamPointer context = parseOneRequest()) {
            debugs(33, 5, clientConnection << ": done parsing a request");

            // a complete request header arrived; switch this connection to the
            // per-transaction lifetime timeout
            AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "clientLifetimeTimeout",
                    CommTimeoutCbPtrFun(clientLifetimeTimeout, context->http));
            commSetConnTimeout(clientConnection, Config.Timeout.lifetime, timeoutCall);

            context->registerWithConn();

#if USE_OPENSSL
            if (switchedToHttps())
                parsedBumpedRequestCount++;
#endif

            processParsedRequest(context);

            parsed_req = true; // XXX: do we really need to parse everything right NOW ?

            if (context->mayUseConnection()) {
                debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection");
                break;
            }
        } else {
            // incomplete header: wait for more bytes, but only while the
            // header still fits within the configured size limit
            debugs(33, 5, clientConnection << ": not enough request data: " <<
                   inBuf.length() << " < " << Config.maxRequestHeaderSize);
            Must(inBuf.length() < Config.maxRequestHeaderSize);
            break;
        }
    }

    /* XXX where to 'finish' the parsing pass? */
    return parsed_req;
}
1965
1966 void
1967 ConnStateData::afterClientRead()
1968 {
1969 #if USE_OPENSSL
1970 if (parsingTlsHandshake) {
1971 parseTlsHandshake();
1972 return;
1973 }
1974 #endif
1975
1976 /* Process next request */
1977 if (pipeline.empty())
1978 fd_note(clientConnection->fd, "Reading next request");
1979
1980 if (!clientParseRequests()) {
1981 if (!isOpen())
1982 return;
1983 /*
1984 * If the client here is half closed and we failed
1985 * to parse a request, close the connection.
1986 * The above check with connFinishedWithConn() only
1987 * succeeds _if_ the buffer is empty which it won't
1988 * be if we have an incomplete request.
1989 * XXX: This duplicates ConnStateData::kick
1990 */
1991 if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
1992 debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
1993 clientConnection->close();
1994 return;
1995 }
1996 }
1997
1998 if (!isOpen())
1999 return;
2000
2001 clientAfterReadingRequests();
2002 }
2003
2004 /**
2005 * called when new request data has been read from the socket
2006 *
2007 * \retval false called comm_close or setReplyToError (the caller should bail)
2008 * \retval true we did not call comm_close or setReplyToError
2009 */
2010 bool
2011 ConnStateData::handleReadData()
2012 {
2013 // if we are reading a body, stuff data into the body pipe
2014 if (bodyPipe != NULL)
2015 return handleRequestBodyData();
2016 return true;
2017 }
2018
2019 /**
2020 * called when new request body data has been buffered in inBuf
2021 * may close the connection if we were closing and piped everything out
2022 *
2023 * \retval false called comm_close or setReplyToError (the caller should bail)
2024 * \retval true we did not call comm_close or setReplyToError
2025 */
2026 bool
2027 ConnStateData::handleRequestBodyData()
2028 {
2029 assert(bodyPipe != NULL);
2030
2031 if (bodyParser) { // chunked encoding
2032 if (const err_type error = handleChunkedRequestBody()) {
2033 abortChunkedRequestBody(error);
2034 return false;
2035 }
2036 } else { // identity encoding
2037 debugs(33,5, HERE << "handling plain request body for " << clientConnection);
2038 const size_t putSize = bodyPipe->putMoreData(inBuf.c_str(), inBuf.length());
2039 if (putSize > 0)
2040 consumeInput(putSize);
2041
2042 if (!bodyPipe->mayNeedMoreData()) {
2043 // BodyPipe will clear us automagically when we produced everything
2044 bodyPipe = NULL;
2045 }
2046 }
2047
2048 if (!bodyPipe) {
2049 debugs(33,5, HERE << "produced entire request body for " << clientConnection);
2050
2051 if (const char *reason = stoppedSending()) {
2052 /* we've finished reading like good clients,
2053 * now do the close that initiateClose initiated.
2054 */
2055 debugs(33, 3, HERE << "closing for earlier sending error: " << reason);
2056 clientConnection->close();
2057 return false;
2058 }
2059 }
2060
2061 return true;
2062 }
2063
/// parses available chunked encoded body bytes, checks size, returns errors
err_type
ConnStateData::handleChunkedRequestBody()
{
    debugs(33, 7, "chunked from " << clientConnection << ": " << inBuf.length());

    try { // the parser will throw on errors

        if (inBuf.isEmpty()) // nothing to do
            return ERR_NONE;

        // check out the pipe buffer so the parser can append dechunked bytes
        BodyPipeCheckout bpc(*bodyPipe);
        bodyParser->setPayloadBuffer(&bpc.buf);
        const bool parsed = bodyParser->parse(inBuf);
        inBuf = bodyParser->remaining(); // sync buffers
        bpc.checkIn();

        // dechunk then check: the size limit applies to _dechunked_ content
        if (clientIsRequestBodyTooLargeForPolicy(bodyPipe->producedSize()))
            return ERR_TOO_BIG;

        if (parsed) {
            // the terminal chunk was seen; the whole body has been dechunked
            finishDechunkingRequest(true);
            Must(!bodyPipe);
            return ERR_NONE; // nil bodyPipe implies body end for the caller
        }

        // if chunk parser needs data, then the body pipe must need it too
        Must(!bodyParser->needsMoreData() || bodyPipe->mayNeedMoreData());

        // if parser needs more space and we can consume nothing, we will stall
        Must(!bodyParser->needsMoreSpace() || bodyPipe->buf().hasContent());
    } catch (...) { // TODO: be more specific
        debugs(33, 3, HERE << "malformed chunks" << bodyPipe->status());
        return ERR_INVALID_REQ;
    }

    debugs(33, 7, HERE << "need more chunked data" << *bodyPipe->status());
    return ERR_NONE;
}
2104
/// quit on errors related to chunked request body handling
void
ConnStateData::abortChunkedRequestBody(const err_type error)
{
    // tell the dechunking machinery we failed (false = not successful)
    finishDechunkingRequest(false);

    // XXX: The code below works if we fail during initial request parsing,
    // but if we fail when the server connection is used already, the server may send
    // us its response too, causing various assertions. How to prevent that?
#if WE_KNOW_HOW_TO_SEND_ERRORS
    // NOTE(review): this branch is compiled out; it sketches how an error
    // reply could be sent instead of resetting the connection.
    Http::StreamPointer context = pipeline.front();
    if (context != NULL && !context->http->out.offset) { // output nothing yet
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext*>(node->data.getRaw());
        assert(repContext);
        const Http::StatusCode scode = (error == ERR_TOO_BIG) ?
                                       Http::scPayloadTooLarge : HTTP_BAD_REQUEST;
        repContext->setReplyToError(error, scode,
                                    repContext->http->request->method,
                                    repContext->http->uri,
                                    CachePeer,
                                    repContext->http->request,
                                    inBuf, NULL);
        context->pullData();
    } else {
        // close or otherwise we may get stuck as nobody will notice the error?
        comm_reset_close(clientConnection);
    }
#else
    debugs(33, 3, HERE << "aborting chunked request without error " << error);
    comm_reset_close(clientConnection);
#endif
    flags.readMore = false; // stop reading from this client
}
2139
2140 void
2141 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer )
2142 {
2143 // request reader may get stuck waiting for space if nobody consumes body
2144 if (bodyPipe != NULL)
2145 bodyPipe->enableAutoConsumption();
2146
2147 // kids extend
2148 }
2149
2150 /** general lifetime handler for HTTP requests */
2151 void
2152 ConnStateData::requestTimeout(const CommTimeoutCbParams &io)
2153 {
2154 if (!Comm::IsConnOpen(io.conn))
2155 return;
2156
2157 const err_type error = receivedFirstByte_ ? ERR_REQUEST_PARSE_TIMEOUT : ERR_REQUEST_START_TIMEOUT;
2158 if (tunnelOnError(HttpRequestMethod(), error))
2159 return;
2160
2161 /*
2162 * Just close the connection to not confuse browsers
2163 * using persistent connections. Some browsers open
2164 * a connection and then do not use it until much
2165 * later (presumeably because the request triggering
2166 * the open has already been completed on another
2167 * connection)
2168 */
2169 debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
2170 io.conn->close();
2171 }
2172
2173 static void
2174 clientLifetimeTimeout(const CommTimeoutCbParams &io)
2175 {
2176 ClientHttpRequest *http = static_cast<ClientHttpRequest *>(io.data);
2177 debugs(33, DBG_IMPORTANT, "WARNING: Closing client connection due to lifetime timeout");
2178 debugs(33, DBG_IMPORTANT, "\t" << http->uri);
2179 http->logType.err.timedout = true;
2180 if (Comm::IsConnOpen(io.conn))
2181 io.conn->close();
2182 }
2183
ConnStateData::ConnStateData(const MasterXaction::Pointer &xact) :
    AsyncJob("ConnStateData"), // kids overwrite
    Server(xact),
    bodyParser(nullptr),
#if USE_OPENSSL
    sslBumpMode(Ssl::bumpEnd),
    tlsParser(Security::HandshakeParser::fromClient),
#endif
    needProxyProtocolHeader_(false),
#if USE_OPENSSL
    switchedToHttps_(false),
    parsingTlsHandshake(false),
    parsedBumpedRequestCount(0),
    tlsConnectPort(0),
    sslServerBump(NULL),
    signAlgorithm(Ssl::algSignTrusted),
#endif
    stoppedSending_(NULL),
    stoppedReceiving_(NULL)
{
    flags.readMore = true; // kids may overwrite
    flags.swanSang = false;

    // no to-server connection is pinned yet
    pinning.host = NULL;
    pinning.port = -1;
    pinning.pinned = false;
    pinning.auth = false;
    pinning.zeroReply = false;
    pinning.peerAccessDenied = false;
    pinning.peer = NULL;

    // store the details required for creating more MasterXaction objects as new requests come in
    log_addr = xact->tcpClient->remote;
    log_addr.applyClientMask(Config.Addrs.client_netmask);

    // register to receive notice of Squid signal events
    // which may affect long persisting client connections
    registerRunner();
}
2223
/// AsyncJob entry point: socket-level setup for a freshly accepted client
void
ConnStateData::start()
{
    BodyProducer::start();
    HttpControlMsgSink::start();

    // disable kernel Path MTU discovery when configured to, including the
    // "transparent connections only" mode
    if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF &&
            (transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) {
#if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
        int i = IP_PMTUDISC_DONT;
        if (setsockopt(clientConnection->fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof(i)) < 0) {
            int xerrno = errno;
            debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection << " : " << xstrerr(xerrno));
        }
#else
        // warn once per process when the platform lacks the needed sockopts
        static bool reported = false;

        if (!reported) {
            debugs(33, DBG_IMPORTANT, "NOTICE: Path MTU discovery disabling is not supported on your platform.");
            reported = true;
        }
#endif
    }

    // learn when the client connection goes away so we can clean up
    typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
    AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, ConnStateData::connStateClosed);
    comm_add_close_handler(clientConnection->fd, call);

    needProxyProtocolHeader_ = port->flags.proxySurrogate;
    if (needProxyProtocolHeader_) {
        if (!proxyProtocolValidateClient()) // will close the connection on failure
            return;
    } else
        whenClientIpKnown();

    // requires needProxyProtocolHeader_ which is initialized above
    preservingClientData_ = shouldPreserveClientData();
}
2262
/// Called once the real client IP is known (immediately for plain
/// connections, after header parsing for PROXY-protocol ports):
/// starts rDNS/IDENT lookups, registers with the client database, and
/// assigns the connection to a client delay pool when one matches.
void
ConnStateData::whenClientIpKnown()
{
    if (Config.onoff.log_fqdn)
        fqdncache_gethostbyaddr(clientConnection->remote, FQDN_LOOKUP_IF_MISS);

#if USE_IDENT
    // start an IDENT lookup only for clients matching ident_lookup_access
    if (Ident::TheConfig.identLookup) {
        ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, NULL, NULL);
        identChecklist.src_addr = clientConnection->remote;
        identChecklist.my_addr = clientConnection->local;
        if (identChecklist.fastCheck().allowed())
            Ident::Start(clientConnection, clientIdentDone, this);
    }
#endif

    clientdbEstablished(clientConnection->remote, 1);

#if USE_DELAY_POOLS
    fd_table[clientConnection->fd].clientInfo = NULL;

    if (!Config.onoff.client_db)
        return; // client delay pools require client_db

    const auto &pools = ClientDelayPools::Instance()->pools;
    if (pools.size()) {
        ACLFilledChecklist ch(NULL, NULL, NULL);

        // TODO: we check early to limit error response bandwidth but we
        // should recheck when we can honor delay_pool_uses_indirect
        // TODO: we should also pass the port details for myportname here.
        ch.src_addr = clientConnection->remote;
        ch.my_addr = clientConnection->local;

        for (unsigned int pool = 0; pool < pools.size(); ++pool) {

            /* pools require explicit 'allow' to assign a client into them */
            if (pools[pool]->access) {
                ch.changeAcl(pools[pool]->access);
                auto answer = ch.fastCheck();
                if (answer.allowed()) {

                    /* request client information from db after we did all checks
                       this will save hash lookup if client failed checks */
                    ClientInfo * cli = clientdbGetInfo(clientConnection->remote);
                    assert(cli);

                    /* put client info in FDE */
                    fd_table[clientConnection->fd].clientInfo = cli;

                    /* setup write limiter for this request */
                    const double burst = floor(0.5 +
                                               (pools[pool]->highwatermark * Config.ClientDelay.initial)/100.0);
                    cli->setWriteLimiter(pools[pool]->rate, burst, pools[pool]->highwatermark);
                    break; // the first matching pool wins
                } else {
                    debugs(83, 4, HERE << "Delay pool " << pool << " skipped because ACL " << answer);
                }
            }
        }
    }
#endif

    // kids must extend to actually start doing something (e.g., reading)
}
2328
2329 /** Handle a new connection on an HTTP socket. */
2330 void
2331 httpAccept(const CommAcceptCbParams &params)
2332 {
2333 MasterXaction::Pointer xact = params.xaction;
2334 AnyP::PortCfgPointer s = xact->squidPort;
2335
2336 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2337
2338 if (params.flag != Comm::OK) {
2339 // Its possible the call was still queued when the client disconnected
2340 debugs(33, 2, s->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2341 return;
2342 }
2343
2344 debugs(33, 4, params.conn << ": accepted");
2345 fd_note(params.conn->fd, "client http connect");
2346
2347 if (s->tcp_keepalive.enabled)
2348 commSetTcpKeepalive(params.conn->fd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
2349
2350 ++incoming_sockets_accepted;
2351
2352 // Socket is ready, setup the connection manager to start using it
2353 auto *srv = Http::NewServer(xact);
2354 AsyncJob::Start(srv); // usually async-calls readSomeData()
2355 }
2356
2357 /// Create TLS connection structure and update fd_table
2358 static bool
2359 httpsCreate(const ConnStateData *connState, const Security::ContextPointer &ctx)
2360 {
2361 const auto conn = connState->clientConnection;
2362 if (Security::CreateServerSession(ctx, conn, connState->port->secure, "client https start")) {
2363 debugs(33, 5, "will negotiate TLS on " << conn);
2364 return true;
2365 }
2366
2367 debugs(33, DBG_IMPORTANT, "ERROR: could not create TLS server context for " << conn);
2368 conn->close();
2369 return false;
2370 }
2371
/**
 * Make one TLS accept-side handshake attempt on the client connection,
 * re-arming I/O via \a callback when the library wants more I/O.
 *
 * \retval 1 on success
 * \retval 0 when needs more data
 * \retval -1 on error
 */
static int
tlsAttemptHandshake(ConnStateData *conn, PF *callback)
{
    // TODO: maybe throw instead of returning -1
    // see https://github.com/squid-cache/squid/pull/81#discussion_r153053278
    int fd = conn->clientConnection->fd;
    auto session = fd_table[fd].ssl.get();

    errno = 0;

#if USE_OPENSSL
    const auto ret = SSL_accept(session);
    if (ret > 0)
        return 1; // handshake completed

    // SSL_get_error() needs both the return code and the saved errno
    const int xerrno = errno;
    const auto ssl_error = SSL_get_error(session, ret);

    switch (ssl_error) {

    case SSL_ERROR_WANT_READ:
        // retry (via callback, if any) when the socket becomes readable
        Comm::SetSelect(fd, COMM_SELECT_READ, callback, (callback ? conn : nullptr), 0);
        return 0;

    case SSL_ERROR_WANT_WRITE:
        // retry (via callback, if any) when the socket becomes writable
        Comm::SetSelect(fd, COMM_SELECT_WRITE, callback, (callback ? conn : nullptr), 0);
        return 0;

    case SSL_ERROR_SYSCALL:
        if (ret == 0) {
            debugs(83, 2, "Error negotiating SSL connection on FD " << fd << ": Aborted by client: " << ssl_error);
        } else {
            debugs(83, (xerrno == ECONNRESET) ? 1 : 2, "Error negotiating SSL connection on FD " << fd << ": " <<
                   (xerrno == 0 ? Security::ErrorString(ssl_error) : xstrerr(xerrno)));
        }
        break;

    case SSL_ERROR_ZERO_RETURN:
        debugs(83, DBG_IMPORTANT, "Error negotiating SSL connection on FD " << fd << ": Closed by client");
        break;

    default:
        debugs(83, DBG_IMPORTANT, "Error negotiating SSL connection on FD " <<
               fd << ": " << Security::ErrorString(ssl_error) <<
               " (" << ssl_error << "/" << ret << ")");
    }

#elif USE_GNUTLS

    const auto x = gnutls_handshake(session);
    if (x == GNUTLS_E_SUCCESS)
        return 1; // handshake completed

    if (gnutls_error_is_fatal(x)) {
        debugs(83, 2, "Error negotiating TLS on " << conn->clientConnection << ": Aborted by client: " << Security::ErrorString(x));

    } else if (x == GNUTLS_E_INTERRUPTED || x == GNUTLS_E_AGAIN) {
        // non-fatal: re-arm I/O in whichever direction GnuTLS wants
        const auto ioAction = (gnutls_record_get_direction(session)==0 ? COMM_SELECT_READ : COMM_SELECT_WRITE);
        Comm::SetSelect(fd, ioAction, callback, (callback ? conn : nullptr), 0);
        return 0;
    }

#else
    // Performing TLS handshake should never be reachable without a TLS/SSL library.
    (void)session; // avoid compiler and static analysis complaints
    fatal("FATAL: HTTPS not supported by this Squid.");
#endif

    return -1;
}
2448
/** negotiate an SSL connection */
static void
clientNegotiateSSL(int fd, void *data)
{
    ConnStateData *conn = (ConnStateData *)data;

    // attempt one handshake step; re-registers this callback when more I/O is needed
    const int ret = tlsAttemptHandshake(conn, clientNegotiateSSL);
    if (ret <= 0) {
        if (ret < 0) // An error
            conn->clientConnection->close();
        return; // ret == 0: waiting for more I/O; we will be called again
    }

    Security::SessionPointer session(fd_table[fd].ssl);

#if USE_OPENSSL
    if (Security::SessionIsResumed(session)) {
        debugs(83, 2, "Session " << SSL_get_session(session.get()) <<
               " reused on FD " << fd << " (" << fd_table[fd].ipaddr <<
               ":" << (int)fd_table[fd].remote_port << ")");
    } else {
        if (Debug::Enabled(83, 4)) {
            /* Write out the SSL session details.. actually the call below, but
             * OpenSSL headers do strange typecasts confusing GCC.. */
            /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
            PEM_ASN1_write(reinterpret_cast<i2d_of_void *>(i2d_SSL_SESSION),
                           PEM_STRING_SSL_SESSION, debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);

#elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)

            /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
             * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
             * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
             * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
             * Because there are two possible usable cast, if you get an error here, try the other
             * commented line. */

            PEM_ASN1_write((int(*)())i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
            /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
             */
#else
            debugs(83, 4, "With " OPENSSL_VERSION_TEXT ", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source.");

#endif
            /* Note: This does not automatically fflush the log file.. */
        }

        debugs(83, 2, "New session " << SSL_get_session(session.get()) <<
               " on FD " << fd << " (" << fd_table[fd].ipaddr << ":" <<
               fd_table[fd].remote_port << ")");
    }
#else
    debugs(83, 2, "TLS session reuse not yet implemented.");
#endif

    // Connection established. Retrieve TLS connection parameters for logging.
    conn->clientConnection->tlsNegotiations()->retrieveNegotiatedInfo(session);

#if USE_OPENSSL
    // log the client certificate details, if the client presented one
    X509 *client_cert = SSL_get_peer_certificate(session.get());

    if (client_cert) {
        debugs(83, 3, "FD " << fd << " client certificate: subject: " <<
               X509_NAME_oneline(X509_get_subject_name(client_cert), 0, 0));

        debugs(83, 3, "FD " << fd << " client certificate: issuer: " <<
               X509_NAME_oneline(X509_get_issuer_name(client_cert), 0, 0));

        X509_free(client_cert); // SSL_get_peer_certificate() increments the refcount
    } else {
        debugs(83, 5, "FD " << fd << " has no client certificate.");
    }
#else
    debugs(83, 2, "Client certificate requesting not yet implemented.");
#endif

    conn->readSomeData();
}
2536
2537 /**
2538 * If Security::ContextPointer is given, starts reading the TLS handshake.
2539 * Otherwise, calls switchToHttps to generate a dynamic Security::ContextPointer.
2540 */
2541 static void
2542 httpsEstablish(ConnStateData *connState, const Security::ContextPointer &ctx)
2543 {
2544 assert(connState);
2545 const Comm::ConnectionPointer &details = connState->clientConnection;
2546
2547 if (!ctx || !httpsCreate(connState, ctx))
2548 return;
2549
2550 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
2551 AsyncCall::Pointer timeoutCall = JobCallback(33, 5, TimeoutDialer,
2552 connState, ConnStateData::requestTimeout);
2553 commSetConnTimeout(details, Config.Timeout.request, timeoutCall);
2554
2555 Comm::SetSelect(details->fd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0);
2556 }
2557
#if USE_OPENSSL
/**
 * A callback function to use with the ACLFilledChecklist callback.
 * Records the ssl_bump ACL verdict on the connection and either closes it
 * (bumpTerminate) or fakes a CONNECT request to continue bumping.
 */
static void
httpsSslBumpAccessCheckDone(Acl::Answer answer, void *data)
{
    ConnStateData *connState = (ConnStateData *) data;

    // if the connection is closed or closing, just return.
    if (!connState->isOpen())
        return;

    if (answer.allowed()) {
        // fixed missing separator: the message used to render as, e.g., "action peekneeded for"
        debugs(33, 2, "sslBump action " << Ssl::bumpMode(answer.kind) << " needed for " << connState->clientConnection);
        connState->sslBumpMode = static_cast<Ssl::BumpMode>(answer.kind);
    } else {
        // denied (or errored) checks default to splicing the connection
        debugs(33, 3, "sslBump not needed for " << connState->clientConnection);
        connState->sslBumpMode = Ssl::bumpSplice;
    }

    if (connState->sslBumpMode == Ssl::bumpTerminate) {
        connState->clientConnection->close();
        return;
    }

    // continue via a fake CONNECT request; close on failure to build one
    if (!connState->fakeAConnectRequest("ssl-bump", connState->inBuf))
        connState->clientConnection->close();
}
#endif
2588
2589 /** handle a new HTTPS connection */
2590 static void
2591 httpsAccept(const CommAcceptCbParams &params)
2592 {
2593 MasterXaction::Pointer xact = params.xaction;
2594 const AnyP::PortCfgPointer s = xact->squidPort;
2595
2596 // NP: it is possible the port was reconfigured when the call or accept() was queued.
2597
2598 if (params.flag != Comm::OK) {
2599 // Its possible the call was still queued when the client disconnected
2600 debugs(33, 2, "httpsAccept: " << s->listenConn << ": accept failure: " << xstrerr(params.xerrno));
2601 return;
2602 }
2603
2604 debugs(33, 4, HERE << params.conn << " accepted, starting SSL negotiation.");
2605 fd_note(params.conn->fd, "client https connect");
2606
2607 if (s->tcp_keepalive.enabled) {
2608 commSetTcpKeepalive(params.conn->fd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
2609 }
2610 ++incoming_sockets_accepted;
2611
2612 // Socket is ready, setup the connection manager to start using it
2613 auto *srv = Https::NewServer(xact);
2614 AsyncJob::Start(srv); // usually async-calls postHttpsAccept()
2615 }
2616
/// Continues HTTPS connection setup after accept(): either starts the
/// ssl_bump ACL check (SslBump ports) or begins TLS negotiation with the
/// port's static context.
void
ConnStateData::postHttpsAccept()
{
    if (port->flags.tunnelSslBumping) {
#if USE_OPENSSL
        debugs(33, 5, "accept transparent connection: " << clientConnection);

        if (!Config.accessList.ssl_bump) {
            // no ssl_bump rules configured: treat as denied (ends up splicing)
            httpsSslBumpAccessCheckDone(ACCESS_DENIED, this);
            return;
        }

        MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
        mx->tcpClient = clientConnection;
        // Create a fake HTTP request and ALE for the ssl_bump ACL check,
        // using tproxy/intercept provided destination IP and port.
        // XXX: Merge with subsequent fakeAConnectRequest(), buildFakeRequest().
        // XXX: Do this earlier (e.g., in Http[s]::One::Server constructor).
        HttpRequest *request = new HttpRequest(mx);
        static char ip[MAX_IPSTRLEN];
        assert(clientConnection->flags & (COMM_TRANSPARENT | COMM_INTERCEPTION));
        request->url.host(clientConnection->local.toStr(ip, sizeof(ip)));
        request->url.port(clientConnection->local.port());
        request->myportname = port->name;
        const AccessLogEntry::Pointer connectAle = new AccessLogEntry;
        CodeContext::Reset(connectAle);
        // TODO: Use these request/ALE when waiting for new bumped transactions.

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, request, NULL);
        acl_checklist->src_addr = clientConnection->remote;
        acl_checklist->my_addr = port->s;
        // Build a local AccessLogEntry to allow requiresAle() acls work
        acl_checklist->al = connectAle;
        acl_checklist->al->cache.start_time = current_time;
        acl_checklist->al->tcpClient = clientConnection;
        acl_checklist->al->cache.port = port;
        acl_checklist->al->cache.caddr = log_addr;
        acl_checklist->al->proxyProtocolHeader = proxyProtocolHeader_;
        HTTPMSGUNLOCK(acl_checklist->al->request);
        acl_checklist->al->request = request;
        HTTPMSGLOCK(acl_checklist->al->request);
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(request, log_uri);
        // the verdict arrives asynchronously via httpsSslBumpAccessCheckDone()
        acl_checklist->nonBlockingCheck(httpsSslBumpAccessCheckDone, this);
#else
        fatal("FATAL: SSL-Bump requires --with-openssl");
#endif
        return;
    } else {
        httpsEstablish(this, port->secure.staticContext);
    }
}
2671
2672 #if USE_OPENSSL
2673 void
2674 ConnStateData::sslCrtdHandleReplyWrapper(void *data, const Helper::Reply &reply)
2675 {
2676 ConnStateData * state_data = (ConnStateData *)(data);
2677 state_data->sslCrtdHandleReply(reply);
2678 }
2679
/// Handles the ssl_crtd helper reply carrying a generated fake certificate:
/// on success, installs the certificate (directly on the session for
/// peek/stare bumping, or via a new context otherwise); on any failure,
/// falls through to getSslContextDone() with a nil context.
void
ConnStateData::sslCrtdHandleReply(const Helper::Reply &reply)
{
    if (!isOpen()) {
        debugs(33, 3, "Connection gone while waiting for ssl_crtd helper reply; helper reply:" << reply);
        return;
    }

    if (reply.result == Helper::BrokenHelper) {
        debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply);
    } else if (!reply.other().hasContent()) {
        debugs(1, DBG_IMPORTANT, HERE << "\"ssl_crtd\" helper returned <NULL> reply.");
    } else {
        Ssl::CrtdMessage reply_message(Ssl::CrtdMessage::REPLY);
        if (reply_message.parse(reply.other().content(), reply.other().contentSize()) != Ssl::CrtdMessage::OK) {
            debugs(33, 5, "Reply from ssl_crtd for " << tlsConnectHostOrIp << " is incorrect");
        } else {
            if (reply.result != Helper::Okay) {
                debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply_message.getBody());
            } else {
                debugs(33, 5, "Certificate for " << tlsConnectHostOrIp << " was successfully received from ssl_crtd");
                if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
                    // peek/stare: attach the certificate to the existing session
                    doPeekAndSpliceStep();
                    auto ssl = fd_table[clientConnection->fd].ssl.get();
                    bool ret = Ssl::configureSSLUsingPkeyAndCertFromMemory(ssl, reply_message.getBody().c_str(), *port);
                    if (!ret)
                        debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

                    Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
                    Ssl::configureUnconfiguredSslContext(ctx, signAlgorithm, *port);
                } else {
                    // build a fresh context around the generated certificate and cache it
                    Security::ContextPointer ctx(Ssl::GenerateSslContextUsingPkeyAndCertFromMemory(reply_message.getBody().c_str(), port->secure, (signAlgorithm == Ssl::algSignTrusted)));
                    if (ctx && !sslBumpCertKey.isEmpty())
                        storeTlsContextToCache(sslBumpCertKey, ctx);
                    getSslContextDone(ctx);
                }
                return;
            }
        }
    }
    // all error paths above converge here: proceed without a generated context
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2723
2724 void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties &certProperties)
2725 {
2726 certProperties.commonName = sslCommonName_.isEmpty() ? tlsConnectHostOrIp.c_str() : sslCommonName_.c_str();
2727
2728 const bool connectedOk = sslServerBump && sslServerBump->connectedOk();
2729 if (connectedOk) {
2730 if (X509 *mimicCert = sslServerBump->serverCert.get())
2731 certProperties.mimicCert.resetAndLock(mimicCert);
2732
2733 ACLFilledChecklist checklist(NULL, sslServerBump->request.getRaw(),
2734 clientConnection != NULL ? clientConnection->rfc931 : dash_str);
2735 checklist.sslErrors = cbdataReference(sslServerBump->sslErrors());
2736
2737 for (sslproxy_cert_adapt *ca = Config.ssl_client.cert_adapt; ca != NULL; ca = ca->next) {
2738 // If the algorithm already set, then ignore it.
2739 if ((ca->alg == Ssl::algSetCommonName && certProperties.setCommonName) ||
2740 (ca->alg == Ssl::algSetValidAfter && certProperties.setValidAfter) ||
2741 (ca->alg == Ssl::algSetValidBefore && certProperties.setValidBefore) )
2742 continue;
2743
2744 if (ca->aclList && checklist.fastCheck(ca->aclList).allowed()) {
2745 const char *alg = Ssl::CertAdaptAlgorithmStr[ca->alg];
2746 const char *param = ca->param;
2747
2748 // For parameterless CN adaptation, use hostname from the
2749 // CONNECT request.
2750 if (ca->alg == Ssl::algSetCommonName) {
2751 if (!param)
2752 param = tlsConnectHostOrIp.c_str();
2753 certProperties.commonName = param;
2754 certProperties.setCommonName = true;
2755 } else if (ca->alg == Ssl::algSetValidAfter)
2756 certProperties.setValidAfter = true;
2757 else if (ca->alg == Ssl::algSetValidBefore)
2758 certProperties.setValidBefore = true;
2759
2760 debugs(33, 5, HERE << "Matches certificate adaptation aglorithm: " <<
2761 alg << " param: " << (param ? param : "-"));
2762 }
2763 }
2764
2765 certProperties.signAlgorithm = Ssl::algSignEnd;
2766 for (sslproxy_cert_sign *sg = Config.ssl_client.cert_sign; sg != NULL; sg = sg->next) {
2767 if (sg->aclList && checklist.fastCheck(sg->aclList).allowed()) {
2768 certProperties.signAlgorithm = (Ssl::CertSignAlgorithm)sg->alg;
2769 break;
2770 }
2771 }
2772 } else {// did not try to connect (e.g. client-first) or failed to connect
2773 // In case of an error while connecting to the secure server, use a
2774 // trusted certificate, with no mimicked fields and no adaptation
2775 // algorithms. There is nothing we can mimic, so we want to minimize the
2776 // number of warnings the user will have to see to get to the error page.
2777 // We will close the connection, so that the trust is not extended to
2778 // non-Squid content.
2779 certProperties.signAlgorithm = Ssl::algSignTrusted;
2780 }
2781
2782 assert(certProperties.signAlgorithm != Ssl::algSignEnd);
2783
2784 if (certProperties.signAlgorithm == Ssl::algSignUntrusted) {
2785 assert(port->secure.untrustedSigningCa.cert);
2786 certProperties.signWithX509.resetAndLock(port->secure.untrustedSigningCa.cert.get());
2787 certProperties.signWithPkey.resetAndLock(port->secure.untrustedSigningCa.pkey.get());
2788 } else {
2789 assert(port->secure.signingCa.cert.get());
2790 certProperties.signWithX509.resetAndLock(port->secure.signingCa.cert.get());
2791
2792 if (port->secure.signingCa.pkey)
2793 certProperties.signWithPkey.resetAndLock(port->secure.signingCa.pkey.get());
2794 }
2795 signAlgorithm = certProperties.signAlgorithm;
2796
2797 certProperties.signHash = Ssl::DefaultSignHash;
2798 }
2799
2800 Security::ContextPointer
2801 ConnStateData::getTlsContextFromCache(const SBuf &cacheKey, const Ssl::CertificateProperties &certProperties)
2802 {
2803 debugs(33, 5, "Finding SSL certificate for " << cacheKey << " in cache");
2804 Ssl::LocalContextStorage * ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2805 if (Security::ContextPointer *ctx = ssl_ctx_cache ? ssl_ctx_cache->get(cacheKey) : nullptr) {
2806 if (Ssl::verifySslCertificate(*ctx, certProperties)) {
2807 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is valid");
2808 return *ctx;
2809 } else {
2810 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is out of date. Delete this certificate from cache");
2811 if (ssl_ctx_cache)
2812 ssl_ctx_cache->del(cacheKey);
2813 }
2814 }
2815 return Security::ContextPointer(nullptr);
2816 }
2817
2818 void
2819 ConnStateData::storeTlsContextToCache(const SBuf &cacheKey, Security::ContextPointer &ctx)
2820 {
2821 Ssl::LocalContextStorage *ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2822 if (!ssl_ctx_cache || !ssl_ctx_cache->add(cacheKey, new Security::ContextPointer(ctx))) {
2823 // If it is not in storage delete after using. Else storage deleted it.
2824 fd_table[clientConnection->fd].dynamicTlsContext = ctx;
2825 }
2826 }
2827
/// Begins SSL context generation for a bumped connection. Finalizes the
/// CONNECT transaction, then (when dynamic certificate generation is
/// enabled) obtains a certificate: from the in-RAM cache, from the ssl_crtd
/// helper (asynchronously), or by blocking in-process generation. Ends by
/// calling getSslContextDone() directly or via the helper callback.
void
ConnStateData::getSslContextStart()
{
    // If we are called, then CONNECT has succeeded. Finalize it.
    if (auto xact = pipeline.front()) {
        if (xact->http && xact->http->request && xact->http->request->method == Http::METHOD_CONNECT)
            xact->finished();
        // cannot proceed with encryption if requests wait for plain responses
        Must(pipeline.empty());
    }
    /* careful: finished() above frees request, host, etc. */

    if (port->secure.generateHostCertificates) {
        Ssl::CertificateProperties certProperties;
        buildSslCertGenerationParams(certProperties);

        // Disable caching for bumpPeekAndSplice mode
        if (!(sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare))) {
            sslBumpCertKey.clear();
            Ssl::InRamCertificateDbKey(certProperties, sslBumpCertKey);
            assert(!sslBumpCertKey.isEmpty());

            // fast path: reuse a valid cached context if we have one
            Security::ContextPointer ctx(getTlsContextFromCache(sslBumpCertKey, certProperties));
            if (ctx) {
                getSslContextDone(ctx);
                return;
            }
        }

#if USE_SSL_CRTD
        // preferred path: delegate generation to the external ssl_crtd
        // helper; sslCrtdHandleReply() continues when the reply arrives
        try {
            debugs(33, 5, HERE << "Generating SSL certificate for " << certProperties.commonName << " using ssl_crtd.");
            Ssl::CrtdMessage request_message(Ssl::CrtdMessage::REQUEST);
            request_message.setCode(Ssl::CrtdMessage::code_new_certificate);
            request_message.composeRequest(certProperties);
            debugs(33, 5, HERE << "SSL crtd request: " << request_message.compose().c_str());
            Ssl::Helper::Submit(request_message, sslCrtdHandleReplyWrapper, this);
            return;
        } catch (const std::exception &e) {
            debugs(33, DBG_IMPORTANT, "ERROR: Failed to compose ssl_crtd " <<
                   "request for " << certProperties.commonName <<
                   " certificate: " << e.what() << "; will now block to " <<
                   "generate that certificate.");
            // fall through to do blocking in-process generation.
        }
#endif // USE_SSL_CRTD

        // fallback: generate the certificate in-process (blocks this worker)
        debugs(33, 5, HERE << "Generating SSL certificate for " << certProperties.commonName);
        if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
            doPeekAndSpliceStep();
            auto ssl = fd_table[clientConnection->fd].ssl.get();
            if (!Ssl::configureSSL(ssl, certProperties, *port))
                debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

            Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
            Ssl::configureUnconfiguredSslContext(ctx, certProperties.signAlgorithm, *port);
        } else {
            Security::ContextPointer dynCtx(Ssl::GenerateSslContext(certProperties, port->secure, (signAlgorithm == Ssl::algSignTrusted)));
            if (dynCtx && !sslBumpCertKey.isEmpty())
                storeTlsContextToCache(sslBumpCertKey, dynCtx);
            getSslContextDone(dynCtx);
        }
        return;
    }

    // dynamic generation disabled: proceed with no generated context so
    // getSslContextDone() can fall back to the static context (if any)
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2896
/// Completes the context-generation sequence started by getSslContextStart():
/// installs \a ctx (falling back to the port's static context when generation
/// failed; closes the connection when neither exists), creates the SSL object
/// for the client connection, and starts the server-side TLS handshake.
void
ConnStateData::getSslContextDone(Security::ContextPointer &ctx)
{
    if (port->secure.generateHostCertificates && !ctx) {
        debugs(33, 2, "Failed to generate TLS context for " << tlsConnectHostOrIp);
    }

    // If generated ssl context = NULL, try to use static ssl context.
    if (!ctx) {
        if (!port->secure.staticContext) {
            debugs(83, DBG_IMPORTANT, "Closing " << clientConnection->remote << " as lacking TLS context");
            clientConnection->close();
            return;
        } else {
            debugs(33, 5, "Using static TLS context.");
            ctx = port->secure.staticContext;
        }
    }

    if (!httpsCreate(this, ctx))
        return;

    // bumped intercepted conns should already have Config.Timeout.request set
    // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
    // to make sure the connection does not get stuck on non-SSL clients.
    typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(33, 5, TimeoutDialer,
                                     this, ConnStateData::requestTimeout);
    commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall);

    switchedToHttps_ = true;

    // hand any already-buffered client bytes to the SSL object via its read
    // BIO so the handshake parser sees them, then start negotiating
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    inBuf.clear();
    clientNegotiateSSL(clientConnection->fd, this);
}
2936
/// Switches this connection from plain HTTP handling to TLS processing for
/// SslBump: records the CONNECT target, creates the Ssl::ServerBump state
/// appropriate for \a bumpServerMode, resets the request-start timeout, and
/// begins reading the client's TLS handshake.
void
ConnStateData::switchToHttps(ClientHttpRequest *http, Ssl::BumpMode bumpServerMode)
{
    assert(!switchedToHttps_);
    Must(http->request);
    auto &request = http->request;

    // Depending on receivedFirstByte_, we are at the start of either an
    // established CONNECT tunnel with the client or an intercepted TCP (and
    // presumably TLS) connection from the client. Expect TLS Client Hello.
    const auto insideConnectTunnel = receivedFirstByte_;
    debugs(33, 5, (insideConnectTunnel ? "post-CONNECT " : "raw TLS ") << clientConnection);

    // remember the CONNECT target for later certificate generation/logging
    tlsConnectHostOrIp = request->url.hostOrIp();
    tlsConnectPort = request->url.port();
    resetSslCommonName(request->url.host());

    // We are going to read new request
    flags.readMore = true;

    // keep version major.minor details the same.
    // but we are now performing the HTTPS handshake traffic
    transferProtocol.protocol = AnyP::PROTO_HTTPS;

    // If sslServerBump is set, then we have decided to deny CONNECT
    // and now want to switch to SSL to send the error to the client
    // without even peeking at the origin server certificate.
    if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http);
    } else if (bumpServerMode == Ssl::bumpPeek || bumpServerMode == Ssl::bumpStare) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(http, nullptr, bumpServerMode);
    }

    // commSetConnTimeout() was called for this request before we switched.
    // Fix timeout to request_start_timeout
    typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(33, 5,
                                     TimeoutDialer, this, ConnStateData::requestTimeout);
    commSetConnTimeout(clientConnection, Config.Timeout.request_start_timeout, timeoutCall);
    // Also reset receivedFirstByte_ flag to allow this timeout work in the case we have
    // a bumped "connect" request on non transparent port.
    receivedFirstByte_ = false;
    // Get more data to peek at Tls
    parsingTlsHandshake = true;

    // If the protocol has changed, then reset preservingClientData_.
    // Otherwise, its value initially set in start() is still valid/fresh.
    // shouldPreserveClientData() uses parsingTlsHandshake which is reset above.
    if (insideConnectTunnel)
        preservingClientData_ = shouldPreserveClientData();

    readSomeData();
}
2992
/// Incrementally parses the buffered client TLS handshake (Client Hello).
/// Reads more data when the hello is incomplete; on a parse error, splices
/// (tunnels) the connection instead of bumping. On success, extracts SNI and
/// dispatches to the step appropriate for the configured step1 bump mode.
void
ConnStateData::parseTlsHandshake()
{
    Must(parsingTlsHandshake);

    assert(!inBuf.isEmpty());
    receivedFirstByte();
    fd_note(clientConnection->fd, "Parsing TLS handshake");

    bool unsupportedProtocol = false;
    try {
        if (!tlsParser.parseHello(inBuf)) {
            // need more data to finish parsing
            readSomeData();
            return;
        }
    }
    catch (const std::exception &ex) {
        // whatever the client sent, it was not a TLS Client Hello we support
        debugs(83, 2, "error on FD " << clientConnection->fd << ": " << ex.what());
        unsupportedProtocol = true;
    }

    parsingTlsHandshake = false;

    // client data may be needed for splicing and for
    // tunneling unsupportedProtocol after an error
    preservedClientData = inBuf;

    // Even if the parser failed, each TLS detail should either be set
    // correctly or still be "unknown"; copying unknown detail is a no-op.
    Security::TlsDetails::Pointer const &details = tlsParser.details;
    clientConnection->tlsNegotiations()->retrieveParsedInfo(details);
    if (details && !details->serverName.isEmpty()) {
        // the SNI name replaces the CONNECT-derived common name
        resetSslCommonName(details->serverName.c_str());
        tlsClientSni_ = details->serverName;
    }

    // We should disable read/write handlers
    Comm::ResetSelect(clientConnection->fd);

    if (unsupportedProtocol) {
        // cannot bump non-TLS traffic; splice (tunnel) it instead
        Http::StreamPointer context = pipeline.front();
        Must(context && context->http);
        HttpRequest::Pointer request = context->http->request;
        debugs(83, 5, "Got something other than TLS Client Hello. Cannot SslBump.");
        sslBumpMode = Ssl::bumpSplice;
        context->http->al->ssl.bumpMode = Ssl::bumpSplice;
        if (!clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_PROTOCOL_UNKNOWN))
            clientConnection->close();
        return;
    }

    if (!sslServerBump || sslServerBump->act.step1 == Ssl::bumpClientFirst) { // Either means client-first.
        getSslContextStart();
        return;
    } else if (sslServerBump->act.step1 == Ssl::bumpServerFirst) {
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        // will call httpsPeeked() with certificate and connection, eventually
        FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : nullptr);
    } else {
        Must(sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare);
        startPeekAndSplice();
    }
}
3058
3059 void httpsSslBumpStep2AccessCheckDone(Acl::Answer answer, void *data)
3060 {
3061 ConnStateData *connState = (ConnStateData *) data;
3062
3063 // if the connection is closed or closing, just return.
3064 if (!connState->isOpen())
3065 return;
3066
3067 debugs(33, 5, "Answer: " << answer << " kind:" << answer.kind);
3068 assert(connState->serverBump());
3069 Ssl::BumpMode bumpAction;
3070 if (answer.allowed()) {
3071 bumpAction = (Ssl::BumpMode)answer.kind;
3072 } else
3073 bumpAction = Ssl::bumpSplice;
3074
3075 connState->serverBump()->act.step2 = bumpAction;
3076 connState->sslBumpMode = bumpAction;
3077 Http::StreamPointer context = connState->pipeline.front();
3078 if (ClientHttpRequest *http = (context ? context->http : nullptr))
3079 http->al->ssl.bumpMode = bumpAction;
3080
3081 if (bumpAction == Ssl::bumpTerminate) {
3082 connState->clientConnection->close();
3083 } else if (bumpAction != Ssl::bumpSplice) {
3084 connState->startPeekAndSplice();
3085 } else if (!connState->splice())
3086 connState->clientConnection->close();
3087 }
3088
/// Stops SslBump processing and turns the connection into a blind TCP tunnel
/// by faking a CONNECT request (with the SNI as target when available).
/// \returns false when the fake tunneling request could not be initiated
bool
ConnStateData::splice()
{
    // normally we can splice here, because we just got client hello message

    // fde::ssl/tls_read_method() probably reads from our own inBuf. If so, then
    // we should not lose any raw bytes when switching to raw I/O here.
    if (fd_table[clientConnection->fd].ssl.get())
        fd_table[clientConnection->fd].useDefaultIo();

    // XXX: assuming that there was an HTTP/1.1 CONNECT to begin with...
    // reset the current protocol to HTTP/1.1 (was "HTTPS" for the bumping process)
    transferProtocol = Http::ProtocolVersion();
    assert(!pipeline.empty());
    Http::StreamPointer context = pipeline.front();
    Must(context);
    Must(context->http);
    ClientHttpRequest *http = context->http;
    HttpRequest::Pointer request = http->request;
    // careful: finished() releases the transaction; grab what we need first
    context->finished();
    if (transparent()) {
        // For transparent connections, make a new fake CONNECT request, now
        // with SNI as target. doCallout() checks, adaptations may need that.
        return fakeAConnectRequest("splice", preservedClientData);
    } else {
        // For non transparent connections make a new tunneled CONNECT, which
        // also sets the HttpRequest::flags::forceTunnel flag to avoid
        // respond with "Connection Established" to the client.
        // This fake CONNECT request required to allow use of SNI in
        // doCallout() checks and adaptations.
        return initiateTunneledRequest(request, Http::METHOD_CONNECT, "splice", preservedClientData);
    }
}
3122
/// Drives SslBump step2 for peek/stare modes. On the first call (while still
/// at step1), schedules the asynchronous ssl_bump step2 ACL check. After the
/// check, sets up an unconfigured TLS context and a held ClientBio so OpenSSL
/// can parse the buffered Client Hello, then starts forwarding to the server.
void
ConnStateData::startPeekAndSplice()
{
    // This is the Step2 of the SSL bumping
    assert(sslServerBump);
    Http::StreamPointer context = pipeline.front();
    ClientHttpRequest *http = context ? context->http : nullptr;

    if (sslServerBump->at(XactionStep::tlsBump1)) {
        sslServerBump->step = XactionStep::tlsBump2;
        // Run a accessList check to check if want to splice or continue bumping

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, sslServerBump->request.getRaw(), nullptr);
        acl_checklist->al = http ? http->al : nullptr;
        //acl_checklist->src_addr = params.conn->remote;
        //acl_checklist->my_addr = s->s;
        // client-side-only actions do not make sense at step2; ban them
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpNone));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpClientFirst));
        acl_checklist->banAction(Acl::Answer(ACCESS_ALLOWED, Ssl::bumpServerFirst));
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(sslServerBump->request.getRaw(), log_uri);
        // continues in httpsSslBumpStep2AccessCheckDone()
        acl_checklist->nonBlockingCheck(httpsSslBumpStep2AccessCheckDone, this);
        return;
    }

    // will call httpsPeeked() with certificate and connection, eventually
    Security::ContextPointer unConfiguredCTX(Ssl::createSSLContext(port->secure.signingCa.cert, port->secure.signingCa.pkey, port->secure));
    fd_table[clientConnection->fd].dynamicTlsContext = unConfiguredCTX;

    if (!httpsCreate(this, unConfiguredCTX))
        return;

    switchedToHttps_ = true;

    // feed the buffered Client Hello to OpenSSL via our BIO, but hold the
    // handshake so we do not answer the client before peeking at the server
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    bio->hold(true);

    // Here squid should have all of the client hello message so the
    // tlsAttemptHandshake() should return 0.
    // This block exist only to force openSSL parse client hello and detect
    // ERR_SECURE_ACCEPT_FAIL error, which should be checked and splice if required.
    if (tlsAttemptHandshake(this, nullptr) < 0) {
        debugs(83, 2, "TLS handshake failed.");
        HttpRequest::Pointer request(http ? http->request : nullptr);
        if (!clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_SECURE_ACCEPT_FAIL))
            clientConnection->close();
        return;
    }

    // We need to reset inBuf here, to be used by incoming requests in the case
    // of SSL bump
    inBuf.clear();

    debugs(83, 5, "Peek and splice at step2 done. Start forwarding the request!!! ");
    FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : NULL);
}
3182
3183 void
3184 ConnStateData::doPeekAndSpliceStep()
3185 {
3186 auto ssl = fd_table[clientConnection->fd].ssl.get();
3187 BIO *b = SSL_get_rbio(ssl);
3188 assert(b);
3189 Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
3190
3191 debugs(33, 5, "PeekAndSplice mode, proceed with client negotiation. Current state:" << SSL_state_string_long(ssl));
3192 bio->hold(false);
3193
3194 Comm::SetSelect(clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, this, 0);
3195 switchedToHttps_ = true;
3196 }
3197
3198 void
3199 ConnStateData::httpsPeeked(PinnedIdleContext pic)
3200 {
3201 Must(sslServerBump != NULL);
3202 Must(sslServerBump->request == pic.request);
3203 Must(pipeline.empty() || pipeline.front()->http == nullptr || pipeline.front()->http->request == pic.request.getRaw());
3204
3205 if (Comm::IsConnOpen(pic.connection)) {
3206 notePinnedConnectionBecameIdle(pic);
3207 debugs(33, 5, "bumped HTTPS server: " << tlsConnectHostOrIp);
3208 } else
3209 debugs(33, 5, "Error while bumping: " << tlsConnectHostOrIp);
3210
3211 getSslContextStart();
3212 }
3213
3214 #endif /* USE_OPENSSL */
3215
/// Builds and dispatches a fake CONNECT request (with forceTunnel set) so
/// that this connection's traffic is tunneled to \a connectHost:connectPort.
/// The target is taken from the first available source: the pinned server
/// connection, the \a cause request, the saved TLS CONNECT target, or the
/// intercepted destination. \returns false when no target can be computed.
bool
ConnStateData::initiateTunneledRequest(HttpRequest::Pointer const &cause, Http::MethodType const method, const char *reason, const SBuf &payload)
{
    // fake a CONNECT request to force connState to tunnel
    SBuf connectHost;
    unsigned short connectPort = 0;

    if (pinning.serverConnection != nullptr) {
        // already pinned: reuse that server's address
        static char ip[MAX_IPSTRLEN];
        connectHost = pinning.serverConnection->remote.toStr(ip, sizeof(ip));
        connectPort = pinning.serverConnection->remote.port();
    } else if (cause) {
        // use the target of the request that triggered tunneling
        connectHost = cause->url.hostOrIp();
        connectPort = cause->url.port();
#if USE_OPENSSL
    } else if (!tlsConnectHostOrIp.isEmpty()) {
        // use the target recorded when we switched to HTTPS
        connectHost = tlsConnectHostOrIp;
        connectPort = tlsConnectPort;
#endif
    } else if (transparent()) {
        // use the intercepted destination address
        static char ip[MAX_IPSTRLEN];
        connectHost = clientConnection->local.toStr(ip, sizeof(ip));
        connectPort = clientConnection->local.port();
    } else {
        debugs(33, 2, "Not able to compute URL, abort request tunneling for " << reason);
        return false;
    }

    debugs(33, 2, "Request tunneling for " << reason);
    ClientHttpRequest *http = buildFakeRequest(method, connectHost, connectPort, payload);
    HttpRequest::Pointer request = http->request;
    request->flags.forceTunnel = true;
    http->calloutContext = new ClientRequestContext(http);
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3253
/// For transparently intercepted connections: fabricates a CONNECT request
/// (using the TLS SNI as the target when known, otherwise the intercepted
/// local address) so access checks and adaptations can run before tunneling.
/// \returns true (the fake request is always dispatched)
bool
ConnStateData::fakeAConnectRequest(const char *reason, const SBuf &payload)
{
    debugs(33, 2, "fake a CONNECT request to force connState to tunnel for " << reason);

    SBuf connectHost;
    assert(transparent());
    const unsigned short connectPort = clientConnection->local.port();

#if USE_OPENSSL
    if (!tlsClientSni_.isEmpty())
        connectHost.assign(tlsClientSni_);
    else
#endif
    {
        // no SNI: fall back to the intercepted destination IP
        static char ip[MAX_IPSTRLEN];
        clientConnection->local.toHostStr(ip, sizeof(ip));
        connectHost.assign(ip);
    }

    ClientHttpRequest *http = buildFakeRequest(Http::METHOD_CONNECT, connectHost, connectPort, payload);

    http->calloutContext = new ClientRequestContext(http);
    HttpRequest::Pointer request = http->request;
    http->doCallouts();
    clientProcessRequestFinished(this, request);
    return true;
}
3282
/// Constructs a synthetic ClientHttpRequest/Http::Stream pair targeting
/// useHost:usePort, wired into the client stream machinery as if the client
/// had sent it. \a payload becomes the connection's pending input (inBuf).
/// Used for tunneling/splicing where no real parsed request exists.
/// \returns the new request; ownership follows normal request processing
ClientHttpRequest *
ConnStateData::buildFakeRequest(Http::MethodType const method, SBuf &useHost, unsigned short usePort, const SBuf &payload)
{
    ClientHttpRequest *http = new ClientHttpRequest(this);
    Http::Stream *stream = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = stream->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // plug the fake transaction into the standard client stream pipeline
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = stream;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    stream->flags.parsed_ok = 1; // Do we need it?
    stream->mayUseConnection(true);

    AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "clientLifetimeTimeout",
                                     CommTimeoutCbPtrFun(clientLifetimeTimeout, stream->http));
    commSetConnTimeout(clientConnection, Config.Timeout.lifetime, timeoutCall);

    stream->registerWithConn();

    MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
    mx->tcpClient = clientConnection;
    // Setup Http::Request object. Maybe should be replaced by a call to (modified)
    // clientProcessRequest
    HttpRequest::Pointer request = new HttpRequest(mx);
    // METHOD_NONE requests (raw TLS interception) use authority-form URLs
    AnyP::ProtocolType proto = (method == Http::METHOD_NONE) ? AnyP::PROTO_AUTHORITY_FORM : AnyP::PROTO_HTTP;
    request->url.setScheme(proto, nullptr);
    request->method = method;
    request->url.host(useHost.c_str());
    request->url.port(usePort);

    http->uri = SBufToCstring(request->effectiveRequestUri());
    http->initRequest(request.getRaw());

    request->manager(this, http->al);

    if (proto == AnyP::PROTO_HTTP)
        request->header.putStr(Http::HOST, useHost.c_str());

    request->sources |= ((switchedToHttps() || port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    // carry over any credentials already established on this connection
    if (getAuth())
        request->auth_user_request = getAuth();
#endif

    // make the supplied payload the connection's pending input
    inBuf = payload;
    flags.readMore = false;

    return http;
}
3338
3339 /// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
3340 static bool
3341 OpenedHttpSocket(const Comm::ConnectionPointer &c, const Ipc::FdNoteId portType)
3342 {
3343 if (!Comm::IsConnOpen(c)) {
3344 Must(NHttpSockets > 0); // we tried to open some
3345 --NHttpSockets; // there will be fewer sockets than planned
3346 Must(HttpSockets[NHttpSockets] < 0); // no extra fds received
3347
3348 if (!NHttpSockets) // we could not open any listen sockets at all
3349 fatalf("Unable to open %s",FdNote(portType));
3350
3351 return false;
3352 }
3353 return true;
3354 }
3355
3356 /// find any unused HttpSockets[] slot and store fd there or return false
3357 static bool
3358 AddOpenedHttpSocket(const Comm::ConnectionPointer &conn)
3359 {
3360 bool found = false;
3361 for (int i = 0; i < NHttpSockets && !found; ++i) {
3362 if ((found = HttpSockets[i] < 0))
3363 HttpSockets[i] = conn->fd;
3364 }
3365 return found;
3366 }
3367
/// Walks the configured http(s)_port list, validates each port's SslBump/TLS
/// settings, and asks IPC to open a listening socket for every usable port.
/// clientListenerConnectionOpened() finishes the setup once each listener is
/// actually open.
static void
clientHttpConnectionsOpen(void)
{
    for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
        const SBuf &scheme = AnyP::UriScheme(s->transport.protocol).image();

        if (MAXTCPLISTENPORTS == NHttpSockets) {
            debugs(1, DBG_IMPORTANT, "WARNING: You have too many '" << scheme << "_port' lines." <<
                   Debug::Extra << "The limit is " << MAXTCPLISTENPORTS << " HTTP ports.");
            continue;
        }

#if USE_OPENSSL
        // sanity-check and, if needed, disable SslBump for this port
        if (s->flags.tunnelSslBumping) {
            if (!Config.accessList.ssl_bump) {
                debugs(33, DBG_IMPORTANT, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << scheme << "_port " << s->s);
                s->flags.tunnelSslBumping = false;
            }
            if (!s->secure.staticContext && !s->secure.generateHostCertificates) {
                debugs(1, DBG_IMPORTANT, "Will not bump SSL at " << scheme << "_port " << s->s << " due to TLS initialization failure.");
                s->flags.tunnelSslBumping = false;
                if (s->transport.protocol == AnyP::PROTO_HTTP)
                    s->secure.encryptTransport = false;
            }
            if (s->flags.tunnelSslBumping) {
                // Create ssl_ctx cache for this port.
                Ssl::TheGlobalContextStorage.addLocalStorage(s->s, s->secure.dynamicCertMemCacheSize);
            }
        }
#endif

        if (s->secure.encryptTransport && !s->secure.staticContext) {
            debugs(1, DBG_CRITICAL, "ERROR: Ignoring " << scheme << "_port " << s->s << " due to TLS context initialization failure.");
            continue;
        }

        // Fill out a Comm::Connection which IPC will open as a listener for us
        // then pass back when active so we can start a TcpAcceptor subscription.
        s->listenConn = new Comm::Connection;
        s->listenConn->local = s->s;

        s->listenConn->flags = COMM_NONBLOCKING | (s->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
                               (s->flags.natIntercept ? COMM_INTERCEPTION : 0) |
                               (s->workerQueues ? COMM_REUSEPORT : 0);

        typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
        if (s->transport.protocol == AnyP::PROTO_HTTP) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTP
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpAccept", CommAcceptCbPtrFun(httpAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33,2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened, s, Ipc::fdnHttpSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpSocket, listenCall);

        } else if (s->transport.protocol == AnyP::PROTO_HTTPS) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTPS
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpsAccept", CommAcceptCbPtrFun(httpsAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33, 2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened,
                                                    s, Ipc::fdnHttpsSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpsSocket, listenCall);
        }

        HttpSockets[NHttpSockets] = -1; // set in clientListenerConnectionOpened
        ++NHttpSockets;
    }
}
3438
/// Asks IPC to open a listening socket on \a port and routes connections that
/// the listener accepts to \a subCall. Used by protocol-specific listeners
/// (e.g., FTP) that share the HttpSockets book-keeping.
void
clientStartListeningOn(AnyP::PortCfgPointer &port, const RefCount< CommCbFunPtrCallT<CommAcceptCbPtrFun> > &subCall, const Ipc::FdNoteId fdNote)
{
    // Fill out a Comm::Connection which IPC will open as a listener for us
    port->listenConn = new Comm::Connection;
    port->listenConn->local = port->s;
    port->listenConn->flags =
        COMM_NONBLOCKING |
        (port->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
        (port->flags.natIntercept ? COMM_INTERCEPTION : 0);

    // route new connections to subCall
    typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
    Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
    AsyncCall::Pointer listenCall =
        asyncCall(33, 2, "clientListenerConnectionOpened",
                  ListeningStartedDialer(&clientListenerConnectionOpened,
                                         port, fdNote, sub));
    Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, port->listenConn, fdNote, listenCall);

    // reserve an HttpSockets[] slot; filled in clientListenerConnectionOpened
    assert(NHttpSockets < MAXTCPLISTENPORTS);
    HttpSockets[NHttpSockets] = -1;
    ++NHttpSockets;
}
3463
/// process clientHttpConnectionsOpen result: once IPC reports the listener
/// socket is open, starts the TcpAcceptor job that accepts client connections
/// on it, records the fd in HttpSockets[], and (with systemd) sends the
/// service readiness notification.
static void
clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub)
{
    Must(s != NULL);

    // bail (after adjusting the socket bookkeeping) if the open failed
    if (!OpenedHttpSocket(s->listenConn, portTypeNote))
        return;

    Must(Comm::IsConnOpen(s->listenConn));

    // TCP: setup a job to handle accept() with subscribed handler
    AsyncJob::Start(new Comm::TcpAcceptor(s, FdNote(portTypeNote), sub));

    debugs(1, DBG_IMPORTANT, "Accepting " <<
           (s->flags.natIntercept ? "NAT intercepted " : "") <<
           (s->flags.tproxyIntercept ? "TPROXY intercepted " : "") <<
           (s->flags.tunnelSslBumping ? "SSL bumped " : "") <<
           (s->flags.accelSurrogate ? "reverse-proxy " : "")
           << FdNote(portTypeNote) << " connections at "
           << s->listenConn);

    Must(AddOpenedHttpSocket(s->listenConn)); // otherwise, we have received a fd we did not ask for

#if USE_SYSTEMD
    // When the very first port opens, tell systemd we are able to serve connections.
    // Subsequent sd_notify() calls, including calls during reconfiguration,
    // do nothing because the first call parameter is 1.
    // XXX: Send the notification only after opening all configured ports.
    if (opt_foreground || opt_no_daemon) {
        const auto result = sd_notify(1, "READY=1");
        if (result < 0) {
            debugs(1, DBG_IMPORTANT, "WARNING: failed to send start-up notification to systemd" <<
                   Debug::Extra << "sd_notify() error: " << xstrerr(-result));
        }
    }
#endif
}
3502
3503 void
3504 clientOpenListenSockets(void)
3505 {
3506 clientHttpConnectionsOpen();
3507 Ftp::StartListening();
3508
3509 if (NHttpSockets < 1)
3510 fatal("No HTTP, HTTPS, or FTP ports configured");
3511 }
3512
3513 void
3514 clientConnectionsClose()
3515 {
3516 for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
3517 if (s->listenConn != NULL) {
3518 debugs(1, DBG_IMPORTANT, "Closing HTTP(S) port " << s->listenConn->local);
3519 s->listenConn->close();
3520 s->listenConn = NULL;
3521 }
3522 }
3523
3524 Ftp::StopListening();
3525
3526 // TODO see if we can drop HttpSockets array entirely */
3527 for (int i = 0; i < NHttpSockets; ++i) {
3528 HttpSockets[i] = -1;
3529 }
3530
3531 NHttpSockets = 0;
3532 }
3533
/// Compare the request's stored Vary mark against the cached entry's mark.
/// Returns VARY_NONE (entry does not vary), VARY_MATCH (this cached variant
/// matches the request), VARY_OTHER (a new vary mark was computed; the cache
/// search continues), or VARY_CANCEL (variance cannot be handled; bypass).
int
varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
{
    SBuf vary(request->vary_headers);
    const auto &reply = entry->mem().freshestReply();
    auto has_vary = reply.header.has(Http::HdrType::VARY);
#if X_ACCELERATOR_VARY

    has_vary |=
        reply.header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY);
#endif

    if (!has_vary || entry->mem_obj->vary_headers.isEmpty()) {
        if (!vary.isEmpty()) {
            /* Oops... something odd is going on here.. */
            // the request carries a vary mark but the entry does not vary:
            // inconsistent state; drop the mark and bypass the cache
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            request->vary_headers.clear();
            return VARY_CANCEL;
        }

        if (!has_vary) {
            /* This is not a varying object */
            return VARY_NONE;
        }

        /* virtual "vary" object found. Calculate the vary key and
         * continue the search
         */
        vary = httpMakeVaryMark(request, &reply);

        if (!vary.isEmpty()) {
            request->vary_headers = vary;
            return VARY_OTHER;
        } else {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        }
    } else {
        // entry is a concrete variant; compute the request's mark if missing
        if (vary.isEmpty()) {
            vary = httpMakeVaryMark(request, &reply);

            if (!vary.isEmpty())
                request->vary_headers = vary;
        }

        if (vary.isEmpty()) {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        } else if (vary.cmp(entry->mem_obj->vary_headers) == 0) {
            return VARY_MATCH;
        } else {
            /* Oops.. we have already been here and still haven't
             * found the requested variant. Bail out
             */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            return VARY_CANCEL;
        }
    }
}
3597
3598 ACLFilledChecklist *
3599 clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
3600 {
3601 const auto checklist = new ACLFilledChecklist(acl, nullptr, nullptr);
3602 clientAclChecklistFill(*checklist, http);
3603 return checklist;
3604 }
3605
3606 void
3607 clientAclChecklistFill(ACLFilledChecklist &checklist, ClientHttpRequest *http)
3608 {
3609 checklist.setRequest(http->request);
3610 checklist.al = http->al;
3611 checklist.syncAle(http->request, http->log_uri);
3612
3613 // TODO: If http->getConn is always http->request->clientConnectionManager,
3614 // then call setIdent() inside checklist.setRequest(). Otherwise, restore
3615 // USE_IDENT lost in commit 94439e4.
3616 ConnStateData * conn = http->getConn();
3617 const char *ident = (cbdataReferenceValid(conn) &&
3618 conn && conn->clientConnection) ?
3619 conn->clientConnection->rfc931 : dash_str;
3620 checklist.setIdent(ident);
3621 }
3622
3623 bool
3624 ConnStateData::transparent() const
3625 {
3626 return clientConnection != NULL && (clientConnection->flags & (COMM_TRANSPARENT|COMM_INTERCEPTION));
3627 }
3628
3629 BodyPipe::Pointer
3630 ConnStateData::expectRequestBody(int64_t size)
3631 {
3632 bodyPipe = new BodyPipe(this);
3633 if (size >= 0)
3634 bodyPipe->setBodySize(size);
3635 else
3636 startDechunkingRequest();
3637 return bodyPipe;
3638 }
3639
int64_t
ConnStateData::mayNeedToReadMoreBody() const
{
    // no pipe: request has no body, or we read/produced all body bytes
    if (!bodyPipe)
        return 0;

    // unknown body size: probably need more, but we cannot be sure
    if (!bodyPipe->bodySizeKnown())
        return -1;

    const int64_t unproduced = bodyPipe->unproducedSize();
    const int64_t buffered = static_cast<int64_t>(inBuf.length());

    // already buffered everything needed (perhaps waiting for pipe space)
    if (unproduced <= buffered)
        return 0;

    return unproduced - buffered;
}
3657
3658 void
3659 ConnStateData::stopReceiving(const char *error)
3660 {
3661 debugs(33, 4, HERE << "receiving error (" << clientConnection << "): " << error <<
3662 "; old sending error: " <<
3663 (stoppedSending() ? stoppedSending_ : "none"));
3664
3665 if (const char *oldError = stoppedReceiving()) {
3666 debugs(33, 3, HERE << "already stopped receiving: " << oldError);
3667 return; // nothing has changed as far as this connection is concerned
3668 }
3669
3670 stoppedReceiving_ = error;
3671
3672 if (const char *sendError = stoppedSending()) {
3673 debugs(33, 3, HERE << "closing because also stopped sending: " << sendError);
3674 clientConnection->close();
3675 }
3676 }
3677
3678 void
3679 ConnStateData::expectNoForwarding()
3680 {
3681 if (bodyPipe != NULL) {
3682 debugs(33, 4, HERE << "no consumer for virgin body " << bodyPipe->status());
3683 bodyPipe->expectNoConsumption();
3684 }
3685 }
3686
/// initialize dechunking state
void
ConnStateData::startDechunkingRequest()
{
    Must(bodyPipe != NULL);
    debugs(33, 5, HERE << "start dechunking" << bodyPipe->status());
    assert(!bodyParser); // a previous parser must have been finished/deleted
    bodyParser = new Http1::TeChunkedParser;
}
3696
/// put parsed content into input buffer and clean up
void
ConnStateData::finishDechunkingRequest(bool withSuccess)
{
    debugs(33, 5, HERE << "finish dechunking: " << withSuccess);

    if (bodyPipe != NULL) {
        debugs(33, 7, HERE << "dechunked tail: " << bodyPipe->status());
        // keep a local reference so the pipe can be queried after we let go
        BodyPipe::Pointer myPipe = bodyPipe;
        stopProducingFor(bodyPipe, withSuccess); // sets bodyPipe->bodySize()
        Must(!bodyPipe); // we rely on it being nil after we are done with body
        if (withSuccess) {
            Must(myPipe->bodySizeKnown());
            // propagate the now-known body size to the current request
            Http::StreamPointer context = pipeline.front();
            if (context != NULL && context->http && context->http->request)
                context->http->request->setContentLength(myPipe->bodySize());
        }
    }

    delete bodyParser;
    bodyParser = NULL;
}
3719
// XXX: this is an HTTP/1-only operation
void
ConnStateData::sendControlMsg(HttpControlMsg msg)
{
    // record the control message in the current transaction's log entry
    if (const auto context = pipeline.front()) {
        if (context->http)
            context->http->al->reply = msg.reply;
    }

    if (!isOpen()) {
        debugs(33, 3, HERE << "ignoring 1xx due to earlier closure");
        return;
    }

    // HTTP/1 1xx status messages are only valid when there is a transaction to trigger them
    if (!pipeline.empty()) {
        HttpReply::Pointer rep(msg.reply);
        Must(rep);
        // remember the callback
        cbControlMsgSent = msg.cbSuccess;

        typedef CommCbMemFunT<HttpControlMsgSink, CommIoCbParams> Dialer;
        AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, HttpControlMsgSink::wroteControlMsg);

        if (!writeControlMsgAndCall(rep.getRaw(), call)) {
            // but still inform the caller (so it may resume its operation)
            doneWithControlMsg();
        }
        return;
    }

    debugs(33, 3, HERE << " closing due to missing context for 1xx");
    clientConnection->close();
}
3754
3755 void
3756 ConnStateData::doneWithControlMsg()
3757 {
3758 HttpControlMsgSink::doneWithControlMsg();
3759
3760 if (Http::StreamPointer deferredRequest = pipeline.front()) {
3761 debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded after control msg wrote");
3762 ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
3763 }
3764 }
3765
/// Our close handler called by Comm when the pinned connection is closed
void
ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
{
    // FwdState might repin a failed connection sooner than this close
    // callback is called for the failed connection.
    assert(pinning.serverConnection == io.conn);
    pinning.closeHandler = NULL; // Comm unregisters handlers before calling
    const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
    pinning.serverConnection->noteClosure();
    unpinConnection(false);

    // propagate a zero-reply pinned-connection failure to the client
    if (sawZeroReply && clientConnection != NULL) {
        debugs(33, 3, "Closing client connection on pinned zero reply.");
        clientConnection->close();
    }

}
3784
/// Pin the given server connection while it is still busy with a request.
/// Unlike notePinnedConnectionBecameIdle(), this does not start monitoring
/// the pinned connection for remote-end closures.
void
ConnStateData::pinBusyConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest::Pointer &request)
{
    pinConnection(pinServer, *request);
}
3790
/// Pin the given idle server connection and watch it for remote closures.
void
ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
{
    Must(pic.connection);
    Must(pic.request);
    pinConnection(pic.connection, *pic.request);

    // monitor pinned server connection for remote-end closures.
    startPinnedConnectionMonitoring();

    if (pipeline.empty())
        kick(); // in case clientParseRequests() was blocked by a busy pic.connection
}
3804
/// Forward future client requests using the given server connection.
void
ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest &request)
{
    // already pinned to this very connection? then there is nothing to do
    if (Comm::IsConnOpen(pinning.serverConnection) &&
            pinning.serverConnection->fd == pinServer->fd) {
        debugs(33, 3, "already pinned" << pinServer);
        return;
    }

    unpinConnection(true); // closes pinned connection, if any, and resets fields

    pinning.serverConnection = pinServer;

    debugs(33, 3, HERE << pinning.serverConnection);

    Must(pinning.serverConnection != NULL);

    // remember the destination so future requests can be checked against it
    const char *pinnedHost = "[unknown]";
    pinning.host = xstrdup(request.url.host());
    pinning.port = request.url.port();
    pinnedHost = pinning.host;
    pinning.pinned = true;
    if (CachePeer *aPeer = pinServer->getPeer())
        pinning.peer = cbdataReference(aPeer); // counted ref; released on unpin
    pinning.auth = request.flags.connectionAuth;
    // label the server-side descriptor for cachemgr/debugging output
    char stmp[MAX_IPSTRLEN];
    char desc[FD_DESC_SZ];
    snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
             (pinning.auth || !pinning.peer) ? pinnedHost : pinning.peer->name,
             clientConnection->remote.toUrl(stmp,MAX_IPSTRLEN),
             clientConnection->fd);
    fd_note(pinning.serverConnection->fd, desc);

    // get notified when the pinned server connection closes
    typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
    pinning.closeHandler = JobCallback(33, 5,
                                       Dialer, this, ConnStateData::clientPinnedConnectionClosed);
    // remember the pinned connection so that cb does not unpin a fresher one
    typedef CommCloseCbParams Params;
    Params &params = GetCommParams<Params>(pinning.closeHandler);
    params.conn = pinning.serverConnection;
    comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
}
3848
3849 /// [re]start monitoring pinned connection for peer closures so that we can
3850 /// propagate them to an _idle_ client pinned to that peer
3851 void
3852 ConnStateData::startPinnedConnectionMonitoring()
3853 {
3854 if (pinning.readHandler != NULL)
3855 return; // already monitoring
3856
3857 typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
3858 pinning.readHandler = JobCallback(33, 3,
3859 Dialer, this, ConnStateData::clientPinnedConnectionRead);
3860 Comm::Read(pinning.serverConnection, pinning.readHandler);
3861 }
3862
3863 void
3864 ConnStateData::stopPinnedConnectionMonitoring()
3865 {
3866 if (pinning.readHandler != NULL) {
3867 Comm::ReadCancel(pinning.serverConnection->fd, pinning.readHandler);
3868 pinning.readHandler = NULL;
3869 }
3870 }
3871
#if USE_OPENSSL
/// Handles a ready-for-reading event on an idle pinned TLS server connection.
/// Returns true if monitoring was restarted (i.e., the pin survives).
bool
ConnStateData::handleIdleClientPinnedTlsRead()
{
    // A ready-for-reading connection means that the TLS server either closed
    // the connection, sent us some unexpected HTTP data, or started TLS
    // renegotiations. We should close the connection except for the last case.

    Must(pinning.serverConnection != nullptr);
    auto ssl = fd_table[pinning.serverConnection->fd].ssl.get();
    if (!ssl)
        return false; // not a TLS connection; let the caller close it

    // probe with a one-byte read; renegotiation happens inside SSL_read()
    char buf[1];
    const int readResult = SSL_read(ssl, buf, sizeof(buf));

    // actual application data arrived on an idle connection: close it
    if (readResult > 0 || SSL_pending(ssl) > 0) {
        debugs(83, 2, pinning.serverConnection << " TLS application data read");
        return false;
    }

    switch(const int error = SSL_get_error(ssl, readResult)) {
    case SSL_ERROR_WANT_WRITE:
        debugs(83, DBG_IMPORTANT, pinning.serverConnection << " TLS SSL_ERROR_WANT_WRITE request for idle pinned connection");
        // fall through to restart monitoring, for now
    case SSL_ERROR_NONE:
    case SSL_ERROR_WANT_READ:
        startPinnedConnectionMonitoring();
        return true;

    default:
        debugs(83, 2, pinning.serverConnection << " TLS error: " << error);
        return false;
    }

    // not reached
    return true;
}
#endif
3911
/// Our read handler called by Comm when the server either closes an idle pinned connection or
/// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
void
ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
{
    pinning.readHandler = NULL; // Comm unregisters handlers before calling

    if (io.flag == Comm::ERR_CLOSING)
        return; // close handler will clean up

    Must(pinning.serverConnection == io.conn);

#if USE_OPENSSL
    // a TLS server may legitimately start renegotiation; if so, keep the pin
    if (handleIdleClientPinnedTlsRead())
        return;
#endif

    const bool clientIsIdle = pipeline.empty();

    debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " <<
           io.size << (clientIsIdle ? " with idle client" : ""));

    pinning.serverConnection->close();

    // If we are still sending data to the client, do not close now. When we are done sending,
    // ConnStateData::kick() checks pinning.serverConnection and will close.
    // However, if we are idle, then we must close to inform the idle client and minimize races.
    if (clientIsIdle && clientConnection != NULL)
        clientConnection->close();
}
3942
/// Returns the still-usable pinned server connection to the caller or throws
/// an ErrorState if the pin can no longer serve the given request.
Comm::ConnectionPointer
ConnStateData::borrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
{
    debugs(33, 7, pinning.serverConnection);
    Must(request);

    // unpins and converts a refusal reason into a forwarding error to throw
    const auto pinningError = [&](const err_type type) {
        unpinConnection(true);
        HttpRequestPointer requestPointer = request;
        return ErrorState::NewForwarding(type, requestPointer, ale);
    };

    if (!Comm::IsConnOpen(pinning.serverConnection))
        throw pinningError(ERR_ZERO_SIZE_OBJECT);

    // connection-auth pins must only serve the host they were pinned for
    if (pinning.auth && pinning.host && strcasecmp(pinning.host, request->url.host()) != 0)
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST

    if (pinning.port != request->url.port())
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_CONFLICT_HOST

    // the cache_peer we pinned to may have disappeared (e.g., reconfiguration)
    if (pinning.peer && !cbdataReferenceValid(pinning.peer))
        throw pinningError(ERR_ZERO_SIZE_OBJECT);

    if (pinning.peerAccessDenied)
        throw pinningError(ERR_CANNOT_FORWARD); // or generalize ERR_FORWARDING_DENIED

    stopPinnedConnectionMonitoring();
    return pinning.serverConnection;
}
3973
3974 Comm::ConnectionPointer
3975 ConnStateData::BorrowPinnedConnection(HttpRequest *request, const AccessLogEntryPointer &ale)
3976 {
3977 if (const auto connManager = request ? request->pinnedConnection() : nullptr)
3978 return connManager->borrowPinnedConnection(request, ale);
3979
3980 // ERR_CANNOT_FORWARD is somewhat misleading here; we can still forward, but
3981 // there is no point since the client connection is now gone
3982 HttpRequestPointer requestPointer = request;
3983 throw ErrorState::NewForwarding(ERR_CANNOT_FORWARD, requestPointer, ale);
3984 }
3985
/// Forget the pinned server connection, optionally closing it as well.
void
ConnStateData::unpinConnection(const bool andClose)
{
    debugs(33, 3, HERE << pinning.serverConnection);

    // release our counted reference to the cache_peer, if any
    if (pinning.peer)
        cbdataReferenceDone(pinning.peer);

    if (Comm::IsConnOpen(pinning.serverConnection)) {
        // remove the close handler first so closing does not call back into us
        if (pinning.closeHandler != NULL) {
            comm_remove_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
            pinning.closeHandler = NULL;
        }

        stopPinnedConnectionMonitoring();

        // close the server side socket if requested
        if (andClose)
            pinning.serverConnection->close();
        pinning.serverConnection = NULL;
    }

    safe_free(pinning.host);

    pinning.zeroReply = false;
    pinning.peerAccessDenied = false;

    /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
     * connection has gone away */
}
4016
/// Log this connection-level "transaction" unless some request object is
/// (or will be) responsible for logging it instead.
void
ConnStateData::checkLogging()
{
    // if we are parsing request body, its request is responsible for logging
    if (bodyPipe)
        return;

    // a request currently using this connection is responsible for logging
    if (!pipeline.empty() && pipeline.back()->mayUseConnection())
        return;

    /* Either we are waiting for the very first transaction, or
     * we are done with the Nth transaction and are waiting for N+1st.
     * XXX: We assume that if anything was added to inBuf, then it could
     * only be consumed by actions already covered by the above checks.
     */

    // do not log connections that closed after a transaction (it is normal)
    // TODO: access_log needs ACLs to match received-no-bytes connections
    if (pipeline.nrequests && inBuf.isEmpty())
        return;

    /* Create a temporary ClientHttpRequest object. Its destructor will log. */
    ClientHttpRequest http(this);
    http.req_sz = inBuf.length();
    // XXX: Or we died while waiting for the pinned connection to become idle.
    http.setErrorUri("error:transaction-end-before-headers");
}
4045
/// Whether bytes already received from the client may be preserved (for
/// tunneling) instead of being consumed by protocol parsing.
bool
ConnStateData::shouldPreserveClientData() const
{
    // PROXY protocol bytes are meant for us and, hence, cannot be tunneled
    if (needProxyProtocolHeader_)
        return false;

    // If our decision here is negative, configuration changes are irrelevant.
    // Otherwise, clientTunnelOnError() rechecks configuration before tunneling.
    if (!Config.accessList.on_unsupported_protocol)
        return false;

    // TODO: Figure out whether/how we can support FTP tunneling.
    if (port->transport.protocol == AnyP::PROTO_FTP)
        return false;

#if USE_OPENSSL
    // bytes of a TLS handshake we are still parsing
    if (parsingTlsHandshake)
        return true;

    // the 1st HTTP request on a bumped connection
    if (!parsedBumpedRequestCount && switchedToHttps())
        return true;
#endif

    // the 1st HTTP request on a connection to a plain intercepting port
    if (!pipeline.nrequests && !port->secure.encryptTransport && transparent())
        return true;

    return false;
}
4077
4078 NotePairs::Pointer
4079 ConnStateData::notes()
4080 {
4081 if (!theNotes)
4082 theNotes = new NotePairs;
4083 return theNotes;
4084 }
4085
4086 std::ostream &
4087 operator <<(std::ostream &os, const ConnStateData::PinnedIdleContext &pic)
4088 {
4089 return os << pic.connection << ", request=" << pic.request;
4090 }
4091