]> git.ipfire.org Git - thirdparty/squid.git/blob - src/client_side.cc
Log PROXY protocol v2 TLVs; fix PROXY protocol parsing bugs (#342)
[thirdparty/squid.git] / src / client_side.cc
1 /*
2 * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 33 Client-side Routines */
10
11 /**
12 \defgroup ClientSide Client-Side Logics
13 *
14 \section cserrors Errors and client side
15 *
16 \par Problem the first:
17 * the store entry is no longer authoritative on the
18 * reply status. EBITTEST (E_ABORT) is no longer a valid test outside
19 * of client_side_reply.c.
20 * Problem the second: resources are wasted if we delay in cleaning up.
21 * Problem the third we can't depend on a connection close to clean up.
22 *
23 \par Nice thing the first:
24 * Any step in the stream can callback with data
25 * representing an error.
26 * Nice thing the second: once you stop requesting reads from upstream,
27 * upstream can be stopped too.
28 *
29 \par Solution #1:
30 * Error has a callback mechanism to hand over a membuf
31 * with the error content. The failing node pushes that back as the
32 * reply. Can this be generalised to reduce duplicate efforts?
33 * A: Possibly. For now, only one location uses this.
34 * How to deal with pre-stream errors?
35 * Tell client_side_reply that we *want* an error page before any
36 * stream calls occur. Then we simply read as normal.
37 *
38 *
39 \section pconn_logic Persistent connection logic:
40 *
41 \par
42 * requests (httpClientRequest structs) get added to the connection
43 * list, with the current one being chr
44 *
45 \par
46 * The request is *immediately* kicked off, and data flows through
47 * to clientSocketRecipient.
48 *
49 \par
50 * If the data that arrives at clientSocketRecipient is not for the current
51 * request, clientSocketRecipient simply returns, without requesting more
52 * data, or sending it.
53 *
54 \par
55 * ConnStateData::kick() will then detect the presence of data in
 * the next ClientHttpRequest, and will send it, re-establishing the
57 * data flow.
58 */
59
60 #include "squid.h"
61 #include "acl/FilledChecklist.h"
62 #include "anyp/PortCfg.h"
63 #include "base/Subscription.h"
64 #include "base/TextException.h"
65 #include "CachePeer.h"
66 #include "client_db.h"
67 #include "client_side.h"
68 #include "client_side_reply.h"
69 #include "client_side_request.h"
70 #include "ClientRequestContext.h"
71 #include "clientStream.h"
72 #include "comm.h"
73 #include "comm/Connection.h"
74 #include "comm/Loops.h"
75 #include "comm/Read.h"
76 #include "comm/TcpAcceptor.h"
77 #include "comm/Write.h"
78 #include "CommCalls.h"
79 #include "errorpage.h"
80 #include "fd.h"
81 #include "fde.h"
82 #include "fqdncache.h"
83 #include "FwdState.h"
84 #include "globals.h"
85 #include "helper.h"
86 #include "helper/Reply.h"
87 #include "http.h"
88 #include "http/one/RequestParser.h"
89 #include "http/one/TeChunkedParser.h"
90 #include "http/Stream.h"
91 #include "HttpHdrContRange.h"
92 #include "HttpHeaderTools.h"
93 #include "HttpReply.h"
94 #include "HttpRequest.h"
95 #include "ident/Config.h"
96 #include "ident/Ident.h"
97 #include "internal.h"
98 #include "ipc/FdNotes.h"
99 #include "ipc/StartListening.h"
100 #include "log/access_log.h"
101 #include "MemBuf.h"
102 #include "MemObject.h"
103 #include "mime_header.h"
104 #include "parser/Tokenizer.h"
105 #include "profiler/Profiler.h"
106 #include "proxyp/Header.h"
107 #include "proxyp/Parser.h"
108 #include "security/NegotiationHistory.h"
109 #include "servers/forward.h"
110 #include "SquidConfig.h"
111 #include "SquidTime.h"
112 #include "StatCounters.h"
113 #include "StatHist.h"
114 #include "Store.h"
115 #include "TimeOrTag.h"
116 #include "tools.h"
117
118 #if USE_AUTH
119 #include "auth/UserRequest.h"
120 #endif
121 #if USE_DELAY_POOLS
122 #include "ClientInfo.h"
123 #include "MessageDelayPools.h"
124 #endif
125 #if USE_OPENSSL
126 #include "ssl/bio.h"
127 #include "ssl/context_storage.h"
128 #include "ssl/gadgets.h"
129 #include "ssl/helper.h"
130 #include "ssl/ProxyCerts.h"
131 #include "ssl/ServerBump.h"
132 #include "ssl/support.h"
133 #endif
134
135 // for tvSubUsec() which should be in SquidTime.h
136 #include "util.h"
137
138 #include <climits>
139 #include <cmath>
140 #include <limits>
141
142 #if LINGERING_CLOSE
143 #define comm_close comm_lingering_close
144 #endif
145
/// dials clientListenerConnectionOpened call
/// Carries the port configuration and subscription through the IPC
/// "start listening" callback so the handler can finish port setup.
class ListeningStartedDialer: public CallDialer, public Ipc::StartListeningCb
{
public:
    typedef void (*Handler)(AnyP::PortCfgPointer &portCfg, const Ipc::FdNoteId note, const Subscription::Pointer &sub);
    ListeningStartedDialer(Handler aHandler, AnyP::PortCfgPointer &aPortCfg, const Ipc::FdNoteId note, const Subscription::Pointer &aSub):
        handler(aHandler), portCfg(aPortCfg), portTypeNote(note), sub(aSub) {}

    virtual void print(std::ostream &os) const {
        startPrint(os) <<
        ", " << FdNote(portTypeNote) << " port=" << (void*)&portCfg << ')';
    }

    virtual bool canDial(AsyncCall &) const { return true; }
    virtual void dial(AsyncCall &) { (handler)(portCfg, portTypeNote, sub); }

public:
    Handler handler; ///< function called when the listening socket is ready

private:
    AnyP::PortCfgPointer portCfg;   ///< from HttpPortList
    Ipc::FdNoteId portTypeNote;    ///< Type of IPC socket being opened
    Subscription::Pointer sub; ///< The handler to be subscribed for this connection listener
};
170
static void clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub);

/* comm callbacks */
static IOACB httpAccept;
static CTCB clientLifetimeTimeout;
#if USE_IDENT
static IDCB clientIdentDone;
#endif
/* request validation helpers */
static int clientIsContentLengthValid(HttpRequest * r);
static int clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength);

/* statistics bookkeeping helpers */
static void clientUpdateStatHistCounters(const LogTags &logType, int svc_time);
static void clientUpdateStatCounters(const LogTags &logType);
static void clientUpdateHierCounters(HierarchyLogEntry *);
static bool clientPingHasFinished(ping_data const *aPing);
void prepareLogWithRequestDetails(HttpRequest *, AccessLogEntry::Pointer &);
static void ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn);

char *skipLeadingSpace(char *aString);
189
#if USE_IDENT
/// Ident lookup callback: records the RFC 931 user name (or "-" when the
/// lookup produced nothing) on the client connection for logging.
static void
clientIdentDone(const char *ident, void *data)
{
    ConnStateData *conn = (ConnStateData *)data;
    xstrncpy(conn->clientConnection->rfc931, ident ? ident : dash_str, USER_IDENT_SZ);
}
#endif
198
199 void
200 clientUpdateStatCounters(const LogTags &logType)
201 {
202 ++statCounter.client_http.requests;
203
204 if (logType.isTcpHit())
205 ++statCounter.client_http.hits;
206
207 if (logType.oldType == LOG_TCP_HIT)
208 ++statCounter.client_http.disk_hits;
209 else if (logType.oldType == LOG_TCP_MEM_HIT)
210 ++statCounter.client_http.mem_hits;
211 }
212
213 void
214 clientUpdateStatHistCounters(const LogTags &logType, int svc_time)
215 {
216 statCounter.client_http.allSvcTime.count(svc_time);
217 /**
218 * The idea here is not to be complete, but to get service times
219 * for only well-defined types. For example, we don't include
220 * LOG_TCP_REFRESH_FAIL because its not really a cache hit
221 * (we *tried* to validate it, but failed).
222 */
223
224 switch (logType.oldType) {
225
226 case LOG_TCP_REFRESH_UNMODIFIED:
227 statCounter.client_http.nearHitSvcTime.count(svc_time);
228 break;
229
230 case LOG_TCP_INM_HIT:
231 case LOG_TCP_IMS_HIT:
232 statCounter.client_http.nearMissSvcTime.count(svc_time);
233 break;
234
235 case LOG_TCP_HIT:
236
237 case LOG_TCP_MEM_HIT:
238
239 case LOG_TCP_OFFLINE_HIT:
240 statCounter.client_http.hitSvcTime.count(svc_time);
241 break;
242
243 case LOG_TCP_MISS:
244
245 case LOG_TCP_CLIENT_REFRESH_MISS:
246 statCounter.client_http.missSvcTime.count(svc_time);
247 break;
248
249 default:
250 /* make compiler warnings go away */
251 break;
252 }
253 }
254
255 bool
256 clientPingHasFinished(ping_data const *aPing)
257 {
258 if (0 != aPing->stop.tv_sec && 0 != aPing->start.tv_sec)
259 return true;
260
261 return false;
262 }
263
/// Updates peer-selection statistics (cache-digest, ICP, netdb counters)
/// according to how this request was forwarded (someEntry->code).
void
clientUpdateHierCounters(HierarchyLogEntry * someEntry)
{
    ping_data *i;

    switch (someEntry->code) {
#if USE_CACHE_DIGESTS

    case CD_PARENT_HIT:

    case CD_SIBLING_HIT:
        ++ statCounter.cd.times_used;
        break;
#endif

    case SIBLING_HIT:

    case PARENT_HIT:

    case FIRST_PARENT_MISS:

    case CLOSEST_PARENT_MISS:
        ++ statCounter.icp.times_used;
        i = &someEntry->ping;

        // count query service time only when the ping exchange completed
        if (clientPingHasFinished(i))
            statCounter.icp.querySvcTime.count(tvSubUsec(i->start, i->stop));

        if (i->timeout)
            ++ statCounter.icp.query_timeouts;

        break;

    case CLOSEST_PARENT:

    case CLOSEST_DIRECT:
        ++ statCounter.netdb.times_used;

        break;

    default:
        // other forwarding codes do not feed any statistics
        break;
    }
}
308
/// Records this finished transaction in the global statistics: request/hit
/// counters, error count, service-time histograms, and hierarchy counters.
void
ClientHttpRequest::updateCounters()
{
    clientUpdateStatCounters(logType);

    if (request->errType != ERR_NONE)
        ++ statCounter.client_http.errors;

    // service time measured from transaction start to now, in milliseconds
    clientUpdateStatHistCounters(logType,
                                 tvSubMsec(al->cache.start_time, current_time));

    clientUpdateHierCounters(&request->hier);
}
322
/// Copies request details (headers when log_mime_hdrs is on, adaptation/ICAP
/// history, method, version, hierarchy data, and body/DNS/error stats) from
/// the given request into the access-log entry before it is logged.
void
prepareLogWithRequestDetails(HttpRequest * request, AccessLogEntry::Pointer &aLogEntry)
{
    assert(request);
    assert(aLogEntry != NULL);

    if (Config.onoff.log_mime_hdrs) {
        MemBuf mb;
        mb.init();
        request->header.packInto(&mb);
        //This is the request after adaptation or redirection
        aLogEntry->headers.adapted_request = xstrdup(mb.buf);

        // the virgin request is saved to aLogEntry->request
        if (aLogEntry->request) {
            mb.reset();
            aLogEntry->request->header.packInto(&mb);
            aLogEntry->headers.request = xstrdup(mb.buf);
        }

#if USE_ADAPTATION
        // also log the metadata of the last adaptation transaction, if any
        const Adaptation::History::Pointer ah = request->adaptLogHistory();
        if (ah != NULL) {
            mb.reset();
            ah->lastMeta.packInto(&mb);
            aLogEntry->adapt.last_meta = xstrdup(mb.buf);
        }
#endif

        mb.clean();
    }

#if ICAP_CLIENT
    const Adaptation::Icap::History::Pointer ih = request->icapHistory();
    if (ih != NULL)
        ih->processingTime(aLogEntry->icap.processingTime);
#endif

    aLogEntry->http.method = request->method;
    aLogEntry->http.version = request->http_ver;
    aLogEntry->hier = request->hier;
    aLogEntry->cache.extuser = request->extacl_user.termedBuf();

    // Adapted request, if any, inherits and then collects all the stats, but
    // the virgin request gets logged instead; copy the stats to log them.
    // TODO: avoid losses by keeping these stats in a shared history object?
    if (aLogEntry->request) {
        aLogEntry->request->dnsWait = request->dnsWait;
        aLogEntry->request->errType = request->errType;
        aLogEntry->request->errDetail = request->errDetail;
    }
}
375
/// Finalizes and emits the access-log entry for this transaction, annotates
/// the request with configured notes, runs the access_log and
/// stats_collection ACL checks, and updates performance counters when allowed.
void
ClientHttpRequest::logRequest()
{
    if (!out.size && logType.oldType == LOG_TAG_NONE)
        debugs(33, 5, "logging half-baked transaction: " << log_uri);

    al->icp.opcode = ICP_INVALID;
    al->url = log_uri;
    debugs(33, 9, "clientLogRequest: al.url='" << al->url << "'");

    // prefer the actual reply; fall back on the logging entry's stored reply
    if (al->reply) {
        al->http.code = al->reply->sline.status();
        al->http.content_type = al->reply->content_type.termedBuf();
    } else if (loggingEntry() && loggingEntry()->mem_obj) {
        al->http.code = loggingEntry()->mem_obj->getReply()->sline.status();
        al->http.content_type = loggingEntry()->mem_obj->getReply()->content_type.termedBuf();
    }

    debugs(33, 9, "clientLogRequest: http.code='" << al->http.code << "'");

    if (loggingEntry() && loggingEntry()->mem_obj && loggingEntry()->objectLen() >= 0)
        al->cache.objectSize = loggingEntry()->contentLen(); // payload duplicate ?? with or without TE ?

    al->http.clientRequestSz.header = req_sz;
    // the virgin request is saved to al->request
    if (al->request && al->request->body_pipe)
        al->http.clientRequestSz.payloadData = al->request->body_pipe->producedSize();
    al->http.clientReplySz.header = out.headers_sz;
    // XXX: calculate without payload encoding or headers !!
    al->http.clientReplySz.payloadData = out.size - out.headers_sz; // pretend its all un-encoded data for now.

    al->cache.highOffset = out.offset;

    al->cache.code = logType;

    // total transaction response time
    tvSub(al->cache.trTime, al->cache.start_time, current_time);

    if (request)
        prepareLogWithRequestDetails(request, al);

#if USE_OPENSSL && 0

    /* This is broken. Fails if the connection has been closed. Needs
     * to snarf the ssl details some place earlier..
     */
    if (getConn() != NULL)
        al->cache.ssluser = sslGetUserEmail(fd_table[getConn()->fd].ssl);

#endif

    /* Add notes (if we have a request to annotate) */
    if (request) {
        SBuf matched;
        for (auto h: Config.notes) {
            if (h->match(request, al->reply, NULL, matched)) {
                request->notes()->add(h->key(), matched);
                debugs(33, 3, h->key() << " " << matched);
            }
        }
        // The al->notes and request->notes must point to the same object.
        al->syncNotes(request);
    }

    ACLFilledChecklist checklist(NULL, request, NULL);
    if (al->reply) {
        checklist.reply = al->reply;
        HTTPMSGLOCK(checklist.reply); // balanced by checklist destruction
    }

    if (request) {
        HTTPMSGUNLOCK(al->adapted_request);
        al->adapted_request = request;
        HTTPMSGLOCK(al->adapted_request);
    }
    // no need checklist.syncAle(): already synced
    checklist.al = al;
    accessLogLog(al, &checklist);

    // stats_collection ACL (when configured) decides whether this
    // transaction is counted in the performance statistics
    bool updatePerformanceCounters = true;
    if (Config.accessList.stats_collection) {
        ACLFilledChecklist statsCheck(Config.accessList.stats_collection, request, NULL);
        statsCheck.al = al;
        if (al->reply) {
            statsCheck.reply = al->reply;
            HTTPMSGLOCK(statsCheck.reply);
        }
        updatePerformanceCounters = statsCheck.fastCheck().allowed();
    }

    if (updatePerformanceCounters) {
        if (request)
            updateCounters();

        if (getConn() != NULL && getConn()->clientConnection != NULL)
            clientdbUpdate(getConn()->clientConnection->remote, logType, AnyP::PROTO_HTTP, out.size);
    }
}
473
/// Releases per-request resources: URI strings, redirect location, range
/// iterator state, and the HttpRequest; aborts any remaining client stream.
void
ClientHttpRequest::freeResources()
{
    safe_free(uri);
    safe_free(redirect.location);
    range_iter.boundary.clean();
    clearRequest();

    // tear down the downstream end of the client stream, if still attached
    if (client_stream.tail)
        clientStreamAbort((clientStreamNode *)client_stream.tail->data, this);
}
485
486 void
487 httpRequestFree(void *data)
488 {
489 ClientHttpRequest *http = (ClientHttpRequest *)data;
490 assert(http != NULL);
491 delete http;
492 }
493
/* This is a handler normally called by comm_close() */
void ConnStateData::connStateClosed(const CommCloseCbParams &)
{
    // begin orderly job termination; swanSong() runs before destruction
    deleteThis("ConnStateData::connStateClosed");
}
499
#if USE_AUTH
/// Installs, clears, or rejects connection-level authentication credentials.
/// Changing or erasing established credentials is a security event: the
/// helper is released and the connection is stopped or reset (see below).
/// \param aur the new credentials, or NULL to erase existing ones
/// \param by  human-readable origin of the change, for log messages
void
ConnStateData::setAuth(const Auth::UserRequest::Pointer &aur, const char *by)
{
    // first-time setup: just adopt the credentials
    if (auth_ == NULL) {
        if (aur != NULL) {
            debugs(33, 2, "Adding connection-auth to " << clientConnection << " from " << by);
            auth_ = aur;
        }
        return;
    }

    // clobbered with self-pointer
    // NP: something nasty is going on in Squid, but harmless.
    if (aur == auth_) {
        debugs(33, 2, "WARNING: Ignoring duplicate connection-auth for " << clientConnection << " from " << by);
        return;
    }

    /*
     * Connection-auth relies on a single set of credentials being preserved
     * for all requests on a connection once they have been setup.
     * There are several things which need to happen to preserve security
     * when connection-auth credentials change unexpectedly or are unset.
     *
     * 1) auth helper released from any active state
     *
     *   They can only be reserved by a handshake process which this
     *   connection can now never complete.
     *   This prevents helpers hanging when their connections close.
     *
     * 2) pinning is expected to be removed and server conn closed
     *
     *   The upstream link is authenticated with the same credentials.
     *   Expecting the same level of consistency we should have received.
     *   This prevents upstream being faced with multiple or missing
     *   credentials after authentication.
     *   NP: un-pin is left to the cleanup in ConnStateData::swanSong()
     *       we just trigger that cleanup here via comm_reset_close() or
     *       ConnStateData::stopReceiving()
     *
     * 3) the connection needs to close.
     *
     *   This prevents attackers injecting requests into a connection,
     *   or gateways wrongly multiplexing users into a single connection.
     *
     *   When credentials are missing closure needs to follow an auth
     *   challenge for best recovery by the client.
     *
     *   When credentials change there is nothing we can do but abort as
     *   fast as possible. Sending TCP RST instead of an HTTP response
     *   is the best-case action.
     */

    // clobbered with nul-pointer
    if (aur == NULL) {
        debugs(33, 2, "WARNING: Graceful closure on " << clientConnection << " due to connection-auth erase from " << by);
        auth_->releaseAuthServer();
        auth_ = NULL;
        // XXX: need to test whether the connection re-auth challenge is sent. If not, how to trigger it from here.
        // NP: the current situation seems to fix challenge loops in Safari without visible issues in others.
        //     we stop receiving more traffic but can leave the Job running to terminate after the error or challenge is delivered.
        stopReceiving("connection-auth removed");
        return;
    }

    // clobbered with alternative credentials
    if (aur != auth_) {
        debugs(33, 2, "ERROR: Closing " << clientConnection << " due to change of connection-auth from " << by);
        auth_->releaseAuthServer();
        auth_ = NULL;
        // this is a fatal type of problem.
        // Close the connection immediately with TCP RST to abort all traffic flow
        comm_reset_close(clientConnection);
        return;
    }

    /* NOT REACHABLE */
}
#endif
580
// cleans up before destructor is called
/// Job cleanup: logs any half-done transaction, terminates pipelined
/// streams, unpins the server connection, and closes the client socket.
void
ConnStateData::swanSong()
{
    debugs(33, 2, HERE << clientConnection);
    checkLogging();

    flags.readMore = false;
    clientdbEstablished(clientConnection->remote, -1); /* decrement */
    pipeline.terminateAll(0);

    // XXX: Closing pinned conn is too harsh: The Client may want to continue!
    unpinConnection(true);

    Server::swanSong(); // closes the client connection

#if USE_AUTH
    // NP: do this bit after closing the connections to avoid side effects from unwanted TCP RST
    setAuth(NULL, "ConnStateData::SwanSong cleanup");
#endif

    flags.swanSang = true;
}
604
605 bool
606 ConnStateData::isOpen() const
607 {
608 return cbdataReferenceValid(this) && // XXX: checking "this" in a method
609 Comm::IsConnOpen(clientConnection) &&
610 !fd_table[clientConnection->fd].closing();
611 }
612
/// Destructor: sanity-checks that the job shut down properly, releases the
/// body pipe, the request-body parser, and any SSL-bump state.
ConnStateData::~ConnStateData()
{
    debugs(33, 3, HERE << clientConnection);

    // both conditions below indicate earlier cleanup steps were skipped
    if (isOpen())
        debugs(33, DBG_IMPORTANT, "BUG: ConnStateData did not close " << clientConnection);

    if (!flags.swanSang)
        debugs(33, DBG_IMPORTANT, "BUG: ConnStateData was not destroyed properly; " << clientConnection);

    if (bodyPipe != NULL)
        stopProducingFor(bodyPipe, false);

    delete bodyParser; // TODO: pool

#if USE_OPENSSL
    delete sslServerBump;
#endif
}
632
/**
 * clientSetKeepaliveFlag() sets request->flags.proxyKeepalive.
 * This is the client-side persistent connection flag.  We need
 * to set this relatively early in the request processing
 * to handle hacks for broken servers and clients.
 */
void
clientSetKeepaliveFlag(ClientHttpRequest * http)
{
    HttpRequest *request = http->request;

    debugs(33, 3, "http_ver = " << request->http_ver);
    debugs(33, 3, "method = " << request->method);

    // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
    request->flags.proxyKeepalive = request->persistent();
}
650
651 /// checks body length of non-chunked requests
652 static int
653 clientIsContentLengthValid(HttpRequest * r)
654 {
655 // No Content-Length means this request just has no body, but conflicting
656 // Content-Lengths mean a message framing error (RFC 7230 Section 3.3.3 #4).
657 if (r->header.conflictingContentLength())
658 return 0;
659
660 switch (r->method.id()) {
661
662 case Http::METHOD_GET:
663
664 case Http::METHOD_HEAD:
665 /* We do not want to see a request entity on GET/HEAD requests */
666 return (r->content_length <= 0 || Config.onoff.request_entities);
667
668 default:
669 /* For other types of requests we don't care */
670 return 1;
671 }
672
673 /* NOT REACHED */
674 }
675
676 int
677 clientIsRequestBodyTooLargeForPolicy(int64_t bodyLength)
678 {
679 if (Config.maxRequestBodySize &&
680 bodyLength > Config.maxRequestBodySize)
681 return 1; /* too large */
682
683 return 0;
684 }
685
/// whether this request asks for multiple byte ranges, so the reply needs
/// multipart/byteranges framing; delegates to the underlying HttpRequest
bool
ClientHttpRequest::multipartRangeRequest() const
{
    return request->multipartRangeRequest();
}
691
/// Appends the terminating boundary line ("--boundary--") of a multipart
/// reply to mb; the leading CRLF is required by RFC 2046.
void
clientPackTermBound(String boundary, MemBuf *mb)
{
    mb->appendf("\r\n--" SQUIDSTRINGPH "--\r\n", SQUIDSTRINGPRINT(boundary));
    debugs(33, 6, "buf offset: " << mb->size);
}
698
/// Packs one part header of a multipart/byteranges reply into mb: the
/// boundary line, Content-Type copied from the reply, and a Content-Range
/// describing the given range spec, followed by the blank separator line.
void
clientPackRangeHdr(const HttpReplyPointer &rep, const HttpHdrRangeSpec * spec, String boundary, MemBuf * mb)
{
    HttpHeader hdr(hoReply);
    assert(rep);
    assert(spec);

    /* put boundary */
    debugs(33, 5, "appending boundary: " << boundary);
    /* rfc2046 requires to _prepend_ boundary with <crlf>! */
    mb->appendf("\r\n--" SQUIDSTRINGPH "\r\n", SQUIDSTRINGPRINT(boundary));

    /* stuff the header with required entries and pack it */

    if (rep->header.has(Http::HdrType::CONTENT_TYPE))
        hdr.putStr(Http::HdrType::CONTENT_TYPE, rep->header.getStr(Http::HdrType::CONTENT_TYPE));

    httpHeaderAddContRange(&hdr, *spec, rep->content_length);

    hdr.packInto(mb);
    hdr.clean();

    /* append <crlf> (we packed a header, not a reply) */
    mb->append("\r\n", 2);
}
724
/** returns expected content length for multi-range replies
 * note: assumes that httpHdrRangeCanonize has already been called
 * warning: assumes that HTTP headers for individual ranges at the
 * time of the actual assembly will be exactly the same as
 * the headers when clientMRangeCLen() is called */
int
ClientHttpRequest::mRangeCLen()
{
    // NOTE(review): clen accumulates in int64_t but the return type is int;
    // extremely large multi-range replies could narrow — confirm callers.
    int64_t clen = 0;
    MemBuf mb;

    assert(memObject());

    mb.init();
    HttpHdrRange::iterator pos = request->range->begin();

    while (pos != request->range->end()) {
        /* account for headers for this range */
        mb.reset();
        clientPackRangeHdr(memObject()->getReply(),
                           *pos, range_iter.boundary, &mb);
        clen += mb.size;

        /* account for range content */
        clen += (*pos)->length;

        debugs(33, 6, "clientMRangeCLen: (clen += " << mb.size << " + " << (*pos)->length << ") == " << clen);
        ++pos;
    }

    /* account for the terminating boundary */
    mb.reset();

    clientPackTermBound(range_iter.boundary, &mb);

    clen += mb.size;

    mb.clean();

    return clen;
}
766
767 /**
768 * generates a "unique" boundary string for multipart responses
769 * the caller is responsible for cleaning the string */
770 String
771 ClientHttpRequest::rangeBoundaryStr() const
772 {
773 const char *key;
774 String b(APP_FULLNAME);
775 b.append(":",1);
776 key = storeEntry()->getMD5Text();
777 b.append(key, strlen(key));
778 return b;
779 }
780
/**
 * Write a chunk of data to a client socket. If the reply is present,
 * send the reply headers down the wire too, and clean them up when
 * finished.
 * Pre-condition:
 *   The request is one backed by a connection, not an internal request.
 *   data context is not NULL
 *   There are no more entries in the stream chain.
 */
void
clientSocketRecipient(clientStreamNode * node, ClientHttpRequest * http,
                      HttpReply * rep, StoreIOBuffer receivedData)
{
    // do not try to deliver if client already ABORTED
    if (!http->getConn() || !cbdataReferenceValid(http->getConn()) || !Comm::IsConnOpen(http->getConn()->clientConnection))
        return;

    /* Test preconditions */
    assert(node != NULL);
    PROF_start(clientSocketRecipient);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    assert(node->node.next == NULL);
    Http::StreamPointer context = dynamic_cast<Http::Stream *>(node->data.getRaw());
    assert(context != NULL);

    /* TODO: check offset is what we asked for */

    // TODO: enforces HTTP/1 MUST on pipeline order, but is irrelevant to HTTP/2
    if (context != http->getConn()->pipeline.front())
        context->deferRecipientForLater(node, rep, receivedData);
    else if (http->getConn()->cbControlMsgSent) // 1xx to the user is pending
        context->deferRecipientForLater(node, rep, receivedData);
    else
        http->getConn()->handleReply(rep, receivedData);

    PROF_stop(clientSocketRecipient);
}
823
/**
 * Called when a downstream node is no longer interested in
 * our data. As we are a terminal node, this means on aborts
 * only
 */
void
clientSocketDetach(clientStreamNode * node, ClientHttpRequest * http)
{
    /* Test preconditions */
    assert(node != NULL);
    /* TODO: handle this rather than asserting
     * - it should only ever happen if we cause an abort and
     * the callback chain loops back to here, so we can simply return.
     * However, that itself shouldn't happen, so it stays as an assert for now.
     */
    assert(cbdataReferenceValid(node));
    /* Set null by ContextFree */
    assert(node->node.next == NULL);
    /* this is the assert discussed above */
    assert(NULL == dynamic_cast<Http::Stream *>(node->data.getRaw()));
    /* We are only called when the client socket shutsdown.
     * Tell the prev pipeline member we're finished
     */
    clientStreamDetach(node, http);
}
849
/// Prepares an idle persistent connection for the next request: re-arms the
/// idle timeout and schedules another read on the client socket.
void
ConnStateData::readNextRequest()
{
    debugs(33, 5, HERE << clientConnection << " reading next req");

    fd_note(clientConnection->fd, "Idle client: Waiting for next request");
    /**
     * Set the timeout BEFORE calling readSomeData().
     */
    typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(33, 5,
                                     TimeoutDialer, this, ConnStateData::requestTimeout);
    commSetConnTimeout(clientConnection, clientConnection->timeLeft(idleTimeout()), timeoutCall);

    readSomeData();
    /** Please don't do anything with the FD past here! */
}
867
/// Re-delivers a reply chunk that was deferred because an earlier pipelined
/// request was still occupying the front of the pipeline.
static void
ClientSocketContextPushDeferredIfNeeded(Http::StreamPointer deferredRequest, ConnStateData * conn)
{
    debugs(33, 2, HERE << conn->clientConnection << " Sending next");

    /** If the client stream is waiting on a socket write to occur, then */

    if (deferredRequest->flags.deferred) {
        /** NO data is allowed to have been sent. */
        assert(deferredRequest->http->out.size == 0);
        /** replay the deferred delivery now. */
        clientSocketRecipient(deferredRequest->deferredparams.node,
                              deferredRequest->http,
                              deferredRequest->deferredparams.rep,
                              deferredRequest->deferredparams.queuedBuffer);
    }

    /** otherwise, the request is still active in a callback somewhere,
     * and we are done
     */
}
889
/// Advances the connection after a response finishes: closes broken or
/// errored connections, parses pipelined requests already in the buffer,
/// then either pushes a deferred reply or schedules the next read.
void
ConnStateData::kick()
{
    if (!Comm::IsConnOpen(clientConnection)) {
        debugs(33, 2, clientConnection << " Connection was closed");
        return;
    }

    if (pinning.pinned && !Comm::IsConnOpen(pinning.serverConnection)) {
        debugs(33, 2, clientConnection << " Connection was pinned but server side gone. Terminating client connection");
        clientConnection->close();
        return;
    }

    /** \par
     * We are done with the response, and we are either still receiving request
     * body (early response!) or have already stopped receiving anything.
     *
     * If we are still receiving, then clientParseRequest() below will fail.
     * (XXX: but then we will call readNextRequest() which may succeed and
     * execute a smuggled request as we are not done with the current request).
     *
     * If we stopped because we got everything, then try the next request.
     *
     * If we stopped receiving because of an error, then close now to avoid
     * getting stuck and to prevent accidental request smuggling.
     */

    if (const char *reason = stoppedReceiving()) {
        debugs(33, 3, "closing for earlier request error: " << reason);
        clientConnection->close();
        return;
    }

    /** \par
     * Attempt to parse a request from the request buffer.
     * If we've been fed a pipelined request it may already
     * be in our read buffer.
     *
     \par
     * This needs to fall through - if we're unlucky and parse the _last_ request
     * from our read buffer we may never re-register for another client read.
     */

    if (clientParseRequests()) {
        debugs(33, 3, clientConnection << ": parsed next request from buffer");
    }

    /** \par
     * Either we need to kick-start another read or, if we have
     * a half-closed connection, kill it after the last request.
     * This saves waiting for half-closed connections to finish being
     * half-closed _AND_ then, sometimes, spending "Timeout" time in
     * the keepalive "Waiting for next request" state.
     */
    if (commIsHalfClosed(clientConnection->fd) && pipeline.empty()) {
        debugs(33, 3, "half-closed client with no pending requests, closing");
        clientConnection->close();
        return;
    }

    /** \par
     * At this point we either have a parsed request (which we've
     * kicked off the processing for) or not. If we have a deferred
     * request (parsed but deferred for pipelining processing reasons)
     * then look at processing it. If not, simply kickstart
     * another read.
     */
    Http::StreamPointer deferredRequest = pipeline.front();
    if (deferredRequest != nullptr) {
        debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded");
        ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
    } else if (flags.readMore) {
        debugs(33, 3, clientConnection << ": calling readNextRequest()");
        readNextRequest();
    } else {
        // XXX: Can this happen? CONNECT tunnels have deferredRequest set.
        debugs(33, DBG_IMPORTANT, MYNAME << "abandoning " << clientConnection);
    }
}
970
/// Marks the sending side of this connection as stopped due to an error.
/// Closes the connection immediately unless we must keep reading an
/// unfinished request body first.
/// \param error short description of why sending stopped
void
ConnStateData::stopSending(const char *error)
{
    debugs(33, 4, HERE << "sending error (" << clientConnection << "): " << error <<
           "; old receiving error: " <<
           (stoppedReceiving() ? stoppedReceiving_ : "none"));

    if (const char *oldError = stoppedSending()) {
        debugs(33, 3, HERE << "already stopped sending: " << oldError);
        return; // nothing has changed as far as this connection is concerned
    }
    stoppedSending_ = error;

    if (!stoppedReceiving()) {
        if (const int64_t expecting = mayNeedToReadMoreBody()) {
            debugs(33, 5, HERE << "must still read " << expecting <<
                   " request body bytes with " << inBuf.length() << " unused");
            return; // wait for the request receiver to finish reading
        }
    }

    clientConnection->close();
}
994
/// Handles completion of a write of `size` bytes to the client socket:
/// updates outbound traffic counters and notifies the current stream.
void
ConnStateData::afterClientWrite(size_t size)
{
    if (pipeline.empty())
        return;

    auto ctx = pipeline.front();
    if (size) {
        statCounter.client_http.kbytes_out += size;
        // also count hit traffic separately
        if (ctx->http->logType.isTcpHit())
            statCounter.client_http.hit_kbytes_out += size;
    }
    ctx->writeComplete(size);
}
1009
1010 Http::Stream *
1011 ConnStateData::abortRequestParsing(const char *const uri)
1012 {
1013 ClientHttpRequest *http = new ClientHttpRequest(this);
1014 http->req_sz = inBuf.length();
1015 http->setErrorUri(uri);
1016 auto *context = new Http::Stream(clientConnection, http);
1017 StoreIOBuffer tempBuffer;
1018 tempBuffer.data = context->reqbuf;
1019 tempBuffer.length = HTTP_REQBUF_SZ;
1020 clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
1021 clientReplyStatus, new clientReplyContext(http), clientSocketRecipient,
1022 clientSocketDetach, context, tempBuffer);
1023 return context;
1024 }
1025
1026 void
1027 ConnStateData::startShutdown()
1028 {
1029 // RegisteredRunner API callback - Squid has been shut down
1030
1031 // if connection is idle terminate it now,
1032 // otherwise wait for grace period to end
1033 if (pipeline.empty())
1034 endingShutdown();
1035 }
1036
1037 void
1038 ConnStateData::endingShutdown()
1039 {
1040 // RegisteredRunner API callback - Squid shutdown grace period is over
1041
1042 // force the client connection to close immediately
1043 // swanSong() in the close handler will cleanup.
1044 if (Comm::IsConnOpen(clientConnection))
1045 clientConnection->close();
1046 }
1047
1048 char *
1049 skipLeadingSpace(char *aString)
1050 {
1051 char *result = aString;
1052
1053 while (xisspace(*aString))
1054 ++aString;
1055
1056 return result;
1057 }
1058
/**
 * Locate the "HTTP/..." version label at the end of a request line.
 *
 * Scans backwards from 'end' (or from the first CR/LF when 'end' is NULL),
 * skipping trailing CR/LF bytes, and returns a pointer to the token that
 * follows the last whitespace character when that token starts with
 * "HTTP/" (case-insensitive). Returns NULL when no such label exists,
 * e.g. for an HTTP/0.9 style request line.
 *
 * 'end' defaults to NULL for backwards compatibility
 * remove default value if we ever get rid of NULL-terminated
 * request buffers.
 */
const char *
findTrailingHTTPVersion(const char *uriAndHTTPVersion, const char *end)
{
    if (NULL == end) {
        // stop at the first CR or LF; strcspn lands on the terminating
        // NUL when the line has neither
        end = uriAndHTTPVersion + strcspn(uriAndHTTPVersion, "\r\n");
        assert(end);
    }

    // walk backwards towards the start of the line
    for (; end > uriAndHTTPVersion; --end) {
        if (*end == '\n' || *end == '\r')
            continue; // skip trailing line terminators

        if (xisspace(*end)) {
            if (strncasecmp(end + 1, "HTTP/", 5) == 0)
                return end + 1; // the token after the last space is the version label
            else
                break; // last token is not an HTTP version label
        }
    }

    return NULL; // no version label found
}
1086
/// Rewrite an accelerated (reverse-proxy) request-URI into an absolute URL
/// using the receiving port's vhost/vport/defaultsite configuration.
/// \returns a malloc()ed URL string owned by the caller,
///          or nullptr when the URI needs no rewriting
static char *
prepareAcceleratedURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
{
    int vhost = conn->port->vhost;
    int vport = conn->port->vport;
    static char ipbuf[MAX_IPSTRLEN];

    /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */

    // cache_object:// URIs are already absolute; leave them untouched
    static const SBuf cache_object("cache_object://");
    if (hp->requestUri().startsWith(cache_object))
        return nullptr; /* already in good shape */

    // XXX: re-use proper URL parser for this
    SBuf url = hp->requestUri(); // use full provided URI if we abort
    do { // use a loop so we can break out of it
        ::Parser::Tokenizer tok(url);
        if (tok.skip('/')) // origin-form URL already.
            break;

        if (conn->port->vhost)
            return nullptr; /* already in good shape */

        // skip the URI scheme
        static const CharacterSet uriScheme = CharacterSet("URI-scheme","+-.") + CharacterSet::ALPHA + CharacterSet::DIGIT;
        static const SBuf uriSchemeEnd("://");
        if (!tok.skipAll(uriScheme) || !tok.skip(uriSchemeEnd))
            break;

        // skip the authority segment
        // RFC 3986 complex nested ABNF for "authority" boils down to this:
        static const CharacterSet authority = CharacterSet("authority","-._~%:@[]!$&'()*+,;=") +
                                              CharacterSet::HEXDIG + CharacterSet::ALPHA + CharacterSet::DIGIT;
        if (!tok.skipAll(authority))
            break;

        // keep only what followed the authority: path, query, or fragment
        static const SBuf slashUri("/");
        const SBuf t = tok.remaining();
        if (t.isEmpty())
            url = slashUri;
        else if (t[0]=='/') // looks like path
            url = t;
        else if (t[0]=='?' || t[0]=='#') { // looks like query or fragment. fix '/'
            url = slashUri;
            url.append(t);
        } // else do nothing. invalid path

    } while(false);

#if SHOULD_REJECT_UNKNOWN_URLS
    // reject URI which are not well-formed even after the processing above
    // NOTE(review): this returns a Http::Stream* from a char* function;
    // this branch appears to be compiled out by default -- verify before enabling.
    if (url.isEmpty() || url[0] != '/') {
        hp->parseStatusCode = Http::scBadRequest;
        return conn->abortRequestParsing("error:invalid-request");
    }
#endif

    // vport < 0 means: use the port the client actually connected to
    if (vport < 0)
        vport = conn->clientConnection->local.port();

    char *host = NULL;
    if (vhost && (host = hp->getHeaderField("Host"))) {
        // rebuild the URL from the Host header, replacing or appending
        // the configured vport where needed
        debugs(33, 5, "ACCEL VHOST REWRITE: vhost=" << host << " + vport=" << vport);
        // NOTE(review): thost is fixed at 256 bytes; a Host value longer than
        // that relies on upstream header-size limits to avoid overflow -- confirm.
        char thost[256];
        if (vport > 0) {
            thost[0] = '\0';
            char *t = NULL;
            // replace an existing :port suffix, but not the "]" that ends
            // a bracketed IPv6 literal without a port
            if (host[strlen(host) - 1] != ']' && (t = strrchr(host,':')) != nullptr) {
                strncpy(thost, host, (t-host));
                snprintf(thost+(t-host), sizeof(thost)-(t-host), ":%d", vport);
                host = thost;
            } else if (!t) {
                snprintf(thost, sizeof(thost), "%s:%d",host, vport);
                host = thost;
            }
        } // else nothing to alter port-wise.
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + strlen(host) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s" SQUIDSBUFPH, SQUIDSBUFPRINT(scheme), host, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VHOST REWRITE: " << uri);
        return uri;
    } else if (conn->port->defaultsite /* && !vhost */) {
        // no usable Host header: fall back to the configured defaultsite
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: defaultsite=" << conn->port->defaultsite << " + vport=" << vport);
        char vportStr[32];
        vportStr[0] = '\0';
        if (vport > 0) {
            snprintf(vportStr, sizeof(vportStr),":%d",vport);
        }
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + strlen(conn->port->defaultsite) + sizeof(vportStr) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s%s" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), conn->port->defaultsite, vportStr, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL DEFAULTSITE REWRITE: " << uri);
        return uri;
    } else if (vport > 0 /* && (!vhost || no Host:) */) {
        debugs(33, 5, "ACCEL VPORT REWRITE: *_port IP + vport=" << vport);
        /* Put the local socket IP address as the hostname, with whatever vport we found */
        conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
        const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
        const int url_sz = scheme.length() + sizeof(ipbuf) + url.length() + 32;
        char *uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme), ipbuf, vport, SQUIDSBUFPRINT(url));
        debugs(33, 5, "ACCEL VPORT REWRITE: " << uri);
        return uri;
    }

    return nullptr; // no rewrite applies
}
1198
1199 static char *
1200 buildUrlFromHost(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1201 {
1202 char *uri = nullptr;
1203 /* BUG: Squid cannot deal with '*' URLs (RFC2616 5.1.2) */
1204 if (const char *host = hp->getHeaderField("Host")) {
1205 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1206 const int url_sz = scheme.length() + strlen(host) + hp->requestUri().length() + 32;
1207 uri = static_cast<char *>(xcalloc(url_sz, 1));
1208 snprintf(uri, url_sz, SQUIDSBUFPH "://%s" SQUIDSBUFPH,
1209 SQUIDSBUFPRINT(scheme),
1210 host,
1211 SQUIDSBUFPRINT(hp->requestUri()));
1212 }
1213 return uri;
1214 }
1215
/// Build an absolute URL for a request received on a connection that has
/// switched to HTTPS (e.g., after a bumped CONNECT). Prefers the request's
/// Host header; otherwise (with OpenSSL) uses the TLS client SNI or the
/// original CONNECT host/IP, plus the CONNECT port.
/// \returns a malloc()ed URL string owned by the caller,
///          or nullptr when the request-URI is already absolute
char *
ConnStateData::prepareTlsSwitchingURL(const Http1::RequestParserPointer &hp)
{
    Must(switchedToHttps());

    if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
        return nullptr; /* already in good shape */

    // first choice: derive the authority from the Host header
    char *uri = buildUrlFromHost(this, hp);
#if USE_OPENSSL
    if (!uri) {
        Must(tlsConnectPort);
        Must(sslConnectHostOrIp.size());
        // fall back to the SNI name (if any) or the original CONNECT target
        SBuf useHost;
        if (!tlsClientSni().isEmpty())
            useHost = tlsClientSni();
        else
            useHost.assign(sslConnectHostOrIp.rawBuf(), sslConnectHostOrIp.size());

        const SBuf &scheme = AnyP::UriScheme(transferProtocol.protocol).image();
        const int url_sz = scheme.length() + useHost.length() + hp->requestUri().length() + 32;
        uri = static_cast<char *>(xcalloc(url_sz, 1));
        snprintf(uri, url_sz, SQUIDSBUFPH "://" SQUIDSBUFPH ":%d" SQUIDSBUFPH,
                 SQUIDSBUFPRINT(scheme),
                 SQUIDSBUFPRINT(useHost),
                 tlsConnectPort,
                 SQUIDSBUFPRINT(hp->requestUri()));
    }
#endif
    if (uri)
        debugs(33, 5, "TLS switching host rewrite: " << uri);
    return uri;
}
1249
1250 static char *
1251 prepareTransparentURL(ConnStateData * conn, const Http1::RequestParserPointer &hp)
1252 {
1253 // TODO Must() on URI !empty when the parser supports throw. For now avoid assert().
1254 if (!hp->requestUri().isEmpty() && hp->requestUri()[0] != '/')
1255 return nullptr; /* already in good shape */
1256
1257 char *uri = buildUrlFromHost(conn, hp);
1258 if (!uri) {
1259 /* Put the local socket IP address as the hostname. */
1260 static char ipbuf[MAX_IPSTRLEN];
1261 conn->clientConnection->local.toHostStr(ipbuf,MAX_IPSTRLEN);
1262 const SBuf &scheme = AnyP::UriScheme(conn->transferProtocol.protocol).image();
1263 const int url_sz = sizeof(ipbuf) + hp->requestUri().length() + 32;
1264 uri = static_cast<char *>(xcalloc(url_sz, 1));
1265 snprintf(uri, url_sz, SQUIDSBUFPH "://%s:%d" SQUIDSBUFPH,
1266 SQUIDSBUFPRINT(scheme),
1267 ipbuf, conn->clientConnection->local.port(), SQUIDSBUFPRINT(hp->requestUri()));
1268 }
1269
1270 if (uri)
1271 debugs(33, 5, "TRANSPARENT REWRITE: " << uri);
1272 return uri;
1273 }
1274
1275 /** Parse an HTTP request
1276 *
1277 * \note Sets result->flags.parsed_ok to 0 if failed to parse the request,
1278 * to 1 if the request was correctly parsed.
1279 * \param[in] csd a ConnStateData. The caller must make sure it is not null
1280 * \param[in] hp an Http1::RequestParser
 * \param[out] method_p will be set as a side-effect of the parsing.
1282 * Pointed-to value will be set to Http::METHOD_NONE in case of
1283 * parsing failure
1284 * \param[out] http_ver will be set as a side-effect of the parsing
1285 * \return NULL on incomplete requests,
1286 * a Http::Stream on success or failure.
1287 */
Http::Stream *
parseHttpRequest(ConnStateData *csd, const Http1::RequestParserPointer &hp)
{
    /* Attempt to parse the first line; this will define where the method, url, version and header begin */
    {
        const bool parsedOk = hp->parse(csd->inBuf);

        // sync the buffers after parsing.
        csd->inBuf = hp->remaining();

        if (hp->needsMoreData()) {
            debugs(33, 5, "Incomplete request, waiting for end of request line");
            return NULL;
        }

        // keep a copy of the raw bytes in case on_unsupported_protocol
        // later decides to tunnel this traffic instead of erroring out
        if (csd->mayTunnelUnsupportedProto()) {
            csd->preservedClientData = hp->parsed();
            csd->preservedClientData.append(csd->inBuf);
        }

        if (!parsedOk) {
            // distinguish oversized requests from malformed ones in the error URI
            const bool tooBig =
                hp->parseStatusCode == Http::scRequestHeaderFieldsTooLarge ||
                hp->parseStatusCode == Http::scUriTooLong;
            auto result = csd->abortRequestParsing(
                tooBig ? "error:request-too-large" : "error:invalid-request");
            // assume that remaining leftovers belong to this bad request
            if (!csd->inBuf.isEmpty())
                csd->consumeInput(csd->inBuf.length());
            return result;
        }
    }

    /* We know the whole request is in parser now */
    debugs(11, 2, "HTTP Client " << csd->clientConnection);
    debugs(11, 2, "HTTP Client REQUEST:\n---------\n" <<
           hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol() << "\n" <<
           hp->mimeHeader() <<
           "\n----------");

    /* deny CONNECT via accelerated ports */
    if (hp->method() == Http::METHOD_CONNECT && csd->port != NULL && csd->port->flags.accelSurrogate) {
        debugs(33, DBG_IMPORTANT, "WARNING: CONNECT method received on " << csd->transferProtocol << " Accelerator port " << csd->port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return csd->abortRequestParsing("error:method-not-allowed");
    }

    /* RFC 7540 section 11.6 registers the method PRI as HTTP/2 specific
     * Deny "PRI" method if used in HTTP/1.x or 0.9 versions.
     * If seen it signals a broken client or proxy has corrupted the traffic.
     */
    if (hp->method() == Http::METHOD_PRI && hp->messageProtocol() < Http::ProtocolVersion(2,0)) {
        debugs(33, DBG_IMPORTANT, "WARNING: PRI method received on " << csd->transferProtocol << " port " << csd->port->s.port());
        debugs(33, DBG_IMPORTANT, "WARNING: for request: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return csd->abortRequestParsing("error:method-not-allowed");
    }

    if (hp->method() == Http::METHOD_NONE) {
        debugs(33, DBG_IMPORTANT, "WARNING: Unsupported method: " << hp->method() << " " << hp->requestUri() << " " << hp->messageProtocol());
        hp->parseStatusCode = Http::scMethodNotAllowed;
        return csd->abortRequestParsing("error:unsupported-request-method");
    }

    // Process headers after request line
    debugs(33, 3, "complete request received. " <<
           "prefix_sz = " << hp->messageHeaderSize() <<
           ", request-line-size=" << hp->firstLineSize() <<
           ", mime-header-size=" << hp->headerBlockSize() <<
           ", mime header block:\n" << hp->mimeHeader() << "\n----------");

    /* Ok, all headers are received */
    ClientHttpRequest *http = new ClientHttpRequest(csd);

    http->req_sz = hp->messageHeaderSize();
    Http::Stream *result = new Http::Stream(csd->clientConnection, http);

    // the buffer through which reply data will flow to the client socket
    StoreIOBuffer tempBuffer;
    tempBuffer.data = result->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // link the reply-producing (server) and reply-delivering (client) nodes
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = result;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    /* set url */
    debugs(33,5, "Prepare absolute URL from " <<
           (csd->transparent()?"intercept":(csd->port->flags.accelSurrogate ? "accel":"")));
    /* Rewrite the URL in transparent or accelerator mode */
    /* NP: there are several cases to traverse here:
     *  - standard mode (forward proxy)
     *  - transparent mode (TPROXY)
     *  - transparent mode with failures
     *  - intercept mode (NAT)
     *  - intercept mode with failures
     *  - accelerator mode (reverse proxy)
     *  - internal relative-URL
     *  - mixed combos of the above with internal URL
     *  - remote interception with PROXY protocol
     *  - remote reverse-proxy with PROXY protocol
     */
    if (csd->switchedToHttps()) {
        http->uri = csd->prepareTlsSwitchingURL(hp);
    } else if (csd->transparent()) {
        /* intercept or transparent mode, properly working with no failures */
        http->uri = prepareTransparentURL(csd, hp);

    } else if (internalCheck(hp->requestUri())) { // NP: only matches relative-URI
        /* internal URL mode */
        /* prepend our name & port */
        http->uri = xstrdup(internalLocalUri(NULL, hp->requestUri()));
        // We just re-wrote the URL. Must replace the Host: header.
        //  But have not parsed there yet!! flag for local-only handling.
        http->flags.internal = true;

    } else if (csd->port->flags.accelSurrogate) {
        /* accelerator mode */
        http->uri = prepareAcceleratedURL(csd, hp);
        http->flags.accel = true;
    }

    if (!http->uri) {
        /* No special rewrites have been applied above, use the
         * requested url. may be rewritten later, so make extra room */
        int url_sz = hp->requestUri().length() + Config.appendDomainLen + 5;
        http->uri = (char *)xcalloc(url_sz, 1);
        SBufToCstring(http->uri, hp->requestUri());
    }

    result->flags.parsed_ok = 1;
    return result;
}
1423
1424 bool
1425 ConnStateData::connFinishedWithConn(int size)
1426 {
1427 if (size == 0) {
1428 if (pipeline.empty() && inBuf.isEmpty()) {
1429 /* no current or pending requests */
1430 debugs(33, 4, HERE << clientConnection << " closed");
1431 return true;
1432 } else if (!Config.onoff.half_closed_clients) {
1433 /* admin doesn't want to support half-closed client sockets */
1434 debugs(33, 3, HERE << clientConnection << " aborted (half_closed_clients disabled)");
1435 pipeline.terminateAll(0);
1436 return true;
1437 }
1438 }
1439
1440 return false;
1441 }
1442
1443 void
1444 ConnStateData::consumeInput(const size_t byteCount)
1445 {
1446 assert(byteCount > 0 && byteCount <= inBuf.length());
1447 inBuf.consume(byteCount);
1448 debugs(33, 5, "inBuf has " << inBuf.length() << " unused bytes");
1449 }
1450
1451 void
1452 ConnStateData::clientAfterReadingRequests()
1453 {
1454 // Were we expecting to read more request body from half-closed connection?
1455 if (mayNeedToReadMoreBody() && commIsHalfClosed(clientConnection->fd)) {
1456 debugs(33, 3, HERE << "truncated body: closing half-closed " << clientConnection);
1457 clientConnection->close();
1458 return;
1459 }
1460
1461 if (flags.readMore)
1462 readSomeData();
1463 }
1464
1465 void
1466 ConnStateData::quitAfterError(HttpRequest *request)
1467 {
1468 // From HTTP p.o.v., we do not have to close after every error detected
1469 // at the client-side, but many such errors do require closure and the
1470 // client-side code is bad at handling errors so we play it safe.
1471 if (request)
1472 request->flags.proxyKeepalive = false;
1473 flags.readMore = false;
1474 debugs(33,4, HERE << "Will close after error: " << clientConnection);
1475 }
1476
#if USE_OPENSSL
/// Serve an SslBump error that was detected before this request arrived:
/// either an error entry saved while processing CONNECT, or (in
/// bump-server-first mode) a server certificate that does not match the
/// domain named by the now-visible bumped request.
/// \retval true an error response was initiated; stop normal processing
/// \retval false no delayed error applies; continue processing
bool ConnStateData::serveDelayedError(Http::Stream *context)
{
    ClientHttpRequest *http = context->http;

    if (!sslServerBump)
        return false; // not bumping this connection

    assert(sslServerBump->entry);
    // Did we create an error entry while processing CONNECT?
    if (!sslServerBump->entry->isEmpty()) {
        quitAfterError(http->request);

        // Get the saved error entry and send it to the client by replacing the
        // ClientHttpRequest store entry with it.
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert(repContext);
        // BUG FIX: corrected "delated" typo in this diagnostic message
        debugs(33, 5, "Responding with delayed error for " << http->uri);
        repContext->setReplyToStoreEntry(sslServerBump->entry, "delayed SslBump error");

        // Get error details from the fake certificate-peeking request.
        http->request->detailError(sslServerBump->request->errType, sslServerBump->request->errDetail);
        context->pullData();
        return true;
    }

    // In bump-server-first mode, we have not necessarily seen the intended
    // server name at certificate-peeking time. Check for domain mismatch now,
    // when we can extract the intended name from the bumped HTTP request.
    if (const Security::CertPointer &srvCert = sslServerBump->serverCert) {
        HttpRequest *request = http->request;
        if (!Ssl::checkX509ServerValidity(srvCert.get(), request->url.host())) {
            debugs(33, 2, "SQUID_X509_V_ERR_DOMAIN_MISMATCH: Certificate " <<
                   "does not match domainname " << request->url.host());

            // the sslproxy_cert_error ACL may permit this mismatch
            bool allowDomainMismatch = false;
            if (Config.ssl_client.cert_error) {
                ACLFilledChecklist check(Config.ssl_client.cert_error, request, dash_str);
                check.al = http->al;
                check.sslErrors = new Security::CertErrors(Security::CertError(SQUID_X509_V_ERR_DOMAIN_MISMATCH, srvCert));
                check.syncAle(request, http->log_uri);
                allowDomainMismatch = check.fastCheck().allowed();
                delete check.sslErrors;
                check.sslErrors = NULL;
            }

            if (!allowDomainMismatch) {
                quitAfterError(request);

                clientStreamNode *node = context->getClientReplyContext();
                clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
                assert (repContext);

                request->hier = sslServerBump->request->hier;

                // Create an error object and fill it
                ErrorState *err = new ErrorState(ERR_SECURE_CONNECT_FAIL, Http::scServiceUnavailable, request);
                err->src_addr = clientConnection->remote;
                Ssl::ErrorDetail *errDetail = new Ssl::ErrorDetail(
                    SQUID_X509_V_ERR_DOMAIN_MISMATCH,
                    srvCert.get(), nullptr);
                err->detail = errDetail;
                repContext->setReplyToError(request->method, err);
                assert(context->http->out.offset == 0);
                context->pullData();
                return true;
            }
        }
    }

    return false;
}
#endif // USE_OPENSSL
1551
/**
 * Check on_unsupported_protocol checklist and return true if tunnel mode selected
 * or false otherwise
 */
bool
clientTunnelOnError(ConnStateData *conn, Http::StreamPointer &context, HttpRequest::Pointer &request, const HttpRequestMethod& method, err_type requestError)
{
    if (conn->mayTunnelUnsupportedProto()) {
        // evaluate on_unsupported_protocol for this client and error type
        ACLFilledChecklist checklist(Config.accessList.on_unsupported_protocol, request.getRaw(), nullptr);
        checklist.al = (context && context->http) ? context->http->al : nullptr;
        checklist.requestErrorType = requestError;
        checklist.src_addr = conn->clientConnection->remote;
        checklist.my_addr = conn->clientConnection->local;
        checklist.conn(conn);
        ClientHttpRequest *http = context ? context->http : nullptr;
        const char *log_uri = http ? http->log_uri : nullptr;
        checklist.syncAle(request.getRaw(), log_uri);
        allow_t answer = checklist.fastCheck();
        // answer.kind == 1 corresponds to the tunnel action here
        if (answer.allowed() && answer.kind == 1) {
            debugs(33, 3, "Request will be tunneled to server");
            if (context) {
                assert(conn->pipeline.front() == context); // XXX: still assumes HTTP/1 semantics
                context->finished(); // Will remove from conn->pipeline queue
            }
            // stop HTTP-oriented reading; the tunnel takes over the socket
            Comm::SetSelect(conn->clientConnection->fd, COMM_SELECT_READ, NULL, NULL, 0);
            return conn->initiateTunneledRequest(request, Http::METHOD_NONE, "unknown-protocol", conn->preservedClientData);
        } else {
            debugs(33, 3, "Continue with returning the error: " << requestError);
        }
    }

    return false;
}
1585
1586 void
1587 clientProcessRequestFinished(ConnStateData *conn, const HttpRequest::Pointer &request)
1588 {
1589 /*
1590 * DPW 2007-05-18
1591 * Moved the TCP_RESET feature from clientReplyContext::sendMoreData
1592 * to here because calling comm_reset_close() causes http to
1593 * be freed before accessing.
1594 */
1595 if (request != NULL && request->flags.resetTcp && Comm::IsConnOpen(conn->clientConnection)) {
1596 debugs(33, 3, HERE << "Sending TCP RST on " << conn->clientConnection);
1597 conn->flags.readMore = false;
1598 comm_reset_close(conn->clientConnection);
1599 }
1600 }
1601
/// Finish setting up a fully parsed and checked request (flags, internal
/// URL detection, transfer-encoding validation, request body pipe) and
/// start the ClientRequestContext callout sequence via doCallouts().
/// hp is nil for FTP-originated requests. Every exit path goes through
/// clientProcessRequestFinished().
void
clientProcessRequest(ConnStateData *conn, const Http1::RequestParserPointer &hp, Http::Stream *context)
{
    ClientHttpRequest *http = context->http;
    bool chunked = false;
    bool mustReplyToOptions = false;
    bool unsupportedTe = false;
    bool expectBody = false;

    // We already have the request parsed and checked, so we
    // only need to go through the final body/conn setup to doCallouts().
    assert(http->request);
    HttpRequest::Pointer request = http->request;

    // temporary hack to avoid splitting this huge function with sensitive code
    const bool isFtp = !hp;

    // Some blobs below are still HTTP-specific, but we would have to rewrite
    // this entire function to remove them from the FTP code path. Connection
    // setup and body_pipe preparation blobs are needed for FTP.

    request->manager(conn, http->al);

    request->flags.accelerated = http->flags.accel;
    request->flags.sslBumped=conn->switchedToHttps();
    // TODO: decouple http->flags.accel from request->flags.sslBumped
    request->flags.noDirect = (request->flags.accelerated && !request->flags.sslBumped) ?
                              !conn->port->allow_direct : 0;
    // record which protocol family this request arrived over
    request->sources |= isFtp ? Http::Message::srcFtp :
                        ((request->flags.sslBumped || conn->port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    if (request->flags.sslBumped) {
        // reuse credentials established on this (bumped) connection
        if (conn->getAuth() != NULL)
            request->auth_user_request = conn->getAuth();
    }
#endif

    // detect URLs that target this proxy itself (internal objects)
    if (internalCheck(request->url.path())) {
        if (internalHostnameIs(request->url.host()) && request->url.port() == getMyPort()) {
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true));
            http->flags.internal = true;
        } else if (Config.onoff.global_internal_static && internalStaticCheck(request->url.path())) {
            // serve static internal objects regardless of the named host
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (global_internal_static on)");
            request->url.setScheme(AnyP::PROTO_HTTP, "http");
            request->url.host(internalHostname());
            request->url.port(getMyPort());
            http->flags.internal = true;
            http->setLogUriToRequestUri();
        } else
            debugs(33, 2, "internal URL found: " << request->url.getScheme() << "://" << request->url.authority(true) << " (not this proxy)");
    }

    request->flags.internal = http->flags.internal;

    if (!isFtp) {
        // XXX: for non-HTTP messages instantiate a different Http::Message child type
        // for now Squid only supports HTTP requests
        const AnyP::ProtocolVersion &http_ver = hp->messageProtocol();
        assert(request->http_ver.protocol == http_ver.protocol);
        request->http_ver.major = http_ver.major;
        request->http_ver.minor = http_ver.minor;
    }

    // classify the request body transfer coding
    if (request->header.chunked()) {
        chunked = true;
    } else if (request->header.has(Http::HdrType::TRANSFER_ENCODING)) {
        const String te = request->header.getList(Http::HdrType::TRANSFER_ENCODING);
        // HTTP/1.1 requires chunking to be the last encoding if there is one
        unsupportedTe = te.size() && te != "identity";
    } // else implied identity coding

    // OPTIONS with Max-Forwards: 0 must be answered by this hop itself
    mustReplyToOptions = (request->method == Http::METHOD_OPTIONS) &&
                         (request->header.getInt64(Http::HdrType::MAX_FORWARDS) == 0);
    if (!urlCheckRequest(request.getRaw()) || mustReplyToOptions || unsupportedTe) {
        // respond with 501 Not Implemented and close afterwards
        clientStreamNode *node = context->getClientReplyContext();
        conn->quitAfterError(request.getRaw());
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        repContext->setReplyToError(ERR_UNSUP_REQ, Http::scNotImplemented, request->method, NULL,
                                    conn->clientConnection->remote, request.getRaw(), NULL, NULL);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    // non-chunked requests must carry a valid Content-Length (or none)
    if (!chunked && !clientIsContentLengthValid(request.getRaw())) {
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
        assert (repContext);
        conn->quitAfterError(request.getRaw());
        repContext->setReplyToError(ERR_INVALID_REQ,
                                    Http::scLengthRequired, request->method, NULL,
                                    conn->clientConnection->remote, request.getRaw(), NULL, NULL);
        assert(context->http->out.offset == 0);
        context->pullData();
        clientProcessRequestFinished(conn, request);
        return;
    }

    clientSetKeepaliveFlag(http);
    // Let tunneling code be fully responsible for CONNECT requests
    if (http->request->method == Http::METHOD_CONNECT) {
        context->mayUseConnection(true);
        conn->flags.readMore = false;
    }

#if USE_OPENSSL
    // serve errors saved during SslBump (e.g., certificate problems) now
    if (conn->switchedToHttps() && conn->serveDelayedError(context)) {
        clientProcessRequestFinished(conn, request);
        return;
    }
#endif

    /* Do we expect a request-body? */
    expectBody = chunked || request->content_length > 0;
    if (!context->mayUseConnection() && expectBody) {
        // -1 length means "until the chunked encoding says we are done"
        request->body_pipe = conn->expectRequestBody(
                                 chunked ? -1 : request->content_length);

        /* Is it too large? */
        if (!chunked && // if chunked, we will check as we accumulate
                clientIsRequestBodyTooLargeForPolicy(request->content_length)) {
            clientStreamNode *node = context->getClientReplyContext();
            clientReplyContext *repContext = dynamic_cast<clientReplyContext *>(node->data.getRaw());
            assert (repContext);
            conn->quitAfterError(request.getRaw());
            repContext->setReplyToError(ERR_TOO_BIG,
                                        Http::scPayloadTooLarge, Http::METHOD_NONE, NULL,
                                        conn->clientConnection->remote, http->request, NULL, NULL);
            assert(context->http->out.offset == 0);
            context->pullData();
            clientProcessRequestFinished(conn, request);
            return;
        }

        if (!isFtp) {
            // We may stop producing, comm_close, and/or call setReplyToError()
            // below, so quit on errors to avoid http->doCallouts()
            if (!conn->handleRequestBodyData()) {
                clientProcessRequestFinished(conn, request);
                return;
            }

            if (!request->body_pipe->productionEnded()) {
                debugs(33, 5, "need more request body");
                context->mayUseConnection(true);
                assert(conn->flags.readMore);
            }
        }
    }

    http->calloutContext = new ClientRequestContext(http);

    http->doCallouts();

    clientProcessRequestFinished(conn, request);
}
1760
1761 int
1762 ConnStateData::pipelinePrefetchMax() const
1763 {
1764 // TODO: Support pipelined requests through pinned connections.
1765 if (pinning.pinned)
1766 return 0;
1767 return Config.pipeline_max_prefetch;
1768 }
1769
1770 /**
1771 * Limit the number of concurrent requests.
1772 * \return true when there are available position(s) in the pipeline queue for another request.
1773 * \return false when the pipeline queue is full or disabled.
1774 */
1775 bool
1776 ConnStateData::concurrentRequestQueueFilled() const
1777 {
1778 const int existingRequestCount = pipeline.count();
1779
1780 // default to the configured pipeline size.
1781 // add 1 because the head of pipeline is counted in concurrent requests and not prefetch queue
1782 #if USE_OPENSSL
1783 const int internalRequest = (transparent() && sslBumpMode == Ssl::bumpSplice) ? 1 : 0;
1784 #else
1785 const int internalRequest = 0;
1786 #endif
1787 const int concurrentRequestLimit = pipelinePrefetchMax() + 1 + internalRequest;
1788
1789 // when queue filled already we cant add more.
1790 if (existingRequestCount >= concurrentRequestLimit) {
1791 debugs(33, 3, clientConnection << " max concurrent requests reached (" << concurrentRequestLimit << ")");
1792 debugs(33, 5, clientConnection << " deferring new request until one is done");
1793 return true;
1794 }
1795
1796 return false;
1797 }
1798
/**
 * Perform proxy_protocol_access ACL tests on the client which
 * connected to PROXY protocol port to see if we trust the
 * sender enough to accept their PROXY header claim.
 * \return true when the sender is trusted; otherwise schedules connection
 *         closure via proxyProtocolError() and returns false.
 */
bool
ConnStateData::proxyProtocolValidateClient()
{
    // with no proxy_protocol_access rules configured, deny by default
    if (!Config.accessList.proxyProtocol)
        return proxyProtocolError("PROXY client not permitted by default ACL");

    ACLFilledChecklist ch(Config.accessList.proxyProtocol, NULL, clientConnection->rfc931);
    ch.src_addr = clientConnection->remote;
    ch.my_addr = clientConnection->local;
    ch.conn(this);

    if (!ch.fastCheck().allowed())
        return proxyProtocolError("PROXY client not permitted by ACLs");

    return true;
}
1820
/**
 * Perform cleanup on PROXY protocol errors.
 * If header parsing hits a fatal error terminate the connection,
 * otherwise wait for more data.
 * \param msg the error to log and stop with, or nil to keep the job running
 * \return false always, so callers can write "return proxyProtocolError(...)"
 */
bool
ConnStateData::proxyProtocolError(const char *msg)
{
    if (msg) {
        // This is important to know, but maybe not so much that flooding the log is okay.
#if QUIET_PROXY_PROTOCOL
        // display the first of every 32 occurrences at level 1, the others at level 2.
        static uint8_t hide = 0;
        debugs(33, (hide++ % 32 == 0 ? DBG_IMPORTANT : 2), msg << " from " << clientConnection);
#else
        debugs(33, DBG_IMPORTANT, msg << " from " << clientConnection);
#endif
        mustStop(msg); // terminate this connection-handling job
    }
    return false;
}
1842
/// Attempts to extract a PROXY protocol header from the input buffer and,
/// upon success, stores the parsed header in proxyProtocolHeader_.
/// \returns true if the header was successfully parsed
/// \returns false if more data is needed to parse the header or on error
bool
ConnStateData::parseProxyProtocolHeader()
{
    try {
        const auto parsed = ProxyProtocol::Parse(inBuf);
        proxyProtocolHeader_ = parsed.header;
        assert(bool(proxyProtocolHeader_));
        inBuf.consume(parsed.size); // drop header bytes; the rest is client traffic
        needProxyProtocolHeader_ = false;
        if (proxyProtocolHeader_->hasForwardedAddresses()) {
            // adopt the addresses claimed by the (already validated) sender
            clientConnection->local = proxyProtocolHeader_->destinationAddress;
            clientConnection->remote = proxyProtocolHeader_->sourceAddress;
            if ((clientConnection->flags & COMM_TRANSPARENT))
                clientConnection->flags ^= COMM_TRANSPARENT; // prevent TPROXY spoofing of this new IP.
            debugs(33, 5, "PROXY/" << proxyProtocolHeader_->version() << " upgrade: " << clientConnection);
        }
    } catch (const Parser::BinaryTokenizer::InsufficientInput &) {
        // not an error: the header is simply incomplete so far
        debugs(33, 3, "PROXY protocol: waiting for more than " << inBuf.length() << " bytes");
        return false;
    } catch (const std::exception &e) {
        return proxyProtocolError(e.what()); // malformed header: stop the job
    }
    return true;
}
1871
1872 void
1873 ConnStateData::receivedFirstByte()
1874 {
1875 if (receivedFirstByte_)
1876 return;
1877
1878 receivedFirstByte_ = true;
1879 // Set timeout to Config.Timeout.request
1880 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
1881 AsyncCall::Pointer timeoutCall = JobCallback(33, 5,
1882 TimeoutDialer, this, ConnStateData::requestTimeout);
1883 commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall);
1884 }
1885
/**
 * Attempt to parse one or more requests from the input buffer.
 * Returns true after completing parsing of at least one request [header]. That
 * includes cases where parsing ended with an error (e.g., a huge request).
 */
bool
ConnStateData::clientParseRequests()
{
    bool parsed_req = false;

    debugs(33, 5, HERE << clientConnection << ": attempting to parse");

    // Loop while we have read bytes that are not needed for producing the body
    // On errors, bodyPipe may become nil, but readMore will be cleared
    while (!inBuf.isEmpty() && !bodyPipe && flags.readMore) {

        // Prohibit concurrent requests when using a pinned to-server connection
        // because our Client classes do not support request pipelining.
        if (pinning.pinned && !pinning.readHandler) {
            debugs(33, 3, clientConnection << " waits for busy " << pinning.serverConnection);
            break;
        }

        /* Limit the number of concurrent requests */
        if (concurrentRequestQueueFilled())
            break;

        // try to parse the PROXY protocol header magic bytes
        if (needProxyProtocolHeader_) {
            // header incomplete (wait for more data) or fatal error (job stopping)
            if (!parseProxyProtocolHeader())
                break;

            // we have been waiting for PROXY to provide client-IP
            // for some lookups, ie rDNS and IDENT.
            whenClientIpKnown();
        }

        if (Http::StreamPointer context = parseOneRequest()) {
            debugs(33, 5, clientConnection << ": done parsing a request");

            // replace the request-start timeout with the per-request lifetime limit
            AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "clientLifetimeTimeout",
                                             CommTimeoutCbPtrFun(clientLifetimeTimeout, context->http));
            commSetConnTimeout(clientConnection, Config.Timeout.lifetime, timeoutCall);

            context->registerWithConn();

            processParsedRequest(context);

            parsed_req = true; // XXX: do we really need to parse everything right NOW ?

            if (context->mayUseConnection()) {
                debugs(33, 3, HERE << "Not parsing new requests, as this request may need the connection");
                break;
            }
        } else {
            debugs(33, 5, clientConnection << ": not enough request data: " <<
                   inBuf.length() << " < " << Config.maxRequestHeaderSize);
            // parseOneRequest() must have failed with an error before the
            // buffered header could exceed the configured maximum
            Must(inBuf.length() < Config.maxRequestHeaderSize);
            break;
        }
    }

    /* XXX where to 'finish' the parsing pass? */
    return parsed_req;
}
1951
/// Post-processing after new client bytes were buffered: finishes TLS
/// handshake parsing when one is in progress, otherwise tries to parse
/// buffered HTTP request(s) and handles half-closed connections.
void
ConnStateData::afterClientRead()
{
#if USE_OPENSSL
    if (parsingTlsHandshake) {
        parseTlsHandshake();
        return;
    }
#endif

    /* Process next request */
    if (pipeline.empty())
        fd_note(clientConnection->fd, "Reading next request");

    if (!clientParseRequests()) {
        // parsing may have closed the connection (e.g., on a fatal error)
        if (!isOpen())
            return;
        /*
         * If the client here is half closed and we failed
         * to parse a request, close the connection.
         * The above check with connFinishedWithConn() only
         * succeeds _if_ the buffer is empty which it won't
         * be if we have an incomplete request.
         * XXX: This duplicates ConnStateData::kick
         */
        if (pipeline.empty() && commIsHalfClosed(clientConnection->fd)) {
            debugs(33, 5, clientConnection << ": half-closed connection, no completed request parsed, connection closing.");
            clientConnection->close();
            return;
        }
    }

    if (!isOpen())
        return;

    clientAfterReadingRequests();
}
1989
1990 /**
1991 * called when new request data has been read from the socket
1992 *
1993 * \retval false called comm_close or setReplyToError (the caller should bail)
1994 * \retval true we did not call comm_close or setReplyToError
1995 */
1996 bool
1997 ConnStateData::handleReadData()
1998 {
1999 // if we are reading a body, stuff data into the body pipe
2000 if (bodyPipe != NULL)
2001 return handleRequestBodyData();
2002 return true;
2003 }
2004
/**
 * called when new request body data has been buffered in inBuf
 * may close the connection if we were closing and piped everything out
 *
 * \retval false called comm_close or setReplyToError (the caller should bail)
 * \retval true we did not call comm_close or setReplyToError
 */
bool
ConnStateData::handleRequestBodyData()
{
    assert(bodyPipe != NULL);

    if (bodyParser) { // chunked encoding
        if (const err_type error = handleChunkedRequestBody()) {
            abortChunkedRequestBody(error);
            return false;
        }
    } else { // identity encoding
        debugs(33,5, HERE << "handling plain request body for " << clientConnection);
        // push as much buffered data as the pipe accepts; leftovers stay in inBuf
        const size_t putSize = bodyPipe->putMoreData(inBuf.c_str(), inBuf.length());
        if (putSize > 0)
            consumeInput(putSize);

        if (!bodyPipe->mayNeedMoreData()) {
            // BodyPipe will clear us automagically when we produced everything
            bodyPipe = NULL;
        }
    }

    if (!bodyPipe) {
        debugs(33,5, HERE << "produced entire request body for " << clientConnection);

        if (const char *reason = stoppedSending()) {
            /* we've finished reading like good clients,
             * now do the close that initiateClose initiated.
             */
            debugs(33, 3, HERE << "closing for earlier sending error: " << reason);
            clientConnection->close();
            return false;
        }
    }

    return true;
}
2049
2050 /// parses available chunked encoded body bytes, checks size, returns errors
2051 err_type
2052 ConnStateData::handleChunkedRequestBody()
2053 {
2054 debugs(33, 7, "chunked from " << clientConnection << ": " << inBuf.length());
2055
2056 try { // the parser will throw on errors
2057
2058 if (inBuf.isEmpty()) // nothing to do
2059 return ERR_NONE;
2060
2061 BodyPipeCheckout bpc(*bodyPipe);
2062 bodyParser->setPayloadBuffer(&bpc.buf);
2063 const bool parsed = bodyParser->parse(inBuf);
2064 inBuf = bodyParser->remaining(); // sync buffers
2065 bpc.checkIn();
2066
2067 // dechunk then check: the size limit applies to _dechunked_ content
2068 if (clientIsRequestBodyTooLargeForPolicy(bodyPipe->producedSize()))
2069 return ERR_TOO_BIG;
2070
2071 if (parsed) {
2072 finishDechunkingRequest(true);
2073 Must(!bodyPipe);
2074 return ERR_NONE; // nil bodyPipe implies body end for the caller
2075 }
2076
2077 // if chunk parser needs data, then the body pipe must need it too
2078 Must(!bodyParser->needsMoreData() || bodyPipe->mayNeedMoreData());
2079
2080 // if parser needs more space and we can consume nothing, we will stall
2081 Must(!bodyParser->needsMoreSpace() || bodyPipe->buf().hasContent());
2082 } catch (...) { // TODO: be more specific
2083 debugs(33, 3, HERE << "malformed chunks" << bodyPipe->status());
2084 return ERR_INVALID_REQ;
2085 }
2086
2087 debugs(33, 7, HERE << "need more chunked data" << *bodyPipe->status());
2088 return ERR_NONE;
2089 }
2090
/// quit on errors related to chunked request body handling:
/// tears down dechunking state, resets the client connection, and
/// stops further reading from it
void
ConnStateData::abortChunkedRequestBody(const err_type error)
{
    finishDechunkingRequest(false);

    // XXX: The code below works if we fail during initial request parsing,
    // but if we fail when the server connection is used already, the server may send
    // us its response too, causing various assertions. How to prevent that?
#if WE_KNOW_HOW_TO_SEND_ERRORS
    // NOTE(review): this branch is never compiled (macro undefined); it is a
    // sketch of a proper error-response path kept for future work
    Http::StreamPointer context = pipeline.front();
    if (context != NULL && !context->http->out.offset) { // output nothing yet
        clientStreamNode *node = context->getClientReplyContext();
        clientReplyContext *repContext = dynamic_cast<clientReplyContext*>(node->data.getRaw());
        assert(repContext);
        const Http::StatusCode scode = (error == ERR_TOO_BIG) ?
                                       Http::scPayloadTooLarge : HTTP_BAD_REQUEST;
        repContext->setReplyToError(error, scode,
                                    repContext->http->request->method,
                                    repContext->http->uri,
                                    CachePeer,
                                    repContext->http->request,
                                    inBuf, NULL);
        context->pullData();
    } else {
        // close or otherwise we may get stuck as nobody will notice the error?
        comm_reset_close(clientConnection);
    }
#else
    debugs(33, 3, HERE << "aborting chunked request without error " << error);
    comm_reset_close(clientConnection);
#endif
    flags.readMore = false;
}
2125
2126 void
2127 ConnStateData::noteBodyConsumerAborted(BodyPipe::Pointer )
2128 {
2129 // request reader may get stuck waiting for space if nobody consumes body
2130 if (bodyPipe != NULL)
2131 bodyPipe->enableAutoConsumption();
2132
2133 // kids extend
2134 }
2135
/** general lifetime handler for HTTP requests */
void
ConnStateData::requestTimeout(const CommTimeoutCbParams &io)
{
    if (!Comm::IsConnOpen(io.conn))
        return;

    // before any byte arrives, the connection may carry a non-HTTP protocol
    // that on_unsupported_protocol lets us tunnel instead of timing out
    if (mayTunnelUnsupportedProto() && !receivedFirstByte_) {
        Http::StreamPointer context = pipeline.front();
        Must(context && context->http);
        HttpRequest::Pointer request = context->http->request;
        if (clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_REQUEST_START_TIMEOUT))
            return;
    }
    /*
     * Just close the connection to not confuse browsers
     * using persistent connections. Some browsers open
     * a connection and then do not use it until much
     * later (presumably because the request triggering
     * the open has already been completed on another
     * connection)
     */
    debugs(33, 3, "requestTimeout: FD " << io.fd << ": lifetime is expired.");
    io.conn->close();
}
2161
2162 static void
2163 clientLifetimeTimeout(const CommTimeoutCbParams &io)
2164 {
2165 ClientHttpRequest *http = static_cast<ClientHttpRequest *>(io.data);
2166 debugs(33, DBG_IMPORTANT, "WARNING: Closing client connection due to lifetime timeout");
2167 debugs(33, DBG_IMPORTANT, "\t" << http->uri);
2168 http->logType.err.timedout = true;
2169 if (Comm::IsConnOpen(io.conn))
2170 io.conn->close();
2171 }
2172
/// Constructs the client-connection manager for the accepted TCP connection
/// described by xact; kids (Http/Https servers) complete the setup in start().
ConnStateData::ConnStateData(const MasterXaction::Pointer &xact) :
    AsyncJob("ConnStateData"), // kids overwrite
    Server(xact),
    bodyParser(nullptr),
#if USE_OPENSSL
    sslBumpMode(Ssl::bumpEnd),
#endif
    needProxyProtocolHeader_(false),
#if USE_OPENSSL
    switchedToHttps_(false),
    parsingTlsHandshake(false),
    tlsConnectPort(0),
    sslServerBump(NULL),
    signAlgorithm(Ssl::algSignTrusted),
#endif
    stoppedSending_(NULL),
    stoppedReceiving_(NULL)
{
    flags.readMore = true; // kids may overwrite
    flags.swanSang = false;

    // no to-server connection has been pinned yet
    pinning.host = NULL;
    pinning.port = -1;
    pinning.pinned = false;
    pinning.auth = false;
    pinning.zeroReply = false;
    pinning.peer = NULL;

    // store the details required for creating more MasterXaction objects as new requests come in
    log_addr = xact->tcpClient->remote;
    log_addr.applyClientMask(Config.Addrs.client_netmask);

    // register to receive notice of Squid signal events
    // which may affect long persisting client connections
    registerRunner();
}
2209
/// AsyncJob entry point: configures the accepted socket, registers the close
/// handler, and either waits for a PROXY protocol header or proceeds with
/// client-IP-dependent setup immediately.
void
ConnStateData::start()
{
    BodyProducer::start();
    HttpControlMsgSink::start();

    // disable kernel Path MTU discovery when configured: always, or for
    // transparently intercepted connections (per disable_pmtu_discovery)
    if (port->disable_pmtu_discovery != DISABLE_PMTU_OFF &&
            (transparent() || port->disable_pmtu_discovery == DISABLE_PMTU_ALWAYS)) {
#if defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
        int i = IP_PMTUDISC_DONT;
        if (setsockopt(clientConnection->fd, SOL_IP, IP_MTU_DISCOVER, &i, sizeof(i)) < 0) {
            int xerrno = errno;
            debugs(33, 2, "WARNING: Path MTU discovery disabling failed on " << clientConnection << " : " << xstrerr(xerrno));
        }
#else
        static bool reported = false;

        if (!reported) {
            debugs(33, DBG_IMPORTANT, "NOTICE: Path MTU discovery disabling is not supported on your platform.");
            reported = true;
        }
#endif
    }

    typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
    AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, ConnStateData::connStateClosed);
    comm_add_close_handler(clientConnection->fd, call);

    // on PROXY-protocol ports the true client address is unknown until the
    // PROXY header arrives; otherwise it is known right now
    needProxyProtocolHeader_ = port->flags.proxySurrogate;
    if (needProxyProtocolHeader_) {
        if (!proxyProtocolValidateClient()) // will close the connection on failure
            return;
    } else
        whenClientIpKnown();

}
2246
/// Called once the effective client IP address is known (immediately, or
/// after PROXY protocol parsing): performs address-based lookups (FQDN,
/// IDENT), client-db accounting, and client delay pool assignment.
/// Kids must extend to actually start protocol processing.
void
ConnStateData::whenClientIpKnown()
{
    if (Config.onoff.log_fqdn)
        fqdncache_gethostbyaddr(clientConnection->remote, FQDN_LOOKUP_IF_MISS);

#if USE_IDENT
    if (Ident::TheConfig.identLookup) {
        ACLFilledChecklist identChecklist(Ident::TheConfig.identLookup, NULL, NULL);
        identChecklist.src_addr = clientConnection->remote;
        identChecklist.my_addr = clientConnection->local;
        if (identChecklist.fastCheck().allowed())
            Ident::Start(clientConnection, clientIdentDone, this);
    }
#endif

    clientdbEstablished(clientConnection->remote, 1);

#if USE_DELAY_POOLS
    fd_table[clientConnection->fd].clientInfo = NULL;

    if (!Config.onoff.client_db)
        return; // client delay pools require client_db

    const auto &pools = ClientDelayPools::Instance()->pools;
    if (pools.size()) {
        ACLFilledChecklist ch(NULL, NULL, NULL);

        // TODO: we check early to limit error response bandwidth but we
        // should recheck when we can honor delay_pool_uses_indirect
        // TODO: we should also pass the port details for myportname here.
        ch.src_addr = clientConnection->remote;
        ch.my_addr = clientConnection->local;

        for (unsigned int pool = 0; pool < pools.size(); ++pool) {

            /* pools require explicit 'allow' to assign a client into them */
            if (pools[pool]->access) {
                ch.changeAcl(pools[pool]->access);
                allow_t answer = ch.fastCheck();
                if (answer.allowed()) {

                    /* request client information from db after we did all checks
                       this will save hash lookup if client failed checks */
                    ClientInfo * cli = clientdbGetInfo(clientConnection->remote);
                    assert(cli);

                    /* put client info in FDE */
                    fd_table[clientConnection->fd].clientInfo = cli;

                    /* setup write limiter for this request */
                    const double burst = floor(0.5 +
                                               (pools[pool]->highwatermark * Config.ClientDelay.initial)/100.0);
                    cli->setWriteLimiter(pools[pool]->rate, burst, pools[pool]->highwatermark);
                    break;
                } else {
                    debugs(83, 4, HERE << "Delay pool " << pool << " skipped because ACL " << answer);
                }
            }
        }
    }
#endif

    // kids must extend to actually start doing something (e.g., reading)
}
2312
/** Handle a new connection on an HTTP socket. */
void
httpAccept(const CommAcceptCbParams &params)
{
    MasterXaction::Pointer xact = params.xaction;
    AnyP::PortCfgPointer s = xact->squidPort;

    // NP: it is possible the port was reconfigured when the call or accept() was queued.

    if (params.flag != Comm::OK) {
        // Its possible the call was still queued when the client disconnected
        debugs(33, 2, s->listenConn << ": accept failure: " << xstrerr(params.xerrno));
        return;
    }

    debugs(33, 4, params.conn << ": accepted");
    fd_note(params.conn->fd, "client http connect");

    // apply the listening port's TCP keep-alive settings to the new socket
    if (s->tcp_keepalive.enabled)
        commSetTcpKeepalive(params.conn->fd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);

    ++incoming_sockets_accepted;

    // Socket is ready, setup the connection manager to start using it
    auto *srv = Http::NewServer(xact);
    AsyncJob::Start(srv); // usually async-calls readSomeData()
}
2340
2341 /// Create TLS connection structure and update fd_table
2342 static bool
2343 httpsCreate(const Comm::ConnectionPointer &conn, const Security::ContextPointer &ctx)
2344 {
2345 if (Security::CreateServerSession(ctx, conn, "client https start")) {
2346 debugs(33, 5, "will negotiate TLS on " << conn);
2347 return true;
2348 }
2349
2350 debugs(33, DBG_IMPORTANT, "ERROR: could not create TLS server context for " << conn);
2351 conn->close();
2352 return false;
2353 }
2354
/**
 * Attempts one step of the server-side TLS handshake on the client socket.
 * When the library needs more I/O, re-arms the given callback via
 * Comm::SetSelect and returns 0.
 *
 * \retval 1 on success
 * \retval 0 when needs more data
 * \retval -1 on error
 */
static int
tlsAttemptHandshake(ConnStateData *conn, PF *callback)
{
    // TODO: maybe throw instead of returning -1
    // see https://github.com/squid-cache/squid/pull/81#discussion_r153053278
    int fd = conn->clientConnection->fd;
    auto session = fd_table[fd].ssl.get();

    errno = 0;

#if USE_OPENSSL
    const auto ret = SSL_accept(session);
    if (ret > 0)
        return 1;

    const int xerrno = errno;
    const auto ssl_error = SSL_get_error(session, ret);

    switch (ssl_error) {

    case SSL_ERROR_WANT_READ:
        Comm::SetSelect(fd, COMM_SELECT_READ, callback, (callback ? conn : nullptr), 0);
        return 0;

    case SSL_ERROR_WANT_WRITE:
        Comm::SetSelect(fd, COMM_SELECT_WRITE, callback, (callback ? conn : nullptr), 0);
        return 0;

    case SSL_ERROR_SYSCALL:
        if (ret == 0) {
            debugs(83, 2, "Error negotiating SSL connection on FD " << fd << ": Aborted by client: " << ssl_error);
        } else {
            debugs(83, (xerrno == ECONNRESET) ? 1 : 2, "Error negotiating SSL connection on FD " << fd << ": " <<
                   (xerrno == 0 ? Security::ErrorString(ssl_error) : xstrerr(xerrno)));
        }
        break;

    case SSL_ERROR_ZERO_RETURN:
        debugs(83, DBG_IMPORTANT, "Error negotiating SSL connection on FD " << fd << ": Closed by client");
        break;

    default:
        debugs(83, DBG_IMPORTANT, "Error negotiating SSL connection on FD " <<
               fd << ": " << Security::ErrorString(ssl_error) <<
               " (" << ssl_error << "/" << ret << ")");
    }

#elif USE_GNUTLS

    const auto x = gnutls_handshake(session);
    if (x == GNUTLS_E_SUCCESS)
        return 1;

    if (gnutls_error_is_fatal(x)) {
        debugs(83, 2, "Error negotiating TLS on " << conn->clientConnection << ": Aborted by client: " << Security::ErrorString(x));

    } else if (x == GNUTLS_E_INTERRUPTED || x == GNUTLS_E_AGAIN) {
        // retryable: wait for whichever I/O direction GnuTLS needs next
        const auto ioAction = (gnutls_record_get_direction(session)==0 ? COMM_SELECT_READ : COMM_SELECT_WRITE);
        Comm::SetSelect(fd, ioAction, callback, (callback ? conn : nullptr), 0);
        return 0;
    }

#else
    // Performing TLS handshake should never be reachable without a TLS/SSL library.
    (void)session; // avoid compiler and static analysis complaints
    fatal("FATAL: HTTPS not supported by this Squid.");
#endif

    return -1;
}
2431
/** negotiate an SSL connection; re-registers itself as the I/O callback
 * until the handshake completes, then logs session/certificate details and
 * starts reading the first request */
static void
clientNegotiateSSL(int fd, void *data)
{
    ConnStateData *conn = (ConnStateData *)data;

    const int ret = tlsAttemptHandshake(conn, clientNegotiateSSL);
    if (ret <= 0) {
        if (ret < 0) // An error
            conn->clientConnection->close();
        return; // ret == 0: handshake pending; callback re-armed already
    }

    Security::SessionPointer session(fd_table[fd].ssl);

#if USE_OPENSSL
    if (Security::SessionIsResumed(session)) {
        debugs(83, 2, "Session " << SSL_get_session(session.get()) <<
               " reused on FD " << fd << " (" << fd_table[fd].ipaddr <<
               ":" << (int)fd_table[fd].remote_port << ")");
    } else {
        if (Debug::Enabled(83, 4)) {
            /* Write out the SSL session details.. actually the call below, but
             * OpenSSL headers do strange typecasts confusing GCC.. */
            /* PEM_write_SSL_SESSION(debug_log, SSL_get_session(ssl)); */
#if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x00908000L
            PEM_ASN1_write(reinterpret_cast<i2d_of_void *>(i2d_SSL_SESSION),
                           PEM_STRING_SSL_SESSION, debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);

#elif (ALLOW_ALWAYS_SSL_SESSION_DETAIL == 1)

            /* When using gcc 3.3.x and OpenSSL 0.9.7x sometimes a compile error can occur here.
             * This is caused by an unpredicatble gcc behaviour on a cast of the first argument
             * of PEM_ASN1_write(). For this reason this code section is disabled. To enable it,
             * define ALLOW_ALWAYS_SSL_SESSION_DETAIL=1.
             * Because there are two possible usable cast, if you get an error here, try the other
             * commented line. */

            PEM_ASN1_write((int(*)())i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
            /* PEM_ASN1_write((int(*)(...))i2d_SSL_SESSION, PEM_STRING_SSL_SESSION,
                           debug_log,
                           reinterpret_cast<char *>(SSL_get_session(session.get())),
                           nullptr, nullptr, 0, nullptr, nullptr);
             */
#else
            debugs(83, 4, "With " OPENSSL_VERSION_TEXT ", session details are available only defining ALLOW_ALWAYS_SSL_SESSION_DETAIL=1 in the source.");

#endif
            /* Note: This does not automatically fflush the log file.. */
        }

        debugs(83, 2, "New session " << SSL_get_session(session.get()) <<
               " on FD " << fd << " (" << fd_table[fd].ipaddr << ":" <<
               fd_table[fd].remote_port << ")");
    }
#else
    debugs(83, 2, "TLS session reuse not yet implemented.");
#endif

    // Connection established. Retrieve TLS connection parameters for logging.
    conn->clientConnection->tlsNegotiations()->retrieveNegotiatedInfo(session);

#if USE_OPENSSL
    X509 *client_cert = SSL_get_peer_certificate(session.get());

    if (client_cert) {
        debugs(83, 3, "FD " << fd << " client certificate: subject: " <<
               X509_NAME_oneline(X509_get_subject_name(client_cert), 0, 0));

        debugs(83, 3, "FD " << fd << " client certificate: issuer: " <<
               X509_NAME_oneline(X509_get_issuer_name(client_cert), 0, 0));

        X509_free(client_cert);
    } else {
        debugs(83, 5, "FD " << fd << " has no client certificate.");
    }
#else
    debugs(83, 2, "Client certificate requesting not yet implemented.");
#endif

    conn->readSomeData();
}
2519
/**
 * If Security::ContextPointer is given, starts reading the TLS handshake.
 * Otherwise, calls switchToHttps to generate a dynamic Security::ContextPointer.
 * NOTE(review): the visible code simply returns when ctx is nil (or session
 * creation fails); no switchToHttps() call appears here — confirm whether the
 * second sentence above is stale.
 */
static void
httpsEstablish(ConnStateData *connState, const Security::ContextPointer &ctx)
{
    assert(connState);
    const Comm::ConnectionPointer &details = connState->clientConnection;

    if (!ctx || !httpsCreate(details, ctx))
        return;

    // limit how long the client may take to complete the TLS handshake
    typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(33, 5, TimeoutDialer,
                                     connState, ConnStateData::requestTimeout);
    commSetConnTimeout(details, Config.Timeout.request, timeoutCall);

    Comm::SetSelect(details->fd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0);
}
2540
2541 #if USE_OPENSSL
2542 /**
2543 * A callback function to use with the ACLFilledChecklist callback.
2544 */
2545 static void
2546 httpsSslBumpAccessCheckDone(allow_t answer, void *data)
2547 {
2548 ConnStateData *connState = (ConnStateData *) data;
2549
2550 // if the connection is closed or closing, just return.
2551 if (!connState->isOpen())
2552 return;
2553
2554 if (answer.allowed()) {
2555 debugs(33, 2, "sslBump action " << Ssl::bumpMode(answer.kind) << "needed for " << connState->clientConnection);
2556 connState->sslBumpMode = static_cast<Ssl::BumpMode>(answer.kind);
2557 } else {
2558 debugs(33, 3, "sslBump not needed for " << connState->clientConnection);
2559 connState->sslBumpMode = Ssl::bumpSplice;
2560 }
2561
2562 if (connState->sslBumpMode == Ssl::bumpTerminate) {
2563 connState->clientConnection->close();
2564 return;
2565 }
2566
2567 if (!connState->fakeAConnectRequest("ssl-bump", connState->inBuf))
2568 connState->clientConnection->close();
2569 }
2570 #endif
2571
/** handle a new HTTPS connection */
static void
httpsAccept(const CommAcceptCbParams &params)
{
    MasterXaction::Pointer xact = params.xaction;
    const AnyP::PortCfgPointer s = xact->squidPort;

    // NP: it is possible the port was reconfigured when the call or accept() was queued.

    if (params.flag != Comm::OK) {
        // Its possible the call was still queued when the client disconnected
        debugs(33, 2, "httpsAccept: " << s->listenConn << ": accept failure: " << xstrerr(params.xerrno));
        return;
    }

    debugs(33, 4, HERE << params.conn << " accepted, starting SSL negotiation.");
    fd_note(params.conn->fd, "client https connect");

    // apply the listening port's TCP keep-alive settings to the new socket
    if (s->tcp_keepalive.enabled) {
        commSetTcpKeepalive(params.conn->fd, s->tcp_keepalive.idle, s->tcp_keepalive.interval, s->tcp_keepalive.timeout);
    }
    ++incoming_sockets_accepted;

    // Socket is ready, setup the connection manager to start using it
    auto *srv = Https::NewServer(xact);
    AsyncJob::Start(srv); // usually async-calls postHttpsAccept()
}
2599
/// Continues HTTPS connection setup after accept(): on ssl_bump ports, runs
/// the ssl_bump ACL check (with a fake CONNECT-like request built from the
/// intercepted destination); otherwise starts TLS with the static context.
void
ConnStateData::postHttpsAccept()
{
    if (port->flags.tunnelSslBumping) {
#if USE_OPENSSL
        debugs(33, 5, "accept transparent connection: " << clientConnection);

        // no ssl_bump rules configured: treat as denied (splice)
        if (!Config.accessList.ssl_bump) {
            httpsSslBumpAccessCheckDone(ACCESS_DENIED, this);
            return;
        }

        MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
        mx->tcpClient = clientConnection;
        // Create a fake HTTP request for ssl_bump ACL check,
        // using tproxy/intercept provided destination IP and port.
        HttpRequest *request = new HttpRequest(mx);
        static char ip[MAX_IPSTRLEN];
        assert(clientConnection->flags & (COMM_TRANSPARENT | COMM_INTERCEPTION));
        request->url.host(clientConnection->local.toStr(ip, sizeof(ip)));
        request->url.port(clientConnection->local.port());
        request->myportname = port->name;

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, request, NULL);
        acl_checklist->src_addr = clientConnection->remote;
        acl_checklist->my_addr = port->s;
        // Build a local AccessLogEntry to allow requiresAle() acls work
        acl_checklist->al = new AccessLogEntry;
        acl_checklist->al->cache.start_time = current_time;
        acl_checklist->al->tcpClient = clientConnection;
        acl_checklist->al->cache.port = port;
        acl_checklist->al->cache.caddr = log_addr;
        acl_checklist->al->proxyProtocolHeader = proxyProtocolHeader_;
        HTTPMSGUNLOCK(acl_checklist->al->request);
        acl_checklist->al->request = request;
        HTTPMSGLOCK(acl_checklist->al->request);
        Http::StreamPointer context = pipeline.front();
        ClientHttpRequest *http = context ? context->http : nullptr;
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(request, log_uri);
        // the check completes asynchronously in httpsSslBumpAccessCheckDone()
        acl_checklist->nonBlockingCheck(httpsSslBumpAccessCheckDone, this);
#else
        fatal("FATAL: SSL-Bump requires --with-openssl");
#endif
        return;
    } else {
        httpsEstablish(this, port->secure.staticContext);
    }
}
2649
2650 #if USE_OPENSSL
2651 void
2652 ConnStateData::sslCrtdHandleReplyWrapper(void *data, const Helper::Reply &reply)
2653 {
2654 ConnStateData * state_data = (ConnStateData *)(data);
2655 state_data->sslCrtdHandleReply(reply);
2656 }
2657
/// Handles the ssl_crtd helper reply carrying a generated certificate:
/// on success installs/caches the certificate context and resumes TLS setup;
/// on any failure falls through to getSslContextDone() with a nil context.
void
ConnStateData::sslCrtdHandleReply(const Helper::Reply &reply)
{
    if (!isOpen()) {
        debugs(33, 3, "Connection gone while waiting for ssl_crtd helper reply; helper reply:" << reply);
        return;
    }

    if (reply.result == Helper::BrokenHelper) {
        debugs(33, 5, HERE << "Certificate for " << sslConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply);
    } else if (!reply.other().hasContent()) {
        debugs(1, DBG_IMPORTANT, HERE << "\"ssl_crtd\" helper returned <NULL> reply.");
    } else {
        Ssl::CrtdMessage reply_message(Ssl::CrtdMessage::REPLY);
        if (reply_message.parse(reply.other().content(), reply.other().contentSize()) != Ssl::CrtdMessage::OK) {
            debugs(33, 5, HERE << "Reply from ssl_crtd for " << sslConnectHostOrIp << " is incorrect");
        } else {
            if (reply.result != Helper::Okay) {
                debugs(33, 5, HERE << "Certificate for " << sslConnectHostOrIp << " cannot be generated. ssl_crtd response: " << reply_message.getBody());
            } else {
                debugs(33, 5, HERE << "Certificate for " << sslConnectHostOrIp << " was successfully recieved from ssl_crtd");
                if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
                    // peek/stare: attach the certificate to the existing session
                    doPeekAndSpliceStep();
                    auto ssl = fd_table[clientConnection->fd].ssl.get();
                    bool ret = Ssl::configureSSLUsingPkeyAndCertFromMemory(ssl, reply_message.getBody().c_str(), *port);
                    if (!ret)
                        debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

                    Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
                    Ssl::configureUnconfiguredSslContext(ctx, signAlgorithm, *port);
                } else {
                    // bump: build a fresh context from the generated cert and cache it
                    Security::ContextPointer ctx(Ssl::GenerateSslContextUsingPkeyAndCertFromMemory(reply_message.getBody().c_str(), port->secure, (signAlgorithm == Ssl::algSignTrusted)));
                    if (ctx && !sslBumpCertKey.isEmpty())
                        storeTlsContextToCache(sslBumpCertKey, ctx);
                    getSslContextDone(ctx);
                }
                return;
            }
        }
    }
    // all failure paths end here: continue with no usable context
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2701
2702 void ConnStateData::buildSslCertGenerationParams(Ssl::CertificateProperties &certProperties)
2703 {
2704 certProperties.commonName = sslCommonName_.isEmpty() ? sslConnectHostOrIp.termedBuf() : sslCommonName_.c_str();
2705
2706 const bool connectedOk = sslServerBump && sslServerBump->connectedOk();
2707 if (connectedOk) {
2708 if (X509 *mimicCert = sslServerBump->serverCert.get())
2709 certProperties.mimicCert.resetAndLock(mimicCert);
2710
2711 ACLFilledChecklist checklist(NULL, sslServerBump->request.getRaw(),
2712 clientConnection != NULL ? clientConnection->rfc931 : dash_str);
2713 checklist.sslErrors = cbdataReference(sslServerBump->sslErrors());
2714
2715 for (sslproxy_cert_adapt *ca = Config.ssl_client.cert_adapt; ca != NULL; ca = ca->next) {
2716 // If the algorithm already set, then ignore it.
2717 if ((ca->alg == Ssl::algSetCommonName && certProperties.setCommonName) ||
2718 (ca->alg == Ssl::algSetValidAfter && certProperties.setValidAfter) ||
2719 (ca->alg == Ssl::algSetValidBefore && certProperties.setValidBefore) )
2720 continue;
2721
2722 if (ca->aclList && checklist.fastCheck(ca->aclList).allowed()) {
2723 const char *alg = Ssl::CertAdaptAlgorithmStr[ca->alg];
2724 const char *param = ca->param;
2725
2726 // For parameterless CN adaptation, use hostname from the
2727 // CONNECT request.
2728 if (ca->alg == Ssl::algSetCommonName) {
2729 if (!param)
2730 param = sslConnectHostOrIp.termedBuf();
2731 certProperties.commonName = param;
2732 certProperties.setCommonName = true;
2733 } else if (ca->alg == Ssl::algSetValidAfter)
2734 certProperties.setValidAfter = true;
2735 else if (ca->alg == Ssl::algSetValidBefore)
2736 certProperties.setValidBefore = true;
2737
2738 debugs(33, 5, HERE << "Matches certificate adaptation aglorithm: " <<
2739 alg << " param: " << (param ? param : "-"));
2740 }
2741 }
2742
2743 certProperties.signAlgorithm = Ssl::algSignEnd;
2744 for (sslproxy_cert_sign *sg = Config.ssl_client.cert_sign; sg != NULL; sg = sg->next) {
2745 if (sg->aclList && checklist.fastCheck(sg->aclList).allowed()) {
2746 certProperties.signAlgorithm = (Ssl::CertSignAlgorithm)sg->alg;
2747 break;
2748 }
2749 }
2750 } else {// did not try to connect (e.g. client-first) or failed to connect
2751 // In case of an error while connecting to the secure server, use a
2752 // trusted certificate, with no mimicked fields and no adaptation
2753 // algorithms. There is nothing we can mimic, so we want to minimize the
2754 // number of warnings the user will have to see to get to the error page.
2755 // We will close the connection, so that the trust is not extended to
2756 // non-Squid content.
2757 certProperties.signAlgorithm = Ssl::algSignTrusted;
2758 }
2759
2760 assert(certProperties.signAlgorithm != Ssl::algSignEnd);
2761
2762 if (certProperties.signAlgorithm == Ssl::algSignUntrusted) {
2763 assert(port->secure.untrustedSigningCa.cert);
2764 certProperties.signWithX509.resetAndLock(port->secure.untrustedSigningCa.cert.get());
2765 certProperties.signWithPkey.resetAndLock(port->secure.untrustedSigningCa.pkey.get());
2766 } else {
2767 assert(port->secure.signingCa.cert.get());
2768 certProperties.signWithX509.resetAndLock(port->secure.signingCa.cert.get());
2769
2770 if (port->secure.signingCa.pkey)
2771 certProperties.signWithPkey.resetAndLock(port->secure.signingCa.pkey.get());
2772 }
2773 signAlgorithm = certProperties.signAlgorithm;
2774
2775 certProperties.signHash = Ssl::DefaultSignHash;
2776 }
2777
2778 Security::ContextPointer
2779 ConnStateData::getTlsContextFromCache(const SBuf &cacheKey, const Ssl::CertificateProperties &certProperties)
2780 {
2781 debugs(33, 5, "Finding SSL certificate for " << cacheKey << " in cache");
2782 Ssl::LocalContextStorage * ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2783 if (Security::ContextPointer *ctx = ssl_ctx_cache ? ssl_ctx_cache->get(cacheKey) : nullptr) {
2784 if (Ssl::verifySslCertificate(*ctx, certProperties)) {
2785 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is valid");
2786 return *ctx;
2787 } else {
2788 debugs(33, 5, "Cached SSL certificate for " << certProperties.commonName << " is out of date. Delete this certificate from cache");
2789 if (ssl_ctx_cache)
2790 ssl_ctx_cache->del(cacheKey);
2791 }
2792 }
2793 return Security::ContextPointer(nullptr);
2794 }
2795
2796 void
2797 ConnStateData::storeTlsContextToCache(const SBuf &cacheKey, Security::ContextPointer &ctx)
2798 {
2799 Ssl::LocalContextStorage *ssl_ctx_cache = Ssl::TheGlobalContextStorage.getLocalStorage(port->s);
2800 if (!ssl_ctx_cache || !ssl_ctx_cache->add(cacheKey, new Security::ContextPointer(ctx))) {
2801 // If it is not in storage delete after using. Else storage deleted it.
2802 fd_table[clientConnection->fd].dynamicTlsContext = ctx;
2803 }
2804 }
2805
/// Obtains the TLS context used to encrypt the client connection and then
/// continues via getSslContextDone(). Tries, in order: the in-RAM
/// certificate cache, the external ssl_crtd helper (asynchronous), and
/// blocking in-process generation. Without generateHostCertificates, it
/// falls straight through with a nil context.
void
ConnStateData::getSslContextStart()
{
    // If we are called, then CONNECT has succeeded. Finalize it.
    if (auto xact = pipeline.front()) {
        if (xact->http && xact->http->request && xact->http->request->method == Http::METHOD_CONNECT)
            xact->finished();
        // cannot proceed with encryption if requests wait for plain responses
        Must(pipeline.empty());
    }
    /* careful: finished() above frees request, host, etc. */

    if (port->secure.generateHostCertificates) {
        Ssl::CertificateProperties certProperties;
        buildSslCertGenerationParams(certProperties);

        // Disable caching for bumpPeekAndSplice mode
        if (!(sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare))) {
            sslBumpCertKey.clear();
            Ssl::InRamCertificateDbKey(certProperties, sslBumpCertKey);
            assert(!sslBumpCertKey.isEmpty());

            // reuse a cached context when one matches the wanted properties
            Security::ContextPointer ctx(getTlsContextFromCache(sslBumpCertKey, certProperties));
            if (ctx) {
                getSslContextDone(ctx);
                return;
            }
        }

#if USE_SSL_CRTD
        try {
            debugs(33, 5, HERE << "Generating SSL certificate for " << certProperties.commonName << " using ssl_crtd.");
            Ssl::CrtdMessage request_message(Ssl::CrtdMessage::REQUEST);
            request_message.setCode(Ssl::CrtdMessage::code_new_certificate);
            request_message.composeRequest(certProperties);
            debugs(33, 5, HERE << "SSL crtd request: " << request_message.compose().c_str());
            // asynchronous: sslCrtdHandleReplyWrapper resumes processing
            Ssl::Helper::Submit(request_message, sslCrtdHandleReplyWrapper, this);
            return;
        } catch (const std::exception &e) {
            debugs(33, DBG_IMPORTANT, "ERROR: Failed to compose ssl_crtd " <<
                   "request for " << certProperties.commonName <<
                   " certificate: " << e.what() << "; will now block to " <<
                   "generate that certificate.");
            // fall through to do blocking in-process generation.
        }
#endif // USE_SSL_CRTD

        debugs(33, 5, HERE << "Generating SSL certificate for " << certProperties.commonName);
        if (sslServerBump && (sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare)) {
            // peek/stare: configure the already-created SSL object in place
            doPeekAndSpliceStep();
            auto ssl = fd_table[clientConnection->fd].ssl.get();
            if (!Ssl::configureSSL(ssl, certProperties, *port))
                debugs(33, 5, "Failed to set certificates to ssl object for PeekAndSplice mode");

            Security::ContextPointer ctx(Security::GetFrom(fd_table[clientConnection->fd].ssl));
            Ssl::configureUnconfiguredSslContext(ctx, certProperties.signAlgorithm, *port);
        } else {
            // generate a fresh context and remember it for later reuse
            Security::ContextPointer dynCtx(Ssl::GenerateSslContext(certProperties, port->secure, (signAlgorithm == Ssl::algSignTrusted)));
            if (dynCtx && !sslBumpCertKey.isEmpty())
                storeTlsContextToCache(sslBumpCertKey, dynCtx);
            getSslContextDone(dynCtx);
        }
        return;
    }

    // no certificate generation configured; getSslContextDone() will fall
    // back to the static context (or close the connection)
    Security::ContextPointer nil;
    getSslContextDone(nil);
}
2874
2875 void
2876 ConnStateData::getSslContextDone(Security::ContextPointer &ctx)
2877 {
2878 if (port->secure.generateHostCertificates && !ctx) {
2879 debugs(33, 2, "Failed to generate TLS context for " << sslConnectHostOrIp);
2880 }
2881
2882 // If generated ssl context = NULL, try to use static ssl context.
2883 if (!ctx) {
2884 if (!port->secure.staticContext) {
2885 debugs(83, DBG_IMPORTANT, "Closing " << clientConnection->remote << " as lacking TLS context");
2886 clientConnection->close();
2887 return;
2888 } else {
2889 debugs(33, 5, "Using static TLS context.");
2890 ctx = port->secure.staticContext;
2891 }
2892 }
2893
2894 if (!httpsCreate(clientConnection, ctx))
2895 return;
2896
2897 // bumped intercepted conns should already have Config.Timeout.request set
2898 // but forwarded connections may only have Config.Timeout.lifetime. [Re]set
2899 // to make sure the connection does not get stuck on non-SSL clients.
2900 typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
2901 AsyncCall::Pointer timeoutCall = JobCallback(33, 5, TimeoutDialer,
2902 this, ConnStateData::requestTimeout);
2903 commSetConnTimeout(clientConnection, Config.Timeout.request, timeoutCall);
2904
2905 switchedToHttps_ = true;
2906
2907 auto ssl = fd_table[clientConnection->fd].ssl.get();
2908 BIO *b = SSL_get_rbio(ssl);
2909 Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
2910 bio->setReadBufData(inBuf);
2911 inBuf.clear();
2912 clientNegotiateSSL(clientConnection->fd, this);
2913 }
2914
/// Converts the current client connection into a TLS connection in order
/// to bump it: remembers the intended origin details, arms the bump-step
/// state (sslServerBump) according to bumpServerMode, and starts reading
/// the client's TLS handshake.
void
ConnStateData::switchToHttps(HttpRequest *request, Ssl::BumpMode bumpServerMode)
{
    assert(!switchedToHttps_);

    // remember the CONNECT target for certificate generation and logging
    sslConnectHostOrIp = request->url.host();
    tlsConnectPort = request->url.port();
    resetSslCommonName(request->url.host());

    // We are going to read new request
    flags.readMore = true;
    debugs(33, 5, HERE << "converting " << clientConnection << " to SSL");

    // keep version major.minor details the same.
    // but we are now performing the HTTPS handshake traffic
    transferProtocol.protocol = AnyP::PROTO_HTTPS;

    // If sslServerBump is set, then we have decided to deny CONNECT
    // and now want to switch to SSL to send the error to the client
    // without even peeking at the origin server certificate.
    if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(request);
    } else if (bumpServerMode == Ssl::bumpPeek || bumpServerMode == Ssl::bumpStare) {
        request->flags.sslPeek = true;
        sslServerBump = new Ssl::ServerBump(request, NULL, bumpServerMode);
    }

    // commSetConnTimeout() was called for this request before we switched.
    // Fix timeout to request_start_timeout
    typedef CommCbMemFunT<ConnStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall =  JobCallback(33, 5,
                                      TimeoutDialer, this, ConnStateData::requestTimeout);
    commSetConnTimeout(clientConnection, Config.Timeout.request_start_timeout, timeoutCall);
    // Also reset receivedFirstByte_ flag to allow this timeout work in the case we have
    // a bumped "connect" request on non transparent port.
    receivedFirstByte_ = false;
    // Get more data to peek at Tls
    parsingTlsHandshake = true;
    readSomeData();
}
2956
/// Incrementally parses the buffered TLS ClientHello: re-arms the read
/// when more bytes are needed; on an unsupported protocol, attempts to
/// tunnel the already-read bytes instead; otherwise dispatches to the
/// configured step1 bump action (client-first, server-first, or
/// peek/stare).
void
ConnStateData::parseTlsHandshake()
{
    Must(parsingTlsHandshake);

    assert(!inBuf.isEmpty());
    receivedFirstByte();
    fd_note(clientConnection->fd, "Parsing TLS handshake");

    bool unsupportedProtocol = false;
    try {
        if (!tlsParser.parseHello(inBuf)) {
            // need more data to finish parsing
            readSomeData();
            return;
        }
    }
    catch (const std::exception &ex) {
        // not TLS (or malformed); we may still be able to tunnel it below
        debugs(83, 2, "error on FD " << clientConnection->fd << ": " << ex.what());
        unsupportedProtocol = true;
    }

    parsingTlsHandshake = false;

    // client data may be needed for splicing and for
    // tunneling unsupportedProtocol after an error
    preservedClientData = inBuf;

    // Even if the parser failed, each TLS detail should either be set
    // correctly or still be "unknown"; copying unknown detail is a no-op.
    Security::TlsDetails::Pointer const &details = tlsParser.details;
    clientConnection->tlsNegotiations()->retrieveParsedInfo(details);
    if (details && !details->serverName.isEmpty()) {
        // SNI overrides the CONNECT/intercepted destination name
        resetSslCommonName(details->serverName.c_str());
        tlsClientSni_ = details->serverName;
    }

    // We should disable read/write handlers
    Comm::ResetSelect(clientConnection->fd);

    if (unsupportedProtocol) {
        Http::StreamPointer context = pipeline.front();
        Must(context && context->http);
        HttpRequest::Pointer request = context->http->request;
        debugs(83, 5, "Got something other than TLS Client Hello. Cannot SslBump.");
        sslBumpMode = Ssl::bumpSplice;
        context->http->al->ssl.bumpMode = Ssl::bumpSplice;
        if (!clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_PROTOCOL_UNKNOWN))
            clientConnection->close();
        return;
    }

    if (!sslServerBump || sslServerBump->act.step1 == Ssl::bumpClientFirst) { // Either means client-first.
        getSslContextStart();
        return;
    } else if (sslServerBump->act.step1 == Ssl::bumpServerFirst) {
        // will call httpsPeeked() with certificate and connection, eventually
        FwdState::fwdStart(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw());
    } else {
        Must(sslServerBump->act.step1 == Ssl::bumpPeek || sslServerBump->act.step1 == Ssl::bumpStare);
        startPeekAndSplice();
    }
}
3020
3021 void httpsSslBumpStep2AccessCheckDone(allow_t answer, void *data)
3022 {
3023 ConnStateData *connState = (ConnStateData *) data;
3024
3025 // if the connection is closed or closing, just return.
3026 if (!connState->isOpen())
3027 return;
3028
3029 debugs(33, 5, "Answer: " << answer << " kind:" << answer.kind);
3030 assert(connState->serverBump());
3031 Ssl::BumpMode bumpAction;
3032 if (answer.allowed()) {
3033 bumpAction = (Ssl::BumpMode)answer.kind;
3034 } else
3035 bumpAction = Ssl::bumpSplice;
3036
3037 connState->serverBump()->act.step2 = bumpAction;
3038 connState->sslBumpMode = bumpAction;
3039 Http::StreamPointer context = connState->pipeline.front();
3040 if (ClientHttpRequest *http = (context ? context->http : nullptr))
3041 http->al->ssl.bumpMode = bumpAction;
3042
3043 if (bumpAction == Ssl::bumpTerminate) {
3044 connState->clientConnection->close();
3045 } else if (bumpAction != Ssl::bumpSplice) {
3046 connState->startPeekAndSplice();
3047 } else if (!connState->splice())
3048 connState->clientConnection->close();
3049 }
3050
3051 bool
3052 ConnStateData::splice()
3053 {
3054 // normally we can splice here, because we just got client hello message
3055
3056 if (fd_table[clientConnection->fd].ssl.get()) {
3057 // Restore default read methods
3058 fd_table[clientConnection->fd].read_method = &default_read_method;
3059 fd_table[clientConnection->fd].write_method = &default_write_method;
3060 }
3061
3062 // XXX: assuming that there was an HTTP/1.1 CONNECT to begin with...
3063 // reset the current protocol to HTTP/1.1 (was "HTTPS" for the bumping process)
3064 transferProtocol = Http::ProtocolVersion();
3065 assert(!pipeline.empty());
3066 Http::StreamPointer context = pipeline.front();
3067 Must(context);
3068 Must(context->http);
3069 ClientHttpRequest *http = context->http;
3070 HttpRequest::Pointer request = http->request;
3071 context->finished();
3072 if (transparent()) {
3073 // For transparent connections, make a new fake CONNECT request, now
3074 // with SNI as target. doCallout() checks, adaptations may need that.
3075 return fakeAConnectRequest("splice", preservedClientData);
3076 } else {
3077 // For non transparent connections make a new tunneled CONNECT, which
3078 // also sets the HttpRequest::flags::forceTunnel flag to avoid
3079 // respond with "Connection Established" to the client.
3080 // This fake CONNECT request required to allow use of SNI in
3081 // doCallout() checks and adaptations.
3082 return initiateTunneledRequest(request, Http::METHOD_CONNECT, "splice", preservedClientData);
3083 }
3084 }
3085
/// Implements steps 2 and 3 of peek/stare SSL bumping. On first entry
/// (bumpStep1) it starts an asynchronous step2 ssl_bump ACL check and
/// returns; on re-entry it creates the client-side TLS session, replays
/// the buffered ClientHello through a held BIO, and starts forwarding.
void
ConnStateData::startPeekAndSplice()
{
    // This is the Step2 of the SSL bumping
    assert(sslServerBump);
    Http::StreamPointer context = pipeline.front();
    ClientHttpRequest *http = context ? context->http : nullptr;

    if (sslServerBump->step == Ssl::bumpStep1) {
        sslServerBump->step = Ssl::bumpStep2;
        // Run an accessList check to decide whether to splice or continue bumping

        ACLFilledChecklist *acl_checklist = new ACLFilledChecklist(Config.accessList.ssl_bump, sslServerBump->request.getRaw(), nullptr);
        acl_checklist->al = http ? http->al : nullptr;
        //acl_checklist->src_addr = params.conn->remote;
        //acl_checklist->my_addr = s->s;
        // step1-only actions make no sense at step2; forbid them
        acl_checklist->banAction(allow_t(ACCESS_ALLOWED, Ssl::bumpNone));
        acl_checklist->banAction(allow_t(ACCESS_ALLOWED, Ssl::bumpClientFirst));
        acl_checklist->banAction(allow_t(ACCESS_ALLOWED, Ssl::bumpServerFirst));
        const char *log_uri = http ? http->log_uri : nullptr;
        acl_checklist->syncAle(sslServerBump->request.getRaw(), log_uri);
        acl_checklist->nonBlockingCheck(httpsSslBumpStep2AccessCheckDone, this);
        return;
    }

    // will call httpsPeeked() with certificate and connection, eventually
    Security::ContextPointer unConfiguredCTX(Ssl::createSSLContext(port->secure.signingCa.cert, port->secure.signingCa.pkey, port->secure));
    fd_table[clientConnection->fd].dynamicTlsContext = unConfiguredCTX;

    if (!httpsCreate(clientConnection, unConfiguredCTX))
        return;

    switchedToHttps_ = true;

    // replay the already-read client bytes through the TLS read BIO, but
    // hold the handshake until doPeekAndSpliceStep() releases it
    auto ssl = fd_table[clientConnection->fd].ssl.get();
    BIO *b = SSL_get_rbio(ssl);
    Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
    bio->setReadBufData(inBuf);
    bio->hold(true);

    // Here squid should have all of the client hello message so the
    // tlsAttemptHandshake() should return 0.
    // This block exist only to force openSSL parse client hello and detect
    // ERR_SECURE_ACCEPT_FAIL error, which should be checked and splice if required.
    if (tlsAttemptHandshake(this, nullptr) < 0) {
        debugs(83, 2, "TLS handshake failed.");
        HttpRequest::Pointer request(http ? http->request : nullptr);
        if (!clientTunnelOnError(this, context, request, HttpRequestMethod(), ERR_SECURE_ACCEPT_FAIL))
            clientConnection->close();
        return;
    }

    // We need to reset inBuf here, to be used by incoming requests in the case
    // of SSL bump
    inBuf.clear();

    debugs(83, 5, "Peek and splice at step2 done. Start forwarding the request!!! ");
    FwdState::Start(clientConnection, sslServerBump->entry, sslServerBump->request.getRaw(), http ? http->al : NULL);
}
3145
3146 void
3147 ConnStateData::doPeekAndSpliceStep()
3148 {
3149 auto ssl = fd_table[clientConnection->fd].ssl.get();
3150 BIO *b = SSL_get_rbio(ssl);
3151 assert(b);
3152 Ssl::ClientBio *bio = static_cast<Ssl::ClientBio *>(BIO_get_data(b));
3153
3154 debugs(33, 5, "PeekAndSplice mode, proceed with client negotiation. Currrent state:" << SSL_state_string_long(ssl));
3155 bio->hold(false);
3156
3157 Comm::SetSelect(clientConnection->fd, COMM_SELECT_WRITE, clientNegotiateSSL, this, 0);
3158 switchedToHttps_ = true;
3159 }
3160
3161 void
3162 ConnStateData::httpsPeeked(PinnedIdleContext pic)
3163 {
3164 Must(sslServerBump != NULL);
3165 Must(sslServerBump->request == pic.request);
3166 Must(pipeline.empty() || pipeline.front()->http == nullptr || pipeline.front()->http->request == pic.request.getRaw());
3167
3168 if (Comm::IsConnOpen(pic.connection)) {
3169 notePinnedConnectionBecameIdle(pic);
3170 debugs(33, 5, HERE << "bumped HTTPS server: " << sslConnectHostOrIp);
3171 } else
3172 debugs(33, 5, HERE << "Error while bumping: " << sslConnectHostOrIp);
3173
3174 getSslContextStart();
3175 }
3176
3177 #endif /* USE_OPENSSL */
3178
3179 bool
3180 ConnStateData::initiateTunneledRequest(HttpRequest::Pointer const &cause, Http::MethodType const method, const char *reason, const SBuf &payload)
3181 {
3182 // fake a CONNECT request to force connState to tunnel
3183 SBuf connectHost;
3184 unsigned short connectPort = 0;
3185
3186 if (pinning.serverConnection != nullptr) {
3187 static char ip[MAX_IPSTRLEN];
3188 pinning.serverConnection->remote.toHostStr(ip, sizeof(ip));
3189 connectHost.assign(ip);
3190 connectPort = pinning.serverConnection->remote.port();
3191 } else if (cause && cause->method == Http::METHOD_CONNECT) {
3192 // We are inside a (not fully established) CONNECT request
3193 connectHost = cause->url.host();
3194 connectPort = cause->url.port();
3195 } else {
3196 debugs(33, 2, "Not able to compute URL, abort request tunneling for " << reason);
3197 return false;
3198 }
3199
3200 debugs(33, 2, "Request tunneling for " << reason);
3201 ClientHttpRequest *http = buildFakeRequest(method, connectHost, connectPort, payload);
3202 HttpRequest::Pointer request = http->request;
3203 request->flags.forceTunnel = true;
3204 http->calloutContext = new ClientRequestContext(http);
3205 http->doCallouts();
3206 clientProcessRequestFinished(this, request);
3207 return true;
3208 }
3209
3210 bool
3211 ConnStateData::fakeAConnectRequest(const char *reason, const SBuf &payload)
3212 {
3213 debugs(33, 2, "fake a CONNECT request to force connState to tunnel for " << reason);
3214
3215 SBuf connectHost;
3216 assert(transparent());
3217 const unsigned short connectPort = clientConnection->local.port();
3218
3219 #if USE_OPENSSL
3220 if (!tlsClientSni_.isEmpty())
3221 connectHost.assign(tlsClientSni_);
3222 else
3223 #endif
3224 {
3225 static char ip[MAX_IPSTRLEN];
3226 clientConnection->local.toHostStr(ip, sizeof(ip));
3227 connectHost.assign(ip);
3228 }
3229
3230 ClientHttpRequest *http = buildFakeRequest(Http::METHOD_CONNECT, connectHost, connectPort, payload);
3231
3232 http->calloutContext = new ClientRequestContext(http);
3233 HttpRequest::Pointer request = http->request;
3234 http->doCallouts();
3235 clientProcessRequestFinished(this, request);
3236 return true;
3237 }
3238
/// Builds a synthetic ClientHttpRequest (with the client stream plumbing
/// attached) for the given method targeting useHost:usePort, and seeds
/// this connection's input buffer with payload. Used to fake CONNECT
/// requests for tunneling/splicing.
/// \param method METHOD_NONE yields an authority-form URL (no scheme)
/// \return the new transaction; the caller is expected to run callouts
ClientHttpRequest *
ConnStateData::buildFakeRequest(Http::MethodType const method, SBuf &useHost, unsigned short usePort, const SBuf &payload)
{
    ClientHttpRequest *http = new ClientHttpRequest(this);
    Http::Stream *stream = new Http::Stream(clientConnection, http);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = stream->reqbuf;
    tempBuffer.length = HTTP_REQBUF_SZ;

    // wire the client-side stream pipeline (reply generator -> socket)
    ClientStreamData newServer = new clientReplyContext(http);
    ClientStreamData newClient = stream;
    clientStreamInit(&http->client_stream, clientGetMoreData, clientReplyDetach,
                     clientReplyStatus, newServer, clientSocketRecipient,
                     clientSocketDetach, newClient, tempBuffer);

    http->uri = SBufToCstring(useHost);
    stream->flags.parsed_ok = 1; // Do we need it?
    stream->mayUseConnection(true);

    // fake requests are bounded by the client connection lifetime timeout
    AsyncCall::Pointer timeoutCall = commCbCall(5, 4, "clientLifetimeTimeout",
                                     CommTimeoutCbPtrFun(clientLifetimeTimeout, stream->http));
    commSetConnTimeout(clientConnection, Config.Timeout.lifetime, timeoutCall);

    stream->registerWithConn();

    MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initClient);
    mx->tcpClient = clientConnection;
    // Setup Http::Request object. Maybe should be replaced by a call to (modified)
    // clientProcessRequest
    HttpRequest::Pointer request = new HttpRequest(mx);
    AnyP::ProtocolType proto = (method == Http::METHOD_NONE) ? AnyP::PROTO_AUTHORITY_FORM : AnyP::PROTO_HTTP;
    request->url.setScheme(proto, nullptr);
    request->method = method;
    request->url.host(useHost.c_str());
    request->url.port(usePort);
    http->initRequest(request.getRaw());

    request->manager(this, http->al);

    if (proto == AnyP::PROTO_HTTP)
        request->header.putStr(Http::HOST, useHost.c_str());

    // record whether these bytes arrived over TLS, for logging/policy
    request->sources |= ((switchedToHttps() || port->transport.protocol == AnyP::PROTO_HTTPS) ? Http::Message::srcHttps : Http::Message::srcHttp);
#if USE_AUTH
    if (getAuth())
        request->auth_user_request = getAuth();
#endif

    // the payload becomes the pending input of the faked transaction
    inBuf = payload;
    flags.readMore = false;

    return http;
}
3293
3294 /// check FD after clientHttp[s]ConnectionOpened, adjust HttpSockets as needed
3295 static bool
3296 OpenedHttpSocket(const Comm::ConnectionPointer &c, const Ipc::FdNoteId portType)
3297 {
3298 if (!Comm::IsConnOpen(c)) {
3299 Must(NHttpSockets > 0); // we tried to open some
3300 --NHttpSockets; // there will be fewer sockets than planned
3301 Must(HttpSockets[NHttpSockets] < 0); // no extra fds received
3302
3303 if (!NHttpSockets) // we could not open any listen sockets at all
3304 fatalf("Unable to open %s",FdNote(portType));
3305
3306 return false;
3307 }
3308 return true;
3309 }
3310
3311 /// find any unused HttpSockets[] slot and store fd there or return false
3312 static bool
3313 AddOpenedHttpSocket(const Comm::ConnectionPointer &conn)
3314 {
3315 bool found = false;
3316 for (int i = 0; i < NHttpSockets && !found; ++i) {
3317 if ((found = HttpSockets[i] < 0))
3318 HttpSockets[i] = conn->fd;
3319 }
3320 return found;
3321 }
3322
/// Opens a TCP listener for every configured http(s)_port, validating
/// SSL-bump prerequisites per port and registering accept handlers via
/// the IPC listener-opening machinery.
static void
clientHttpConnectionsOpen(void)
{
    for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
        const SBuf &scheme = AnyP::UriScheme(s->transport.protocol).image();

        if (MAXTCPLISTENPORTS == NHttpSockets) {
            debugs(1, DBG_IMPORTANT, "WARNING: You have too many '" << scheme << "_port' lines.");
            debugs(1, DBG_IMPORTANT, " The limit is " << MAXTCPLISTENPORTS << " HTTP ports.");
            continue;
        }

#if USE_OPENSSL
        if (s->flags.tunnelSslBumping) {
            // bumping requires both an ssl_bump policy and a working TLS setup
            if (!Config.accessList.ssl_bump) {
                debugs(33, DBG_IMPORTANT, "WARNING: No ssl_bump configured. Disabling ssl-bump on " << scheme << "_port " << s->s);
                s->flags.tunnelSslBumping = false;
            }
            if (!s->secure.staticContext && !s->secure.generateHostCertificates) {
                debugs(1, DBG_IMPORTANT, "Will not bump SSL at " << scheme << "_port " << s->s << " due to TLS initialization failure.");
                s->flags.tunnelSslBumping = false;
                if (s->transport.protocol == AnyP::PROTO_HTTP)
                    s->secure.encryptTransport = false;
            }
            if (s->flags.tunnelSslBumping) {
                // Create ssl_ctx cache for this port.
                Ssl::TheGlobalContextStorage.addLocalStorage(s->s, s->secure.dynamicCertMemCacheSize);
            }
        }
#endif

        if (s->secure.encryptTransport && !s->secure.staticContext) {
            debugs(1, DBG_CRITICAL, "ERROR: Ignoring " << scheme << "_port " << s->s << " due to TLS context initialization failure.");
            continue;
        }

        // Fill out a Comm::Connection which IPC will open as a listener for us
        // then pass back when active so we can start a TcpAcceptor subscription.
        s->listenConn = new Comm::Connection;
        s->listenConn->local = s->s;

        s->listenConn->flags = COMM_NONBLOCKING | (s->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
                               (s->flags.natIntercept ? COMM_INTERCEPTION : 0);

        typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
        if (s->transport.protocol == AnyP::PROTO_HTTP) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTP
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpAccept", CommAcceptCbPtrFun(httpAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33,2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened, s, Ipc::fdnHttpSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpSocket, listenCall);

        } else if (s->transport.protocol == AnyP::PROTO_HTTPS) {
            // setup the subscriptions such that new connections accepted by listenConn are handled by HTTPS
            RefCount<AcceptCall> subCall = commCbCall(5, 5, "httpsAccept", CommAcceptCbPtrFun(httpsAccept, CommAcceptCbParams(NULL)));
            Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);

            AsyncCall::Pointer listenCall = asyncCall(33, 2, "clientListenerConnectionOpened",
                                            ListeningStartedDialer(&clientListenerConnectionOpened,
                                                    s, Ipc::fdnHttpsSocket, sub));
            Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, s->listenConn, Ipc::fdnHttpsSocket, listenCall);
        }

        HttpSockets[NHttpSockets] = -1; // set in clientListenerConnectionOpened
        ++NHttpSockets;
    }
}
3392
/// Opens a listener for the given port configuration, dispatching newly
/// accepted connections to subCall. Shares the HttpSockets accounting
/// with clientHttpConnectionsOpen(); used by other protocol modules
/// (e.g. FTP).
void
clientStartListeningOn(AnyP::PortCfgPointer &port, const RefCount< CommCbFunPtrCallT<CommAcceptCbPtrFun> > &subCall, const Ipc::FdNoteId fdNote)
{
    // Fill out a Comm::Connection which IPC will open as a listener for us
    port->listenConn = new Comm::Connection;
    port->listenConn->local = port->s;
    port->listenConn->flags =
        COMM_NONBLOCKING |
        (port->flags.tproxyIntercept ? COMM_TRANSPARENT : 0) |
        (port->flags.natIntercept ? COMM_INTERCEPTION : 0);

    // route new connections to subCall
    typedef CommCbFunPtrCallT<CommAcceptCbPtrFun> AcceptCall;
    Subscription::Pointer sub = new CallSubscription<AcceptCall>(subCall);
    AsyncCall::Pointer listenCall =
        asyncCall(33, 2, "clientListenerConnectionOpened",
                  ListeningStartedDialer(&clientListenerConnectionOpened,
                                         port, fdNote, sub));
    Ipc::StartListening(SOCK_STREAM, IPPROTO_TCP, port->listenConn, fdNote, listenCall);

    assert(NHttpSockets < MAXTCPLISTENPORTS);
    // reserve a slot; clientListenerConnectionOpened() fills it on success
    HttpSockets[NHttpSockets] = -1;
    ++NHttpSockets;
}
3417
/// process clientHttpConnectionsOpen result: when the listener opened
/// successfully, start the TcpAcceptor job and record the received fd
/// in HttpSockets[].
static void
clientListenerConnectionOpened(AnyP::PortCfgPointer &s, const Ipc::FdNoteId portTypeNote, const Subscription::Pointer &sub)
{
    Must(s != NULL);

    if (!OpenedHttpSocket(s->listenConn, portTypeNote))
        return; // listener failed to open; accounting already adjusted

    Must(Comm::IsConnOpen(s->listenConn));

    // TCP: setup a job to handle accept() with subscribed handler
    AsyncJob::Start(new Comm::TcpAcceptor(s, FdNote(portTypeNote), sub));

    debugs(1, DBG_IMPORTANT, "Accepting " <<
           (s->flags.natIntercept ? "NAT intercepted " : "") <<
           (s->flags.tproxyIntercept ? "TPROXY intercepted " : "") <<
           (s->flags.tunnelSslBumping ? "SSL bumped " : "") <<
           (s->flags.accelSurrogate ? "reverse-proxy " : "")
           << FdNote(portTypeNote) << " connections at "
           << s->listenConn);

    Must(AddOpenedHttpSocket(s->listenConn)); // otherwise, we have received a fd we did not ask for
}
3442
/// Opens all configured client listening ports (HTTP(S) via
/// clientHttpConnectionsOpen() plus FTP); fatal when none could be opened.
void
clientOpenListenSockets(void)
{
    clientHttpConnectionsOpen();
    Ftp::StartListening();

    // NHttpSockets counts all client listeners, including FTP ones
    if (NHttpSockets < 1)
        fatal("No HTTP, HTTPS, or FTP ports configured");
}
3452
3453 void
3454 clientConnectionsClose()
3455 {
3456 for (AnyP::PortCfgPointer s = HttpPortList; s != NULL; s = s->next) {
3457 if (s->listenConn != NULL) {
3458 debugs(1, DBG_IMPORTANT, "Closing HTTP(S) port " << s->listenConn->local);
3459 s->listenConn->close();
3460 s->listenConn = NULL;
3461 }
3462 }
3463
3464 Ftp::StopListening();
3465
3466 // TODO see if we can drop HttpSockets array entirely */
3467 for (int i = 0; i < NHttpSockets; ++i) {
3468 HttpSockets[i] = -1;
3469 }
3470
3471 NHttpSockets = 0;
3472 }
3473
/// Compares the request's Vary-derived key against the stored object's
/// to decide how a cache lookup should continue.
/// \retval VARY_NONE   the stored object does not vary
/// \retval VARY_MATCH  the request matches the stored variant
/// \retval VARY_OTHER  a "virtual Vary" marker object was found; retry the lookup with the computed key
/// \retval VARY_CANCEL the variance cannot be handled; bypass the cache
int
varyEvaluateMatch(StoreEntry * entry, HttpRequest * request)
{
    SBuf vary(request->vary_headers);
    int has_vary = entry->getReply()->header.has(Http::HdrType::VARY);
#if X_ACCELERATOR_VARY

    has_vary |=
        entry->getReply()->header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY);
#endif

    if (!has_vary || entry->mem_obj->vary_headers.isEmpty()) {
        if (!vary.isEmpty()) {
            /* Oops... something odd is going on here.. */
            // second lookup landed on a non-varying object: reset and bail
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary object on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            request->vary_headers.clear();
            return VARY_CANCEL;
        }

        if (!has_vary) {
            /* This is not a varying object */
            return VARY_NONE;
        }

        /* virtual "vary" object found. Calculate the vary key and
         * continue the search
         */
        vary = httpMakeVaryMark(request, entry->getReply());

        if (!vary.isEmpty()) {
            request->vary_headers = vary;
            return VARY_OTHER;
        } else {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        }
    } else {
        if (vary.isEmpty()) {
            // first pass: compute the request's vary key lazily
            vary = httpMakeVaryMark(request, entry->getReply());

            if (!vary.isEmpty())
                request->vary_headers = vary;
        }

        if (vary.isEmpty()) {
            /* Ouch.. we cannot handle this kind of variance */
            /* XXX This cannot really happen, but just to be complete */
            return VARY_CANCEL;
        } else if (vary.cmp(entry->mem_obj->vary_headers) == 0) {
            return VARY_MATCH;
        } else {
            /* Oops.. we have already been here and still haven't
             * found the requested variant. Bail out
             */
            debugs(33, DBG_IMPORTANT, "varyEvaluateMatch: Oops. Not a Vary match on second attempt, '" <<
                   entry->mem_obj->urlXXX() << "' '" << vary << "'");
            return VARY_CANCEL;
        }
    }
}
3536
3537 ACLFilledChecklist *
3538 clientAclChecklistCreate(const acl_access * acl, ClientHttpRequest * http)
3539 {
3540 const auto checklist = new ACLFilledChecklist(acl, nullptr, nullptr);
3541 clientAclChecklistFill(*checklist, http);
3542 return checklist;
3543 }
3544
3545 void
3546 clientAclChecklistFill(ACLFilledChecklist &checklist, ClientHttpRequest *http)
3547 {
3548 checklist.setRequest(http->request);
3549 checklist.al = http->al;
3550 checklist.syncAle(http->request, http->log_uri);
3551
3552 // TODO: If http->getConn is always http->request->clientConnectionManager,
3553 // then call setIdent() inside checklist.setRequest(). Otherwise, restore
3554 // USE_IDENT lost in commit 94439e4.
3555 ConnStateData * conn = http->getConn();
3556 const char *ident = (cbdataReferenceValid(conn) &&
3557 conn && conn->clientConnection) ?
3558 conn->clientConnection->rfc931 : dash_str;
3559 checklist.setIdent(ident);
3560 }
3561
3562 bool
3563 ConnStateData::transparent() const
3564 {
3565 return clientConnection != NULL && (clientConnection->flags & (COMM_TRANSPARENT|COMM_INTERCEPTION));
3566 }
3567
3568 BodyPipe::Pointer
3569 ConnStateData::expectRequestBody(int64_t size)
3570 {
3571 bodyPipe = new BodyPipe(this);
3572 if (size >= 0)
3573 bodyPipe->setBodySize(size);
3574 else
3575 startDechunkingRequest();
3576 return bodyPipe;
3577 }
3578
3579 int64_t
3580 ConnStateData::mayNeedToReadMoreBody() const
3581 {
3582 if (!bodyPipe)
3583 return 0; // request without a body or read/produced all body bytes
3584
3585 if (!bodyPipe->bodySizeKnown())
3586 return -1; // probably need to read more, but we cannot be sure
3587
3588 const int64_t needToProduce = bodyPipe->unproducedSize();
3589 const int64_t haveAvailable = static_cast<int64_t>(inBuf.length());
3590
3591 if (needToProduce <= haveAvailable)
3592 return 0; // we have read what we need (but are waiting for pipe space)
3593
3594 return needToProduce - haveAvailable;
3595 }
3596
3597 void
3598 ConnStateData::stopReceiving(const char *error)
3599 {
3600 debugs(33, 4, HERE << "receiving error (" << clientConnection << "): " << error <<
3601 "; old sending error: " <<
3602 (stoppedSending() ? stoppedSending_ : "none"));
3603
3604 if (const char *oldError = stoppedReceiving()) {
3605 debugs(33, 3, HERE << "already stopped receiving: " << oldError);
3606 return; // nothing has changed as far as this connection is concerned
3607 }
3608
3609 stoppedReceiving_ = error;
3610
3611 if (const char *sendError = stoppedSending()) {
3612 debugs(33, 3, HERE << "closing because also stopped sending: " << sendError);
3613 clientConnection->close();
3614 }
3615 }
3616
3617 void
3618 ConnStateData::expectNoForwarding()
3619 {
3620 if (bodyPipe != NULL) {
3621 debugs(33, 4, HERE << "no consumer for virgin body " << bodyPipe->status());
3622 bodyPipe->expectNoConsumption();
3623 }
3624 }
3625
/// initialize dechunking state
void
ConnStateData::startDechunkingRequest()
{
    // the caller must have created the pipe that will carry dechunked bytes
    Must(bodyPipe != NULL);
    debugs(33, 5, HERE << "start dechunking" << bodyPipe->status());
    // a leftover parser would indicate an unfinished earlier dechunking run
    assert(!bodyParser);
    bodyParser = new Http1::TeChunkedParser;
}
3635
/// put parsed content into input buffer and clean up
/// \param withSuccess whether dechunking finished without errors
void
ConnStateData::finishDechunkingRequest(bool withSuccess)
{
    debugs(33, 5, HERE << "finish dechunking: " << withSuccess);

    if (bodyPipe != NULL) {
        debugs(33, 7, HERE << "dechunked tail: " << bodyPipe->status());
        // keep a reference: we still use the pipe after stopProducingFor()
        BodyPipe::Pointer myPipe = bodyPipe;
        stopProducingFor(bodyPipe, withSuccess); // sets bodyPipe->bodySize()
        Must(!bodyPipe); // we rely on it being nil after we are done with body
        if (withSuccess) {
            Must(myPipe->bodySizeKnown());
            Http::StreamPointer context = pipeline.front();
            // tell the current request its now-known total body length
            if (context != NULL && context->http && context->http->request)
                context->http->request->setContentLength(myPipe->bodySize());
        }
    }

    delete bodyParser;
    bodyParser = NULL;
}
3658
3659 // XXX: this is an HTTP/1-only operation
3660 void
3661 ConnStateData::sendControlMsg(HttpControlMsg msg)
3662 {
3663 if (!isOpen()) {
3664 debugs(33, 3, HERE << "ignoring 1xx due to earlier closure");
3665 return;
3666 }
3667
3668 // HTTP/1 1xx status messages are only valid when there is a transaction to trigger them
3669 if (!pipeline.empty()) {
3670 HttpReply::Pointer rep(msg.reply);
3671 Must(rep);
3672 // remember the callback
3673 cbControlMsgSent = msg.cbSuccess;
3674
3675 typedef CommCbMemFunT<HttpControlMsgSink, CommIoCbParams> Dialer;
3676 AsyncCall::Pointer call = JobCallback(33, 5, Dialer, this, HttpControlMsgSink::wroteControlMsg);
3677
3678 if (!writeControlMsgAndCall(rep.getRaw(), call)) {
3679 // but still inform the caller (so it may resume its operation)
3680 doneWithControlMsg();
3681 }
3682 return;
3683 }
3684
3685 debugs(33, 3, HERE << " closing due to missing context for 1xx");
3686 clientConnection->close();
3687 }
3688
3689 void
3690 ConnStateData::doneWithControlMsg()
3691 {
3692 HttpControlMsgSink::doneWithControlMsg();
3693
3694 if (Http::StreamPointer deferredRequest = pipeline.front()) {
3695 debugs(33, 3, clientConnection << ": calling PushDeferredIfNeeded after control msg wrote");
3696 ClientSocketContextPushDeferredIfNeeded(deferredRequest, this);
3697 }
3698 }
3699
3700 /// Our close handler called by Comm when the pinned connection is closed
3701 void
3702 ConnStateData::clientPinnedConnectionClosed(const CommCloseCbParams &io)
3703 {
3704 // FwdState might repin a failed connection sooner than this close
3705 // callback is called for the failed connection.
3706 assert(pinning.serverConnection == io.conn);
3707 pinning.closeHandler = NULL; // Comm unregisters handlers before calling
3708 const bool sawZeroReply = pinning.zeroReply; // reset when unpinning
3709 pinning.serverConnection->noteClosure();
3710 unpinConnection(false);
3711
3712 if (sawZeroReply && clientConnection != NULL) {
3713 debugs(33, 3, "Closing client connection on pinned zero reply.");
3714 clientConnection->close();
3715 }
3716
3717 }
3718
/// Pins the given server connection, without starting remote-closure
/// monitoring (cf. notePinnedConnectionBecameIdle(), which does).
void
ConnStateData::pinBusyConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest::Pointer &request)
{
    pinConnection(pinServer, *request);
}
3724
/// Pins the given connection and, since it is now idle (from Squid's point
/// of view), starts monitoring it for unexpected remote-end closures.
void
ConnStateData::notePinnedConnectionBecameIdle(PinnedIdleContext pic)
{
    Must(pic.connection);
    Must(pic.request);
    pinConnection(pic.connection, *pic.request);

    // monitor pinned server connection for remote-end closures.
    startPinnedConnectionMonitoring();

    if (pipeline.empty())
        kick(); // in case clientParseRequests() was blocked by a busy pic.connection
}
3738
3739 /// Forward future client requests using the given server connection.
3740 void
3741 ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, const HttpRequest &request)
3742 {
3743 if (Comm::IsConnOpen(pinning.serverConnection) &&
3744 pinning.serverConnection->fd == pinServer->fd) {
3745 debugs(33, 3, "already pinned" << pinServer);
3746 return;
3747 }
3748
3749 unpinConnection(true); // closes pinned connection, if any, and resets fields
3750
3751 pinning.serverConnection = pinServer;
3752
3753 debugs(33, 3, HERE << pinning.serverConnection);
3754
3755 Must(pinning.serverConnection != NULL);
3756
3757 const char *pinnedHost = "[unknown]";
3758 pinning.host = xstrdup(request.url.host());
3759 pinning.port = request.url.port();
3760 pinnedHost = pinning.host;
3761 pinning.pinned = true;
3762 if (CachePeer *aPeer = pinServer->getPeer())
3763 pinning.peer = cbdataReference(aPeer);
3764 pinning.auth = request.flags.connectionAuth;
3765 char stmp[MAX_IPSTRLEN];
3766 char desc[FD_DESC_SZ];
3767 snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
3768 (pinning.auth || !pinning.peer) ? pinnedHost : pinning.peer->name,
3769 clientConnection->remote.toUrl(stmp,MAX_IPSTRLEN),
3770 clientConnection->fd);
3771 fd_note(pinning.serverConnection->fd, desc);
3772
3773 typedef CommCbMemFunT<ConnStateData, CommCloseCbParams> Dialer;
3774 pinning.closeHandler = JobCallback(33, 5,
3775 Dialer, this, ConnStateData::clientPinnedConnectionClosed);
3776 // remember the pinned connection so that cb does not unpin a fresher one
3777 typedef CommCloseCbParams Params;
3778 Params &params = GetCommParams<Params>(pinning.closeHandler);
3779 params.conn = pinning.serverConnection;
3780 comm_add_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
3781 }
3782
/// [re]start monitoring pinned connection for peer closures so that we can
/// propagate them to an _idle_ client pinned to that peer
void
ConnStateData::startPinnedConnectionMonitoring()
{
    // no-op when a monitoring read is already scheduled
    if (pinning.readHandler != NULL)
        return; // already monitoring

    typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer;
    pinning.readHandler = JobCallback(33, 3,
                                      Dialer, this, ConnStateData::clientPinnedConnectionRead);
    Comm::Read(pinning.serverConnection, pinning.readHandler);
}
3796
3797 void
3798 ConnStateData::stopPinnedConnectionMonitoring()
3799 {
3800 if (pinning.readHandler != NULL) {
3801 Comm::ReadCancel(pinning.serverConnection->fd, pinning.readHandler);
3802 pinning.readHandler = NULL;
3803 }
3804 }
3805
#if USE_OPENSSL
/// Handles a read-ready event on an idle pinned TLS server connection.
/// \retval true  monitoring was restarted; the connection should be kept
/// \retval false the caller should treat the connection as unusable
bool
ConnStateData::handleIdleClientPinnedTlsRead()
{
    // A ready-for-reading connection means that the TLS server either closed
    // the connection, sent us some unexpected HTTP data, or started TLS
    // renegotiations. We should close the connection except for the last case.

    Must(pinning.serverConnection != nullptr);
    auto ssl = fd_table[pinning.serverConnection->fd].ssl.get();
    if (!ssl)
        return false;

    // probe with a one-byte application-data read
    char buf[1];
    const int readResult = SSL_read(ssl, buf, sizeof(buf));

    // any decrypted application data on an idle connection is unexpected
    if (readResult > 0 || SSL_pending(ssl) > 0) {
        debugs(83, 2, pinning.serverConnection << " TLS application data read");
        return false;
    }

    switch(const int error = SSL_get_error(ssl, readResult)) {
    case SSL_ERROR_WANT_WRITE:
        debugs(83, DBG_IMPORTANT, pinning.serverConnection << " TLS SSL_ERROR_WANT_WRITE request for idle pinned connection");
    // fall through to restart monitoring, for now
    case SSL_ERROR_NONE:
    case SSL_ERROR_WANT_READ:
        startPinnedConnectionMonitoring();
        return true;

    default:
        debugs(83, 2, pinning.serverConnection << " TLS error: " << error);
        return false;
    }

    // not reached
    return true;
}
#endif
3845
/// Our read handler called by Comm when the server either closes an idle pinned connection or
/// perhaps unexpectedly sends something on that idle (from Squid p.o.v.) connection.
void
ConnStateData::clientPinnedConnectionRead(const CommIoCbParams &io)
{
    pinning.readHandler = NULL; // Comm unregisters handlers before calling

    if (io.flag == Comm::ERR_CLOSING)
        return; // close handler will clean up

    Must(pinning.serverConnection == io.conn);

#if USE_OPENSSL
    // the TLS activity may be benign (e.g., a renegotiation); keep the
    // pinned connection in that case
    if (handleIdleClientPinnedTlsRead())
        return;
#endif

    const bool clientIsIdle = pipeline.empty();

    debugs(33, 3, "idle pinned " << pinning.serverConnection << " read " <<
           io.size << (clientIsIdle ? " with idle client" : ""));

    pinning.serverConnection->close();

    // If we are still sending data to the client, do not close now. When we are done sending,
    // ConnStateData::kick() checks pinning.serverConnection and will close.
    // However, if we are idle, then we must close to inform the idle client and minimize races.
    if (clientIsIdle && clientConnection != NULL)
        clientConnection->close();
}
3876
3877 const Comm::ConnectionPointer
3878 ConnStateData::validatePinnedConnection(HttpRequest *request, const CachePeer *aPeer)
3879 {
3880 debugs(33, 7, HERE << pinning.serverConnection);
3881
3882 bool valid = true;
3883 if (!Comm::IsConnOpen(pinning.serverConnection))
3884 valid = false;
3885 else if (pinning.auth && pinning.host && request && strcasecmp(pinning.host, request->url.host()) != 0)
3886 valid = false;
3887 else if (request && pinning.port != request->url.port())
3888 valid = false;
3889 else if (pinning.peer && !cbdataReferenceValid(pinning.peer))
3890 valid = false;
3891 else if (aPeer != pinning.peer)
3892 valid = false;
3893
3894 if (!valid) {
3895 /* The pinning info is not safe, remove any pinning info */
3896 unpinConnection(true);
3897 }
3898
3899 return pinning.serverConnection;
3900 }
3901
3902 Comm::ConnectionPointer
3903 ConnStateData::borrowPinnedConnection(HttpRequest *request, const CachePeer *aPeer)
3904 {
3905 debugs(33, 7, pinning.serverConnection);
3906 if (validatePinnedConnection(request, aPeer) != NULL)
3907 stopPinnedConnectionMonitoring();
3908
3909 return pinning.serverConnection; // closed if validation failed
3910 }
3911
/// Forgets the pinned server connection and related state.
/// \param andClose whether to also close the pinned server connection
void
ConnStateData::unpinConnection(const bool andClose)
{
    debugs(33, 3, HERE << pinning.serverConnection);

    if (pinning.peer)
        cbdataReferenceDone(pinning.peer);

    if (Comm::IsConnOpen(pinning.serverConnection)) {
        // remove our close handler before any close() below so that
        // closing does not trigger clientPinnedConnectionClosed()
        if (pinning.closeHandler != NULL) {
            comm_remove_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
            pinning.closeHandler = NULL;
        }

        stopPinnedConnectionMonitoring();

        // close the server side socket if requested
        if (andClose)
            pinning.serverConnection->close();
        pinning.serverConnection = NULL;
    }

    safe_free(pinning.host);

    pinning.zeroReply = false;

    /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
     * connection has gone away */
}
3941
/// Logs connections that die before reaching the request parsing stage,
/// unless some existing request object is already responsible for logging.
void
ConnStateData::checkLogging()
{
    // if we are parsing request body, its request is responsible for logging
    if (bodyPipe)
        return;

    // a request currently using this connection is responsible for logging
    if (!pipeline.empty() && pipeline.back()->mayUseConnection())
        return;

    /* Either we are waiting for the very first transaction, or
     * we are done with the Nth transaction and are waiting for N+1st.
     * XXX: We assume that if anything was added to inBuf, then it could
     * only be consumed by actions already covered by the above checks.
     */

    // do not log connections that closed after a transaction (it is normal)
    // TODO: access_log needs ACLs to match received-no-bytes connections
    if (pipeline.nrequests && inBuf.isEmpty())
        return;

    /* Create a temporary ClientHttpRequest object. Its destructor will log. */
    ClientHttpRequest http(this);
    http.req_sz = inBuf.length();
    // XXX: Or we died while waiting for the pinned connection to become idle.
    http.setErrorUri("error:transaction-end-before-headers");
}
3970
/// Whether the on_unsupported_protocol directive may allow tunneling of
/// traffic that Squid failed to parse on this connection. Without OpenSSL
/// support, only the presence of the configured access list matters.
bool
ConnStateData::mayTunnelUnsupportedProto()
{
    return Config.accessList.on_unsupported_protocol
#if USE_OPENSSL
           &&
           ((port->flags.isIntercepted() && port->flags.tunnelSslBumping)
            || (serverBump() && pinning.serverConnection))
#endif
           ;
}
3982
/// Returns the connection annotations container, lazily allocating an
/// empty one on first use (so the result is never nil).
NotePairs::Pointer
ConnStateData::notes()
{
    if (!theNotes)
        theNotes = new NotePairs;
    return theNotes;
}
3990
3991 std::ostream &
3992 operator <<(std::ostream &os, const ConnStateData::PinnedIdleContext &pic)
3993 {
3994 return os << pic.connection << ", request=" << pic.request;
3995 }
3996