/* src/http.cc */
1/*
2 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9/* DEBUG: section 11 Hypertext Transfer Protocol (HTTP) */
10
11/*
12 * Anonymizing patch by lutz@as-node.jena.thur.de
13 * have a look into http-anon.c to get more information.
14 */
15
16#include "squid.h"
17#include "acl/FilledChecklist.h"
18#include "base/AsyncJobCalls.h"
19#include "base/Raw.h"
20#include "base/TextException.h"
21#include "base64.h"
22#include "CachePeer.h"
23#include "client_side.h"
24#include "comm/Connection.h"
25#include "comm/Read.h"
26#include "comm/Write.h"
27#include "CommRead.h"
28#include "error/Detail.h"
29#include "errorpage.h"
30#include "fd.h"
31#include "fde.h"
32#include "globals.h"
33#include "http.h"
34#include "http/one/ResponseParser.h"
35#include "http/one/TeChunkedParser.h"
36#include "http/Stream.h"
37#include "HttpControlMsg.h"
38#include "HttpHdrCc.h"
39#include "HttpHdrContRange.h"
40#include "HttpHdrSc.h"
41#include "HttpHdrScTarget.h"
42#include "HttpHeaderTools.h"
43#include "HttpReply.h"
44#include "HttpRequest.h"
45#include "HttpUpgradeProtocolAccess.h"
46#include "log/access_log.h"
47#include "MemBuf.h"
48#include "MemObject.h"
49#include "neighbors.h"
50#include "pconn.h"
51#include "peer_proxy_negotiate_auth.h"
52#include "refresh.h"
53#include "RefreshPattern.h"
54#include "rfc1738.h"
55#include "SquidConfig.h"
56#include "StatCounters.h"
57#include "Store.h"
58#include "StrList.h"
59#include "tools.h"
60#include "util.h"
61
62#if USE_AUTH
63#include "auth/UserRequest.h"
64#endif
65#if USE_DELAY_POOLS
66#include "DelayPools.h"
67#endif
68
69CBDATA_CLASS_INIT(HttpStateData);
70
71static const char *const crlf = "\r\n";
72
73static void httpMaybeRemovePublic(StoreEntry *, Http::StatusCode);
74static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request,
75 HttpHeader * hdr_out, const int we_do_ranges, const Http::StateFlags &);
76
77HttpStateData::HttpStateData(FwdState *theFwdState) :
78 AsyncJob("HttpStateData"),
79 Client(theFwdState)
80{
81 debugs(11,5, "HttpStateData " << this << " created");
82 serverConnection = fwd->serverConnection();
83
84 if (fwd->serverConnection() != NULL)
85 _peer = cbdataReference(fwd->serverConnection()->getPeer()); /* might be NULL */
86
87 flags.peering = _peer;
88 flags.tunneling = (_peer && request->flags.sslBumped);
89 flags.toOrigin = (!_peer || _peer->options.originserver || request->flags.sslBumped);
90
91 if (_peer) {
92 /*
93 * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
94 * We might end up getting the object from somewhere else if,
95 * for example, the request to this neighbor fails.
96 */
97 if (!flags.tunneling && _peer->options.proxy_only)
98 entry->releaseRequest(true);
99
100#if USE_DELAY_POOLS
101 entry->setNoDelay(_peer->options.no_delay);
102#endif
103 }
104
105 /*
106 * register the handler to free HTTP state data when the FD closes
107 */
108 typedef CommCbMemFunT<HttpStateData, CommCloseCbParams> Dialer;
109 closeHandler = JobCallback(9, 5, Dialer, this, HttpStateData::httpStateConnClosed);
110 comm_add_close_handler(serverConnection->fd, closeHandler);
111}
112
113HttpStateData::~HttpStateData()
114{
115 /*
116 * don't forget that ~Client() gets called automatically
117 */
118
119 if (httpChunkDecoder)
120 delete httpChunkDecoder;
121
122 cbdataReferenceDone(_peer);
123
124 delete upgradeHeaderOut;
125
126 debugs(11,5, "HttpStateData " << this << " destroyed; " << serverConnection);
127}
128
129const Comm::ConnectionPointer &
130HttpStateData::dataConnection() const
131{
132 return serverConnection;
133}
134
135void
136HttpStateData::httpStateConnClosed(const CommCloseCbParams &params)
137{
138 debugs(11, 5, "httpStateFree: FD " << params.fd << ", httpState=" << params.data);
139 doneWithFwd = "httpStateConnClosed()"; // assume FwdState is monitoring too
140 mustStop("HttpStateData::httpStateConnClosed");
141}
142
143void
144HttpStateData::httpTimeout(const CommTimeoutCbParams &)
145{
146 debugs(11, 4, serverConnection << ": '" << entry->url() << "'");
147
148 if (entry->store_status == STORE_PENDING) {
149 fwd->fail(new ErrorState(ERR_READ_TIMEOUT, Http::scGatewayTimeout, fwd->request, fwd->al));
150 }
151
152 closeServer();
153 mustStop("HttpStateData::httpTimeout");
154}
155
156static StoreEntry *
157findPreviouslyCachedEntry(StoreEntry *newEntry) {
158 assert(newEntry->mem_obj);
159 return newEntry->mem_obj->request ?
160 storeGetPublicByRequest(newEntry->mem_obj->request.getRaw()) :
161 storeGetPublic(newEntry->mem_obj->storeId(), newEntry->mem_obj->method);
162}
163
164/// Remove an existing public store entry if the incoming response (to be
165/// stored in a currently private entry) is going to invalidate it.
166static void
167httpMaybeRemovePublic(StoreEntry * e, Http::StatusCode status)
168{
169 int remove = 0;
170 int forbidden = 0;
171
172 // If the incoming response already goes into a public entry, then there is
173 // nothing to remove. This protects ready-for-collapsing entries as well.
174 if (!EBIT_TEST(e->flags, KEY_PRIVATE))
175 return;
176
177 // If the new/incoming response cannot be stored, then it does not
178 // compete with the old stored response for the public key, and the
179 // old stored response should be left as is.
180 if (e->mem_obj->request && !e->mem_obj->request->flags.cachable)
181 return;
182
183 switch (status) {
184
185 case Http::scOkay:
186
187 case Http::scNonAuthoritativeInformation:
188
189 case Http::scMultipleChoices:
190
191 case Http::scMovedPermanently:
192
193 case Http::scFound:
194
195 case Http::scSeeOther:
196
197 case Http::scGone:
198
199 case Http::scNotFound:
200 remove = 1;
201
202 break;
203
204 case Http::scForbidden:
205
206 case Http::scMethodNotAllowed:
207 forbidden = 1;
208
209 break;
210
211#if WORK_IN_PROGRESS
212
213 case Http::scUnauthorized:
214 forbidden = 1;
215
216 break;
217
218#endif
219
220 default:
221 break;
222 }
223
224 if (!remove && !forbidden)
225 return;
226
227 StoreEntry *pe = findPreviouslyCachedEntry(e);
228
229 if (pe != NULL) {
230 assert(e != pe);
231#if USE_HTCP
232 neighborsHtcpClear(e, e->mem_obj->request.getRaw(), e->mem_obj->method, HTCP_CLR_INVALIDATION);
233#endif
234 pe->release(true);
235 }
236
237 /** \par
238 * Also remove any cached HEAD response in case the object has
239 * changed.
240 */
241 if (e->mem_obj->request)
242 pe = storeGetPublicByRequestMethod(e->mem_obj->request.getRaw(), Http::METHOD_HEAD);
243 else
244 pe = storeGetPublic(e->mem_obj->storeId(), Http::METHOD_HEAD);
245
246 if (pe != NULL) {
247 assert(e != pe);
248#if USE_HTCP
249 neighborsHtcpClear(e, e->mem_obj->request.getRaw(), HttpRequestMethod(Http::METHOD_HEAD), HTCP_CLR_INVALIDATION);
250#endif
251 pe->release(true);
252 }
253}
254
255void
256HttpStateData::processSurrogateControl(HttpReply *reply)
257{
258 if (request->flags.accelerated && reply->surrogate_control) {
259 HttpHdrScTarget *sctusable = reply->surrogate_control->getMergedTarget(Config.Accel.surrogate_id);
260
261 if (sctusable) {
262 if (sctusable->hasNoStore() ||
263 (Config.onoff.surrogate_is_remote
264 && sctusable->noStoreRemote())) {
265 surrogateNoStore = true;
266 // Be conservative for now and make it non-shareable because
267 // there is not enough information here to make the decision.
268 entry->makePrivate(false);
269 }
270
271 /* The HttpHeader logic cannot tell if the header it's parsing is a reply to an
272 * accelerated request or not...
273 * Still, this is an abstraction breach. - RC
274 */
275 if (sctusable->hasMaxAge()) {
276 if (sctusable->maxAge() < sctusable->maxStale())
277 reply->expires = reply->date + sctusable->maxAge();
278 else
279 reply->expires = reply->date + sctusable->maxStale();
280
281 /* And update the timestamps */
282 entry->timestampsSet();
283 }
284
285 /* We ignore cache-control directives as per the Surrogate specification */
286 ignoreCacheControl = true;
287
288 delete sctusable;
289 }
290 }
291}
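
/*
 * A minimal standalone sketch of the Surrogate-Control expiry math applied
 * above: the reply expires at Date plus the smaller of max-age and max-stale.
 * The names and types here are illustrative, not Squid's HttpHdrScTarget API;
 * the block is kept out of the build with #if 0.
 */
#if 0
#include <algorithm>
#include <ctime>

static time_t surrogateExpires(const time_t date, const time_t maxAge, const time_t maxStale)
{
    // mirrors: reply->expires = reply->date + min(maxAge(), maxStale())
    return date + std::min(maxAge, maxStale);
}
#endif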
292
293HttpStateData::ReuseDecision::Answers
294HttpStateData::reusableReply(HttpStateData::ReuseDecision &decision)
295{
296 HttpReply const *rep = finalReply();
297 HttpHeader const *hdr = &rep->header;
298 const char *v;
299#if USE_HTTP_VIOLATIONS
300
301 const RefreshPattern *R = NULL;
302
303 /* This strange-looking define first looks up the refresh pattern
304 * and then checks whether the specified flag is set. Its main purpose
305 * is to combine the refresh pattern lookup with the USE_HTTP_VIOLATIONS
306 * condition.
307 */
308#define REFRESH_OVERRIDE(flag) \
309 ((R = (R ? R : refreshLimits(entry->mem_obj->storeId()))) , \
310 (R && R->flags.flag))
311#else
312#define REFRESH_OVERRIDE(flag) 0
313#endif
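
/*
 * A standalone sketch of the comma-operator pattern behind REFRESH_OVERRIDE:
 * the first use performs the (possibly expensive) lookup, later uses reuse the
 * cached pointer, and the whole expression still evaluates to the flag test.
 * The lookup function and flag names below are illustrative only; the block is
 * kept out of the build with #if 0.
 */
#if 0
struct FakeLimits { bool ignoreNoStore = false; };
static const FakeLimits *fakeLookup() { static FakeLimits limits; return &limits; }

#define FAKE_OVERRIDE(flag) \
    ((cached = (cached ? cached : fakeLookup())), (cached && cached->flag))

static bool fakeOverrideExample()
{
    const FakeLimits *cached = nullptr;
    // one lookup, two flag tests: the second use reuses the cached pointer
    return FAKE_OVERRIDE(ignoreNoStore) || FAKE_OVERRIDE(ignoreNoStore);
}
#endif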
314
315 if (EBIT_TEST(entry->flags, RELEASE_REQUEST))
316 return decision.make(ReuseDecision::doNotCacheButShare, "the entry has been released");
317
318 // RFC 7234 section 4: a cache MUST use the most recent response
319 // (as determined by the Date header field)
320 // TODO: decide whether such responses could be shareable
321 if (sawDateGoBack)
322 return decision.make(ReuseDecision::reuseNot, "the response has an older date header");
323
324 // Check for Surrogate/1.0 protocol conditions
325 // NP: reverse-proxy traffic our parent server has instructed us never to cache
326 if (surrogateNoStore)
327 return decision.make(ReuseDecision::reuseNot, "Surrogate-Control:no-store");
328
329 // RFC 2616: HTTP/1.1 Cache-Control conditions
330 if (!ignoreCacheControl) {
331 // XXX: check whether the request headers alone were enough to prevent caching earlier
332 // (ie a no-store request header); if so, there is no need to check them all again here.
333 // For now we are not reliably doing that, so we waste CPU re-checking request CC.
334
335 // RFC 2616 section 14.9.2 - MUST NOT cache any response with request CC:no-store
336 if (request && request->cache_control && request->cache_control->hasNoStore() &&
337 !REFRESH_OVERRIDE(ignore_no_store))
338 return decision.make(ReuseDecision::reuseNot,
339 "client request Cache-Control:no-store");
340
341 // NP: request CC:no-cache only means cache READ is forbidden. STORE is permitted.
342 if (rep->cache_control && rep->cache_control->hasNoCacheWithParameters()) {
343 /* TODO: we are allowed to cache when no-cache= has parameters.
344 * Provided we strip away any of the listed headers unless they are revalidated
345 * successfully (ie, must revalidate AND these headers are prohibited on stale replies).
346 * That is a bit tricky for squid right now so we avoid caching entirely.
347 */
348 return decision.make(ReuseDecision::reuseNot,
349 "server reply Cache-Control:no-cache has parameters");
350 }
351
352 // NP: request CC:private is undefined. We ignore.
353 // NP: other request CC flags are limiters on HIT/MISS. We don't care about them here.
354
355 // RFC 2616 section 14.9.2 - MUST NOT cache any response with CC:no-store
356 if (rep->cache_control && rep->cache_control->hasNoStore() &&
357 !REFRESH_OVERRIDE(ignore_no_store))
358 return decision.make(ReuseDecision::reuseNot,
359 "server reply Cache-Control:no-store");
360
361 // RFC 2616 section 14.9.1 - MUST NOT cache any response with CC:private in a shared cache like Squid.
362 // CC:private overrides CC:public when both are present in a response.
363 // TODO: add a shared/private cache configuration possibility.
364 if (rep->cache_control &&
365 rep->cache_control->hasPrivate() &&
366 !REFRESH_OVERRIDE(ignore_private)) {
367 /* TODO: we are allowed to cache when private= has parameters.
368 * Provided we strip away any of the listed headers unless they are revalidated
369 * successfully (ie, must revalidate AND these headers are prohibited on stale replies).
370 * That is a bit tricky for squid right now so we avoid caching entirely.
371 */
372 return decision.make(ReuseDecision::reuseNot,
373 "server reply Cache-Control:private");
374 }
375 }
376
377 // RFC 2068, sec 14.9.4 - MUST NOT cache any response with Authentication UNLESS certain CC controls are present
378 // allow HTTP violations to IGNORE those controls (ie re-block caching Auth)
379 if (request && (request->flags.auth || request->flags.authSent)) {
380 if (!rep->cache_control)
381 return decision.make(ReuseDecision::reuseNot,
382 "authenticated and server reply missing Cache-Control");
383
384 if (ignoreCacheControl)
385 return decision.make(ReuseDecision::reuseNot,
386 "authenticated and ignoring Cache-Control");
387
388 bool mayStore = false;
389 // HTTPbis pt6 section 3.2: a response CC:public is present
390 if (rep->cache_control->hasPublic()) {
391 debugs(22, 3, "Authenticated but server reply Cache-Control:public");
392 mayStore = true;
393
394 // HTTPbis pt6 section 3.2: a response CC:must-revalidate is present
395 } else if (rep->cache_control->hasMustRevalidate()) {
396 debugs(22, 3, "Authenticated but server reply Cache-Control:must-revalidate");
397 mayStore = true;
398
399#if USE_HTTP_VIOLATIONS
400 // NP: given the must-revalidate exception we should also be able to exempt no-cache.
401 // HTTPbis WG verdict on this is that it is omitted from the spec due to being 'unexpected' by
402 // some. The caching+revalidate is not exactly unsafe though with Squid's interpretation of no-cache
403 // (without parameters) as equivalent to must-revalidate in the reply.
404 } else if (rep->cache_control->hasNoCacheWithoutParameters()) {
405 debugs(22, 3, "Authenticated but server reply Cache-Control:no-cache (equivalent to must-revalidate)");
406 mayStore = true;
407#endif
408
409 // HTTPbis pt6 section 3.2: a response CC:s-maxage is present
410 } else if (rep->cache_control->hasSMaxAge()) {
411 debugs(22, 3, "Authenticated but server reply Cache-Control:s-maxage");
412 mayStore = true;
413 }
414
415 if (!mayStore)
416 return decision.make(ReuseDecision::reuseNot, "authenticated transaction");
417
418 // NP: response CC:no-cache is equivalent to CC:must-revalidate,max-age=0. We MAY cache, and do so.
419 // NP: other request CC flags are limiters on HIT/MISS/REFRESH. We don't care about them here.
420 }
421
422 /* HACK: The "multipart/x-mixed-replace" content type is used for
423 * continuous push replies. These are generally dynamic and
424 * probably should not be cachable
425 */
426 if ((v = hdr->getStr(Http::HdrType::CONTENT_TYPE)))
427 if (!strncasecmp(v, "multipart/x-mixed-replace", 25))
428 return decision.make(ReuseDecision::reuseNot, "Content-Type:multipart/x-mixed-replace");
429
430 // TODO: if possible, provide more specific message for each status code
431 static const char *shareableError = "shareable error status code";
432 static const char *nonShareableError = "non-shareable error status code";
433 ReuseDecision::Answers statusAnswer = ReuseDecision::reuseNot;
434 const char *statusReason = nonShareableError;
435
436 switch (rep->sline.status()) {
437
438 /* There are several situations when a non-cacheable response may be
439 * still shareable (e.g., among collapsed clients). We assume that these
440 * are 3xx and 5xx responses, indicating server problems, and some of the
441 * 4xx responses that are common to all clients with a given cache key (e.g.,
442 * 404 Not Found or 414 URI Too Long). On the other hand, we should not
443 * share non-cacheable client-specific errors, such as 400 Bad Request
444 * or 406 Not Acceptable.
445 */
446
447 /* Responses that are cacheable */
448
449 case Http::scOkay:
450
451 case Http::scNonAuthoritativeInformation:
452
453 case Http::scMultipleChoices:
454
455 case Http::scMovedPermanently:
456 case Http::scPermanentRedirect:
457
458 case Http::scGone:
459 /*
460 * Don't cache objects that need to be refreshed on next request,
461 * unless we know how to refresh them.
462 */
463
464 if (refreshIsCachable(entry) || REFRESH_OVERRIDE(store_stale))
465 decision.make(ReuseDecision::cachePositively, "refresh check returned cacheable");
466 else
467 decision.make(ReuseDecision::doNotCacheButShare, "refresh check returned non-cacheable");
468 break;
469
470 /* Responses that are cacheable only if the server says so */
471
472 case Http::scFound:
473 case Http::scTemporaryRedirect:
474 if (rep->date <= 0)
475 decision.make(ReuseDecision::doNotCacheButShare, "Date is missing/invalid");
476 else if (rep->expires > rep->date)
477 decision.make(ReuseDecision::cachePositively, "Expires > Date");
478 else
479 decision.make(ReuseDecision::doNotCacheButShare, "Expires <= Date");
480 break;
481
482 /* These responses can be negatively cached. Most can also be shared. */
483 case Http::scNoContent:
484 case Http::scUseProxy:
485 case Http::scForbidden:
486 case Http::scNotFound:
487 case Http::scMethodNotAllowed:
488 case Http::scUriTooLong:
489 case Http::scInternalServerError:
490 case Http::scNotImplemented:
491 case Http::scBadGateway:
492 case Http::scServiceUnavailable:
493 case Http::scGatewayTimeout:
494 case Http::scMisdirectedRequest:
495 statusAnswer = ReuseDecision::doNotCacheButShare;
496 statusReason = shareableError;
497 /* [[fallthrough]] to the actual decision making below */
498
499 case Http::scBadRequest: // no sharing; perhaps the server did not like something specific to this request
500#if USE_HTTP_VIOLATIONS
501 if (Config.negativeTtl > 0)
502 decision.make(ReuseDecision::cacheNegatively, "Config.negativeTtl > 0");
503 else
504#endif
505 decision.make(statusAnswer, statusReason);
506 break;
507
508 /* these responses can never be cached, some
509 of them can be shared though */
510 case Http::scSeeOther:
511 case Http::scNotModified:
512 case Http::scUnauthorized:
513 case Http::scProxyAuthenticationRequired:
514 case Http::scPaymentRequired:
515 case Http::scInsufficientStorage:
516 // TODO: use more specific reason for non-error status codes
517 decision.make(ReuseDecision::doNotCacheButShare, shareableError);
518 break;
519
520 case Http::scPartialContent: /* Not yet supported. TODO: make shareable for suitable ranges */
521 case Http::scNotAcceptable:
522 case Http::scRequestTimeout: // TODO: is this shareable?
523 case Http::scConflict: // TODO: is this shareable?
524 case Http::scLengthRequired:
525 case Http::scPreconditionFailed:
526 case Http::scContentTooLarge:
527 case Http::scUnsupportedMediaType:
528 case Http::scUnprocessableEntity:
529 case Http::scLocked: // TODO: is this shareable?
530 case Http::scFailedDependency:
531 case Http::scRequestedRangeNotSatisfied:
532 case Http::scExpectationFailed:
533 case Http::scInvalidHeader: /* Squid header parsing error */
534 case Http::scHeaderTooLarge:
535 decision.make(ReuseDecision::reuseNot, nonShareableError);
536 break;
537
538 default:
539 /* RFC 2616 section 6.1.1: an unrecognized response MUST NOT be cached. */
540 decision.make(ReuseDecision::reuseNot, "unknown status code");
541 break;
542 }
543
544 return decision.answer;
545}
546
547/// assemble a variant key (vary-mark) from the given Vary header and HTTP request
548static void
549assembleVaryKey(String &vary, SBuf &vstr, const HttpRequest &request)
550{
551 static const SBuf asterisk("*");
552 const char *pos = nullptr;
553 const char *item = nullptr;
554 int ilen = 0;
555
556 while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
557 SBuf name(item, ilen);
558 if (name == asterisk) {
559 vstr = asterisk;
560 break;
561 }
562 name.toLower();
563 if (!vstr.isEmpty())
564 vstr.append(", ", 2);
565 vstr.append(name);
566 String hdr(request.header.getByName(name));
567 const char *value = hdr.termedBuf();
568 if (value) {
569 value = rfc1738_escape_part(value);
570 vstr.append("=\"", 2);
571 vstr.append(value);
572 vstr.append("\"", 1);
573 }
574
575 hdr.clean();
576 }
577}
578
579/*
580 * For Vary, store the relevant request headers as
581 * virtual headers in the reply.
582 * Returns an empty SBuf if the variance cannot be stored.
583 */
584SBuf
585httpMakeVaryMark(HttpRequest * request, HttpReply const * reply)
586{
587 SBuf vstr;
588 String vary;
589
590 vary = reply->header.getList(Http::HdrType::VARY);
591 assembleVaryKey(vary, vstr, *request);
592
593#if X_ACCELERATOR_VARY
594 vary.clean();
595 vary = reply->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);
596 assembleVaryKey(vary, vstr, *request);
597#endif
598
599 debugs(11, 3, vstr);
600 return vstr;
601}
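
/*
 * A simplified standalone sketch of the vary-mark idea implemented by
 * assembleVaryKey()/httpMakeVaryMark() above: for each field name listed in
 * Vary, record the lower-cased name and, when present, its request value as
 * name="value", collapsing everything to "*" when Vary:* is seen. The flat
 * header map and the missing %-escaping are simplifications, not Squid's
 * SBuf/String code; the block is kept out of the build with #if 0.
 */
#if 0
#include <cctype>
#include <map>
#include <sstream>
#include <string>

static std::string makeVaryMarkSketch(const std::string &vary,
                                      const std::map<std::string, std::string> &requestHeaders)
{
    std::string mark;
    std::istringstream names(vary);
    std::string name;
    while (std::getline(names, name, ',')) {
        // trim surrounding whitespace from the field name
        const auto begin = name.find_first_not_of(" \t");
        const auto end = name.find_last_not_of(" \t");
        if (begin == std::string::npos)
            continue;
        name = name.substr(begin, end - begin + 1);
        if (name == "*")
            return "*"; // Vary:* subsumes everything
        for (auto &c : name)
            c = static_cast<char>(tolower(static_cast<unsigned char>(c)));
        if (!mark.empty())
            mark += ", ";
        mark += name;
        const auto value = requestHeaders.find(name);
        if (value != requestHeaders.end())
            mark += "=\"" + value->second + "\""; // the real code also %-escapes the value
    }
    return mark;
}
#endif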
602
603void
604HttpStateData::keepaliveAccounting(HttpReply *reply)
605{
606 if (flags.keepalive)
607 if (flags.peering && !flags.tunneling)
608 ++ _peer->stats.n_keepalives_sent;
609
610 if (reply->keep_alive) {
611 if (flags.peering && !flags.tunneling)
612 ++ _peer->stats.n_keepalives_recv;
613
614 if (Config.onoff.detect_broken_server_pconns
615 && reply->bodySize(request->method) == -1 && !flags.chunked) {
616 debugs(11, DBG_IMPORTANT, "keepaliveAccounting: Impossible keep-alive header from '" << entry->url() << "'" );
617 // debugs(11, 2, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------" );
618 flags.keepalive_broken = true;
619 }
620 }
621}
622
623void
624HttpStateData::checkDateSkew(HttpReply *reply)
625{
626 if (reply->date > -1 && flags.toOrigin) {
627 int skew = abs((int)(reply->date - squid_curtime));
628
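        // 86400 seconds is one day: larger skew suggests a badly misconfigured origin clock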
629 if (skew > 86400)
630 debugs(11, 3, "" << request->url.host() << "'s clock is skewed by " << skew << " seconds!");
631 }
632}
633
634/**
635 * This creates the error page itself. It is likely
636 * that the forward-ported reply header max size patch
637 * generates non-HTTP-conformant error pages - in which
638 * case the errors should be 'BAD_GATEWAY' etc.
639 */
640void
641HttpStateData::processReplyHeader()
642{
643 /** Creates a blank header. If this routine is made incremental, this will not do */
644
645 debugs(11, 3, "processReplyHeader: key '" << entry->getMD5Text() << "'");
646
647 assert(!flags.headers_parsed);
648
649 if (!inBuf.length())
650 return;
651
652 /* Attempt to parse the first line; this will define where the protocol, status, reason-phrase and header begin */
653 {
654 if (hp == NULL)
655 hp = new Http1::ResponseParser;
656
657 bool parsedOk = hp->parse(inBuf);
658 // remember the actual received status-code before returning on errors,
659 // overwriting any previously stored value from earlier forwarding attempts
660 request->hier.peer_reply_status = hp->messageStatus(); // may still be scNone
661
662 // sync the buffers after parsing.
663 inBuf = hp->remaining();
664
665 if (hp->needsMoreData()) {
666 if (eof) { // no more data coming
667 assert(!parsedOk);
668 // fall through to handle this premature EOF as an error
669 } else {
670 debugs(33, 5, "Incomplete response, waiting for end of response headers");
671 return;
672 }
673 }
674
675 if (!parsedOk) {
676 // unrecoverable parsing error
677 // TODO: Use Raw! XXX: inBuf no longer has the [beginning of the] malformed header.
678 debugs(11, 3, "Non-HTTP-compliant header:\n---------\n" << inBuf << "\n----------");
679 flags.headers_parsed = true;
680 HttpReply *newrep = new HttpReply;
681 // hp->needsMoreData() means hp->parseStatusCode is unusable, but, here,
682 // it also means that the reply header got truncated by a premature EOF
683 assert(!hp->needsMoreData() || eof);
684 const auto scode = hp->needsMoreData() ? Http::scInvalidHeader : hp->parseStatusCode;
685 newrep->sline.set(Http::ProtocolVersion(), scode);
686 setVirginReply(newrep);
687 return;
688 }
689 }
690
691 /* We know the whole response is in parser now */
692 debugs(11, 2, "HTTP Server " << serverConnection);
693 debugs(11, 2, "HTTP Server RESPONSE:\n---------\n" <<
694 hp->messageProtocol() << " " << hp->messageStatus() << " " << hp->reasonPhrase() << "\n" <<
695 hp->mimeHeader() <<
696 "----------");
697
698 // reset payload tracking to begin after message headers
699 payloadSeen = inBuf.length();
700
701 HttpReply *newrep = new HttpReply;
702 // XXX: RFC 7230 indicates we MAY ignore the reason phrase,
703 // and use an empty string on unknown status.
704 // We do that now to avoid performance regression from using SBuf::c_str()
705 newrep->sline.set(hp->messageProtocol(), hp->messageStatus() /* , hp->reasonPhrase() */);
706
707 // parse headers
708 if (!newrep->parseHeader(*hp)) {
709 newrep->sline.set(hp->messageProtocol(), Http::scInvalidHeader);
710 debugs(11, 2, "error parsing response headers mime block");
711 }
712
713 // done with Parser, now process using the HttpReply
714 hp = NULL;
715
716 newrep->sources |= request->url.getScheme() == AnyP::PROTO_HTTPS ? Http::Message::srcHttps : Http::Message::srcHttp;
717
718 newrep->removeStaleWarnings();
719
720 if (newrep->sline.version.protocol == AnyP::PROTO_HTTP && Http::Is1xx(newrep->sline.status())) {
721 handle1xx(newrep);
722 return;
723 }
724
725 flags.chunked = false;
726 if (newrep->sline.version.protocol == AnyP::PROTO_HTTP && newrep->header.chunked()) {
727 flags.chunked = true;
728 httpChunkDecoder = new Http1::TeChunkedParser;
729 }
730
731 if (!peerSupportsConnectionPinning())
732 request->flags.connectionAuthDisabled = true;
733
734 HttpReply *vrep = setVirginReply(newrep);
735 flags.headers_parsed = true;
736
737 keepaliveAccounting(vrep);
738
739 checkDateSkew(vrep);
740
741 processSurrogateControl (vrep);
742}
743
744/// ignore or start forwarding the 1xx response (a.k.a., control message)
745void
746HttpStateData::handle1xx(HttpReply *reply)
747{
748 if (fwd->al)
749 fwd->al->reply = reply;
750
751 HttpReply::Pointer msg(reply); // will destroy reply if unused
752
753 // one 1xx at a time: we must not be called while waiting for previous 1xx
754 Must(!flags.handling1xx);
755 flags.handling1xx = true;
756
757 const auto statusCode = reply->sline.status();
758
759 // drop1xx() needs to handle HTTP 101 (Switching Protocols) responses
760 // specially because they indicate that the server has stopped speaking HTTP
761 Must(!flags.serverSwitchedProtocols);
762 flags.serverSwitchedProtocols = (statusCode == Http::scSwitchingProtocols);
763
764 if (statusCode == Http::scContinue && request->forcedBodyContinuation)
765 return drop1xx("we have sent it already");
766
767 if (!request->canHandle1xx())
768 return drop1xx("the client does not support it");
769
770#if USE_HTTP_VIOLATIONS
771 // check whether the 1xx response forwarding is allowed by squid.conf
772 if (Config.accessList.reply) {
773 ACLFilledChecklist ch(Config.accessList.reply, originalRequest().getRaw());
774 ch.al = fwd->al;
775 ch.reply = reply;
776 ch.syncAle(originalRequest().getRaw(), nullptr);
777 HTTPMSGLOCK(ch.reply);
778 if (!ch.fastCheck().allowed()) // TODO: support slow lookups?
779 return drop1xx("http_reply_access blocked it");
780 }
781#endif // USE_HTTP_VIOLATIONS
782
783 if (flags.serverSwitchedProtocols) {
784 if (const auto reason = blockSwitchingProtocols(*reply))
785 return drop1xx(reason);
786 }
787
788 debugs(11, 2, "forwarding 1xx to client");
789
790 // the Sink will use this to call us back after writing 1xx to the client
791 typedef NullaryMemFunT<HttpStateData> CbDialer;
792 const AsyncCall::Pointer cb = JobCallback(11, 3, CbDialer, this,
793 HttpStateData::proceedAfter1xx);
794 CallJobHere1(11, 4, request->clientConnectionManager, ConnStateData,
795 ConnStateData::sendControlMsg, HttpControlMsg(msg, cb));
796 // If the call is not fired, then the Sink is gone, and HttpStateData
797 // will terminate due to an aborted store entry or another similar error.
798 // If we get stuck, it is not handle1xx's fault: we could get stuck
799 // for similar reasons without a 1xx response.
800}
801
802/// if possible, safely ignores the received 1xx control message
803/// otherwise, terminates the server connection
804void
805HttpStateData::drop1xx(const char *reason)
806{
807 if (flags.serverSwitchedProtocols) {
808 debugs(11, 2, "bad 101 because " << reason);
809 const auto err = new ErrorState(ERR_INVALID_RESP, Http::scBadGateway, request.getRaw(), fwd->al);
810 fwd->fail(err);
811 closeServer();
812 mustStop("prohibited HTTP/101 response");
813 return;
814 }
815
816 debugs(11, 2, "ignoring 1xx because " << reason);
817 proceedAfter1xx();
818}
819
820/// \retval nil if the HTTP/101 (Switching Protocols) reply should be forwarded
821/// \retval reason why an attempt to switch protocols should be stopped
822const char *
823HttpStateData::blockSwitchingProtocols(const HttpReply &reply) const
824{
825 if (!upgradeHeaderOut)
826 return "Squid offered no Upgrade at all, but server switched to a tunnel";
827
828 // See RFC 7230 section 6.7 for the corresponding MUSTs
829
830 if (!reply.header.has(Http::HdrType::UPGRADE))
831 return "server did not send an Upgrade header field";
832
833 if (!reply.header.hasListMember(Http::HdrType::CONNECTION, "upgrade", ','))
834 return "server did not send 'Connection: upgrade'";
835
836 const auto acceptedProtos = reply.header.getList(Http::HdrType::UPGRADE);
837 const char *pos = nullptr;
838 const char *accepted = nullptr;
839 int acceptedLen = 0;
840 while (strListGetItem(&acceptedProtos, ',', &accepted, &acceptedLen, &pos)) {
841 debugs(11, 5, "server accepted at least" << Raw(nullptr, accepted, acceptedLen));
842 return nullptr; // OK: let the client validate server's selection
843 }
844
845 return "server sent an essentially empty Upgrade header field";
846}
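
/*
 * A standalone sketch of the HTTP/101 gating rules applied above (RFC 7230
 * section 6.7): we must have offered Upgrade ourselves, and the server must
 * answer with a non-empty Upgrade header and "Connection: upgrade". The flat
 * reply representation below is illustrative, not Squid's HttpHeader API; the
 * block is kept out of the build with #if 0.
 */
#if 0
#include <string>
#include <vector>

struct Reply101Sketch {
    bool weOfferedUpgrade = false;
    std::vector<std::string> upgradeProtocols;  // parsed Upgrade list
    std::vector<std::string> connectionTokens;  // parsed Connection list (lower-cased)
};

// returns nullptr when switching may proceed, otherwise a blocking reason
static const char *blockSwitchingSketch(const Reply101Sketch &r)
{
    if (!r.weOfferedUpgrade)
        return "Squid offered no Upgrade at all, but server switched to a tunnel";
    if (r.upgradeProtocols.empty())
        return "server did not send a usable Upgrade header field";
    for (const auto &token : r.connectionTokens)
        if (token == "upgrade")
            return nullptr; // OK: let the client validate the server's selection
    return "server did not send 'Connection: upgrade'";
}
#endif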
847
848/// restores state and resumes processing after 1xx is ignored or forwarded
849void
850HttpStateData::proceedAfter1xx()
851{
852 Must(flags.handling1xx);
853
854 if (flags.serverSwitchedProtocols) {
855 // pass server connection ownership to request->clientConnectionManager
856 ConnStateData::ServerConnectionContext scc(serverConnection, inBuf);
857 typedef UnaryMemFunT<ConnStateData, ConnStateData::ServerConnectionContext> MyDialer;
858 AsyncCall::Pointer call = asyncCall(11, 3, "ConnStateData::noteTakeServerConnectionControl",
859 MyDialer(request->clientConnectionManager,
860 &ConnStateData::noteTakeServerConnectionControl, scc));
861 ScheduleCallHere(call);
862 fwd->unregister(serverConnection);
863 comm_remove_close_handler(serverConnection->fd, closeHandler);
864 closeHandler = nullptr;
865 serverConnection = nullptr;
866 doneWithFwd = "switched protocols";
867 mustStop(doneWithFwd);
868 return;
869 }
870
871 debugs(11, 2, "continuing with " << payloadSeen << " bytes in buffer after 1xx");
872 CallJobHere(11, 3, this, HttpStateData, HttpStateData::processReply);
873}
874
875/**
876 * returns true if the peer can support connection pinning
877*/
878bool
879HttpStateData::peerSupportsConnectionPinning() const
880{
881 if (!_peer)
882 return true;
883
884 // we are talking "through" rather than "to" our _peer
885 if (flags.tunneling)
886 return true;
887
888 /* If this peer does not support connection pinning (authenticated
889 connections), return false.
890 */
891 if (!_peer->connection_auth)
892 return false;
893
894 const auto &rep = entry->mem().freshestReply();
895
896 /* The peer supports connection pinning and the HTTP reply status
897 is not 401 (Unauthorized), so the related connection can be pinned.
898 */
899 if (rep.sline.status() != Http::scUnauthorized)
900 return true;
901
902 /* The server responded with Http::scUnauthorized and the peer is configured
903 with "connection-auth=on", so we know that the peer supports pinned
904 connections.
905 */
906 if (_peer->connection_auth == 1)
907 return true;
908
909 /* At this point the peer is configured with the "connection-auth=auto"
910 parameter, so we need some extra checks to decide whether we are going
911 to allow pinned connections or not.
912 */
913
914 /* If the peer is configured with originserver, just allow connection
915 pinning (squid 2.6 behaviour).
916 */
917 if (_peer->options.originserver)
918 return true;
919
920 /* If the connection is already pinned, it is OK. */
921 if (request->flags.pinned)
922 return true;
923
924 /* Allow pinned connections only if the Proxy-support header exists in
925 the reply and lists "Session-Based-Authentication",
926 which means that the peer supports connection pinning.
927 */
928 if (rep.header.hasListMember(Http::HdrType::PROXY_SUPPORT, "Session-Based-Authentication", ','))
929 return true;
930
931 return false;
932}
933
934// Called when we parsed (and possibly adapted) the headers but
935// had not started storing (a.k.a., sending) the body yet.
936void
937HttpStateData::haveParsedReplyHeaders()
938{
939 Client::haveParsedReplyHeaders();
940
941 HttpReply *rep = finalReply();
942 const Http::StatusCode statusCode = rep->sline.status();
943
944 entry->timestampsSet();
945
946 /* Check if object is cacheable or not based on reply code */
947 debugs(11, 3, "HTTP CODE: " << statusCode);
948
949 if (StoreEntry *oldEntry = findPreviouslyCachedEntry(entry)) {
950 oldEntry->lock("HttpStateData::haveParsedReplyHeaders");
951 sawDateGoBack = rep->olderThan(oldEntry->hasFreshestReply());
952 oldEntry->unlock("HttpStateData::haveParsedReplyHeaders");
953 }
954
955 if (neighbors_do_private_keys && !sawDateGoBack)
956 httpMaybeRemovePublic(entry, rep->sline.status());
957
958 bool varyFailure = false;
959 if (rep->header.has(Http::HdrType::VARY)
960#if X_ACCELERATOR_VARY
961 || rep->header.has(Http::HdrType::HDR_X_ACCELERATOR_VARY)
962#endif
963 ) {
964 const SBuf vary(httpMakeVaryMark(request.getRaw(), rep));
965
966 if (vary.isEmpty()) {
967 // TODO: check whether such responses are shareable.
968 // Do not share for now.
969 entry->makePrivate(false);
970 if (fwd->reforwardableStatus(rep->sline.status()))
971 EBIT_SET(entry->flags, ENTRY_FWD_HDR_WAIT);
972 varyFailure = true;
973 } else {
974 entry->mem_obj->vary_headers = vary;
975
976 // RFC 7231 section 7.1.4
977 // Vary:* can be cached, but has mandatory revalidation
978 static const SBuf asterisk("*");
979 if (vary == asterisk)
980 EBIT_SET(entry->flags, ENTRY_REVALIDATE_ALWAYS);
981 }
982 }
983
984 if (!varyFailure) {
985 /*
986 * If it's not a reply that we will re-forward, then
987 * allow the client to get it.
988 */
989 if (fwd->reforwardableStatus(rep->sline.status()))
990 EBIT_SET(entry->flags, ENTRY_FWD_HDR_WAIT);
991
992 ReuseDecision decision(entry, statusCode);
993
994 switch (reusableReply(decision)) {
995
996 case ReuseDecision::reuseNot:
997 entry->makePrivate(false);
998 break;
999
1000 case ReuseDecision::cachePositively:
1001 if (!entry->makePublic()) {
1002 decision.make(ReuseDecision::doNotCacheButShare, "public key creation error");
1003 entry->makePrivate(true);
1004 }
1005 break;
1006
1007 case ReuseDecision::cacheNegatively:
1008 if (!entry->cacheNegatively()) {
1009 decision.make(ReuseDecision::doNotCacheButShare, "public key creation error");
1010 entry->makePrivate(true);
1011 }
1012 break;
1013
1014 case ReuseDecision::doNotCacheButShare:
1015 entry->makePrivate(true);
1016 break;
1017
1018 default:
1019 assert(0);
1020 break;
1021 }
1022 debugs(11, 3, "decided: " << decision);
1023 }
1024
1025 if (!ignoreCacheControl) {
1026 if (rep->cache_control) {
1027 // We are required to revalidate on many conditions.
1028 // For security reasons we do so even if storage was caused by refresh_pattern ignore-* option
1029
1030 // CC:must-revalidate or CC:proxy-revalidate
1031 const bool ccMustRevalidate = (rep->cache_control->hasProxyRevalidate() || rep->cache_control->hasMustRevalidate());
1032
1033 // CC:no-cache (only if there are no parameters)
1034 const bool ccNoCacheNoParams = rep->cache_control->hasNoCacheWithoutParameters();
1035
1036 // CC:s-maxage=N
1037 const bool ccSMaxAge = rep->cache_control->hasSMaxAge();
1038
1039 // CC:private (yes, these can sometimes be stored)
1040 const bool ccPrivate = rep->cache_control->hasPrivate();
1041
1042 if (ccNoCacheNoParams || ccPrivate)
1043 EBIT_SET(entry->flags, ENTRY_REVALIDATE_ALWAYS);
1044 else if (ccMustRevalidate || ccSMaxAge)
1045 EBIT_SET(entry->flags, ENTRY_REVALIDATE_STALE);
1046 }
1047#if USE_HTTP_VIOLATIONS // response header Pragma::no-cache is undefined in HTTP
1048 else {
1049 // Expensive calculation. So only do it IF the CC: header is not present.
1050
1051 /* HACK: Pragma: no-cache in _replies_ is not documented in HTTP,
1052 * but servers like "Active Imaging Webcast/2.0" sure do use it */
1053 if (rep->header.has(Http::HdrType::PRAGMA) &&
1054 rep->header.hasListMember(Http::HdrType::PRAGMA,"no-cache",','))
1055 EBIT_SET(entry->flags, ENTRY_REVALIDATE_ALWAYS);
1056 }
1057#endif
1058 }
1059
1060#if HEADERS_LOG
1061 headersLog(1, 0, request->method, rep);
1062
1063#endif
1064}
1065
1066HttpStateData::ConnectionStatus
1067HttpStateData::statusIfComplete() const
1068{
1069 const HttpReply *rep = virginReply();
1070 /** \par
1071 * If the reply wants to close the connection, it takes precedence */
1072
1073 static SBuf close("close", 5);
1074 if (httpHeaderHasConnDir(&rep->header, close))
1075 return COMPLETE_NONPERSISTENT_MSG;
1076
1077 /** \par
1078 * If we sent a Connection:close request header, then this
1079 * can not be a persistent connection.
1080 */
1081 if (!flags.keepalive)
1082 return COMPLETE_NONPERSISTENT_MSG;
1083
1084 /** \par
1085 * If we banned reuse, then this cannot be a persistent connection.
1086 */
1087 if (flags.forceClose)
1088 return COMPLETE_NONPERSISTENT_MSG;
1089
1090 /** \par
1091 * If we haven't sent the whole request then this can not be a persistent
1092 * connection.
1093 */
1094 if (!flags.request_sent) {
1095 debugs(11, 2, "Request not yet fully sent " << request->method << ' ' << entry->url());
1096 return COMPLETE_NONPERSISTENT_MSG;
1097 }
1098
1099 /** \par
1100 * What does the reply have to say about keep-alive?
1101 */
1102 /**
1103 \bug XXX BUG?
1104 * If the origin server (HTTP/1.0) does not send a keep-alive
1105 * header, but keeps the connection open anyway, what happens?
1106 * We'll return here and http.c waits for an EOF before changing
1107 * store_status to STORE_OK. Combine this with ENTRY_FWD_HDR_WAIT
1108 * and an error status code, and we might have to wait until
1109 * the server times out the socket.
1110 */
1111 if (!rep->keep_alive)
1112 return COMPLETE_NONPERSISTENT_MSG;
1113
1114 return COMPLETE_PERSISTENT_MSG;
1115}
1116
1117HttpStateData::ConnectionStatus
1118HttpStateData::persistentConnStatus() const
1119{
1120 debugs(11, 3, serverConnection << " eof=" << eof);
1121 if (eof) // already reached EOF
1122 return COMPLETE_NONPERSISTENT_MSG;
1123
1124 /* If server fd is closing (but we have not been notified yet), stop Comm
1125 I/O to avoid assertions. TODO: Change Comm API to handle callers that
1126 want more I/O after async closing (usually initiated by others). */
1127 // XXX: add canReceive or s/canSend/canTalkToServer/
1128 if (!Comm::IsConnOpen(serverConnection))
1129 return COMPLETE_NONPERSISTENT_MSG;
1130
1131 /** \par
1132 * In a chunked response we do not know the content length, but we are absolutely
1133 * sure about the end of the response, so we call statusIfComplete() to
1134 * decide whether we can be persistent.
1135 */
1136 if (lastChunk && flags.chunked)
1137 return statusIfComplete();
1138
1139 const HttpReply *vrep = virginReply();
1140 debugs(11, 5, "persistentConnStatus: content_length=" << vrep->content_length);
1141
1142 const int64_t clen = vrep->bodySize(request->method);
1143
1144 debugs(11, 5, "persistentConnStatus: clen=" << clen);
1145
1146 /* If the body size is unknown we must wait for EOF */
1147 if (clen < 0)
1148 return INCOMPLETE_MSG;
1149
1150 /** \par
1151 * If the body size is known, we must wait until we've gotten all of it. */
1152 if (clen > 0) {
1153 debugs(11,5, "payloadSeen=" << payloadSeen << " content_length=" << vrep->content_length);
1154
1155 if (payloadSeen < vrep->content_length)
1156 return INCOMPLETE_MSG;
1157
1158 if (payloadTruncated > 0) // already read more than needed
1159 return COMPLETE_NONPERSISTENT_MSG; // disable pconns
1160 }
1161
1162 /** \par
1163 * If there is no message body or we got it all, we can be persistent */
1164 return statusIfComplete();
1165}
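
/*
 * A condensed standalone sketch of the body-completeness rules applied by
 * persistentConnStatus() above: an unknown length means "wait for EOF", a
 * known length means "wait until payloadSeen catches up", and reading past
 * the advertised length forbids reusing the connection. Names here are
 * illustrative; reuse of a complete message still depends on the
 * statusIfComplete()-style keep-alive checks. Kept out of the build with #if 0.
 */
#if 0
enum class BodyState { Incomplete, Complete, CompleteButClose };

static BodyState bodyStateSketch(const long long clen, const long long payloadSeen,
                                 const long long payloadTruncated, const bool eof)
{
    if (clen < 0)                           // unknown size: only EOF ends the body
        return eof ? BodyState::CompleteButClose : BodyState::Incomplete;
    if (payloadSeen < clen)
        return BodyState::Incomplete;       // still expecting Content-Length bytes
    if (payloadTruncated > 0)
        return BodyState::CompleteButClose; // read more than advertised: do not pool
    return BodyState::Complete;             // got exactly what was promised
}
#endif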
1166
1167static void
1168readDelayed(void *context, CommRead const &)
1169{
1170 HttpStateData *state = static_cast<HttpStateData*>(context);
1171 state->flags.do_next_read = true;
1172 state->maybeReadVirginBody();
1173}
1174
1175void
1176HttpStateData::readReply(const CommIoCbParams &io)
1177{
1178 Must(!flags.do_next_read); // XXX: should have been set false by maybeReadVirginBody()
1179 flags.do_next_read = false;
1180
1181 debugs(11, 5, io.conn);
1182
1183 // Bail out early on Comm::ERR_CLOSING - close handlers will tidy up for us
1184 if (io.flag == Comm::ERR_CLOSING) {
1185 debugs(11, 3, "http socket closing");
1186 return;
1187 }
1188
1189 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
1190 abortTransaction("store entry aborted while reading reply");
1191 return;
1192 }
1193
1194 Must(Comm::IsConnOpen(serverConnection));
1195 Must(io.conn->fd == serverConnection->fd);
1196
1197 /*
1198 * Don't reset the timeout value here. The value should be
1199 * counting Config.Timeout.request and applies to the request
1200 * as a whole, not individual read() calls.
1201 * Plus, it breaks our lame *HalfClosed() detection
1202 */
1203
1204 Must(maybeMakeSpaceAvailable(true));
1205 CommIoCbParams rd(this); // will be expanded with ReadNow results
1206 rd.conn = io.conn;
1207 rd.size = entry->bytesWanted(Range<size_t>(0, inBuf.spaceSize()));
1208
1209 if (rd.size <= 0) {
1210 assert(entry->mem_obj);
1211 AsyncCall::Pointer nilCall;
1212 entry->mem_obj->delayRead(DeferredRead(readDelayed, this, CommRead(io.conn, NULL, 0, nilCall)));
1213 return;
1214 }
1215
1216 switch (Comm::ReadNow(rd, inBuf)) {
1217 case Comm::INPROGRESS:
1218 if (inBuf.isEmpty())
1219 debugs(33, 2, io.conn << ": no data to process, " << xstrerr(rd.xerrno));
1220 flags.do_next_read = true;
1221 maybeReadVirginBody();
1222 return;
1223
1224 case Comm::OK:
1225 {
1226 payloadSeen += rd.size;
1227#if USE_DELAY_POOLS
1228 DelayId delayId = entry->mem_obj->mostBytesAllowed();
1229 delayId.bytesIn(rd.size);
1230#endif
1231
1232 statCounter.server.all.kbytes_in += rd.size;
1233 statCounter.server.http.kbytes_in += rd.size;
1234 ++ IOStats.Http.reads;
1235
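        // bucket this read into IOStats.Http.read_hist on a log2 scale:
        // bin becomes the number of binary digits in (rd.size - 1)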
1236 int bin = 0;
1237 for (int clen = rd.size - 1; clen; ++bin)
1238 clen >>= 1;
1239
1240 ++ IOStats.Http.read_hist[bin];
1241
1242 request->hier.notePeerRead();
1243 }
1244
1245 /* Continue to process previously read data */
1246 break;
1247
1248 case Comm::ENDFILE: // close detected by 0-byte read
1249 eof = 1;
1250 flags.do_next_read = false;
1251
1252 /* Continue to process previously read data */
1253 break;
1254
1255 // case Comm::COMM_ERROR:
1256 default: // no other flags should ever occur
1257 debugs(11, 2, io.conn << ": read failure: " << xstrerr(rd.xerrno));
1258 const auto err = new ErrorState(ERR_READ_ERROR, Http::scBadGateway, fwd->request, fwd->al);
1259 err->xerrno = rd.xerrno;
1260 fwd->fail(err);
1261 flags.do_next_read = false;
1262 closeServer();
1263 mustStop("HttpStateData::readReply");
1264 return;
1265 }
1266
1267 /* Process next response from buffer */
1268 processReply();
1269}
1270
1271/// processes the already read and buffered response data, possibly after
1272/// waiting for asynchronous 1xx control message processing
1273void
1274HttpStateData::processReply()
1275{
1276
1277 if (flags.handling1xx) { // we came back after handling a 1xx response
1278 debugs(11, 5, "done with 1xx handling");
1279 flags.handling1xx = false;
1280 Must(!flags.headers_parsed);
1281 }
1282
1283 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
1284 abortTransaction("store entry aborted while we were waiting for processReply()");
1285 return;
1286 }
1287
1288 if (!flags.headers_parsed) { // have not parsed headers yet?
1289 processReplyHeader();
1290
1291 if (!continueAfterParsingHeader()) // parsing error or need more data
1292 return; // TODO: send errors to ICAP
1293
1294 adaptOrFinalizeReply(); // may write to, abort, or "close" the entry
1295 }
1296
1297 // kick more reads if needed and/or process the response body, if any
1298 processReplyBody(); // may call serverComplete()
1299}
1300
1301/**
1302 \retval true if we can continue with processing the body or doing ICAP.
1303 */
1304bool
1305HttpStateData::continueAfterParsingHeader()
1306{
1307 if (flags.handling1xx) {
1308 debugs(11, 5, "wait for 1xx handling");
1309 Must(!flags.headers_parsed);
1310 return false;
1311 }
1312
1313 if (!flags.headers_parsed && !eof) {
1314 debugs(11, 9, "needs more at " << inBuf.length());
1315 flags.do_next_read = true;
1316 /** \retval false If we have not finished parsing the headers and may get more data.
1317 * Schedules more reads to retrieve the missing data.
1318 */
1319 maybeReadVirginBody(); // schedules all kinds of reads; TODO: rename
1320 return false;
1321 }
1322
1323 /** If we are done with parsing, check for errors */
1324
1325 err_type error = ERR_NONE;
1326
1327 if (flags.headers_parsed) { // parsed headers, possibly with errors
1328 // check for header parsing errors
1329 if (HttpReply *vrep = virginReply()) {
1330 const Http::StatusCode s = vrep->sline.status();
1331 const AnyP::ProtocolVersion &v = vrep->sline.version;
1332 if (s == Http::scInvalidHeader && v != Http::ProtocolVersion(0,9)) {
1333 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry->url() << " AKA " << request->url);
1334 error = ERR_INVALID_RESP;
1335 } else if (s == Http::scHeaderTooLarge) {
1336 fwd->dontRetry(true);
1337 error = ERR_TOO_BIG;
1338 } else if (vrep->header.conflictingContentLength()) {
1339 fwd->dontRetry(true);
1340 error = ERR_INVALID_RESP;
1341 } else if (vrep->header.unsupportedTe()) {
1342 fwd->dontRetry(true);
1343 error = ERR_INVALID_RESP;
1344 } else {
1345 return true; // done parsing, got reply, and no error
1346 }
1347 } else {
1348 // parsed headers but got no reply
1349 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No reply at all for " << entry->url() << " AKA " << request->url);
1350 error = ERR_INVALID_RESP;
1351 }
1352 } else {
1353 assert(eof);
1354 if (inBuf.length()) {
1355 error = ERR_INVALID_RESP;
1356 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry->url() << " AKA " << request->url);
1357 } else {
1358 error = ERR_ZERO_SIZE_OBJECT;
1359 debugs(11, (request->flags.accelerated?DBG_IMPORTANT:2), "WARNING: HTTP: Invalid Response: No object data received for " << entry->url() << " AKA " << request->url);
1360 }
1361 }
1362
1363 assert(error != ERR_NONE);
1364 entry->reset();
1365 fwd->fail(new ErrorState(error, Http::scBadGateway, fwd->request, fwd->al));
1366 flags.do_next_read = false;
1367 closeServer();
1368 mustStop("HttpStateData::continueAfterParsingHeader");
1369 return false; // quit on error
1370}
1371
1372/** truncate what we read if we read too much so that writeReplyBody()
1373 writes no more than what we should have read */
1374void
1375HttpStateData::truncateVirginBody()
1376{
1377 assert(flags.headers_parsed);
1378
1379 HttpReply *vrep = virginReply();
1380 int64_t clen = -1;
1381 if (!vrep->expectingBody(request->method, clen) || clen < 0)
1382 return; // no body or a body of unknown size, including chunked
1383
1384 if (payloadSeen - payloadTruncated <= clen)
1385 return; // we did not read too much or already took care of the extras
1386
1387 if (const int64_t extras = payloadSeen - payloadTruncated - clen) {
1388 // server sent more than the advertised content length
1389 debugs(11, 5, "payloadSeen=" << payloadSeen <<
1390 " clen=" << clen << '/' << vrep->content_length <<
1391 " truncated=" << payloadTruncated << '+' << extras);
1392
1393 inBuf.chop(0, inBuf.length() - extras);
1394 payloadTruncated += extras;
1395 }
1396}
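
/*
 * A small standalone sketch of the truncation arithmetic used above: once
 * (payloadSeen - payloadTruncated) exceeds the advertised Content-Length,
 * the surplus bytes are chopped off the end of the read buffer and added to
 * payloadTruncated. Plain std::string stands in for SBuf here; the block is
 * kept out of the build with #if 0.
 */
#if 0
#include <algorithm>
#include <string>

struct TruncationSketch {
    long long payloadSeen = 0;      // raw body bytes read so far
    long long payloadTruncated = 0; // bytes already discarded as excess

    void truncate(std::string &buf, const long long clen)
    {
        if (clen < 0)
            return; // unknown length: nothing to enforce
        const long long extras = payloadSeen - payloadTruncated - clen;
        if (extras <= 0)
            return; // we did not read past the declared body
        const auto drop = std::min<long long>(extras, static_cast<long long>(buf.size()));
        buf.erase(buf.size() - static_cast<std::string::size_type>(drop)); // chop the surplus tail
        payloadTruncated += extras;
    }
};
#endif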
1397
1398/**
1399 * Call this when there is data from the origin server
1400 * which should be sent to either StoreEntry, or to ICAP...
1401 */
1402void
1403HttpStateData::writeReplyBody()
1404{
1405 truncateVirginBody(); // if needed
1406 const char *data = inBuf.rawContent();
1407 int len = inBuf.length();
1408 addVirginReplyBody(data, len);
1409 inBuf.consume(len);
1410
1411 // after addVirginReplyBody() wrote (when not adapting) everything we have
1412 // received to Store, check whether we have received/parsed the entire reply
1413 int64_t clen = -1;
1414 const char *parsedWhole = nullptr;
1415 if (!virginReply()->expectingBody(request->method, clen))
1416 parsedWhole = "http parsed header-only reply";
1417 else if (clen >= 0 && clen == payloadSeen - payloadTruncated)
1418 parsedWhole = "http parsed Content-Length body bytes";
1419 else if (clen < 0 && eof)
1420 parsedWhole = "http parsed body ending with expected/required EOF";
1421 if (parsedWhole)
1422 markParsedVirginReplyAsWhole(parsedWhole);
1423}
1424
1425bool
1426HttpStateData::decodeAndWriteReplyBody()
1427{
1428 assert(flags.chunked);
1429 assert(httpChunkDecoder);
1430 try {
1431 MemBuf decodedData;
1432 decodedData.init();
1433 httpChunkDecoder->setPayloadBuffer(&decodedData);
1434 const bool doneParsing = httpChunkDecoder->parse(inBuf);
1435 inBuf = httpChunkDecoder->remaining(); // sync buffers after parse
1436 addVirginReplyBody(decodedData.content(), decodedData.contentSize());
1437 if (doneParsing) {
1438 lastChunk = 1;
1439 flags.do_next_read = false;
1440 markParsedVirginReplyAsWhole("http parsed last-chunk");
1441 }
1442 return true;
1443 }
1444 catch (...) {
1445 debugs (11, 2, "de-chunking failure: " << CurrentException);
1446 }
1447 return false;
1448}
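
/*
 * A simplified, whole-buffer sketch of chunked-coding removal (the real
 * Http1::TeChunkedParser used above is incremental and also handles chunk
 * extensions and trailers, which this sketch ignores): read a hexadecimal
 * chunk-size line, copy that many payload bytes, skip the trailing CRLF, and
 * stop at the zero-sized last-chunk. Assumes well-formed input; kept out of
 * the build with #if 0.
 */
#if 0
#include <cstdlib>
#include <stdexcept>
#include <string>

static std::string dechunkSketch(const std::string &wire)
{
    std::string body;
    std::string::size_type pos = 0;
    for (;;) {
        const auto eol = wire.find("\r\n", pos);
        if (eol == std::string::npos)
            throw std::runtime_error("truncated chunk-size line");
        const auto chunkSize = std::strtoul(wire.substr(pos, eol - pos).c_str(), nullptr, 16);
        pos = eol + 2;                         // step over the CRLF after the size line
        if (chunkSize == 0)
            break;                             // last-chunk: done (trailers ignored)
        if (pos + chunkSize + 2 > wire.size())
            throw std::runtime_error("truncated chunk data");
        body.append(wire, pos, chunkSize);     // copy the chunk payload
        pos += chunkSize + 2;                  // step over the payload and its trailing CRLF
    }
    return body;
}
#endif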
1449
1450/**
1451 * processReplyBody has two purposes:
1452 * 1 - take the reply body data, if any, and either put it into
1453 * the StoreEntry or give it over to ICAP.
1454 * 2 - see if we made it to the end of the response (persistent
1455 * connections and such)
1456 */
1457void
1458HttpStateData::processReplyBody()
1459{
1460 if (!flags.headers_parsed) {
1461 flags.do_next_read = true;
1462 maybeReadVirginBody();
1463 return;
1464 }
1465
1466#if USE_ADAPTATION
1467 debugs(11,5, "adaptationAccessCheckPending=" << adaptationAccessCheckPending);
1468 if (adaptationAccessCheckPending)
1469 return;
1470
1471#endif
1472
1473 /*
1474 * At this point the reply headers have been parsed and consumed.
1475 * That means header content has been removed from readBuf and
1476 * it contains only body data.
1477 */
1478 if (entry->isAccepting()) {
1479 if (flags.chunked) {
1480 if (!decodeAndWriteReplyBody()) {
1481 flags.do_next_read = false;
1482 serverComplete();
1483 return;
1484 }
1485 } else
1486 writeReplyBody();
1487 }
1488
1489 // storing/sending methods like earlier adaptOrFinalizeReply() or
1490 // above writeReplyBody() may release/abort the store entry.
1491 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
1492 // TODO: In some cases (e.g., 304), we should keep persistent conn open.
1493 // Detect end-of-reply (and, hence, pool our idle pconn) earlier (ASAP).
1494 abortTransaction("store entry aborted while storing reply");
1495 return;
1496 } else
1497 switch (persistentConnStatus()) {
1498 case INCOMPLETE_MSG: {
1499 debugs(11, 5, "processReplyBody: INCOMPLETE_MSG from " << serverConnection);
1500 /* Wait for more data or EOF condition */
1501 AsyncCall::Pointer nil;
1502 if (flags.keepalive_broken) {
1503 commSetConnTimeout(serverConnection, 10, nil);
1504 } else {
1505 commSetConnTimeout(serverConnection, Config.Timeout.read, nil);
1506 }
1507
1508 flags.do_next_read = true;
1509 }
1510 break;
1511
1512 case COMPLETE_PERSISTENT_MSG: {
1513 debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG from " << serverConnection);
1514
1515 // TODO: Remove serverConnectionSaved but preserve exception safety.
1516
1517 commUnsetConnTimeout(serverConnection);
1518 flags.do_next_read = false;
1519
1520 comm_remove_close_handler(serverConnection->fd, closeHandler);
1521 closeHandler = NULL;
1522
1523 Ip::Address client_addr; // XXX: Remove as unused. Why was it added?
1524 if (request->flags.spoofClientIp)
1525 client_addr = request->client_addr;
1526
1527 auto serverConnectionSaved = serverConnection;
1528 fwd->unregister(serverConnection);
1529 serverConnection = nullptr;
1530
1531 bool ispinned = false; // TODO: Rename to isOrShouldBePinned
1532 if (request->flags.pinned) {
1533 ispinned = true;
1534 } else if (request->flags.connectionAuth && request->flags.authSent) {
1535 ispinned = true;
1536 }
1537
1538 if (ispinned) {
1539 if (request->clientConnectionManager.valid()) {
1540 CallJobHere1(11, 4, request->clientConnectionManager,
1541 ConnStateData,
1542 notePinnedConnectionBecameIdle,
1543 ConnStateData::PinnedIdleContext(serverConnectionSaved, request));
1544 } else {
1545 // must not pool/share ispinned connections, even orphaned ones
1546 serverConnectionSaved->close();
1547 }
1548 } else {
1549 fwdPconnPool->push(serverConnectionSaved, request->url.host());
1550 }
1551
1552 serverComplete();
1553 return;
1554 }
1555
1556 case COMPLETE_NONPERSISTENT_MSG:
1557 debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG from " << serverConnection);
1558
1559 serverComplete();
1560 return;
1561 }
1562
1563 maybeReadVirginBody();
1564}
1565
1566bool
1567HttpStateData::mayReadVirginReplyBody() const
1568{
1569 // TODO: Be more precise here. For example, if/when reading trailer, we may
1570 // not be doneWithServer() yet, but we should return false. Similarly, we
1571 // could still be writing the request body after receiving the whole reply.
1572 return !doneWithServer();
1573}
1574
1575void
1576HttpStateData::maybeReadVirginBody()
1577{
1578 // too late to read
1579 if (!Comm::IsConnOpen(serverConnection) || fd_table[serverConnection->fd].closing())
1580 return;
1581
1582 if (!maybeMakeSpaceAvailable(false))
1583 return;
1584
1585 // XXX: get rid of the do_next_read flag
1586 // check for the proper reasons preventing read(2)
1587 if (!flags.do_next_read)
1588 return;
1589
1590 flags.do_next_read = false;
1591
1592 // must not already be waiting for read(2) ...
1593 assert(!Comm::MonitorsRead(serverConnection->fd));
1594
1595 // wait for read(2) to be possible.
1596 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
1597 AsyncCall::Pointer call = JobCallback(11, 5, Dialer, this, HttpStateData::readReply);
1598 Comm::Read(serverConnection, call);
1599}
1600
1601bool
1602HttpStateData::maybeMakeSpaceAvailable(bool doGrow)
1603{
1604 // how much we are allowed to buffer
1605 const int limitBuffer = (flags.headers_parsed ? Config.readAheadGap : Config.maxReplyHeaderSize);
1606
1607 if (limitBuffer < 0 || inBuf.length() >= (SBuf::size_type)limitBuffer) {
1608 // when buffer is at or over limit already
1609 debugs(11, 7, "will not read up to " << limitBuffer << ". buffer has (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
1610 debugs(11, DBG_DATA, "buffer has {" << inBuf << "}");
1611 // Process next response from buffer
1612 processReply();
1613 return false;
1614 }
1615
1616 // how much we want to read
1617 const size_t read_size = calcBufferSpaceToReserve(inBuf.spaceSize(), (limitBuffer - inBuf.length()));
1618
1619 if (!read_size) {
1620 debugs(11, 7, "will not read up to " << read_size << " into buffer (" << inBuf.length() << "/" << inBuf.spaceSize() << ") from " << serverConnection);
1621 return false;
1622 }
1623
1624 // just report whether we could grow or not, do not actually do it
1625 if (doGrow)
1626 return (read_size >= 2);
1627
1628 // we may need to grow the buffer
1629 inBuf.reserveSpace(read_size);
1630 debugs(11, 8, (!flags.do_next_read ? "will not" : "may") <<
1631 " read up to " << read_size << " bytes info buf(" << inBuf.length() << "/" << inBuf.spaceSize() <<
1632 ") from " << serverConnection);
1633
1634 return (inBuf.spaceSize() >= 2); // only read if there is 1+ bytes of space available
1635}
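
/*
 * A standalone sketch of the buffering cap applied above: before headers are
 * parsed the cap is the maximum reply-header size, afterwards it is the
 * read-ahead gap, and the next read may use at most the room left under that
 * cap. Names are illustrative, not Squid's configuration fields; the real
 * calcBufferSpaceToReserve() is more involved. Kept out of the build with #if 0.
 */
#if 0
#include <algorithm>
#include <cstddef>

static size_t nextReadSizeSketch(const bool headersParsed, const size_t buffered,
                                 const size_t readAheadGap, const size_t maxReplyHeaderSize,
                                 const size_t spaceAvailable)
{
    const size_t limit = headersParsed ? readAheadGap : maxReplyHeaderSize;
    if (buffered >= limit)
        return 0;                               // at or over the cap: do not read more
    return std::min(spaceAvailable, limit - buffered);
}
#endif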
1636
1637/// called after writing the very last request byte (body, last-chunk, etc)
1638void
1639HttpStateData::wroteLast(const CommIoCbParams &io)
1640{
1641 debugs(11, 5, serverConnection << ": size " << io.size << ": errflag " << io.flag << ".");
1642#if URL_CHECKSUM_DEBUG
1643
1644 entry->mem_obj->checkUrlChecksum();
1645#endif
1646
1647 // XXX: Keep in sync with Client::sentRequestBody().
1648 // TODO: Extract common parts.
1649
1650 if (io.size > 0) {
1651 fd_bytes(io.fd, io.size, FD_WRITE);
1652 statCounter.server.all.kbytes_out += io.size;
1653 statCounter.server.http.kbytes_out += io.size;
1654 }
1655
1656 if (io.flag == Comm::ERR_CLOSING)
1657 return;
1658
1659 // both successful and failed writes affect response times
1660 request->hier.notePeerWrite();
1661
1662 if (io.flag) {
1663 const auto err = new ErrorState(ERR_WRITE_ERROR, Http::scBadGateway, fwd->request, fwd->al);
1664 err->xerrno = io.xerrno;
1665 fwd->fail(err);
1666 closeServer();
1667 mustStop("HttpStateData::wroteLast");
1668 return;
1669 }
1670
1671 sendComplete();
1672}
1673
1674/// successfully wrote the entire request (including body, last-chunk, etc.)
1675void
1676HttpStateData::sendComplete()
1677{
1678 /*
1679 * Set the read timeout here because it hasn't been set yet.
1680 * We only set the read timeout after the request has been
1681 * fully written to the peer. If we start the timeout
1682 * after connection establishment, then we are likely to hit
1683 * the timeout for POST/PUT requests that have very large
1684 * request bodies.
1685 */
1686 typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
1687 AsyncCall::Pointer timeoutCall = JobCallback(11, 5,
1688 TimeoutDialer, this, HttpStateData::httpTimeout);
1689
1690 commSetConnTimeout(serverConnection, Config.Timeout.read, timeoutCall);
1691 flags.request_sent = true;
1692}
1693
1694void
1695HttpStateData::closeServer()
1696{
1697 debugs(11,5, "closing HTTP server " << serverConnection << " this " << this);
1698
1699 if (Comm::IsConnOpen(serverConnection)) {
1700 fwd->unregister(serverConnection);
1701 comm_remove_close_handler(serverConnection->fd, closeHandler);
1702 closeHandler = NULL;
1703 serverConnection->close();
1704 }
1705}
1706
1707bool
1708HttpStateData::doneWithServer() const
1709{
1710 return !Comm::IsConnOpen(serverConnection);
1711}
1712
1713/*
1714 * Fixup authentication request headers for special cases
1715 */
1716static void
1717httpFixupAuthentication(HttpRequest * request, const HttpHeader * hdr_in, HttpHeader * hdr_out, const Http::StateFlags &flags)
1718{
1719 /* Nothing to do unless we are forwarding to a peer */
1720 if (!flags.peering)
1721 return;
1722
1723 // This request is going "through" rather than "to" our _peer.
1724 if (flags.tunneling)
1725 return;
1726
1727 /* Needs to be explicitly enabled */
1728 if (!request->peer_login)
1729 return;
1730
1731 const auto header = flags.toOrigin ? Http::HdrType::AUTHORIZATION : Http::HdrType::PROXY_AUTHORIZATION;
1732 /* Maybe already dealt with? */
1733 if (hdr_out->has(header))
1734 return;
1735
1736 /* Nothing to do here for PASSTHRU */
1737 if (strcmp(request->peer_login, "PASSTHRU") == 0)
1738 return;
1739
1740 // The dangerous and undocumented PROXYPASS mode is a single sign-on to
1741 // servers using the proxy password. Only Basic authentication can work this
1742 // way. This statement forwards a "basic" Proxy-Authorization value from our
1743 // client to an originserver peer. Other PROXYPASS cases are handled below.
1744 if (flags.toOrigin &&
1745 strcmp(request->peer_login, "PROXYPASS") == 0 &&
1746 hdr_in->has(Http::HdrType::PROXY_AUTHORIZATION)) {
1747
1748 const char *auth = hdr_in->getStr(Http::HdrType::PROXY_AUTHORIZATION);
1749
1750 if (auth && strncasecmp(auth, "basic ", 6) == 0) {
1751 hdr_out->putStr(header, auth);
1752 return;
1753 }
1754 }
1755
1756 char loginbuf[base64_encode_len(MAX_LOGIN_SZ)];
1757 size_t blen;
1758 struct base64_encode_ctx ctx;
1759 base64_encode_init(&ctx);
1760
1761 /* Special mode to pass the username to the upstream cache */
1762 if (*request->peer_login == '*') {
1763 const char *username = "-";
1764
1765 if (request->extacl_user.size())
1766 username = request->extacl_user.termedBuf();
1767#if USE_AUTH
1768 else if (request->auth_user_request != NULL)
1769 username = request->auth_user_request->username();
1770#endif
1771
1772 blen = base64_encode_update(&ctx, loginbuf, strlen(username), reinterpret_cast<const uint8_t*>(username));
1773 blen += base64_encode_update(&ctx, loginbuf+blen, strlen(request->peer_login +1), reinterpret_cast<const uint8_t*>(request->peer_login +1));
1774 blen += base64_encode_final(&ctx, loginbuf+blen);
1775 httpHeaderPutStrf(hdr_out, header, "Basic %.*s", (int)blen, loginbuf);
1776 return;
1777 }
1778
1779 /* external_acl provided credentials */
1780 if (request->extacl_user.size() && request->extacl_passwd.size() &&
1781 (strcmp(request->peer_login, "PASS") == 0 ||
1782 strcmp(request->peer_login, "PROXYPASS") == 0)) {
1783
1784 blen = base64_encode_update(&ctx, loginbuf, request->extacl_user.size(), reinterpret_cast<const uint8_t*>(request->extacl_user.rawBuf()));
1785 blen += base64_encode_update(&ctx, loginbuf+blen, 1, reinterpret_cast<const uint8_t*>(":"));
1786 blen += base64_encode_update(&ctx, loginbuf+blen, request->extacl_passwd.size(), reinterpret_cast<const uint8_t*>(request->extacl_passwd.rawBuf()));
1787 blen += base64_encode_final(&ctx, loginbuf+blen);
1788 httpHeaderPutStrf(hdr_out, header, "Basic %.*s", (int)blen, loginbuf);
1789 return;
1790 }
1791 // if no external user credentials are available to fake authentication with, PASS acts like PASSTHRU
1792 if (strcmp(request->peer_login, "PASS") == 0)
1793 return;
1794
1795 /* Kerberos login to peer */
1796#if HAVE_AUTH_MODULE_NEGOTIATE && HAVE_KRB5 && HAVE_GSSAPI
1797 if (strncmp(request->peer_login, "NEGOTIATE",strlen("NEGOTIATE")) == 0) {
1798 char *Token=NULL;
1799 char *PrincipalName=NULL,*p;
1800 int negotiate_flags = 0;
1801
1802 if ((p=strchr(request->peer_login,':')) != NULL ) {
1803 PrincipalName=++p;
1804 }
1805 if (request->flags.auth_no_keytab) {
1806 negotiate_flags |= PEER_PROXY_NEGOTIATE_NOKEYTAB;
1807 }
1808 Token = peer_proxy_negotiate_auth(PrincipalName, request->peer_host, negotiate_flags);
1809 if (Token) {
1810 httpHeaderPutStrf(hdr_out, header, "Negotiate %s",Token);
1811 }
1812 return;
1813 }
1814#endif /* HAVE_AUTH_MODULE_NEGOTIATE && HAVE_KRB5 && HAVE_GSSAPI */
1815
1816 blen = base64_encode_update(&ctx, loginbuf, strlen(request->peer_login), reinterpret_cast<const uint8_t*>(request->peer_login));
1817 blen += base64_encode_final(&ctx, loginbuf+blen);
1818 httpHeaderPutStrf(hdr_out, header, "Basic %.*s", (int)blen, loginbuf);
1819 return;
1820}
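#if 0 // ---- editorial sketch, not part of Squid ----------------------------
// Shows how a login of the form "user:password" becomes the "Basic <base64>"
// credential that httpFixupAuthentication() above builds with libnettle's
// base64_encode_* helpers. The hand-rolled encoder below exists only to keep
// the sketch self-contained; it is not the Squid implementation.
#include <iostream>
#include <string>

static std::string base64Encode(const std::string &in)
{
    static const char tbl[] =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    std::string out;
    int val = 0, bits = -6;
    for (const unsigned char c : in) {
        val = (val << 8) + c;
        bits += 8;
        while (bits >= 0) {
            out.push_back(tbl[(val >> bits) & 0x3F]);
            bits -= 6;
        }
    }
    if (bits > -6)
        out.push_back(tbl[((val << 8) >> (bits + 8)) & 0x3F]);
    while (out.size() % 4)
        out.push_back('='); // pad to a multiple of four characters
    return out;
}

int main()
{
    const std::string peerLogin = "user:password"; // hypothetical cache_peer login=...
    std::cout << "Proxy-Authorization: Basic " << base64Encode(peerLogin) << '\n';
    // prints: Proxy-Authorization: Basic dXNlcjpwYXNzd29yZA==
}
#endif // --------------------------------------------------------------------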
1821
1822/*
1823 * builds the request headers into the given hdr_out HttpHeader
1824 * used by buildRequestPrefix(), which then packs them into a MemBuf
1825 * note: the caller owns hdr_out and is responsible for Clean()-ing it
1826 */
1827void
1828HttpStateData::httpBuildRequestHeader(HttpRequest * request,
1829 StoreEntry * entry,
1830 const AccessLogEntryPointer &al,
1831 HttpHeader * hdr_out,
1832 const Http::StateFlags &flags)
1833{
1834 /* building buffer for complex strings */
1835#define BBUF_SZ (MAX_URL+32)
1836 LOCAL_ARRAY(char, bbuf, BBUF_SZ);
1837 LOCAL_ARRAY(char, ntoabuf, MAX_IPSTRLEN);
1838 const HttpHeader *hdr_in = &request->header;
1839 const HttpHeaderEntry *e = NULL;
1840 HttpHeaderPos pos = HttpHeaderInitPos;
1841 assert (hdr_out->owner == hoRequest);
1842
1843 /* use our IMS header if the cached entry has Last-Modified time */
1844 if (request->lastmod > -1)
1845 hdr_out->putTime(Http::HdrType::IF_MODIFIED_SINCE, request->lastmod);
1846
1847 // Add our own If-None-Match field if the cached entry has a strong ETag.
1848 // copyOneHeaderFromClientsideRequestToUpstreamRequest() adds client ones.
1849 if (request->etag.size() > 0) {
1850 hdr_out->addEntry(new HttpHeaderEntry(Http::HdrType::IF_NONE_MATCH, SBuf(),
1851 request->etag.termedBuf()));
1852 }
1853
1854 bool we_do_ranges = decideIfWeDoRanges (request);
1855
1856 String strConnection (hdr_in->getList(Http::HdrType::CONNECTION));
1857
1858 while ((e = hdr_in->getEntry(&pos)))
1859 copyOneHeaderFromClientsideRequestToUpstreamRequest(e, strConnection, request, hdr_out, we_do_ranges, flags);
1860
1861 /* Abstraction break: We should interpret multipart/byterange responses
1862 * into offset-length data, and this works around our inability to do so.
1863 */
1864 if (!we_do_ranges && request->multipartRangeRequest()) {
1865 /* don't cache the result */
1866 request->flags.cachable = false;
1867 /* pretend it's not a range request */
1868 request->ignoreRange("want to request the whole object");
1869 request->flags.isRanged = false;
1870 }
1871
1872 hdr_out->addVia(request->http_ver, hdr_in);
1873
1874 if (request->flags.accelerated) {
1875 /* Append Surrogate-Capabilities */
1876 String strSurrogate(hdr_in->getList(Http::HdrType::SURROGATE_CAPABILITY));
1877#if USE_SQUID_ESI
1878 snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0 ESI/1.0\"", Config.Accel.surrogate_id);
1879#else
1880 snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0\"", Config.Accel.surrogate_id);
1881#endif
1882 strListAdd(&strSurrogate, bbuf, ',');
1883 hdr_out->putStr(Http::HdrType::SURROGATE_CAPABILITY, strSurrogate.termedBuf());
1884 }
1885
1886 /** \pre Handle X-Forwarded-For */
1887 if (strcmp(opt_forwarded_for, "delete") != 0) {
1888
1889 String strFwd = hdr_in->getList(Http::HdrType::X_FORWARDED_FOR);
1890
1891 // if we cannot double strFwd size, then it grew past 50% of the limit
1892 if (!strFwd.canGrowBy(strFwd.size())) {
1893 // There is probably a forwarding loop with Via detection disabled.
1894 // If we do nothing, String will assert on overflow soon.
1895 // TODO: Terminate all transactions with huge XFF?
1896 strFwd = "error";
1897
1898 static int warnedCount = 0;
1899 if (warnedCount++ < 100) {
1900 const SBuf url(entry ? SBuf(entry->url()) : request->effectiveRequestUri());
1901 debugs(11, DBG_IMPORTANT, "WARNING: likely forwarding loop with " << url);
1902 }
1903 }
1904
1905 if (strcmp(opt_forwarded_for, "on") == 0) {
1906 /** If set to ON - append client IP or 'unknown'. */
1907 if ( request->client_addr.isNoAddr() )
1908 strListAdd(&strFwd, "unknown", ',');
1909 else
1910 strListAdd(&strFwd, request->client_addr.toStr(ntoabuf, MAX_IPSTRLEN), ',');
1911 } else if (strcmp(opt_forwarded_for, "off") == 0) {
1912 /** If set to OFF - append 'unknown'. */
1913 strListAdd(&strFwd, "unknown", ',');
1914 } else if (strcmp(opt_forwarded_for, "transparent") == 0) {
1915 /** If set to TRANSPARENT - pass through unchanged. */
1916 } else if (strcmp(opt_forwarded_for, "truncate") == 0) {
1917 /** If set to TRUNCATE - drop existing list and replace with client IP or 'unknown'. */
1918 if ( request->client_addr.isNoAddr() )
1919 strFwd = "unknown";
1920 else
1921 strFwd = request->client_addr.toStr(ntoabuf, MAX_IPSTRLEN);
1922 }
1923 if (strFwd.size() > 0)
1924 hdr_out->putStr(Http::HdrType::X_FORWARDED_FOR, strFwd.termedBuf());
1925 }
1926 /** If set to DELETE - do not copy through. */
1927
1928 /* append Host if not there already */
1929 if (!hdr_out->has(Http::HdrType::HOST)) {
1930 if (request->peer_domain) {
1931 hdr_out->putStr(Http::HdrType::HOST, request->peer_domain);
1932 } else {
1933 SBuf authority = request->url.authority();
1934 hdr_out->putStr(Http::HdrType::HOST, authority.c_str());
1935 }
1936 }
1937
1938 /* append Authorization if known in URL, not in header and going direct */
1939 if (!hdr_out->has(Http::HdrType::AUTHORIZATION)) {
1940 if (flags.toOrigin && !request->url.userInfo().isEmpty()) {
1941 static char result[base64_encode_len(MAX_URL*2)]; // should be big enough for a single URI segment
1942 struct base64_encode_ctx ctx;
1943 base64_encode_init(&ctx);
1944 size_t blen = base64_encode_update(&ctx, result, request->url.userInfo().length(), reinterpret_cast<const uint8_t*>(request->url.userInfo().rawContent()));
1945 blen += base64_encode_final(&ctx, result+blen);
1946 result[blen] = '\0';
1947 if (blen)
1948 httpHeaderPutStrf(hdr_out, Http::HdrType::AUTHORIZATION, "Basic %.*s", (int)blen, result);
1949 }
1950 }
1951
1952 /* Fixup (Proxy-)Authorization special cases. Plain relaying dealt with above */
1953 httpFixupAuthentication(request, hdr_in, hdr_out, flags);
1954
1955 /* append Cache-Control, add max-age if not there already */
1956 {
1957 HttpHdrCc *cc = hdr_in->getCc();
1958
1959 if (!cc)
1960 cc = new HttpHdrCc();
1961
1962 /* Add max-age only without no-cache */
1963 if (!cc->hasMaxAge() && !cc->hasNoCache()) {
1964 // XXX: performance regression. c_str() reallocates
1965 SBuf tmp(request->effectiveRequestUri());
1966 cc->maxAge(getMaxAge(entry ? entry->url() : tmp.c_str()));
1967 }
1968
1969 /* Enforce sibling relations */
1970 if (flags.only_if_cached)
1971 cc->onlyIfCached(true);
1972
1973 hdr_out->putCc(cc);
1974
1975 delete cc;
1976 }
1977
1978 // Always send Connection because HTTP/1.0 servers need explicit
1979 // "keep-alive", HTTP/1.1 servers need explicit "close", Upgrade recipients
1980 // need bare "upgrade", and we do not always know the server expectations.
1981 if (!hdr_out->has(Http::HdrType::CONNECTION)) // forwardUpgrade() may add it
1982 hdr_out->putStr(Http::HdrType::CONNECTION, flags.keepalive ? "keep-alive" : "close");
1983
1984 /* append Front-End-Https */
1985 if (flags.front_end_https) {
1986 if (flags.front_end_https == 1 || request->url.getScheme() == AnyP::PROTO_HTTPS)
1987 hdr_out->putStr(Http::HdrType::FRONT_END_HTTPS, "On");
1988 }
1989
1990 if (flags.chunked_request) {
1991 // Do not just copy the original value, so that if the client-side
1992 // starts decoding other encodings, this code remains valid.
1993 hdr_out->putStr(Http::HdrType::TRANSFER_ENCODING, "chunked");
1994 }
1995
1996 /* Now mangle the headers. */
1997 httpHdrMangleList(hdr_out, request, al, ROR_REQUEST);
1998
1999 strConnection.clean();
2000}
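#if 0 // ---- editorial sketch, not part of Squid ----------------------------
// The effect of the forwarded_for modes handled above on the outgoing
// X-Forwarded-For value, using std::string instead of String/strListAdd.
// The mode names mirror the squid.conf "forwarded_for" directive; the helper
// itself is hypothetical.
#include <iostream>
#include <string>

static std::string buildXff(const std::string &mode,
                            std::string received,        // client-sent XFF list
                            const std::string &clientIp) // empty when unknown
{
    const std::string label = clientIp.empty() ? "unknown" : clientIp;
    auto append = [&received](const std::string &item) {
        if (!received.empty())
            received += ",";
        received += item;
    };
    if (mode == "on")
        append(label);     // append the client IP (or "unknown")
    else if (mode == "off")
        append("unknown"); // hide the client IP but keep the received list
    else if (mode == "truncate")
        received = label;  // drop the received list entirely
    else if (mode == "delete")
        received.clear();  // do not send the header at all
    // "transparent": pass the received list through unchanged
    return received;
}

int main()
{
    std::cout << buildXff("on", "192.0.2.1", "203.0.113.7") << '\n';       // 192.0.2.1,203.0.113.7
    std::cout << buildXff("truncate", "192.0.2.1", "203.0.113.7") << '\n'; // 203.0.113.7
}
#endif // --------------------------------------------------------------------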
2001
2002/// copies from-client Upgrade info into the given to-server header while
2003/// honoring configuration filters and following HTTP requirements
2004void
2005HttpStateData::forwardUpgrade(HttpHeader &hdrOut)
2006{
2007 if (!Config.http_upgrade_request_protocols)
2008 return; // forward nothing by default
2009
2010 /* RFC 7230 section 6.7 paragraph 10:
2011 * A server MUST ignore an Upgrade header field that is received in
2012 * an HTTP/1.0 request.
2013 */
2014 if (request->http_ver == Http::ProtocolVersion(1,0))
2015 return;
2016
2017 const auto &hdrIn = request->header;
2018 if (!hdrIn.has(Http::HdrType::UPGRADE))
2019 return;
2020 const auto upgradeIn = hdrIn.getList(Http::HdrType::UPGRADE);
2021
2022 String upgradeOut;
2023
2024 ACLFilledChecklist ch(nullptr, request.getRaw());
2025 ch.al = fwd->al;
2026 const char *pos = nullptr;
2027 const char *offeredStr = nullptr;
2028 int offeredStrLen = 0;
2029 while (strListGetItem(&upgradeIn, ',', &offeredStr, &offeredStrLen, &pos)) {
2030 const ProtocolView offeredProto(offeredStr, offeredStrLen);
2031 debugs(11, 5, "checks all rules applicable to " << offeredProto);
2032 Config.http_upgrade_request_protocols->forApplicable(offeredProto, [&ch, offeredStr, offeredStrLen, &upgradeOut] (const SBuf &cfgProto, const acl_access *guard) {
2033 debugs(11, 5, "checks " << cfgProto << " rule(s)");
2034 ch.changeAcl(guard);
2035 const auto answer = ch.fastCheck();
2036 if (answer.implicit)
2037 return false; // keep looking for an explicit rule match
2038 if (answer.allowed())
2039 strListAdd(upgradeOut, offeredStr, offeredStrLen);
2040 // else drop the offer (explicitly denied cases and ACL errors)
2041 return true; // stop after an explicit rule match or an error
2042 });
2043 }
2044
2045 if (upgradeOut.size()) {
2046 hdrOut.putStr(Http::HdrType::UPGRADE, upgradeOut.termedBuf());
2047
2048 /* RFC 7230 section 6.7 paragraph 10:
2049 * When Upgrade is sent, the sender MUST also send a Connection header
2050 * field that contains an "upgrade" connection option, in
2051 * order to prevent Upgrade from being accidentally forwarded by
2052 * intermediaries that might not implement the listed protocols.
2053 *
2054 * NP: Squid does not truly implement the protocol(s) in this Upgrade.
2055 * For now we are treating an explicit blind tunnel as "implemented"
2056 * regardless of the security implications.
2057 */
2058 hdrOut.putStr(Http::HdrType::CONNECTION, "upgrade");
2059
2060 // Connection:close and Connection:keepalive confuse some Upgrade
2061 // recipients, so we do not send those headers. Our Upgrade request
2062 // implicitly offers connection persistency per HTTP/1.1 defaults.
2063 // Update the keepalive flag to reflect that offer.
2064 // * If the server upgrades, then we would not be talking HTTP past the
2065 // HTTP 101 control message, and HTTP persistence would be irrelevant.
2066 // * Otherwise, our request will contradict onoff.server_pconns=off or
2067 // other no-keepalive conditions (if any). We compensate by copying
2068 // the original no-keepalive decision now and honoring it later.
2069 flags.forceClose = !flags.keepalive;
2070 flags.keepalive = true; // should already be true in most cases
2071 }
2072}
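#if 0 // ---- editorial sketch, not part of Squid ----------------------------
// A simplified version of the Upgrade forwarding rule implemented above:
// keep only the offered protocols that configuration allows and, when any
// offer survives, also send "Connection: upgrade" as RFC 7230 section 6.7
// requires. The real code consults http_upgrade_request_protocols ACLs; this
// sketch substitutes a plain allow set.
#include <iostream>
#include <set>
#include <sstream>
#include <string>
#include <vector>

static std::vector<std::string> filterUpgrade(const std::string &offered,
                                              const std::set<std::string> &allowed)
{
    std::vector<std::string> kept;
    std::istringstream tokens(offered);
    std::string item;
    while (std::getline(tokens, item, ',')) {
        const auto b = item.find_first_not_of(' '); // trim surrounding spaces
        const auto e = item.find_last_not_of(' ');
        if (b == std::string::npos)
            continue;
        item = item.substr(b, e - b + 1);
        if (allowed.count(item))
            kept.push_back(item);
    }
    return kept;
}

int main()
{
    const auto kept = filterUpgrade("websocket, h2c", {"websocket"});
    if (!kept.empty()) {
        std::string value;
        for (const auto &proto : kept)
            value += (value.empty() ? "" : ", ") + proto;
        std::cout << "Upgrade: " << value << '\n';
        std::cout << "Connection: upgrade\n"; // required whenever Upgrade is forwarded
    }
}
#endif // --------------------------------------------------------------------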
2073
2074/**
2075 * Decides whether a particular header may be cloned from the received client's request
2076 * to our outgoing fetch request.
2077 */
2078void
2079copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request, HttpHeader * hdr_out, const int we_do_ranges, const Http::StateFlags &flags)
2080{
2081 debugs(11, 5, "httpBuildRequestHeader: " << e->name << ": " << e->value );
2082
2083 switch (e->id) {
2084
2085 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid should not pass on. */
2086
2087 case Http::HdrType::PROXY_AUTHORIZATION:
2088 /** \par Proxy-Authorization:
2089 * Only pass on proxy authentication to peers for which
2090 * authentication forwarding is explicitly enabled
2091 */
2092 if (!flags.toOrigin && request->peer_login &&
2093 (strcmp(request->peer_login, "PASS") == 0 ||
2094 strcmp(request->peer_login, "PROXYPASS") == 0 ||
2095 strcmp(request->peer_login, "PASSTHRU") == 0)) {
2096 hdr_out->addEntry(e->clone());
2097 }
2098 break;
2099
2100 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid does not pass on. */
2101
2102 case Http::HdrType::CONNECTION: /** \par Connection: */
2103 case Http::HdrType::TE: /** \par TE: */
2104 case Http::HdrType::KEEP_ALIVE: /** \par Keep-Alive: */
2105 case Http::HdrType::PROXY_AUTHENTICATE: /** \par Proxy-Authenticate: */
2106 case Http::HdrType::TRAILER: /** \par Trailer: */
2107 case Http::HdrType::TRANSFER_ENCODING: /** \par Transfer-Encoding: */
2108 break;
2109
2110 /// \par Upgrade is hop-by-hop but forwardUpgrade() may send a filtered one
2111 case Http::HdrType::UPGRADE:
2112 break;
2113
2114 /** \par OTHER headers I haven't bothered to track down yet. */
2115
2116 case Http::HdrType::AUTHORIZATION:
2117 /** \par Authorization:
2118 * Pass on WWW authentication */
2119
2120 if (!flags.toOriginPeer()) {
2121 hdr_out->addEntry(e->clone());
2122 } else {
2123 /** \note Assume that talking to a cache_peer originserver makes
2124 * us a reverse proxy and only forward authentication if enabled
2125 * (see also httpFixupAuthentication for special cases)
2126 */
2127 if (request->peer_login &&
2128 (strcmp(request->peer_login, "PASS") == 0 ||
2129 strcmp(request->peer_login, "PASSTHRU") == 0 ||
2130 strcmp(request->peer_login, "PROXYPASS") == 0)) {
2131 hdr_out->addEntry(e->clone());
2132 }
2133 }
2134
2135 break;
2136
2137 case Http::HdrType::HOST:
2138 /** \par Host:
2139 * Normally Squid rewrites the Host: header.
2140 * However, there is one case when we don't: If the URL
2141 * went through our redirector and the admin configured
2142 * 'redir_rewrites_host' to be off.
2143 */
2144 if (request->peer_domain)
2145 hdr_out->putStr(Http::HdrType::HOST, request->peer_domain);
2146 else if (request->flags.redirected && !Config.onoff.redir_rewrites_host)
2147 hdr_out->addEntry(e->clone());
2148 else {
2149 SBuf authority = request->url.authority();
2150 hdr_out->putStr(Http::HdrType::HOST, authority.c_str());
2151 }
2152
2153 break;
2154
2155 case Http::HdrType::IF_MODIFIED_SINCE:
2156 /** \par If-Modified-Since:
2157 * append unless we added our own,
2158 * but only if cache_miss_revalidate is enabled, or
2159 * the request is not cacheable, or
2160 * the request contains authentication credentials.
2161 * \note at most one client's If-Modified-Since header can pass through
2162 */
2163 // XXX: need to check and cleanup the auth case so cacheable auth requests get cached.
2164 if (hdr_out->has(Http::HdrType::IF_MODIFIED_SINCE))
2165 break;
2166 else if (Config.onoff.cache_miss_revalidate || !request->flags.cachable || request->flags.auth)
2167 hdr_out->addEntry(e->clone());
2168 break;
2169
2170 case Http::HdrType::IF_NONE_MATCH:
2171 /** \par If-None-Match:
2172 * append if the wildcard '*' special case value is present, or
2173 * cache_miss_revalidate is enabled, or
2174 * the request is not cacheable in this proxy, or
2175 * the request contains authentication credentials.
2176 * \note this header lists a set of responses for the server to elide sending. Squid added values are extending that set.
2177 */
2178 // XXX: need to check and cleanup the auth case so cacheable auth requests get cached.
2179 if (hdr_out->hasListMember(Http::HdrType::IF_MATCH, "*", ',') || Config.onoff.cache_miss_revalidate || !request->flags.cachable || request->flags.auth)
2180 hdr_out->addEntry(e->clone());
2181 break;
2182
2183 case Http::HdrType::MAX_FORWARDS:
2184 /** \par Max-Forwards:
2185 * pass only on TRACE or OPTIONS requests */
2186 if (request->method == Http::METHOD_TRACE || request->method == Http::METHOD_OPTIONS) {
2187 const int64_t hops = e->getInt64();
2188
2189 if (hops > 0)
2190 hdr_out->putInt64(Http::HdrType::MAX_FORWARDS, hops - 1);
2191 }
2192
2193 break;
2194
2195 case Http::HdrType::VIA:
2196 /** \par Via:
2197 * If Via is disabled then forward any received header as-is.
2198 * Otherwise, leave it out here; an updated Via is added explicitly via addVia(). */
2199
2200 if (!Config.onoff.via)
2201 hdr_out->addEntry(e->clone());
2202
2203 break;
2204
2205 case Http::HdrType::RANGE:
2206
2207 case Http::HdrType::IF_RANGE:
2208
2209 case Http::HdrType::REQUEST_RANGE:
2210 /** \par Range:, If-Range:, Request-Range:
2211 * Only pass if we accept ranges */
2212 if (!we_do_ranges)
2213 hdr_out->addEntry(e->clone());
2214
2215 break;
2216
2217 case Http::HdrType::PROXY_CONNECTION: // SHOULD ignore. But doing so breaks things.
2218 break;
2219
2220 case Http::HdrType::CONTENT_LENGTH:
2221 // pass through unless we chunk; also, keeping this away from default
2222 // prevents request smuggling via Connection: Content-Length tricks
2223 if (!flags.chunked_request)
2224 hdr_out->addEntry(e->clone());
2225 break;
2226
2227 case Http::HdrType::X_FORWARDED_FOR:
2228
2229 case Http::HdrType::CACHE_CONTROL:
2230 /** \par X-Forwarded-For:, Cache-Control:
2231 * handled specially by Squid, so leave off for now.
2232 * append these after the loop if needed */
2233 break;
2234
2235 case Http::HdrType::FRONT_END_HTTPS:
2236 /** \par Front-End-Https:
2237 * Pass thru only if peer is configured with front-end-https */
2238 if (!flags.front_end_https)
2239 hdr_out->addEntry(e->clone());
2240
2241 break;
2242
2243 default:
2244 /** \par default.
2245 * pass on all other header fields
2246 * which are NOT listed by the special Connection: header. */
2247 if (strConnection.size()>0 && strListIsMember(&strConnection, e->name, ',')) {
2248 debugs(11, 2, "'" << e->name << "' header cropped by Connection: definition");
2249 return;
2250 }
2251
2252 hdr_out->addEntry(e->clone());
2253 }
2254}
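#if 0 // ---- editorial sketch, not part of Squid ----------------------------
// The header-copy policy above, condensed into one predicate: drop well-known
// hop-by-hop fields, drop anything the client listed in its own Connection
// header, and copy everything else. The real switch above also applies
// per-field special cases (Host, Authorization, ranges, and so on).
#include <algorithm>
#include <cctype>
#include <iostream>
#include <set>
#include <string>

static std::string lower(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
    return s;
}

static bool shouldCopy(const std::string &name,
                       const std::set<std::string> &connectionListed)
{
    static const std::set<std::string> hopByHop = {
        "connection", "te", "keep-alive", "proxy-authenticate",
        "trailer", "transfer-encoding", "upgrade", "proxy-connection"
    };
    const auto n = lower(name);
    return !hopByHop.count(n) && !connectionListed.count(n);
}

int main()
{
    const std::set<std::string> listed = {"x-debug-token"}; // from "Connection: X-Debug-Token"
    std::cout << shouldCopy("Accept", listed) << '\n';        // 1: copied
    std::cout << shouldCopy("Keep-Alive", listed) << '\n';    // 0: hop-by-hop
    std::cout << shouldCopy("X-Debug-Token", listed) << '\n'; // 0: named by Connection
}
#endif // --------------------------------------------------------------------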
2255
2256bool
2257HttpStateData::decideIfWeDoRanges (HttpRequest * request)
2258{
2259 bool result = true;
2260 /* decide if we want to do Ranges ourselves
2261 * (and fetch the whole object now).
2262 * We want to handle Ranges ourselves iff
2263 * - we can actually parse client Range specs
2264 * - the specs are expected to be simple enough (e.g. no out-of-order ranges)
2265 * - reply will be cachable
2266 * (If the reply will be uncachable we have to throw it away after
2267 * serving this request, so it is better to forward ranges to
2268 * the server and fetch only the requested content)
2269 */
2270
2271 int64_t roffLimit = request->getRangeOffsetLimit();
2272
2273 if (NULL == request->range || !request->flags.cachable
2274 || request->range->offsetLimitExceeded(roffLimit) || request->flags.connectionAuth)
2275 result = false;
2276
2277 debugs(11, 8, "decideIfWeDoRanges: range specs: " <<
2278 request->range << ", cachable: " <<
2279 request->flags.cachable << "; we_do_ranges: " << result);
2280
2281 return result;
2282}
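#if 0 // ---- editorial sketch, not part of Squid ----------------------------
// The decision above restated as a plain predicate over hypothetical boolean
// inputs; the real code inspects HttpHdrRange and the request flags directly.
#include <iostream>

static bool weDoRanges(bool haveParsedRangeSpec, bool replyCachable,
                       bool offsetLimitExceeded, bool connectionAuth)
{
    return haveParsedRangeSpec && replyCachable &&
           !offsetLimitExceeded && !connectionAuth;
}

int main()
{
    std::cout << weDoRanges(true, true, false, false) << '\n';  // 1: satisfy the range locally
    std::cout << weDoRanges(true, false, false, false) << '\n'; // 0: forward Range upstream
}
#endif // --------------------------------------------------------------------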
2283
2284/* build request prefix and append it to a given MemBuf;
2285 * return the length of the prefix */
2286mb_size_t
2287HttpStateData::buildRequestPrefix(MemBuf * mb)
2288{
2289 const int offset = mb->size;
2290 /* Uses a local httpver variable to print the HTTP label
2291 * since the HttpRequest may have an older version label.
2292 * XXX: This could create protocol bugs as the headers sent and
2293 * flow control should all be based on the HttpRequest version
2294 * not the one we are sending. Needs checking.
2295 */
2296 const AnyP::ProtocolVersion httpver = Http::ProtocolVersion();
2297 const SBuf url(flags.toOrigin ? request->url.path() : request->effectiveRequestUri());
2298 mb->appendf(SQUIDSBUFPH " " SQUIDSBUFPH " %s/%d.%d\r\n",
2299 SQUIDSBUFPRINT(request->method.image()),
2300 SQUIDSBUFPRINT(url),
2301 AnyP::ProtocolType_str[httpver.protocol],
2302 httpver.major,httpver.minor);
2303 /* build and pack headers */
2304 {
2305 HttpHeader hdr(hoRequest);
2306 forwardUpgrade(hdr); // before httpBuildRequestHeader() for CONNECTION
2307 httpBuildRequestHeader(request.getRaw(), entry, fwd->al, &hdr, flags);
2308
2309 if (request->flags.pinned && request->flags.connectionAuth)
2310 request->flags.authSent = true;
2311 else if (hdr.has(Http::HdrType::AUTHORIZATION))
2312 request->flags.authSent = true;
2313
2314 // The late placement of this check supports reply_header_add mangling,
2315 // but also complicates optimizing upgradeHeaderOut-like lookups.
2316 if (hdr.has(Http::HdrType::UPGRADE)) {
2317 assert(!upgradeHeaderOut);
2318 upgradeHeaderOut = new String(hdr.getList(Http::HdrType::UPGRADE));
2319 }
2320
2321 hdr.packInto(mb);
2322 hdr.clean();
2323 }
2324 /* append header terminator */
2325 mb->append(crlf, 2);
2326 return mb->size - offset;
2327}
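#if 0 // ---- editorial sketch, not part of Squid ----------------------------
// The request-line shape produced by buildRequestPrefix() above: origin-form
// path when talking to an origin server, absolute URI when talking through a
// peer proxy. The URLs are made up; Squid formats the real line with
// MemBuf::appendf().
#include <cstdio>

static void printRequestLine(bool toOrigin)
{
    const char *method = "GET";
    const char *target = toOrigin ? "/index.html"
                                  : "http://example.com/index.html";
    std::printf("%s %s HTTP/1.1\r\n", method, target);
}

int main()
{
    printRequestLine(true);  // GET /index.html HTTP/1.1
    printRequestLine(false); // GET http://example.com/index.html HTTP/1.1
}
#endif // --------------------------------------------------------------------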
2328
2329/* This will be called when connect completes. Write request. */
2330bool
2331HttpStateData::sendRequest()
2332{
2333 MemBuf mb;
2334
2335 debugs(11, 5, serverConnection << ", request " << request << ", this " << this << ".");
2336
2337 if (!Comm::IsConnOpen(serverConnection)) {
2338 debugs(11,3, "cannot send request to closing " << serverConnection);
2339 assert(closeHandler != NULL);
2340 return false;
2341 }
2342
2343 typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
2344 AsyncCall::Pointer timeoutCall = JobCallback(11, 5,
2345 TimeoutDialer, this, HttpStateData::httpTimeout);
2346 commSetConnTimeout(serverConnection, Config.Timeout.lifetime, timeoutCall);
2347 flags.do_next_read = true;
2348 maybeReadVirginBody();
2349
2350 if (request->body_pipe != NULL) {
2351 if (!startRequestBodyFlow()) // register to receive body data
2352 return false;
2353 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2354 requestSender = JobCallback(11,5,
2355 Dialer, this, HttpStateData::sentRequestBody);
2356
2357 Must(!flags.chunked_request);
2358 // use chunked encoding if we do not know the length
2359 if (request->content_length < 0)
2360 flags.chunked_request = true;
2361 } else {
2362 assert(!requestBodySource);
2363 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2364 requestSender = JobCallback(11,5,
2365 Dialer, this, HttpStateData::wroteLast);
2366 }
2367
2368 /*
2369 * Is keep-alive okay for all request methods?
2370 */
2371 if (request->flags.mustKeepalive)
2372 flags.keepalive = true;
2373 else if (request->flags.pinned)
2374 flags.keepalive = request->persistent();
2375 else if (!Config.onoff.server_pconns)
2376 flags.keepalive = false;
2377 else if (flags.tunneling)
2378 // tunneled, non-pinned, bumped requests must not use keep-alive
2379 flags.keepalive = !request->flags.sslBumped;
2380 else if (_peer == NULL)
2381 flags.keepalive = true;
2382 else if (_peer->stats.n_keepalives_sent < 10)
2383 flags.keepalive = true;
2384 else if ((double) _peer->stats.n_keepalives_recv /
2385 (double) _peer->stats.n_keepalives_sent > 0.50)
2386 flags.keepalive = true;
2387
2388 if (_peer && !flags.tunneling) {
2389 /*The old code here was
2390 if (neighborType(_peer, request->url) == PEER_SIBLING && ...
2391 which is equivalent to:
2392 if (neighborType(_peer, URL()) == PEER_SIBLING && ...
2393 or better:
2394 if (((_peer->type == PEER_MULTICAST && p->options.mcast_siblings) ||
2395 _peer->type == PEER_SIBLINGS ) && _peer->options.allow_miss)
2396 flags.only_if_cached = 1;
2397
2398 But I suppose it was a bug
2399 */
2400 if (neighborType(_peer, request->url) == PEER_SIBLING && !_peer->options.allow_miss)
2401 flags.only_if_cached = true;
2402
2403 flags.front_end_https = _peer->front_end_https;
2404 }
2405
2406 mb.init();
2407 request->peer_host=_peer?_peer->host:NULL;
2408 buildRequestPrefix(&mb);
2409
2410 debugs(11, 2, "HTTP Server " << serverConnection);
2411 debugs(11, 2, "HTTP Server REQUEST:\n---------\n" << mb.buf << "\n----------");
2412
2413 Comm::Write(serverConnection, &mb, requestSender);
2414 return true;
2415}
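#if 0 // ---- editorial sketch, not part of Squid ----------------------------
// The peer keep-alive heuristic applied above, as a standalone function:
// offer keep-alive until 10 keep-alive requests have been sent to the peer,
// then only while the peer has acknowledged more than half of them.
#include <iostream>

static bool offerKeepalive(int keepalivesSent, int keepalivesRecv)
{
    if (keepalivesSent < 10)
        return true; // grace period: not enough history yet
    return static_cast<double>(keepalivesRecv) /
           static_cast<double>(keepalivesSent) > 0.50;
}

int main()
{
    std::cout << offerKeepalive(5, 0) << '\n';    // 1: still in the grace period
    std::cout << offerKeepalive(100, 30) << '\n'; // 0: peer mostly ignores keep-alive
}
#endif // --------------------------------------------------------------------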
2416
2417bool
2418HttpStateData::getMoreRequestBody(MemBuf &buf)
2419{
2420 // parent's implementation can handle the no-encoding case
2421 if (!flags.chunked_request)
2422 return Client::getMoreRequestBody(buf);
2423
2424 MemBuf raw;
2425
2426 Must(requestBodySource != NULL);
2427 if (!requestBodySource->getMoreData(raw))
2428 return false; // no request body bytes to chunk yet
2429
2430 // optimization: pre-allocate buffer size that should be enough
2431 const mb_size_t rawDataSize = raw.contentSize();
2432 // we may need to send: hex-chunk-size CRLF raw-data CRLF last-chunk
2433 buf.init(16 + 2 + rawDataSize + 2 + 5, raw.max_capacity);
2434
2435 buf.appendf("%x\r\n", static_cast<unsigned int>(rawDataSize));
2436 buf.append(raw.content(), rawDataSize);
2437 buf.append("\r\n", 2);
2438
2439 Must(rawDataSize > 0); // we did not accidentally create last-chunk above
2440
2441 // Do not send last-chunk unless we successfully received everything
2442 if (receivedWholeRequestBody) {
2443 Must(!flags.sentLastChunk);
2444 flags.sentLastChunk = true;
2445 buf.append("0\r\n\r\n", 5);
2446 }
2447
2448 return true;
2449}
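#if 0 // ---- editorial sketch, not part of Squid ----------------------------
// The chunk framing built by getMoreRequestBody() above, on std::string
// instead of MemBuf: hex-size CRLF data CRLF, plus the "0\r\n\r\n" last-chunk
// once the entire request body has been received from the client.
#include <cstdio>
#include <iostream>
#include <string>

static std::string frameChunk(const std::string &data, bool bodyComplete)
{
    char sizeLine[32];
    std::snprintf(sizeLine, sizeof(sizeLine), "%zx\r\n", data.size());
    std::string out = sizeLine;
    out += data;
    out += "\r\n";
    if (bodyComplete)
        out += "0\r\n\r\n"; // last-chunk terminates the chunked body
    return out;
}

int main()
{
    std::cout << frameChunk("hello", true); // writes "5\r\nhello\r\n0\r\n\r\n"
}
#endif // --------------------------------------------------------------------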
2450
2451void
2452httpStart(FwdState *fwd)
2453{
2454 debugs(11, 3, fwd->request->method << ' ' << fwd->entry->url());
2455 AsyncJob::Start(new HttpStateData(fwd));
2456}
2457
2458void
2459HttpStateData::start()
2460{
2461 if (!sendRequest()) {
2462 debugs(11, 3, "httpStart: aborted");
2463 mustStop("HttpStateData::start failed");
2464 return;
2465 }
2466
2467 ++ statCounter.server.all.requests;
2468 ++ statCounter.server.http.requests;
2469
2470 /*
2471 * We used to set the read timeout here, but we no longer do.
2472 * Now it is set in sendComplete() after the full request,
2473 * including the request body, has been written to the server.
2474 */
2475}
2476
2477/// if broken posts are enabled for the request, try to fix and return true
2478bool
2479HttpStateData::finishingBrokenPost()
2480{
2481#if USE_HTTP_VIOLATIONS
2482 if (!Config.accessList.brokenPosts) {
2483 debugs(11, 5, "No brokenPosts list");
2484 return false;
2485 }
2486
2487 ACLFilledChecklist ch(Config.accessList.brokenPosts, originalRequest().getRaw());
2488 ch.al = fwd->al;
2489 ch.syncAle(originalRequest().getRaw(), nullptr);
2490 if (!ch.fastCheck().allowed()) {
2491 debugs(11, 5, "didn't match brokenPosts");
2492 return false;
2493 }
2494
2495 if (!Comm::IsConnOpen(serverConnection)) {
2496 debugs(11, 3, "ignoring broken POST for closed " << serverConnection);
2497 assert(closeHandler != NULL);
2498 return true; // prevent caller from proceeding as if nothing happened
2499 }
2500
2501 debugs(11, 3, "finishingBrokenPost: fixing broken POST");
2502 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2503 requestSender = JobCallback(11,5,
2504 Dialer, this, HttpStateData::wroteLast);
2505 Comm::Write(serverConnection, "\r\n", 2, requestSender, NULL);
2506 return true;
2507#else
2508 return false;
2509#endif /* USE_HTTP_VIOLATIONS */
2510}
2511
2512/// if needed, write last-chunk to end the request body and return true
2513bool
2514HttpStateData::finishingChunkedRequest()
2515{
2516 if (flags.sentLastChunk) {
2517 debugs(11, 5, "already sent last-chunk");
2518 return false;
2519 }
2520
2521 Must(receivedWholeRequestBody); // or we should not be sending last-chunk
2522 flags.sentLastChunk = true;
2523
2524 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2525 requestSender = JobCallback(11,5, Dialer, this, HttpStateData::wroteLast);
2526 Comm::Write(serverConnection, "0\r\n\r\n", 5, requestSender, NULL);
2527 return true;
2528}
2529
2530void
2531HttpStateData::doneSendingRequestBody()
2532{
2533 Client::doneSendingRequestBody();
2534 debugs(11,5, serverConnection);
2535
2536 // do we need to write something after the last body byte?
2537 if (flags.chunked_request && finishingChunkedRequest())
2538 return;
2539 if (!flags.chunked_request && finishingBrokenPost())
2540 return;
2541
2542 sendComplete();
2543}
2544
2545// more origin request body data is available
2546void
2547HttpStateData::handleMoreRequestBodyAvailable()
2548{
2549 if (eof || !Comm::IsConnOpen(serverConnection)) {
2550 // XXX: we should check this condition in other callbacks then!
2551 // TODO: Check whether this can actually happen: We should unsubscribe
2552 // as a body consumer when the above condition(s) are detected.
2553 debugs(11, DBG_IMPORTANT, "Transaction aborted while reading HTTP body");
2554 return;
2555 }
2556
2557 assert(requestBodySource != NULL);
2558
2559 if (requestBodySource->buf().hasContent()) {
2560 // XXX: why doesn't this trigger a debug message on every request?
2561
2562 if (flags.headers_parsed && !flags.abuse_detected) {
2563 flags.abuse_detected = true;
2564 debugs(11, DBG_IMPORTANT, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << request->client_addr << "' -> '" << entry->url() << "'" );
2565
2566 if (virginReply()->sline.status() == Http::scInvalidHeader) {
2567 closeServer();
2568 mustStop("HttpStateData::handleMoreRequestBodyAvailable");
2569 return;
2570 }
2571 }
2572 }
2573
2574 Client::handleMoreRequestBodyAvailable(); // let the parent class resume sending the request body
2575}
2576
2577// premature end of the request body
2578void
2579HttpStateData::handleRequestBodyProducerAborted()
2580{
2581 Client::handleRequestBodyProducerAborted();
2582 if (entry->isEmpty()) {
2583 debugs(11, 3, "request body aborted: " << serverConnection);
2584 // We usually get here when ICAP REQMOD aborts during body processing.
2585 // We might also get here if client-side aborts, but then our response
2586 // should not matter because either client-side will provide its own or
2587 // there will be no response at all (e.g., if the client has left).
2588 const auto err = new ErrorState(ERR_ICAP_FAILURE, Http::scInternalServerError, fwd->request, fwd->al);
2589 static const auto d = MakeNamedErrorDetail("SRV_REQMOD_REQ_BODY");
2590 err->detailError(d);
2591 fwd->fail(err);
2592 }
2593
2594 abortTransaction("request body producer aborted");
2595}
2596
2597// called when we wrote request headers(!) or a part of the body
2598void
2599HttpStateData::sentRequestBody(const CommIoCbParams &io)
2600{
2601 if (io.size > 0)
2602 statCounter.server.http.kbytes_out += io.size;
2603
2604 Client::sentRequestBody(io);
2605}
2606
2607void
2608HttpStateData::abortAll(const char *reason)
2609{
2610 debugs(11,5, "aborting transaction for " << reason <<
2611 "; " << serverConnection << ", this " << this);
2612 mustStop(reason);
2613}
2614
2615HttpStateData::ReuseDecision::ReuseDecision(const StoreEntry *e, const Http::StatusCode code)
2616 : answer(HttpStateData::ReuseDecision::reuseNot), reason(nullptr), entry(e), statusCode(code) {}
2617
2618HttpStateData::ReuseDecision::Answers
2619HttpStateData::ReuseDecision::make(const HttpStateData::ReuseDecision::Answers ans, const char *why)
2620{
2621 answer = ans;
2622 reason = why;
2623 return answer;
2624}
2625
2626std::ostream &operator <<(std::ostream &os, const HttpStateData::ReuseDecision &d)
2627{
2628 static const char *ReuseMessages[] = {
2629 "do not cache and do not share", // reuseNot
2630 "cache positively and share", // cachePositively
2631 "cache negatively and share", // cacheNegatively
2632 "do not cache but share" // doNotCacheButShare
2633 };
2634
2635 assert(d.answer >= HttpStateData::ReuseDecision::reuseNot &&
2636 d.answer <= HttpStateData::ReuseDecision::doNotCacheButShare);
2637 return os << ReuseMessages[d.answer] << " because " << d.reason <<
2638 "; HTTP status " << d.statusCode << " " << *(d.entry);
2639}
2640