1
2/*
3 * $Id$
4 *
5 * DEBUG: section 11 Hypertext Transfer Protocol (HTTP)
6 * AUTHOR: Harvest Derived
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36/*
37 * Anonymizing patch by lutz@as-node.jena.thur.de
38 * have a look at http-anon.c for more information.
39 */
40
41#include "squid.h"
42
43#include "acl/FilledChecklist.h"
44#if USE_AUTH
45#include "auth/UserRequest.h"
46#endif
47#include "base/AsyncJobCalls.h"
48#include "base/TextException.h"
49#include "base64.h"
50#include "comm/Write.h"
51#if USE_DELAY_POOLS
52#include "DelayPools.h"
53#endif
54#include "errorpage.h"
55#include "http.h"
56#include "HttpControlMsg.h"
57#include "HttpHdrContRange.h"
58#include "HttpHdrSc.h"
59#include "HttpHdrScTarget.h"
60#include "HttpReply.h"
61#include "HttpRequest.h"
62#include "MemBuf.h"
63#include "MemObject.h"
64#include "protos.h"
65#include "rfc1738.h"
66#include "SquidTime.h"
67#include "Store.h"
68
69
70#define SQUID_ENTER_THROWING_CODE() try {
71#define SQUID_EXIT_THROWING_CODE(status) \
72 status = true; \
73 } \
74 catch (const std::exception &e) { \
75 debugs (11, 1, "Exception error:" << e.what()); \
76 status = false; \
77 }
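
/*
 * Illustrative sketch only (added note, not part of the original code): the
 * macro pair above wraps a region in try/catch and records in a bool whether
 * it completed without a std::exception. Typical use, as in
 * decodeAndWriteReplyBody() below:
 *
 *   bool ok = false;
 *   SQUID_ENTER_THROWING_CODE();
 *   ... code that may throw ...
 *   SQUID_EXIT_THROWING_CODE(ok);
 *   // ok is now true unless a std::exception was caught (and logged)
 */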
78
79CBDATA_CLASS_INIT(HttpStateData);
80
81static const char *const crlf = "\r\n";
82
83static void httpMaybeRemovePublic(StoreEntry *, http_status);
84static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, HttpRequest * request, const HttpRequest * orig_request,
85 HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags);
86
87HttpStateData::HttpStateData(FwdState *theFwdState) : AsyncJob("HttpStateData"), ServerStateData(theFwdState),
88 lastChunk(0), header_bytes_read(0), reply_bytes_read(0),
89 body_bytes_truncated(0), httpChunkDecoder(NULL)
90{
91 debugs(11,5,HERE << "HttpStateData " << this << " created");
92 ignoreCacheControl = false;
93 surrogateNoStore = false;
94 fd = fwd->server_fd;
95 readBuf = new MemBuf;
96 readBuf->init();
97 orig_request = HTTPMSGLOCK(fwd->request);
98
99 // reset peer response time stats for %<pt
100 orig_request->hier.peer_http_request_sent.tv_sec = 0;
101 orig_request->hier.peer_http_request_sent.tv_usec = 0;
102
103 if (fwd->servers)
104 _peer = fwd->servers->_peer; /* might be NULL */
105
106 if (_peer) {
107 const char *url;
108
109 if (_peer->options.originserver)
110 url = orig_request->urlpath.termedBuf();
111 else
112 url = entry->url();
113
114 HttpRequest * proxy_req = new HttpRequest(orig_request->method,
115 orig_request->protocol, url);
116
117 proxy_req->SetHost(_peer->host);
118
119 proxy_req->port = _peer->http_port;
120
121 proxy_req->flags = orig_request->flags;
122
123 proxy_req->lastmod = orig_request->lastmod;
124
125 proxy_req->flags.proxying = 1;
126
127 HTTPMSGUNLOCK(request);
128
129 request = HTTPMSGLOCK(proxy_req);
130
131 /*
132 * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
133 * We might end up getting the object from somewhere else if,
134 * for example, the request to this neighbor fails.
135 */
136 if (_peer->options.proxy_only)
137 entry->releaseRequest();
138
139#if USE_DELAY_POOLS
140 entry->setNoDelay(_peer->options.no_delay);
141#endif
142 }
143
144 /*
145 * register the handler to free HTTP state data when the FD closes
146 */
147 typedef CommCbMemFunT<HttpStateData, CommCloseCbParams> Dialer;
148 closeHandler = JobCallback(9, 5,
149 Dialer, this, HttpStateData::httpStateConnClosed);
150 comm_add_close_handler(fd, closeHandler);
151}
152
153HttpStateData::~HttpStateData()
154{
155 /*
156 * don't forget that ~ServerStateData() gets called automatically
157 */
158
159 if (!readBuf->isNull())
160 readBuf->clean();
161
162 delete readBuf;
163
164 if (httpChunkDecoder)
165 delete httpChunkDecoder;
166
167 HTTPMSGUNLOCK(orig_request);
168
169 debugs(11,5, HERE << "HttpStateData " << this << " destroyed; FD " << fd);
170}
171
172int
173HttpStateData::dataDescriptor() const
174{
175 return fd;
176}
177/*
178static void
179httpStateFree(int fd, void *data)
180{
181 HttpStateData *httpState = static_cast<HttpStateData *>(data);
182 debugs(11, 5, "httpStateFree: FD " << fd << ", httpState=" << data);
183 delete httpState;
184}*/
185
186void
187HttpStateData::httpStateConnClosed(const CommCloseCbParams &params)
188{
189 debugs(11, 5, "httpStateFree: FD " << params.fd << ", httpState=" << params.data);
190 deleteThis("HttpStateData::httpStateConnClosed");
191}
192
193int
194httpCachable(const HttpRequestMethod& method)
195{
196 /* GET and HEAD are cachable. Others are not. */
197
198 // TODO: replace with HttpRequestMethod::isCachable()?
199 if (method != METHOD_GET && method != METHOD_HEAD)
200 return 0;
201
202 /* else cachable */
203 return 1;
204}
205
206void
207HttpStateData::httpTimeout(const CommTimeoutCbParams &params)
208{
209 debugs(11, 4, "httpTimeout: FD " << fd << ": '" << entry->url() << "'" );
210
211 if (entry->store_status == STORE_PENDING) {
212 fwd->fail(errorCon(ERR_READ_TIMEOUT, HTTP_GATEWAY_TIMEOUT, fwd->request));
213 }
214
215 comm_close(fd);
216}
217
218static void
219httpMaybeRemovePublic(StoreEntry * e, http_status status)
220{
221 int remove = 0;
222 int forbidden = 0;
223 StoreEntry *pe;
224
225 if (!EBIT_TEST(e->flags, KEY_PRIVATE))
226 return;
227
228 switch (status) {
229
230 case HTTP_OK:
231
232 case HTTP_NON_AUTHORITATIVE_INFORMATION:
233
234 case HTTP_MULTIPLE_CHOICES:
235
236 case HTTP_MOVED_PERMANENTLY:
237
238 case HTTP_MOVED_TEMPORARILY:
239
240 case HTTP_GONE:
241
242 case HTTP_NOT_FOUND:
243 remove = 1;
244
245 break;
246
247 case HTTP_FORBIDDEN:
248
249 case HTTP_METHOD_NOT_ALLOWED:
250 forbidden = 1;
251
252 break;
253
254#if WORK_IN_PROGRESS
255
256 case HTTP_UNAUTHORIZED:
257 forbidden = 1;
258
259 break;
260
261#endif
262
263 default:
264#if QUESTIONABLE
265 /*
266 * Any 2xx response should eject previously cached entities...
267 */
268
269 if (status >= 200 && status < 300)
270 remove = 1;
271
272#endif
273
274 break;
275 }
276
277 if (!remove && !forbidden)
278 return;
279
280 assert(e->mem_obj);
281
282 if (e->mem_obj->request)
283 pe = storeGetPublicByRequest(e->mem_obj->request);
284 else
285 pe = storeGetPublic(e->mem_obj->url, e->mem_obj->method);
286
287 if (pe != NULL) {
288 assert(e != pe);
289#if USE_HTCP
290 neighborsHtcpClear(e, NULL, e->mem_obj->request, e->mem_obj->method, HTCP_CLR_INVALIDATION);
291#endif
292 pe->release();
293 }
294
295 /** \par
296 * Also remove any cached HEAD response in case the object has
297 * changed.
298 */
299 if (e->mem_obj->request)
300 pe = storeGetPublicByRequestMethod(e->mem_obj->request, METHOD_HEAD);
301 else
302 pe = storeGetPublic(e->mem_obj->url, METHOD_HEAD);
303
304 if (pe != NULL) {
305 assert(e != pe);
306#if USE_HTCP
307 neighborsHtcpClear(e, NULL, e->mem_obj->request, HttpRequestMethod(METHOD_HEAD), HTCP_CLR_INVALIDATION);
308#endif
309 pe->release();
310 }
311}
312
313void
314HttpStateData::processSurrogateControl(HttpReply *reply)
315{
316 if (request->flags.accelerated && reply->surrogate_control) {
317 HttpHdrScTarget *sctusable = httpHdrScGetMergedTarget(reply->surrogate_control, Config.Accel.surrogate_id);
318
319 if (sctusable) {
320 if (EBIT_TEST(sctusable->mask, SC_NO_STORE) ||
321 (Config.onoff.surrogate_is_remote
322 && EBIT_TEST(sctusable->mask, SC_NO_STORE_REMOTE))) {
323 surrogateNoStore = true;
324 entry->makePrivate();
325 }
326
327 /* The HttpHeader logic cannot tell if the header it's parsing is a reply to an
328 * accelerated request or not...
329 * Still, this is an abstraction breach. - RC
330 */
331 if (sctusable->max_age != -1) {
332 if (sctusable->max_age < sctusable->max_stale)
333 reply->expires = reply->date + sctusable->max_age;
334 else
335 reply->expires = reply->date + sctusable->max_stale;
336
337 /* And update the timestamps */
338 entry->timestampsSet();
339 }
340
341 /* We ignore cache-control directives as per the Surrogate specification */
342 ignoreCacheControl = true;
343
344 httpHdrScTargetDestroy(sctusable);
345 }
346 }
347}
348
349int
350HttpStateData::cacheableReply()
351{
352 HttpReply const *rep = finalReply();
353 HttpHeader const *hdr = &rep->header;
354 const int cc_mask = (rep->cache_control) ? rep->cache_control->mask : 0;
355 const char *v;
356#if USE_HTTP_VIOLATIONS
357
358 const refresh_t *R = NULL;
359
360 /* This strange-looking define first looks up the refresh pattern
361 * and then checks whether the specified flag is set. Its main purpose
362 * is to simplify the refresh pattern lookup and the USE_HTTP_VIOLATIONS
363 * conditional.
364 */
365#define REFRESH_OVERRIDE(flag) \
366 ((R = (R ? R : refreshLimits(entry->mem_obj->url))) , \
367 (R && R->flags.flag))
368#else
369#define REFRESH_OVERRIDE(flag) 0
370#endif
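
    /*
     * Reader's note (added, illustrative): with USE_HTTP_VIOLATIONS enabled,
     * REFRESH_OVERRIDE(ignore_no_store) expands to roughly
     *
     *   ((R = (R ? R : refreshLimits(entry->mem_obj->url))), (R && R->flags.ignore_no_store))
     *
     * i.e. the matching refresh_pattern entry is looked up once, cached in R,
     * and the named override flag is tested. Without violations it is just 0.
     */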
371
372 if (surrogateNoStore)
373 return 0;
374
375 // RFC 2616: do not cache replies to requests with a no-store CC directive
376 if (request && request->cache_control &&
377 EBIT_TEST(request->cache_control->mask, CC_NO_STORE) &&
378 !REFRESH_OVERRIDE(ignore_no_store))
379 return 0;
380
381 if (!ignoreCacheControl) {
382 if (EBIT_TEST(cc_mask, CC_PRIVATE)) {
383 if (!REFRESH_OVERRIDE(ignore_private))
384 return 0;
385 }
386
387 if (EBIT_TEST(cc_mask, CC_NO_CACHE)) {
388 if (!REFRESH_OVERRIDE(ignore_no_cache))
389 return 0;
390 }
391
392 if (EBIT_TEST(cc_mask, CC_NO_STORE)) {
393 if (!REFRESH_OVERRIDE(ignore_no_store))
394 return 0;
395 }
396 }
397
398 if (request->flags.auth || request->flags.auth_sent) {
399 /*
400 * Responses to requests with authorization may be cached
401 * only if a Cache-Control: public reply header is present.
402 * RFC 2068, sec 14.9.4
403 */
404
405 if (!EBIT_TEST(cc_mask, CC_PUBLIC)) {
406 if (!REFRESH_OVERRIDE(ignore_auth))
407 return 0;
408 }
409 }
410
411 /* Pragma: no-cache in _replies_ is not documented in HTTP,
412 * but servers like "Active Imaging Webcast/2.0" sure do use it */
413 if (hdr->has(HDR_PRAGMA)) {
414 String s = hdr->getList(HDR_PRAGMA);
415 const int no_cache = strListIsMember(&s, "no-cache", ',');
416 s.clean();
417
418 if (no_cache) {
419 if (!REFRESH_OVERRIDE(ignore_no_cache))
420 return 0;
421 }
422 }
423
424 /*
425 * The "multipart/x-mixed-replace" content type is used for
426 * continuous push replies. These are generally dynamic and
427 * probably should not be cachable
428 */
429 if ((v = hdr->getStr(HDR_CONTENT_TYPE)))
430 if (!strncasecmp(v, "multipart/x-mixed-replace", 25))
431 return 0;
432
433 switch (rep->sline.status) {
434 /* Responses that are cacheable */
435
436 case HTTP_OK:
437
438 case HTTP_NON_AUTHORITATIVE_INFORMATION:
439
440 case HTTP_MULTIPLE_CHOICES:
441
442 case HTTP_MOVED_PERMANENTLY:
443
444 case HTTP_GONE:
445 /*
446 * Don't cache objects that need to be refreshed on next request,
447 * unless we know how to refresh them.
448 */
449
450 if (!refreshIsCachable(entry) && !REFRESH_OVERRIDE(store_stale)) {
451 debugs(22, 3, "refreshIsCachable() returned non-cacheable..");
452 return 0;
453 } else
454 return 1;
455
456 /* NOTREACHED */
457 break;
458
459 /* Responses that only are cacheable if the server says so */
460
461 case HTTP_MOVED_TEMPORARILY:
462 case HTTP_TEMPORARY_REDIRECT:
463 if (rep->expires > rep->date && rep->date > 0)
464 return 1;
465 else
466 return 0;
467
468 /* NOTREACHED */
469 break;
470
471 /* Errors can be negatively cached */
472
473 case HTTP_NO_CONTENT:
474
475 case HTTP_USE_PROXY:
476
477 case HTTP_BAD_REQUEST:
478
479 case HTTP_FORBIDDEN:
480
481 case HTTP_NOT_FOUND:
482
483 case HTTP_METHOD_NOT_ALLOWED:
484
485 case HTTP_REQUEST_URI_TOO_LARGE:
486
487 case HTTP_INTERNAL_SERVER_ERROR:
488
489 case HTTP_NOT_IMPLEMENTED:
490
491 case HTTP_BAD_GATEWAY:
492
493 case HTTP_SERVICE_UNAVAILABLE:
494
495 case HTTP_GATEWAY_TIMEOUT:
496 return -1;
497
498 /* NOTREACHED */
499 break;
500
501 /* Some responses can never be cached */
502
503 case HTTP_PARTIAL_CONTENT: /* Not yet supported */
504
505 case HTTP_SEE_OTHER:
506
507 case HTTP_NOT_MODIFIED:
508
509 case HTTP_UNAUTHORIZED:
510
511 case HTTP_PROXY_AUTHENTICATION_REQUIRED:
512
513 case HTTP_INVALID_HEADER: /* Squid header parsing error */
514
515 case HTTP_HEADER_TOO_LARGE:
516
517 case HTTP_PAYMENT_REQUIRED:
518 case HTTP_NOT_ACCEPTABLE:
519 case HTTP_REQUEST_TIMEOUT:
520 case HTTP_CONFLICT:
521 case HTTP_LENGTH_REQUIRED:
522 case HTTP_PRECONDITION_FAILED:
523 case HTTP_REQUEST_ENTITY_TOO_LARGE:
524 case HTTP_UNSUPPORTED_MEDIA_TYPE:
525 case HTTP_UNPROCESSABLE_ENTITY:
526 case HTTP_LOCKED:
527 case HTTP_FAILED_DEPENDENCY:
528 case HTTP_INSUFFICIENT_STORAGE:
529 case HTTP_REQUESTED_RANGE_NOT_SATISFIABLE:
530 case HTTP_EXPECTATION_FAILED:
531
532 return 0;
533
534 default:
535 /* RFC 2616 section 6.1.1: an unrecognized response MUST NOT be cached. */
536 debugs (11, 3, HERE << "Unknown HTTP status code " << rep->sline.status << ". Not cacheable.");
537
538 return 0;
539
540 /* NOTREACHED */
541 break;
542 }
543
544 /* NOTREACHED */
545}
546
547/*
548 * For Vary, store the relevant request headers as
549 * virtual headers in the reply
550 * Returns NULL if the variance cannot be stored
551 */
552const char *
553httpMakeVaryMark(HttpRequest * request, HttpReply const * reply)
554{
555 String vary, hdr;
556 const char *pos = NULL;
557 const char *item;
558 const char *value;
559 int ilen;
560 static String vstr;
561
562 vstr.clean();
563 vary = reply->header.getList(HDR_VARY);
564
565 while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
566 char *name = (char *)xmalloc(ilen + 1);
567 xstrncpy(name, item, ilen + 1);
568 Tolower(name);
569
570 if (strcmp(name, "*") == 0) {
571 /* Cannot handle "Vary: *" without ETag support */
572 safe_free(name);
573 vstr.clean();
574 break;
575 }
576
577 strListAdd(&vstr, name, ',');
578 hdr = request->header.getByName(name);
579 safe_free(name);
580 value = hdr.termedBuf();
581
582 if (value) {
583 value = rfc1738_escape_part(value);
584 vstr.append("=\"", 2);
585 vstr.append(value);
586 vstr.append("\"", 1);
587 }
588
589 hdr.clean();
590 }
591
592 vary.clean();
593#if X_ACCELERATOR_VARY
594
595 pos = NULL;
596 vary = reply->header.getList(HDR_X_ACCELERATOR_VARY);
597
598 while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
599 char *name = (char *)xmalloc(ilen + 1);
600 xstrncpy(name, item, ilen + 1);
601 Tolower(name);
602 strListAdd(&vstr, name, ',');
603 hdr = request->header.getByName(name);
604 safe_free(name);
605 value = hdr.termedBuf();
606
607 if (value) {
608 value = rfc1738_escape_part(value);
609 vstr.append("=\"", 2);
610 vstr.append(value);
611 vstr.append("\"", 1);
612 }
613
614 hdr.clean();
615 }
616
617 vary.clean();
618#endif
619
620 debugs(11, 3, "httpMakeVaryMark: " << vstr);
621 return vstr.termedBuf();
622}
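
/*
 * Example (added, illustrative; not from the original sources): for a reply
 * carrying "Vary: Accept-Encoding" and a request carrying
 * "Accept-Encoding: gzip, deflate", the mark built above looks roughly like
 *
 *   accept-encoding="gzip%2C%20deflate"
 *
 * i.e. lowercased header names joined with commas, each followed, when the
 * request carries that header, by the rfc1738-escaped value in quotes.
 * A reply with "Vary: *" produces an empty mark because Squid cannot handle
 * it without ETag support.
 */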
623
624void
625HttpStateData::keepaliveAccounting(HttpReply *reply)
626{
627 if (flags.keepalive)
628 if (_peer)
629 _peer->stats.n_keepalives_sent++;
630
631 if (reply->keep_alive) {
632 if (_peer)
633 _peer->stats.n_keepalives_recv++;
634
635 if (Config.onoff.detect_broken_server_pconns
636 && reply->bodySize(request->method) == -1 && !flags.chunked) {
637 debugs(11, 1, "keepaliveAccounting: Impossible keep-alive header from '" << entry->url() << "'" );
638 // debugs(11, 2, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------" );
639 flags.keepalive_broken = 1;
640 }
641 }
642}
643
644void
645HttpStateData::checkDateSkew(HttpReply *reply)
646{
647 if (reply->date > -1 && !_peer) {
648 int skew = abs((int)(reply->date - squid_curtime));
649
650 if (skew > 86400)
651 debugs(11, 3, "" << request->GetHost() << "'s clock is skewed by " << skew << " seconds!");
652 }
653}
654
655/**
656 * This creates the error page itself. It is likely
657 * that the forward-ported reply-header-max-size patch
658 * generates non-HTTP-conformant error pages, in which
659 * case the errors here should be 'BAD_GATEWAY' etc.
660 */
661void
662HttpStateData::processReplyHeader()
663{
664 /** Creates a blank header. If this routine is made incremental, this will not do */
665
666 /* NP: all exit points to this function MUST call ctx_exit(ctx) */
667 Ctx ctx = ctx_enter(entry->mem_obj->url);
668
669 debugs(11, 3, "processReplyHeader: key '" << entry->getMD5Text() << "'");
670
671 assert(!flags.headers_parsed);
672
673 if (!readBuf->hasContent()) {
674 ctx_exit(ctx);
675 return;
676 }
677
678 http_status error = HTTP_STATUS_NONE;
679
680 HttpReply *newrep = new HttpReply;
681 const bool parsed = newrep->parse(readBuf, eof, &error);
682
683 if (!parsed && readBuf->contentSize() > 5 && strncmp(readBuf->content(), "HTTP/", 5) != 0 && strncmp(readBuf->content(), "ICY", 3) != 0) {
684 MemBuf *mb;
685 HttpReply *tmprep = new HttpReply;
686 tmprep->setHeaders(HTTP_OK, "Gatewaying", NULL, -1, -1, -1);
687 tmprep->header.putExt("X-Transformed-From", "HTTP/0.9");
688 mb = tmprep->pack();
689 newrep->parse(mb, eof, &error);
690 delete mb;
691 delete tmprep;
692 } else {
693 if (!parsed && error > 0) { // unrecoverable parsing error
694 debugs(11, 3, "processReplyHeader: Non-HTTP-compliant header: '" << readBuf->content() << "'");
695 flags.headers_parsed = 1;
696 newrep->sline.version = HttpVersion(1,1);
697 newrep->sline.status = error;
698 HttpReply *vrep = setVirginReply(newrep);
699 entry->replaceHttpReply(vrep);
700 ctx_exit(ctx);
701 return;
702 }
703
704 if (!parsed) { // need more data
705 assert(!error);
706 assert(!eof);
707 delete newrep;
708 ctx_exit(ctx);
709 return;
710 }
711
712 debugs(11, 9, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------");
713
714 header_bytes_read = headersEnd(readBuf->content(), readBuf->contentSize());
715 readBuf->consume(header_bytes_read);
716 }
717
718 newrep->removeStaleWarnings();
719
720 if (newrep->sline.protocol == AnyP::PROTO_HTTP && newrep->sline.status >= 100 && newrep->sline.status < 200) {
721 handle1xx(newrep);
722 ctx_exit(ctx);
723 return;
724 }
725
726 flags.chunked = 0;
727 if (newrep->sline.protocol == AnyP::PROTO_HTTP && newrep->header.chunked()) {
728 flags.chunked = 1;
729 httpChunkDecoder = new ChunkedCodingParser;
730 }
731
732 if (!peerSupportsConnectionPinning())
733 orig_request->flags.connection_auth_disabled = 1;
734
735 HttpReply *vrep = setVirginReply(newrep);
736 flags.headers_parsed = 1;
737
738 keepaliveAccounting(vrep);
739
740 checkDateSkew(vrep);
741
742 processSurrogateControl (vrep);
743
744 /** \todo IF the reply is a 1.0 reply, AND it has a Connection: Header
745 * Parse the header and remove all referenced headers
746 */
747
748 orig_request->hier.peer_reply_status = newrep->sline.status;
749
750 ctx_exit(ctx);
751}
752
753/// ignore or start forwarding the 1xx response (a.k.a., control message)
754void
755HttpStateData::handle1xx(HttpReply *reply)
756{
757 HttpMsgPointerT<HttpReply> msg(reply); // will destroy reply if unused
758
759 // one 1xx at a time: we must not be called while waiting for previous 1xx
760 Must(!flags.handling1xx);
761 flags.handling1xx = true;
762
763 if (!orig_request->canHandle1xx()) {
764 debugs(11, 2, HERE << "ignoring client-unsupported 1xx");
765 proceedAfter1xx();
766 return;
767 }
768
769#if USE_HTTP_VIOLATIONS
770 // check whether the 1xx response forwarding is allowed by squid.conf
771 if (Config.accessList.reply) {
772 ACLFilledChecklist ch(Config.accessList.reply, originalRequest(), NULL);
773 ch.reply = HTTPMSGLOCK(reply);
774 if (!ch.fastCheck()) { // TODO: support slow lookups?
775 debugs(11, 3, HERE << "ignoring denied 1xx");
776 proceedAfter1xx();
777 return;
778 }
779 }
780#endif // USE_HTTP_VIOLATIONS
781
782 debugs(11, 2, HERE << "forwarding 1xx to client");
783
784 // the Sink will use this to call us back after writing 1xx to the client
785 typedef NullaryMemFunT<HttpStateData> CbDialer;
786 const AsyncCall::Pointer cb = JobCallback(11, 3, CbDialer, this,
787 HttpStateData::proceedAfter1xx);
788 CallJobHere1(11, 4, orig_request->clientConnection, ConnStateData,
789 ConnStateData::sendControlMsg, HttpControlMsg(msg, cb));
790 // If the call is not fired, then the Sink is gone, and HttpStateData
791 // will terminate due to an aborted store entry or another similar error.
792 // If we get stuck, it is not handle1xx's fault: we could get stuck
793 // for similar reasons without a 1xx response.
794}
795
796/// restores state and resumes processing after 1xx is ignored or forwarded
797void
798HttpStateData::proceedAfter1xx()
799{
800 Must(flags.handling1xx);
801
802 debugs(11, 2, HERE << "consuming " << header_bytes_read <<
803 " header and " << reply_bytes_read << " body bytes read after 1xx");
804 header_bytes_read = 0;
805 reply_bytes_read = 0;
806
807 CallJobHere(11, 3, this, HttpStateData, HttpStateData::processReply);
808}
809
810
811/**
812 * returns true if the peer can support connection pinning
813*/
814bool HttpStateData::peerSupportsConnectionPinning() const
815{
816 const HttpReply *rep = entry->mem_obj->getReply();
817 const HttpHeader *hdr = &rep->header;
818 bool rc;
819 String header;
820
821 if (!_peer)
822 return true;
823
824 /* If this peer does not support connection pinning (authenticated
825 connections), return false.
826 */
827 if (!_peer->connection_auth)
828 return false;
829
830 /*The peer supports connection pinning and the http reply status
831 is not unauthorized, so the related connection can be pinned
832 */
833 if (rep->sline.status != HTTP_UNAUTHORIZED)
834 return true;
835
836 /* The server responded with HTTP_UNAUTHORIZED and the peer is configured
837 with "connection-auth=on", so we know that the peer supports pinned
838 connections.
839 */
840 if (_peer->connection_auth == 1)
841 return true;
842
843 /* At this point the peer is configured with the "connection-auth=auto"
844 parameter, so we need some extra checks to decide whether we are going
845 to allow pinned connections or not.
846 */
847
848 /* If the peer is configured with originserver, just allow connection
849 pinning (squid 2.6 behaviour).
850 */
851 if (_peer->options.originserver)
852 return true;
853
854 /* If the connection is already pinned, it is OK. */
855 if (request->flags.pinned)
856 return true;
857
858 /* Allow pinned connections only if the Proxy-support header exists in
859 the reply and its list contains "Session-Based-Authentication",
860 which means that the peer supports connection pinning.
861 */
862 if (!hdr->has(HDR_PROXY_SUPPORT))
863 return false;
864
865 header = hdr->getStrOrList(HDR_PROXY_SUPPORT);
866 /* XXX This ought to be done in a case-insensitive manner */
867 rc = (strstr(header.termedBuf(), "Session-Based-Authentication") != NULL);
868
869 return rc;
870}
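
/*
 * Illustrative note (added): with "connection-auth=auto", a 401 reply from the
 * peer is treated as pinnable only when it includes a header like
 *
 *   Proxy-support: Session-Based-Authentication
 *
 * (as sent by peers doing connection-oriented authentication such as NTLM),
 * which is what the HDR_PROXY_SUPPORT check above looks for.
 */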
871
872// Called when we have parsed (and possibly adapted) the headers but
873// have not yet started storing (a.k.a., sending) the body.
874void
875HttpStateData::haveParsedReplyHeaders()
876{
877 ServerStateData::haveParsedReplyHeaders();
878
879 Ctx ctx = ctx_enter(entry->mem_obj->url);
880 HttpReply *rep = finalReply();
881
882 if (rep->sline.status == HTTP_PARTIAL_CONTENT &&
883 rep->content_range)
884 currentOffset = rep->content_range->spec.offset;
885
886 entry->timestampsSet();
887
888 /* Check if object is cacheable or not based on reply code */
889 debugs(11, 3, "haveParsedReplyHeaders: HTTP CODE: " << rep->sline.status);
890
891 if (neighbors_do_private_keys)
892 httpMaybeRemovePublic(entry, rep->sline.status);
893
894 if (rep->header.has(HDR_VARY)
895#if X_ACCELERATOR_VARY
896 || rep->header.has(HDR_X_ACCELERATOR_VARY)
897#endif
898 ) {
899 const char *vary = httpMakeVaryMark(orig_request, rep);
900
901 if (!vary) {
902 entry->makePrivate();
903 if (!fwd->reforwardableStatus(rep->sline.status))
904 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
905 goto no_cache;
906 }
907
908 entry->mem_obj->vary_headers = xstrdup(vary);
909 }
910
911 /*
912 * If it's not a reply that we will re-forward, then
913 * allow the client to get it.
914 */
915 if (!fwd->reforwardableStatus(rep->sline.status))
916 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
917
918 switch (cacheableReply()) {
919
920 case 1:
921 entry->makePublic();
922 break;
923
924 case 0:
925 entry->makePrivate();
926 break;
927
928 case -1:
929
930#if USE_HTTP_VIOLATIONS
931 if (Config.negativeTtl > 0)
932 entry->cacheNegatively();
933 else
934#endif
935 entry->makePrivate();
936
937 break;
938
939 default:
940 assert(0);
941
942 break;
943 }
944
945no_cache:
946
947 if (!ignoreCacheControl && rep->cache_control) {
948 if (EBIT_TEST(rep->cache_control->mask, CC_PROXY_REVALIDATE) ||
949 EBIT_TEST(rep->cache_control->mask, CC_MUST_REVALIDATE) ||
950 EBIT_TEST(rep->cache_control->mask, CC_S_MAXAGE))
951 EBIT_SET(entry->flags, ENTRY_REVALIDATE);
952 }
953
954#if HEADERS_LOG
955 headersLog(1, 0, request->method, rep);
956
957#endif
958
959 ctx_exit(ctx);
960}
961
962HttpStateData::ConnectionStatus
963HttpStateData::statusIfComplete() const
964{
965 const HttpReply *rep = virginReply();
966 /** \par
967 * If the reply wants to close the connection, it takes precedence */
968
969 if (httpHeaderHasConnDir(&rep->header, "close"))
970 return COMPLETE_NONPERSISTENT_MSG;
971
972 /** \par
973 * If we didn't send a keep-alive request header, then this
974 * can not be a persistent connection.
975 */
976 if (!flags.keepalive)
977 return COMPLETE_NONPERSISTENT_MSG;
978
979 /** \par
980 * If we haven't sent the whole request then this can not be a persistent
981 * connection.
982 */
983 if (!flags.request_sent) {
984 debugs(11, 2, "statusIfComplete: Request not yet fully sent \"" << RequestMethodStr(orig_request->method) << " " << entry->url() << "\"" );
985 return COMPLETE_NONPERSISTENT_MSG;
986 }
987
988 /** \par
989 * What does the reply have to say about keep-alive?
990 */
991 /**
992 \bug XXX BUG?
993 * If the origin server (HTTP/1.0) does not send a keep-alive
994 * header, but keeps the connection open anyway, what happens?
995 * We'll return here and http.c waits for an EOF before changing
996 * store_status to STORE_OK. Combine this with ENTRY_FWD_HDR_WAIT
997 * and an error status code, and we might have to wait until
998 * the server times out the socket.
999 */
1000 if (!rep->keep_alive)
1001 return COMPLETE_NONPERSISTENT_MSG;
1002
1003 return COMPLETE_PERSISTENT_MSG;
1004}
1005
1006HttpStateData::ConnectionStatus
1007HttpStateData::persistentConnStatus() const
1008{
1009 debugs(11, 3, "persistentConnStatus: FD " << fd << " eof=" << eof);
1010 if (eof) // already reached EOF
1011 return COMPLETE_NONPERSISTENT_MSG;
1012
1013 /* If server fd is closing (but we have not been notified yet), stop Comm
1014 I/O to avoid assertions. TODO: Change Comm API to handle callers that
1015 want more I/O after async closing (usually initiated by others). */
1016 // XXX: add canReceive or s/canSend/canTalkToServer/
1017 if (!canSend(fd))
1018 return COMPLETE_NONPERSISTENT_MSG;
1019
1020 /** \par
1021 * In a chunked response we do not know the content length, but we are
1022 * absolutely sure about the end of the response, so we call statusIfComplete()
1023 * to decide whether we can be persistent.
1024 */
1025 if (lastChunk && flags.chunked)
1026 return statusIfComplete();
1027
1028 const HttpReply *vrep = virginReply();
1029 debugs(11, 5, "persistentConnStatus: content_length=" << vrep->content_length);
1030
1031 const int64_t clen = vrep->bodySize(request->method);
1032
1033 debugs(11, 5, "persistentConnStatus: clen=" << clen);
1034
1035 /* If the body size is unknown we must wait for EOF */
1036 if (clen < 0)
1037 return INCOMPLETE_MSG;
1038
1039 /** \par
1040 * If the body size is known, we must wait until we've gotten all of it. */
1041 if (clen > 0) {
1042 // old technique:
1043 // if (entry->mem_obj->endOffset() < vrep->content_length + vrep->hdr_sz)
1044 const int64_t body_bytes_read = reply_bytes_read - header_bytes_read;
1045 debugs(11,5, "persistentConnStatus: body_bytes_read=" <<
1046 body_bytes_read << " content_length=" << vrep->content_length);
1047
1048 if (body_bytes_read < vrep->content_length)
1049 return INCOMPLETE_MSG;
1050
1051 if (body_bytes_truncated > 0) // already read more than needed
1052 return COMPLETE_NONPERSISTENT_MSG; // disable pconns
1053 }
1054
1055 /** \par
1056 * If there is no message body or we got it all, we can be persistent */
1057 return statusIfComplete();
1058}
1059
1060/*
1061 * This is the callback after some data has been read from the network
1062 */
1063/*
1064void
1065HttpStateData::ReadReplyWrapper(int fd, char *buf, size_t len, comm_err_t flag, int xerrno, void *data)
1066{
1067 HttpStateData *httpState = static_cast<HttpStateData *>(data);
1068 assert (fd == httpState->fd);
1069 // assert(buf == readBuf->content());
1070 PROF_start(HttpStateData_readReply);
1071 httpState->readReply(len, flag, xerrno);
1072 PROF_stop(HttpStateData_readReply);
1073}
1074*/
1075
1076/* XXX this function is too long! */
1077void
1078HttpStateData::readReply(const CommIoCbParams &io)
1079{
1080 int bin;
1081 int clen;
1082 int len = io.size;
1083
1084 assert(fd == io.fd);
1085
1086 flags.do_next_read = 0;
1087
1088 debugs(11, 5, "httpReadReply: FD " << fd << ": len " << len << ".");
1089
1090 // Bail out early on COMM_ERR_CLOSING - close handlers will tidy up for us
1091 if (io.flag == COMM_ERR_CLOSING) {
1092 debugs(11, 3, "http socket closing");
1093 return;
1094 }
1095
1096 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
1097 maybeReadVirginBody();
1098 return;
1099 }
1100
1101 // handle I/O errors
1102 if (io.flag != COMM_OK || len < 0) {
1103 debugs(11, 2, "httpReadReply: FD " << fd << ": read failure: " << xstrerror() << ".");
1104
1105 if (ignoreErrno(io.xerrno)) {
1106 flags.do_next_read = 1;
1107 } else {
1108 ErrorState *err;
1109 err = errorCon(ERR_READ_ERROR, HTTP_BAD_GATEWAY, fwd->request);
1110 err->xerrno = io.xerrno;
1111 fwd->fail(err);
1112 flags.do_next_read = 0;
1113 comm_close(fd);
1114 }
1115
1116 return;
1117 }
1118
1119 // update I/O stats
1120 if (len > 0) {
1121 readBuf->appended(len);
1122 reply_bytes_read += len;
1123#if USE_DELAY_POOLS
1124 DelayId delayId = entry->mem_obj->mostBytesAllowed();
1125 delayId.bytesIn(len);
1126#endif
1127
1128 kb_incr(&statCounter.server.all.kbytes_in, len);
1129 kb_incr(&statCounter.server.http.kbytes_in, len);
1130 IOStats.Http.reads++;
1131
1132 for (clen = len - 1, bin = 0; clen; bin++)
1133 clen >>= 1;
1134
1135 IOStats.Http.read_hist[bin]++;
1136
1137 // update peer response time stats (%<pt)
1138 const timeval &sent = orig_request->hier.peer_http_request_sent;
1139 orig_request->hier.peer_response_time =
1140 sent.tv_sec ? tvSubMsec(sent, current_time) : -1;
1141 }
1142
1143 /** \par
1144 * Here the RFC says we should ignore whitespace between replies, but we can't, as
1145 * doing so breaks HTTP/0.9 replies beginning with whitespace. In addition,
1146 * the response-splitting countermeasures are extremely likely to trigger on this,
1147 * not allowing connection reuse in the first place.
1148 */
1149#if DONT_DO_THIS
1150 if (!flags.headers_parsed && len > 0 && fd_table[fd].uses > 1) {
1151 /* Skip whitespace between replies */
1152
1153 while (len > 0 && xisspace(*buf))
1154 memmove(buf, buf + 1, len--);
1155
1156 if (len == 0) {
1157 /* Continue to read... */
1158 /* Timeout NOT increased. This whitespace was from previous reply */
1159 flags.do_next_read = 1;
1160 maybeReadVirginBody();
1161 return;
1162 }
1163 }
1164
1165#endif
1166
1167 if (len == 0) { // reached EOF?
1168 eof = 1;
1169 flags.do_next_read = 0;
1170
1171 /* Bug 2879: Replies may terminate with \r\n then EOF instead of \r\n\r\n
1172 * Ensure here that we have at minimum two \r\n when EOF is seen.
1173 * TODO: Add eof parameter to headersEnd() and move this hack there.
1174 */
1175 if (readBuf->contentSize() && !flags.headers_parsed) {
1176 /*
1177 * Yes Henrik, there is a point to doing this. When we
1178 * called httpProcessReplyHeader() before, we didn't find
1179 * the end of headers, but now we are definitely at EOF, so
1180 * we want to process the reply headers.
1181 */
1182 /* Fake an "end-of-headers" to work around such broken servers */
1183 readBuf->append("\r\n", 2);
1184 }
1185 }
1186
1187 processReply();
1188}
1189
1190/// processes the already read and buffered response data, possibly after
1191/// waiting for asynchronous 1xx control message processing
1192void
1193HttpStateData::processReply()
1194{
1195
1196 if (flags.handling1xx) { // we came back after handling a 1xx response
1197 debugs(11, 5, HERE << "done with 1xx handling");
1198 flags.handling1xx = false;
1199 Must(!flags.headers_parsed);
1200 }
1201
1202 if (!flags.headers_parsed) { // have not parsed headers yet?
1203 PROF_start(HttpStateData_processReplyHeader);
1204 processReplyHeader();
1205 PROF_stop(HttpStateData_processReplyHeader);
1206
1207 if (!continueAfterParsingHeader()) // parsing error or need more data
1208 return; // TODO: send errors to ICAP
1209
1210 adaptOrFinalizeReply();
1211 }
1212
1213 // kick more reads if needed and/or process the response body, if any
1214 PROF_start(HttpStateData_processReplyBody);
1215 processReplyBody(); // may call serverComplete()
1216 PROF_stop(HttpStateData_processReplyBody);
1217}
1218
1219/**
1220 \retval true if we can continue with processing the body or doing ICAP.
1221 */
1222bool
1223HttpStateData::continueAfterParsingHeader()
1224{
1225 if (flags.handling1xx) {
1226 debugs(11, 5, HERE << "wait for 1xx handling");
1227 Must(!flags.headers_parsed);
1228 return false;
1229 }
1230
1231 if (!flags.headers_parsed && !eof) {
1232 debugs(11, 9, HERE << "needs more at " << readBuf->contentSize());
1233 flags.do_next_read = 1;
1234 /** \retval false If we have not finished parsing the headers and may get more data.
1235 * Schedules more reads to retrieve the missing data.
1236 */
1237 maybeReadVirginBody(); // schedules all kinds of reads; TODO: rename
1238 return false;
1239 }
1240
1241 /** If we are done with parsing, check for errors */
1242
1243 err_type error = ERR_NONE;
1244
1245 if (flags.headers_parsed) { // parsed headers, possibly with errors
1246 // check for header parsing errors
1247 if (HttpReply *vrep = virginReply()) {
1248 const http_status s = vrep->sline.status;
1249 const HttpVersion &v = vrep->sline.version;
1250 if (s == HTTP_INVALID_HEADER && v != HttpVersion(0,9)) {
1251 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1252 error = ERR_INVALID_RESP;
1253 } else if (s == HTTP_HEADER_TOO_LARGE) {
1254 fwd->dontRetry(true);
1255 error = ERR_TOO_BIG;
1256 } else {
1257 return true; // done parsing, got reply, and no error
1258 }
1259 } else {
1260 // parsed headers but got no reply
1261 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No reply at all for " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1262 error = ERR_INVALID_RESP;
1263 }
1264 } else {
1265 assert(eof);
1266 if (readBuf->hasContent()) {
1267 error = ERR_INVALID_RESP;
1268 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1269 } else {
1270 error = ERR_ZERO_SIZE_OBJECT;
1271 debugs(11, (orig_request->flags.accelerated?DBG_IMPORTANT:2), "WARNING: HTTP: Invalid Response: No object data received for " <<
1272 entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1273 }
1274 }
1275
1276 assert(error != ERR_NONE);
1277 entry->reset();
1278 fwd->fail(errorCon(error, HTTP_BAD_GATEWAY, fwd->request));
1279 flags.do_next_read = 0;
1280 comm_close(fd);
1281 return false; // quit on error
1282}
1283
1284/** truncate what we read if we read too much so that writeReplyBody()
1285 writes no more than what we should have read */
1286void
1287HttpStateData::truncateVirginBody()
1288{
1289 assert(flags.headers_parsed);
1290
1291 HttpReply *vrep = virginReply();
1292 int64_t clen = -1;
1293 if (!vrep->expectingBody(request->method, clen) || clen < 0)
1294 return; // no body or a body of unknown size, including chunked
1295
1296 const int64_t body_bytes_read = reply_bytes_read - header_bytes_read;
1297 if (body_bytes_read - body_bytes_truncated <= clen)
1298 return; // we did not read too much or already took care of the extras
1299
1300 if (const int64_t extras = body_bytes_read - body_bytes_truncated - clen) {
1301 // server sent more than the advertised content length
1302 debugs(11,5, HERE << "body_bytes_read=" << body_bytes_read <<
1303 " clen=" << clen << '/' << vrep->content_length <<
1304 " body_bytes_truncated=" << body_bytes_truncated << '+' << extras);
1305
1306 readBuf->truncate(extras);
1307 body_bytes_truncated += extras;
1308 }
1309}
1310
1311/**
1312 * Call this when there is data from the origin server
1313 * which should be sent to either StoreEntry, or to ICAP...
1314 */
1315void
1316HttpStateData::writeReplyBody()
1317{
1318 truncateVirginBody(); // if needed
1319 const char *data = readBuf->content();
1320 int len = readBuf->contentSize();
1321 addVirginReplyBody(data, len);
1322 readBuf->consume(len);
1323}
1324
1325bool
1326HttpStateData::decodeAndWriteReplyBody()
1327{
1328 const char *data = NULL;
1329 int len;
1330 bool wasThereAnException = false;
1331 assert(flags.chunked);
1332 assert(httpChunkDecoder);
1333 SQUID_ENTER_THROWING_CODE();
1334 MemBuf decodedData;
1335 decodedData.init();
1336 const bool doneParsing = httpChunkDecoder->parse(readBuf,&decodedData);
1337 len = decodedData.contentSize();
1338 data=decodedData.content();
1339 addVirginReplyBody(data, len);
1340 if (doneParsing) {
1341 lastChunk = 1;
1342 flags.do_next_read = 0;
1343 }
1344 SQUID_EXIT_THROWING_CODE(wasThereAnException);
1345 return wasThereAnException;
1346}
1347
1348/**
1349 * processReplyBody has two purposes:
1350 * 1 - take the reply body data, if any, and put it into either
1351 * the StoreEntry, or give it over to ICAP.
1352 * 2 - see if we made it to the end of the response (persistent
1353 * connections and such)
1354 */
1355void
1356HttpStateData::processReplyBody()
1357{
1358 AsyncCall::Pointer call;
1359 Ip::Address client_addr;
1360 bool ispinned = false;
1361
1362 if (!flags.headers_parsed) {
1363 flags.do_next_read = 1;
1364 maybeReadVirginBody();
1365 return;
1366 }
1367
1368#if USE_ADAPTATION
1369 debugs(11,5, HERE << "adaptationAccessCheckPending=" << adaptationAccessCheckPending);
1370 if (adaptationAccessCheckPending)
1371 return;
1372
1373#endif
1374
1375 /*
1376 * At this point the reply headers have been parsed and consumed.
1377 * That means header content has been removed from readBuf and
1378 * it contains only body data.
1379 */
1380 if (flags.chunked) {
1381 if (!decodeAndWriteReplyBody()) {
1382 flags.do_next_read = 0;
1383 serverComplete();
1384 return;
1385 }
1386 } else
1387 writeReplyBody();
1388
1389 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
1390 /*
1391 * The above writeReplyBody() call could ABORT this entry;
1392 * in that case, the server FD should already be closed and
1393 * there's nothing for us to do.
1394 */
1395 (void) 0;
1396 } else
1397 switch (persistentConnStatus()) {
1398 case INCOMPLETE_MSG:
1399 debugs(11, 5, "processReplyBody: INCOMPLETE_MSG");
1400 /* Wait for more data or EOF condition */
1401 if (flags.keepalive_broken) {
1402 call = NULL;
1403 commSetTimeout(fd, 10, call);
1404 } else {
1405 call = NULL;
1406 commSetTimeout(fd, Config.Timeout.read, call);
1407 }
1408
1409 flags.do_next_read = 1;
1410 break;
1411
1412 case COMPLETE_PERSISTENT_MSG:
1413 debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG");
1414 /* yes we have to clear all these! */
1415 call = NULL;
1416 commSetTimeout(fd, -1, call);
1417 flags.do_next_read = 0;
1418
1419 comm_remove_close_handler(fd, closeHandler);
1420 closeHandler = NULL;
1421 fwd->unregister(fd);
1422
1423 if (orig_request->flags.spoof_client_ip)
1424 client_addr = orig_request->client_addr;
1425
1426
1427 if (request->flags.pinned) {
1428 ispinned = true;
1429 } else if (request->flags.connection_auth && request->flags.auth_sent) {
1430 ispinned = true;
1431 }
1432
1433 if (orig_request->pinnedConnection() && ispinned) {
1434 orig_request->pinnedConnection()->pinConnection(fd, orig_request, _peer,
1435 (request->flags.connection_auth != 0));
1436 } else {
1437 fwd->pconnPush(fd, _peer, request, orig_request->GetHost(), client_addr);
1438 }
1439
1440 fd = -1;
1441
1442 serverComplete();
1443 return;
1444
1445 case COMPLETE_NONPERSISTENT_MSG:
1446 debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG");
1447 serverComplete();
1448 return;
1449 }
1450
1451 maybeReadVirginBody();
1452}
1453
1454void
1455HttpStateData::maybeReadVirginBody()
1456{
1457 // we may need to grow the buffer if headers do not fit
1458 const int minRead = flags.headers_parsed ? 0 : 1024;
1459 const int read_size = replyBodySpace(*readBuf, minRead);
1460
1461 debugs(11,9, HERE << (flags.do_next_read ? "may" : "wont") <<
1462 " read up to " << read_size << " bytes from FD " << fd);
1463
1464 /*
1465 * why <2? Because delayAwareRead() won't actually read if
1466 * you ask it to read 1 byte. The delayed read request
1467 * just gets re-queued until the client side drains, then
1468 * the I/O thread hangs. Better to not register any read
1469 * handler until we get a notification from someone that
1470 * it's okay to read again.
1471 */
1472 if (read_size < 2)
1473 return;
1474
1475 if (flags.do_next_read) {
1476 flags.do_next_read = 0;
1477 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
1478 entry->delayAwareRead(fd, readBuf->space(read_size), read_size,
1479 JobCallback(11, 5, Dialer, this, HttpStateData::readReply));
1480 }
1481}
1482
1483/// called after writing the very last request byte (body, last-chunk, etc)
1484void
1485HttpStateData::wroteLast(const CommIoCbParams &io)
1486{
1487 debugs(11, 5, HERE << "FD " << fd << ": size " << io.size << ": errflag " << io.flag << ".");
1488#if URL_CHECKSUM_DEBUG
1489
1490 entry->mem_obj->checkUrlChecksum();
1491#endif
1492
1493 if (io.size > 0) {
1494 fd_bytes(fd, io.size, FD_WRITE);
1495 kb_incr(&statCounter.server.all.kbytes_out, io.size);
1496 kb_incr(&statCounter.server.http.kbytes_out, io.size);
1497 }
1498
1499 if (io.flag == COMM_ERR_CLOSING)
1500 return;
1501
1502 if (io.flag) {
1503 ErrorState *err;
1504 err = errorCon(ERR_WRITE_ERROR, HTTP_BAD_GATEWAY, fwd->request);
1505 err->xerrno = io.xerrno;
1506 fwd->fail(err);
1507 comm_close(fd);
1508 return;
1509 }
1510
1511 sendComplete();
1512}
1513
1514/// successfully wrote the entire request (including body, last-chunk, etc.)
1515void
1516HttpStateData::sendComplete()
1517{
1518 /*
1519 * Set the read timeout here because it hasn't been set yet.
1520 * We only set the read timeout after the request has been
1521 * fully written to the server-side. If we start the timeout
1522 * after connection establishment, then we are likely to hit
1523 * the timeout for POST/PUT requests that have very large
1524 * request bodies.
1525 */
1526 typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
1527 AsyncCall::Pointer timeoutCall = JobCallback(11, 5,
1528 TimeoutDialer, this, HttpStateData::httpTimeout);
1529
1530 commSetTimeout(fd, Config.Timeout.read, timeoutCall);
1531
1532 flags.request_sent = 1;
1533
1534 orig_request->hier.peer_http_request_sent = current_time;
1535}
1536
1537// Close the HTTP server connection. Used by serverComplete().
1538void
1539HttpStateData::closeServer()
1540{
1541 debugs(11,5, HERE << "closing HTTP server FD " << fd << " this " << this);
1542
1543 if (fd >= 0) {
1544 fwd->unregister(fd);
1545 comm_remove_close_handler(fd, closeHandler);
1546 closeHandler = NULL;
1547 comm_close(fd);
1548 fd = -1;
1549 }
1550}
1551
1552bool
1553HttpStateData::doneWithServer() const
1554{
1555 return fd < 0;
1556}
1557
1558
1559/*
1560 * Fixup authentication request headers for special cases
1561 */
1562static void
1563httpFixupAuthentication(HttpRequest * request, HttpRequest * orig_request, const HttpHeader * hdr_in, HttpHeader * hdr_out, http_state_flags flags)
1564{
1565 http_hdr_type header = flags.originpeer ? HDR_AUTHORIZATION : HDR_PROXY_AUTHORIZATION;
1566
1567 /* Nothing to do unless we are forwarding to a peer */
1568 if (!request->flags.proxying)
1569 return;
1570
1571 /* Needs to be explicitly enabled */
1572 if (!orig_request->peer_login)
1573 return;
1574
1575 /* Maybe already dealt with? */
1576 if (hdr_out->has(header))
1577 return;
1578
1579 /* Nothing to do here for PASSTHRU */
1580 if (strcmp(orig_request->peer_login, "PASSTHRU") == 0)
1581 return;
1582
1583 /* PROXYPASS is a special case, single-signon to servers with the proxy password (basic only) */
1584 if (flags.originpeer && strcmp(orig_request->peer_login, "PROXYPASS") == 0 && hdr_in->has(HDR_PROXY_AUTHORIZATION)) {
1585 const char *auth = hdr_in->getStr(HDR_PROXY_AUTHORIZATION);
1586
1587 if (auth && strncasecmp(auth, "basic ", 6) == 0) {
1588 hdr_out->putStr(header, auth);
1589 return;
1590 }
1591 }
1592
1593 /* Special mode to pass the username to the upstream cache */
1594 if (*orig_request->peer_login == '*') {
1595 char loginbuf[256];
1596 const char *username = "-";
1597
1598 if (orig_request->extacl_user.size())
1599 username = orig_request->extacl_user.termedBuf();
1600#if USE_AUTH
1601 else if (orig_request->auth_user_request != NULL)
1602 username = orig_request->auth_user_request->username();
1603#endif
1604
1605 snprintf(loginbuf, sizeof(loginbuf), "%s%s", username, orig_request->peer_login + 1);
1606
1607 httpHeaderPutStrf(hdr_out, header, "Basic %s",
1608 base64_encode(loginbuf));
1609 return;
1610 }
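
    /* Example (added, illustrative): with "cache_peer ... login=*:secret" and an
     * authenticated client user "alice", loginbuf above becomes "alice:secret",
     * so the peer receives "Proxy-Authorization: Basic <base64(alice:secret)>"
     * (or Authorization: when forwarding to an originserver peer). */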
1611
1612 /* external_acl provided credentials */
1613 if (orig_request->extacl_user.size() && orig_request->extacl_passwd.size() &&
1614 (strcmp(orig_request->peer_login, "PASS") == 0 ||
1615 strcmp(orig_request->peer_login, "PROXYPASS") == 0)) {
1616 char loginbuf[256];
1617 snprintf(loginbuf, sizeof(loginbuf), SQUIDSTRINGPH ":" SQUIDSTRINGPH,
1618 SQUIDSTRINGPRINT(orig_request->extacl_user),
1619 SQUIDSTRINGPRINT(orig_request->extacl_passwd));
1620 httpHeaderPutStrf(hdr_out, header, "Basic %s",
1621 base64_encode(loginbuf));
1622 return;
1623 }
1624
1625 /* Kerberos login to peer */
1626#if HAVE_AUTH_MODULE_NEGOTIATE && HAVE_KRB5 && HAVE_GSSAPI
1627 if (strncmp(orig_request->peer_login, "NEGOTIATE",strlen("NEGOTIATE")) == 0) {
1628 char *Token=NULL;
1629 char *PrincipalName=NULL,*p;
1630 if ((p=strchr(orig_request->peer_login,':')) != NULL ) {
1631 PrincipalName=++p;
1632 }
1633 Token = peer_proxy_negotiate_auth(PrincipalName,request->peer_host);
1634 if (Token) {
1635 httpHeaderPutStrf(hdr_out, HDR_PROXY_AUTHORIZATION, "Negotiate %s",Token);
1636 }
1637 return;
1638 }
1639#endif /* HAVE_KRB5 && HAVE_GSSAPI */
1640
1641 httpHeaderPutStrf(hdr_out, header, "Basic %s",
1642 base64_encode(orig_request->peer_login));
1643 return;
1644}
1645
1646/*
1647 * build request headers and append them to a given MemBuf
1648 * used by buildRequestPrefix()
1649 * note: the caller initialises the HttpHeader and is responsible for Clean()-ing it
1650 */
1651void
1652HttpStateData::httpBuildRequestHeader(HttpRequest * request,
1653 HttpRequest * orig_request,
1654 StoreEntry * entry,
1655 HttpHeader * hdr_out,
1656 const http_state_flags flags)
1657{
1658 /* building buffer for complex strings */
1659#define BBUF_SZ (MAX_URL+32)
1660 LOCAL_ARRAY(char, bbuf, BBUF_SZ);
1661 LOCAL_ARRAY(char, ntoabuf, MAX_IPSTRLEN);
1662 const HttpHeader *hdr_in = &orig_request->header;
1663 const HttpHeaderEntry *e = NULL;
1664 HttpHeaderPos pos = HttpHeaderInitPos;
1665 assert (hdr_out->owner == hoRequest);
1666
1667 /* append our IMS header */
1668 if (request->lastmod > -1)
1669 hdr_out->putTime(HDR_IF_MODIFIED_SINCE, request->lastmod);
1670
1671 bool we_do_ranges = decideIfWeDoRanges (orig_request);
1672
1673 String strConnection (hdr_in->getList(HDR_CONNECTION));
1674
1675 while ((e = hdr_in->getEntry(&pos)))
1676 copyOneHeaderFromClientsideRequestToUpstreamRequest(e, strConnection, request, orig_request, hdr_out, we_do_ranges, flags);
1677
1678 /* Abstraction break: We should interpret multipart/byterange responses
1679 * into offset-length data, and this works around our inability to do so.
1680 */
1681 if (!we_do_ranges && orig_request->multipartRangeRequest()) {
1682 /* don't cache the result */
1683 orig_request->flags.cachable = 0;
1684 /* pretend it's not a range request */
1685 delete orig_request->range;
1686 orig_request->range = NULL;
1687 orig_request->flags.range = 0;
1688 }
1689
1690 /* append Via */
1691 if (Config.onoff.via) {
1692 String strVia;
1693 strVia = hdr_in->getList(HDR_VIA);
1694 snprintf(bbuf, BBUF_SZ, "%d.%d %s",
1695 orig_request->http_ver.major,
1696 orig_request->http_ver.minor, ThisCache);
1697 strListAdd(&strVia, bbuf, ',');
1698 hdr_out->putStr(HDR_VIA, strVia.termedBuf());
1699 strVia.clean();
1700 }
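
    /* Example (added, illustrative): for an HTTP/1.1 client request this appends
     * something like "1.1 proxy.example.com (squid/3.x)" to any Via list received
     * from downstream, where the hostname/product token comes from ThisCache. */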
1701
1702 if (orig_request->flags.accelerated) {
1703 /* Append Surrogate-Capabilities */
1704 String strSurrogate(hdr_in->getList(HDR_SURROGATE_CAPABILITY));
1705#if USE_SQUID_ESI
1706 snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0 ESI/1.0\"", Config.Accel.surrogate_id);
1707#else
1708 snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0\"", Config.Accel.surrogate_id);
1709#endif
1710 strListAdd(&strSurrogate, bbuf, ',');
1711 hdr_out->putStr(HDR_SURROGATE_CAPABILITY, strSurrogate.termedBuf());
1712 }
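
    /* Example (added, illustrative): with surrogate_id set to "accel1" and ESI
     * compiled in, this yields
     *   Surrogate-Capability: accel1="Surrogate/1.0 ESI/1.0"
     * appended to whatever capability tokens arrived from downstream. */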
1713
1714 /** \pre Handle X-Forwarded-For */
1715 if (strcmp(opt_forwarded_for, "delete") != 0) {
1716
1717 String strFwd = hdr_in->getList(HDR_X_FORWARDED_FOR);
1718
1719 if (strFwd.size() > 65536/2) {
1720 // There is probably a forwarding loop with Via detection disabled.
1721 // If we do nothing, String will assert on overflow soon.
1722 // TODO: Terminate all transactions with huge XFF?
1723 strFwd = "error";
1724
1725 static int warnedCount = 0;
1726 if (warnedCount++ < 100) {
1727 const char *url = entry ? entry->url() : urlCanonical(orig_request);
1728 debugs(11, 1, "Warning: likely forwarding loop with " << url);
1729 }
1730 }
1731
1732 if (strcmp(opt_forwarded_for, "on") == 0) {
1733 /** If set to ON - append client IP or 'unknown'. */
1734 if ( orig_request->client_addr.IsNoAddr() )
1735 strListAdd(&strFwd, "unknown", ',');
1736 else
1737 strListAdd(&strFwd, orig_request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN), ',');
1738 } else if (strcmp(opt_forwarded_for, "off") == 0) {
1739 /** If set to OFF - append 'unknown'. */
1740 strListAdd(&strFwd, "unknown", ',');
1741 } else if (strcmp(opt_forwarded_for, "transparent") == 0) {
1742 /** If set to TRANSPARENT - pass through unchanged. */
1743 } else if (strcmp(opt_forwarded_for, "truncate") == 0) {
1744 /** If set to TRUNCATE - drop existing list and replace with client IP or 'unknown'. */
1745 if ( orig_request->client_addr.IsNoAddr() )
1746 strFwd = "unknown";
1747 else
1748 strFwd = orig_request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN);
1749 }
1750 if (strFwd.size() > 0)
1751 hdr_out->putStr(HDR_X_FORWARDED_FOR, strFwd.termedBuf());
1752 }
1753 /** If set to DELETE - do not copy through. */
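
    /* Example (added, illustrative): with "forwarded_for on", a request that
     * already carries "X-Forwarded-For: 192.0.2.1" from a downstream proxy and
     * arrives from client 203.0.113.7 goes upstream with
     *   X-Forwarded-For: 192.0.2.1, 203.0.113.7
     * (addresses here are documentation examples). */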
1754
1755 /* append Host if not there already */
1756 if (!hdr_out->has(HDR_HOST)) {
1757 if (orig_request->peer_domain) {
1758 hdr_out->putStr(HDR_HOST, orig_request->peer_domain);
1759 } else if (orig_request->port == urlDefaultPort(orig_request->protocol)) {
1760 /* use port# only if not default */
1761 hdr_out->putStr(HDR_HOST, orig_request->GetHost());
1762 } else {
1763 httpHeaderPutStrf(hdr_out, HDR_HOST, "%s:%d",
1764 orig_request->GetHost(),
1765 (int) orig_request->port);
1766 }
1767 }
1768
1769 /* append Authorization if known in URL, not in header and going direct */
1770 if (!hdr_out->has(HDR_AUTHORIZATION)) {
1771 if (!request->flags.proxying && *request->login) {
1772 httpHeaderPutStrf(hdr_out, HDR_AUTHORIZATION, "Basic %s",
1773 base64_encode(request->login));
1774 }
1775 }
1776
1777 /* Fixup (Proxy-)Authorization special cases. Plain relaying dealt with above */
1778 httpFixupAuthentication(request, orig_request, hdr_in, hdr_out, flags);
1779
1780 /* append Cache-Control, add max-age if not there already */
1781 {
1782 HttpHdrCc *cc = hdr_in->getCc();
1783
1784 if (!cc)
1785 cc = httpHdrCcCreate();
1786
1787#if 0 /* see bug 2330 */
1788 /* Set no-cache if determined needed but not found */
1789 if (orig_request->flags.nocache)
1790 EBIT_SET(cc->mask, CC_NO_CACHE);
1791#endif
1792
1793 /* Add max-age only without no-cache */
1794 if (!EBIT_TEST(cc->mask, CC_MAX_AGE) && !EBIT_TEST(cc->mask, CC_NO_CACHE)) {
1795 const char *url =
1796 entry ? entry->url() : urlCanonical(orig_request);
1797 httpHdrCcSetMaxAge(cc, getMaxAge(url));
1798
1799 if (request->urlpath.size())
1800 assert(strstr(url, request->urlpath.termedBuf()));
1801 }
1802
1803 /* Enforce sibling relations */
1804 if (flags.only_if_cached)
1805 EBIT_SET(cc->mask, CC_ONLY_IF_CACHED);
1806
1807 hdr_out->putCc(cc);
1808
1809 httpHdrCcDestroy(cc);
1810 }
1811
1812 /* maybe append Connection: keep-alive */
1813 if (flags.keepalive) {
1814 hdr_out->putStr(HDR_CONNECTION, "keep-alive");
1815 }
1816
1817 /* append Front-End-Https */
1818 if (flags.front_end_https) {
1819 if (flags.front_end_https == 1 || request->protocol == AnyP::PROTO_HTTPS)
1820 hdr_out->putStr(HDR_FRONT_END_HTTPS, "On");
1821 }
1822
1823 if (flags.chunked_request) {
1824 // Do not just copy the original value, so that if the client side
1825 // starts decoding other encodings, this code may remain valid.
1826 hdr_out->putStr(HDR_TRANSFER_ENCODING, "chunked");
1827 }
1828
1829 /* Now mangle the headers. */
1830 if (Config2.onoff.mangle_request_headers)
1831 httpHdrMangleList(hdr_out, request, ROR_REQUEST);
1832
1833 strConnection.clean();
1834}
1835
1836/**
1837 * Decides whether a particular header may be cloned from the received client's request
1838 * to our outgoing fetch request.
1839 */
1840void
1841copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, HttpRequest * request, const HttpRequest * orig_request, HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags flags)
1842{
1843 debugs(11, 5, "httpBuildRequestHeader: " << e->name << ": " << e->value );
1844
1845 switch (e->id) {
1846
1847 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid should not pass on. */
1848
1849 case HDR_PROXY_AUTHORIZATION:
1850 /** \par Proxy-Authorization:
1851 * Only pass on proxy authentication to peers for which
1852 * authentication forwarding is explicitly enabled
1853 */
1854 if (!flags.originpeer && flags.proxying && orig_request->peer_login &&
1855 (strcmp(orig_request->peer_login, "PASS") == 0 ||
1856 strcmp(orig_request->peer_login, "PROXYPASS") == 0 ||
1857 strcmp(orig_request->peer_login, "PASSTHRU") == 0)) {
1858 hdr_out->addEntry(e->clone());
1859 }
1860 break;
1861
1862 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid does not pass on. */
1863
1864 case HDR_CONNECTION: /** \par Connection: */
1865 case HDR_TE: /** \par TE: */
1866 case HDR_KEEP_ALIVE: /** \par Keep-Alive: */
1867 case HDR_PROXY_AUTHENTICATE: /** \par Proxy-Authenticate: */
1868 case HDR_TRAILER: /** \par Trailer: */
1869 case HDR_UPGRADE: /** \par Upgrade: */
1870 case HDR_TRANSFER_ENCODING: /** \par Transfer-Encoding: */
1871 break;
1872
1873
1874 /** \par OTHER headers I haven't bothered to track down yet. */
1875
1876 case HDR_AUTHORIZATION:
1877 /** \par Authorization:
1878 * Pass on WWW authentication */
1879
1880 if (!flags.originpeer) {
1881 hdr_out->addEntry(e->clone());
1882 } else {
1883 /** \note In accelerators, only forward authentication if enabled
1884 * (see also httpFixupAuthentication for special cases)
1885 */
1886 if (orig_request->peer_login &&
1887 (strcmp(orig_request->peer_login, "PASS") == 0 ||
1888 strcmp(orig_request->peer_login, "PASSTHRU") == 0 ||
1889 strcmp(orig_request->peer_login, "PROXYPASS") == 0)) {
1890 hdr_out->addEntry(e->clone());
1891 }
1892 }
1893
1894 break;
1895
1896 case HDR_HOST:
1897 /** \par Host:
1898 * Normally Squid rewrites the Host: header.
1899 * However, there is one case when we don't: If the URL
1900 * went through our redirector and the admin configured
1901 * 'redir_rewrites_host' to be off.
1902 */
1903 if (orig_request->peer_domain)
1904 hdr_out->putStr(HDR_HOST, orig_request->peer_domain);
1905 else if (request->flags.redirected && !Config.onoff.redir_rewrites_host)
1906 hdr_out->addEntry(e->clone());
1907 else {
1908 /* use port# only if not default */
1909
1910 if (orig_request->port == urlDefaultPort(orig_request->protocol)) {
1911 hdr_out->putStr(HDR_HOST, orig_request->GetHost());
1912 } else {
1913 httpHeaderPutStrf(hdr_out, HDR_HOST, "%s:%d",
1914 orig_request->GetHost(),
1915 (int) orig_request->port);
1916 }
1917 }
1918
1919 break;
1920
1921 case HDR_IF_MODIFIED_SINCE:
1922 /** \par If-Modified-Since:
1923 * append unless we added our own;
1924 * \note at most one client If-Modified-Since (IMS) header can pass through */
1925
1926 if (!hdr_out->has(HDR_IF_MODIFIED_SINCE))
1927 hdr_out->addEntry(e->clone());
1928
1929 break;
1930
1931 case HDR_MAX_FORWARDS:
1932 /** \par Max-Forwards:
1933 * pass only on TRACE or OPTIONS requests */
1934 if (orig_request->method == METHOD_TRACE || orig_request->method == METHOD_OPTIONS) {
1935 const int64_t hops = e->getInt64();
1936
1937 if (hops > 0)
1938 hdr_out->putInt64(HDR_MAX_FORWARDS, hops - 1);
1939 }
1940
1941 break;
1942
1943 case HDR_VIA:
1944 /** \par Via:
1945 * If Via is disabled then forward any received header as-is.
1946 * Otherwise leave it out here; an updated Via header is appended explicitly later. */
1947
1948 if (!Config.onoff.via)
1949 hdr_out->addEntry(e->clone());
1950
1951 break;
1952
1953 case HDR_RANGE:
1954
1955 case HDR_IF_RANGE:
1956
1957 case HDR_REQUEST_RANGE:
1958 /** \par Range:, If-Range:, Request-Range:
1959 * Pass these only when we are not handling the range request ourselves */
1960 if (!we_do_ranges)
1961 hdr_out->addEntry(e->clone());
1962
1963 break;
1964
1965 case HDR_PROXY_CONNECTION: // SHOULD ignore. But doing so breaks things.
1966 break;
1967
1968 case HDR_X_FORWARDED_FOR:
1969
1970 case HDR_CACHE_CONTROL:
1971 /** \par X-Forwarded-For:, Cache-Control:
1972 * handled specially by Squid, so they are left out here
1973 * and appended after the loop if needed */
1974 break;
1975
1976 case HDR_FRONT_END_HTTPS:
1977 /** \par Front-End-Https:
1978 * Pass thru only if peer is configured with front-end-https */
1979 if (!flags.front_end_https)
1980 hdr_out->addEntry(e->clone());
1981
1982 break;
1983
1984 default:
1985 /** \par default.
1986 * pass on all other header fields
1987 * which are NOT listed in the special Connection: header. */
1988
1989 if (strConnection.size()>0 && strListIsMember(&strConnection, e->name.termedBuf(), ',')) {
1990 debugs(11, 2, "'" << e->name << "' header cropped by Connection: definition");
1991 return;
1992 }
1993
1994 hdr_out->addEntry(e->clone());
1995 }
1996}
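/* Hedged example of the default-case cropping above (illustration only): if
 * the client sent
 *
 *   Connection: close, X-Custom-Hint
 *   X-Custom-Hint: 42
 *
 * then "X-Custom-Hint" is a member of strConnection, so that header is
 * dropped from the upstream request, while end-to-end headers not named in
 * Connection (e.g. Accept, User-Agent) are cloned through unchanged.
 * "X-Custom-Hint" is a made-up header name used only for this sketch. */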
1997
1998bool
1999HttpStateData::decideIfWeDoRanges (HttpRequest * orig_request)
2000{
2001 bool result = true;
2002 /* decide if we want to do Ranges ourselves
2003 * (and fetch the whole object now)
2004 * We want to handle Ranges ourselves iff
2005 * - we can actually parse client Range specs
2006 * - the specs are expected to be simple enough (e.g. no out-of-order ranges)
2007 * - the reply will be cacheable
2008 * (If the reply will be uncacheable, we have to throw it away after
2009 * serving this request, so it is better to forward ranges to
2010 * the server and fetch only the requested content)
2011 */
2012
2013 int64_t roffLimit = orig_request->getRangeOffsetLimit();
2014
2015 if (NULL == orig_request->range || !orig_request->flags.cachable
2016 || orig_request->range->offsetLimitExceeded(roffLimit) || orig_request->flags.connection_auth)
2017 result = false;
2018
2019 debugs(11, 8, "decideIfWeDoRanges: range specs: " <<
2020 orig_request->range << ", cachable: " <<
2021 orig_request->flags.cachable << "; we_do_ranges: " << result);
2022
2023 return result;
2024}
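/* Hedged illustration of the decision above (not authoritative): a cacheable
 * request with "Range: bytes=0-99" whose offsets stay within the configured
 * range offset limit keeps result == true, so the HDR_RANGE case above does
 * not forward the Range header and Squid fetches the whole object instead.
 * An uncacheable request, or one carrying connection-oriented authentication,
 * flips result to false and the client's Range specs go upstream as-is. */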
2025
2026/* build request prefix and append it to a given MemBuf;
2027 * return the length of the prefix */
2028mb_size_t
2029HttpStateData::buildRequestPrefix(HttpRequest * aRequest,
2030 HttpRequest * original_request,
2031 StoreEntry * sentry,
2032 MemBuf * mb)
2033{
2034 const int offset = mb->size;
2035 HttpVersion httpver(1,1);
2036 mb->Printf("%s %s HTTP/%d.%d\r\n",
2037 RequestMethodStr(aRequest->method),
2038 aRequest->urlpath.size() ? aRequest->urlpath.termedBuf() : "/",
2039 httpver.major,httpver.minor);
2040 /* build and pack headers */
2041 {
2042 HttpHeader hdr(hoRequest);
2043 Packer p;
2044 httpBuildRequestHeader(aRequest, original_request, sentry, &hdr, flags);
2045
2046 if (aRequest->flags.pinned && aRequest->flags.connection_auth)
2047 aRequest->flags.auth_sent = 1;
2048 else if (hdr.has(HDR_AUTHORIZATION))
2049 aRequest->flags.auth_sent = 1;
2050
2051 packerToMemInit(&p, mb);
2052 hdr.packInto(&p);
2053 hdr.clean();
2054 packerClean(&p);
2055 }
2056 /* append header terminator */
2057 mb->append(crlf, 2);
2058 return mb->size - offset;
2059}
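/* Hedged sketch of what buildRequestPrefix() appends to the MemBuf for a
 * plain GET sent to an origin server (header values invented):
 *
 *   GET /index.html HTTP/1.1\r\n
 *   Host: www.example.com\r\n
 *   ...other headers assembled by httpBuildRequestHeader()...\r\n
 *   \r\n
 *
 * The request line always advertises HTTP/1.1 per the HttpVersion above, and
 * the bare CRLF appended last is the header terminator. */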
2060
2061/* This will be called when connect completes. Write request. */
2062bool
2063HttpStateData::sendRequest()
2064{
2065 MemBuf mb;
2066
2067 debugs(11, 5, "httpSendRequest: FD " << fd << ", request " << request << ", this " << this << ".");
2068
2069 if (!canSend(fd)) {
2070 debugs(11,3, HERE << "cannot send request to closing FD " << fd);
2071 assert(closeHandler != NULL);
2072 return false;
2073 }
2074
2075 typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
2076 AsyncCall::Pointer timeoutCall = JobCallback(11, 5,
2077 TimeoutDialer, this, HttpStateData::httpTimeout);
2078 commSetTimeout(fd, Config.Timeout.lifetime, timeoutCall);
2079 flags.do_next_read = 1;
2080 maybeReadVirginBody();
2081
2082 if (orig_request->body_pipe != NULL) {
2083 if (!startRequestBodyFlow()) // register to receive body data
2084 return false;
2085 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2086 requestSender = JobCallback(11,5,
2087 Dialer, this, HttpStateData::sentRequestBody);
2088
2089 Must(!flags.chunked_request);
2090 // Preserve original chunked encoding unless we learned the length.
2091 if (orig_request->header.chunked() && orig_request->content_length < 0)
2092 flags.chunked_request = 1;
2093 } else {
2094 assert(!requestBodySource);
2095 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2096 requestSender = JobCallback(11,5,
2097 Dialer, this, HttpStateData::wroteLast);
2098 }
2099
2100 if (_peer != NULL) {
2101 if (_peer->options.originserver) {
2102 flags.proxying = 0;
2103 flags.originpeer = 1;
2104 } else {
2105 flags.proxying = 1;
2106 flags.originpeer = 0;
2107 }
2108 } else {
2109 flags.proxying = 0;
2110 flags.originpeer = 0;
2111 }
2112
2113 /*
2114 * Is keep-alive okay for all request methods?
2115 */
2116 if (orig_request->flags.must_keepalive)
2117 flags.keepalive = 1;
2118 else if (!Config.onoff.server_pconns)
2119 flags.keepalive = 0;
2120 else if (_peer == NULL)
2121 flags.keepalive = 1;
2122 else if (_peer->stats.n_keepalives_sent < 10)
2123 flags.keepalive = 1;
2124 else if ((double) _peer->stats.n_keepalives_recv /
2125 (double) _peer->stats.n_keepalives_sent > 0.50)
2126 flags.keepalive = 1;
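    /* Worked example of the ratio test above (illustration only): a peer that
     * acknowledged 6 of 100 keep-alive offers gives 6/100 = 0.06 <= 0.50, so
     * keep-alive stays off for it; one that acknowledged 60 of 100 gives
     * 0.60 > 0.50 and keep-alive is requested again. */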
2127
2128 if (_peer) {
2129 if (neighborType(_peer, request) == PEER_SIBLING &&
2130 !_peer->options.allow_miss)
2131 flags.only_if_cached = 1;
2132
2133 flags.front_end_https = _peer->front_end_https;
2134 }
2135
2136 mb.init();
2137 request->peer_host = _peer ? _peer->host : NULL;
2138 buildRequestPrefix(request, orig_request, entry, &mb);
2139 debugs(11, 6, "httpSendRequest: FD " << fd << ":\n" << mb.buf);
2140 Comm::Write(fd, &mb, requestSender);
2141
2142 return true;
2143}
2144
2145bool
2146HttpStateData::getMoreRequestBody(MemBuf &buf)
2147{
2148 // parent's implementation can handle the no-encoding case
2149 if (!flags.chunked_request)
2150 return ServerStateData::getMoreRequestBody(buf);
2151
2152 MemBuf raw;
2153
2154 Must(requestBodySource != NULL);
2155 if (!requestBodySource->getMoreData(raw))
2156 return false; // no request body bytes to chunk yet
2157
2158 // optimization: pre-allocate buffer size that should be enough
2159 const mb_size_t rawDataSize = raw.contentSize();
2160 // we may need to send: hex-chunk-size CRLF raw-data CRLF last-chunk
2161 buf.init(16 + 2 + rawDataSize + 2 + 5, raw.max_capacity);
2162
2163 buf.Printf("%x\r\n", static_cast<unsigned int>(rawDataSize));
2164 buf.append(raw.content(), rawDataSize);
2165 buf.Printf("\r\n");
2166
2167 Must(rawDataSize > 0); // we did not accidentally create a last-chunk above
2168
2169 // Do not send last-chunk unless we successfully received everything
2170 if (receivedWholeRequestBody) {
2171 Must(!flags.sentLastChunk);
2172 flags.sentLastChunk = true;
2173 buf.append("0\r\n\r\n", 5);
2174 }
2175
2176 return true;
2177}
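/* Hedged illustration of the chunk framing produced above: if the producer
 * hands over 26 bytes and the whole request body has already been received,
 * the buffer ends up holding
 *
 *   1a\r\n<26 raw body bytes>\r\n0\r\n\r\n
 *
 * i.e. hex chunk size, CRLF, data, CRLF, then the terminating last-chunk.
 * If more body data is still expected, the trailing "0\r\n\r\n" is omitted
 * here and written later (see finishingChunkedRequest). */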
2178
2179void
2180httpStart(FwdState *fwd)
2181{
2182 debugs(11, 3, "httpStart: \"" << RequestMethodStr(fwd->request->method) << " " << fwd->entry->url() << "\"" );
2183 HttpStateData *httpState = new HttpStateData(fwd);
2184
2185 if (!httpState->sendRequest()) {
2186 debugs(11, 3, "httpStart: aborted");
2187 delete httpState;
2188 return;
2189 }
2190
2191 statCounter.server.all.requests++;
2192 statCounter.server.http.requests++;
2193
2194 /*
2195 * We used to set the read timeout here, but no longer do.
2196 * Now it is set in httpSendComplete() after the full request,
2197 * including request body, has been written to the server.
2198 */
2199}
2200
2201/// if broken posts are enabled for the request, try to fix and return true
2202bool
2203HttpStateData::finishingBrokenPost()
2204{
2205#if USE_HTTP_VIOLATIONS
2206 if (!Config.accessList.brokenPosts) {
2207 debugs(11, 5, HERE << "No brokenPosts list");
2208 return false;
2209 }
2210
2211 ACLFilledChecklist ch(Config.accessList.brokenPosts, originalRequest(), NULL);
2212 if (!ch.fastCheck()) {
2213 debugs(11, 5, HERE << "didn't match brokenPosts");
2214 return false;
2215 }
2216
2217 if (!canSend(fd)) {
2218 debugs(11,2, HERE << "ignoring broken POST for closing FD " << fd);
2219 assert(closeHandler != NULL);
2220 return true; // prevent caller from proceeding as if nothing happened
2221 }
2222
2223 debugs(11, 2, "finishingBrokenPost: fixing broken POST");
2224 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2225 requestSender = JobCallback(11,5,
2226 Dialer, this, HttpStateData::wroteLast);
2227 Comm::Write(fd, "\r\n", 2, requestSender, NULL);
2228 return true;
2229#else
2230 return false;
2231#endif /* USE_HTTP_VIOLATIONS */
2232}
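/* Hedged configuration sketch (not from this file): the extra CRLF above is
 * sent only when the request matches the broken_posts access list, e.g.
 *
 *   acl buggy_server url_regex ^http://broken\.example\.com/
 *   broken_posts allow buggy_server
 *
 * The ACL name and URL pattern are invented for illustration; the directive
 * is the squid.conf broken_posts list checked via Config.accessList.brokenPosts. */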
2233
2234/// if needed, write last-chunk to end the request body and return true
2235bool
2236HttpStateData::finishingChunkedRequest()
2237{
2238 if (flags.sentLastChunk) {
2239 debugs(11, 5, HERE << "already sent last-chunk");
2240 return false;
2241 }
2242
2243 Must(receivedWholeRequestBody); // or we should not be sending last-chunk
2244 flags.sentLastChunk = true;
2245
2246 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2247 requestSender = JobCallback(11,5,
2248 Dialer, this, HttpStateData::wroteLast);
2249 Comm::Write(fd, "0\r\n\r\n", 5, requestSender, NULL);
2250 return true;
2251}
2252
2253void
2254HttpStateData::doneSendingRequestBody()
2255{
2256 ServerStateData::doneSendingRequestBody();
2257 debugs(11,5, HERE << "doneSendingRequestBody: FD " << fd);
2258
2259 // do we need to write something after the last body byte?
2260 if (flags.chunked_request && finishingChunkedRequest())
2261 return;
2262 if (!flags.chunked_request && finishingBrokenPost())
2263 return;
2264
2265 sendComplete();
2266}
2267
2268// more origin request body data is available
2269void
2270HttpStateData::handleMoreRequestBodyAvailable()
2271{
2272 if (eof || fd < 0) {
2273 // XXX: we should check this condition in the other callbacks too!
2274 // TODO: Check whether this can actually happen: We should unsubscribe
2275 // as a body consumer when the above condition(s) are detected.
2276 debugs(11, 1, HERE << "Transaction aborted while reading HTTP body");
2277 return;
2278 }
2279
2280 assert(requestBodySource != NULL);
2281
2282 if (requestBodySource->buf().hasContent()) {
2283 // XXX: why doesn't this trigger a debug message on every request?
2284
2285 if (flags.headers_parsed && !flags.abuse_detected) {
2286 flags.abuse_detected = 1;
2287 debugs(11, 1, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << orig_request->client_addr << "' -> '" << entry->url() << "'" );
2288
2289 if (virginReply()->sline.status == HTTP_INVALID_HEADER) {
2290 comm_close(fd);
2291 return;
2292 }
2293 }
2294 }
2295
2296 ServerStateData::handleMoreRequestBodyAvailable();
2297}
2298
2299// premature end of the request body
2300void
2301HttpStateData::handleRequestBodyProducerAborted()
2302{
2303 ServerStateData::handleRequestBodyProducerAborted();
2304 if (entry->isEmpty()) {
2305 debugs(11, 3, "request body aborted: FD " << fd);
2306 ErrorState *err;
2307 err = errorCon(ERR_READ_ERROR, HTTP_BAD_GATEWAY, fwd->request);
2308 err->xerrno = errno;
2309 fwd->fail(err);
2310 }
2311
2312 abortTransaction("request body producer aborted");
2313}
2314
2315// called when we wrote request headers(!) or a part of the body
2316void
2317HttpStateData::sentRequestBody(const CommIoCbParams &io)
2318{
2319 if (io.size > 0)
2320 kb_incr(&statCounter.server.http.kbytes_out, io.size);
2321
2322 ServerStateData::sentRequestBody(io);
2323}
2324
2325// Quickly abort the transaction
2326// TODO: destruction should be sufficient as the destructor should clean up,
2327// including canceling close handlers
2328void
2329HttpStateData::abortTransaction(const char *reason)
2330{
2331 debugs(11,5, HERE << "aborting transaction for " << reason <<
2332 "; FD " << fd << ", this " << this);
2333
2334 if (fd >= 0) {
2335 comm_close(fd);
2336 return;
2337 }
2338
2339 fwd->handleUnregisteredServerEnd();
2340 deleteThis("HttpStateData::abortTransaction");
2341}
2342
2343HttpRequest *
2344HttpStateData::originalRequest()
2345{
2346 return orig_request;
2347}