src/http.cc
1
2/*
3 * $Id$
4 *
5 * DEBUG: section 11 Hypertext Transfer Protocol (HTTP)
6 * AUTHOR: Harvest Derived
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36/*
37 * Anonymizing patch by lutz@as-node.jena.thur.de
38 * See http-anon.c for more information.
39 */
40
41#include "squid.h"
42
43#include "acl/FilledChecklist.h"
44#include "auth/UserRequest.h"
45#if DELAY_POOLS
46#include "DelayPools.h"
47#endif
48#include "errorpage.h"
49#include "fde.h"
50#include "http.h"
51#include "HttpHdrContRange.h"
52#include "HttpHdrSc.h"
53#include "HttpHdrScTarget.h"
54#include "HttpReply.h"
55#include "HttpRequest.h"
56#include "MemBuf.h"
57#include "MemObject.h"
58#include "protos.h"
59#include "rfc1738.h"
60#include "SquidTime.h"
61#include "Store.h"
62#include "TextException.h"
63
64
65#define SQUID_ENTER_THROWING_CODE() try {
66#define SQUID_EXIT_THROWING_CODE(status) \
67 status = true; \
68 } \
69 catch (const std::exception &e) { \
70 debugs (11, 1, "Exception error:" << e.what()); \
71 status = false; \
72 }
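/*
 * Usage sketch for the two macros above (values and callee are illustrative):
 * they bracket code that may throw, turning any std::exception into a logged
 * message plus a boolean status:
 *
 *     bool ok = false;
 *     SQUID_ENTER_THROWING_CODE();
 *     riskyOperationThatMayThrow();   // hypothetical callee
 *     SQUID_EXIT_THROWING_CODE(ok);
 *     // ok is now true on success, false if an exception was caught and logged
 *
 * decodeAndWriteReplyBody() below uses exactly this pattern around the
 * chunked-coding parser.
 */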
73
74CBDATA_CLASS_INIT(HttpStateData);
75
76static const char *const crlf = "\r\n";
77
78static void httpMaybeRemovePublic(StoreEntry *, http_status);
79static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, HttpRequest * request, const HttpRequest * orig_request,
80 HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags);
81
82HttpStateData::HttpStateData(FwdState *theFwdState) : AsyncJob("HttpStateData"), ServerStateData(theFwdState),
83 lastChunk(0), header_bytes_read(0), reply_bytes_read(0),
84 body_bytes_truncated(0), httpChunkDecoder(NULL)
85{
86 debugs(11,5,HERE << "HttpStateData " << this << " created");
87 ignoreCacheControl = false;
88 surrogateNoStore = false;
89 fd = fwd->server_fd;
90 readBuf = new MemBuf;
91 readBuf->init();
92 orig_request = HTTPMSGLOCK(fwd->request);
93
94 // reset peer response time stats for %<pt
95 orig_request->hier.peer_http_request_sent.tv_sec = 0;
96 orig_request->hier.peer_http_request_sent.tv_usec = 0;
97
98 if (fwd->servers)
99 _peer = fwd->servers->_peer; /* might be NULL */
100
101 if (_peer) {
102 const char *url;
103
104 if (_peer->options.originserver)
105 url = orig_request->urlpath.termedBuf();
106 else
107 url = entry->url();
108
109 HttpRequest * proxy_req = new HttpRequest(orig_request->method,
110 orig_request->protocol, url);
111
112 proxy_req->SetHost(_peer->host);
113
114 proxy_req->port = _peer->http_port;
115
116 proxy_req->flags = orig_request->flags;
117
118 proxy_req->lastmod = orig_request->lastmod;
119
120 proxy_req->flags.proxying = 1;
121
122 HTTPMSGUNLOCK(request);
123
124 request = HTTPMSGLOCK(proxy_req);
125
126 /*
127 * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
128 * We might end up getting the object from somewhere else if,
129 * for example, the request to this neighbor fails.
130 */
131 if (_peer->options.proxy_only)
132 entry->releaseRequest();
133
134#if DELAY_POOLS
135
136 entry->setNoDelay(_peer->options.no_delay);
137
138#endif
139 }
140
141 /*
142 * register the handler to free HTTP state data when the FD closes
143 */
144 typedef CommCbMemFunT<HttpStateData, CommCloseCbParams> Dialer;
145 closeHandler = asyncCall(9, 5, "HttpStateData::httpStateConnClosed",
146 Dialer(this,&HttpStateData::httpStateConnClosed));
147 comm_add_close_handler(fd, closeHandler);
148}
149
150HttpStateData::~HttpStateData()
151{
152 /*
153 * don't forget that ~ServerStateData() gets called automatically
154 */
155
156 if (!readBuf->isNull())
157 readBuf->clean();
158
159 delete readBuf;
160
161 if (httpChunkDecoder)
162 delete httpChunkDecoder;
163
164 HTTPMSGUNLOCK(orig_request);
165
166 debugs(11,5, HERE << "HttpStateData " << this << " destroyed; FD " << fd);
167}
168
169int
170HttpStateData::dataDescriptor() const
171{
172 return fd;
173}
174/*
175static void
176httpStateFree(int fd, void *data)
177{
178 HttpStateData *httpState = static_cast<HttpStateData *>(data);
179 debugs(11, 5, "httpStateFree: FD " << fd << ", httpState=" << data);
180 delete httpState;
181}*/
182
183void
184HttpStateData::httpStateConnClosed(const CommCloseCbParams &params)
185{
186 debugs(11, 5, "httpStateFree: FD " << params.fd << ", httpState=" << params.data);
187 deleteThis("HttpStateData::httpStateConnClosed");
188}
189
190int
191httpCachable(const HttpRequestMethod& method)
192{
193 /* GET and HEAD are cachable. Others are not. */
194
195 // TODO: replace with HttpRequestMethod::isCachable() ?
196 if (method != METHOD_GET && method != METHOD_HEAD)
197 return 0;
198
199 /* else cachable */
200 return 1;
201}
202
203void
204HttpStateData::httpTimeout(const CommTimeoutCbParams &params)
205{
206 debugs(11, 4, "httpTimeout: FD " << fd << ": '" << entry->url() << "'" );
207
208 if (entry->store_status == STORE_PENDING) {
209 fwd->fail(errorCon(ERR_READ_TIMEOUT, HTTP_GATEWAY_TIMEOUT, fwd->request));
210 }
211
212 comm_close(fd);
213}
214
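/*
 * When the reply for a private entry carries a status implying that the
 * origin's copy changed, moved, or became forbidden, release the matching
 * public entry (and any cached HEAD variant) so stale public copies are
 * not served again.
 */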
215static void
216httpMaybeRemovePublic(StoreEntry * e, http_status status)
217{
218 int remove = 0;
219 int forbidden = 0;
220 StoreEntry *pe;
221
222 if (!EBIT_TEST(e->flags, KEY_PRIVATE))
223 return;
224
225 switch (status) {
226
227 case HTTP_OK:
228
229 case HTTP_NON_AUTHORITATIVE_INFORMATION:
230
231 case HTTP_MULTIPLE_CHOICES:
232
233 case HTTP_MOVED_PERMANENTLY:
234
235 case HTTP_MOVED_TEMPORARILY:
236
237 case HTTP_GONE:
238
239 case HTTP_NOT_FOUND:
240 remove = 1;
241
242 break;
243
244 case HTTP_FORBIDDEN:
245
246 case HTTP_METHOD_NOT_ALLOWED:
247 forbidden = 1;
248
249 break;
250
251#if WORK_IN_PROGRESS
252
253 case HTTP_UNAUTHORIZED:
254 forbidden = 1;
255
256 break;
257
258#endif
259
260 default:
261#if QUESTIONABLE
262 /*
263 * Any 2xx response should eject previously cached entities...
264 */
265
266 if (status >= 200 && status < 300)
267 remove = 1;
268
269#endif
270
271 break;
272 }
273
274 if (!remove && !forbidden)
275 return;
276
277 assert(e->mem_obj);
278
279 if (e->mem_obj->request)
280 pe = storeGetPublicByRequest(e->mem_obj->request);
281 else
282 pe = storeGetPublic(e->mem_obj->url, e->mem_obj->method);
283
284 if (pe != NULL) {
285 assert(e != pe);
286#if USE_HTCP
287 neighborsHtcpClear(e, NULL, e->mem_obj->request, e->mem_obj->method, HTCP_CLR_INVALIDATION);
288#endif
289 pe->release();
290 }
291
292 /** \par
293 * Also remove any cached HEAD response in case the object has
294 * changed.
295 */
296 if (e->mem_obj->request)
297 pe = storeGetPublicByRequestMethod(e->mem_obj->request, METHOD_HEAD);
298 else
299 pe = storeGetPublic(e->mem_obj->url, METHOD_HEAD);
300
301 if (pe != NULL) {
302 assert(e != pe);
303#if USE_HTCP
304 neighborsHtcpClear(e, NULL, e->mem_obj->request, HttpRequestMethod(METHOD_HEAD), HTCP_CLR_INVALIDATION);
305#endif
306 pe->release();
307 }
308}
309
310void
311HttpStateData::processSurrogateControl(HttpReply *reply)
312{
313 if (request->flags.accelerated && reply->surrogate_control) {
314 HttpHdrScTarget *sctusable = httpHdrScGetMergedTarget(reply->surrogate_control, Config.Accel.surrogate_id);
315
316 if (sctusable) {
317 if (EBIT_TEST(sctusable->mask, SC_NO_STORE) ||
318 (Config.onoff.surrogate_is_remote
319 && EBIT_TEST(sctusable->mask, SC_NO_STORE_REMOTE))) {
320 surrogateNoStore = true;
321 entry->makePrivate();
322 }
323
324 /* The HttpHeader logic cannot tell if the header it's parsing is a reply to an
325 * accelerated request or not...
326 * Still, this is an abstraction breach. - RC
327 */
328 if (sctusable->max_age != -1) {
329 if (sctusable->max_age < sctusable->max_stale)
330 reply->expires = reply->date + sctusable->max_age;
331 else
332 reply->expires = reply->date + sctusable->max_stale;
333
334 /* And update the timestamps */
335 entry->timestampsSet();
336 }
337
338 /* We ignore cache-control directives as per the Surrogate specification */
339 ignoreCacheControl = true;
340
341 httpHdrScTargetDestroy(sctusable);
342 }
343 }
344}
345
346int
347HttpStateData::cacheableReply()
348{
349 HttpReply const *rep = finalReply();
350 HttpHeader const *hdr = &rep->header;
351 const int cc_mask = (rep->cache_control) ? rep->cache_control->mask : 0;
352 const char *v;
353#if HTTP_VIOLATIONS
354
355 const refresh_t *R = NULL;
356
357 /* This strange-looking define first looks up the refresh pattern
358 * and then checks if the specified flag is set. Its main purpose
359 * is to simplify the combined refresh pattern lookup and HTTP_VIOLATIONS
360 * check.
361 */
362#define REFRESH_OVERRIDE(flag) \
363 ((R = (R ? R : refreshLimits(entry->mem_obj->url))) , \
364 (R && R->flags.flag))
365#else
366#define REFRESH_OVERRIDE(flag) 0
367#endif
368
369 if (surrogateNoStore)
370 return 0;
371
372 if (!ignoreCacheControl) {
373 if (EBIT_TEST(cc_mask, CC_PRIVATE)) {
374 if (!REFRESH_OVERRIDE(ignore_private))
375 return 0;
376 }
377
378 if (EBIT_TEST(cc_mask, CC_NO_CACHE)) {
379 if (!REFRESH_OVERRIDE(ignore_no_cache))
380 return 0;
381 }
382
383 if (EBIT_TEST(cc_mask, CC_NO_STORE)) {
384 if (!REFRESH_OVERRIDE(ignore_no_store))
385 return 0;
386 }
387 }
388
389 if (request->flags.auth || request->flags.auth_sent) {
390 /*
391 * Responses to requests with authorization may be cached
392 * only if a Cache-Control: public reply header is present.
393 * RFC 2068, sec 14.9.4
394 */
395
396 if (!EBIT_TEST(cc_mask, CC_PUBLIC)) {
397 if (!REFRESH_OVERRIDE(ignore_auth))
398 return 0;
399 }
400 }
401
402 /* Pragma: no-cache in _replies_ is not documented in HTTP,
403 * but servers like "Active Imaging Webcast/2.0" sure do use it */
404 if (hdr->has(HDR_PRAGMA)) {
405 String s = hdr->getList(HDR_PRAGMA);
406 const int no_cache = strListIsMember(&s, "no-cache", ',');
407 s.clean();
408
409 if (no_cache) {
410 if (!REFRESH_OVERRIDE(ignore_no_cache))
411 return 0;
412 }
413 }
414
415 /*
416 * The "multipart/x-mixed-replace" content type is used for
417 * continuous push replies. These are generally dynamic and
418 * probably should not be cachable
419 */
420 if ((v = hdr->getStr(HDR_CONTENT_TYPE)))
421 if (!strncasecmp(v, "multipart/x-mixed-replace", 25))
422 return 0;
423
424 switch (rep->sline.status) {
425 /* Responses that are cacheable */
426
427 case HTTP_OK:
428
429 case HTTP_NON_AUTHORITATIVE_INFORMATION:
430
431 case HTTP_MULTIPLE_CHOICES:
432
433 case HTTP_MOVED_PERMANENTLY:
434
435 case HTTP_GONE:
436 /*
437 * Don't cache objects that need to be refreshed on next request,
438 * unless we know how to refresh it.
439 */
440
441 if (!refreshIsCachable(entry)) {
442 debugs(22, 3, "refreshIsCachable() returned non-cacheable..");
443 return 0;
444 }
445
446 /* don't cache objects from peers w/o LMT, Date, or Expires */
447 /* check whether it is enough to check headers only @?@ */
448 if (rep->date > -1)
449 return 1;
450 else if (rep->last_modified > -1)
451 return 1;
452 else if (!_peer)
453 return 1;
454
455 /* @?@ (here and 302): invalid expires header compiles to squid_curtime */
456 else if (rep->expires > -1)
457 return 1;
458 else
459 return 0;
460
461 /* NOTREACHED */
462 break;
463
464 /* Responses that only are cacheable if the server says so */
465
466 case HTTP_MOVED_TEMPORARILY:
467 case HTTP_TEMPORARY_REDIRECT:
468 if (rep->expires > rep->date && rep->date > 0)
469 return 1;
470 else
471 return 0;
472
473 /* NOTREACHED */
474 break;
475
476 /* Errors can be negatively cached */
477
478 case HTTP_NO_CONTENT:
479
480 case HTTP_USE_PROXY:
481
482 case HTTP_BAD_REQUEST:
483
484 case HTTP_FORBIDDEN:
485
486 case HTTP_NOT_FOUND:
487
488 case HTTP_METHOD_NOT_ALLOWED:
489
490 case HTTP_REQUEST_URI_TOO_LARGE:
491
492 case HTTP_INTERNAL_SERVER_ERROR:
493
494 case HTTP_NOT_IMPLEMENTED:
495
496 case HTTP_BAD_GATEWAY:
497
498 case HTTP_SERVICE_UNAVAILABLE:
499
500 case HTTP_GATEWAY_TIMEOUT:
501 return -1;
502
503 /* NOTREACHED */
504 break;
505
506 /* Some responses can never be cached */
507
508 case HTTP_PARTIAL_CONTENT: /* Not yet supported */
509
510 case HTTP_SEE_OTHER:
511
512 case HTTP_NOT_MODIFIED:
513
514 case HTTP_UNAUTHORIZED:
515
516 case HTTP_PROXY_AUTHENTICATION_REQUIRED:
517
518 case HTTP_INVALID_HEADER: /* Squid header parsing error */
519
520 case HTTP_HEADER_TOO_LARGE:
521
522 case HTTP_PAYMENT_REQUIRED:
523 case HTTP_NOT_ACCEPTABLE:
524 case HTTP_REQUEST_TIMEOUT:
525 case HTTP_CONFLICT:
526 case HTTP_LENGTH_REQUIRED:
527 case HTTP_PRECONDITION_FAILED:
528 case HTTP_REQUEST_ENTITY_TOO_LARGE:
529 case HTTP_UNSUPPORTED_MEDIA_TYPE:
530 case HTTP_UNPROCESSABLE_ENTITY:
531 case HTTP_LOCKED:
532 case HTTP_FAILED_DEPENDENCY:
533 case HTTP_INSUFFICIENT_STORAGE:
534 case HTTP_REQUESTED_RANGE_NOT_SATISFIABLE:
535 case HTTP_EXPECTATION_FAILED:
536
537 return 0;
538
539 default:
540 /* RFC 2616 section 6.1.1: an unrecognized response MUST NOT be cached. */
541 debugs (11, 3, HERE << "Unknown HTTP status code " << rep->sline.status << ". Not cacheable.");
542
543 return 0;
544
545 /* NOTREACHED */
546 break;
547 }
548
549 /* NOTREACHED */
550}
551
552/*
553 * For Vary, store the relevant request headers as
554 * virtual headers in the reply.
555 * Returns NULL if the variance cannot be stored.
556 */
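/*
 * For example, a reply with "Vary: Accept-Encoding" answering a request that
 * sent "Accept-Encoding: gzip" yields the mark
 *
 *     accept-encoding="gzip"
 *
 * Multiple Vary members are joined with commas; header values are
 * rfc1738-escaped before being quoted.
 */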
557const char *
558httpMakeVaryMark(HttpRequest * request, HttpReply const * reply)
559{
560 String vary, hdr;
561 const char *pos = NULL;
562 const char *item;
563 const char *value;
564 int ilen;
565 static String vstr;
566
567 vstr.clean();
568 vary = reply->header.getList(HDR_VARY);
569
570 while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
571 char *name = (char *)xmalloc(ilen + 1);
572 xstrncpy(name, item, ilen + 1);
573 Tolower(name);
574
575 if (strcmp(name, "*") == 0) {
576 /* Cannot handle "Vary: *" without ETag support */
577 safe_free(name);
578 vstr.clean();
579 break;
580 }
581
582 strListAdd(&vstr, name, ',');
583 hdr = request->header.getByName(name);
584 safe_free(name);
585 value = hdr.termedBuf();
586
587 if (value) {
588 value = rfc1738_escape_part(value);
589 vstr.append("=\"", 2);
590 vstr.append(value);
591 vstr.append("\"", 1);
592 }
593
594 hdr.clean();
595 }
596
597 vary.clean();
598#if X_ACCELERATOR_VARY
599
600 pos = NULL;
601 vary = reply->header.getList(HDR_X_ACCELERATOR_VARY);
602
603 while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
604 char *name = (char *)xmalloc(ilen + 1);
605 xstrncpy(name, item, ilen + 1);
606 Tolower(name);
607 strListAdd(&vstr, name, ',');
608 hdr = request->header.getByName(name);
609 safe_free(name);
610 value = hdr.termedBuf();
611
612 if (value) {
613 value = rfc1738_escape_part(value);
614 vstr.append("=\"", 2);
615 vstr.append(value);
616 vstr.append("\"", 1);
617 }
618
619 hdr.clean();
620 }
621
622 vary.clean();
623#endif
624
625 debugs(11, 3, "httpMakeVaryMark: " << vstr);
626 return vstr.termedBuf();
627}
628
629void
630HttpStateData::keepaliveAccounting(HttpReply *reply)
631{
632 if (flags.keepalive)
633 if (_peer)
634 _peer->stats.n_keepalives_sent++;
635
636 if (reply->keep_alive) {
637 if (_peer)
638 _peer->stats.n_keepalives_recv++;
639
640 if (Config.onoff.detect_broken_server_pconns
641 && reply->bodySize(request->method) == -1 && !flags.chunked) {
642 debugs(11, 1, "keepaliveAccounting: Impossible keep-alive header from '" << entry->url() << "'" );
643 // debugs(11, 2, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------" );
644 flags.keepalive_broken = 1;
645 }
646 }
647}
648
649void
650HttpStateData::checkDateSkew(HttpReply *reply)
651{
652 if (reply->date > -1 && !_peer) {
653 int skew = abs((int)(reply->date - squid_curtime));
654
655 if (skew > 86400)
656 debugs(11, 3, "" << request->GetHost() << "'s clock is skewed by " << skew << " seconds!");
657 }
658}
659
660/**
661 * This creates the error page itself. It is likely
662 * that the forward-ported reply header max size patch
663 * generates non-HTTP-conformant error pages - in which
664 * case the errors here should be 'BAD_GATEWAY' etc.
665 */
666void
667HttpStateData::processReplyHeader()
668{
669 /** Creates a blank header. If this routine is made incremental, this will not do */
670 Ctx ctx = ctx_enter(entry->mem_obj->url);
671 debugs(11, 3, "processReplyHeader: key '" << entry->getMD5Text() << "'");
672
673 assert(!flags.headers_parsed);
674
675 http_status error = HTTP_STATUS_NONE;
676
677 HttpReply *newrep = new HttpReply;
678 const bool parsed = newrep->parse(readBuf, eof, &error);
679
680 if (!parsed && readBuf->contentSize() > 5 && strncmp(readBuf->content(), "HTTP/", 5) != 0 && strncmp(readBuf->content(), "ICY", 3) != 0) {
681 MemBuf *mb;
682 HttpReply *tmprep = new HttpReply;
683 tmprep->setHeaders(HTTP_OK, "Gatewaying", NULL, -1, -1, -1);
684 tmprep->header.putExt("X-Transformed-From", "HTTP/0.9");
685 mb = tmprep->pack();
686 newrep->parse(mb, eof, &error);
687 delete tmprep;
688 } else {
689 if (!parsed && error > 0) { // unrecoverable parsing error
690 debugs(11, 3, "processReplyHeader: Non-HTTP-compliant header: '" << readBuf->content() << "'");
691 flags.headers_parsed = 1;
692 newrep->sline.version = HttpVersion(1,0);
693 newrep->sline.status = error;
694 HttpReply *vrep = setVirginReply(newrep);
695 entry->replaceHttpReply(vrep);
696 ctx_exit(ctx);
697 return;
698 }
699
700 if (!parsed) { // need more data
701 assert(!error);
702 assert(!eof);
703 delete newrep;
704 ctx_exit(ctx);
705 return;
706 }
707
708 debugs(11, 9, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------");
709
710 header_bytes_read = headersEnd(readBuf->content(), readBuf->contentSize());
711 readBuf->consume(header_bytes_read);
712 }
713
714 /* Skip 1xx messages for now. Advertised in Via as an internal 1.0 hop */
715 if (newrep->sline.protocol == PROTO_HTTP && newrep->sline.status >= 100 && newrep->sline.status < 200) {
716
717#if WHEN_HTTP11
718 /* When HTTP/1.1 check if the client is expecting a 1xx reply and maybe pass it on */
719 if (orig_request->header.has(HDR_EXPECT)) {
720 // TODO: pass to the client anyway?
721 }
722#endif
723 delete newrep;
724 debugs(11, 2, HERE << "1xx headers consume " << header_bytes_read << " bytes header.");
725 header_bytes_read = 0;
726 if (reply_bytes_read > 0)
727 debugs(11, 2, HERE << "1xx headers consume " << reply_bytes_read << " bytes reply.");
728 reply_bytes_read = 0;
729 ctx_exit(ctx);
730 processReplyHeader();
731 return;
732 }
733
734 flags.chunked = 0;
735 if (newrep->sline.protocol == PROTO_HTTP && newrep->header.hasListMember(HDR_TRANSFER_ENCODING, "chunked", ',')) {
736 flags.chunked = 1;
737 httpChunkDecoder = new ChunkedCodingParser;
738 }
739
740 if (!peerSupportsConnectionPinning())
741 orig_request->flags.connection_auth_disabled = 1;
742
743 HttpReply *vrep = setVirginReply(newrep);
744 flags.headers_parsed = 1;
745
746 keepaliveAccounting(vrep);
747
748 checkDateSkew(vrep);
749
750 processSurrogateControl (vrep);
751
752 /** \todo IF the reply is a 1.0 reply, AND it has a Connection: Header
753 * Parse the header and remove all referenced headers
754 */
755
756 orig_request->hier.peer_reply_status = newrep->sline.status;
757
758 ctx_exit(ctx);
759
760}
761
762/**
763 * Returns true if the peer can support connection pinning.
764 */
765bool HttpStateData::peerSupportsConnectionPinning() const
766{
767 const HttpReply *rep = entry->mem_obj->getReply();
768 const HttpHeader *hdr = &rep->header;
769 bool rc;
770 String header;
771
772 if (!_peer)
773 return true;
774
775 /* If this peer does not support connection pinning (authenticated
776 connections), return false.
777 */
778 if (!_peer->connection_auth)
779 return false;
780
781 /* The peer supports connection pinning and the HTTP reply status
782 is not unauthorized, so the related connection can be pinned.
783 */
784 if (rep->sline.status != HTTP_UNAUTHORIZED)
785 return true;
786
787 /* The server responded with HTTP_UNAUTHORIZED and the peer is configured
788 with "connection-auth=on", so we know that the peer supports pinned
789 connections.
790 */
791 if (_peer->connection_auth == 1)
792 return true;
793
794 /* At this point the peer is configured with the "connection-auth=auto"
795 parameter, so we need some extra checks to decide whether we are going
796 to allow pinned connections or not.
797 */
798
799 /* If the peer is configured with originserver, just allow connection
800 pinning (Squid 2.6 behaviour).
801 */
802 if (_peer->options.originserver)
803 return true;
804
805 /* If the connection is already pinned, it is OK. */
806 if (request->flags.pinned)
807 return true;
808
809 /* Allow pinned connections only if the Proxy-support header exists in
810 the reply and its list contains "Session-Based-Authentication",
811 which means that the peer supports connection pinning.
812 */
813 if (!hdr->has(HDR_PROXY_SUPPORT))
814 return false;
815
816 header = hdr->getStrOrList(HDR_PROXY_SUPPORT);
817 /* XXX This ought to be done in a case-insensitive manner */
818 rc = (strstr(header.termedBuf(), "Session-Based-Authentication") != NULL);
819
820 return rc;
821}
822
823// Called when we have parsed (and possibly adapted) the headers but
824// have not started storing (a.k.a., sending) the body yet.
825void
826HttpStateData::haveParsedReplyHeaders()
827{
828 ServerStateData::haveParsedReplyHeaders();
829
830 Ctx ctx = ctx_enter(entry->mem_obj->url);
831 HttpReply *rep = finalReply();
832
833 if (rep->sline.status == HTTP_PARTIAL_CONTENT &&
834 rep->content_range)
835 currentOffset = rep->content_range->spec.offset;
836
837 entry->timestampsSet();
838
839 /* Check if object is cacheable or not based on reply code */
840 debugs(11, 3, "haveParsedReplyHeaders: HTTP CODE: " << rep->sline.status);
841
842 if (neighbors_do_private_keys)
843 httpMaybeRemovePublic(entry, rep->sline.status);
844
845 if (rep->header.has(HDR_VARY)
846#if X_ACCELERATOR_VARY
847 || rep->header.has(HDR_X_ACCELERATOR_VARY)
848#endif
849 ) {
850 const char *vary = httpMakeVaryMark(orig_request, rep);
851
852 if (!vary) {
853 entry->makePrivate();
854 if (!fwd->reforwardableStatus(rep->sline.status))
855 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
856 goto no_cache;
857 }
858
859 entry->mem_obj->vary_headers = xstrdup(vary);
860 }
861
862#if WIP_FWD_LOG
863 fwdStatus(fwd, s);
864
865#endif
866 /*
867 * If it's not a reply that we will re-forward, then
868 * allow the client to get it.
869 */
870 if (!fwd->reforwardableStatus(rep->sline.status))
871 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
872
873 switch (cacheableReply()) {
874
875 case 1:
876 entry->makePublic();
877 break;
878
879 case 0:
880 entry->makePrivate();
881 break;
882
883 case -1:
884
885#if HTTP_VIOLATIONS
886 if (Config.negativeTtl > 0)
887 entry->cacheNegatively();
888 else
889#endif
890 entry->makePrivate();
891
892 break;
893
894 default:
895 assert(0);
896
897 break;
898 }
899
900no_cache:
901
902 if (!ignoreCacheControl && rep->cache_control) {
903 if (EBIT_TEST(rep->cache_control->mask, CC_PROXY_REVALIDATE))
904 EBIT_SET(entry->flags, ENTRY_REVALIDATE);
905 else if (EBIT_TEST(rep->cache_control->mask, CC_MUST_REVALIDATE))
906 EBIT_SET(entry->flags, ENTRY_REVALIDATE);
907 }
908
909#if HEADERS_LOG
910 headersLog(1, 0, request->method, rep);
911
912#endif
913
914 ctx_exit(ctx);
915}
916
917HttpStateData::ConnectionStatus
918HttpStateData::statusIfComplete() const
919{
920 const HttpReply *rep = virginReply();
921 /** \par
922 * If the reply wants to close the connection, it takes precedence */
923
924 if (httpHeaderHasConnDir(&rep->header, "close"))
925 return COMPLETE_NONPERSISTENT_MSG;
926
927 /** \par
928 * If we didn't send a keep-alive request header, then this
929 * can not be a persistent connection.
930 */
931 if (!flags.keepalive)
932 return COMPLETE_NONPERSISTENT_MSG;
933
934 /** \par
935 * If we haven't sent the whole request then this can not be a persistent
936 * connection.
937 */
938 if (!flags.request_sent) {
939 debugs(11, 1, "statusIfComplete: Request not yet fully sent \"" << RequestMethodStr(orig_request->method) << " " << entry->url() << "\"" );
940 return COMPLETE_NONPERSISTENT_MSG;
941 }
942
943 /** \par
944 * What does the reply have to say about keep-alive?
945 */
946 /**
947 \bug XXX BUG?
948 * If the origin server (HTTP/1.0) does not send a keep-alive
949 * header, but keeps the connection open anyway, what happens?
950 * We'll return here and http.c waits for an EOF before changing
951 * store_status to STORE_OK. Combine this with ENTRY_FWD_HDR_WAIT
952 * and an error status code, and we might have to wait until
953 * the server times out the socket.
954 */
955 if (!rep->keep_alive)
956 return COMPLETE_NONPERSISTENT_MSG;
957
958 return COMPLETE_PERSISTENT_MSG;
959}
960
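/*
 * Decide whether the server connection may persist once this reply is done:
 * headers must be parsed, EOF must not have been hit, and either the chunked
 * body has seen its last chunk or the known body length has been read in full
 * (with no excess bytes truncated); the final word then comes from
 * statusIfComplete().
 */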
961HttpStateData::ConnectionStatus
962HttpStateData::persistentConnStatus() const
963{
964 debugs(11, 3, "persistentConnStatus: FD " << fd << " eof=" << eof);
965 const HttpReply *vrep = virginReply();
966 debugs(11, 5, "persistentConnStatus: content_length=" << vrep->content_length);
967
968 /* If we haven't seen the end of reply headers, we are not done */
969 debugs(11, 5, "persistentConnStatus: flags.headers_parsed=" << flags.headers_parsed);
970
971 if (!flags.headers_parsed)
972 return INCOMPLETE_MSG;
973
974 if (eof) // already reached EOF
975 return COMPLETE_NONPERSISTENT_MSG;
976
977 /** \par
978 * In a chunked response we do not know the content length, but we are absolutely
979 * sure about the end of the response, so we call statusIfComplete() to
980 * decide whether we can be persistent.
981 */
982 if (lastChunk && flags.chunked)
983 return statusIfComplete();
984
985 const int64_t clen = vrep->bodySize(request->method);
986
987 debugs(11, 5, "persistentConnStatus: clen=" << clen);
988
989 /* If the body size is unknown we must wait for EOF */
990 if (clen < 0)
991 return INCOMPLETE_MSG;
992
993 /** \par
994 * If the body size is known, we must wait until we've gotten all of it. */
995 if (clen > 0) {
996 // old technique:
997 // if (entry->mem_obj->endOffset() < vrep->content_length + vrep->hdr_sz)
998 const int64_t body_bytes_read = reply_bytes_read - header_bytes_read;
999 debugs(11,5, "persistentConnStatus: body_bytes_read=" <<
1000 body_bytes_read << " content_length=" << vrep->content_length);
1001
1002 if (body_bytes_read < vrep->content_length)
1003 return INCOMPLETE_MSG;
1004
1005 if (body_bytes_truncated > 0) // already read more than needed
1006 return COMPLETE_NONPERSISTENT_MSG; // disable pconns
1007 }
1008
1009 /** \par
1010 * If there is no message body or we got it all, we can be persistent */
1011 return statusIfComplete();
1012}
1013
1014/*
1015 * This is the callback after some data has been read from the network
1016 */
1017/*
1018void
1019HttpStateData::ReadReplyWrapper(int fd, char *buf, size_t len, comm_err_t flag, int xerrno, void *data)
1020{
1021 HttpStateData *httpState = static_cast<HttpStateData *>(data);
1022 assert (fd == httpState->fd);
1023 // assert(buf == readBuf->content());
1024 PROF_start(HttpStateData_readReply);
1025 httpState->readReply(len, flag, xerrno);
1026 PROF_stop(HttpStateData_readReply);
1027}
1028*/
1029
1030/* XXX this function is too long! */
1031void
1032HttpStateData::readReply(const CommIoCbParams &io)
1033{
1034 int bin;
1035 int clen;
1036 int len = io.size;
1037
1038 assert(fd == io.fd);
1039
1040 flags.do_next_read = 0;
1041
1042 debugs(11, 5, "httpReadReply: FD " << fd << ": len " << len << ".");
1043
1044 // Bail out early on COMM_ERR_CLOSING - close handlers will tidy up for us
1045 if (io.flag == COMM_ERR_CLOSING) {
1046 debugs(11, 3, "http socket closing");
1047 return;
1048 }
1049
1050 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
1051 maybeReadVirginBody();
1052 return;
1053 }
1054
1055 // handle I/O errors
1056 if (io.flag != COMM_OK || len < 0) {
1057 debugs(11, 2, "httpReadReply: FD " << fd << ": read failure: " << xstrerror() << ".");
1058
1059 if (ignoreErrno(io.xerrno)) {
1060 flags.do_next_read = 1;
1061 } else {
1062 ErrorState *err;
1063 err = errorCon(ERR_READ_ERROR, HTTP_BAD_GATEWAY, fwd->request);
1064 err->xerrno = io.xerrno;
1065 fwd->fail(err);
1066 flags.do_next_read = 0;
1067 comm_close(fd);
1068 }
1069
1070 return;
1071 }
1072
1073 // update I/O stats
1074 if (len > 0) {
1075 readBuf->appended(len);
1076 reply_bytes_read += len;
1077#if DELAY_POOLS
1078
1079 DelayId delayId = entry->mem_obj->mostBytesAllowed();
1080 delayId.bytesIn(len);
1081#endif
1082
1083 kb_incr(&statCounter.server.all.kbytes_in, len);
1084 kb_incr(&statCounter.server.http.kbytes_in, len);
1085 IOStats.Http.reads++;
1086
1087 for (clen = len - 1, bin = 0; clen; bin++)
1088 clen >>= 1;
1089
1090 IOStats.Http.read_hist[bin]++;
1091
1092 // update peer response time stats (%<pt)
1093 const timeval &sent = orig_request->hier.peer_http_request_sent;
1094 orig_request->hier.peer_response_time =
1095 sent.tv_sec ? tvSubMsec(sent, current_time) : -1;
1096 }
1097
1098 /** \par
1099 * Here the RFC says we should ignore whitespace between replies, but we can't as
1100 * doing so breaks HTTP/0.9 replies beginning with whitespace, and in addition
1101 * the response splitting countermeasures are extremely likely to trigger on this,
1102 * not allowing connection reuse in the first place.
1103 */
1104#if DONT_DO_THIS
1105 if (!flags.headers_parsed && len > 0 && fd_table[fd].uses > 1) {
1106 /* Skip whitespace between replies */
1107
1108 while (len > 0 && xisspace(*buf))
1109 xmemmove(buf, buf + 1, len--);
1110
1111 if (len == 0) {
1112 /* Continue to read... */
1113 /* Timeout NOT increased. This whitespace was from previous reply */
1114 flags.do_next_read = 1;
1115 maybeReadVirginBody();
1116 return;
1117 }
1118 }
1119
1120#endif
1121
1122 if (len == 0) { // reached EOF?
1123 eof = 1;
1124 flags.do_next_read = 0;
1125 }
1126
1127 if (!flags.headers_parsed) { // have not parsed headers yet?
1128 PROF_start(HttpStateData_processReplyHeader);
1129 processReplyHeader();
1130 PROF_stop(HttpStateData_processReplyHeader);
1131
1132 if (!continueAfterParsingHeader()) // parsing error or need more data
1133 return; // TODO: send errors to ICAP
1134
1135 adaptOrFinalizeReply();
1136 }
1137
1138 // kick more reads if needed and/or process the response body, if any
1139 PROF_start(HttpStateData_processReplyBody);
1140 processReplyBody(); // may call serverComplete()
1141 PROF_stop(HttpStateData_processReplyBody);
1142}
1143
1144/**
1145 \retval true if we can continue with processing the body or doing ICAP.
1146 */
1147bool
1148HttpStateData::continueAfterParsingHeader()
1149{
1150 if (!flags.headers_parsed && !eof) {
1151 debugs(11, 9, HERE << "needs more at " << readBuf->contentSize());
1152 flags.do_next_read = 1;
1153 /** \retval false If we have not finished parsing the headers and may get more data.
1154 * Schedules more reads to retrieve the missing data.
1155 */
1156 maybeReadVirginBody(); // schedules all kinds of reads; TODO: rename
1157 return false;
1158 }
1159
1160 /** If we are done with parsing, check for errors */
1161
1162 err_type error = ERR_NONE;
1163
1164 if (flags.headers_parsed) { // parsed headers, possibly with errors
1165 // check for header parsing errors
1166 if (HttpReply *vrep = virginReply()) {
1167 const http_status s = vrep->sline.status;
1168 const HttpVersion &v = vrep->sline.version;
1169 if (s == HTTP_INVALID_HEADER && v != HttpVersion(0,9)) {
1170 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1171 error = ERR_INVALID_RESP;
1172 } else if (s == HTTP_HEADER_TOO_LARGE) {
1173 fwd->dontRetry(true);
1174 error = ERR_TOO_BIG;
1175 } else {
1176 return true; // done parsing, got reply, and no error
1177 }
1178 } else {
1179 // parsed headers but got no reply
1180 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No reply at all for " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1181 error = ERR_INVALID_RESP;
1182 }
1183 } else {
1184 assert(eof);
1185 if (readBuf->hasContent()) {
1186 error = ERR_INVALID_RESP;
1187 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1188 } else {
1189 error = ERR_ZERO_SIZE_OBJECT;
1190 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No object data received for " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1191 }
1192 }
1193
1194 assert(error != ERR_NONE);
1195 entry->reset();
1196 fwd->fail(errorCon(error, HTTP_BAD_GATEWAY, fwd->request));
1197 flags.do_next_read = 0;
1198 comm_close(fd);
1199 return false; // quit on error
1200}
1201
1202/** truncate what we read if we read too much so that writeReplyBody()
1203 writes no more than what we should have read */
1204void
1205HttpStateData::truncateVirginBody()
1206{
1207 assert(flags.headers_parsed);
1208
1209 HttpReply *vrep = virginReply();
1210 int64_t clen = -1;
1211 if (!vrep->expectingBody(request->method, clen) || clen < 0)
1212 return; // no body or a body of unknown size, including chunked
1213
1214 const int64_t body_bytes_read = reply_bytes_read - header_bytes_read;
1215 if (body_bytes_read - body_bytes_truncated <= clen)
1216 return; // we did not read too much or already took care of the extras
1217
1218 if (const int64_t extras = body_bytes_read - body_bytes_truncated - clen) {
1219 // server sent more than the advertised content length
1220 debugs(11,5, HERE << "body_bytes_read=" << body_bytes_read <<
1221 " clen=" << clen << '/' << vrep->content_length <<
1222 " body_bytes_truncated=" << body_bytes_truncated << '+' << extras);
1223
1224 readBuf->truncate(extras);
1225 body_bytes_truncated += extras;
1226 }
1227}
1228
1229/**
1230 * Call this when there is data from the origin server
1231 * which should be sent either to the StoreEntry or to ICAP...
1232 */
1233void
1234HttpStateData::writeReplyBody()
1235{
1236 truncateVirginBody(); // if needed
1237 const char *data = readBuf->content();
1238 int len = readBuf->contentSize();
1239 addVirginReplyBody(data, len);
1240 readBuf->consume(len);
1241}
1242
1243bool
1244HttpStateData::decodeAndWriteReplyBody()
1245{
1246 const char *data = NULL;
1247 int len;
1248 bool wasThereAnException = false;
1249 assert(flags.chunked);
1250 assert(httpChunkDecoder);
1251 SQUID_ENTER_THROWING_CODE();
1252 MemBuf decodedData;
1253 decodedData.init();
1254 const bool doneParsing = httpChunkDecoder->parse(readBuf,&decodedData);
1255 len = decodedData.contentSize();
1256 data=decodedData.content();
1257 addVirginReplyBody(data, len);
1258 if (doneParsing) {
1259 lastChunk = 1;
1260 flags.do_next_read = 0;
1261 }
1262 SQUID_EXIT_THROWING_CODE(wasThereAnException);
1263 return wasThereAnException;
1264}
1265
1266/**
1267 * processReplyBody has two purposes:
1268 * 1 - take the reply body data, if any, and either put it into
1269 * the StoreEntry or give it over to ICAP.
1270 * 2 - see if we made it to the end of the response (persistent
1271 * connections and such)
1272 */
1273void
1274HttpStateData::processReplyBody()
1275{
1276 AsyncCall::Pointer call;
1277 IpAddress client_addr;
1278 bool ispinned = false;
1279
1280 if (!flags.headers_parsed) {
1281 flags.do_next_read = 1;
1282 maybeReadVirginBody();
1283 return;
1284 }
1285
1286#if USE_ADAPTATION
1287 debugs(11,5, HERE << "adaptationAccessCheckPending=" << adaptationAccessCheckPending);
1288 if (adaptationAccessCheckPending)
1289 return;
1290
1291#endif
1292
1293 /*
1294 * At this point the reply headers have been parsed and consumed.
1295 * That means header content has been removed from readBuf and
1296 * it contains only body data.
1297 */
1298 if (flags.chunked) {
1299 if (!decodeAndWriteReplyBody()) {
1300 flags.do_next_read = 0;
1301 serverComplete();
1302 return;
1303 }
1304 } else
1305 writeReplyBody();
1306
1307 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
1308 /*
1309 * The above writeReplyBody() call could ABORT this entry,
1310 * in that case, the server FD should already be closed.
1311 * there's nothing for us to do.
1312 */
1313 (void) 0;
1314 } else
1315 switch (persistentConnStatus()) {
1316 case INCOMPLETE_MSG:
1317 debugs(11, 5, "processReplyBody: INCOMPLETE_MSG");
1318 /* Wait for more data or EOF condition */
1319 if (flags.keepalive_broken) {
1320 call = NULL;
1321 commSetTimeout(fd, 10, call);
1322 } else {
1323 call = NULL;
1324 commSetTimeout(fd, Config.Timeout.read, call);
1325 }
1326
1327 flags.do_next_read = 1;
1328 break;
1329
1330 case COMPLETE_PERSISTENT_MSG:
1331 debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG");
1332 /* yes we have to clear all these! */
1333 call = NULL;
1334 commSetTimeout(fd, -1, call);
1335 flags.do_next_read = 0;
1336
1337 comm_remove_close_handler(fd, closeHandler);
1338 closeHandler = NULL;
1339 fwd->unregister(fd);
1340
1341 if (orig_request->flags.spoof_client_ip)
1342 client_addr = orig_request->client_addr;
1343
1344
1345 if (request->flags.pinned) {
1346 ispinned = true;
1347 } else if (request->flags.connection_auth && request->flags.auth_sent) {
1348 ispinned = true;
1349 }
1350
1351 if (orig_request->pinnedConnection() && ispinned) {
1352 orig_request->pinnedConnection()->pinConnection(fd, orig_request, _peer,
1353 (request->flags.connection_auth != 0));
1354 } else {
1355 fwd->pconnPush(fd, _peer, request, orig_request->GetHost(), client_addr);
1356 }
1357
1358 fd = -1;
1359
1360 serverComplete();
1361 return;
1362
1363 case COMPLETE_NONPERSISTENT_MSG:
1364 debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG");
1365 serverComplete();
1366 return;
1367 }
1368
1369 maybeReadVirginBody();
1370}
1371
1372void
1373HttpStateData::maybeReadVirginBody()
1374{
1375 // we may need to grow the buffer if headers do not fit
1376 const int minRead = flags.headers_parsed ? 0 :1024;
1377 const int read_size = replyBodySpace(*readBuf, minRead);
1378
1379 debugs(11,9, HERE << (flags.do_next_read ? "may" : "wont") <<
1380 " read up to " << read_size << " bytes from FD " << fd);
1381
1382 /*
1383 * why <2? Because delayAwareRead() won't actually read if
1384 * you ask it to read 1 byte. The delayed read request
1385 * just gets re-queued until the client side drains, then
1386 * the I/O thread hangs. Better to not register any read
1387 * handler until we get a notification from someone that
1388 * it's okay to read again.
1389 */
1390 if (read_size < 2)
1391 return;
1392
1393 if (flags.do_next_read) {
1394 flags.do_next_read = 0;
1395 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
1396 entry->delayAwareRead(fd, readBuf->space(read_size), read_size,
1397 asyncCall(11, 5, "HttpStateData::readReply",
1398 Dialer(this, &HttpStateData::readReply)));
1399 }
1400}
1401
1402/*
1403 * This will be called when request write is complete.
1404 */
1405void
1406HttpStateData::sendComplete(const CommIoCbParams &io)
1407{
1408 debugs(11, 5, "httpSendComplete: FD " << fd << ": size " << io.size << ": errflag " << io.flag << ".");
1409#if URL_CHECKSUM_DEBUG
1410
1411 entry->mem_obj->checkUrlChecksum();
1412#endif
1413
1414 if (io.size > 0) {
1415 fd_bytes(fd, io.size, FD_WRITE);
1416 kb_incr(&statCounter.server.all.kbytes_out, io.size);
1417 kb_incr(&statCounter.server.http.kbytes_out, io.size);
1418 }
1419
1420 if (io.flag == COMM_ERR_CLOSING)
1421 return;
1422
1423 if (io.flag) {
1424 ErrorState *err;
1425 err = errorCon(ERR_WRITE_ERROR, HTTP_BAD_GATEWAY, fwd->request);
1426 err->xerrno = io.xerrno;
1427 fwd->fail(err);
1428 comm_close(fd);
1429 return;
1430 }
1431
1432 /*
1433 * Set the read timeout here because it hasn't been set yet.
1434 * We only set the read timeout after the request has been
1435 * fully written to the server-side. If we start the timeout
1436 * after connection establishment, then we are likely to hit
1437 * the timeout for POST/PUT requests that have very large
1438 * request bodies.
1439 */
1440 typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
1441 AsyncCall::Pointer timeoutCall = asyncCall(11, 5, "HttpStateData::httpTimeout",
1442 TimeoutDialer(this,&HttpStateData::httpTimeout));
1443
1444 commSetTimeout(fd, Config.Timeout.read, timeoutCall);
1445
1446 flags.request_sent = 1;
1447
1448 orig_request->hier.peer_http_request_sent = current_time;
1449}
1450
1451// Close the HTTP server connection. Used by serverComplete().
1452void
1453HttpStateData::closeServer()
1454{
1455 debugs(11,5, HERE << "closing HTTP server FD " << fd << " this " << this);
1456
1457 if (fd >= 0) {
1458 fwd->unregister(fd);
1459 comm_remove_close_handler(fd, closeHandler);
1460 closeHandler = NULL;
1461 comm_close(fd);
1462 fd = -1;
1463 }
1464}
1465
1466bool
1467HttpStateData::doneWithServer() const
1468{
1469 return fd < 0;
1470}
1471
1472
1473/*
1474 * Fixup authentication request headers for special cases
1475 */
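/*
 * Sketch of the '*' login mode handled below (user and password are
 * illustrative): with a peer_login of "*:secret" and an authenticated user
 * "alice", the header sent upstream becomes
 *
 *     Proxy-Authorization: Basic base64("alice:secret")
 *
 * (or Authorization: when flags.originpeer is set).
 */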
1476static void
1477httpFixupAuthentication(HttpRequest * request, HttpRequest * orig_request, const HttpHeader * hdr_in, HttpHeader * hdr_out, http_state_flags flags)
1478{
1479 http_hdr_type header = flags.originpeer ? HDR_AUTHORIZATION : HDR_PROXY_AUTHORIZATION;
1480
1481 /* Nothing to do unless we are forwarding to a peer */
1482 if (!request->flags.proxying)
1483 return;
1484
1485 /* Needs to be explicitly enabled */
1486 if (!orig_request->peer_login)
1487 return;
1488
1489 /* Maybe already dealt with? */
1490 if (hdr_out->has(header))
1491 return;
1492
1493 /* Nothing to do here for PASSTHRU */
1494 if (strcmp(orig_request->peer_login, "PASSTHRU") == 0)
1495 return;
1496
1497 /* PROXYPASS is a special case: single sign-on to servers with the proxy password (Basic only) */
1498 if (flags.originpeer && strcmp(orig_request->peer_login, "PROXYPASS") == 0 && hdr_in->has(HDR_PROXY_AUTHORIZATION)) {
1499 const char *auth = hdr_in->getStr(HDR_PROXY_AUTHORIZATION);
1500
1501 if (auth && strncasecmp(auth, "basic ", 6) == 0) {
1502 hdr_out->putStr(header, auth);
1503 return;
1504 }
1505 }
1506
1507 /* Special mode to pass the username to the upstream cache */
1508 if (*orig_request->peer_login == '*') {
1509 char loginbuf[256];
1510 const char *username = "-";
1511
1512 if (orig_request->extacl_user.size())
1513 username = orig_request->extacl_user.termedBuf();
1514 else if (orig_request->auth_user_request)
1515 username = orig_request->auth_user_request->username();
1516
1517 snprintf(loginbuf, sizeof(loginbuf), "%s%s", username, orig_request->peer_login + 1);
1518
1519 httpHeaderPutStrf(hdr_out, header, "Basic %s",
1520 base64_encode(loginbuf));
1521 return;
1522 }
1523
1524 /* external_acl provided credentials */
1525 if (orig_request->extacl_user.size() && orig_request->extacl_passwd.size() &&
1526 (strcmp(orig_request->peer_login, "PASS") == 0 ||
1527 strcmp(orig_request->peer_login, "PROXYPASS") == 0)) {
1528 char loginbuf[256];
1529 snprintf(loginbuf, sizeof(loginbuf), SQUIDSTRINGPH ":" SQUIDSTRINGPH,
1530 SQUIDSTRINGPRINT(orig_request->extacl_user),
1531 SQUIDSTRINGPRINT(orig_request->extacl_passwd));
1532 httpHeaderPutStrf(hdr_out, header, "Basic %s",
1533 base64_encode(loginbuf));
1534 return;
1535 }
1536
1537 /* Kerberos login to peer */
1538#if HAVE_KRB5 && HAVE_GSSAPI
1539 if (strncmp(orig_request->peer_login, "NEGOTIATE",strlen("NEGOTIATE")) == 0) {
1540 char *Token=NULL;
1541 char *PrincipalName=NULL,*p;
1542 if ((p=strchr(orig_request->peer_login,':')) != NULL ) {
1543 PrincipalName=++p;
1544 }
1545 Token = peer_proxy_negotiate_auth(PrincipalName,request->peer_host);
1546 if (Token) {
1547 httpHeaderPutStrf(hdr_out, HDR_PROXY_AUTHORIZATION, "Negotiate %s",Token);
1548 }
1549 return;
1550 }
1551#endif /* HAVE_KRB5 && HAVE_GSSAPI */
1552
1553 httpHeaderPutStrf(hdr_out, header, "Basic %s",
1554 base64_encode(orig_request->peer_login));
1555 return;
1556}
1557
1558/*
1559 * build request headers and append them to a given MemBuf
1560 * used by buildRequestPrefix()
1561 * note: initialises the HttpHeader; the caller is responsible for Clean()-ing
1562 */
1563void
1564HttpStateData::httpBuildRequestHeader(HttpRequest * request,
1565 HttpRequest * orig_request,
1566 StoreEntry * entry,
1567 HttpHeader * hdr_out,
1568 http_state_flags flags)
1569{
1570 /* building buffer for complex strings */
1571#define BBUF_SZ (MAX_URL+32)
1572 LOCAL_ARRAY(char, bbuf, BBUF_SZ);
1573 LOCAL_ARRAY(char, ntoabuf, MAX_IPSTRLEN);
1574 const HttpHeader *hdr_in = &orig_request->header;
1575 const HttpHeaderEntry *e = NULL;
1576 HttpHeaderPos pos = HttpHeaderInitPos;
1577 assert (hdr_out->owner == hoRequest);
1578
1579 /* append our IMS header */
1580 if (request->lastmod > -1)
1581 hdr_out->putTime(HDR_IF_MODIFIED_SINCE, request->lastmod);
1582
1583 bool we_do_ranges = decideIfWeDoRanges (orig_request);
1584
1585 String strConnection (hdr_in->getList(HDR_CONNECTION));
1586
1587 while ((e = hdr_in->getEntry(&pos)))
1588 copyOneHeaderFromClientsideRequestToUpstreamRequest(e, strConnection, request, orig_request, hdr_out, we_do_ranges, flags);
1589
1590 /* Abstraction break: We should interpret multipart/byterange responses
1591 * into offset-length data, and this works around our inability to do so.
1592 */
1593 if (!we_do_ranges && orig_request->multipartRangeRequest()) {
1594 /* don't cache the result */
1595 orig_request->flags.cachable = 0;
1596 /* pretend it's not a range request */
1597 delete orig_request->range;
1598 orig_request->range = NULL;
1599 orig_request->flags.range = 0;
1600 }
1601
1602 /* append Via */
1603 if (Config.onoff.via) {
1604 String strVia;
1605 strVia = hdr_in->getList(HDR_VIA);
1606 snprintf(bbuf, BBUF_SZ, "%d.%d %s",
1607 orig_request->http_ver.major,
1608 orig_request->http_ver.minor, ThisCache);
1609 strListAdd(&strVia, bbuf, ',');
1610 hdr_out->putStr(HDR_VIA, strVia.termedBuf());
1611 strVia.clean();
1612 }
1613
1614 if (orig_request->flags.accelerated) {
1615 /* Append Surrogate-Capabilities */
1616 String strSurrogate(hdr_in->getList(HDR_SURROGATE_CAPABILITY));
1617#if USE_SQUID_ESI
1618 snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0 ESI/1.0\"", Config.Accel.surrogate_id);
1619#else
1620 snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0\"", Config.Accel.surrogate_id);
1621#endif
1622 strListAdd(&strSurrogate, bbuf, ',');
1623 hdr_out->putStr(HDR_SURROGATE_CAPABILITY, strSurrogate.termedBuf());
1624 }
1625
1626 /** \par Handle X-Forwarded-For */
1627 if (strcmp(opt_forwarded_for, "delete") != 0) {
1628
1629 String strFwd = hdr_in->getList(HDR_X_FORWARDED_FOR);
1630
1631 if (strFwd.size() > 65536/2) {
1632 // There is probably a forwarding loop with Via detection disabled.
1633 // If we do nothing, String will assert on overflow soon.
1634 // TODO: Terminate all transactions with huge XFF?
1635 strFwd = "error";
1636
1637 static int warnedCount = 0;
1638 if (warnedCount++ < 100) {
1639 const char *url = entry ? entry->url() : urlCanonical(orig_request);
1640 debugs(11, 1, "Warning: likely forwarding loop with " << url);
1641 }
1642 }
1643
1644 if (strcmp(opt_forwarded_for, "on") == 0) {
1645 /** If set to ON - append client IP or 'unknown'. */
1646 if ( orig_request->client_addr.IsNoAddr() )
1647 strListAdd(&strFwd, "unknown", ',');
1648 else
1649 strListAdd(&strFwd, orig_request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN), ',');
1650 } else if (strcmp(opt_forwarded_for, "off") == 0) {
1651 /** If set to OFF - append 'unknown'. */
1652 strListAdd(&strFwd, "unknown", ',');
1653 } else if (strcmp(opt_forwarded_for, "transparent") == 0) {
1654 /** If set to TRANSPARENT - pass through unchanged. */
1655 } else if (strcmp(opt_forwarded_for, "truncate") == 0) {
1656 /** If set to TRUNCATE - drop existing list and replace with client IP or 'unknown'. */
1657 if ( orig_request->client_addr.IsNoAddr() )
1658 strFwd = "unknown";
1659 else
1660 strFwd = orig_request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN);
1661 }
1662 if (strFwd.size() > 0)
1663 hdr_out->putStr(HDR_X_FORWARDED_FOR, strFwd.termedBuf());
1664 }
1665 /** If set to DELETE - do not copy through. */
1666
1667 /* append Host if not there already */
1668 if (!hdr_out->has(HDR_HOST)) {
1669 if (orig_request->peer_domain) {
1670 hdr_out->putStr(HDR_HOST, orig_request->peer_domain);
1671 } else if (orig_request->port == urlDefaultPort(orig_request->protocol)) {
1672 /* use port# only if not default */
1673 hdr_out->putStr(HDR_HOST, orig_request->GetHost());
1674 } else {
1675 httpHeaderPutStrf(hdr_out, HDR_HOST, "%s:%d",
1676 orig_request->GetHost(),
1677 (int) orig_request->port);
1678 }
1679 }
1680
1681 /* append Authorization if known in URL, not in header and going direct */
1682 if (!hdr_out->has(HDR_AUTHORIZATION)) {
1683 if (!request->flags.proxying && *request->login) {
1684 httpHeaderPutStrf(hdr_out, HDR_AUTHORIZATION, "Basic %s",
1685 base64_encode(request->login));
1686 }
1687 }
1688
1689 /* Fixup (Proxy-)Authorization special cases. Plain relaying dealt with above */
1690 httpFixupAuthentication(request, orig_request, hdr_in, hdr_out, flags);
1691
1692 /* append Cache-Control, add max-age if not there already */
1693 {
1694 HttpHdrCc *cc = hdr_in->getCc();
1695
1696 if (!cc)
1697 cc = httpHdrCcCreate();
1698
1699#if 0 /* see bug 2330 */
1700 /* Set no-cache if determined needed but not found */
1701 if (orig_request->flags.nocache)
1702 EBIT_SET(cc->mask, CC_NO_CACHE);
1703#endif
1704
1705 /* Add max-age only without no-cache */
1706 if (!EBIT_TEST(cc->mask, CC_MAX_AGE) && !EBIT_TEST(cc->mask, CC_NO_CACHE)) {
1707 const char *url =
1708 entry ? entry->url() : urlCanonical(orig_request);
1709 httpHdrCcSetMaxAge(cc, getMaxAge(url));
1710
1711 if (request->urlpath.size())
1712 assert(strstr(url, request->urlpath.termedBuf()));
1713 }
1714
1715 /* Enforce sibling relations */
1716 if (flags.only_if_cached)
1717 EBIT_SET(cc->mask, CC_ONLY_IF_CACHED);
1718
1719 hdr_out->putCc(cc);
1720
1721 httpHdrCcDestroy(cc);
1722 }
1723
1724 /* maybe append Connection: keep-alive */
1725 if (flags.keepalive) {
1726 if (flags.proxying) {
1727 hdr_out->putStr(HDR_PROXY_CONNECTION, "keep-alive");
1728 } else {
1729 hdr_out->putStr(HDR_CONNECTION, "keep-alive");
1730 }
1731 }
1732
1733 /* append Front-End-Https */
1734 if (flags.front_end_https) {
1735 if (flags.front_end_https == 1 || request->protocol == PROTO_HTTPS)
1736 hdr_out->putStr(HDR_FRONT_END_HTTPS, "On");
1737 }
1738
1739 /* Now mangle the headers. */
1740 if (Config2.onoff.mangle_request_headers)
1741 httpHdrMangleList(hdr_out, request, ROR_REQUEST);
1742
1743 strConnection.clean();
1744}
1745
1746/**
1747 * Decides whether a particular header may be cloned from the received Clients request
1748 * to our outgoing fetch request.
1749 */
1750void
1751copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, HttpRequest * request, const HttpRequest * orig_request, HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags flags)
1752{
1753 debugs(11, 5, "httpBuildRequestHeader: " << e->name << ": " << e->value );
1754
1755 switch (e->id) {
1756
1757 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid should not pass on. */
1758
1759 case HDR_PROXY_AUTHORIZATION:
1760 /** \par Proxy-Authorization:
1761 * Only pass on proxy authentication to peers for which
1762 * authentication forwarding is explicitly enabled
1763 */
1764 if (!flags.originpeer && flags.proxying && orig_request->peer_login &&
1765 (strcmp(orig_request->peer_login, "PASS") == 0 ||
1766 strcmp(orig_request->peer_login, "PROXYPASS") == 0 ||
1767 strcmp(orig_request->peer_login, "PASSTHRU") == 0)) {
1768 hdr_out->addEntry(e->clone());
1769 }
1770 break;
1771
1772 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid does not pass on. */
1773
1774 case HDR_CONNECTION: /** \par Connection: */
1775 case HDR_TE: /** \par TE: */
1776 case HDR_KEEP_ALIVE: /** \par Keep-Alive: */
1777 case HDR_PROXY_AUTHENTICATE: /** \par Proxy-Authenticate: */
1778 case HDR_TRAILERS: /** \par Trailers: */
1779 case HDR_UPGRADE: /** \par Upgrade: */
1780 case HDR_TRANSFER_ENCODING: /** \par Transfer-Encoding: */
1781 break;
1782
1783
1784 /** \par OTHER headers I haven't bothered to track down yet. */
1785
1786 case HDR_AUTHORIZATION:
1787 /** \par Authorization:
1788 * Pass on WWW authentication */
1789
1790 if (!flags.originpeer) {
1791 hdr_out->addEntry(e->clone());
1792 } else {
1793 /** \note In accelerators, only forward authentication if enabled
1794 * (see also httpFixupAuthentication for special cases)
1795 */
1796 if (orig_request->peer_login &&
1797 (strcmp(orig_request->peer_login, "PASS") == 0 ||
1798 strcmp(orig_request->peer_login, "PASSTHRU") == 0 ||
1799 strcmp(orig_request->peer_login, "PROXYPASS") == 0)) {
1800 hdr_out->addEntry(e->clone());
1801 }
1802 }
1803
1804 break;
1805
1806 case HDR_HOST:
1807 /** \par Host:
1808 * Normally Squid rewrites the Host: header.
1809 * However, there is one case when we don't: If the URL
1810 * went through our redirector and the admin configured
1811 * 'redir_rewrites_host' to be off.
1812 */
1813 if (orig_request->peer_domain)
1814 hdr_out->putStr(HDR_HOST, orig_request->peer_domain);
1815 else if (request->flags.redirected && !Config.onoff.redir_rewrites_host)
1816 hdr_out->addEntry(e->clone());
1817 else {
1818 /* use port# only if not default */
1819
1820 if (orig_request->port == urlDefaultPort(orig_request->protocol)) {
1821 hdr_out->putStr(HDR_HOST, orig_request->GetHost());
1822 } else {
1823 httpHeaderPutStrf(hdr_out, HDR_HOST, "%s:%d",
1824 orig_request->GetHost(),
1825 (int) orig_request->port);
1826 }
1827 }
1828
1829 break;
1830
1831 case HDR_IF_MODIFIED_SINCE:
1832 /** \par If-Modified-Since:
1833 * append unless we added our own;
1834 * \note at most one client's ims header can pass through */
1835
1836 if (!hdr_out->has(HDR_IF_MODIFIED_SINCE))
1837 hdr_out->addEntry(e->clone());
1838
1839 break;
1840
1841 case HDR_MAX_FORWARDS:
1842 /** \par Max-Forwards:
1843 * pass only on TRACE or OPTIONS requests */
1844 if (orig_request->method == METHOD_TRACE || orig_request->method == METHOD_OPTIONS) {
1845 const int64_t hops = e->getInt64();
1846
1847 if (hops > 0)
1848 hdr_out->putInt64(HDR_MAX_FORWARDS, hops - 1);
1849 }
1850
1851 break;
1852
1853 case HDR_VIA:
1854 /** \par Via:
1855 * If Via is disabled then forward any received header as-is.
1856 * Otherwise leave it out; an updated Via is appended explicitly later. */
1857
1858 if (!Config.onoff.via)
1859 hdr_out->addEntry(e->clone());
1860
1861 break;
1862
1863 case HDR_RANGE:
1864
1865 case HDR_IF_RANGE:
1866
1867 case HDR_REQUEST_RANGE:
1868 /** \par Range:, If-Range:, Request-Range:
1869 * Only pass if we accept ranges */
1870 if (!we_do_ranges)
1871 hdr_out->addEntry(e->clone());
1872
1873 break;
1874
1875 case HDR_PROXY_CONNECTION:
1876
1877 case HDR_X_FORWARDED_FOR:
1878
1879 case HDR_CACHE_CONTROL:
1880 /** \par Proxy-Connection:, X-Forwarded-For:, Cache-Control:
1881 * handled specially by Squid, so leave off for now.
1882 * append these after the loop if needed */
1883 break;
1884
1885 case HDR_FRONT_END_HTTPS:
1886 /** \par Front-End-Https:
1887 * Pass thru only if peer is configured with front-end-https */
1888 if (!flags.front_end_https)
1889 hdr_out->addEntry(e->clone());
1890
1891 break;
1892
1893 default:
1894 /** \par default.
1895 * pass on all other header fields
1896 * which are NOT listed by the special Connection: header. */
1897
1898 if (strConnection.size()>0 && strListIsMember(&strConnection, e->name.termedBuf(), ',')) {
1899 debugs(11, 2, "'" << e->name << "' header cropped by Connection: definition");
1900 return;
1901 }
1902
1903 hdr_out->addEntry(e->clone());
1904 }
1905}
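/*
 * Editor's sketch (an assumption-laden illustration, not Squid API): the
 * Connection-header filtering that the default case above delegates to
 * strListIsMember(), restated with standard library types only. For example,
 * namedInConnectionHeader("keep-alive, Foo", "foo") would return true.
 * Wrapped in "#if 0" so it is never compiled into Squid.
 */
#if 0
#include <strings.h>   /* strcasecmp() */
#include <sstream>
#include <string>

/* Return true if headerName appears in the comma-separated Connection:
 * value, compared case-insensitively; such hop-by-hop headers must not be
 * forwarded to the next hop. */
static bool
namedInConnectionHeader(const std::string &connectionValue, const std::string &headerName)
{
    std::istringstream tokens(connectionValue);
    std::string token;

    while (std::getline(tokens, token, ',')) {
        const std::string::size_type b = token.find_first_not_of(" \t");
        const std::string::size_type e = token.find_last_not_of(" \t");

        if (b == std::string::npos)
            continue;   /* empty list element */

        if (strcasecmp(token.substr(b, e - b + 1).c_str(), headerName.c_str()) == 0)
            return true;
    }

    return false;
}
#endif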
1906
1907bool
1908HttpStateData::decideIfWeDoRanges (HttpRequest * orig_request)
1909{
1910 bool result = true;
1911    /* decide if we want to do Ranges ourselves
1912     * (and fetch the whole object now).
1913 * We want to handle Ranges ourselves iff
1914 * - we can actually parse client Range specs
1915 * - the specs are expected to be simple enough (e.g. no out-of-order ranges)
1916 * - reply will be cachable
1917 * (If the reply will be uncachable we have to throw it away after
1918 * serving this request, so it is better to forward ranges to
1919 * the server and fetch only the requested content)
1920 */
1921
1922 if (NULL == orig_request->range || !orig_request->flags.cachable
1923 || orig_request->range->offsetLimitExceeded() || orig_request->flags.connection_auth)
1924 result = false;
1925
1926 debugs(11, 8, "decideIfWeDoRanges: range specs: " <<
1927 orig_request->range << ", cachable: " <<
1928 orig_request->flags.cachable << "; we_do_ranges: " << result);
1929
1930 return result;
1931}
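/*
 * Editor's note: per the rules above, a client "Range: bytes=0-99" on an
 * otherwise cachable request (within the configured offset limit) is absorbed
 * here (we_do_ranges == true) and the whole object is fetched, while the same
 * header on an uncachable or connection-authenticated request is forwarded
 * upstream unchanged.
 */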
1932
1933/* build request prefix and append it to a given MemBuf;
1934 * return the length of the prefix */
1935mb_size_t
1936HttpStateData::buildRequestPrefix(HttpRequest * aRequest,
1937 HttpRequest * original_request,
1938 StoreEntry * sentry,
1939 MemBuf * mb,
1940 http_state_flags stateFlags)
1941{
1942 const int offset = mb->size;
1943 HttpVersion httpver(1,1);
1944 mb->Printf("%s %s HTTP/%d.%d\r\n",
1945 RequestMethodStr(aRequest->method),
1946 aRequest->urlpath.size() ? aRequest->urlpath.termedBuf() : "/",
1947 httpver.major,httpver.minor);
1948 /* build and pack headers */
1949 {
1950 HttpHeader hdr(hoRequest);
1951 Packer p;
1952 httpBuildRequestHeader(aRequest, original_request, sentry, &hdr, stateFlags);
1953
1954 if (aRequest->flags.pinned && aRequest->flags.connection_auth)
1955 aRequest->flags.auth_sent = 1;
1956 else if (hdr.has(HDR_AUTHORIZATION))
1957 aRequest->flags.auth_sent = 1;
1958
1959 packerToMemInit(&p, mb);
1960 hdr.packInto(&p);
1961 hdr.clean();
1962 packerClean(&p);
1963 }
1964 /* append header terminator */
1965 mb->append(crlf, 2);
1966 return mb->size - offset;
1967}
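/*
 * Editor's note: the prefix assembled above is the request line plus the
 * packed headers plus a blank line, e.g. (header set shown is illustrative
 * only; the actual headers come from httpBuildRequestHeader()):
 *
 *   GET /index.html HTTP/1.1\r\n
 *   Host: www.example.com\r\n
 *   Via: 1.1 proxy.example.net (squid)\r\n
 *   \r\n
 */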
1968
1969/* This will be called when connect completes. Write request. */
1970bool
1971HttpStateData::sendRequest()
1972{
1973 MemBuf mb;
1974
1975 debugs(11, 5, "httpSendRequest: FD " << fd << ", request " << request << ", this " << this << ".");
1976 typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
1977 AsyncCall::Pointer timeoutCall = asyncCall(11, 5, "HttpStateData::httpTimeout",
1978 TimeoutDialer(this,&HttpStateData::httpTimeout));
1979 commSetTimeout(fd, Config.Timeout.lifetime, timeoutCall);
1980 flags.do_next_read = 1;
1981 maybeReadVirginBody();
1982
1983 if (orig_request->body_pipe != NULL) {
1984 if (!startRequestBodyFlow()) // register to receive body data
1985 return false;
1986 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
1987 Dialer dialer(this, &HttpStateData::sentRequestBody);
1988 requestSender = asyncCall(11,5, "HttpStateData::sentRequestBody", dialer);
1989 } else {
1990 assert(!requestBodySource);
1991 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
1992 Dialer dialer(this, &HttpStateData::sendComplete);
1993 requestSender = asyncCall(11,5, "HttpStateData::SendComplete", dialer);
1994 }
1995
1996 if (_peer != NULL) {
1997 if (_peer->options.originserver) {
1998 flags.proxying = 0;
1999 flags.originpeer = 1;
2000 } else {
2001 flags.proxying = 1;
2002 flags.originpeer = 0;
2003 }
2004 } else {
2005 flags.proxying = 0;
2006 flags.originpeer = 0;
2007 }
2008
2009 /*
2010 * Is keep-alive okay for all request methods?
2011 */
2012 if (orig_request->flags.must_keepalive)
2013 flags.keepalive = 1;
2014 else if (!Config.onoff.server_pconns)
2015 flags.keepalive = 0;
2016 else if (_peer == NULL)
2017 flags.keepalive = 1;
2018 else if (_peer->stats.n_keepalives_sent < 10)
2019 flags.keepalive = 1;
2020 else if ((double) _peer->stats.n_keepalives_recv /
2021 (double) _peer->stats.n_keepalives_sent > 0.50)
2022 flags.keepalive = 1;
2023
2024 if (_peer) {
2025 if (neighborType(_peer, request) == PEER_SIBLING &&
2026 !_peer->options.allow_miss)
2027 flags.only_if_cached = 1;
2028
2029 flags.front_end_https = _peer->front_end_https;
2030 }
2031
2032 mb.init();
2033 request->peer_host=_peer?_peer->host:NULL;
2034 buildRequestPrefix(request, orig_request, entry, &mb, flags);
2035 debugs(11, 6, "httpSendRequest: FD " << fd << ":\n" << mb.buf);
2036 comm_write_mbuf(fd, &mb, requestSender);
2037
2038 return true;
2039}
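/*
 * Editor's sketch (assumption, not Squid API): the peer keep-alive decision
 * made in sendRequest() above, restated as a standalone helper. The
 * 10-request warm-up and the 50% acknowledgement ratio mirror the checks on
 * _peer->stats; the struct and function names are hypothetical. Wrapped in
 * "#if 0" so it is never compiled into Squid.
 */
#if 0
struct PeerKeepAliveStats {
    unsigned long n_keepalives_sent;   /* keep-alive offers we made to the peer */
    unsigned long n_keepalives_recv;   /* keep-alive responses the peer returned */
};

static bool
peerLooksPersistent(const PeerKeepAliveStats &stats)
{
    /* Give a new peer the benefit of the doubt for its first few requests. */
    if (stats.n_keepalives_sent < 10)
        return true;

    /* Afterwards, require that at least half of our keep-alive offers were
     * honoured before we keep asking for persistent connections. */
    return (double) stats.n_keepalives_recv / (double) stats.n_keepalives_sent > 0.50;
}
#endif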
2040
2041void
2042httpStart(FwdState *fwd)
2043{
2044 debugs(11, 3, "httpStart: \"" << RequestMethodStr(fwd->request->method) << " " << fwd->entry->url() << "\"" );
2045 HttpStateData *httpState = new HttpStateData(fwd);
2046
2047 if (!httpState->sendRequest()) {
2048 debugs(11, 3, "httpStart: aborted");
2049 delete httpState;
2050 return;
2051 }
2052
2053 statCounter.server.all.requests++;
2054 statCounter.server.http.requests++;
2055
2056 /*
2057 * We used to set the read timeout here, but not any more.
2058     * Now it's set in httpSendComplete() after the full request,
2059 * including request body, has been written to the server.
2060 */
2061}
2062
2063void
2064HttpStateData::doneSendingRequestBody()
2065{
2066 debugs(11,5, HERE << "doneSendingRequestBody: FD " << fd);
2067
2068#if HTTP_VIOLATIONS
2069 if (Config.accessList.brokenPosts) {
2070 ACLFilledChecklist ch(Config.accessList.brokenPosts, request, NULL);
2071 if (!ch.fastCheck()) {
2072 debugs(11, 5, "doneSendingRequestBody: didn't match brokenPosts");
2073 CommIoCbParams io(NULL);
2074 io.fd=fd;
2075 io.flag=COMM_OK;
2076 sendComplete(io);
2077 } else {
2078 debugs(11, 2, "doneSendingRequestBody: matched brokenPosts");
2079 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2080 Dialer dialer(this, &HttpStateData::sendComplete);
2081 AsyncCall::Pointer call= asyncCall(11,5, "HttpStateData::SendComplete", dialer);
2082 comm_write(fd, "\r\n", 2, call);
2083 }
2084 return;
2085 }
2086 debugs(11, 5, "doneSendingRequestBody: No brokenPosts list");
2087#endif /* HTTP_VIOLATIONS */
2088
2089 CommIoCbParams io(NULL);
2090 io.fd=fd;
2091 io.flag=COMM_OK;
2092 sendComplete(io);
2093}
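/*
 * Editor's note: the extra CRLF written above is only sent for requests that
 * match the squid.conf broken_posts access list, e.g. (hypothetical ACL and
 * URL):
 *
 *   acl buggyCgi url_regex ^http://legacy\.example\.com/cgi-bin/
 *   broken_posts allow buggyCgi
 */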
2094
2095// more origin request body data is available
2096void
2097HttpStateData::handleMoreRequestBodyAvailable()
2098{
2099 if (eof || fd < 0) {
2100        // XXX: we should check this condition in other callbacks too!
2101 // TODO: Check whether this can actually happen: We should unsubscribe
2102 // as a body consumer when the above condition(s) are detected.
2103 debugs(11, 1, HERE << "Transaction aborted while reading HTTP body");
2104 return;
2105 }
2106
2107 assert(requestBodySource != NULL);
2108
2109 if (requestBodySource->buf().hasContent()) {
2110        // XXX: why doesn't this trigger a debug message on every request?
2111
2112 if (flags.headers_parsed && !flags.abuse_detected) {
2113 flags.abuse_detected = 1;
2114 debugs(11, 1, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << orig_request->client_addr << "' -> '" << entry->url() << "'" );
2115
2116 if (virginReply()->sline.status == HTTP_INVALID_HEADER) {
2117 comm_close(fd);
2118 return;
2119 }
2120 }
2121 }
2122
2123    ServerStateData::handleMoreRequestBodyAvailable();
2124}
2125
2126// premature end of the request body
2127void
2128HttpStateData::handleRequestBodyProducerAborted()
2129{
2130 ServerStateData::handleRequestBodyProducerAborted();
2131 // XXX: SendComplete(COMM_ERR_CLOSING) does little. Is it enough?
2132 CommIoCbParams io(NULL);
2133 io.fd=fd;
2134 io.flag=COMM_ERR_CLOSING;
2135 sendComplete(io);
2136}
2137
2138// called when we wrote request headers(!) or a part of the body
2139void
2140HttpStateData::sentRequestBody(const CommIoCbParams &io)
2141{
2142 if (io.size > 0)
2143 kb_incr(&statCounter.server.http.kbytes_out, io.size);
2144
2145 ServerStateData::sentRequestBody(io);
2146}
2147
2148// Quickly abort the transaction
2149// TODO: destruction should be sufficient as the destructor should clean up,
2150// including canceling close handlers
2151void
2152HttpStateData::abortTransaction(const char *reason)
2153{
2154 debugs(11,5, HERE << "aborting transaction for " << reason <<
2155 "; FD " << fd << ", this " << this);
2156
2157 if (fd >= 0) {
2158 comm_close(fd);
2159 return;
2160 }
2161
2162 fwd->handleUnregisteredServerEnd();
2163 deleteThis("HttpStateData::abortTransaction");
2164}
2165
2166HttpRequest *
2167HttpStateData::originalRequest()
2168{
2169 return orig_request;
2170}