1
2 /*
3 * $Id$
4 *
5 * DEBUG: section 11 Hypertext Transfer Protocol (HTTP)
6 * AUTHOR: Harvest Derived
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 /*
37 * Anonymizing patch by lutz@as-node.jena.thur.de
38 * have a look into http-anon.c to get more information.
39 */
40
41 #include "squid.h"
42
43 #include "acl/FilledChecklist.h"
44 #include "auth/UserRequest.h"
45 #include "base/TextException.h"
46 #if DELAY_POOLS
47 #include "DelayPools.h"
48 #endif
49 #include "errorpage.h"
50 #include "fde.h"
51 #include "http.h"
52 #include "HttpHdrContRange.h"
53 #include "HttpHdrSc.h"
54 #include "HttpHdrScTarget.h"
55 #include "HttpReply.h"
56 #include "HttpRequest.h"
57 #include "MemBuf.h"
58 #include "MemObject.h"
59 #include "protos.h"
60 #include "rfc1738.h"
61 #include "SquidTime.h"
62 #include "Store.h"
63
64
65 #define SQUID_ENTER_THROWING_CODE() try {
66 #define SQUID_EXIT_THROWING_CODE(status) \
67 status = true; \
68 } \
69 catch (const std::exception &e) { \
70 debugs (11, 1, "Exception error:" << e.what()); \
71 status = false; \
72 }
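/*
 * Illustrative sketch (not part of the build): wrapping a block in the two
 * macros above is meant to expand roughly to
 *
 *     try {
 *         ... the wrapped statements ...
 *         status = true;
 *     } catch (const std::exception &e) {
 *         debugs(11, 1, "Exception error:" << e.what());
 *         status = false;
 *     }
 *
 * decodeAndWriteReplyBody() below uses the pair this way around the chunked
 * decoding step.
 */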
73
74 CBDATA_CLASS_INIT(HttpStateData);
75
76 static const char *const crlf = "\r\n";
77
78 static void httpMaybeRemovePublic(StoreEntry *, http_status);
79 static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, HttpRequest * request, const HttpRequest * orig_request,
80 HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags);
81
82 HttpStateData::HttpStateData(FwdState *theFwdState) : AsyncJob("HttpStateData"), ServerStateData(theFwdState),
83 lastChunk(0), header_bytes_read(0), reply_bytes_read(0),
84 body_bytes_truncated(0), httpChunkDecoder(NULL)
85 {
86 debugs(11,5,HERE << "HttpStateData " << this << " created");
87 ignoreCacheControl = false;
88 surrogateNoStore = false;
89 fd = fwd->conn()->fd; // TODO: store Comm::Connection instead of FD
90 readBuf = new MemBuf;
91 readBuf->init();
92 orig_request = HTTPMSGLOCK(fwd->request);
93
94 // reset peer response time stats for %<pt
95 orig_request->hier.peer_http_request_sent.tv_sec = 0;
96 orig_request->hier.peer_http_request_sent.tv_usec = 0;
97
98 if (fwd->conn() != NULL)
99 _peer = cbdataReference(fwd->conn()->getPeer()); /* might be NULL */
100
101 if (_peer) {
102 const char *url;
103
104 if (_peer->options.originserver)
105 url = orig_request->urlpath.termedBuf();
106 else
107 url = entry->url();
108
109 HttpRequest * proxy_req = new HttpRequest(orig_request->method, orig_request->protocol, url);
110
111 proxy_req->SetHost(_peer->host);
112
113 proxy_req->port = _peer->http_port;
114
115 proxy_req->flags = orig_request->flags;
116
117 proxy_req->lastmod = orig_request->lastmod;
118
119 proxy_req->flags.proxying = 1;
120
121 HTTPMSGUNLOCK(request);
122
123 request = HTTPMSGLOCK(proxy_req);
124
125 /*
126 * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
127 * We might end up getting the object from somewhere else if,
128 * for example, the request to this neighbor fails.
129 */
130 if (_peer->options.proxy_only)
131 entry->releaseRequest();
132
133 #if DELAY_POOLS
134
135 entry->setNoDelay(_peer->options.no_delay);
136
137 #endif
138 }
139
140 /*
141 * register the handler to free HTTP state data when the FD closes
142 */
143 typedef CommCbMemFunT<HttpStateData, CommCloseCbParams> Dialer;
144 closeHandler = asyncCall(9, 5, "httpStateData::httpStateConnClosed",
145 Dialer(this,&HttpStateData::httpStateConnClosed));
146 comm_add_close_handler(fd, closeHandler);
147 }
148
149 HttpStateData::~HttpStateData()
150 {
151 /*
152 * don't forget that ~ServerStateData() gets called automatically
153 */
154
155 if (!readBuf->isNull())
156 readBuf->clean();
157
158 delete readBuf;
159
160 if (httpChunkDecoder)
161 delete httpChunkDecoder;
162
163 HTTPMSGUNLOCK(orig_request);
164
165 debugs(11,5, HERE << "HttpStateData " << this << " destroyed; FD " << fd);
166 }
167
168 int
169 HttpStateData::dataDescriptor() const
170 {
171 return fd;
172 }
173 /*
174 static void
175 httpStateFree(int fd, void *data)
176 {
177 HttpStateData *httpState = static_cast<HttpStateData *>(data);
178 debugs(11, 5, "httpStateFree: FD " << fd << ", httpState=" << data);
179 delete httpState;
180 }*/
181
182 void
183 HttpStateData::httpStateConnClosed(const CommCloseCbParams &params)
184 {
185 debugs(11, 5, "httpStateFree: FD " << params.fd << ", httpState=" << params.data);
186 deleteThis("HttpStateData::httpStateConnClosed");
187 }
188
189 int
190 httpCachable(const HttpRequestMethod& method)
191 {
192 /* GET and HEAD are cachable. Others are not. */
193
194 // TODO: replace with HttpRequestMethod::isCachable()?
195 if (method != METHOD_GET && method != METHOD_HEAD)
196 return 0;
197
198 /* else cachable */
199 return 1;
200 }
201
202 void
203 HttpStateData::httpTimeout(const CommTimeoutCbParams &params)
204 {
205 debugs(11, 4, "httpTimeout: FD " << fd << ": '" << entry->url() << "'" );
206
207 if (entry->store_status == STORE_PENDING) {
208 fwd->fail(errorCon(ERR_READ_TIMEOUT, HTTP_GATEWAY_TIMEOUT, fwd->request));
209 }
210
211 comm_close(fd);
212 }
213
214 static void
215 httpMaybeRemovePublic(StoreEntry * e, http_status status)
216 {
217 int remove = 0;
218 int forbidden = 0;
219 StoreEntry *pe;
220
221 if (!EBIT_TEST(e->flags, KEY_PRIVATE))
222 return;
223
224 switch (status) {
225
226 case HTTP_OK:
227
228 case HTTP_NON_AUTHORITATIVE_INFORMATION:
229
230 case HTTP_MULTIPLE_CHOICES:
231
232 case HTTP_MOVED_PERMANENTLY:
233
234 case HTTP_MOVED_TEMPORARILY:
235
236 case HTTP_GONE:
237
238 case HTTP_NOT_FOUND:
239 remove = 1;
240
241 break;
242
243 case HTTP_FORBIDDEN:
244
245 case HTTP_METHOD_NOT_ALLOWED:
246 forbidden = 1;
247
248 break;
249
250 #if WORK_IN_PROGRESS
251
252 case HTTP_UNAUTHORIZED:
253 forbidden = 1;
254
255 break;
256
257 #endif
258
259 default:
260 #if QUESTIONABLE
261 /*
262 * Any 2xx response should eject previously cached entities...
263 */
264
265 if (status >= 200 && status < 300)
266 remove = 1;
267
268 #endif
269
270 break;
271 }
272
273 if (!remove && !forbidden)
274 return;
275
276 assert(e->mem_obj);
277
278 if (e->mem_obj->request)
279 pe = storeGetPublicByRequest(e->mem_obj->request);
280 else
281 pe = storeGetPublic(e->mem_obj->url, e->mem_obj->method);
282
283 if (pe != NULL) {
284 assert(e != pe);
285 #if USE_HTCP
286 neighborsHtcpClear(e, NULL, e->mem_obj->request, e->mem_obj->method, HTCP_CLR_INVALIDATION);
287 #endif
288 pe->release();
289 }
290
291 /** \par
292 * Also remove any cached HEAD response in case the object has
293 * changed.
294 */
295 if (e->mem_obj->request)
296 pe = storeGetPublicByRequestMethod(e->mem_obj->request, METHOD_HEAD);
297 else
298 pe = storeGetPublic(e->mem_obj->url, METHOD_HEAD);
299
300 if (pe != NULL) {
301 assert(e != pe);
302 #if USE_HTCP
303 neighborsHtcpClear(e, NULL, e->mem_obj->request, HttpRequestMethod(METHOD_HEAD), HTCP_CLR_INVALIDATION);
304 #endif
305 pe->release();
306 }
307 }
308
309 void
310 HttpStateData::processSurrogateControl(HttpReply *reply)
311 {
312 if (request->flags.accelerated && reply->surrogate_control) {
313 HttpHdrScTarget *sctusable = httpHdrScGetMergedTarget(reply->surrogate_control, Config.Accel.surrogate_id);
314
315 if (sctusable) {
316 if (EBIT_TEST(sctusable->mask, SC_NO_STORE) ||
317 (Config.onoff.surrogate_is_remote
318 && EBIT_TEST(sctusable->mask, SC_NO_STORE_REMOTE))) {
319 surrogateNoStore = true;
320 entry->makePrivate();
321 }
322
323 /* The HttpHeader logic cannot tell if the header it's parsing is a reply to an
324 * accelerated request or not...
325 * Still, this is an abstraction breach. - RC
326 */
327 if (sctusable->max_age != -1) {
328 if (sctusable->max_age < sctusable->max_stale)
329 reply->expires = reply->date + sctusable->max_age;
330 else
331 reply->expires = reply->date + sctusable->max_stale;
332
333 /* And update the timestamps */
334 entry->timestampsSet();
335 }
336
337 /* We ignore cache-control directives as per the Surrogate specification */
338 ignoreCacheControl = true;
339
340 httpHdrScTargetDestroy(sctusable);
341 }
342 }
343 }
344
345 int
346 HttpStateData::cacheableReply()
347 {
348 HttpReply const *rep = finalReply();
349 HttpHeader const *hdr = &rep->header;
350 const int cc_mask = (rep->cache_control) ? rep->cache_control->mask : 0;
351 const char *v;
352 #if HTTP_VIOLATIONS
353
354 const refresh_t *R = NULL;
355
356 /* This strange-looking define first looks up the refresh pattern
357 * and then checks whether the specified flag is set. Its main purpose
358 * is to simplify the refresh pattern lookup and the HTTP_VIOLATIONS
359 * condition.
360 */
361 #define REFRESH_OVERRIDE(flag) \
362 ((R = (R ? R : refreshLimits(entry->mem_obj->url))) , \
363 (R && R->flags.flag))
364 #else
365 #define REFRESH_OVERRIDE(flag) 0
366 #endif
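/*
 * Illustrative note (assuming HTTP_VIOLATIONS is enabled): a use such as
 * REFRESH_OVERRIDE(ignore_private) expands roughly to
 *
 *     ((R = (R ? R : refreshLimits(entry->mem_obj->url))),
 *      (R && R->flags.ignore_private))
 *
 * i.e. the matching refresh_pattern entry is looked up at most once, cached
 * in R, and the named override flag is then tested on it. Without
 * HTTP_VIOLATIONS the whole expression is simply 0.
 */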
367
368 if (surrogateNoStore)
369 return 0;
370
371 if (!ignoreCacheControl) {
372 if (EBIT_TEST(cc_mask, CC_PRIVATE)) {
373 if (!REFRESH_OVERRIDE(ignore_private))
374 return 0;
375 }
376
377 if (EBIT_TEST(cc_mask, CC_NO_CACHE)) {
378 if (!REFRESH_OVERRIDE(ignore_no_cache))
379 return 0;
380 }
381
382 if (EBIT_TEST(cc_mask, CC_NO_STORE)) {
383 if (!REFRESH_OVERRIDE(ignore_no_store))
384 return 0;
385 }
386 }
387
388 if (request->flags.auth || request->flags.auth_sent) {
389 /*
390 * Responses to requests with authorization may be cached
391 * only if a Cache-Control: public reply header is present.
392 * RFC 2068, sec 14.9.4
393 */
394
395 if (!EBIT_TEST(cc_mask, CC_PUBLIC)) {
396 if (!REFRESH_OVERRIDE(ignore_auth))
397 return 0;
398 }
399 }
400
401 /* Pragma: no-cache in _replies_ is not documented in HTTP,
402 * but servers like "Active Imaging Webcast/2.0" sure do use it */
403 if (hdr->has(HDR_PRAGMA)) {
404 String s = hdr->getList(HDR_PRAGMA);
405 const int no_cache = strListIsMember(&s, "no-cache", ',');
406 s.clean();
407
408 if (no_cache) {
409 if (!REFRESH_OVERRIDE(ignore_no_cache))
410 return 0;
411 }
412 }
413
414 /*
415 * The "multipart/x-mixed-replace" content type is used for
416 * continuous push replies. These are generally dynamic and
417 * probably should not be cachable
418 */
419 if ((v = hdr->getStr(HDR_CONTENT_TYPE)))
420 if (!strncasecmp(v, "multipart/x-mixed-replace", 25))
421 return 0;
422
423 switch (rep->sline.status) {
424 /* Responses that are cacheable */
425
426 case HTTP_OK:
427
428 case HTTP_NON_AUTHORITATIVE_INFORMATION:
429
430 case HTTP_MULTIPLE_CHOICES:
431
432 case HTTP_MOVED_PERMANENTLY:
433
434 case HTTP_GONE:
435 /*
436 * Don't cache objects that need to be refreshed on next request,
437 * unless we know how to refresh them.
438 */
439
440 if (!refreshIsCachable(entry) && !REFRESH_OVERRIDE(store_stale)) {
441 debugs(22, 3, "refreshIsCachable() returned non-cacheable..");
442 return 0;
443 }
444
445 /* don't cache objects from peers w/o LMT, Date, or Expires */
446 /* check whether it is enough to check headers @?@ */
447 if (rep->date > -1)
448 return 1;
449 else if (rep->last_modified > -1)
450 return 1;
451 else if (!_peer)
452 return 1;
453
454 /* @?@ (here and 302): invalid expires header compiles to squid_curtime */
455 else if (rep->expires > -1)
456 return 1;
457 else
458 return 0;
459
460 /* NOTREACHED */
461 break;
462
463 /* Responses that only are cacheable if the server says so */
464
465 case HTTP_MOVED_TEMPORARILY:
466 case HTTP_TEMPORARY_REDIRECT:
467 if (rep->expires > rep->date && rep->date > 0)
468 return 1;
469 else
470 return 0;
471
472 /* NOTREACHED */
473 break;
474
475 /* Errors can be negatively cached */
476
477 case HTTP_NO_CONTENT:
478
479 case HTTP_USE_PROXY:
480
481 case HTTP_BAD_REQUEST:
482
483 case HTTP_FORBIDDEN:
484
485 case HTTP_NOT_FOUND:
486
487 case HTTP_METHOD_NOT_ALLOWED:
488
489 case HTTP_REQUEST_URI_TOO_LARGE:
490
491 case HTTP_INTERNAL_SERVER_ERROR:
492
493 case HTTP_NOT_IMPLEMENTED:
494
495 case HTTP_BAD_GATEWAY:
496
497 case HTTP_SERVICE_UNAVAILABLE:
498
499 case HTTP_GATEWAY_TIMEOUT:
500 return -1;
501
502 /* NOTREACHED */
503 break;
504
505 /* Some responses can never be cached */
506
507 case HTTP_PARTIAL_CONTENT: /* Not yet supported */
508
509 case HTTP_SEE_OTHER:
510
511 case HTTP_NOT_MODIFIED:
512
513 case HTTP_UNAUTHORIZED:
514
515 case HTTP_PROXY_AUTHENTICATION_REQUIRED:
516
517 case HTTP_INVALID_HEADER: /* Squid header parsing error */
518
519 case HTTP_HEADER_TOO_LARGE:
520
521 case HTTP_PAYMENT_REQUIRED:
522 case HTTP_NOT_ACCEPTABLE:
523 case HTTP_REQUEST_TIMEOUT:
524 case HTTP_CONFLICT:
525 case HTTP_LENGTH_REQUIRED:
526 case HTTP_PRECONDITION_FAILED:
527 case HTTP_REQUEST_ENTITY_TOO_LARGE:
528 case HTTP_UNSUPPORTED_MEDIA_TYPE:
529 case HTTP_UNPROCESSABLE_ENTITY:
530 case HTTP_LOCKED:
531 case HTTP_FAILED_DEPENDENCY:
532 case HTTP_INSUFFICIENT_STORAGE:
533 case HTTP_REQUESTED_RANGE_NOT_SATISFIABLE:
534 case HTTP_EXPECTATION_FAILED:
535
536 return 0;
537
538 default:
539 /* RFC 2616 section 6.1.1: an unrecognized response MUST NOT be cached. */
540 debugs (11, 3, HERE << "Unknown HTTP status code " << rep->sline.status << ". Not cacheable.");
541
542 return 0;
543
544 /* NOTREACHED */
545 break;
546 }
547
548 /* NOTREACHED */
549 }
550
551 /*
552 * For Vary, store the relevant request headers as
553 * virtual headers in the reply
554 * Returns NULL if the variance cannot be stored
555 */
556 const char *
557 httpMakeVaryMark(HttpRequest * request, HttpReply const * reply)
558 {
559 String vary, hdr;
560 const char *pos = NULL;
561 const char *item;
562 const char *value;
563 int ilen;
564 static String vstr;
565
566 vstr.clean();
567 vary = reply->header.getList(HDR_VARY);
568
569 while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
570 char *name = (char *)xmalloc(ilen + 1);
571 xstrncpy(name, item, ilen + 1);
572 Tolower(name);
573
574 if (strcmp(name, "*") == 0) {
575 /* Cannot handle "Vary: *" without ETag support */
576 safe_free(name);
577 vstr.clean();
578 break;
579 }
580
581 strListAdd(&vstr, name, ',');
582 hdr = request->header.getByName(name);
583 safe_free(name);
584 value = hdr.termedBuf();
585
586 if (value) {
587 value = rfc1738_escape_part(value);
588 vstr.append("=\"", 2);
589 vstr.append(value);
590 vstr.append("\"", 1);
591 }
592
593 hdr.clean();
594 }
595
596 vary.clean();
597 #if X_ACCELERATOR_VARY
598
599 pos = NULL;
600 vary = reply->header.getList(HDR_X_ACCELERATOR_VARY);
601
602 while (strListGetItem(&vary, ',', &item, &ilen, &pos)) {
603 char *name = (char *)xmalloc(ilen + 1);
604 xstrncpy(name, item, ilen + 1);
605 Tolower(name);
606 strListAdd(&vstr, name, ',');
607 hdr = request->header.getByName(name);
608 safe_free(name);
609 value = hdr.termedBuf();
610
611 if (value) {
612 value = rfc1738_escape_part(value);
613 vstr.append("=\"", 2);
614 vstr.append(value);
615 vstr.append("\"", 1);
616 }
617
618 hdr.clean();
619 }
620
621 vary.clean();
622 #endif
623
624 debugs(11, 3, "httpMakeVaryMark: " << vstr);
625 return vstr.termedBuf();
626 }
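/*
 * Example with hypothetical values: for a reply carrying
 * "Vary: Accept-Encoding" and a client request that sent
 * "Accept-Encoding: gzip", the mark built above would be
 *
 *     accept-encoding="gzip"
 *
 * i.e. a comma-separated list of lowercased header names, each followed by
 * the rfc1738-escaped request value whenever the client supplied that header.
 */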
627
628 void
629 HttpStateData::keepaliveAccounting(HttpReply *reply)
630 {
631 if (flags.keepalive)
632 if (_peer)
633 _peer->stats.n_keepalives_sent++;
634
635 if (reply->keep_alive) {
636 if (_peer)
637 _peer->stats.n_keepalives_recv++;
638
639 if (Config.onoff.detect_broken_server_pconns
640 && reply->bodySize(request->method) == -1 && !flags.chunked) {
641 debugs(11, 1, "keepaliveAccounting: Impossible keep-alive header from '" << entry->url() << "'" );
642 // debugs(11, 2, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------" );
643 flags.keepalive_broken = 1;
644 }
645 }
646 }
647
648 void
649 HttpStateData::checkDateSkew(HttpReply *reply)
650 {
651 if (reply->date > -1 && !_peer) {
652 int skew = abs((int)(reply->date - squid_curtime));
653
654 if (skew > 86400)
655 debugs(11, 3, "" << request->GetHost() << "'s clock is skewed by " << skew << " seconds!");
656 }
657 }
658
659 /**
660 * This creates the error page itself. It is likely
661 * that the forward-ported reply header max size patch
662 * generates non-HTTP-conformant error pages, in which
663 * case the errors there should be 'BAD_GATEWAY' etc.
664 */
665 void
666 HttpStateData::processReplyHeader()
667 {
668 /** Creates a blank header. If this routine is made incremental, this will not do */
669 Ctx ctx = ctx_enter(entry->mem_obj->url);
670 debugs(11, 3, "processReplyHeader: key '" << entry->getMD5Text() << "'");
671
672 assert(!flags.headers_parsed);
673
674 if (!readBuf->hasContent())
675 return;
676
677 http_status error = HTTP_STATUS_NONE;
678
679 HttpReply *newrep = new HttpReply;
680 const bool parsed = newrep->parse(readBuf, eof, &error);
681
682 if (!parsed && readBuf->contentSize() > 5 && strncmp(readBuf->content(), "HTTP/", 5) != 0 && strncmp(readBuf->content(), "ICY", 3) != 0) {
683 MemBuf *mb;
684 HttpReply *tmprep = new HttpReply;
685 tmprep->setHeaders(HTTP_OK, "Gatewaying", NULL, -1, -1, -1);
686 tmprep->header.putExt("X-Transformed-From", "HTTP/0.9");
687 mb = tmprep->pack();
688 newrep->parse(mb, eof, &error);
689 delete tmprep;
690 } else {
691 if (!parsed && error > 0) { // unrecoverable parsing error
692 debugs(11, 3, "processReplyHeader: Non-HTTP-compliant header: '" << readBuf->content() << "'");
693 flags.headers_parsed = 1;
694 newrep->sline.version = HttpVersion(1,1);
695 newrep->sline.status = error;
696 HttpReply *vrep = setVirginReply(newrep);
697 entry->replaceHttpReply(vrep);
698 ctx_exit(ctx);
699 return;
700 }
701
702 if (!parsed) { // need more data
703 assert(!error);
704 assert(!eof);
705 delete newrep;
706 ctx_exit(ctx);
707 return;
708 }
709
710 debugs(11, 9, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------");
711
712 header_bytes_read = headersEnd(readBuf->content(), readBuf->contentSize());
713 readBuf->consume(header_bytes_read);
714 }
715
716 /* Skip 1xx messages for now. Advertised in Via as an internal 1.0 hop */
717 if (newrep->sline.protocol == PROTO_HTTP && newrep->sline.status >= 100 && newrep->sline.status < 200) {
718
719 #if WHEN_HTTP11_EXPECT_HANDLED
720 /* When HTTP/1.1 check if the client is expecting a 1xx reply and maybe pass it on */
721 if (orig_request->header.has(HDR_EXPECT)) {
722 // TODO: pass to the client anyway?
723 }
724 #endif
725 delete newrep;
726 debugs(11, 2, HERE << "1xx headers consume " << header_bytes_read << " bytes header.");
727 header_bytes_read = 0;
728 if (reply_bytes_read > 0)
729 debugs(11, 2, HERE << "1xx headers consume " << reply_bytes_read << " bytes reply.");
730 reply_bytes_read = 0;
731 ctx_exit(ctx);
732 processReplyHeader();
733 return;
734 }
735
736 flags.chunked = 0;
737 if (newrep->sline.protocol == PROTO_HTTP && newrep->header.hasListMember(HDR_TRANSFER_ENCODING, "chunked", ',')) {
738 flags.chunked = 1;
739 httpChunkDecoder = new ChunkedCodingParser;
740 }
741
742 if (!peerSupportsConnectionPinning())
743 orig_request->flags.connection_auth_disabled = 1;
744
745 HttpReply *vrep = setVirginReply(newrep);
746 flags.headers_parsed = 1;
747
748 keepaliveAccounting(vrep);
749
750 checkDateSkew(vrep);
751
752 processSurrogateControl (vrep);
753
754 /** \todo IF the reply is a 1.0 reply, AND it has a Connection: Header
755 * Parse the header and remove all referenced headers
756 */
757
758 orig_request->hier.peer_reply_status = newrep->sline.status;
759
760 ctx_exit(ctx);
761
762 }
763
764 /**
765 * returns true if the peer can support connection pinning
766 */
767 bool HttpStateData::peerSupportsConnectionPinning() const
768 {
769 const HttpReply *rep = entry->mem_obj->getReply();
770 const HttpHeader *hdr = &rep->header;
771 bool rc;
772 String header;
773
774 if (!_peer)
775 return true;
776
777 /* If this peer does not support connection pinning (authenticated
778 connections), return false.
779 */
780 if (!_peer->connection_auth)
781 return false;
782
783 /* The peer supports connection pinning and the HTTP reply status
784 is not Unauthorized, so the related connection can be pinned.
785 */
786 if (rep->sline.status != HTTP_UNAUTHORIZED)
787 return true;
788
789 /* The server responded with HTTP_UNAUTHORIZED and the peer is configured
790 with "connection-auth=on", so we know that the peer supports pinned
791 connections.
792 */
793 if (_peer->connection_auth == 1)
794 return true;
795
796 /* At this point the peer is configured with the "connection-auth=auto"
797 parameter, so we need some extra checks to decide whether we are going
798 to allow pinned connections or not.
799 */
800
801 /* If the peer is configured with originserver, just allow connection
802 pinning (Squid 2.6 behaviour).
803 */
804 if (_peer->options.originserver)
805 return true;
806
807 /* If the connection is already pinned, it is OK. */
808 if (request->flags.pinned)
809 return true;
810
811 /* Allow pinned connections only if the Proxy-support header exists in
812 the reply and its list contains "Session-Based-Authentication",
813 which means that the peer supports connection pinning.
814 */
815 if (!hdr->has(HDR_PROXY_SUPPORT))
816 return false;
817
818 header = hdr->getStrOrList(HDR_PROXY_SUPPORT);
819 /* XXX This ought to be done in a case-insensitive manner */
820 rc = (strstr(header.termedBuf(), "Session-Based-Authentication") != NULL);
821
822 return rc;
823 }
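/*
 * Summary of the checks above, as implemented: no peer (going direct) ->
 * pinnable; a peer without connection_auth -> not pinnable; a reply status
 * other than 401 -> pinnable; connection-auth=on -> pinnable; otherwise
 * (connection-auth=auto) pinning is allowed only for originserver peers,
 * already-pinned requests, or replies whose Proxy-support list advertises
 * "Session-Based-Authentication".
 */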
824
825 // Called when we have parsed (and possibly adapted) the headers but
826 // have not yet started storing (a.k.a., sending) the body.
827 void
828 HttpStateData::haveParsedReplyHeaders()
829 {
830 ServerStateData::haveParsedReplyHeaders();
831
832 Ctx ctx = ctx_enter(entry->mem_obj->url);
833 HttpReply *rep = finalReply();
834
835 if (rep->sline.status == HTTP_PARTIAL_CONTENT &&
836 rep->content_range)
837 currentOffset = rep->content_range->spec.offset;
838
839 entry->timestampsSet();
840
841 /* Check if object is cacheable or not based on reply code */
842 debugs(11, 3, "haveParsedReplyHeaders: HTTP CODE: " << rep->sline.status);
843
844 if (neighbors_do_private_keys)
845 httpMaybeRemovePublic(entry, rep->sline.status);
846
847 if (rep->header.has(HDR_VARY)
848 #if X_ACCELERATOR_VARY
849 || rep->header.has(HDR_X_ACCELERATOR_VARY)
850 #endif
851 ) {
852 const char *vary = httpMakeVaryMark(orig_request, rep);
853
854 if (!vary) {
855 entry->makePrivate();
856 if (!fwd->reforwardableStatus(rep->sline.status))
857 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
858 goto no_cache;
859 }
860
861 entry->mem_obj->vary_headers = xstrdup(vary);
862 }
863
864 #if WIP_FWD_LOG
865 fwdStatus(fwd, s);
866
867 #endif
868 /*
869 * If it's not a reply that we will re-forward, then
870 * allow the client to get it.
871 */
872 if (!fwd->reforwardableStatus(rep->sline.status))
873 EBIT_CLR(entry->flags, ENTRY_FWD_HDR_WAIT);
874
875 switch (cacheableReply()) {
876
877 case 1:
878 entry->makePublic();
879 break;
880
881 case 0:
882 entry->makePrivate();
883 break;
884
885 case -1:
886
887 #if HTTP_VIOLATIONS
888 if (Config.negativeTtl > 0)
889 entry->cacheNegatively();
890 else
891 #endif
892 entry->makePrivate();
893
894 break;
895
896 default:
897 assert(0);
898
899 break;
900 }
901
902 no_cache:
903
904 if (!ignoreCacheControl && rep->cache_control) {
905 if (EBIT_TEST(rep->cache_control->mask, CC_PROXY_REVALIDATE))
906 EBIT_SET(entry->flags, ENTRY_REVALIDATE);
907 else if (EBIT_TEST(rep->cache_control->mask, CC_MUST_REVALIDATE))
908 EBIT_SET(entry->flags, ENTRY_REVALIDATE);
909 }
910
911 #if HEADERS_LOG
912 headersLog(1, 0, request->method, rep);
913
914 #endif
915
916 ctx_exit(ctx);
917 }
918
919 HttpStateData::ConnectionStatus
920 HttpStateData::statusIfComplete() const
921 {
922 const HttpReply *rep = virginReply();
923 /** \par
924 * If the reply wants to close the connection, it takes precedence */
925
926 if (httpHeaderHasConnDir(&rep->header, "close"))
927 return COMPLETE_NONPERSISTENT_MSG;
928
929 /** \par
930 * If we didn't send a keep-alive request header, then this
931 * can not be a persistent connection.
932 */
933 if (!flags.keepalive)
934 return COMPLETE_NONPERSISTENT_MSG;
935
936 /** \par
937 * If we haven't sent the whole request then this can not be a persistent
938 * connection.
939 */
940 if (!flags.request_sent) {
941 debugs(11, 1, "statusIfComplete: Request not yet fully sent \"" << RequestMethodStr(orig_request->method) << " " << entry->url() << "\"" );
942 return COMPLETE_NONPERSISTENT_MSG;
943 }
944
945 /** \par
946 * What does the reply have to say about keep-alive?
947 */
948 /**
949 \bug XXX BUG?
950 * If the origin server (HTTP/1.0) does not send a keep-alive
951 * header, but keeps the connection open anyway, what happens?
952 * We'll return here and http.c waits for an EOF before changing
953 * store_status to STORE_OK. Combine this with ENTRY_FWD_HDR_WAIT
954 * and an error status code, and we might have to wait until
955 * the server times out the socket.
956 */
957 if (!rep->keep_alive)
958 return COMPLETE_NONPERSISTENT_MSG;
959
960 return COMPLETE_PERSISTENT_MSG;
961 }
962
963 HttpStateData::ConnectionStatus
964 HttpStateData::persistentConnStatus() const
965 {
966 debugs(11, 3, "persistentConnStatus: FD " << fd << " eof=" << eof);
967 const HttpReply *vrep = virginReply();
968 debugs(11, 5, "persistentConnStatus: content_length=" << vrep->content_length);
969
970 /* If we haven't seen the end of reply headers, we are not done */
971 debugs(11, 5, "persistentConnStatus: flags.headers_parsed=" << flags.headers_parsed);
972
973 if (!flags.headers_parsed)
974 return INCOMPLETE_MSG;
975
976 if (eof) // already reached EOF
977 return COMPLETE_NONPERSISTENT_MSG;
978
979 /** \par
980 * In a chunked response we do not know the content length, but we are absolutely
981 * sure about the end of the response, so we call statusIfComplete() to
982 * decide whether we can be persistent.
983 */
984 if (lastChunk && flags.chunked)
985 return statusIfComplete();
986
987 const int64_t clen = vrep->bodySize(request->method);
988
989 debugs(11, 5, "persistentConnStatus: clen=" << clen);
990
991 /* If the body size is unknown we must wait for EOF */
992 if (clen < 0)
993 return INCOMPLETE_MSG;
994
995 /** \par
996 * If the body size is known, we must wait until we've gotten all of it. */
997 if (clen > 0) {
998 // old technique:
999 // if (entry->mem_obj->endOffset() < vrep->content_length + vrep->hdr_sz)
1000 const int64_t body_bytes_read = reply_bytes_read - header_bytes_read;
1001 debugs(11,5, "persistentConnStatus: body_bytes_read=" <<
1002 body_bytes_read << " content_length=" << vrep->content_length);
1003
1004 if (body_bytes_read < vrep->content_length)
1005 return INCOMPLETE_MSG;
1006
1007 if (body_bytes_truncated > 0) // already read more than needed
1008 return COMPLETE_NONPERSISTENT_MSG; // disable pconns
1009 }
1010
1011 /** \par
1012 * If there is no message body or we got it all, we can be persistent */
1013 return statusIfComplete();
1014 }
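/*
 * Worked example with hypothetical numbers: suppose content_length is 1000,
 * header_bytes_read is 300 and reply_bytes_read is 800. Then body_bytes_read
 * is 500 < 1000, so INCOMPLETE_MSG is returned and another read is scheduled.
 * Once reply_bytes_read reaches 1300 the body is complete (and nothing was
 * truncated), so statusIfComplete() decides whether the connection persists.
 */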
1015
1016 /*
1017 * This is the callback after some data has been read from the network
1018 */
1019 /*
1020 void
1021 HttpStateData::ReadReplyWrapper(int fd, char *buf, size_t len, comm_err_t flag, int xerrno, void *data)
1022 {
1023 HttpStateData *httpState = static_cast<HttpStateData *>(data);
1024 assert (fd == httpState->fd);
1025 // assert(buf == readBuf->content());
1026 PROF_start(HttpStateData_readReply);
1027 httpState->readReply(len, flag, xerrno);
1028 PROF_stop(HttpStateData_readReply);
1029 }
1030 */
1031
1032 /* XXX this function is too long! */
1033 void
1034 HttpStateData::readReply(const CommIoCbParams &io)
1035 {
1036 int bin;
1037 int clen;
1038 int len = io.size;
1039
1040 assert(fd == io.fd);
1041
1042 flags.do_next_read = 0;
1043
1044 debugs(11, 5, "httpReadReply: FD " << fd << ": len " << len << ".");
1045
1046 // Bail out early on COMM_ERR_CLOSING - close handlers will tidy up for us
1047 if (io.flag == COMM_ERR_CLOSING) {
1048 debugs(11, 3, "http socket closing");
1049 return;
1050 }
1051
1052 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
1053 maybeReadVirginBody();
1054 return;
1055 }
1056
1057 // handle I/O errors
1058 if (io.flag != COMM_OK || len < 0) {
1059 debugs(11, 2, "httpReadReply: FD " << fd << ": read failure: " << xstrerror() << ".");
1060
1061 if (ignoreErrno(io.xerrno)) {
1062 flags.do_next_read = 1;
1063 } else {
1064 ErrorState *err;
1065 err = errorCon(ERR_READ_ERROR, HTTP_BAD_GATEWAY, fwd->request);
1066 err->xerrno = io.xerrno;
1067 fwd->fail(err);
1068 flags.do_next_read = 0;
1069 comm_close(fd);
1070 }
1071
1072 return;
1073 }
1074
1075 // update I/O stats
1076 if (len > 0) {
1077 readBuf->appended(len);
1078 reply_bytes_read += len;
1079 #if DELAY_POOLS
1080
1081 DelayId delayId = entry->mem_obj->mostBytesAllowed();
1082 delayId.bytesIn(len);
1083 #endif
1084
1085 kb_incr(&statCounter.server.all.kbytes_in, len);
1086 kb_incr(&statCounter.server.http.kbytes_in, len);
1087 IOStats.Http.reads++;
1088
1089 for (clen = len - 1, bin = 0; clen; bin++)
1090 clen >>= 1;
1091
1092 IOStats.Http.read_hist[bin]++;
1093
1094 // update peer response time stats (%<pt)
1095 const timeval &sent = orig_request->hier.peer_http_request_sent;
1096 orig_request->hier.peer_response_time =
1097 sent.tv_sec ? tvSubMsec(sent, current_time) : -1;
1098 }
1099
1100 /** \par
1101 * Here the RFC says we should ignore whitespace between replies, but we can't as
1102 * doing so breaks HTTP/0.9 replies beginning with whitespace, and in addition
1103 * the response splitting countermeasures are extremely likely to trigger on this,
1104 * not allowing connection reuse in the first place.
1105 */
1106 #if DONT_DO_THIS
1107 if (!flags.headers_parsed && len > 0 && fd_table[fd].uses > 1) {
1108 /* Skip whitespace between replies */
1109
1110 while (len > 0 && xisspace(*buf))
1111 xmemmove(buf, buf + 1, len--);
1112
1113 if (len == 0) {
1114 /* Continue to read... */
1115 /* Timeout NOT increased. This whitespace was from previous reply */
1116 flags.do_next_read = 1;
1117 maybeReadVirginBody();
1118 return;
1119 }
1120 }
1121
1122 #endif
1123
1124 if (len == 0) { // reached EOF?
1125 eof = 1;
1126 flags.do_next_read = 0;
1127
1128 /* Bug 2879: Replies may terminate with \r\n then EOF instead of \r\n\r\n
1129 * Ensure here that we have at minimum two \r\n when EOF is seen.
1130 * TODO: Add eof parameter to headersEnd() and move this hack there.
1131 */
1132 if (readBuf->contentSize() && !flags.headers_parsed) {
1133 /*
1134 * Yes Henrik, there is a point to doing this. When we
1135 * called httpProcessReplyHeader() before, we didn't find
1136 * the end of headers, but now we are definitely at EOF, so
1137 * we want to process the reply headers.
1138 */
1139 /* Fake an "end-of-headers" to work around such broken servers */
1140 readBuf->append("\r\n", 2);
1141 }
1142 }
1143
1144 if (!flags.headers_parsed) { // have not parsed headers yet?
1145 PROF_start(HttpStateData_processReplyHeader);
1146 processReplyHeader();
1147 PROF_stop(HttpStateData_processReplyHeader);
1148
1149 if (!continueAfterParsingHeader()) // parsing error or need more data
1150 return; // TODO: send errors to ICAP
1151
1152 adaptOrFinalizeReply();
1153 }
1154
1155 // kick more reads if needed and/or process the response body, if any
1156 PROF_start(HttpStateData_processReplyBody);
1157 processReplyBody(); // may call serverComplete()
1158 PROF_stop(HttpStateData_processReplyBody);
1159 }
1160
1161 /**
1162 \retval true if we can continue with processing the body or doing ICAP.
1163 */
1164 bool
1165 HttpStateData::continueAfterParsingHeader()
1166 {
1167 if (!flags.headers_parsed && !eof) {
1168 debugs(11, 9, HERE << "needs more at " << readBuf->contentSize());
1169 flags.do_next_read = 1;
1170 /** \retval false If we have not finished parsing the headers and may get more data.
1171 * Schedules more reads to retrieve the missing data.
1172 */
1173 maybeReadVirginBody(); // schedules all kinds of reads; TODO: rename
1174 return false;
1175 }
1176
1177 /** If we are done with parsing, check for errors */
1178
1179 err_type error = ERR_NONE;
1180
1181 if (flags.headers_parsed) { // parsed headers, possibly with errors
1182 // check for header parsing errors
1183 if (HttpReply *vrep = virginReply()) {
1184 const http_status s = vrep->sline.status;
1185 const HttpVersion &v = vrep->sline.version;
1186 if (s == HTTP_INVALID_HEADER && v != HttpVersion(0,9)) {
1187 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1188 error = ERR_INVALID_RESP;
1189 } else if (s == HTTP_HEADER_TOO_LARGE) {
1190 fwd->dontRetry(true);
1191 error = ERR_TOO_BIG;
1192 } else {
1193 return true; // done parsing, got reply, and no error
1194 }
1195 } else {
1196 // parsed headers but got no reply
1197 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No reply at all for " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1198 error = ERR_INVALID_RESP;
1199 }
1200 } else {
1201 assert(eof);
1202 if (readBuf->hasContent()) {
1203 error = ERR_INVALID_RESP;
1204 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1205 } else {
1206 error = ERR_ZERO_SIZE_OBJECT;
1207 debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: No object data received for " << entry->url() << " AKA " << orig_request->GetHost() << orig_request->urlpath.termedBuf() );
1208 }
1209 }
1210
1211 assert(error != ERR_NONE);
1212 entry->reset();
1213 fwd->fail(errorCon(error, HTTP_BAD_GATEWAY, fwd->request));
1214 flags.do_next_read = 0;
1215 comm_close(fd);
1216 return false; // quit on error
1217 }
1218
1219 /** truncate what we read if we read too much so that writeReplyBody()
1220 writes no more than what we should have read */
1221 void
1222 HttpStateData::truncateVirginBody()
1223 {
1224 assert(flags.headers_parsed);
1225
1226 HttpReply *vrep = virginReply();
1227 int64_t clen = -1;
1228 if (!vrep->expectingBody(request->method, clen) || clen < 0)
1229 return; // no body or a body of unknown size, including chunked
1230
1231 const int64_t body_bytes_read = reply_bytes_read - header_bytes_read;
1232 if (body_bytes_read - body_bytes_truncated <= clen)
1233 return; // we did not read too much or already took care of the extras
1234
1235 if (const int64_t extras = body_bytes_read - body_bytes_truncated - clen) {
1236 // server sent more than the advertised content length
1237 debugs(11,5, HERE << "body_bytes_read=" << body_bytes_read <<
1238 " clen=" << clen << '/' << vrep->content_length <<
1239 " body_bytes_truncated=" << body_bytes_truncated << '+' << extras);
1240
1241 readBuf->truncate(extras);
1242 body_bytes_truncated += extras;
1243 }
1244 }
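/*
 * Worked example with hypothetical numbers: if clen is 100 but the server
 * has already delivered 120 body bytes (body_bytes_read = 120 and nothing
 * truncated yet), extras is 20. Those 20 bytes are dropped from readBuf and
 * body_bytes_truncated becomes 20, so writeReplyBody() never stores more
 * than the advertised content length.
 */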
1245
1246 /**
1247 * Call this when there is data from the origin server
1248 * which should be sent to either StoreEntry, or to ICAP...
1249 */
1250 void
1251 HttpStateData::writeReplyBody()
1252 {
1253 truncateVirginBody(); // if needed
1254 const char *data = readBuf->content();
1255 int len = readBuf->contentSize();
1256 addVirginReplyBody(data, len);
1257 readBuf->consume(len);
1258 }
1259
1260 bool
1261 HttpStateData::decodeAndWriteReplyBody()
1262 {
1263 const char *data = NULL;
1264 int len;
1265 bool wasThereAnException = false;
1266 assert(flags.chunked);
1267 assert(httpChunkDecoder);
1268 SQUID_ENTER_THROWING_CODE();
1269 MemBuf decodedData;
1270 decodedData.init();
1271 const bool doneParsing = httpChunkDecoder->parse(readBuf,&decodedData);
1272 len = decodedData.contentSize();
1273 data=decodedData.content();
1274 addVirginReplyBody(data, len);
1275 if (doneParsing) {
1276 lastChunk = 1;
1277 flags.do_next_read = 0;
1278 }
1279 SQUID_EXIT_THROWING_CODE(wasThereAnException);
1280 return wasThereAnException;
1281 }
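/*
 * Illustrative example of the decoding step above: a virgin body arriving as
 *
 *     5\r\nhello\r\n0\r\n\r\n
 *
 * is parsed by httpChunkDecoder into the plain payload "hello"; once the
 * terminating zero-sized chunk is seen, parse() reports completion, lastChunk
 * is set and no further reads are scheduled.
 */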
1282
1283 /**
1284 * processReplyBody has two purposes:
1285 * 1 - take the reply body data, if any, and put it into either
1286 * the StoreEntry, or give it over to ICAP.
1287 * 2 - see if we made it to the end of the response (persistent
1288 * connections and such)
1289 */
1290 void
1291 HttpStateData::processReplyBody()
1292 {
1293 AsyncCall::Pointer call;
1294 Ip::Address client_addr;
1295 bool ispinned = false;
1296
1297 if (!flags.headers_parsed) {
1298 flags.do_next_read = 1;
1299 maybeReadVirginBody();
1300 return;
1301 }
1302
1303 #if USE_ADAPTATION
1304 debugs(11,5, HERE << "adaptationAccessCheckPending=" << adaptationAccessCheckPending);
1305 if (adaptationAccessCheckPending)
1306 return;
1307
1308 #endif
1309
1310 /*
1311 * At this point the reply headers have been parsed and consumed.
1312 * That means header content has been removed from readBuf and
1313 * it contains only body data.
1314 */
1315 if (flags.chunked) {
1316 if (!decodeAndWriteReplyBody()) {
1317 flags.do_next_read = 0;
1318 serverComplete();
1319 return;
1320 }
1321 } else
1322 writeReplyBody();
1323
1324 if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
1325 /*
1326 * The above writeReplyBody() call could ABORT this entry,
1327 * in that case, the server FD should already be closed.
1328 * there's nothing for us to do.
1329 */
1330 (void) 0;
1331 } else
1332 switch (persistentConnStatus()) {
1333 case INCOMPLETE_MSG:
1334 debugs(11, 5, "processReplyBody: INCOMPLETE_MSG");
1335 /* Wait for more data or EOF condition */
1336 if (flags.keepalive_broken) {
1337 call = NULL;
1338 commSetTimeout(fd, 10, call);
1339 } else {
1340 call = NULL;
1341 commSetTimeout(fd, Config.Timeout.read, call);
1342 }
1343
1344 flags.do_next_read = 1;
1345 break;
1346
1347 case COMPLETE_PERSISTENT_MSG:
1348 debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG");
1349 /* yes we have to clear all these! */
1350 call = NULL;
1351 commSetTimeout(fd, -1, call);
1352 flags.do_next_read = 0;
1353
1354 comm_remove_close_handler(fd, closeHandler);
1355 closeHandler = NULL;
1356 fwd->unregister(fd);
1357
1358 if (orig_request->flags.spoof_client_ip)
1359 client_addr = orig_request->client_addr;
1360
1361
1362 if (request->flags.pinned) {
1363 ispinned = true;
1364 } else if (request->flags.connection_auth && request->flags.auth_sent) {
1365 ispinned = true;
1366 }
1367
1368 if (orig_request->pinnedConnection() && ispinned) {
1369 orig_request->pinnedConnection()->pinConnection(fd, orig_request, _peer,
1370 (request->flags.connection_auth != 0));
1371 } else {
1372 fwd->pconnPush(fwd->conn(), _peer, request, orig_request->GetHost(), client_addr);
1373 }
1374
1375 fd = -1;
1376
1377 serverComplete();
1378 return;
1379
1380 case COMPLETE_NONPERSISTENT_MSG:
1381 debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG");
1382 serverComplete();
1383 return;
1384 }
1385
1386 maybeReadVirginBody();
1387 }
1388
1389 void
1390 HttpStateData::maybeReadVirginBody()
1391 {
1392 // we may need to grow the buffer if headers do not fit
1393 const int minRead = flags.headers_parsed ? 0 :1024;
1394 const int read_size = replyBodySpace(*readBuf, minRead);
1395
1396 debugs(11,9, HERE << (flags.do_next_read ? "may" : "wont") <<
1397 " read up to " << read_size << " bytes from FD " << fd);
1398
1399 /*
1400 * why <2? Because delayAwareRead() won't actually read if
1401 * you ask it to read 1 byte. The delayed read request
1402 * just gets re-queued until the client side drains, then
1403 * the I/O thread hangs. Better to not register any read
1404 * handler until we get a notification from someone that
1405 * its okay to read again.
1406 */
1407 if (read_size < 2)
1408 return;
1409
1410 if (flags.do_next_read) {
1411 flags.do_next_read = 0;
1412 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
1413 entry->delayAwareRead(fd, readBuf->space(read_size), read_size,
1414 asyncCall(11, 5, "HttpStateData::readReply",
1415 Dialer(this, &HttpStateData::readReply)));
1416 }
1417 }
1418
1419 /*
1420 * This will be called when request write is complete.
1421 */
1422 void
1423 HttpStateData::sendComplete(const CommIoCbParams &io)
1424 {
1425 debugs(11, 5, "httpSendComplete: FD " << fd << ": size " << io.size << ": errflag " << io.flag << ".");
1426 #if URL_CHECKSUM_DEBUG
1427
1428 entry->mem_obj->checkUrlChecksum();
1429 #endif
1430
1431 if (io.size > 0) {
1432 fd_bytes(fd, io.size, FD_WRITE);
1433 kb_incr(&statCounter.server.all.kbytes_out, io.size);
1434 kb_incr(&statCounter.server.http.kbytes_out, io.size);
1435 }
1436
1437 if (io.flag == COMM_ERR_CLOSING)
1438 return;
1439
1440 if (io.flag) {
1441 ErrorState *err;
1442 err = errorCon(ERR_WRITE_ERROR, HTTP_BAD_GATEWAY, fwd->request);
1443 err->xerrno = io.xerrno;
1444 fwd->fail(err);
1445 comm_close(fd);
1446 return;
1447 }
1448
1449 /*
1450 * Set the read timeout here because it hasn't been set yet.
1451 * We only set the read timeout after the request has been
1452 * fully written to the server-side. If we start the timeout
1453 * after connection establishment, then we are likely to hit
1454 * the timeout for POST/PUT requests that have very large
1455 * request bodies.
1456 */
1457 typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
1458 AsyncCall::Pointer timeoutCall = asyncCall(11, 5, "HttpStateData::httpTimeout",
1459 TimeoutDialer(this,&HttpStateData::httpTimeout));
1460
1461 commSetTimeout(fd, Config.Timeout.read, timeoutCall);
1462
1463 flags.request_sent = 1;
1464
1465 orig_request->hier.peer_http_request_sent = current_time;
1466 }
1467
1468 // Close the HTTP server connection. Used by serverComplete().
1469 void
1470 HttpStateData::closeServer()
1471 {
1472 debugs(11,5, HERE << "closing HTTP server FD " << fd << " this " << this);
1473
1474 if (fd >= 0) {
1475 fwd->unregister(fd);
1476 comm_remove_close_handler(fd, closeHandler);
1477 closeHandler = NULL;
1478 comm_close(fd);
1479 fd = -1;
1480 }
1481 }
1482
1483 bool
1484 HttpStateData::doneWithServer() const
1485 {
1486 return fd < 0;
1487 }
1488
1489
1490 /*
1491 * Fixup authentication request headers for special cases
1492 */
1493 static void
1494 httpFixupAuthentication(HttpRequest * request, HttpRequest * orig_request, const HttpHeader * hdr_in, HttpHeader * hdr_out, http_state_flags flags)
1495 {
1496 http_hdr_type header = flags.originpeer ? HDR_AUTHORIZATION : HDR_PROXY_AUTHORIZATION;
1497
1498 /* Nothing to do unless we are forwarding to a peer */
1499 if (!request->flags.proxying)
1500 return;
1501
1502 /* Needs to be explicitly enabled */
1503 if (!orig_request->peer_login)
1504 return;
1505
1506 /* Maybe already dealt with? */
1507 if (hdr_out->has(header))
1508 return;
1509
1510 /* Nothing to do here for PASSTHRU */
1511 if (strcmp(orig_request->peer_login, "PASSTHRU") == 0)
1512 return;
1513
1514 /* PROXYPASS is a special case, single-signon to servers with the proxy password (basic only) */
1515 if (flags.originpeer && strcmp(orig_request->peer_login, "PROXYPASS") == 0 && hdr_in->has(HDR_PROXY_AUTHORIZATION)) {
1516 const char *auth = hdr_in->getStr(HDR_PROXY_AUTHORIZATION);
1517
1518 if (auth && strncasecmp(auth, "basic ", 6) == 0) {
1519 hdr_out->putStr(header, auth);
1520 return;
1521 }
1522 }
1523
1524 /* Special mode to pass the username to the upstream cache */
1525 if (*orig_request->peer_login == '*') {
1526 char loginbuf[256];
1527 const char *username = "-";
1528
1529 if (orig_request->extacl_user.size())
1530 username = orig_request->extacl_user.termedBuf();
1531 else if (orig_request->auth_user_request)
1532 username = orig_request->auth_user_request->username();
1533
1534 snprintf(loginbuf, sizeof(loginbuf), "%s%s", username, orig_request->peer_login + 1);
1535
1536 httpHeaderPutStrf(hdr_out, header, "Basic %s",
1537 base64_encode(loginbuf));
1538 return;
1539 }
1540
1541 /* external_acl provided credentials */
1542 if (orig_request->extacl_user.size() && orig_request->extacl_passwd.size() &&
1543 (strcmp(orig_request->peer_login, "PASS") == 0 ||
1544 strcmp(orig_request->peer_login, "PROXYPASS") == 0)) {
1545 char loginbuf[256];
1546 snprintf(loginbuf, sizeof(loginbuf), SQUIDSTRINGPH ":" SQUIDSTRINGPH,
1547 SQUIDSTRINGPRINT(orig_request->extacl_user),
1548 SQUIDSTRINGPRINT(orig_request->extacl_passwd));
1549 httpHeaderPutStrf(hdr_out, header, "Basic %s",
1550 base64_encode(loginbuf));
1551 return;
1552 }
1553
1554 /* Kerberos login to peer */
1555 #if HAVE_KRB5 && HAVE_GSSAPI
1556 if (strncmp(orig_request->peer_login, "NEGOTIATE",strlen("NEGOTIATE")) == 0) {
1557 char *Token=NULL;
1558 char *PrincipalName=NULL,*p;
1559 if ((p=strchr(orig_request->peer_login,':')) != NULL ) {
1560 PrincipalName=++p;
1561 }
1562 Token = peer_proxy_negotiate_auth(PrincipalName,request->peer_host);
1563 if (Token) {
1564 httpHeaderPutStrf(hdr_out, HDR_PROXY_AUTHORIZATION, "Negotiate %s",Token);
1565 }
1566 return;
1567 }
1568 #endif /* HAVE_KRB5 && HAVE_GSSAPI */
1569
1570 httpHeaderPutStrf(hdr_out, header, "Basic %s",
1571 base64_encode(orig_request->peer_login));
1572 return;
1573 }
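/*
 * Example with hypothetical configuration: with login=*:secret on the peer
 * (so peer_login is "*:secret") and an authenticated username of "alice",
 * the '*' branch above builds "alice:secret" and emits
 *
 *     Proxy-Authorization: Basic <base64 of "alice:secret">
 *
 * (or Authorization: for originpeer setups), just like the plain peer_login
 * case at the end of the function does for a fixed "user:password" login.
 */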
1574
1575 /*
1576 * build request headers and append them to a given MemBuf
1577 * used by buildRequestPrefix()
1578 * note: initialises the HttpHeader; the caller is responsible for Clean()-ing
1579 */
1580 void
1581 HttpStateData::httpBuildRequestHeader(HttpRequest * request,
1582 HttpRequest * orig_request,
1583 StoreEntry * entry,
1584 HttpHeader * hdr_out,
1585 http_state_flags flags)
1586 {
1587 /* building buffer for complex strings */
1588 #define BBUF_SZ (MAX_URL+32)
1589 LOCAL_ARRAY(char, bbuf, BBUF_SZ);
1590 LOCAL_ARRAY(char, ntoabuf, MAX_IPSTRLEN);
1591 const HttpHeader *hdr_in = &orig_request->header;
1592 const HttpHeaderEntry *e = NULL;
1593 HttpHeaderPos pos = HttpHeaderInitPos;
1594 assert (hdr_out->owner == hoRequest);
1595
1596 /* append our IMS header */
1597 if (request->lastmod > -1)
1598 hdr_out->putTime(HDR_IF_MODIFIED_SINCE, request->lastmod);
1599
1600 bool we_do_ranges = decideIfWeDoRanges (orig_request);
1601
1602 String strConnection (hdr_in->getList(HDR_CONNECTION));
1603
1604 while ((e = hdr_in->getEntry(&pos)))
1605 copyOneHeaderFromClientsideRequestToUpstreamRequest(e, strConnection, request, orig_request, hdr_out, we_do_ranges, flags);
1606
1607 /* Abstraction break: We should interpret multipart/byterange responses
1608 * into offset-length data, and this works around our inability to do so.
1609 */
1610 if (!we_do_ranges && orig_request->multipartRangeRequest()) {
1611 /* don't cache the result */
1612 orig_request->flags.cachable = 0;
1613 /* pretend it's not a range request */
1614 delete orig_request->range;
1615 orig_request->range = NULL;
1616 orig_request->flags.range = 0;
1617 }
1618
1619 /* append Via */
1620 if (Config.onoff.via) {
1621 String strVia;
1622 strVia = hdr_in->getList(HDR_VIA);
1623 snprintf(bbuf, BBUF_SZ, "%d.%d %s",
1624 orig_request->http_ver.major,
1625 orig_request->http_ver.minor, ThisCache);
1626 strListAdd(&strVia, bbuf, ',');
1627 hdr_out->putStr(HDR_VIA, strVia.termedBuf());
1628 strVia.clean();
1629 }
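/*
 * Illustrative result (hypothetical values): for an HTTP/1.1 client request
 * this appends an entry such as
 *
 *     Via: 1.1 proxy.example.com (squid/3.1)
 *
 * to any Via list already present, assuming ThisCache carries this cache's
 * hostname and product token; the version digits come from the client's
 * request, not from the hop we are about to make.
 */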
1630
1631 if (orig_request->flags.accelerated) {
1632 /* Append Surrogate-Capabilities */
1633 String strSurrogate(hdr_in->getList(HDR_SURROGATE_CAPABILITY));
1634 #if USE_SQUID_ESI
1635 snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0 ESI/1.0\"", Config.Accel.surrogate_id);
1636 #else
1637 snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0\"", Config.Accel.surrogate_id);
1638 #endif
1639 strListAdd(&strSurrogate, bbuf, ',');
1640 hdr_out->putStr(HDR_SURROGATE_CAPABILITY, strSurrogate.termedBuf());
1641 }
1642
1643 /** \pre Handle X-Forwarded-For */
1644 if (strcmp(opt_forwarded_for, "delete") != 0) {
1645
1646 String strFwd = hdr_in->getList(HDR_X_FORWARDED_FOR);
1647
1648 if (strFwd.size() > 65536/2) {
1649 // There is probably a forwarding loop with Via detection disabled.
1650 // If we do nothing, String will assert on overflow soon.
1651 // TODO: Terminate all transactions with huge XFF?
1652 strFwd = "error";
1653
1654 static int warnedCount = 0;
1655 if (warnedCount++ < 100) {
1656 const char *url = entry ? entry->url() : urlCanonical(orig_request);
1657 debugs(11, 1, "Warning: likely forwarding loop with " << url);
1658 }
1659 }
1660
1661 if (strcmp(opt_forwarded_for, "on") == 0) {
1662 /** If set to ON - append client IP or 'unknown'. */
1663 if ( orig_request->client_addr.IsNoAddr() )
1664 strListAdd(&strFwd, "unknown", ',');
1665 else
1666 strListAdd(&strFwd, orig_request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN), ',');
1667 } else if (strcmp(opt_forwarded_for, "off") == 0) {
1668 /** If set to OFF - append 'unknown'. */
1669 strListAdd(&strFwd, "unknown", ',');
1670 } else if (strcmp(opt_forwarded_for, "transparent") == 0) {
1671 /** If set to TRANSPARENT - pass through unchanged. */
1672 } else if (strcmp(opt_forwarded_for, "truncate") == 0) {
1673 /** If set to TRUNCATE - drop existing list and replace with client IP or 'unknown'. */
1674 if ( orig_request->client_addr.IsNoAddr() )
1675 strFwd = "unknown";
1676 else
1677 strFwd = orig_request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN);
1678 }
1679 if (strFwd.size() > 0)
1680 hdr_out->putStr(HDR_X_FORWARDED_FOR, strFwd.termedBuf());
1681 }
1682 /** If set to DELETE - do not copy through. */
1683
1684 /* append Host if not there already */
1685 if (!hdr_out->has(HDR_HOST)) {
1686 if (orig_request->peer_domain) {
1687 hdr_out->putStr(HDR_HOST, orig_request->peer_domain);
1688 } else if (orig_request->port == urlDefaultPort(orig_request->protocol)) {
1689 /* use port# only if not default */
1690 hdr_out->putStr(HDR_HOST, orig_request->GetHost());
1691 } else {
1692 httpHeaderPutStrf(hdr_out, HDR_HOST, "%s:%d",
1693 orig_request->GetHost(),
1694 (int) orig_request->port);
1695 }
1696 }
1697
1698 /* append Authorization if known in URL, not in header and going direct */
1699 if (!hdr_out->has(HDR_AUTHORIZATION)) {
1700 if (!request->flags.proxying && *request->login) {
1701 httpHeaderPutStrf(hdr_out, HDR_AUTHORIZATION, "Basic %s",
1702 base64_encode(request->login));
1703 }
1704 }
1705
1706 /* Fixup (Proxy-)Authorization special cases. Plain relaying dealt with above */
1707 httpFixupAuthentication(request, orig_request, hdr_in, hdr_out, flags);
1708
1709 /* append Cache-Control, add max-age if not there already */
1710 {
1711 HttpHdrCc *cc = hdr_in->getCc();
1712
1713 if (!cc)
1714 cc = httpHdrCcCreate();
1715
1716 #if 0 /* see bug 2330 */
1717 /* Set no-cache if determined needed but not found */
1718 if (orig_request->flags.nocache)
1719 EBIT_SET(cc->mask, CC_NO_CACHE);
1720 #endif
1721
1722 /* Add max-age only without no-cache */
1723 if (!EBIT_TEST(cc->mask, CC_MAX_AGE) && !EBIT_TEST(cc->mask, CC_NO_CACHE)) {
1724 const char *url =
1725 entry ? entry->url() : urlCanonical(orig_request);
1726 httpHdrCcSetMaxAge(cc, getMaxAge(url));
1727
1728 if (request->urlpath.size())
1729 assert(strstr(url, request->urlpath.termedBuf()));
1730 }
1731
1732 /* Enforce sibling relations */
1733 if (flags.only_if_cached)
1734 EBIT_SET(cc->mask, CC_ONLY_IF_CACHED);
1735
1736 hdr_out->putCc(cc);
1737
1738 httpHdrCcDestroy(cc);
1739 }
1740
1741 /* maybe append Connection: keep-alive */
1742 if (flags.keepalive) {
1743 if (flags.proxying) {
1744 hdr_out->putStr(HDR_PROXY_CONNECTION, "keep-alive");
1745 } else {
1746 hdr_out->putStr(HDR_CONNECTION, "keep-alive");
1747 }
1748 }
1749
1750 /* append Front-End-Https */
1751 if (flags.front_end_https) {
1752 if (flags.front_end_https == 1 || request->protocol == PROTO_HTTPS)
1753 hdr_out->putStr(HDR_FRONT_END_HTTPS, "On");
1754 }
1755
1756 /* Now mangle the headers. */
1757 if (Config2.onoff.mangle_request_headers)
1758 httpHdrMangleList(hdr_out, request, ROR_REQUEST);
1759
1760 strConnection.clean();
1761 }
1762
1763 /**
1764 * Decides whether a particular header may be cloned from the received client's request
1765 * to our outgoing fetch request.
1766 */
1767 void
1768 copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, HttpRequest * request, const HttpRequest * orig_request, HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags flags)
1769 {
1770 debugs(11, 5, "httpBuildRequestHeader: " << e->name << ": " << e->value );
1771
1772 switch (e->id) {
1773
1774 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid should not pass on. */
1775
1776 case HDR_PROXY_AUTHORIZATION:
1777 /** \par Proxy-Authorization:
1778 * Only pass on proxy authentication to peers for which
1779 * authentication forwarding is explicitly enabled
1780 */
1781 if (!flags.originpeer && flags.proxying && orig_request->peer_login &&
1782 (strcmp(orig_request->peer_login, "PASS") == 0 ||
1783 strcmp(orig_request->peer_login, "PROXYPASS") == 0 ||
1784 strcmp(orig_request->peer_login, "PASSTHRU") == 0)) {
1785 hdr_out->addEntry(e->clone());
1786 }
1787 break;
1788
1789 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid does not pass on. */
1790
1791 case HDR_CONNECTION: /** \par Connection: */
1792 case HDR_TE: /** \par TE: */
1793 case HDR_KEEP_ALIVE: /** \par Keep-Alive: */
1794 case HDR_PROXY_AUTHENTICATE: /** \par Proxy-Authenticate: */
1795 case HDR_TRAILERS: /** \par Trailers: */
1796 case HDR_UPGRADE: /** \par Upgrade: */
1797 case HDR_TRANSFER_ENCODING: /** \par Transfer-Encoding: */
1798 break;
1799
1800
1801 /** \par OTHER headers I haven't bothered to track down yet. */
1802
1803 case HDR_AUTHORIZATION:
1804 /** \par WWW-Authorization:
1805 * Pass on WWW authentication */
1806
1807 if (!flags.originpeer) {
1808 hdr_out->addEntry(e->clone());
1809 } else {
1810 /** \note In accelerators, only forward authentication if enabled
1811 * (see also httpFixupAuthentication for special cases)
1812 */
1813 if (orig_request->peer_login &&
1814 (strcmp(orig_request->peer_login, "PASS") == 0 ||
1815 strcmp(orig_request->peer_login, "PASSTHRU") == 0 ||
1816 strcmp(orig_request->peer_login, "PROXYPASS") == 0)) {
1817 hdr_out->addEntry(e->clone());
1818 }
1819 }
1820
1821 break;
1822
1823 case HDR_HOST:
1824 /** \par Host:
1825 * Normally Squid rewrites the Host: header.
1826 * However, there is one case when we don't: If the URL
1827 * went through our redirector and the admin configured
1828 * 'redir_rewrites_host' to be off.
1829 */
1830 if (orig_request->peer_domain)
1831 hdr_out->putStr(HDR_HOST, orig_request->peer_domain);
1832 else if (request->flags.redirected && !Config.onoff.redir_rewrites_host)
1833 hdr_out->addEntry(e->clone());
1834 else {
1835 /* use port# only if not default */
1836
1837 if (orig_request->port == urlDefaultPort(orig_request->protocol)) {
1838 hdr_out->putStr(HDR_HOST, orig_request->GetHost());
1839 } else {
1840 httpHeaderPutStrf(hdr_out, HDR_HOST, "%s:%d",
1841 orig_request->GetHost(),
1842 (int) orig_request->port);
1843 }
1844 }
1845
1846 break;
1847
1848 case HDR_IF_MODIFIED_SINCE:
1849 /** \par If-Modified-Since:
1850 * append unless we added our own;
1851 * \note at most one client's ims header can pass through */
1852
1853 if (!hdr_out->has(HDR_IF_MODIFIED_SINCE))
1854 hdr_out->addEntry(e->clone());
1855
1856 break;
1857
1858 case HDR_MAX_FORWARDS:
1859 /** \par Max-Forwards:
1860 * pass only on TRACE or OPTIONS requests */
1861 if (orig_request->method == METHOD_TRACE || orig_request->method == METHOD_OPTIONS) {
1862 const int64_t hops = e->getInt64();
1863
1864 if (hops > 0)
1865 hdr_out->putInt64(HDR_MAX_FORWARDS, hops - 1);
1866 }
1867
1868 break;
1869
1870 case HDR_VIA:
1871 /** \par Via:
1872 * If Via is disabled then forward any received header as-is.
1873 * Otherwise leave for explicit updated addition later. */
1874
1875 if (!Config.onoff.via)
1876 hdr_out->addEntry(e->clone());
1877
1878 break;
1879
1880 case HDR_RANGE:
1881
1882 case HDR_IF_RANGE:
1883
1884 case HDR_REQUEST_RANGE:
1885 /** \par Range:, If-Range:, Request-Range:
1886 * Only pass if we accept ranges */
1887 if (!we_do_ranges)
1888 hdr_out->addEntry(e->clone());
1889
1890 break;
1891
1892 case HDR_PROXY_CONNECTION:
1893
1894 case HDR_X_FORWARDED_FOR:
1895
1896 case HDR_CACHE_CONTROL:
1897 /** \par Proxy-Connection:, X-Forwarded-For:, Cache-Control:
1898 * handled specially by Squid, so omit them here;
1899 * they are appended after the loop if needed */
1900 break;
1901
1902 case HDR_FRONT_END_HTTPS:
1903 /** \par Front-End-Https:
1904 * Pass thru only if peer is configured with front-end-https */
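/* Illustrative example: a peer configured with the front-end-https cache_peer
 * option, e.g.
 *   cache_peer peer.example.com parent 3128 0 front-end-https=on
 * sets flags.front_end_https (see sendRequest below); the received header is
 * then dropped here so that Squid can supply its own value for that peer. */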
1905 if (!flags.front_end_https)
1906 hdr_out->addEntry(e->clone());
1907
1908 break;
1909
1910 default:
1911 /** \par default.
1912 * pass on all other header fields
1913 * which are NOT listed in the special Connection: header. */
1914
1915 if (strConnection.size()>0 && strListIsMember(&strConnection, e->name.termedBuf(), ',')) {
1916 debugs(11, 2, "'" << e->name << "' header cropped by Connection: definition");
1917 return;
1918 }
1919
1920 hdr_out->addEntry(e->clone());
1921 }
1922 }
1923
1924 bool
1925 HttpStateData::decideIfWeDoRanges (HttpRequest * orig_request)
1926 {
1927 bool result = true;
1928 /* decide if we want to do Ranges ourselves
1929 * (and fetch the whole object now).
1930 * We want to handle Ranges ourselves iff
1931 * - we can actually parse client Range specs
1932 * - the specs are expected to be simple enough (e.g. no out-of-order ranges)
1933 * - reply will be cachable
1934 * (If the reply will be uncachable we have to throw it away after
1935 * serving this request, so it is better to forward ranges to
1936 * the server and fetch only the requested content)
1937 */
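/* Illustrative example: a request carrying "Range: bytes=0-99" for a cachable
 * URL within range_offset_limit yields we_do_ranges=true, so the Range header
 * is stripped upstream, the whole object is fetched and stored, and the
 * requested range is served from the local copy. If the reply cannot be cached
 * or the spec exceeds the limit, the result is false and the client's Range
 * header goes upstream unchanged. */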
1938
1939 int64_t roffLimit = orig_request->getRangeOffsetLimit();
1940
1941 if (NULL == orig_request->range || !orig_request->flags.cachable
1942 || orig_request->range->offsetLimitExceeded(roffLimit) || orig_request->flags.connection_auth)
1943 result = false;
1944
1945 debugs(11, 8, "decideIfWeDoRanges: range specs: " <<
1946 orig_request->range << ", cachable: " <<
1947 orig_request->flags.cachable << "; we_do_ranges: " << result);
1948
1949 return result;
1950 }
1951
1952 /* build request prefix and append it to a given MemBuf;
1953 * return the length of the prefix */
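/* Illustrative example of the prefix appended to the MemBuf for a simple GET
 * (www.example.com is a placeholder host):
 *
 *   GET /index.html HTTP/1.1\r\n
 *   Host: www.example.com\r\n
 *   ...other headers built by httpBuildRequestHeader...\r\n
 *   \r\n        <- terminating CRLF appended below
 */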
1954 mb_size_t
1955 HttpStateData::buildRequestPrefix(HttpRequest * aRequest,
1956 HttpRequest * original_request,
1957 StoreEntry * sentry,
1958 MemBuf * mb,
1959 http_state_flags stateFlags)
1960 {
1961 const int offset = mb->size;
1962 HttpVersion httpver(1,1);
1963 mb->Printf("%s %s HTTP/%d.%d\r\n",
1964 RequestMethodStr(aRequest->method),
1965 aRequest->urlpath.size() ? aRequest->urlpath.termedBuf() : "/",
1966 httpver.major,httpver.minor);
1967 /* build and pack headers */
1968 {
1969 HttpHeader hdr(hoRequest);
1970 Packer p;
1971 httpBuildRequestHeader(aRequest, original_request, sentry, &hdr, stateFlags);
1972
1973 if (aRequest->flags.pinned && aRequest->flags.connection_auth)
1974 aRequest->flags.auth_sent = 1;
1975 else if (hdr.has(HDR_AUTHORIZATION))
1976 aRequest->flags.auth_sent = 1;
1977
1978 packerToMemInit(&p, mb);
1979 hdr.packInto(&p);
1980 hdr.clean();
1981 packerClean(&p);
1982 }
1983 /* append header terminator */
1984 mb->append(crlf, 2);
1985 return mb->size - offset;
1986 }
1987
1988 /* This will be called when connect completes. Write request. */
1989 bool
1990 HttpStateData::sendRequest()
1991 {
1992 MemBuf mb;
1993
1994 debugs(11, 5, "httpSendRequest: FD " << fd << ", request " << request << ", this " << this << ".");
1995
1996 if (!canSend(fd)) {
1997 debugs(11,3, HERE << "cannot send request to closing FD " << fd);
1998 assert(closeHandler != NULL);
1999 return false;
2000 }
2001
2002 typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
2003 AsyncCall::Pointer timeoutCall = asyncCall(11, 5, "HttpStateData::httpTimeout",
2004 TimeoutDialer(this,&HttpStateData::httpTimeout));
2005 commSetTimeout(fd, Config.Timeout.lifetime, timeoutCall);
2006 flags.do_next_read = 1;
2007 maybeReadVirginBody();
2008
2009 if (orig_request->body_pipe != NULL) {
2010 if (!startRequestBodyFlow()) // register to receive body data
2011 return false;
2012 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2013 Dialer dialer(this, &HttpStateData::sentRequestBody);
2014 requestSender = asyncCall(11,5, "HttpStateData::sentRequestBody", dialer);
2015 } else {
2016 assert(!requestBodySource);
2017 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2018 Dialer dialer(this, &HttpStateData::sendComplete);
2019 requestSender = asyncCall(11,5, "HttpStateData::SendComplete", dialer);
2020 }
2021
2022 if (_peer != NULL) {
2023 if (_peer->options.originserver) {
2024 flags.proxying = 0;
2025 flags.originpeer = 1;
2026 } else {
2027 flags.proxying = 1;
2028 flags.originpeer = 0;
2029 }
2030 } else {
2031 flags.proxying = 0;
2032 flags.originpeer = 0;
2033 }
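/* Illustrative example: a peer defined as
 *   cache_peer backend.example.com parent 80 0 originserver
 * takes the originpeer path above and is treated like an origin server, while
 * a plain parent proxy entry takes the proxying path. */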
2034
2035 /*
2036 * Is keep-alive okay for all request methods?
2037 */
2038 if (orig_request->flags.must_keepalive)
2039 flags.keepalive = 1;
2040 else if (!Config.onoff.server_pconns)
2041 flags.keepalive = 0;
2042 else if (_peer == NULL)
2043 flags.keepalive = 1;
2044 else if (_peer->stats.n_keepalives_sent < 10)
2045 flags.keepalive = 1;
2046 else if ((double) _peer->stats.n_keepalives_recv /
2047 (double) _peer->stats.n_keepalives_sent > 0.50)
2048 flags.keepalive = 1;
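/* Illustrative arithmetic for the last rule above: with 20 keep-alive requests
 * sent to a peer and 14 keep-alive responses received, 14/20 = 0.70 > 0.50, so
 * keep-alive is still requested; at or below that ratio (once 10 or more have
 * been sent) the connection is not kept persistent. */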
2049
2050 if (_peer) {
2051 if (neighborType(_peer, request) == PEER_SIBLING &&
2052 !_peer->options.allow_miss)
2053 flags.only_if_cached = 1;
2054
2055 flags.front_end_https = _peer->front_end_https;
2056 }
2057
2058 mb.init();
2059 request->peer_host = _peer ? _peer->host : NULL;
2060 buildRequestPrefix(request, orig_request, entry, &mb, flags);
2061 debugs(11, 6, "httpSendRequest: FD " << fd << ":\n" << mb.buf);
2062 comm_write_mbuf(fd, &mb, requestSender);
2063
2064 return true;
2065 }
2066
2067 void
2068 httpStart(FwdState *fwd)
2069 {
2070 debugs(11, 3, "httpStart: \"" << RequestMethodStr(fwd->request->method) << " " << fwd->entry->url() << "\"" );
2071 HttpStateData *httpState = new HttpStateData(fwd);
2072
2073 if (!httpState->sendRequest()) {
2074 debugs(11, 3, "httpStart: aborted");
2075 delete httpState;
2076 return;
2077 }
2078
2079 statCounter.server.all.requests++;
2080 statCounter.server.http.requests++;
2081
2082 /*
2083 * We used to set the read timeout here, but not any more.
2084 * Now it is set in httpSendComplete() after the full request,
2085 * including request body, has been written to the server.
2086 */
2087 }
2088
2089 void
2090 HttpStateData::doneSendingRequestBody()
2091 {
2092 debugs(11,5, HERE << "doneSendingRequestBody: FD " << fd);
2093
2094 #if HTTP_VIOLATIONS
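/* broken_posts is the squid.conf access list behind Config.accessList.brokenPosts.
 * Hypothetical example configuration:
 *   acl buggyPosts dstdomain .example.com
 *   broken_posts allow buggyPosts
 * Requests matching the list get an extra CRLF written after the request body
 * below, to work around origin servers that expect it. */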
2095 if (Config.accessList.brokenPosts) {
2096 ACLFilledChecklist ch(Config.accessList.brokenPosts, request, NULL);
2097 if (!ch.fastCheck()) {
2098 debugs(11, 5, "doneSendingRequestBody: didn't match brokenPosts");
2099 CommIoCbParams io(NULL);
2100 io.fd=fd;
2101 io.flag=COMM_OK;
2102 sendComplete(io);
2103 } else {
2104 debugs(11, 2, "doneSendingRequestBody: matched brokenPosts");
2105
2106 if (!canSend(fd)) {
2107 debugs(11,2, HERE << "cannot send CRLF to closing FD " << fd);
2108 assert(closeHandler != NULL);
2109 return;
2110 }
2111
2112 typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
2113 Dialer dialer(this, &HttpStateData::sendComplete);
2114 AsyncCall::Pointer call= asyncCall(11,5, "HttpStateData::SendComplete", dialer);
2115 comm_write(fd, "\r\n", 2, call);
2116 }
2117 return;
2118 }
2119 debugs(11, 5, "doneSendingRequestBody: No brokenPosts list");
2120 #endif /* HTTP_VIOLATIONS */
2121
2122 CommIoCbParams io(NULL);
2123 io.fd=fd;
2124 io.flag=COMM_OK;
2125 sendComplete(io);
2126 }
2127
2128 // more origin request body data is available
2129 void
2130 HttpStateData::handleMoreRequestBodyAvailable()
2131 {
2132 if (eof || fd < 0) {
2133 // XXX: we should check this condition in other callbacks then!
2134 // TODO: Check whether this can actually happen: We should unsubscribe
2135 // as a body consumer when the above condition(s) are detected.
2136 debugs(11, 1, HERE << "Transaction aborted while reading HTTP body");
2137 return;
2138 }
2139
2140 assert(requestBodySource != NULL);
2141
2142 if (requestBodySource->buf().hasContent()) {
2143 // XXX: why doesn't this trigger a debug message on every request?
2144
2145 if (flags.headers_parsed && !flags.abuse_detected) {
2146 flags.abuse_detected = 1;
2147 debugs(11, 1, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << orig_request->client_addr << "' -> '" << entry->url() << "'" );
2148
2149 if (virginReply()->sline.status == HTTP_INVALID_HEADER) {
2150 comm_close(fd);
2151 return;
2152 }
2153 }
2154 }
2155
2156 ServerStateData::handleMoreRequestBodyAvailable();
2157 }
2158
2159 // premature end of the request body
2160 void
2161 HttpStateData::handleRequestBodyProducerAborted()
2162 {
2163 ServerStateData::handleRequestBodyProducerAborted();
2164 // XXX: SendComplete(COMM_ERR_CLOSING) does little. Is it enough?
2165 CommIoCbParams io(NULL);
2166 io.fd=fd;
2167 io.flag=COMM_ERR_CLOSING;
2168 sendComplete(io);
2169 }
2170
2171 // called when we wrote request headers(!) or a part of the body
2172 void
2173 HttpStateData::sentRequestBody(const CommIoCbParams &io)
2174 {
2175 if (io.size > 0)
2176 kb_incr(&statCounter.server.http.kbytes_out, io.size);
2177
2178 ServerStateData::sentRequestBody(io);
2179 }
2180
2181 // Quickly abort the transaction
2182 // TODO: destruction should be sufficient as the destructor should clean up,
2183 // including canceling close handlers
2184 void
2185 HttpStateData::abortTransaction(const char *reason)
2186 {
2187 debugs(11,5, HERE << "aborting transaction for " << reason <<
2188 "; FD " << fd << ", this " << this);
2189
2190 if (fd >= 0) {
2191 comm_close(fd);
2192 return;
2193 }
2194
2195 fwd->handleUnregisteredServerEnd();
2196 deleteThis("HttpStateData::abortTransaction");
2197 }
2198
2199 HttpRequest *
2200 HttpStateData::originalRequest()
2201 {
2202 return orig_request;
2203 }