5 * DEBUG: section 11 Hypertext Transfer Protocol (HTTP)
6 * AUTHOR: Harvest Derived
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
37 * Anonymizing patch by lutz@as-node.jena.thur.de
38 * have a look into http-anon.c to get more information.
43 #include "acl/FilledChecklist.h"
44 #include "auth/UserRequest.h"
45 #include "base/TextException.h"
47 #include "DelayPools.h"
49 #include "errorpage.h"
52 #include "HttpHdrContRange.h"
53 #include "HttpHdrSc.h"
54 #include "HttpHdrScTarget.h"
55 #include "HttpReply.h"
56 #include "HttpRequest.h"
58 #include "MemObject.h"
61 #include "SquidTime.h"
65 #define SQUID_ENTER_THROWING_CODE() try {
66 #define SQUID_EXIT_THROWING_CODE(status) \
69 catch (const std::exception &e) { \
70 debugs (11, 1, "Exception error:" << e.what()); \
// Register HttpStateData with Squid's cbdata reference-counting machinery
// so asynchronous callbacks can safely detect a freed object.
74 CBDATA_CLASS_INIT(HttpStateData
);
// Canonical CRLF terminator used when assembling HTTP message lines.
76 static const char *const crlf
= "\r\n";
// Forward declaration: possibly eject a cached public StoreEntry when a
// reply's status code indicates the cached variant is no longer valid.
78 static void httpMaybeRemovePublic(StoreEntry
*, http_status
);
// Forward declaration: copy a single client-side request header entry into
// the outgoing upstream header, given the Connection: header list
// (strConnection), the range-handling decision (we_do_ranges) and the
// current connection flags.
79 static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry
*e
, const String strConnection
, HttpRequest
* request
, const HttpRequest
* orig_request
,
80 HttpHeader
* hdr_out
, const int we_do_ranges
, const http_state_flags
);
82 HttpStateData::HttpStateData(FwdState
*theFwdState
) : AsyncJob("HttpStateData"), ServerStateData(theFwdState
),
83 lastChunk(0), header_bytes_read(0), reply_bytes_read(0),
84 body_bytes_truncated(0), httpChunkDecoder(NULL
)
86 debugs(11,5,HERE
<< "HttpStateData " << this << " created");
87 ignoreCacheControl
= false;
88 surrogateNoStore
= false;
89 fd
= fwd
->conn()->fd
; // TODO: store Comm::Connection instead of FD
92 orig_request
= HTTPMSGLOCK(fwd
->request
);
94 // reset peer response time stats for %<pt
95 orig_request
->hier
.peer_http_request_sent
.tv_sec
= 0;
96 orig_request
->hier
.peer_http_request_sent
.tv_usec
= 0;
98 if (fwd
->conn() != NULL
)
99 _peer
= cbdataReference(fwd
->conn()->getPeer()); /* might be NULL */
104 if (_peer
->options
.originserver
)
105 url
= orig_request
->urlpath
.termedBuf();
109 HttpRequest
* proxy_req
= new HttpRequest(orig_request
->method
, orig_request
->protocol
, url
);
111 proxy_req
->SetHost(_peer
->host
);
113 proxy_req
->port
= _peer
->http_port
;
115 proxy_req
->flags
= orig_request
->flags
;
117 proxy_req
->lastmod
= orig_request
->lastmod
;
119 proxy_req
->flags
.proxying
= 1;
121 HTTPMSGUNLOCK(request
);
123 request
= HTTPMSGLOCK(proxy_req
);
126 * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
127 * We might end up getting the object from somewhere else if,
128 * for example, the request to this neighbor fails.
130 if (_peer
->options
.proxy_only
)
131 entry
->releaseRequest();
135 entry
->setNoDelay(_peer
->options
.no_delay
);
141 * register the handler to free HTTP state data when the FD closes
143 typedef CommCbMemFunT
<HttpStateData
, CommCloseCbParams
> Dialer
;
144 closeHandler
= asyncCall(9, 5, "httpStateData::httpStateConnClosed",
145 Dialer(this,&HttpStateData::httpStateConnClosed
));
146 comm_add_close_handler(fd
, closeHandler
);
149 HttpStateData::~HttpStateData()
152 * don't forget that ~ServerStateData() gets called automatically
155 if (!readBuf
->isNull())
160 if (httpChunkDecoder
)
161 delete httpChunkDecoder
;
163 HTTPMSGUNLOCK(orig_request
);
165 debugs(11,5, HERE
<< "HttpStateData " << this << " destroyed; FD " << fd
);
169 HttpStateData::dataDescriptor() const
175 httpStateFree(int fd, void *data)
177 HttpStateData *httpState = static_cast<HttpStateData *>(data);
178 debugs(11, 5, "httpStateFree: FD " << fd << ", httpState=" << data);
// Comm close-callback: the server-side TCP connection has closed.
// Logs the event and schedules asynchronous deletion of this job.
183 HttpStateData::httpStateConnClosed(const CommCloseCbParams
// NOTE(review): "¶ms" below is mojibake for "&params" (an HTML &para;
// entity mangled during extraction) — the parameter is a const reference.
¶ms
)
// NOTE: the debug label still says "httpStateFree" — the pre-refactor name.
185 debugs(11, 5, "httpStateFree: FD " << params
.fd
<< ", httpState=" << params
.data
)
185 debugs(11, 5, "httpStateFree: FD " << params
.fd
<< ", httpState=" << params
.data
);
186 deleteThis("HttpStateData::httpStateConnClosed");
190 httpCachable(const HttpRequestMethod
& method
)
192 /* GET and HEAD are cachable. Others are not. */
194 // TODO: replace with HttpRequestMethod::isCachable() ?
195 if (method
!= METHOD_GET
&& method
!= METHOD_HEAD
)
203 HttpStateData::httpTimeout(const CommTimeoutCbParams
¶ms
)
205 debugs(11, 4, "httpTimeout: FD " << fd
<< ": '" << entry
->url() << "'" );
207 if (entry
->store_status
== STORE_PENDING
) {
208 fwd
->fail(errorCon(ERR_READ_TIMEOUT
, HTTP_GATEWAY_TIMEOUT
, fwd
->request
));
215 httpMaybeRemovePublic(StoreEntry
* e
, http_status status
)
221 if (!EBIT_TEST(e
->flags
, KEY_PRIVATE
))
228 case HTTP_NON_AUTHORITATIVE_INFORMATION
:
230 case HTTP_MULTIPLE_CHOICES
:
232 case HTTP_MOVED_PERMANENTLY
:
234 case HTTP_MOVED_TEMPORARILY
:
245 case HTTP_METHOD_NOT_ALLOWED
:
252 case HTTP_UNAUTHORIZED
:
262 * Any 2xx response should eject previously cached entities...
265 if (status
>= 200 && status
< 300)
273 if (!remove
&& !forbidden
)
278 if (e
->mem_obj
->request
)
279 pe
= storeGetPublicByRequest(e
->mem_obj
->request
);
281 pe
= storeGetPublic(e
->mem_obj
->url
, e
->mem_obj
->method
);
286 neighborsHtcpClear(e
, NULL
, e
->mem_obj
->request
, e
->mem_obj
->method
, HTCP_CLR_INVALIDATION
);
292 * Also remove any cached HEAD response in case the object has
295 if (e
->mem_obj
->request
)
296 pe
= storeGetPublicByRequestMethod(e
->mem_obj
->request
, METHOD_HEAD
);
298 pe
= storeGetPublic(e
->mem_obj
->url
, METHOD_HEAD
);
303 neighborsHtcpClear(e
, NULL
, e
->mem_obj
->request
, HttpRequestMethod(METHOD_HEAD
), HTCP_CLR_INVALIDATION
);
310 HttpStateData::processSurrogateControl(HttpReply
*reply
)
312 if (request
->flags
.accelerated
&& reply
->surrogate_control
) {
313 HttpHdrScTarget
*sctusable
= httpHdrScGetMergedTarget(reply
->surrogate_control
, Config
.Accel
.surrogate_id
);
316 if (EBIT_TEST(sctusable
->mask
, SC_NO_STORE
) ||
317 (Config
.onoff
.surrogate_is_remote
318 && EBIT_TEST(sctusable
->mask
, SC_NO_STORE_REMOTE
))) {
319 surrogateNoStore
= true;
320 entry
->makePrivate();
323 /* The HttpHeader logic cannot tell if the header it's parsing is a reply to an
324 * accelerated request or not...
325 * Still, this is an abstraction breach. - RC
327 if (sctusable
->max_age
!= -1) {
328 if (sctusable
->max_age
< sctusable
->max_stale
)
329 reply
->expires
= reply
->date
+ sctusable
->max_age
;
331 reply
->expires
= reply
->date
+ sctusable
->max_stale
;
333 /* And update the timestamps */
334 entry
->timestampsSet();
337 /* We ignore cache-control directives as per the Surrogate specification */
338 ignoreCacheControl
= true;
340 httpHdrScTargetDestroy(sctusable
);
346 HttpStateData::cacheableReply()
348 HttpReply
const *rep
= finalReply();
349 HttpHeader
const *hdr
= &rep
->header
;
350 const int cc_mask
= (rep
->cache_control
) ? rep
->cache_control
->mask
: 0;
354 const refresh_t
*R
= NULL
;
356 /* This strange looking define first looks up the refresh pattern
357 * and then checks if the specified flag is set. The main purpose
358 * of this is to simplify the refresh pattern lookup and HTTP_VIOLATIONS
361 #define REFRESH_OVERRIDE(flag) \
362 ((R = (R ? R : refreshLimits(entry->mem_obj->url))) , \
363 (R && R->flags.flag))
365 #define REFRESH_OVERRIDE(flag) 0
368 if (surrogateNoStore
)
371 if (!ignoreCacheControl
) {
372 if (EBIT_TEST(cc_mask
, CC_PRIVATE
)) {
373 if (!REFRESH_OVERRIDE(ignore_private
))
377 if (EBIT_TEST(cc_mask
, CC_NO_CACHE
)) {
378 if (!REFRESH_OVERRIDE(ignore_no_cache
))
382 if (EBIT_TEST(cc_mask
, CC_NO_STORE
)) {
383 if (!REFRESH_OVERRIDE(ignore_no_store
))
388 if (request
->flags
.auth
|| request
->flags
.auth_sent
) {
390 * Responses to requests with authorization may be cached
391 * only if a Cache-Control: public reply header is present.
392 * RFC 2068, sec 14.9.4
395 if (!EBIT_TEST(cc_mask
, CC_PUBLIC
)) {
396 if (!REFRESH_OVERRIDE(ignore_auth
))
401 /* Pragma: no-cache in _replies_ is not documented in HTTP,
402 * but servers like "Active Imaging Webcast/2.0" sure do use it */
403 if (hdr
->has(HDR_PRAGMA
)) {
404 String s
= hdr
->getList(HDR_PRAGMA
);
405 const int no_cache
= strListIsMember(&s
, "no-cache", ',');
409 if (!REFRESH_OVERRIDE(ignore_no_cache
))
415 * The "multipart/x-mixed-replace" content type is used for
416 * continuous push replies. These are generally dynamic and
417 * probably should not be cachable
419 if ((v
= hdr
->getStr(HDR_CONTENT_TYPE
)))
420 if (!strncasecmp(v
, "multipart/x-mixed-replace", 25))
423 switch (rep
->sline
.status
) {
424 /* Responses that are cacheable */
428 case HTTP_NON_AUTHORITATIVE_INFORMATION
:
430 case HTTP_MULTIPLE_CHOICES
:
432 case HTTP_MOVED_PERMANENTLY
:
436 * Don't cache objects that need to be refreshed on next request,
437 * unless we know how to refresh it.
440 if (!refreshIsCachable(entry
) && !REFRESH_OVERRIDE(store_stale
)) {
441 debugs(22, 3, "refreshIsCachable() returned non-cacheable..");
445 /* don't cache objects from peers w/o LMT, Date, or Expires */
446 /* check that is it enough to check headers @?@ */
449 else if (rep
->last_modified
> -1)
454 /* @?@ (here and 302): invalid expires header compiles to squid_curtime */
455 else if (rep
->expires
> -1)
463 /* Responses that only are cacheable if the server says so */
465 case HTTP_MOVED_TEMPORARILY
:
466 case HTTP_TEMPORARY_REDIRECT
:
467 if (rep
->expires
> rep
->date
&& rep
->date
> 0)
475 /* Errors can be negatively cached */
477 case HTTP_NO_CONTENT
:
481 case HTTP_BAD_REQUEST
:
487 case HTTP_METHOD_NOT_ALLOWED
:
489 case HTTP_REQUEST_URI_TOO_LARGE
:
491 case HTTP_INTERNAL_SERVER_ERROR
:
493 case HTTP_NOT_IMPLEMENTED
:
495 case HTTP_BAD_GATEWAY
:
497 case HTTP_SERVICE_UNAVAILABLE
:
499 case HTTP_GATEWAY_TIMEOUT
:
505 /* Some responses can never be cached */
507 case HTTP_PARTIAL_CONTENT
: /* Not yet supported */
511 case HTTP_NOT_MODIFIED
:
513 case HTTP_UNAUTHORIZED
:
515 case HTTP_PROXY_AUTHENTICATION_REQUIRED
:
517 case HTTP_INVALID_HEADER
: /* Squid header parsing error */
519 case HTTP_HEADER_TOO_LARGE
:
521 case HTTP_PAYMENT_REQUIRED
:
522 case HTTP_NOT_ACCEPTABLE
:
523 case HTTP_REQUEST_TIMEOUT
:
525 case HTTP_LENGTH_REQUIRED
:
526 case HTTP_PRECONDITION_FAILED
:
527 case HTTP_REQUEST_ENTITY_TOO_LARGE
:
528 case HTTP_UNSUPPORTED_MEDIA_TYPE
:
529 case HTTP_UNPROCESSABLE_ENTITY
:
531 case HTTP_FAILED_DEPENDENCY
:
532 case HTTP_INSUFFICIENT_STORAGE
:
533 case HTTP_REQUESTED_RANGE_NOT_SATISFIABLE
:
534 case HTTP_EXPECTATION_FAILED
:
539 /* RFC 2616 section 6.1.1: an unrecognized response MUST NOT be cached. */
540 debugs (11, 3, HERE
<< "Unknown HTTP status code " << rep
->sline
.status
<< ". Not cacheable.");
552 * For Vary, store the relevant request headers as
553 * virtual headers in the reply
554 * Returns false if the variance cannot be stored
557 httpMakeVaryMark(HttpRequest
* request
, HttpReply
const * reply
)
560 const char *pos
= NULL
;
567 vary
= reply
->header
.getList(HDR_VARY
);
569 while (strListGetItem(&vary
, ',', &item
, &ilen
, &pos
)) {
570 char *name
= (char *)xmalloc(ilen
+ 1);
571 xstrncpy(name
, item
, ilen
+ 1);
574 if (strcmp(name
, "*") == 0) {
575 /* Cannot handle "Vary: *" without ETag support */
581 strListAdd(&vstr
, name
, ',');
582 hdr
= request
->header
.getByName(name
);
584 value
= hdr
.termedBuf();
587 value
= rfc1738_escape_part(value
);
588 vstr
.append("=\"", 2);
590 vstr
.append("\"", 1);
597 #if X_ACCELERATOR_VARY
600 vary
= reply
->header
.getList(HDR_X_ACCELERATOR_VARY
);
602 while (strListGetItem(&vary
, ',', &item
, &ilen
, &pos
)) {
603 char *name
= (char *)xmalloc(ilen
+ 1);
604 xstrncpy(name
, item
, ilen
+ 1);
606 strListAdd(&vstr
, name
, ',');
607 hdr
= request
->header
.getByName(name
);
609 value
= hdr
.termedBuf();
612 value
= rfc1738_escape_part(value
);
613 vstr
.append("=\"", 2);
615 vstr
.append("\"", 1);
624 debugs(11, 3, "httpMakeVaryMark: " << vstr
);
625 return vstr
.termedBuf();
629 HttpStateData::keepaliveAccounting(HttpReply
*reply
)
633 _peer
->stats
.n_keepalives_sent
++;
635 if (reply
->keep_alive
) {
637 _peer
->stats
.n_keepalives_recv
++;
639 if (Config
.onoff
.detect_broken_server_pconns
640 && reply
->bodySize(request
->method
) == -1 && !flags
.chunked
) {
641 debugs(11, 1, "keepaliveAccounting: Impossible keep-alive header from '" << entry
->url() << "'" );
642 // debugs(11, 2, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------" );
643 flags
.keepalive_broken
= 1;
649 HttpStateData::checkDateSkew(HttpReply
*reply
)
651 if (reply
->date
> -1 && !_peer
) {
652 int skew
= abs((int)(reply
->date
- squid_curtime
));
655 debugs(11, 3, "" << request
->GetHost() << "'s clock is skewed by " << skew
<< " seconds!");
660 * This creates the error page itself.. its likely
661 * that the forward ported reply header max size patch
662 * generates non http conformant error pages - in which
663 * case the errors where should be 'BAD_GATEWAY' etc
666 HttpStateData::processReplyHeader()
668 /** Creates a blank header. If this routine is made incremental, this will not do */
669 Ctx ctx
= ctx_enter(entry
->mem_obj
->url
);
670 debugs(11, 3, "processReplyHeader: key '" << entry
->getMD5Text() << "'");
672 assert(!flags
.headers_parsed
);
674 if (!readBuf
->hasContent())
677 http_status error
= HTTP_STATUS_NONE
;
679 HttpReply
*newrep
= new HttpReply
;
680 const bool parsed
= newrep
->parse(readBuf
, eof
, &error
);
682 if (!parsed
&& readBuf
->contentSize() > 5 && strncmp(readBuf
->content(), "HTTP/", 5) != 0 && strncmp(readBuf
->content(), "ICY", 3) != 0) {
684 HttpReply
*tmprep
= new HttpReply
;
685 tmprep
->setHeaders(HTTP_OK
, "Gatewaying", NULL
, -1, -1, -1);
686 tmprep
->header
.putExt("X-Transformed-From", "HTTP/0.9");
688 newrep
->parse(mb
, eof
, &error
);
691 if (!parsed
&& error
> 0) { // unrecoverable parsing error
692 debugs(11, 3, "processReplyHeader: Non-HTTP-compliant header: '" << readBuf
->content() << "'");
693 flags
.headers_parsed
= 1;
694 newrep
->sline
.version
= HttpVersion(1,1);
695 newrep
->sline
.status
= error
;
696 HttpReply
*vrep
= setVirginReply(newrep
);
697 entry
->replaceHttpReply(vrep
);
702 if (!parsed
) { // need more data
710 debugs(11, 9, "GOT HTTP REPLY HDR:\n---------\n" << readBuf
->content() << "\n----------");
712 header_bytes_read
= headersEnd(readBuf
->content(), readBuf
->contentSize());
713 readBuf
->consume(header_bytes_read
);
716 /* Skip 1xx messages for now. Advertised in Via as an internal 1.0 hop */
717 if (newrep
->sline
.protocol
== PROTO_HTTP
&& newrep
->sline
.status
>= 100 && newrep
->sline
.status
< 200) {
719 #if WHEN_HTTP11_EXPECT_HANDLED
720 /* When HTTP/1.1 check if the client is expecting a 1xx reply and maybe pass it on */
721 if (orig_request
->header
.has(HDR_EXPECT
)) {
722 // TODO: pass to the client anyway?
726 debugs(11, 2, HERE
<< "1xx headers consume " << header_bytes_read
<< " bytes header.");
727 header_bytes_read
= 0;
728 if (reply_bytes_read
> 0)
729 debugs(11, 2, HERE
<< "1xx headers consume " << reply_bytes_read
<< " bytes reply.");
730 reply_bytes_read
= 0;
732 processReplyHeader();
737 if (newrep
->sline
.protocol
== PROTO_HTTP
&& newrep
->header
.hasListMember(HDR_TRANSFER_ENCODING
, "chunked", ',')) {
739 httpChunkDecoder
= new ChunkedCodingParser
;
742 if (!peerSupportsConnectionPinning())
743 orig_request
->flags
.connection_auth_disabled
= 1;
745 HttpReply
*vrep
= setVirginReply(newrep
);
746 flags
.headers_parsed
= 1;
748 keepaliveAccounting(vrep
);
752 processSurrogateControl (vrep
);
754 /** \todo IF the reply is a 1.0 reply, AND it has a Connection: Header
755 * Parse the header and remove all referenced headers
758 orig_request
->hier
.peer_reply_status
= newrep
->sline
.status
;
765 * returns true if the peer can support connection pinning
767 bool HttpStateData::peerSupportsConnectionPinning() const
769 const HttpReply
*rep
= entry
->mem_obj
->getReply();
770 const HttpHeader
*hdr
= &rep
->header
;
777 /*If this peer does not support connection pinning (authenticated
778 connections) return false
780 if (!_peer
->connection_auth
)
783 /*The peer supports connection pinning and the http reply status
784 is not unauthorized, so the related connection can be pinned
786 if (rep
->sline
.status
!= HTTP_UNAUTHORIZED
)
789 /*The server responded with HTTP_UNAUTHORIZED and the peer configured
790 with "connection-auth=on" we know that the peer supports pinned
793 if (_peer
->connection_auth
== 1)
796 /*At this point peer has configured with "connection-auth=auto"
797 parameter so we need some extra checks to decide if we are going
798 to allow pinned connections or not
801 /*if the peer configured with originserver just allow connection
802 pinning (squid 2.6 behaviour)
804 if (_peer
->options
.originserver
)
807 /*if the connections it is already pinned it is OK*/
808 if (request
->flags
.pinned
)
811 /*Allow pinned connections only if the Proxy-support header exists in
812 reply and has in its list the "Session-Based-Authentication"
813 which means that the peer supports connection pinning.
815 if (!hdr
->has(HDR_PROXY_SUPPORT
))
818 header
= hdr
->getStrOrList(HDR_PROXY_SUPPORT
);
819 /* XXX This ought to be done in a case-insensitive manner */
820 rc
= (strstr(header
.termedBuf(), "Session-Based-Authentication") != NULL
);
825 // Called when we parsed (and possibly adapted) the headers but
826 // had not starting storing (a.k.a., sending) the body yet.
828 HttpStateData::haveParsedReplyHeaders()
830 ServerStateData::haveParsedReplyHeaders();
832 Ctx ctx
= ctx_enter(entry
->mem_obj
->url
);
833 HttpReply
*rep
= finalReply();
835 if (rep
->sline
.status
== HTTP_PARTIAL_CONTENT
&&
837 currentOffset
= rep
->content_range
->spec
.offset
;
839 entry
->timestampsSet();
841 /* Check if object is cacheable or not based on reply code */
842 debugs(11, 3, "haveParsedReplyHeaders: HTTP CODE: " << rep
->sline
.status
);
844 if (neighbors_do_private_keys
)
845 httpMaybeRemovePublic(entry
, rep
->sline
.status
);
847 if (rep
->header
.has(HDR_VARY
)
848 #if X_ACCELERATOR_VARY
849 || rep
->header
.has(HDR_X_ACCELERATOR_VARY
)
852 const char *vary
= httpMakeVaryMark(orig_request
, rep
);
855 entry
->makePrivate();
856 if (!fwd
->reforwardableStatus(rep
->sline
.status
))
857 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
861 entry
->mem_obj
->vary_headers
= xstrdup(vary
);
869 * If its not a reply that we will re-forward, then
870 * allow the client to get it.
872 if (!fwd
->reforwardableStatus(rep
->sline
.status
))
873 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
875 switch (cacheableReply()) {
882 entry
->makePrivate();
888 if (Config
.negativeTtl
> 0)
889 entry
->cacheNegatively();
892 entry
->makePrivate();
904 if (!ignoreCacheControl
&& rep
->cache_control
) {
905 if (EBIT_TEST(rep
->cache_control
->mask
, CC_PROXY_REVALIDATE
))
906 EBIT_SET(entry
->flags
, ENTRY_REVALIDATE
);
907 else if (EBIT_TEST(rep
->cache_control
->mask
, CC_MUST_REVALIDATE
))
908 EBIT_SET(entry
->flags
, ENTRY_REVALIDATE
);
912 headersLog(1, 0, request
->method
, rep
);
// Decide, for a reply that is otherwise complete, whether the server
// connection may be kept alive (COMPLETE_PERSISTENT_MSG) or must be
// closed (COMPLETE_NONPERSISTENT_MSG). Checks, in order: an explicit
// "Connection: close" from the server, whether we asked for keep-alive,
// whether our request was fully written, and the reply's own
// keep-alive verdict.
919 HttpStateData::ConnectionStatus
920 HttpStateData::statusIfComplete() const
922 const HttpReply
*rep
= virginReply();
924 * If the reply wants to close the connection, it takes precedence */
926 if (httpHeaderHasConnDir(&rep
->header
, "close"))
927 return COMPLETE_NONPERSISTENT_MSG
;
930 * If we didn't send a keep-alive request header, then this
931 * can not be a persistent connection.
933 if (!flags
.keepalive
)
934 return COMPLETE_NONPERSISTENT_MSG
;
937 * If we haven't sent the whole request then this can not be a persistent
// A partially-sent request leaves the wire state ambiguous, so the
// connection cannot be reused; this is logged as noteworthy (level 1).
940 if (!flags
.request_sent
) {
941 debugs(11, 1, "statusIfComplete: Request not yet fully sent \"" << RequestMethodStr(orig_request
->method
) << " " << entry
->url() << "\"" );
942 return COMPLETE_NONPERSISTENT_MSG
;
946 * What does the reply have to say about keep-alive?
950 * If the origin server (HTTP/1.0) does not send a keep-alive
951 * header, but keeps the connection open anyway, what happens?
952 * We'll return here and http.c waits for an EOF before changing
953 * store_status to STORE_OK. Combine this with ENTRY_FWD_HDR_WAIT
954 * and an error status code, and we might have to wait until
955 * the server times out the socket.
957 if (!rep
->keep_alive
)
958 return COMPLETE_NONPERSISTENT_MSG
;
// All checks passed: the connection is eligible for reuse.
960 return COMPLETE_PERSISTENT_MSG
;
963 HttpStateData::ConnectionStatus
964 HttpStateData::persistentConnStatus() const
966 debugs(11, 3, "persistentConnStatus: FD " << fd
<< " eof=" << eof
);
967 const HttpReply
*vrep
= virginReply();
968 debugs(11, 5, "persistentConnStatus: content_length=" << vrep
->content_length
);
970 /* If we haven't seen the end of reply headers, we are not done */
971 debugs(11, 5, "persistentConnStatus: flags.headers_parsed=" << flags
.headers_parsed
);
973 if (!flags
.headers_parsed
)
974 return INCOMPLETE_MSG
;
976 if (eof
) // already reached EOF
977 return COMPLETE_NONPERSISTENT_MSG
;
980 * In chunked response we do not know the content length but we are absolutely
981 * sure about the end of response, so we are calling the statusIfComplete to
982 * decide if we can be persistent
984 if (lastChunk
&& flags
.chunked
)
985 return statusIfComplete();
987 const int64_t clen
= vrep
->bodySize(request
->method
);
989 debugs(11, 5, "persistentConnStatus: clen=" << clen
);
991 /* If the body size is unknown we must wait for EOF */
993 return INCOMPLETE_MSG
;
996 * If the body size is known, we must wait until we've gotten all of it. */
999 // if (entry->mem_obj->endOffset() < vrep->content_length + vrep->hdr_sz)
1000 const int64_t body_bytes_read
= reply_bytes_read
- header_bytes_read
;
1001 debugs(11,5, "persistentConnStatus: body_bytes_read=" <<
1002 body_bytes_read
<< " content_length=" << vrep
->content_length
);
1004 if (body_bytes_read
< vrep
->content_length
)
1005 return INCOMPLETE_MSG
;
1007 if (body_bytes_truncated
> 0) // already read more than needed
1008 return COMPLETE_NONPERSISTENT_MSG
; // disable pconns
1012 * If there is no message body or we got it all, we can be persistent */
1013 return statusIfComplete();
1017 * This is the callback after some data has been read from the network
1021 HttpStateData::ReadReplyWrapper(int fd, char *buf, size_t len, comm_err_t flag, int xerrno, void *data)
1023 HttpStateData *httpState = static_cast<HttpStateData *>(data);
1024 assert (fd == httpState->fd);
1025 // assert(buf == readBuf->content());
1026 PROF_start(HttpStateData_readReply);
1027 httpState->readReply(len, flag, xerrno);
1028 PROF_stop(HttpStateData_readReply);
1032 /* XXX this function is too long! */
1034 HttpStateData::readReply(const CommIoCbParams
&io
)
1040 assert(fd
== io
.fd
);
1042 flags
.do_next_read
= 0;
1044 debugs(11, 5, "httpReadReply: FD " << fd
<< ": len " << len
<< ".");
1046 // Bail out early on COMM_ERR_CLOSING - close handlers will tidy up for us
1047 if (io
.flag
== COMM_ERR_CLOSING
) {
1048 debugs(11, 3, "http socket closing");
1052 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
1053 maybeReadVirginBody();
1057 // handle I/O errors
1058 if (io
.flag
!= COMM_OK
|| len
< 0) {
1059 debugs(11, 2, "httpReadReply: FD " << fd
<< ": read failure: " << xstrerror() << ".");
1061 if (ignoreErrno(io
.xerrno
)) {
1062 flags
.do_next_read
= 1;
1065 err
= errorCon(ERR_READ_ERROR
, HTTP_BAD_GATEWAY
, fwd
->request
);
1066 err
->xerrno
= io
.xerrno
;
1068 flags
.do_next_read
= 0;
1077 readBuf
->appended(len
);
1078 reply_bytes_read
+= len
;
1081 DelayId delayId
= entry
->mem_obj
->mostBytesAllowed();
1082 delayId
.bytesIn(len
);
1085 kb_incr(&statCounter
.server
.all
.kbytes_in
, len
);
1086 kb_incr(&statCounter
.server
.http
.kbytes_in
, len
);
1087 IOStats
.Http
.reads
++;
1089 for (clen
= len
- 1, bin
= 0; clen
; bin
++)
1092 IOStats
.Http
.read_hist
[bin
]++;
1094 // update peer response time stats (%<pt)
1095 const timeval
&sent
= orig_request
->hier
.peer_http_request_sent
;
1096 orig_request
->hier
.peer_response_time
=
1097 sent
.tv_sec
? tvSubMsec(sent
, current_time
) : -1;
1101 * Here the RFC says we should ignore whitespace between replies, but we can't as
1102 * doing so breaks HTTP/0.9 replies beginning with whitespace, and in addition
1103 * the response splitting countermeasures is extremely likely to trigger on this,
1104 * not allowing connection reuse in the first place.
1107 if (!flags
.headers_parsed
&& len
> 0 && fd_table
[fd
].uses
> 1) {
1108 /* Skip whitespace between replies */
1110 while (len
> 0 && xisspace(*buf
))
1111 xmemmove(buf
, buf
+ 1, len
--);
1114 /* Continue to read... */
1115 /* Timeout NOT increased. This whitespace was from previous reply */
1116 flags
.do_next_read
= 1;
1117 maybeReadVirginBody();
1124 if (len
== 0) { // reached EOF?
1126 flags
.do_next_read
= 0;
1128 /* Bug 2879: Replies may terminate with \r\n then EOF instead of \r\n\r\n
1129 * Ensure here that we have at minimum two \r\n when EOF is seen.
1130 * TODO: Add eof parameter to headersEnd() and move this hack there.
1132 if (readBuf
->contentSize() && !flags
.headers_parsed
) {
1134 * Yes Henrik, there is a point to doing this. When we
1135 * called httpProcessReplyHeader() before, we didn't find
1136 * the end of headers, but now we are definitely at EOF, so
1137 * we want to process the reply headers.
1139 /* Fake an "end-of-headers" to work around such broken servers */
1140 readBuf
->append("\r\n", 2);
1144 if (!flags
.headers_parsed
) { // have not parsed headers yet?
1145 PROF_start(HttpStateData_processReplyHeader
);
1146 processReplyHeader();
1147 PROF_stop(HttpStateData_processReplyHeader
);
1149 if (!continueAfterParsingHeader()) // parsing error or need more data
1150 return; // TODO: send errors to ICAP
1152 adaptOrFinalizeReply();
1155 // kick more reads if needed and/or process the response body, if any
1156 PROF_start(HttpStateData_processReplyBody
);
1157 processReplyBody(); // may call serverComplete()
1158 PROF_stop(HttpStateData_processReplyBody
);
1162 \retval true if we can continue with processing the body or doing ICAP.
1165 HttpStateData::continueAfterParsingHeader()
1167 if (!flags
.headers_parsed
&& !eof
) {
1168 debugs(11, 9, HERE
<< "needs more at " << readBuf
->contentSize());
1169 flags
.do_next_read
= 1;
1170 /** \retval false If we have not finished parsing the headers and may get more data.
1171 * Schedules more reads to retrieve the missing data.
1173 maybeReadVirginBody(); // schedules all kinds of reads; TODO: rename
1177 /** If we are done with parsing, check for errors */
1179 err_type error
= ERR_NONE
;
1181 if (flags
.headers_parsed
) { // parsed headers, possibly with errors
1182 // check for header parsing errors
1183 if (HttpReply
*vrep
= virginReply()) {
1184 const http_status s
= vrep
->sline
.status
;
1185 const HttpVersion
&v
= vrep
->sline
.version
;
1186 if (s
== HTTP_INVALID_HEADER
&& v
!= HttpVersion(0,9)) {
1187 debugs(11, DBG_IMPORTANT
, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry
->url() << " AKA " << orig_request
->GetHost() << orig_request
->urlpath
.termedBuf() );
1188 error
= ERR_INVALID_RESP
;
1189 } else if (s
== HTTP_HEADER_TOO_LARGE
) {
1190 fwd
->dontRetry(true);
1191 error
= ERR_TOO_BIG
;
1193 return true; // done parsing, got reply, and no error
1196 // parsed headers but got no reply
1197 debugs(11, DBG_IMPORTANT
, "WARNING: HTTP: Invalid Response: No reply at all for " << entry
->url() << " AKA " << orig_request
->GetHost() << orig_request
->urlpath
.termedBuf() );
1198 error
= ERR_INVALID_RESP
;
1202 if (readBuf
->hasContent()) {
1203 error
= ERR_INVALID_RESP
;
1204 debugs(11, DBG_IMPORTANT
, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry
->url() << " AKA " << orig_request
->GetHost() << orig_request
->urlpath
.termedBuf() );
1206 error
= ERR_ZERO_SIZE_OBJECT
;
1207 debugs(11, DBG_IMPORTANT
, "WARNING: HTTP: Invalid Response: No object data received for " << entry
->url() << " AKA " << orig_request
->GetHost() << orig_request
->urlpath
.termedBuf() );
1211 assert(error
!= ERR_NONE
);
1213 fwd
->fail(errorCon(error
, HTTP_BAD_GATEWAY
, fwd
->request
));
1214 flags
.do_next_read
= 0;
1216 return false; // quit on error
1219 /** truncate what we read if we read too much so that writeReplyBody()
1220 writes no more than what we should have read */
1222 HttpStateData::truncateVirginBody()
1224 assert(flags
.headers_parsed
);
1226 HttpReply
*vrep
= virginReply();
1228 if (!vrep
->expectingBody(request
->method
, clen
) || clen
< 0)
1229 return; // no body or a body of unknown size, including chunked
1231 const int64_t body_bytes_read
= reply_bytes_read
- header_bytes_read
;
1232 if (body_bytes_read
- body_bytes_truncated
<= clen
)
1233 return; // we did not read too much or already took care of the extras
1235 if (const int64_t extras
= body_bytes_read
- body_bytes_truncated
- clen
) {
1236 // server sent more that the advertised content length
1237 debugs(11,5, HERE
<< "body_bytes_read=" << body_bytes_read
<<
1238 " clen=" << clen
<< '/' << vrep
->content_length
<<
1239 " body_bytes_truncated=" << body_bytes_truncated
<< '+' << extras
);
1241 readBuf
->truncate(extras
);
1242 body_bytes_truncated
+= extras
;
1247 * Call this when there is data from the origin server
1248 * which should be sent to either StoreEntry, or to ICAP...
// Hands everything currently buffered in readBuf to the reply-body
// pipeline (store or adaptation) and releases those bytes from the
// buffer.
1251 HttpStateData::writeReplyBody()
// Trim any bytes read beyond the advertised Content-Length first.
1253 truncateVirginBody(); // if needed
1254 const char *data
= readBuf
->content();
1255 int len
= readBuf
->contentSize();
1256 addVirginReplyBody(data
, len
);
// All buffered bytes were handed off; consume them from readBuf.
1257 readBuf
->consume(len
);
1261 HttpStateData::decodeAndWriteReplyBody()
1263 const char *data
= NULL
;
1265 bool wasThereAnException
= false;
1266 assert(flags
.chunked
);
1267 assert(httpChunkDecoder
);
1268 SQUID_ENTER_THROWING_CODE();
1271 const bool doneParsing
= httpChunkDecoder
->parse(readBuf
,&decodedData
);
1272 len
= decodedData
.contentSize();
1273 data
=decodedData
.content();
1274 addVirginReplyBody(data
, len
);
1277 flags
.do_next_read
= 0;
1279 SQUID_EXIT_THROWING_CODE(wasThereAnException
);
1280 return wasThereAnException
;
1284 * processReplyBody has two purposes:
1285 * 1 - take the reply body data, if any, and put it into either
1286 * the StoreEntry, or give it over to ICAP.
1287 * 2 - see if we made it to the end of the response (persistent
1288 * connections and such)
1291 HttpStateData::processReplyBody()
1293 AsyncCall::Pointer call
;
1294 Ip::Address client_addr
;
1295 bool ispinned
= false;
// Headers not parsed yet: keep reading, nothing body-related to do.
1297 if (!flags
.headers_parsed
) {
1298 flags
.do_next_read
= 1;
1299 maybeReadVirginBody();
1304 debugs(11,5, HERE
<< "adaptationAccessCheckPending=" << adaptationAccessCheckPending
);
// Defer body processing while an adaptation (ICAP) ACL check is in flight.
1305 if (adaptationAccessCheckPending
)
1311 * At this point the reply headers have been parsed and consumed.
1312 * That means header content has been removed from readBuf and
1313 * it contains only body data.
// Chunked replies go through the decoder; a failed decode stops reading.
1315 if (flags
.chunked
) {
1316 if (!decodeAndWriteReplyBody()) {
1317 flags
.do_next_read
= 0;
// The store entry may have been aborted while we were writing to it;
// in that case the server socket is already being torn down.
1324 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
1326 * The above writeReplyBody() call could ABORT this entry,
1327 * in that case, the server FD should already be closed.
1328 * there's nothing for us to do.
// Decide what to do with the server connection based on whether the
// response message is complete and whether it allows reuse.
1332 switch (persistentConnStatus()) {
1333 case INCOMPLETE_MSG
:
1334 debugs(11, 5, "processReplyBody: INCOMPLETE_MSG");
1335 /* Wait for more data or EOF condition */
// A peer that broke keep-alive before gets only a short 10s grace timeout.
1336 if (flags
.keepalive_broken
) {
1338 commSetTimeout(fd
, 10, call
);
1341 commSetTimeout(fd
, Config
.Timeout
.read
, call
);
1344 flags
.do_next_read
= 1;
1347 case COMPLETE_PERSISTENT_MSG
:
1348 debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG");
1349 /* yes we have to clear all these! */
// Message done and connection reusable: detach this state object from
// the socket (timeout, close handler, forwarding registration)...
1351 commSetTimeout(fd
, -1, call
);
1352 flags
.do_next_read
= 0;
1354 comm_remove_close_handler(fd
, closeHandler
);
1355 closeHandler
= NULL
;
1356 fwd
->unregister(fd
);
// Use the spoofed client address as the pconn key when TPROXY spoofing
// is active, so the idle connection is only reused for that client.
1358 if (orig_request
->flags
.spoof_client_ip
)
1359 client_addr
= orig_request
->client_addr
;
1362 if (request
->flags
.pinned
) {
1364 } else if (request
->flags
.connection_auth
&& request
->flags
.auth_sent
) {
// ...then either pin the connection to the client (connection-oriented
// auth / explicitly pinned) or push it into the idle pconn pool.
1368 if (orig_request
->pinnedConnection() && ispinned
) {
1369 orig_request
->pinnedConnection()->pinConnection(fd
, orig_request
, _peer
,
1370 (request
->flags
.connection_auth
!= 0));
1372 fwd
->pconnPush(fwd
->conn(), _peer
, request
, orig_request
->GetHost(), client_addr
);
1380 case COMPLETE_NONPERSISTENT_MSG
:
1381 debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG");
// Schedule the next read if still needed.
1386 maybeReadVirginBody();
// Registers a (delay-pool-aware) read on the server socket if reading is
// currently enabled (flags.do_next_read) and buffer space permits.
1390 HttpStateData::maybeReadVirginBody()
1392 // we may need to grow the buffer if headers do not fit
// Before headers are parsed, insist on at least 1KB of space so header
// parsing can make progress; afterwards any available space will do.
1393 const int minRead
= flags
.headers_parsed
? 0 :1024;
1394 const int read_size
= replyBodySpace(*readBuf
, minRead
);
1396 debugs(11,9, HERE
<< (flags
.do_next_read
? "may" : "wont") <<
1397 " read up to " << read_size
<< " bytes from FD " << fd
);
1400 * why <2? Because delayAwareRead() won't actually read if
1401 * you ask it to read 1 byte. The delayed read request
1402 * just gets re-queued until the client side drains, then
1403 * the I/O thread hangs. Better to not register any read
1404 * handler until we get a notification from someone that
1405 * it's okay to read again.
// Arm exactly one read: clear do_next_read so we don't double-register.
1410 if (flags
.do_next_read
) {
1411 flags
.do_next_read
= 0;
1412 typedef CommCbMemFunT
<HttpStateData
, CommIoCbParams
> Dialer
;
// delayAwareRead honours delay pools; readReply is the completion callback.
1413 entry
->delayAwareRead(fd
, readBuf
->space(read_size
), read_size
,
1414 asyncCall(11, 5, "HttpStateData::readReply",
1415 Dialer(this, &HttpStateData::readReply
)));
1420 * This will be called when request write is complete.
// Completion callback for writing the request (headers and, eventually,
// body terminator) to the server. Accounts traffic, handles write errors,
// and only now arms the read timeout.
1423 HttpStateData::sendComplete(const CommIoCbParams
&io
)
1425 debugs(11, 5, "httpSendComplete: FD " << fd
<< ": size " << io
.size
<< ": errflag " << io
.flag
<< ".");
1426 #if URL_CHECKSUM_DEBUG
1428 entry
->mem_obj
->checkUrlChecksum();
// Byte accounting: per-FD and global server-side outbound counters.
1432 fd_bytes(fd
, io
.size
, FD_WRITE
);
1433 kb_incr(&statCounter
.server
.all
.kbytes_out
, io
.size
);
1434 kb_incr(&statCounter
.server
.http
.kbytes_out
, io
.size
);
// The socket is already closing: nothing more to do here.
1437 if (io
.flag
== COMM_ERR_CLOSING
)
// Write failure: report ERR_WRITE_ERROR / 502 Bad Gateway to the client.
1442 err
= errorCon(ERR_WRITE_ERROR
, HTTP_BAD_GATEWAY
, fwd
->request
);
1443 err
->xerrno
= io
.xerrno
;
1450 * Set the read timeout here because it hasn't been set yet.
1451 * We only set the read timeout after the request has been
1452 * fully written to the server-side. If we start the timeout
1453 * after connection establishment, then we are likely to hit
1454 * the timeout for POST/PUT requests that have very large
1457 typedef CommCbMemFunT
<HttpStateData
, CommTimeoutCbParams
> TimeoutDialer
;
1458 AsyncCall::Pointer timeoutCall
= asyncCall(11, 5, "HttpStateData::httpTimeout",
1459 TimeoutDialer(this,&HttpStateData::httpTimeout
));
1461 commSetTimeout(fd
, Config
.Timeout
.read
, timeoutCall
);
// Record that the full request is on the wire (used e.g. by pconn logic).
1463 flags
.request_sent
= 1;
// Hierarchy log timestamp: when the peer request finished sending.
1465 orig_request
->hier
.peer_http_request_sent
= current_time
;
1468 // Close the HTTP server connection. Used by serverComplete().
1470 HttpStateData::closeServer()
1472 debugs(11,5, HERE
<< "closing HTTP server FD " << fd
<< " this " << this);
// Detach from forwarding and drop our close handler before the socket
// goes away, so we are not called back during the close.
1475 fwd
->unregister(fd
);
1476 comm_remove_close_handler(fd
, closeHandler
);
1477 closeHandler
= NULL
;
// NOTE(review): body elided in this extract; presumably reports whether the
// server-side connection is no longer needed — confirm against full source.
1484 HttpStateData::doneWithServer() const
1491 * Fixup authentication request headers for special cases
// Applies the cache_peer login= policies (PASS/PASSTHRU/PROXYPASS/'*'/
// NEGOTIATE/fixed credentials) to the outgoing request: picks whether the
// credentials go into Authorization (originserver peer) or
// Proxy-Authorization, and synthesizes the header value accordingly.
1494 httpFixupAuthentication(HttpRequest
* request
, HttpRequest
* orig_request
, const HttpHeader
* hdr_in
, HttpHeader
* hdr_out
, http_state_flags flags
)
// Originserver peers authenticate like an origin (Authorization);
// ordinary peers authenticate like a proxy (Proxy-Authorization).
1496 http_hdr_type header
= flags
.originpeer
? HDR_AUTHORIZATION
: HDR_PROXY_AUTHORIZATION
;
1498 /* Nothing to do unless we are forwarding to a peer */
1499 if (!request
->flags
.proxying
)
1502 /* Needs to be explicitly enabled */
1503 if (!orig_request
->peer_login
)
1506 /* Maybe already dealt with? */
1507 if (hdr_out
->has(header
))
1510 /* Nothing to do here for PASSTHRU */
1511 if (strcmp(orig_request
->peer_login
, "PASSTHRU") == 0)
1514 /* PROXYPASS is a special case, single-signon to servers with the proxy password (basic only) */
// Relay the client's Basic proxy credentials to an originserver peer.
1515 if (flags
.originpeer
&& strcmp(orig_request
->peer_login
, "PROXYPASS") == 0 && hdr_in
->has(HDR_PROXY_AUTHORIZATION
)) {
1516 const char *auth
= hdr_in
->getStr(HDR_PROXY_AUTHORIZATION
);
// Only the Basic scheme can be forwarded verbatim this way.
1518 if (auth
&& strncasecmp(auth
, "basic ", 6) == 0) {
1519 hdr_out
->putStr(header
, auth
);
1524 /* Special mode to pass the username to the upstream cache */
// login=*:password — send the authenticated (or external-ACL) username
// with the configured fixed password.
1525 if (*orig_request
->peer_login
== '*') {
1527 const char *username
= "-";
1529 if (orig_request
->extacl_user
.size())
1530 username
= orig_request
->extacl_user
.termedBuf();
1531 else if (orig_request
->auth_user_request
)
1532 username
= orig_request
->auth_user_request
->username();
// peer_login+1 skips the leading '*'; result is "user<rest-of-login>".
1534 snprintf(loginbuf
, sizeof(loginbuf
), "%s%s", username
, orig_request
->peer_login
+ 1);
1536 httpHeaderPutStrf(hdr_out
, header
, "Basic %s",
1537 base64_encode(loginbuf
));
1541 /* external_acl provided credentials */
// PASS/PROXYPASS with external-ACL user+password: forward those as Basic.
1542 if (orig_request
->extacl_user
.size() && orig_request
->extacl_passwd
.size() &&
1543 (strcmp(orig_request
->peer_login
, "PASS") == 0 ||
1544 strcmp(orig_request
->peer_login
, "PROXYPASS") == 0)) {
1546 snprintf(loginbuf
, sizeof(loginbuf
), SQUIDSTRINGPH
":" SQUIDSTRINGPH
,
1547 SQUIDSTRINGPRINT(orig_request
->extacl_user
),
1548 SQUIDSTRINGPRINT(orig_request
->extacl_passwd
));
1549 httpHeaderPutStrf(hdr_out
, header
, "Basic %s",
1550 base64_encode(loginbuf
));
1554 /* Kerberos login to peer */
1555 #if HAVE_KRB5 && HAVE_GSSAPI
// login=NEGOTIATE[:principal] — obtain a GSSAPI token for the peer and
// send it as Proxy-Authorization: Negotiate.
1556 if (strncmp(orig_request
->peer_login
, "NEGOTIATE",strlen("NEGOTIATE")) == 0) {
1558 char *PrincipalName
=NULL
,*p
;
// An explicit service principal may follow after ':'.
1559 if ((p
=strchr(orig_request
->peer_login
,':')) != NULL
) {
1562 Token
= peer_proxy_negotiate_auth(PrincipalName
,request
->peer_host
);
1564 httpHeaderPutStrf(hdr_out
, HDR_PROXY_AUTHORIZATION
, "Negotiate %s",Token
);
1568 #endif /* HAVE_KRB5 && HAVE_GSSAPI */
// Default: peer_login is a literal "user:password" — send it as Basic.
1570 httpHeaderPutStrf(hdr_out
, header
, "Basic %s",
1571 base64_encode(orig_request
->peer_login
));
1576 * build request headers and append them to a given MemBuf
1577 * used by buildRequestPrefix()
1578 * note: initialised the HttpHeader, the caller is responsible for Clean()-ing
// Assembles the upstream request header set: copies/filters each client
// header (via copyOneHeaderFromClientsideRequestToUpstreamRequest), then
// appends Squid-generated headers (IMS, Via, Surrogate-Capability,
// X-Forwarded-For, Host, Authorization, Cache-Control, Connection,
// Front-End-Https) and finally applies header mangling rules.
1581 HttpStateData::httpBuildRequestHeader(HttpRequest
* request
,
1582 HttpRequest
* orig_request
,
1584 HttpHeader
* hdr_out
,
1585 http_state_flags flags
)
1587 /* building buffer for complex strings */
1588 #define BBUF_SZ (MAX_URL+32)
1589 LOCAL_ARRAY(char, bbuf
, BBUF_SZ
);
1590 LOCAL_ARRAY(char, ntoabuf
, MAX_IPSTRLEN
);
1591 const HttpHeader
*hdr_in
= &orig_request
->header
;
1592 const HttpHeaderEntry
*e
= NULL
;
1593 HttpHeaderPos pos
= HttpHeaderInitPos
;
1594 assert (hdr_out
->owner
== hoRequest
);
1596 /* append our IMS header */
// lastmod > -1 means we hold a cached object worth revalidating.
1597 if (request
->lastmod
> -1)
1598 hdr_out
->putTime(HDR_IF_MODIFIED_SINCE
, request
->lastmod
);
1600 bool we_do_ranges
= decideIfWeDoRanges (orig_request
);
// The client's Connection: list names hop-by-hop headers we must drop.
1602 String
strConnection (hdr_in
->getList(HDR_CONNECTION
));
// Copy each client header through the per-header filter.
1604 while ((e
= hdr_in
->getEntry(&pos
)))
1605 copyOneHeaderFromClientsideRequestToUpstreamRequest(e
, strConnection
, request
, orig_request
, hdr_out
, we_do_ranges
, flags
);
1607 /* Abstraction break: We should interpret multipart/byterange responses
1608 * into offset-length data, and this works around our inability to do so.
1610 if (!we_do_ranges
&& orig_request
->multipartRangeRequest()) {
1611 /* don't cache the result */
1612 orig_request
->flags
.cachable
= 0;
1613 /* pretend it's not a range request */
1614 delete orig_request
->range
;
1615 orig_request
->range
= NULL
;
1616 orig_request
->flags
.range
= 0;
// Append our own Via entry (HTTP version + cache identifier) when enabled.
1620 if (Config
.onoff
.via
) {
1622 strVia
= hdr_in
->getList(HDR_VIA
);
1623 snprintf(bbuf
, BBUF_SZ
, "%d.%d %s",
1624 orig_request
->http_ver
.major
,
1625 orig_request
->http_ver
.minor
, ThisCache
);
1626 strListAdd(&strVia
, bbuf
, ',');
1627 hdr_out
->putStr(HDR_VIA
, strVia
.termedBuf());
// Accelerated (reverse-proxy) requests advertise Surrogate capabilities.
1631 if (orig_request
->flags
.accelerated
) {
1632 /* Append Surrogate-Capabilities */
1633 String
strSurrogate(hdr_in
->getList(HDR_SURROGATE_CAPABILITY
));
1635 snprintf(bbuf
, BBUF_SZ
, "%s=\"Surrogate/1.0 ESI/1.0\"", Config
.Accel
.surrogate_id
);
1637 snprintf(bbuf
, BBUF_SZ
, "%s=\"Surrogate/1.0\"", Config
.Accel
.surrogate_id
);
1639 strListAdd(&strSurrogate
, bbuf
, ',');
1640 hdr_out
->putStr(HDR_SURROGATE_CAPABILITY
, strSurrogate
.termedBuf());
1643 /** \pre Handle X-Forwarded-For */
1644 if (strcmp(opt_forwarded_for
, "delete") != 0) {
1646 String strFwd
= hdr_in
->getList(HDR_X_FORWARDED_FOR
);
// A huge inbound XFF list is almost certainly a forwarding loop; warn
// (rate-limited) before the String class would assert on overflow.
1648 if (strFwd
.size() > 65536/2) {
1649 // There is probably a forwarding loop with Via detection disabled.
1650 // If we do nothing, String will assert on overflow soon.
1651 // TODO: Terminate all transactions with huge XFF?
1654 static int warnedCount
= 0;
1655 if (warnedCount
++ < 100) {
1656 const char *url
= entry
? entry
->url() : urlCanonical(orig_request
);
1657 debugs(11, 1, "Warning: likely forwarding loop with " << url
);
1661 if (strcmp(opt_forwarded_for
, "on") == 0) {
1662 /** If set to ON - append client IP or 'unknown'. */
1663 if ( orig_request
->client_addr
.IsNoAddr() )
1664 strListAdd(&strFwd
, "unknown", ',');
1666 strListAdd(&strFwd
, orig_request
->client_addr
.NtoA(ntoabuf
, MAX_IPSTRLEN
), ',');
1667 } else if (strcmp(opt_forwarded_for
, "off") == 0) {
1668 /** If set to OFF - append 'unknown'. */
1669 strListAdd(&strFwd
, "unknown", ',');
1670 } else if (strcmp(opt_forwarded_for
, "transparent") == 0) {
1671 /** If set to TRANSPARENT - pass through unchanged. */
1672 } else if (strcmp(opt_forwarded_for
, "truncate") == 0) {
1673 /** If set to TRUNCATE - drop existing list and replace with client IP or 'unknown'. */
1674 if ( orig_request
->client_addr
.IsNoAddr() )
1677 strFwd
= orig_request
->client_addr
.NtoA(ntoabuf
, MAX_IPSTRLEN
);
1679 if (strFwd
.size() > 0)
1680 hdr_out
->putStr(HDR_X_FORWARDED_FOR
, strFwd
.termedBuf());
1682 /** If set to DELETE - do not copy through. */
1684 /* append Host if not there already */
1685 if (!hdr_out
->has(HDR_HOST
)) {
// A configured peer_domain overrides the request's own host.
1686 if (orig_request
->peer_domain
) {
1687 hdr_out
->putStr(HDR_HOST
, orig_request
->peer_domain
);
1688 } else if (orig_request
->port
== urlDefaultPort(orig_request
->protocol
)) {
1689 /* use port# only if not default */
1690 hdr_out
->putStr(HDR_HOST
, orig_request
->GetHost());
1692 httpHeaderPutStrf(hdr_out
, HDR_HOST
, "%s:%d",
1693 orig_request
->GetHost(),
1694 (int) orig_request
->port
);
1698 /* append Authorization if known in URL, not in header and going direct */
1699 if (!hdr_out
->has(HDR_AUTHORIZATION
)) {
// URL-embedded credentials (http://user:pass@...) become Basic auth.
1700 if (!request
->flags
.proxying
&& *request
->login
) {
1701 httpHeaderPutStrf(hdr_out
, HDR_AUTHORIZATION
, "Basic %s",
1702 base64_encode(request
->login
));
1706 /* Fixup (Proxy-)Authorization special cases. Plain relaying dealt with above */
1707 httpFixupAuthentication(request
, orig_request
, hdr_in
, hdr_out
, flags
);
1709 /* append Cache-Control, add max-age if not there already */
1711 HttpHdrCc
*cc
= hdr_in
->getCc();
1714 cc
= httpHdrCcCreate();
1716 #if 0 /* see bug 2330 */
1717 /* Set no-cache if determined needed but not found */
1718 if (orig_request
->flags
.nocache
)
1719 EBIT_SET(cc
->mask
, CC_NO_CACHE
);
1722 /* Add max-age only without no-cache */
1723 if (!EBIT_TEST(cc
->mask
, CC_MAX_AGE
) && !EBIT_TEST(cc
->mask
, CC_NO_CACHE
)) {
1725 entry
? entry
->url() : urlCanonical(orig_request
);
// max-age derived from refresh_pattern rules for this URL.
1726 httpHdrCcSetMaxAge(cc
, getMaxAge(url
));
// Sanity check: the URL we derived max-age for matches this request.
1728 if (request
->urlpath
.size())
1729 assert(strstr(url
, request
->urlpath
.termedBuf()));
1732 /* Enforce sibling relations */
// Siblings must never trigger a fetch on our behalf: only-if-cached.
1733 if (flags
.only_if_cached
)
1734 EBIT_SET(cc
->mask
, CC_ONLY_IF_CACHED
);
1738 httpHdrCcDestroy(cc
);
1741 /* maybe append Connection: keep-alive */
// Use the proxy-flavoured header name when talking to another proxy.
1742 if (flags
.keepalive
) {
1743 if (flags
.proxying
) {
1744 hdr_out
->putStr(HDR_PROXY_CONNECTION
, "keep-alive");
1746 hdr_out
->putStr(HDR_CONNECTION
, "keep-alive");
1750 /* append Front-End-Https */
1751 if (flags
.front_end_https
) {
// front_end_https == 1 means "always on"; otherwise only for HTTPS URLs.
1752 if (flags
.front_end_https
== 1 || request
->protocol
== PROTO_HTTPS
)
1753 hdr_out
->putStr(HDR_FRONT_END_HTTPS
, "On");
1756 /* Now mangle the headers. */
1757 if (Config2
.onoff
.mangle_request_headers
)
1758 httpHdrMangleList(hdr_out
, request
, ROR_REQUEST
);
1760 strConnection
.clean();
1764 * Decides whether a particular header may be cloned from the received Clients request
1765 * to our outgoing fetch request.
// Per-header filter: clones, rewrites, or drops one client request header
// for the upstream request, per RFC 2616 hop-by-hop rules and Squid policy.
1768 copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry
*e
, const String strConnection
, HttpRequest
* request
, const HttpRequest
* orig_request
, HttpHeader
* hdr_out
, const int we_do_ranges
, const http_state_flags flags
)
1770 debugs(11, 5, "httpBuildRequestHeader: " << e
->name
<< ": " << e
->value
);
1774 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid should not pass on. */
1776 case HDR_PROXY_AUTHORIZATION
:
1777 /** \par Proxy-Authorization:
1778 * Only pass on proxy authentication to peers for which
1779 * authentication forwarding is explicitly enabled
1781 if (!flags
.originpeer
&& flags
.proxying
&& orig_request
->peer_login
&&
1782 (strcmp(orig_request
->peer_login
, "PASS") == 0 ||
1783 strcmp(orig_request
->peer_login
, "PROXYPASS") == 0 ||
1784 strcmp(orig_request
->peer_login
, "PASSTHRU") == 0)) {
1785 hdr_out
->addEntry(e
->clone());
1789 /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid does not pass on. */
1791 case HDR_CONNECTION
: /** \par Connection: */
1792 case HDR_TE
: /** \par TE: */
1793 case HDR_KEEP_ALIVE
: /** \par Keep-Alive: */
1794 case HDR_PROXY_AUTHENTICATE
: /** \par Proxy-Authenticate: */
1795 case HDR_TRAILERS
: /** \par Trailers: */
1796 case HDR_UPGRADE
: /** \par Upgrade: */
1797 case HDR_TRANSFER_ENCODING
: /** \par Transfer-Encoding: */
1801 /** \par OTHER headers I haven't bothered to track down yet. */
1803 case HDR_AUTHORIZATION
:
1804 /** \par WWW-Authorization:
1805 * Pass on WWW authentication */
// Non-accelerated traffic: origin auth always passes through.
1807 if (!flags
.originpeer
) {
1808 hdr_out
->addEntry(e
->clone());
1810 /** \note In accelerators, only forward authentication if enabled
1811 * (see also httpFixupAuthentication for special cases)
1813 if (orig_request
->peer_login
&&
1814 (strcmp(orig_request
->peer_login
, "PASS") == 0 ||
1815 strcmp(orig_request
->peer_login
, "PASSTHRU") == 0 ||
1816 strcmp(orig_request
->peer_login
, "PROXYPASS") == 0)) {
1817 hdr_out
->addEntry(e
->clone());
1825 * Normally Squid rewrites the Host: header.
1826 * However, there is one case when we don't: If the URL
1827 * went through our redirector and the admin configured
1828 * 'redir_rewrites_host' to be off.
1830 if (orig_request
->peer_domain
)
1831 hdr_out
->putStr(HDR_HOST
, orig_request
->peer_domain
);
1832 else if (request
->flags
.redirected
&& !Config
.onoff
.redir_rewrites_host
)
1833 hdr_out
->addEntry(e
->clone());
1835 /* use port# only if not default */
1837 if (orig_request
->port
== urlDefaultPort(orig_request
->protocol
)) {
1838 hdr_out
->putStr(HDR_HOST
, orig_request
->GetHost());
1840 httpHeaderPutStrf(hdr_out
, HDR_HOST
, "%s:%d",
1841 orig_request
->GetHost(),
1842 (int) orig_request
->port
);
1848 case HDR_IF_MODIFIED_SINCE
:
1849 /** \par If-Modified-Since:
1850 * append unless we added our own;
1851 * \note at most one client's ims header can pass through */
1853 if (!hdr_out
->has(HDR_IF_MODIFIED_SINCE
))
1854 hdr_out
->addEntry(e
->clone());
1858 case HDR_MAX_FORWARDS
:
1859 /** \par Max-Forwards:
1860 * pass only on TRACE or OPTIONS requests */
1861 if (orig_request
->method
== METHOD_TRACE
|| orig_request
->method
== METHOD_OPTIONS
) {
// Decrement the hop budget as required by RFC 2616 for TRACE/OPTIONS.
1862 const int64_t hops
= e
->getInt64();
1865 hdr_out
->putInt64(HDR_MAX_FORWARDS
, hops
- 1);
1872 * If Via is disabled then forward any received header as-is.
1873 * Otherwise leave for explicit updated addition later. */
1875 if (!Config
.onoff
.via
)
1876 hdr_out
->addEntry(e
->clone());
1884 case HDR_REQUEST_RANGE
:
1885 /** \par Range:, If-Range:, Request-Range:
1886 * Only pass if we accept ranges */
1888 hdr_out
->addEntry(e
->clone());
1892 case HDR_PROXY_CONNECTION
:
1894 case HDR_X_FORWARDED_FOR
:
1896 case HDR_CACHE_CONTROL
:
1897 /** \par Proxy-Connection:, X-Forwarded-For:, Cache-Control:
1898 * handled specially by Squid, so leave off for now.
1899 * append these after the loop if needed */
1902 case HDR_FRONT_END_HTTPS
:
1903 /** \par Front-End-Https:
1904 * Pass thru only if peer is configured with front-end-https */
1905 if (!flags
.front_end_https
)
1906 hdr_out
->addEntry(e
->clone());
1912 * pass on all other header fields
1913 * which are NOT listed by the special Connection: header. */
// The client's Connection: list marks additional hop-by-hop headers.
1915 if (strConnection
.size()>0 && strListIsMember(&strConnection
, e
->name
.termedBuf(), ',')) {
1916 debugs(11, 2, "'" << e
->name
<< "' header cropped by Connection: definition");
1920 hdr_out
->addEntry(e
->clone());
// Decides whether Squid should strip the client's Range header and fetch
// the full object itself (serving the range from cache) versus forwarding
// the Range to the server.
1925 HttpStateData::decideIfWeDoRanges (HttpRequest
* orig_request
)
1928 /* decide if we want to do Ranges ourselves
1929 * and fetch the whole object now)
1930 * We want to handle Ranges ourselves iff
1931 * - we can actually parse client Range specs
1932 * - the specs are expected to be simple enough (e.g. no out-of-order ranges)
1933 * - reply will be cachable
1934 * (If the reply will be uncachable we have to throw it away after
1935 * serving this request, so it is better to forward ranges to
1936 * the server and fetch only the requested content)
1939 int64_t roffLimit
= orig_request
->getRangeOffsetLimit();
// Forward the Range upstream when: there is no parsed range, the reply
// won't be cachable, the range starts beyond range_offset_limit, or
// connection-oriented auth is in play.
1941 if (NULL
== orig_request
->range
|| !orig_request
->flags
.cachable
1942 || orig_request
->range
->offsetLimitExceeded(roffLimit
) || orig_request
->flags
.connection_auth
)
1945 debugs(11, 8, "decideIfWeDoRanges: range specs: " <<
1946 orig_request
->range
<< ", cachable: " <<
1947 orig_request
->flags
.cachable
<< "; we_do_ranges: " << result
);
1952 /* build request prefix and append it to a given MemBuf;
1953 * return the length of the prefix */
// Writes the request line, the packed header set built by
// httpBuildRequestHeader(), and the terminating CRLF into *mb.
1955 HttpStateData::buildRequestPrefix(HttpRequest
* aRequest
,
1956 HttpRequest
* original_request
,
1957 StoreEntry
* sentry
,
1959 http_state_flags stateFlags
)
// Remember where we started so we can return the prefix length.
1961 const int offset
= mb
->size
;
// Squid speaks HTTP/1.1 on the request line regardless of client version.
1962 HttpVersion
httpver(1,1);
1963 mb
->Printf("%s %s HTTP/%d.%d\r\n",
1964 RequestMethodStr(aRequest
->method
),
// An empty path is sent as "/".
1965 aRequest
->urlpath
.size() ? aRequest
->urlpath
.termedBuf() : "/",
1966 httpver
.major
,httpver
.minor
);
1967 /* build and pack headers */
1969 HttpHeader
hdr(hoRequest
);
1971 httpBuildRequestHeader(aRequest
, original_request
, sentry
, &hdr
, stateFlags
);
// Record whether credentials went out, for connection-auth bookkeeping.
1973 if (aRequest
->flags
.pinned
&& aRequest
->flags
.connection_auth
)
1974 aRequest
->flags
.auth_sent
= 1;
1975 else if (hdr
.has(HDR_AUTHORIZATION
))
1976 aRequest
->flags
.auth_sent
= 1;
// Serialize the header set directly into the caller's MemBuf.
1978 packerToMemInit(&p
, mb
);
1983 /* append header terminator */
1984 mb
->append(crlf
, 2);
1985 return mb
->size
- offset
;
1988 /* This will be called when connect completes. Write request. */
// Builds and writes the HTTP request to the server socket, wires up the
// write-completion callback (body-sending vs. plain completion), and
// computes the per-request flags (originpeer, keepalive, only_if_cached,
// front_end_https) used while building the request.
1990 HttpStateData::sendRequest()
1994 debugs(11, 5, "httpSendRequest: FD " << fd
<< ", request " << request
<< ", this " << this << ".");
// Socket already closing: bail out (close handler will clean up).
1997 debugs(11,3, HERE
<< "cannot send request to closing FD " << fd
);
1998 assert(closeHandler
!= NULL
);
// Use the (long) lifetime timeout while writing; the read timeout is
// armed later, in sendComplete(), once the full request is on the wire.
2002 typedef CommCbMemFunT
<HttpStateData
, CommTimeoutCbParams
> TimeoutDialer
;
2003 AsyncCall::Pointer timeoutCall
= asyncCall(11, 5, "HttpStateData::httpTimeout",
2004 TimeoutDialer(this,&HttpStateData::httpTimeout
));
2005 commSetTimeout(fd
, Config
.Timeout
.lifetime
, timeoutCall
);
// Start listening for the reply immediately (servers may respond early).
2006 flags
.do_next_read
= 1;
2007 maybeReadVirginBody();
// Request has a body: after headers are written, continue with the body.
2009 if (orig_request
->body_pipe
!= NULL
) {
2010 if (!startRequestBodyFlow()) // register to receive body data
2012 typedef CommCbMemFunT
<HttpStateData
, CommIoCbParams
> Dialer
;
2013 Dialer
dialer(this, &HttpStateData::sentRequestBody
);
2014 requestSender
= asyncCall(11,5, "HttpStateData::sentRequestBody", dialer
);
// Bodyless request: header write completion is the whole send.
2016 assert(!requestBodySource
);
2017 typedef CommCbMemFunT
<HttpStateData
, CommIoCbParams
> Dialer
;
2018 Dialer
dialer(this, &HttpStateData::sendComplete
);
2019 requestSender
= asyncCall(11,5, "HttpStateData::SendComplete", dialer
);
// originpeer: the peer is configured as an origin server, so we send
// origin-style (not proxy-style) headers to it.
2022 if (_peer
!= NULL
) {
2023 if (_peer
->options
.originserver
) {
2025 flags
.originpeer
= 1;
2028 flags
.originpeer
= 0;
2032 flags
.originpeer
= 0;
2036 * Is keep-alive okay for all request methods?
// Keep-alive policy: forced by must_keepalive; disabled globally by
// server_pconns; always on for direct; for peers, only while the peer's
// observed keep-alive success ratio stays above 50%.
2038 if (orig_request
->flags
.must_keepalive
)
2039 flags
.keepalive
= 1;
2040 else if (!Config
.onoff
.server_pconns
)
2041 flags
.keepalive
= 0;
2042 else if (_peer
== NULL
)
2043 flags
.keepalive
= 1;
2044 else if (_peer
->stats
.n_keepalives_sent
< 10)
2045 flags
.keepalive
= 1;
2046 else if ((double) _peer
->stats
.n_keepalives_recv
/
2047 (double) _peer
->stats
.n_keepalives_sent
> 0.50)
2048 flags
.keepalive
= 1;
// Sibling peers may not fetch misses for us unless allow-miss is set.
2051 if (neighborType(_peer
, request
) == PEER_SIBLING
&&
2052 !_peer
->options
.allow_miss
)
2053 flags
.only_if_cached
= 1;
2055 flags
.front_end_https
= _peer
->front_end_https
;
2059 request
->peer_host
=_peer
?_peer
->host
:NULL
;
// Serialize request line + headers into mb and hand it to comm for writing.
2060 buildRequestPrefix(request
, orig_request
, entry
, &mb
, flags
);
2061 debugs(11, 6, "httpSendRequest: FD " << fd
<< ":\n" << mb
.buf
);
2062 comm_write_mbuf(fd
, &mb
, requestSender
);
// Module entry point: the forwarding layer calls this to start an HTTP
// transaction with the server selected in *fwd.
2068 httpStart(FwdState
*fwd
)
2070 debugs(11, 3, "httpStart: \"" << RequestMethodStr(fwd
->request
->method
) << " " << fwd
->entry
->url() << "\"" );
2071 HttpStateData
*httpState
= new HttpStateData(fwd
);
// sendRequest() failing here means the transaction was aborted before
// anything was written.
2073 if (!httpState
->sendRequest()) {
2074 debugs(11, 3, "httpStart: aborted");
// Count the attempt in the server-side request statistics.
2079 statCounter
.server
.all
.requests
++;
2080 statCounter
.server
.http
.requests
++;
2083 * We used to set the read timeout here, but not any more.
2084 * Now it's set in httpSendComplete() after the full request,
2085 * including request body, has been written to the server.
// Called when the last request body chunk was written. Optionally sends
// an extra CRLF for servers matched by the broken_posts access list
// (HTTP violation workaround) before signalling send completion.
2090 HttpStateData::doneSendingRequestBody()
2092 debugs(11,5, HERE
<< "doneSendingRequestBody: FD " << fd
);
2095 if (Config
.accessList
.brokenPosts
) {
2096 ACLFilledChecklist
ch(Config
.accessList
.brokenPosts
, request
, NULL
);
// Not a broken-POST server: finish normally via sendComplete().
2097 if (!ch
.fastCheck()) {
2098 debugs(11, 5, "doneSendingRequestBody: didn't match brokenPosts");
2099 CommIoCbParams
io(NULL
);
2104 debugs(11, 2, "doneSendingRequestBody: matched brokenPosts");
// Socket closing: skip the extra CRLF (close handler cleans up).
2107 debugs(11,2, HERE
<< "cannot send CRLF to closing FD " << fd
);
2108 assert(closeHandler
!= NULL
);
// Write the workaround CRLF; sendComplete() fires when it is written.
2112 typedef CommCbMemFunT
<HttpStateData
, CommIoCbParams
> Dialer
;
2113 Dialer
dialer(this, &HttpStateData::sendComplete
);
2114 AsyncCall::Pointer call
= asyncCall(11,5, "HttpStateData::SendComplete", dialer
);
2115 comm_write(fd
, "\r\n", 2, call
);
2119 debugs(11, 5, "doneSendingRequestBody: No brokenPosts list");
2120 #endif /* HTTP_VIOLATIONS */
2122 CommIoCbParams
io(NULL
);
2128 // more origin request body data is available
2130 HttpStateData::handleMoreRequestBodyAvailable()
// Transaction already dead (EOF seen or socket gone): ignore new body data.
2132 if (eof
|| fd
< 0) {
2133 // XXX: we should check this condition in other callbacks then!
2134 // TODO: Check whether this can actually happen: We should unsubscribe
2135 // as a body consumer when the above condition(s) are detected.
2136 debugs(11, 1, HERE
<< "Transaction aborted while reading HTTP body");
2140 assert(requestBodySource
!= NULL
);
// Unsent request body piling up while a reply is already parsed is the
// signature of a client abusing the proxy (e.g. CONNECT-style tunneling
// through POST); flag it once per transaction.
2142 if (requestBodySource
->buf().hasContent()) {
2143 // XXX: why does not this trigger a debug message on every request?
2145 if (flags
.headers_parsed
&& !flags
.abuse_detected
) {
2146 flags
.abuse_detected
= 1;
2147 debugs(11, 1, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << orig_request
->client_addr
<< "' -> '" << entry
->url() << "'" );
2149 if (virginReply()->sline
.status
== HTTP_INVALID_HEADER
) {
// NOTE(review): this explicitly-qualified call names THIS same method,
// which would recurse infinitely if reached as written; the base-class
// ServerStateData::handleMoreRequestBodyAvailable() is presumably
// intended — confirm against the full source before changing.
2156 HttpStateData::handleMoreRequestBodyAvailable();
2159 // premature end of the request body
// The client-side body producer died before delivering the whole body;
// let the base class clean up, then fake a closing write completion.
2161 HttpStateData::handleRequestBodyProducerAborted()
2163 ServerStateData::handleRequestBodyProducerAborted();
2164 // XXX: SendComplete(COMM_ERR_CLOSING) does little. Is it enough?
2165 CommIoCbParams
io(NULL
);
2167 io
.flag
=COMM_ERR_CLOSING
;
2171 // called when we wrote request headers(!) or a part of the body
2173 HttpStateData::sentRequestBody(const CommIoCbParams
&io
)
// Account the written bytes against HTTP server-side outbound traffic,
// then defer to the generic body-sending logic in the base class.
2176 kb_incr(&statCounter
.server
.http
.kbytes_out
, io
.size
);
2178 ServerStateData::sentRequestBody(io
);
2181 // Quickly abort the transaction
2182 // TODO: destruction should be sufficient as the destructor should cleanup,
2183 // including canceling close handlers
2185 HttpStateData::abortTransaction(const char *reason
)
2187 debugs(11,5, HERE
<< "aborting transaction for " << reason
<<
2188 "; FD " << fd
<< ", this " << this);
// No live server socket: tell forwarding the server ended unregistered,
// then schedule our own destruction.
2195 fwd
->handleUnregisteredServerEnd();
2196 deleteThis("HttpStateData::abortTransaction");
// Accessor: the client's original (pre-rewrite) request for this transaction.
2200 HttpStateData::originalRequest()
2202 return orig_request
;