5 * DEBUG: section 11 Hypertext Transfer Protocol (HTTP)
6 * AUTHOR: Harvest Derived
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
37 * Anonymizing patch by lutz@as-node.jena.thur.de
38 * have a look into http-anon.c to get more information.
43 #include "acl/FilledChecklist.h"
45 #include "auth/UserRequest.h"
47 #include "base/AsyncJobCalls.h"
48 #include "base/TextException.h"
50 #include "comm/Connection.h"
51 #include "comm/Write.h"
53 #include "DelayPools.h"
55 #include "err_detail_type.h"
56 #include "errorpage.h"
58 #include "HttpControlMsg.h"
59 #include "HttpHdrContRange.h"
60 #include "HttpHdrSc.h"
61 #include "HttpHdrScTarget.h"
62 #include "HttpReply.h"
63 #include "HttpRequest.h"
65 #include "MemObject.h"
68 #include "SquidTime.h"
72 #define SQUID_ENTER_THROWING_CODE() try {
73 #define SQUID_EXIT_THROWING_CODE(status) \
76 catch (const std::exception &e) { \
77 debugs (11, 1, "Exception error:" << e.what()); \
81 CBDATA_CLASS_INIT(HttpStateData
);
83 static const char *const crlf
= "\r\n";
85 static void httpMaybeRemovePublic(StoreEntry
*, http_status
);
86 static void copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry
*e
, const String strConnection
, const HttpRequest
* request
,
87 HttpHeader
* hdr_out
, const int we_do_ranges
, const http_state_flags
);
89 HttpStateData::HttpStateData(FwdState
*theFwdState
) : AsyncJob("HttpStateData"), ServerStateData(theFwdState
),
90 lastChunk(0), header_bytes_read(0), reply_bytes_read(0),
91 body_bytes_truncated(0), httpChunkDecoder(NULL
)
93 debugs(11,5,HERE
<< "HttpStateData " << this << " created");
94 ignoreCacheControl
= false;
95 surrogateNoStore
= false;
96 serverConnection
= fwd
->serverConnection();
98 readBuf
->init(16*1024, 256*1024);
100 // reset peer response time stats for %<pt
101 request
->hier
.peer_http_request_sent
.tv_sec
= 0;
102 request
->hier
.peer_http_request_sent
.tv_usec
= 0;
104 if (fwd
->serverConnection() != NULL
)
105 _peer
= cbdataReference(fwd
->serverConnection()->getPeer()); /* might be NULL */
108 request
->flags
.proxying
= 1;
110 * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
111 * We might end up getting the object from somewhere else if,
112 * for example, the request to this neighbor fails.
114 if (_peer
->options
.proxy_only
)
115 entry
->releaseRequest();
118 entry
->setNoDelay(_peer
->options
.no_delay
);
123 * register the handler to free HTTP state data when the FD closes
125 typedef CommCbMemFunT
<HttpStateData
, CommCloseCbParams
> Dialer
;
126 closeHandler
= JobCallback(9, 5, Dialer
, this, HttpStateData::httpStateConnClosed
);
127 comm_add_close_handler(serverConnection
->fd
, closeHandler
);
130 HttpStateData::~HttpStateData()
133 * don't forget that ~ServerStateData() gets called automatically
136 if (!readBuf
->isNull())
141 if (httpChunkDecoder
)
142 delete httpChunkDecoder
;
144 cbdataReferenceDone(_peer
);
146 debugs(11,5, HERE
<< "HttpStateData " << this << " destroyed; " << serverConnection
);
149 const Comm::ConnectionPointer
&
150 HttpStateData::dataConnection() const
152 return serverConnection
;
157 httpStateFree(int fd, void *data)
159 HttpStateData *httpState = static_cast<HttpStateData *>(data);
160 debugs(11, 5, "httpStateFree: FD " << fd << ", httpState=" << data);
165 HttpStateData::httpStateConnClosed(const CommCloseCbParams
¶ms
)
167 debugs(11, 5, "httpStateFree: FD " << params
.fd
<< ", httpState=" << params
.data
);
168 deleteThis("HttpStateData::httpStateConnClosed");
172 httpCachable(const HttpRequestMethod
& method
)
174 /* GET and HEAD are cachable. Others are not. */
176 // TODO: replace with HttpRequestMethod::isCachable() ?
177 if (method
!= METHOD_GET
&& method
!= METHOD_HEAD
)
185 HttpStateData::httpTimeout(const CommTimeoutCbParams
¶ms
)
187 debugs(11, 4, HERE
<< serverConnection
<< ": '" << entry
->url() << "'" );
189 if (entry
->store_status
== STORE_PENDING
) {
190 fwd
->fail(errorCon(ERR_READ_TIMEOUT
, HTTP_GATEWAY_TIMEOUT
, fwd
->request
));
193 serverConnection
->close();
197 httpMaybeRemovePublic(StoreEntry
* e
, http_status status
)
203 if (!EBIT_TEST(e
->flags
, KEY_PRIVATE
))
210 case HTTP_NON_AUTHORITATIVE_INFORMATION
:
212 case HTTP_MULTIPLE_CHOICES
:
214 case HTTP_MOVED_PERMANENTLY
:
216 case HTTP_MOVED_TEMPORARILY
:
227 case HTTP_METHOD_NOT_ALLOWED
:
234 case HTTP_UNAUTHORIZED
:
244 * Any 2xx response should eject previously cached entities...
247 if (status
>= 200 && status
< 300)
255 if (!remove
&& !forbidden
)
260 if (e
->mem_obj
->request
)
261 pe
= storeGetPublicByRequest(e
->mem_obj
->request
);
263 pe
= storeGetPublic(e
->mem_obj
->url
, e
->mem_obj
->method
);
268 neighborsHtcpClear(e
, NULL
, e
->mem_obj
->request
, e
->mem_obj
->method
, HTCP_CLR_INVALIDATION
);
274 * Also remove any cached HEAD response in case the object has
277 if (e
->mem_obj
->request
)
278 pe
= storeGetPublicByRequestMethod(e
->mem_obj
->request
, METHOD_HEAD
);
280 pe
= storeGetPublic(e
->mem_obj
->url
, METHOD_HEAD
);
285 neighborsHtcpClear(e
, NULL
, e
->mem_obj
->request
, HttpRequestMethod(METHOD_HEAD
), HTCP_CLR_INVALIDATION
);
292 HttpStateData::processSurrogateControl(HttpReply
*reply
)
294 if (request
->flags
.accelerated
&& reply
->surrogate_control
) {
295 HttpHdrScTarget
*sctusable
= httpHdrScGetMergedTarget(reply
->surrogate_control
, Config
.Accel
.surrogate_id
);
298 if (EBIT_TEST(sctusable
->mask
, SC_NO_STORE
) ||
299 (Config
.onoff
.surrogate_is_remote
300 && EBIT_TEST(sctusable
->mask
, SC_NO_STORE_REMOTE
))) {
301 surrogateNoStore
= true;
302 entry
->makePrivate();
305 /* The HttpHeader logic cannot tell if the header it's parsing is a reply to an
306 * accelerated request or not...
307 * Still, this is an abstraction breach. - RC
309 if (sctusable
->max_age
!= -1) {
310 if (sctusable
->max_age
< sctusable
->max_stale
)
311 reply
->expires
= reply
->date
+ sctusable
->max_age
;
313 reply
->expires
= reply
->date
+ sctusable
->max_stale
;
315 /* And update the timestamps */
316 entry
->timestampsSet();
319 /* We ignore cache-control directives as per the Surrogate specification */
320 ignoreCacheControl
= true;
322 httpHdrScTargetDestroy(sctusable
);
328 HttpStateData::cacheableReply()
330 HttpReply
const *rep
= finalReply();
331 HttpHeader
const *hdr
= &rep
->header
;
332 const int cc_mask
= (rep
->cache_control
) ? rep
->cache_control
->mask
: 0;
334 #if USE_HTTP_VIOLATIONS
336 const refresh_t
*R
= NULL
;
338 /* This strange looking define first looks up the refresh pattern
339 * and then checks if the specified flag is set. The main purpose
340 * of this is to simplify the refresh pattern lookup and USE_HTTP_VIOLATIONS
343 #define REFRESH_OVERRIDE(flag) \
344 ((R = (R ? R : refreshLimits(entry->mem_obj->url))) , \
345 (R && R->flags.flag))
347 #define REFRESH_OVERRIDE(flag) 0
350 if (surrogateNoStore
)
353 // RFC 2616: do not cache replies to responses with no-store CC directive
354 if (request
&& request
->cache_control
&&
355 EBIT_TEST(request
->cache_control
->mask
, CC_NO_STORE
) &&
356 !REFRESH_OVERRIDE(ignore_no_store
))
359 if (!ignoreCacheControl
) {
360 if (EBIT_TEST(cc_mask
, CC_PRIVATE
)) {
361 if (!REFRESH_OVERRIDE(ignore_private
))
365 if (EBIT_TEST(cc_mask
, CC_NO_CACHE
)) {
366 if (!REFRESH_OVERRIDE(ignore_no_cache
))
370 if (EBIT_TEST(cc_mask
, CC_NO_STORE
)) {
371 if (!REFRESH_OVERRIDE(ignore_no_store
))
376 if (request
->flags
.auth
|| request
->flags
.auth_sent
) {
378 * Responses to requests with authorization may be cached
379 * only if a Cache-Control: public reply header is present.
380 * RFC 2068, sec 14.9.4
383 if (!EBIT_TEST(cc_mask
, CC_PUBLIC
)) {
384 if (!REFRESH_OVERRIDE(ignore_auth
))
389 /* Pragma: no-cache in _replies_ is not documented in HTTP,
390 * but servers like "Active Imaging Webcast/2.0" sure do use it */
391 if (hdr
->has(HDR_PRAGMA
)) {
392 String s
= hdr
->getList(HDR_PRAGMA
);
393 const int no_cache
= strListIsMember(&s
, "no-cache", ',');
397 if (!REFRESH_OVERRIDE(ignore_no_cache
))
403 * The "multipart/x-mixed-replace" content type is used for
404 * continuous push replies. These are generally dynamic and
405 * probably should not be cachable
407 if ((v
= hdr
->getStr(HDR_CONTENT_TYPE
)))
408 if (!strncasecmp(v
, "multipart/x-mixed-replace", 25))
411 switch (rep
->sline
.status
) {
412 /* Responses that are cacheable */
416 case HTTP_NON_AUTHORITATIVE_INFORMATION
:
418 case HTTP_MULTIPLE_CHOICES
:
420 case HTTP_MOVED_PERMANENTLY
:
424 * Don't cache objects that need to be refreshed on next request,
425 * unless we know how to refresh it.
428 if (!refreshIsCachable(entry
) && !REFRESH_OVERRIDE(store_stale
)) {
429 debugs(22, 3, "refreshIsCachable() returned non-cacheable..");
437 /* Responses that only are cacheable if the server says so */
439 case HTTP_MOVED_TEMPORARILY
:
440 case HTTP_TEMPORARY_REDIRECT
:
441 if (rep
->expires
> rep
->date
&& rep
->date
> 0)
449 /* Errors can be negatively cached */
451 case HTTP_NO_CONTENT
:
455 case HTTP_BAD_REQUEST
:
461 case HTTP_METHOD_NOT_ALLOWED
:
463 case HTTP_REQUEST_URI_TOO_LARGE
:
465 case HTTP_INTERNAL_SERVER_ERROR
:
467 case HTTP_NOT_IMPLEMENTED
:
469 case HTTP_BAD_GATEWAY
:
471 case HTTP_SERVICE_UNAVAILABLE
:
473 case HTTP_GATEWAY_TIMEOUT
:
479 /* Some responses can never be cached */
481 case HTTP_PARTIAL_CONTENT
: /* Not yet supported */
485 case HTTP_NOT_MODIFIED
:
487 case HTTP_UNAUTHORIZED
:
489 case HTTP_PROXY_AUTHENTICATION_REQUIRED
:
491 case HTTP_INVALID_HEADER
: /* Squid header parsing error */
493 case HTTP_HEADER_TOO_LARGE
:
495 case HTTP_PAYMENT_REQUIRED
:
496 case HTTP_NOT_ACCEPTABLE
:
497 case HTTP_REQUEST_TIMEOUT
:
499 case HTTP_LENGTH_REQUIRED
:
500 case HTTP_PRECONDITION_FAILED
:
501 case HTTP_REQUEST_ENTITY_TOO_LARGE
:
502 case HTTP_UNSUPPORTED_MEDIA_TYPE
:
503 case HTTP_UNPROCESSABLE_ENTITY
:
505 case HTTP_FAILED_DEPENDENCY
:
506 case HTTP_INSUFFICIENT_STORAGE
:
507 case HTTP_REQUESTED_RANGE_NOT_SATISFIABLE
:
508 case HTTP_EXPECTATION_FAILED
:
513 /* RFC 2616 section 6.1.1: an unrecognized response MUST NOT be cached. */
514 debugs (11, 3, HERE
<< "Unknown HTTP status code " << rep
->sline
.status
<< ". Not cacheable.");
526 * For Vary, store the relevant request headers as
527 * virtual headers in the reply
528 * Returns false if the variance cannot be stored
531 httpMakeVaryMark(HttpRequest
* request
, HttpReply
const * reply
)
534 const char *pos
= NULL
;
541 vary
= reply
->header
.getList(HDR_VARY
);
543 while (strListGetItem(&vary
, ',', &item
, &ilen
, &pos
)) {
544 char *name
= (char *)xmalloc(ilen
+ 1);
545 xstrncpy(name
, item
, ilen
+ 1);
548 if (strcmp(name
, "*") == 0) {
549 /* Cannot handle "Vary: *" without ETag support */
555 strListAdd(&vstr
, name
, ',');
556 hdr
= request
->header
.getByName(name
);
558 value
= hdr
.termedBuf();
561 value
= rfc1738_escape_part(value
);
562 vstr
.append("=\"", 2);
564 vstr
.append("\"", 1);
571 #if X_ACCELERATOR_VARY
574 vary
= reply
->header
.getList(HDR_X_ACCELERATOR_VARY
);
576 while (strListGetItem(&vary
, ',', &item
, &ilen
, &pos
)) {
577 char *name
= (char *)xmalloc(ilen
+ 1);
578 xstrncpy(name
, item
, ilen
+ 1);
580 strListAdd(&vstr
, name
, ',');
581 hdr
= request
->header
.getByName(name
);
583 value
= hdr
.termedBuf();
586 value
= rfc1738_escape_part(value
);
587 vstr
.append("=\"", 2);
589 vstr
.append("\"", 1);
598 debugs(11, 3, "httpMakeVaryMark: " << vstr
);
599 return vstr
.termedBuf();
603 HttpStateData::keepaliveAccounting(HttpReply
*reply
)
607 _peer
->stats
.n_keepalives_sent
++;
609 if (reply
->keep_alive
) {
611 _peer
->stats
.n_keepalives_recv
++;
613 if (Config
.onoff
.detect_broken_server_pconns
614 && reply
->bodySize(request
->method
) == -1 && !flags
.chunked
) {
615 debugs(11, 1, "keepaliveAccounting: Impossible keep-alive header from '" << entry
->url() << "'" );
616 // debugs(11, 2, "GOT HTTP REPLY HDR:\n---------\n" << readBuf->content() << "\n----------" );
617 flags
.keepalive_broken
= 1;
623 HttpStateData::checkDateSkew(HttpReply
*reply
)
625 if (reply
->date
> -1 && !_peer
) {
626 int skew
= abs((int)(reply
->date
- squid_curtime
));
629 debugs(11, 3, "" << request
->GetHost() << "'s clock is skewed by " << skew
<< " seconds!");
634 * This creates the error page itself.. its likely
635 * that the forward ported reply header max size patch
636 * generates non http conformant error pages - in which
637 * case the errors there should be 'BAD_GATEWAY' etc.
640 HttpStateData::processReplyHeader()
642 /** Creates a blank header. If this routine is made incremental, this will not do */
644 /* NP: all exit points to this function MUST call ctx_exit(ctx) */
645 Ctx ctx
= ctx_enter(entry
->mem_obj
->url
);
647 debugs(11, 3, "processReplyHeader: key '" << entry
->getMD5Text() << "'");
649 assert(!flags
.headers_parsed
);
651 if (!readBuf
->hasContent()) {
656 http_status error
= HTTP_STATUS_NONE
;
658 HttpReply
*newrep
= new HttpReply
;
659 const bool parsed
= newrep
->parse(readBuf
, eof
, &error
);
661 if (!parsed
&& readBuf
->contentSize() > 5 && strncmp(readBuf
->content(), "HTTP/", 5) != 0 && strncmp(readBuf
->content(), "ICY", 3) != 0) {
663 HttpReply
*tmprep
= new HttpReply
;
664 tmprep
->setHeaders(HTTP_OK
, "Gatewaying", NULL
, -1, -1, -1);
665 tmprep
->header
.putExt("X-Transformed-From", "HTTP/0.9");
667 newrep
->parse(mb
, eof
, &error
);
671 if (!parsed
&& error
> 0) { // unrecoverable parsing error
672 debugs(11, 3, "processReplyHeader: Non-HTTP-compliant header: '" << readBuf
->content() << "'");
673 flags
.headers_parsed
= 1;
674 newrep
->sline
.version
= HttpVersion(1,1);
675 newrep
->sline
.status
= error
;
676 HttpReply
*vrep
= setVirginReply(newrep
);
677 entry
->replaceHttpReply(vrep
);
682 if (!parsed
) { // need more data
690 debugs(11, 2, "HTTP Server " << serverConnection
);
691 debugs(11, 2, "HTTP Server REPLY:\n---------\n" << readBuf
->content() << "\n----------");
693 header_bytes_read
= headersEnd(readBuf
->content(), readBuf
->contentSize());
694 readBuf
->consume(header_bytes_read
);
697 newrep
->removeStaleWarnings();
699 if (newrep
->sline
.protocol
== AnyP::PROTO_HTTP
&& newrep
->sline
.status
>= 100 && newrep
->sline
.status
< 200) {
706 if (newrep
->sline
.protocol
== AnyP::PROTO_HTTP
&& newrep
->header
.chunked()) {
708 httpChunkDecoder
= new ChunkedCodingParser
;
711 if (!peerSupportsConnectionPinning())
712 request
->flags
.connection_auth_disabled
= 1;
714 HttpReply
*vrep
= setVirginReply(newrep
);
715 flags
.headers_parsed
= 1;
717 keepaliveAccounting(vrep
);
721 processSurrogateControl (vrep
);
723 /** \todo IF the reply is a 1.0 reply, AND it has a Connection: Header
724 * Parse the header and remove all referenced headers
727 request
->hier
.peer_reply_status
= newrep
->sline
.status
;
732 /// ignore or start forwarding the 1xx response (a.k.a., control message)
734 HttpStateData::handle1xx(HttpReply
*reply
)
736 HttpMsgPointerT
<HttpReply
> msg(reply
); // will destroy reply if unused
738 // one 1xx at a time: we must not be called while waiting for previous 1xx
739 Must(!flags
.handling1xx
);
740 flags
.handling1xx
= true;
742 if (!request
->canHandle1xx()) {
743 debugs(11, 2, HERE
<< "ignoring client-unsupported 1xx");
748 #if USE_HTTP_VIOLATIONS
749 // check whether the 1xx response forwarding is allowed by squid.conf
750 if (Config
.accessList
.reply
) {
751 ACLFilledChecklist
ch(Config
.accessList
.reply
, originalRequest(), NULL
);
752 ch
.reply
= HTTPMSGLOCK(reply
);
753 if (!ch
.fastCheck()) { // TODO: support slow lookups?
754 debugs(11, 3, HERE
<< "ignoring denied 1xx");
759 #endif // USE_HTTP_VIOLATIONS
761 debugs(11, 2, HERE
<< "forwarding 1xx to client");
763 // the Sink will use this to call us back after writing 1xx to the client
764 typedef NullaryMemFunT
<HttpStateData
> CbDialer
;
765 const AsyncCall::Pointer cb
= JobCallback(11, 3, CbDialer
, this,
766 HttpStateData::proceedAfter1xx
);
767 CallJobHere1(11, 4, request
->clientConnectionManager
, ConnStateData
,
768 ConnStateData::sendControlMsg
, HttpControlMsg(msg
, cb
));
769 // If the call is not fired, then the Sink is gone, and HttpStateData
770 // will terminate due to an aborted store entry or another similar error.
771 // If we get stuck, it is not handle1xx fault if we could get stuck
772 // for similar reasons without a 1xx response.
775 /// restores state and resumes processing after 1xx is ignored or forwarded
777 HttpStateData::proceedAfter1xx()
779 Must(flags
.handling1xx
);
781 debugs(11, 2, HERE
<< "consuming " << header_bytes_read
<<
782 " header and " << reply_bytes_read
<< " body bytes read after 1xx");
783 header_bytes_read
= 0;
784 reply_bytes_read
= 0;
786 CallJobHere(11, 3, this, HttpStateData
, HttpStateData::processReply
);
791 * returns true if the peer can support connection pinning
793 bool HttpStateData::peerSupportsConnectionPinning() const
795 const HttpReply
*rep
= entry
->mem_obj
->getReply();
796 const HttpHeader
*hdr
= &rep
->header
;
803 /*If this peer does not support connection pinning (authenticated
804 connections) return false
806 if (!_peer
->connection_auth
)
809 /*The peer supports connection pinning and the http reply status
810 is not unauthorized, so the related connection can be pinned
812 if (rep
->sline
.status
!= HTTP_UNAUTHORIZED
)
815 /*The server respond with HTTP_UNAUTHORIZED and the peer configured
816 with "connection-auth=on" we know that the peer supports pinned
819 if (_peer
->connection_auth
== 1)
822 /*At this point peer has configured with "connection-auth=auto"
823 parameter so we need some extra checks to decide if we are going
824 to allow pinned connections or not
827 /*if the peer configured with originserver just allow connection
828 pinning (squid 2.6 behaviour)
830 if (_peer
->options
.originserver
)
833 /*if the connections it is already pinned it is OK*/
834 if (request
->flags
.pinned
)
837 /*Allow pinned connections only if the Proxy-support header exists in
838 reply and has in its list the "Session-Based-Authentication"
839 which means that the peer supports connection pinning.
841 if (!hdr
->has(HDR_PROXY_SUPPORT
))
844 header
= hdr
->getStrOrList(HDR_PROXY_SUPPORT
);
845 /* XXX This ought to be done in a case-insensitive manner */
846 rc
= (strstr(header
.termedBuf(), "Session-Based-Authentication") != NULL
);
851 // Called when we parsed (and possibly adapted) the headers but
852 // had not started storing (a.k.a., sending) the body yet.
854 HttpStateData::haveParsedReplyHeaders()
856 ServerStateData::haveParsedReplyHeaders();
858 Ctx ctx
= ctx_enter(entry
->mem_obj
->url
);
859 HttpReply
*rep
= finalReply();
861 if (rep
->sline
.status
== HTTP_PARTIAL_CONTENT
&&
863 currentOffset
= rep
->content_range
->spec
.offset
;
865 entry
->timestampsSet();
867 /* Check if object is cacheable or not based on reply code */
868 debugs(11, 3, "haveParsedReplyHeaders: HTTP CODE: " << rep
->sline
.status
);
870 if (neighbors_do_private_keys
)
871 httpMaybeRemovePublic(entry
, rep
->sline
.status
);
873 if (rep
->header
.has(HDR_VARY
)
874 #if X_ACCELERATOR_VARY
875 || rep
->header
.has(HDR_X_ACCELERATOR_VARY
)
878 const char *vary
= httpMakeVaryMark(request
, rep
);
881 entry
->makePrivate();
882 if (!fwd
->reforwardableStatus(rep
->sline
.status
))
883 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
887 entry
->mem_obj
->vary_headers
= xstrdup(vary
);
891 * If its not a reply that we will re-forward, then
892 * allow the client to get it.
894 if (!fwd
->reforwardableStatus(rep
->sline
.status
))
895 EBIT_CLR(entry
->flags
, ENTRY_FWD_HDR_WAIT
);
897 switch (cacheableReply()) {
904 entry
->makePrivate();
909 #if USE_HTTP_VIOLATIONS
910 if (Config
.negativeTtl
> 0)
911 entry
->cacheNegatively();
914 entry
->makePrivate();
926 if (!ignoreCacheControl
&& rep
->cache_control
) {
927 if (EBIT_TEST(rep
->cache_control
->mask
, CC_PROXY_REVALIDATE
) ||
928 EBIT_TEST(rep
->cache_control
->mask
, CC_MUST_REVALIDATE
) ||
929 EBIT_TEST(rep
->cache_control
->mask
, CC_S_MAXAGE
))
930 EBIT_SET(entry
->flags
, ENTRY_REVALIDATE
);
934 headersLog(1, 0, request
->method
, rep
);
941 HttpStateData::ConnectionStatus
942 HttpStateData::statusIfComplete() const
944 const HttpReply
*rep
= virginReply();
946 * If the reply wants to close the connection, it takes precedence */
948 if (httpHeaderHasConnDir(&rep
->header
, "close"))
949 return COMPLETE_NONPERSISTENT_MSG
;
952 * If we didn't send a keep-alive request header, then this
953 * can not be a persistent connection.
955 if (!flags
.keepalive
)
956 return COMPLETE_NONPERSISTENT_MSG
;
959 * If we haven't sent the whole request then this can not be a persistent
962 if (!flags
.request_sent
) {
963 debugs(11, 2, "statusIfComplete: Request not yet fully sent \"" << RequestMethodStr(request
->method
) << " " << entry
->url() << "\"" );
964 return COMPLETE_NONPERSISTENT_MSG
;
968 * What does the reply have to say about keep-alive?
972 * If the origin server (HTTP/1.0) does not send a keep-alive
973 * header, but keeps the connection open anyway, what happens?
974 * We'll return here and http.c waits for an EOF before changing
975 * store_status to STORE_OK. Combine this with ENTRY_FWD_HDR_WAIT
976 * and an error status code, and we might have to wait until
977 * the server times out the socket.
979 if (!rep
->keep_alive
)
980 return COMPLETE_NONPERSISTENT_MSG
;
982 return COMPLETE_PERSISTENT_MSG
;
985 HttpStateData::ConnectionStatus
986 HttpStateData::persistentConnStatus() const
988 debugs(11, 3, HERE
<< serverConnection
<< " eof=" << eof
);
989 if (eof
) // already reached EOF
990 return COMPLETE_NONPERSISTENT_MSG
;
992 /* If server fd is closing (but we have not been notified yet), stop Comm
993 I/O to avoid assertions. TODO: Change Comm API to handle callers that
994 want more I/O after async closing (usually initiated by others). */
995 // XXX: add canReceive or s/canSend/canTalkToServer/
996 if (!Comm::IsConnOpen(serverConnection
))
997 return COMPLETE_NONPERSISTENT_MSG
;
1000 * In chunked response we do not know the content length but we are absolutely
1001 * sure about the end of response, so we are calling the statusIfComplete to
1002 * decide if we can be persistent
1004 if (lastChunk
&& flags
.chunked
)
1005 return statusIfComplete();
1007 const HttpReply
*vrep
= virginReply();
1008 debugs(11, 5, "persistentConnStatus: content_length=" << vrep
->content_length
);
1010 const int64_t clen
= vrep
->bodySize(request
->method
);
1012 debugs(11, 5, "persistentConnStatus: clen=" << clen
);
1014 /* If the body size is unknown we must wait for EOF */
1016 return INCOMPLETE_MSG
;
1019 * If the body size is known, we must wait until we've gotten all of it. */
1022 // if (entry->mem_obj->endOffset() < vrep->content_length + vrep->hdr_sz)
1023 const int64_t body_bytes_read
= reply_bytes_read
- header_bytes_read
;
1024 debugs(11,5, "persistentConnStatus: body_bytes_read=" <<
1025 body_bytes_read
<< " content_length=" << vrep
->content_length
);
1027 if (body_bytes_read
< vrep
->content_length
)
1028 return INCOMPLETE_MSG
;
1030 if (body_bytes_truncated
> 0) // already read more than needed
1031 return COMPLETE_NONPERSISTENT_MSG
; // disable pconns
1035 * If there is no message body or we got it all, we can be persistent */
1036 return statusIfComplete();
1040 * This is the callback after some data has been read from the network
1044 HttpStateData::ReadReplyWrapper(int fd, char *buf, size_t len, comm_err_t flag, int xerrno, void *data)
1046 HttpStateData *httpState = static_cast<HttpStateData *>(data);
1047 assert (fd == httpState->serverConnection->fd);
1048 // assert(buf == readBuf->content());
1049 PROF_start(HttpStateData_readReply);
1050 httpState->readReply(len, flag, xerrno);
1051 PROF_stop(HttpStateData_readReply);
1055 /* XXX this function is too long! */
1057 HttpStateData::readReply(const CommIoCbParams
&io
)
1063 flags
.do_next_read
= 0;
1065 debugs(11, 5, HERE
<< io
.conn
<< ": len " << len
<< ".");
1067 // Bail out early on COMM_ERR_CLOSING - close handlers will tidy up for us
1068 if (io
.flag
== COMM_ERR_CLOSING
) {
1069 debugs(11, 3, "http socket closing");
1073 if (EBIT_TEST(entry
->flags
, ENTRY_ABORTED
)) {
1074 // TODO: should we call abortTransaction() here?
1078 // handle I/O errors
1079 if (io
.flag
!= COMM_OK
|| len
< 0) {
1080 debugs(11, 2, HERE
<< io
.conn
<< ": read failure: " << xstrerror() << ".");
1082 if (ignoreErrno(io
.xerrno
)) {
1083 flags
.do_next_read
= 1;
1086 err
= errorCon(ERR_READ_ERROR
, HTTP_BAD_GATEWAY
, fwd
->request
);
1087 err
->xerrno
= io
.xerrno
;
1089 flags
.do_next_read
= 0;
1090 serverConnection
->close();
1098 readBuf
->appended(len
);
1099 reply_bytes_read
+= len
;
1101 DelayId delayId
= entry
->mem_obj
->mostBytesAllowed();
1102 delayId
.bytesIn(len
);
1105 kb_incr(&statCounter
.server
.all
.kbytes_in
, len
);
1106 kb_incr(&statCounter
.server
.http
.kbytes_in
, len
);
1107 IOStats
.Http
.reads
++;
1109 for (clen
= len
- 1, bin
= 0; clen
; bin
++)
1112 IOStats
.Http
.read_hist
[bin
]++;
1114 // update peer response time stats (%<pt)
1115 const timeval
&sent
= request
->hier
.peer_http_request_sent
;
1116 request
->hier
.peer_response_time
=
1117 sent
.tv_sec
? tvSubMsec(sent
, current_time
) : -1;
1121 * Here the RFC says we should ignore whitespace between replies, but we can't as
1122 * doing so breaks HTTP/0.9 replies beginning with whitespace, and in addition
1123 * the response splitting countermeasures are extremely likely to trigger on this,
1124 * not allowing connection reuse in the first place.
1127 if (!flags
.headers_parsed
&& len
> 0 && fd_table
[serverConnection
->fd
].uses
> 1) {
1128 /* Skip whitespace between replies */
1130 while (len
> 0 && xisspace(*buf
))
1131 memmove(buf
, buf
+ 1, len
--);
1134 /* Continue to read... */
1135 /* Timeout NOT increased. This whitespace was from previous reply */
1136 flags
.do_next_read
= 1;
1137 maybeReadVirginBody();
1144 if (len
== 0) { // reached EOF?
1146 flags
.do_next_read
= 0;
1148 /* Bug 2879: Replies may terminate with \r\n then EOF instead of \r\n\r\n
1149 * Ensure here that we have at minimum two \r\n when EOF is seen.
1150 * TODO: Add eof parameter to headersEnd() and move this hack there.
1152 if (readBuf
->contentSize() && !flags
.headers_parsed
) {
1154 * Yes Henrik, there is a point to doing this. When we
1155 * called httpProcessReplyHeader() before, we didn't find
1156 * the end of headers, but now we are definitely at EOF, so
1157 * we want to process the reply headers.
1159 /* Fake an "end-of-headers" to work around such broken servers */
1160 readBuf
->append("\r\n", 2);
1167 /// processes the already read and buffered response data, possibly after
1168 /// waiting for asynchronous 1xx control message processing
1170 HttpStateData::processReply()
1173 if (flags
.handling1xx
) { // we came back after handling a 1xx response
1174 debugs(11, 5, HERE
<< "done with 1xx handling");
1175 flags
.handling1xx
= false;
1176 Must(!flags
.headers_parsed
);
1179 if (!flags
.headers_parsed
) { // have not parsed headers yet?
1180 PROF_start(HttpStateData_processReplyHeader
);
1181 processReplyHeader();
1182 PROF_stop(HttpStateData_processReplyHeader
);
1184 if (!continueAfterParsingHeader()) // parsing error or need more data
1185 return; // TODO: send errors to ICAP
1187 adaptOrFinalizeReply();
1190 // kick more reads if needed and/or process the response body, if any
1191 PROF_start(HttpStateData_processReplyBody
);
1192 processReplyBody(); // may call serverComplete()
1193 PROF_stop(HttpStateData_processReplyBody
);
1197 \retval true if we can continue with processing the body or doing ICAP.
1200 HttpStateData::continueAfterParsingHeader()
1202 if (flags
.handling1xx
) {
1203 debugs(11, 5, HERE
<< "wait for 1xx handling");
1204 Must(!flags
.headers_parsed
);
1208 if (!flags
.headers_parsed
&& !eof
) {
1209 debugs(11, 9, HERE
<< "needs more at " << readBuf
->contentSize());
1210 flags
.do_next_read
= 1;
1211 /** \retval false If we have not finished parsing the headers and may get more data.
1212 * Schedules more reads to retrieve the missing data.
1214 maybeReadVirginBody(); // schedules all kinds of reads; TODO: rename
1218 /** If we are done with parsing, check for errors */
1220 err_type error
= ERR_NONE
;
1222 if (flags
.headers_parsed
) { // parsed headers, possibly with errors
1223 // check for header parsing errors
1224 if (HttpReply
*vrep
= virginReply()) {
1225 const http_status s
= vrep
->sline
.status
;
1226 const HttpVersion
&v
= vrep
->sline
.version
;
1227 if (s
== HTTP_INVALID_HEADER
&& v
!= HttpVersion(0,9)) {
1228 debugs(11, DBG_IMPORTANT
, "WARNING: HTTP: Invalid Response: Bad header encountered from " << entry
->url() << " AKA " << request
->GetHost() << request
->urlpath
.termedBuf() );
1229 error
= ERR_INVALID_RESP
;
1230 } else if (s
== HTTP_HEADER_TOO_LARGE
) {
1231 fwd
->dontRetry(true);
1232 error
= ERR_TOO_BIG
;
1234 return true; // done parsing, got reply, and no error
1237 // parsed headers but got no reply
1238 debugs(11, DBG_IMPORTANT
, "WARNING: HTTP: Invalid Response: No reply at all for " << entry
->url() << " AKA " << request
->GetHost() << request
->urlpath
.termedBuf() );
1239 error
= ERR_INVALID_RESP
;
1243 if (readBuf
->hasContent()) {
1244 error
= ERR_INVALID_RESP
;
1245 debugs(11, DBG_IMPORTANT
, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry
->url() << " AKA " << request
->GetHost() << request
->urlpath
.termedBuf() );
1247 error
= ERR_ZERO_SIZE_OBJECT
;
1248 debugs(11, (request
->flags
.accelerated
?DBG_IMPORTANT
:2), "WARNING: HTTP: Invalid Response: No object data received for " <<
1249 entry
->url() << " AKA " << request
->GetHost() << request
->urlpath
.termedBuf() );
1253 assert(error
!= ERR_NONE
);
1255 fwd
->fail(errorCon(error
, HTTP_BAD_GATEWAY
, fwd
->request
));
1256 flags
.do_next_read
= 0;
1257 serverConnection
->close();
1258 return false; // quit on error
1261 /** truncate what we read if we read too much so that writeReplyBody()
1262 writes no more than what we should have read */
1264 HttpStateData::truncateVirginBody()
1266 assert(flags
.headers_parsed
);
1268 HttpReply
*vrep
= virginReply();
1270 if (!vrep
->expectingBody(request
->method
, clen
) || clen
< 0)
1271 return; // no body or a body of unknown size, including chunked
1273 const int64_t body_bytes_read
= reply_bytes_read
- header_bytes_read
;
1274 if (body_bytes_read
- body_bytes_truncated
<= clen
)
1275 return; // we did not read too much or already took care of the extras
1277 if (const int64_t extras
= body_bytes_read
- body_bytes_truncated
- clen
) {
1278 // server sent more that the advertised content length
1279 debugs(11,5, HERE
<< "body_bytes_read=" << body_bytes_read
<<
1280 " clen=" << clen
<< '/' << vrep
->content_length
<<
1281 " body_bytes_truncated=" << body_bytes_truncated
<< '+' << extras
);
1283 readBuf
->truncate(extras
);
1284 body_bytes_truncated
+= extras
;
1289 * Call this when there is data from the origin server
1290 * which should be sent to either StoreEntry, or to ICAP...
1293 HttpStateData::writeReplyBody()
1295 truncateVirginBody(); // if needed
1296 const char *data
= readBuf
->content();
1297 int len
= readBuf
->contentSize();
1298 addVirginReplyBody(data
, len
);
1299 readBuf
->consume(len
);
/// Decode a chunk-encoded response body fragment from readBuf and forward
/// the decoded bytes via addVirginReplyBody().
/// Return value comes from SQUID_EXIT_THROWING_CODE(wasThereAnException) —
/// NOTE(review): the name suggests "true on exception", but callers treat a
/// false return as the error case; confirm the macro sets the flag to true
/// when NO exception was thrown.
bool
HttpStateData::decodeAndWriteReplyBody()
{
    const char *data = NULL;
    int len;
    bool wasThereAnException = false;
    assert(flags.chunked);       // only called for Transfer-Encoding: chunked
    assert(httpChunkDecoder);
    SQUID_ENTER_THROWING_CODE();
    MemBuf decodedData;
    decodedData.init();
    // parse() may throw on malformed chunk framing
    const bool doneParsing = httpChunkDecoder->parse(readBuf,&decodedData);
    len = decodedData.contentSize();
    data=decodedData.content();
    addVirginReplyBody(data, len);
    if (doneParsing) {
        // saw the last-chunk: stop scheduling reads
        lastChunk = 1;
        flags.do_next_read = 0;
    }
    SQUID_EXIT_THROWING_CODE(wasThereAnException);
    return wasThereAnException;
}
/**
 * processReplyBody has two purposes:
 *  1 - take the reply body data, if any, and put it into either
 *      the StoreEntry, or give it over to ICAP.
 *  2 - see if we made it to the end of the response (persistent
 *      connections and such)
 */
void
HttpStateData::processReplyBody()
{
    Ip::Address client_addr;
    bool ispinned = false;

    if (!flags.headers_parsed) {
        // still reading headers: just schedule another read
        flags.do_next_read = 1;
        maybeReadVirginBody();
        return;
    }

#if USE_ADAPTATION
    debugs(11,5, HERE << "adaptationAccessCheckPending=" << adaptationAccessCheckPending);
    if (adaptationAccessCheckPending)
        return;
#endif

    /*
     * At this point the reply headers have been parsed and consumed.
     * That means header content has been removed from readBuf and
     * it contains only body data.
     */
    if (flags.chunked) {
        if (!decodeAndWriteReplyBody()) {
            // decoding failed (exception inside the chunk decoder)
            flags.do_next_read = 0;
            serverComplete();
            return;
        }
    } else
        writeReplyBody();

    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
        /*
         * The above writeReplyBody() call could ABORT this entry,
         * in that case, the server FD should already be closed.
         * there's nothing for us to do.
         */
    } else
        switch (persistentConnStatus()) {

        case INCOMPLETE_MSG: {
            debugs(11, 5, "processReplyBody: INCOMPLETE_MSG from " << serverConnection);
            /* Wait for more data or EOF condition */
            AsyncCall::Pointer nil;
            if (flags.keepalive_broken) {
                // short timeout: the peer's keep-alive is known to misbehave
                commSetConnTimeout(serverConnection, 10, nil);
            } else {
                commSetConnTimeout(serverConnection, Config.Timeout.read, nil);
            }

            flags.do_next_read = 1;
        }
        break;

        case COMPLETE_PERSISTENT_MSG:
            debugs(11, 5, "processReplyBody: COMPLETE_PERSISTENT_MSG from " << serverConnection);
            /* yes we have to clear all these! */
            commUnsetConnTimeout(serverConnection);
            flags.do_next_read = 0;

            comm_remove_close_handler(serverConnection->fd, closeHandler);
            closeHandler = NULL;
            fwd->unregister(serverConnection);

            if (request->flags.spoof_client_ip)
                client_addr = request->client_addr;

            if (request->flags.pinned) {
                ispinned = true;
            } else if (request->flags.connection_auth && request->flags.auth_sent) {
                // connection-oriented auth (e.g. NTLM) requires pinning
                ispinned = true;
            }

            if (request->pinnedConnection() && ispinned) {
                request->pinnedConnection()->pinConnection(serverConnection, request, _peer,
                        (request->flags.connection_auth != 0));
            } else {
                // return the idle connection to the persistent-connection pool
                fwd->pconnPush(serverConnection, request->peer_host ? request->peer_host : request->GetHost());
            }

            serverConnection = NULL;
            serverComplete();
            return;

        case COMPLETE_NONPERSISTENT_MSG:
            debugs(11, 5, "processReplyBody: COMPLETE_NONPERSISTENT_MSG from " << serverConnection);
            serverComplete();
            return;
        }

    maybeReadVirginBody();
}
/// Schedule a (possibly delay-pool-aware) read of more response bytes,
/// but only when flags.do_next_read permits and buffer space allows.
void
HttpStateData::maybeReadVirginBody()
{
    // we may need to grow the buffer if headers do not fit
    const int minRead = flags.headers_parsed ? 0 :1024;
    const int read_size = replyBodySpace(*readBuf, minRead);

    debugs(11,9, HERE << (flags.do_next_read ? "may" : "wont") <<
           " read up to " << read_size << " bytes from " << serverConnection);

    /*
     * why <2? Because delayAwareRead() won't actually read if
     * you ask it to read 1 byte. The delayed read request
     * just gets re-queued until the client side drains, then
     * the I/O thread hangs. Better to not register any read
     * handler until we get a notification from someone that
     * its okay to read again.
     */
    if (read_size < 2)
        return;

    if (flags.do_next_read) {
        flags.do_next_read = 0;   // consumed; readReply() re-arms it
        typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
        entry->delayAwareRead(serverConnection, readBuf->space(read_size), read_size,
                              JobCallback(11, 5, Dialer, this, HttpStateData::readReply));
    }
}
/// called after writing the very last request byte (body, last-chunk, etc)
/// Accounts for the written bytes, then either fails the transaction on a
/// write error or proceeds to sendComplete().
void
HttpStateData::wroteLast(const CommIoCbParams &io)
{
    debugs(11, 5, HERE << serverConnection << ": size " << io.size << ": errflag " << io.flag << ".");
#if URL_CHECKSUM_DEBUG

    entry->mem_obj->checkUrlChecksum();
#endif

    if (io.size > 0) {
        // byte accounting: per-FD and global server-side counters
        fd_bytes(io.fd, io.size, FD_WRITE);
        kb_incr(&statCounter.server.all.kbytes_out, io.size);
        kb_incr(&statCounter.server.http.kbytes_out, io.size);
    }

    // the connection is already being torn down; nothing to do
    if (io.flag == COMM_ERR_CLOSING)
        return;

    if (io.flag) {
        // any other write error gates the whole forwarding attempt
        ErrorState *err;
        err = errorCon(ERR_WRITE_ERROR, HTTP_BAD_GATEWAY, fwd->request);
        err->xerrno = io.xerrno;
        fwd->fail(err);
        serverConnection->close();
        return;
    }

    sendComplete();
}
1488 /// successfully wrote the entire request (including body, last-chunk, etc.)
1490 HttpStateData::sendComplete()
1493 * Set the read timeout here because it hasn't been set yet.
1494 * We only set the read timeout after the request has been
1495 * fully written to the server-side. If we start the timeout
1496 * after connection establishment, then we are likely to hit
1497 * the timeout for POST/PUT requests that have very large
1500 typedef CommCbMemFunT
<HttpStateData
, CommTimeoutCbParams
> TimeoutDialer
;
1501 AsyncCall::Pointer timeoutCall
= JobCallback(11, 5,
1502 TimeoutDialer
, this, HttpStateData::httpTimeout
);
1504 commSetConnTimeout(serverConnection
, Config
.Timeout
.read
, timeoutCall
);
1506 flags
.request_sent
= 1;
1508 request
->hier
.peer_http_request_sent
= current_time
;
1511 // Close the HTTP server connection. Used by serverComplete().
1513 HttpStateData::closeServer()
1515 debugs(11,5, HERE
<< "closing HTTP server " << serverConnection
<< " this " << this);
1517 if (Comm::IsConnOpen(serverConnection
)) {
1518 fwd
->unregister(serverConnection
);
1519 comm_remove_close_handler(serverConnection
->fd
, closeHandler
);
1520 closeHandler
= NULL
;
1521 serverConnection
->close();
1526 HttpStateData::doneWithServer() const
1528 return !Comm::IsConnOpen(serverConnection
);
/*
 * Fixup authentication request headers for special cases
 * (peer_login directives: PASS, PASSTHRU, PROXYPASS, "*...", NEGOTIATE,
 *  or a fixed user:password) when forwarding to a peer.
 */
static void
httpFixupAuthentication(HttpRequest * request, const HttpHeader * hdr_in, HttpHeader * hdr_out, http_state_flags flags)
{
    // origin-peer mode authenticates to the origin, otherwise to the proxy
    http_hdr_type header = flags.originpeer ? HDR_AUTHORIZATION : HDR_PROXY_AUTHORIZATION;

    /* Nothing to do unless we are forwarding to a peer */
    if (!request->flags.proxying)
        return;

    /* Needs to be explicitly enabled */
    if (!request->peer_login)
        return;

    /* Maybe already dealt with? */
    if (hdr_out->has(header))
        return;

    /* Nothing to do here for PASSTHRU */
    if (strcmp(request->peer_login, "PASSTHRU") == 0)
        return;

    /* PROXYPASS is a special case, single-signon to servers with the proxy password (basic only) */
    if (flags.originpeer && strcmp(request->peer_login, "PROXYPASS") == 0 && hdr_in->has(HDR_PROXY_AUTHORIZATION)) {
        const char *auth = hdr_in->getStr(HDR_PROXY_AUTHORIZATION);

        if (auth && strncasecmp(auth, "basic ", 6) == 0) {
            hdr_out->putStr(header, auth);
            return;
        }
    }

    /* Special mode to pass the username to the upstream cache */
    if (*request->peer_login == '*') {
        // NOTE(review): loginbuf declaration dropped from this excerpt;
        // size per upstream Squid — confirm against the full file.
        char loginbuf[256];
        const char *username = "-";

        if (request->extacl_user.size())
            username = request->extacl_user.termedBuf();
        else if (request->auth_user_request != NULL)
            username = request->auth_user_request->username();

        // "*suffix" appends the configured suffix after the real username
        snprintf(loginbuf, sizeof(loginbuf), "%s%s", username, request->peer_login + 1);

        httpHeaderPutStrf(hdr_out, header, "Basic %s",
                          old_base64_encode(loginbuf));
        return;
    }

    /* external_acl provided credentials */
    if (request->extacl_user.size() && request->extacl_passwd.size() &&
            (strcmp(request->peer_login, "PASS") == 0 ||
             strcmp(request->peer_login, "PROXYPASS") == 0)) {
        char loginbuf[256]; // NOTE(review): declaration dropped from excerpt; confirm size
        snprintf(loginbuf, sizeof(loginbuf), SQUIDSTRINGPH ":" SQUIDSTRINGPH,
                 SQUIDSTRINGPRINT(request->extacl_user),
                 SQUIDSTRINGPRINT(request->extacl_passwd));
        httpHeaderPutStrf(hdr_out, header, "Basic %s",
                          old_base64_encode(loginbuf));
        return;
    }

    /* Kerberos login to peer */
#if HAVE_AUTH_MODULE_NEGOTIATE && HAVE_KRB5 && HAVE_GSSAPI
    if (strncmp(request->peer_login, "NEGOTIATE",strlen("NEGOTIATE")) == 0) {
        char *Token = NULL;
        char *PrincipalName=NULL,*p;
        // optional "NEGOTIATE:principal" form selects an explicit principal
        if ((p=strchr(request->peer_login,':')) != NULL ) {
            PrincipalName = ++p;
        }
        Token = peer_proxy_negotiate_auth(PrincipalName, request->peer_host);
        if (Token) {
            httpHeaderPutStrf(hdr_out, header, "Negotiate %s",Token);
        }
        return;
    }
#endif /* HAVE_KRB5 && HAVE_GSSAPI */

    // default: peer_login is a literal user:password pair
    httpHeaderPutStrf(hdr_out, header, "Basic %s",
                      old_base64_encode(request->peer_login));
    return;
}
/*
 * build request headers and append them to a given MemBuf
 * used by buildRequestPrefix()
 * note: initialised the HttpHeader, the caller is responsible for Clean()-ing
 */
void
HttpStateData::httpBuildRequestHeader(HttpRequest * request,
                                      StoreEntry * entry,
                                      HttpHeader * hdr_out,
                                      const http_state_flags flags)
{
    /* building buffer for complex strings */
#define BBUF_SZ (MAX_URL+32)
    LOCAL_ARRAY(char, bbuf, BBUF_SZ);
    LOCAL_ARRAY(char, ntoabuf, MAX_IPSTRLEN);
    const HttpHeader *hdr_in = &request->header;
    const HttpHeaderEntry *e = NULL;
    HttpHeaderPos pos = HttpHeaderInitPos;
    assert (hdr_out->owner == hoRequest);

    /* append our IMS header */
    if (request->lastmod > -1)
        hdr_out->putTime(HDR_IF_MODIFIED_SINCE, request->lastmod);

    bool we_do_ranges = decideIfWeDoRanges (request);

    String strConnection (hdr_in->getList(HDR_CONNECTION));

    // copy/filter each client header per the per-header rules
    while ((e = hdr_in->getEntry(&pos)))
        copyOneHeaderFromClientsideRequestToUpstreamRequest(e, strConnection, request, hdr_out, we_do_ranges, flags);

    /* Abstraction break: We should interpret multipart/byterange responses
     * into offset-length data, and this works around our inability to do so.
     */
    if (!we_do_ranges && request->multipartRangeRequest()) {
        /* don't cache the result */
        request->flags.cachable = 0;
        /* pretend it's not a range request */
        delete request->range;
        request->range = NULL;
        request->flags.range = 0;
    }

    /* append Via */
    if (Config.onoff.via) {
        String strVia;
        strVia = hdr_in->getList(HDR_VIA);
        snprintf(bbuf, BBUF_SZ, "%d.%d %s",
                 request->http_ver.major,
                 request->http_ver.minor, ThisCache);
        strListAdd(&strVia, bbuf, ',');
        hdr_out->putStr(HDR_VIA, strVia.termedBuf());
        strVia.clean();
    }

    if (request->flags.accelerated) {
        /* Append Surrogate-Capabilities */
        String strSurrogate(hdr_in->getList(HDR_SURROGATE_CAPABILITY));
#if USE_SQUID_ESI
        snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0 ESI/1.0\"", Config.Accel.surrogate_id);
#else
        snprintf(bbuf, BBUF_SZ, "%s=\"Surrogate/1.0\"", Config.Accel.surrogate_id);
#endif
        strListAdd(&strSurrogate, bbuf, ',');
        hdr_out->putStr(HDR_SURROGATE_CAPABILITY, strSurrogate.termedBuf());
    }

    /** \pre Handle X-Forwarded-For */
    if (strcmp(opt_forwarded_for, "delete") != 0) {

        String strFwd = hdr_in->getList(HDR_X_FORWARDED_FOR);

        if (strFwd.size() > 65536/2) {
            // There is probably a forwarding loop with Via detection disabled.
            // If we do nothing, String will assert on overflow soon.
            // TODO: Terminate all transactions with huge XFF?
            strFwd = "error";

            static int warnedCount = 0;
            if (warnedCount++ < 100) {
                const char *url = entry ? entry->url() : urlCanonical(request);
                debugs(11, 1, "Warning: likely forwarding loop with " << url);
            }
        }

        if (strcmp(opt_forwarded_for, "on") == 0) {
            /** If set to ON - append client IP or 'unknown'. */
            if ( request->client_addr.IsNoAddr() )
                strListAdd(&strFwd, "unknown", ',');
            else
                strListAdd(&strFwd, request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN), ',');
        } else if (strcmp(opt_forwarded_for, "off") == 0) {
            /** If set to OFF - append 'unknown'. */
            strListAdd(&strFwd, "unknown", ',');
        } else if (strcmp(opt_forwarded_for, "transparent") == 0) {
            /** If set to TRANSPARENT - pass through unchanged. */
        } else if (strcmp(opt_forwarded_for, "truncate") == 0) {
            /** If set to TRUNCATE - drop existing list and replace with client IP or 'unknown'. */
            if ( request->client_addr.IsNoAddr() )
                strFwd = "unknown";
            else
                strFwd = request->client_addr.NtoA(ntoabuf, MAX_IPSTRLEN);
        }
        if (strFwd.size() > 0)
            hdr_out->putStr(HDR_X_FORWARDED_FOR, strFwd.termedBuf());
    }
    /** If set to DELETE - do not copy through. */

    /* append Host if not there already */
    if (!hdr_out->has(HDR_HOST)) {
        if (request->peer_domain) {
            hdr_out->putStr(HDR_HOST, request->peer_domain);
        } else if (request->port == urlDefaultPort(request->protocol)) {
            /* use port# only if not default */
            hdr_out->putStr(HDR_HOST, request->GetHost());
        } else {
            httpHeaderPutStrf(hdr_out, HDR_HOST, "%s:%d",
                              request->GetHost(),
                              (int) request->port);
        }
    }

    /* append Authorization if known in URL, not in header and going direct */
    if (!hdr_out->has(HDR_AUTHORIZATION)) {
        if (!request->flags.proxying && request->login && *request->login) {
            httpHeaderPutStrf(hdr_out, HDR_AUTHORIZATION, "Basic %s",
                              old_base64_encode(request->login));
        }
    }

    /* Fixup (Proxy-)Authorization special cases. Plain relaying dealt with above */
    httpFixupAuthentication(request, hdr_in, hdr_out, flags);

    /* append Cache-Control, add max-age if not there already */
    {
        HttpHdrCc *cc = hdr_in->getCc();

        if (!cc)
            cc = httpHdrCcCreate();

#if 0 /* see bug 2330 */
        /* Set no-cache if determined needed but not found */
        if (request->flags.nocache)
            EBIT_SET(cc->mask, CC_NO_CACHE);
#endif

        /* Add max-age only without no-cache */
        if (!EBIT_TEST(cc->mask, CC_MAX_AGE) && !EBIT_TEST(cc->mask, CC_NO_CACHE)) {
            const char *url =
                entry ? entry->url() : urlCanonical(request);
            httpHdrCcSetMaxAge(cc, getMaxAge(url));
        }

        /* Enforce sibling relations */
        if (flags.only_if_cached)
            EBIT_SET(cc->mask, CC_ONLY_IF_CACHED);

        hdr_out->putCc(cc);

        httpHdrCcDestroy(cc);
    }

    /* maybe append Connection: keep-alive */
    if (flags.keepalive) {
        hdr_out->putStr(HDR_CONNECTION, "keep-alive");
    }

    /* append Front-End-Https */
    if (flags.front_end_https) {
        // value 1 means "always"; otherwise only for HTTPS URLs
        if (flags.front_end_https == 1 || request->protocol == AnyP::PROTO_HTTPS)
            hdr_out->putStr(HDR_FRONT_END_HTTPS, "On");
    }

    if (flags.chunked_request) {
        // Do not just copy the original value so that if the client-side
        // starts decode other encodings, this code may remain valid.
        hdr_out->putStr(HDR_TRANSFER_ENCODING, "chunked");
    }

    /* Now mangle the headers. */
    if (Config2.onoff.mangle_request_headers)
        httpHdrMangleList(hdr_out, request, ROR_REQUEST);

    strConnection.clean();
}
/**
 * Decides whether a particular header may be cloned from the received Clients request
 * to our outgoing fetch request.
 */
void
copyOneHeaderFromClientsideRequestToUpstreamRequest(const HttpHeaderEntry *e, const String strConnection, const HttpRequest * request, HttpHeader * hdr_out, const int we_do_ranges, const http_state_flags flags)
{
    debugs(11, 5, "httpBuildRequestHeader: " << e->name << ": " << e->value );

    switch (e->id) {

        /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid should not pass on. */

    case HDR_PROXY_AUTHORIZATION:
        /** \par Proxy-Authorization:
         * Only pass on proxy authentication to peers for which
         * authentication forwarding is explicitly enabled
         */
        if (!flags.originpeer && flags.proxying && request->peer_login &&
                (strcmp(request->peer_login, "PASS") == 0 ||
                 strcmp(request->peer_login, "PROXYPASS") == 0 ||
                 strcmp(request->peer_login, "PASSTHRU") == 0)) {
            hdr_out->addEntry(e->clone());
        }
        break;

        /** \par RFC 2616 sect 13.5.1 - Hop-by-Hop headers which Squid does not pass on. */

    case HDR_CONNECTION:          /** \par Connection: */
    case HDR_TE:                  /** \par TE: */
    case HDR_KEEP_ALIVE:          /** \par Keep-Alive: */
    case HDR_PROXY_AUTHENTICATE:  /** \par Proxy-Authenticate: */
    case HDR_TRAILER:             /** \par Trailer: */
    case HDR_UPGRADE:             /** \par Upgrade: */
    case HDR_TRANSFER_ENCODING:   /** \par Transfer-Encoding: */
        break;

        /** \par OTHER headers I haven't bothered to track down yet. */

    case HDR_AUTHORIZATION:
        /** \par WWW-Authorization:
         * Pass on WWW authentication */

        if (!flags.originpeer) {
            hdr_out->addEntry(e->clone());
        } else {
            /** \note In accelerators, only forward authentication if enabled
             * (see also httpFixupAuthentication for special cases)
             */
            if (request->peer_login &&
                    (strcmp(request->peer_login, "PASS") == 0 ||
                     strcmp(request->peer_login, "PASSTHRU") == 0 ||
                     strcmp(request->peer_login, "PROXYPASS") == 0)) {
                hdr_out->addEntry(e->clone());
            }
        }

        break;

    case HDR_HOST:
        /** \par Host:
         * Normally Squid rewrites the Host: header.
         * However, there is one case when we don't: If the URL
         * went through our redirector and the admin configured
         * 'redir_rewrites_host' to be off.
         */
        if (request->peer_domain)
            hdr_out->putStr(HDR_HOST, request->peer_domain);
        else if (request->flags.redirected && !Config.onoff.redir_rewrites_host)
            hdr_out->addEntry(e->clone());
        else {
            /* use port# only if not default */

            if (request->port == urlDefaultPort(request->protocol)) {
                hdr_out->putStr(HDR_HOST, request->GetHost());
            } else {
                httpHeaderPutStrf(hdr_out, HDR_HOST, "%s:%d",
                                  request->GetHost(),
                                  (int) request->port);
            }
        }

        break;

    case HDR_IF_MODIFIED_SINCE:
        /** \par If-Modified-Since:
         * append unless we added our own;
         * \note at most one client's ims header can pass through */

        if (!hdr_out->has(HDR_IF_MODIFIED_SINCE))
            hdr_out->addEntry(e->clone());

        break;

    case HDR_MAX_FORWARDS:
        /** \par Max-Forwards:
         * pass only on TRACE or OPTIONS requests */
        if (request->method == METHOD_TRACE || request->method == METHOD_OPTIONS) {
            const int64_t hops = e->getInt64();

            // decrement per RFC; a value of 0 means do not forward at all
            if (hops > 0)
                hdr_out->putInt64(HDR_MAX_FORWARDS, hops - 1);
        }

        break;

    case HDR_VIA:
        /** \par Via:
         * If Via is disabled then forward any received header as-is.
         * Otherwise leave for explicit updated addition later. */

        if (!Config.onoff.via)
            hdr_out->addEntry(e->clone());

        break;

    case HDR_RANGE:

    case HDR_IF_RANGE:

    case HDR_REQUEST_RANGE:
        /** \par Range:, If-Range:, Request-Range:
         * Only pass if we accept ranges */
        if (!we_do_ranges)
            hdr_out->addEntry(e->clone());

        break;

    case HDR_PROXY_CONNECTION: // SHOULD ignore. But doing so breaks things.
        break;

    case HDR_CONTENT_LENGTH:
        // pass through unless we chunk; also, keeping this away from default
        // prevents request smuggling via Connection: Content-Length tricks
        if (!flags.chunked_request)
            hdr_out->addEntry(e->clone());
        break;

    case HDR_X_FORWARDED_FOR:

    case HDR_CACHE_CONTROL:
        /** \par X-Forwarded-For:, Cache-Control:
         * handled specially by Squid, so leave off for now.
         * append these after the loop if needed */
        break;

    case HDR_FRONT_END_HTTPS:
        /** \par Front-End-Https:
         * Pass thru only if peer is configured with front-end-https */
        if (!flags.front_end_https)
            hdr_out->addEntry(e->clone());

        break;

    default:
        /** \par default.
         * pass on all other header fields
         * which are NOT listed by the special Connection: header. */

        if (strConnection.size()>0 && strListIsMember(&strConnection, e->name.termedBuf(), ',')) {
            debugs(11, 2, "'" << e->name << "' header cropped by Connection: definition");
            return;
        }

        hdr_out->addEntry(e->clone());
    }
}
1975 HttpStateData::decideIfWeDoRanges (HttpRequest
* request
)
1978 /* decide if we want to do Ranges ourselves
1979 * and fetch the whole object now)
1980 * We want to handle Ranges ourselves iff
1981 * - we can actually parse client Range specs
1982 * - the specs are expected to be simple enough (e.g. no out-of-order ranges)
1983 * - reply will be cachable
1984 * (If the reply will be uncachable we have to throw it away after
1985 * serving this request, so it is better to forward ranges to
1986 * the server and fetch only the requested content)
1989 int64_t roffLimit
= request
->getRangeOffsetLimit();
1991 if (NULL
== request
->range
|| !request
->flags
.cachable
1992 || request
->range
->offsetLimitExceeded(roffLimit
) || request
->flags
.connection_auth
)
1995 debugs(11, 8, "decideIfWeDoRanges: range specs: " <<
1996 request
->range
<< ", cachable: " <<
1997 request
->flags
.cachable
<< "; we_do_ranges: " << result
);
/* build request prefix and append it to a given MemBuf;
 * return the length of the prefix */
mb_size_t
HttpStateData::buildRequestPrefix(MemBuf * mb)
{
    const int offset = mb->size;
    // we always speak HTTP/1.1 to the next hop
    HttpVersion httpver(1,1);
    const char *url;
    // proxy-style requests need the absolute URL; origin/accel use the path
    if (_peer && !_peer->options.originserver)
        url = urlCanonical(request);
    else
        url = request->urlpath.termedBuf();
    mb->Printf("%s %s %s/%d.%d\r\n",
               RequestMethodStr(request->method),
               url && *url ? url : "/",
               AnyP::ProtocolType_str[httpver.protocol],
               httpver.major,httpver.minor);
    /* build and pack headers */
    {
        HttpHeader hdr(hoRequest);
        Packer p;
        httpBuildRequestHeader(request, entry, &hdr, flags);

        // record whether credentials went out, for connection-auth pinning
        if (request->flags.pinned && request->flags.connection_auth)
            request->flags.auth_sent = 1;
        else if (hdr.has(HDR_AUTHORIZATION))
            request->flags.auth_sent = 1;

        packerToMemInit(&p, mb);
        hdr.packInto(&p);
        hdr.clean();
        packerClean(&p);
    }
    /* append header terminator */
    mb->append(crlf, 2);
    return mb->size - offset;
}
/* This will be called when connect completes. Write request. */
/// Returns false if the request could not be sent (connection closing).
bool
HttpStateData::sendRequest()
{
    MemBuf mb;

    debugs(11, 5, HERE << serverConnection << ", request " << request << ", this " << this << ".");

    if (!Comm::IsConnOpen(serverConnection)) {
        debugs(11,3, HERE << "cannot send request to closing " << serverConnection);
        assert(closeHandler != NULL);
        return false;
    }

    // lifetime timeout until the request is fully written;
    // sendComplete() later replaces it with the read timeout
    typedef CommCbMemFunT<HttpStateData, CommTimeoutCbParams> TimeoutDialer;
    AsyncCall::Pointer timeoutCall = JobCallback(11, 5,
                                     TimeoutDialer, this, HttpStateData::httpTimeout);
    commSetConnTimeout(serverConnection, Config.Timeout.lifetime, timeoutCall);
    flags.do_next_read = 1;
    maybeReadVirginBody();

    if (request->body_pipe != NULL) {
        if (!startRequestBodyFlow()) // register to receive body data
            return false;
        typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
        requestSender = JobCallback(11,5,
                                    Dialer, this, HttpStateData::sentRequestBody);

        Must(!flags.chunked_request);
        // use chunked encoding if we do not know the length
        if (request->content_length < 0)
            flags.chunked_request = 1;
    } else {
        assert(!requestBodySource);
        typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
        requestSender = JobCallback(11,5,
                                    Dialer, this, HttpStateData::wroteLast);
    }

    // classify the next hop: origin server peer, proxy peer, or direct
    if (_peer != NULL) {
        if (_peer->options.originserver) {
            flags.proxying = 0;
            flags.originpeer = 1;
        } else {
            flags.proxying = 1;
            flags.originpeer = 0;
        }
    } else {
        flags.proxying = 0;
        flags.originpeer = 0;
    }

    /*
     * Is keep-alive okay for all request methods?
     */
    if (request->flags.must_keepalive)
        flags.keepalive = 1;
    else if (!Config.onoff.server_pconns)
        flags.keepalive = 0;
    else if (_peer == NULL)
        flags.keepalive = 1;
    else if (_peer->stats.n_keepalives_sent < 10)
        flags.keepalive = 1;
    else if ((double) _peer->stats.n_keepalives_recv /
             (double) _peer->stats.n_keepalives_sent > 0.50)
        flags.keepalive = 1;

    if (_peer) {
        /*The old code here was
          if (neighborType(_peer, request) == PEER_SIBLING && ...
          which is equivalent to:
          if (neighborType(_peer, NULL) == PEER_SIBLING && ...
          or better:
          if (((_peer->type == PEER_MULTICAST && p->options.mcast_siblings) ||
               _peer->type == PEER_SIBLINGS ) && _peer->options.allow_miss)
              flags.only_if_cached = 1;

          But I suppose it was a bug
         */
        if (neighborType(_peer, request) == PEER_SIBLING &&
                !_peer->options.allow_miss)
            flags.only_if_cached = 1;

        flags.front_end_https = _peer->front_end_https;
    }

    mb.init();
    request->peer_host=_peer?_peer->host:NULL;
    buildRequestPrefix(&mb);

    debugs(11, 2, "HTTP Server " << serverConnection);
    debugs(11, 2, "HTTP Server REQUEST:\n---------\n" << mb.buf << "\n----------");

    Comm::Write(serverConnection, &mb, requestSender);
    return true;
}
/// Produce the next piece of the request body to write, wrapping raw
/// producer bytes in chunked-coding framing when flags.chunked_request.
/// Returns false when no body bytes are available yet.
bool
HttpStateData::getMoreRequestBody(MemBuf &buf)
{
    // parent's implementation can handle the no-encoding case
    if (!flags.chunked_request)
        return ServerStateData::getMoreRequestBody(buf);

    MemBuf raw;

    Must(requestBodySource != NULL);
    if (!requestBodySource->getMoreData(raw))
        return false; // no request body bytes to chunk yet

    // optimization: pre-allocate buffer size that should be enough
    const mb_size_t rawDataSize = raw.contentSize();
    // we may need to send: hex-chunk-size CRLF raw-data CRLF last-chunk
    buf.init(16 + 2 + rawDataSize + 2 + 5, raw.max_capacity);

    buf.Printf("%x\r\n", static_cast<unsigned int>(rawDataSize));
    buf.append(raw.content(), rawDataSize);
    buf.Printf("\r\n");

    Must(rawDataSize > 0); // we did not accidently created last-chunk above

    // Do not send last-chunk unless we successfully received everything
    if (receivedWholeRequestBody) {
        Must(!flags.sentLastChunk);
        flags.sentLastChunk = true;
        buf.append("0\r\n\r\n", 5);
    }

    return true;
}
/// Entry point from FwdState: create the HTTP server-side state and start
/// sending the request; accounts the attempt in server statistics.
void
httpStart(FwdState *fwd)
{
    debugs(11, 3, "httpStart: \"" << RequestMethodStr(fwd->request->method) << " " << fwd->entry->url() << "\"" );
    HttpStateData *httpState = new HttpStateData(fwd);

    if (!httpState->sendRequest()) {
        debugs(11, 3, "httpStart: aborted");
        delete httpState;
        return;
    }

    statCounter.server.all.requests++;
    statCounter.server.http.requests++;

    /*
     * We used to set the read timeout here, but not any more.
     * Now its set in httpSendComplete() after the full request,
     * including request body, has been written to the server.
     */
}
/// if broken posts are enabled for the request, try to fix and return true
/// (the fix is an extra CRLF after the body, for servers that require it)
bool
HttpStateData::finishingBrokenPost()
{
#if USE_HTTP_VIOLATIONS
    if (!Config.accessList.brokenPosts) {
        debugs(11, 5, HERE << "No brokenPosts list");
        return false;
    }

    ACLFilledChecklist ch(Config.accessList.brokenPosts, originalRequest(), NULL);
    if (!ch.fastCheck()) {
        debugs(11, 5, HERE << "didn't match brokenPosts");
        return false;
    }

    if (!Comm::IsConnOpen(serverConnection)) {
        debugs(11,2, HERE << "ignoring broken POST for closed " << serverConnection);
        assert(closeHandler != NULL);
        return true; // prevent caller from proceeding as if nothing happened
    }

    debugs(11, 2, "finishingBrokenPost: fixing broken POST");
    // wroteLast() runs when the extra CRLF has been written
    typedef CommCbMemFunT<HttpStateData, CommIoCbParams> Dialer;
    requestSender = JobCallback(11,5,
                                Dialer, this, HttpStateData::wroteLast);
    Comm::Write(serverConnection, "\r\n", 2, requestSender, NULL);
    return true;
#else
    return false;
#endif /* USE_HTTP_VIOLATIONS */
}
2226 /// if needed, write last-chunk to end the request body and return true
2228 HttpStateData::finishingChunkedRequest()
2230 if (flags
.sentLastChunk
) {
2231 debugs(11, 5, HERE
<< "already sent last-chunk");
2235 Must(receivedWholeRequestBody
); // or we should not be sending last-chunk
2236 flags
.sentLastChunk
= true;
2238 typedef CommCbMemFunT
<HttpStateData
, CommIoCbParams
> Dialer
;
2239 requestSender
= JobCallback(11,5, Dialer
, this, HttpStateData::wroteLast
);
2240 Comm::Write(serverConnection
, "0\r\n\r\n", 5, requestSender
, NULL
);
2245 HttpStateData::doneSendingRequestBody()
2247 ServerStateData::doneSendingRequestBody();
2248 debugs(11,5, HERE
<< serverConnection
);
2250 // do we need to write something after the last body byte?
2251 if (flags
.chunked_request
&& finishingChunkedRequest())
2253 if (!flags
.chunked_request
&& finishingBrokenPost())
2259 // more origin request body data is available
2261 HttpStateData::handleMoreRequestBodyAvailable()
2263 if (eof
|| !Comm::IsConnOpen(serverConnection
)) {
2264 // XXX: we should check this condition in other callbacks then!
2265 // TODO: Check whether this can actually happen: We should unsubscribe
2266 // as a body consumer when the above condition(s) are detected.
2267 debugs(11, 1, HERE
<< "Transaction aborted while reading HTTP body");
2271 assert(requestBodySource
!= NULL
);
2273 if (requestBodySource
->buf().hasContent()) {
2274 // XXX: why does not this trigger a debug message on every request?
2276 if (flags
.headers_parsed
&& !flags
.abuse_detected
) {
2277 flags
.abuse_detected
= 1;
2278 debugs(11, 1, "http handleMoreRequestBodyAvailable: Likely proxy abuse detected '" << request
->client_addr
<< "' -> '" << entry
->url() << "'" );
2280 if (virginReply()->sline
.status
== HTTP_INVALID_HEADER
) {
2281 serverConnection
->close();
2287 HttpStateData::handleMoreRequestBodyAvailable();
// premature end of the request body
void
HttpStateData::handleRequestBodyProducerAborted()
{
    ServerStateData::handleRequestBodyProducerAborted();
    if (entry->isEmpty()) {
        debugs(11, 3, "request body aborted: " << serverConnection);
        ErrorState *err;
        // We usually get here when ICAP REQMOD aborts during body processing.
        // We might also get here if client-side aborts, but then our response
        // should not matter because either client-side will provide its own or
        // there will be no response at all (e.g., if the the client has left).
        err = errorCon(ERR_ICAP_FAILURE, HTTP_INTERNAL_SERVER_ERROR, fwd->request);
        // NOTE(review): storing an err_detail code in xerrno looks unusual;
        // later Squid uses err->detailError() — confirm against this version.
        err->xerrno = ERR_DETAIL_SRV_REQMOD_REQ_BODY;
        fwd->fail(err);
    }

    abortTransaction("request body producer aborted");
}
// called when we wrote request headers(!) or a part of the body
void
HttpStateData::sentRequestBody(const CommIoCbParams &io)
{
    // account written bytes before delegating the bookkeeping to the base
    // NOTE(review): upstream guards this with `if (io.size > 0)`; that line
    // appears dropped from this excerpt — confirm against the full file.
    if (io.size > 0)
        kb_incr(&statCounter.server.http.kbytes_out, io.size);

    ServerStateData::sentRequestBody(io);
}
// Quickly abort the transaction
// TODO: destruction should be sufficient as the destructor should cleanup,
// including canceling close handlers
void
HttpStateData::abortTransaction(const char *reason)
{
    debugs(11,5, HERE << "aborting transaction for " << reason <<
           "; " << serverConnection << ", this " << this);

    if (Comm::IsConnOpen(serverConnection)) {
        // closing triggers our close handler, which finishes the cleanup
        serverConnection->close();
        return;
    }

    // no connection to close: detach from forwarding and self-destruct
    fwd->handleUnregisteredServerEnd();
    deleteThis("HttpStateData::abortTransaction");
}