3 * $Id: client_side_request.cc,v 1.80 2007/04/06 04:50:06 rousskov Exp $
5 * DEBUG: section 85 Client-side Request Routines
6 * AUTHOR: Robert Collins (Originally Duane Wessels in client_side.c)
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
38 * General logic of request processing:
40 * We run a series of tests to determine if access will be permitted, and to do
41 * any redirection. Then we call into the result clientStream to retrieve data.
42 * From that point on it's up to reply management.
46 #include "clientStream.h"
47 #include "client_side_request.h"
48 #include "AuthUserRequest.h"
49 #include "HttpRequest.h"
50 #include "ACLChecklist.h"
52 #include "client_side.h"
53 #include "client_side_reply.h"
55 #include "HttpReply.h"
56 #include "MemObject.h"
57 #include "ClientRequestContext.h"
58 #include "SquidTime.h"
62 #include "ICAP/ICAPModXact.h"
63 #include "ICAP/ICAPElements.h"
64 #include "ICAP/ICAPConfig.h"
65 static void icapAclCheckDoneWrapper(ICAPServiceRep::Pointer service
, void *data
);
66 extern ICAPConfig TheICAPConfig
;
70 #define comm_close comm_lingering_close
// File-scope constants: CRLF line terminator and cbdata registration for
// ClientRequestContext. NOTE(review): text below is a line-mangled extraction
// (embedded original line numbers, split statements) — left byte-identical.
73 static const char *const crlf
= "\r\n";
75 CBDATA_CLASS_INIT(ClientRequestContext
);
// Custom operator new: allocates ClientRequestContext through the cbdata
// reference-counting allocator (asserts the size matches; tail of the
// function — the return — is missing from this extraction).
78 ClientRequestContext::operator new (size_t size
)
80 assert (size
== sizeof(ClientRequestContext
));
81 CBDATA_INIT_TYPE(ClientRequestContext
);
82 ClientRequestContext
*result
= cbdataAlloc(ClientRequestContext
);
// Custom operator delete: casts the raw address back to the cbdata-managed
// type (the matching cbdataFree call is outside this extraction's view).
87 ClientRequestContext::operator delete (void *address
)
89 ClientRequestContext
*t
= static_cast<ClientRequestContext
*>(address
);
// File-local forward declarations: C-style callback wrappers used by the
// non-blocking ACL/redirect/no-cache checks, plus the clientStream
// callbacks (CSR/CSS/CSD) implemented in client_side_reply.cc.
95 static void clientAccessCheckDoneWrapper(int, void *);
96 static int clientHierarchical(ClientHttpRequest
* http
);
97 static void clientInterpretRequestHeaders(ClientHttpRequest
* http
);
98 static RH clientRedirectDoneWrapper
;
99 static PF checkNoCacheDoneWrapper
;
100 extern "C" CSR clientGetMoreData
;
101 extern "C" CSS clientReplyStatus
;
102 extern "C" CSD clientReplyDetach
;
103 static void checkFailureRatio(err_type
, hier_code
);
// Destructor: drops the cbdata reference on the parent ClientHttpRequest,
// and disposes of any in-flight ACL checklist — an async-in-progress
// checklist is marked for self-deletion rather than deleted directly.
105 ClientRequestContext::~ClientRequestContext()
108 * Release our "lock" on our parent, ClientHttpRequest, if we
113 cbdataReferenceDone(http
);
116 if (acl_checklist
->asyncInProgress()) {
117 acl_checklist
->markDeleteWhenDone();
119 delete acl_checklist
;
123 debugs(85,3, HERE
<< this << " ClientRequestContext destructed");
// Constructor: takes a cbdata reference on the owning ClientHttpRequest and
// clears all the callout "done" flags so doCallouts() runs each stage once.
126 ClientRequestContext::ClientRequestContext(ClientHttpRequest
*anHttp
) : http(cbdataReference(anHttp
)), acl_checklist (NULL
), redirect_state (REDIRECT_NONE
)
128 http_access_done
= false;
129 redirect_done
= false;
130 no_cache_done
= false;
131 interpreted_req_hdrs
= false;
132 debugs(85,3, HERE
<< this << " ClientRequestContext constructed");
// cbdata type registration for ClientHttpRequest.
135 CBDATA_CLASS_INIT(ClientHttpRequest
);
// Custom operator new: cbdata-backed allocation, mirroring
// ClientRequestContext::operator new above (return is outside this view).
138 ClientHttpRequest::operator new (size_t size
)
140 assert (size
== sizeof (ClientHttpRequest
));
141 CBDATA_INIT_TYPE(ClientHttpRequest
);
142 ClientHttpRequest
*result
= cbdataAlloc(ClientHttpRequest
);
// Custom operator delete for the cbdata-managed ClientHttpRequest.
147 ClientHttpRequest::operator delete (void *address
)
149 ClientHttpRequest
*t
= static_cast<ClientHttpRequest
*>(address
);
// Constructor: records the request start time, links this request into the
// global ClientActiveRequests list, and starts outside ICAP request-
// satisfaction mode.
153 ClientHttpRequest::ClientHttpRequest(ConnStateData::Pointer aConn
) : loggingEntry_(NULL
)
155 start
= current_time
;
157 dlinkAdd(this, &active
, &ClientActiveRequests
);
159 request_satisfaction_mode
= false;
// True when the client sent Cache-Control: only-if-cached, i.e. the object
// must be served from cache without contacting the origin server.
164 * returns true if client specified that the object must come from the cache
165 * without contacting origin server
168 ClientHttpRequest::onlyIfCached()const
171 return request
->cache_control
&&
172 EBIT_TEST(request
->cache_control
->mask
, CC_ONLY_IF_CACHED
);
// Tracks the failure:success ratio of forwarded requests; when it exceeds
// 1.0 the cache enters "hit only" mode (UDP_HIT/UDP_MISS_NOFETCH only) for
// FAILURE_MODE_TIME seconds. Only HIER_NONE outcomes are counted.
176 * This function is designed to serve a fairly specific purpose.
177 * Occasionally our vBNS-connected caches can talk to each other, but not
178 * the rest of the world. Here we try to detect frequent failures which
179 * make the cache unusable (e.g. DNS lookup and connect() failures). If
180 * the failure:success ratio goes above 1.0 then we go into "hit only"
181 * mode where we only return UDP_HIT or UDP_MISS_NOFETCH. Neighbors
182 * will only fetch HITs from us if they are using the ICP protocol. We
183 * stay in this mode for 5 minutes.
185 * Duane W., Sept 16, 1996
188 #define FAILURE_MODE_TIME 300
191 checkFailureRatio(err_type etype
, hier_code hcode
)
193 static double magic_factor
= 100.0;
197 if (hcode
== HIER_NONE
)
// magic_factor weights the exponentially-decayed good/bad counters.
200 n_good
= magic_factor
/ (1.0 + request_failure_ratio
);
202 n_bad
= magic_factor
- n_good
;
208 case ERR_CONNECT_FAIL
:
218 request_failure_ratio
= n_bad
/ n_good
;
220 if (hit_only_mode_until
> squid_curtime
)
223 if (request_failure_ratio
< 1.0)
226 debug(33, 0) ("Failure Ratio at %4.2f\n", request_failure_ratio
);
228 debug(33, 0) ("Going into hit-only-mode for %d minutes...\n",
229 FAILURE_MODE_TIME
/ 60);
231 hit_only_mode_until
= squid_curtime
+ FAILURE_MODE_TIME
;
// Reset below 1.0 so we exit hit-only mode once the window expires.
233 request_failure_ratio
= 0.8; /* reset to something less than 1.0 */
// Destructor: updates the failure ratio, aborts/detaches any ICAP header
// and body sources, deletes the callout context, and unlinks this request
// from the global active-requests list.
236 ClientHttpRequest::~ClientHttpRequest()
238 debug(33, 3) ("httpRequestFree: %s\n", uri
);
239 PROF_start(httpRequestFree
);
241 // Even though freeResources() below may destroy the request,
242 // we no longer set request->body_pipe to NULL here
243 // because we did not initiate that pipe (ConnStateData did)
245 /* the ICP check here was erroneous
246 * - storeReleaseRequest was always called if entry was valid
248 assert(logType
< LOG_TYPE_MAX
);
255 checkFailureRatio(request
->errType
, al
.hier
.code
);
260 if (icapHeadSource
!= NULL
) {
261 icapHeadSource
->noteInitiatorAborted();
262 icapHeadSource
= NULL
;
264 if (icapBodySource
!= NULL
)
265 stopConsumingFrom(icapBodySource
);
269 delete calloutContext
;
271 /* moving to the next connection is handled by the context free */
272 dlinkDelete(&active
, &ClientActiveRequests
);
274 PROF_stop(httpRequestFree
);
// Entry point for internally-generated requests (no client connection):
// builds a ClientHttpRequest + HttpRequest from a URL/method, wires up the
// client stream, marks the request internal/accelerated, and pre-marks the
// redirect and no-cache callouts as done so only the access check can run.
// Returns nonzero on failure (e.g. unparsable URL).
277 /* Create a request and kick it off */
279 * TODO: Pass in the buffers to be used in the inital Read request, as they are
280 * determined by the user
282 int /* returns nonzero on failure */
283 clientBeginRequest(method_t method
, char const *url
, CSCB
* streamcallback
,
284 CSD
* streamdetach
, ClientStreamData streamdata
, HttpHeader
const *header
,
285 char *tailbuf
, size_t taillen
)
288 HttpVersion
http_ver (1, 0);
289 ClientHttpRequest
*http
= new ClientHttpRequest(NULL
);
290 HttpRequest
*request
;
291 StoreIOBuffer tempBuffer
;
292 http
->start
= current_time
;
293 /* this is only used to adjust the connection offset in client_side.c */
295 tempBuffer
.length
= taillen
;
296 tempBuffer
.data
= tailbuf
;
297 /* client stream setup */
298 clientStreamInit(&http
->client_stream
, clientGetMoreData
, clientReplyDetach
,
299 clientReplyStatus
, new clientReplyContext(http
), streamcallback
,
300 streamdetach
, streamdata
, tempBuffer
);
301 /* make it visible in the 'current acctive requests list' */
303 /* internal requests only makes sense in an
304 * accelerator today. TODO: accept flags ? */
305 http
->flags
.accel
= 1;
306 /* allow size for url rewriting */
307 url_sz
= strlen(url
) + Config
.appendDomainLen
+ 5;
308 http
->uri
= (char *)xcalloc(url_sz
, 1);
309 strcpy(http
->uri
, url
);
311 if ((request
= HttpRequest::CreateFromUrlAndMethod(http
->uri
, method
)) == NULL
) {
312 debug(85, 5) ("Invalid URL: %s\n", http
->uri
);
317 * now update the headers in request with our supplied headers. urLParse
318 * should return a blank header set, but we use Update to be sure of
322 request
->header
.update(header
, NULL
);
324 http
->log_uri
= xstrdup(urlCanonicalClean(request
));
326 /* http struct now ready */
329 * build new header list *? TODO
331 request
->flags
.accelerated
= http
->flags
.accel
;
333 request
->flags
.internalclient
= 1;
335 /* this is an internally created
336 * request, not subject to acceleration
337 * target overrides */
339 * FIXME? Do we want to detect and handle internal requests of internal
343 /* Internally created requests cannot have bodies today */
344 request
->content_length
= 0;
346 request
->client_addr
= no_addr
;
348 request
->my_addr
= no_addr
; /* undefined for internal requests */
350 request
->my_port
= 0;
352 request
->http_ver
= http_ver
;
354 http
->request
= HTTPMSGLOCK(request
);
356 /* optional - skip the access check ? */
357 http
->calloutContext
= new ClientRequestContext(http
);
// Internal requests skip the redirector and no-cache callouts entirely.
359 http
->calloutContext
->http_access_done
= false;
361 http
->calloutContext
->redirect_done
= true;
363 http
->calloutContext
->no_cache_done
= true;
// Checks whether the parent ClientHttpRequest's cbdata is still valid;
// drops the reference if it is not (preventing callbacks into freed state).
371 ClientRequestContext::httpStateIsValid()
373 ClientHttpRequest
*http_
= http
;
375 if (cbdataReferenceValid(http_
))
380 cbdataReferenceDone(http_
);
// Starts the non-blocking http_access ACL check; the result is delivered
// asynchronously via clientAccessCheckDoneWrapper.
385 /* This is the entry point for external users of the client_side routines */
387 ClientRequestContext::clientAccessCheck()
390 clientAclChecklistCreate(Config
.accessList
.http
, http
);
391 acl_checklist
->nonBlockingCheck(clientAccessCheckDoneWrapper
, this);
// C callback bridging the ACL checklist result back into the
// ClientRequestContext, guarding against a request freed mid-check.
395 clientAccessCheckDoneWrapper(int answer
, void *data
)
397 ClientRequestContext
*calloutContext
= (ClientRequestContext
*) data
;
399 if (!calloutContext
->httpStateIsValid())
402 calloutContext
->clientAccessCheckDone(answer
);
// Handles the http_access verdict. On denial, selects the deny_info error
// page and status (407 for proxy auth in proxy mode, 401 in accel mode,
// else 403) and pushes an error reply down the client stream. On ALLOWED,
// re-canonicalizes http->uri and (outside this view) continues doCallouts.
406 ClientRequestContext::clientAccessCheckDone(int answer
)
408 acl_checklist
= NULL
;
411 debug(85, 2) ("The request %s %s is %s, because it matched '%s'\n",
412 RequestMethodStr
[http
->request
->method
], http
->uri
,
413 answer
== ACCESS_ALLOWED
? "ALLOWED" : "DENIED",
414 AclMatchedName
? AclMatchedName
: "NO ACL's");
415 char const *proxy_auth_msg
= "<null>";
// Prefer the connection's auth state; fall back to the request's.
417 if (http
->getConn().getRaw() != NULL
&& http
->getConn()->auth_user_request
!= NULL
)
418 proxy_auth_msg
= http
->getConn()->auth_user_request
->denyMessage("<null>");
419 else if (http
->request
->auth_user_request
!= NULL
)
420 proxy_auth_msg
= http
->request
->auth_user_request
->denyMessage("<null>");
422 if (answer
!= ACCESS_ALLOWED
) {
424 int require_auth
= (answer
== ACCESS_REQ_PROXY_AUTH
|| aclIsProxyAuth(AclMatchedName
));
425 debug(85, 5) ("Access Denied: %s\n", http
->uri
);
426 debug(85, 5) ("AclMatchedName = %s\n",
427 AclMatchedName
? AclMatchedName
: "<null>");
430 debug(33, 5) ("Proxy Auth Message = %s\n",
431 proxy_auth_msg
? proxy_auth_msg
: "<null>");
434 * NOTE: get page_id here, based on AclMatchedName because if
435 * USE_DELAY_POOLS is enabled, then AclMatchedName gets clobbered in
436 * the clientCreateStoreEntry() call just below. Pedro Ribeiro
439 page_id
= aclGetDenyInfoPage(&Config
.denyInfoList
, AclMatchedName
, answer
!= ACCESS_REQ_PROXY_AUTH
);
441 http
->logType
= LOG_TCP_DENIED
;
444 if (!http
->flags
.accel
) {
445 /* Proxy authorisation needed */
446 status
= HTTP_PROXY_AUTHENTICATION_REQUIRED
;
448 /* WWW authorisation needed */
449 status
= HTTP_UNAUTHORIZED
;
452 if (page_id
== ERR_NONE
)
453 page_id
= ERR_CACHE_ACCESS_DENIED
;
455 status
= HTTP_FORBIDDEN
;
457 if (page_id
== ERR_NONE
)
458 page_id
= ERR_ACCESS_DENIED
;
// Locate the reply context one node above the stream tail and hand it
// the error to render.
461 clientStreamNode
*node
= (clientStreamNode
*)http
->client_stream
.tail
->prev
->data
;
462 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
464 repContext
->setReplyToError(page_id
, status
,
465 http
->request
->method
, NULL
,
466 http
->getConn().getRaw() != NULL
? &http
->getConn()->peer
.sin_addr
: &no_addr
, http
->request
,
467 NULL
, http
->getConn().getRaw() != NULL
468 && http
->getConn()->auth_user_request
? http
->getConn()->
469 auth_user_request
: http
->request
->auth_user_request
);
470 node
= (clientStreamNode
*)http
->client_stream
.tail
->data
;
471 clientStreamRead(node
, http
, node
->readBuffer
);
475 /* ACCESS_ALLOWED continues here ... */
476 safe_free(http
->uri
);
478 http
->uri
= xstrdup(urlCanonical(http
->request
));
// Starts the pre-cache REQMOD ICAP service-selection check; the result is
// delivered via icapAclCheckDoneWrapper.
485 ClientRequestContext::icapAccessCheck()
487 ICAPAccessCheck
*icap_access_check
;
489 icap_access_check
= new ICAPAccessCheck(ICAP::methodReqmod
, ICAP::pointPreCache
, http
->request
, NULL
, icapAclCheckDoneWrapper
, this);
491 if (icap_access_check
!= NULL
) {
492 icap_access_check
->check();
// C callback bridging the ICAP service-selection result back into the
// ClientRequestContext, guarding against a freed request.
500 icapAclCheckDoneWrapper(ICAPServiceRep::Pointer service
, void *data
)
502 ClientRequestContext
*calloutContext
= (ClientRequestContext
*)data
;
504 if (!calloutContext
->httpStateIsValid())
507 calloutContext
->icapAclCheckDone(service
);
// Attempts to start the selected ICAP transaction; a start failure is
// tolerated when no service was selected or the service is bypassable,
// otherwise the essential-service failure path is taken.
511 ClientRequestContext::icapAclCheckDone(ICAPServiceRep::Pointer service
)
513 debugs(93,3,HERE
<< this << " icapAclCheckDone called");
516 if (http
->startIcap(service
))
519 if (!service
|| service
->bypass
) {
520 // handle ICAP start failure when no service was selected
521 // or where the selected service was optional
526 // handle start failure for an essential ICAP service
527 http
->handleIcapFailure();
// Callback for the url_rewrite_access ACL check: launches the redirector
// helper when allowed, otherwise completes the redirect stage with a NULL
// (no-rewrite) result.
533 clientRedirectAccessCheckDone(int answer
, void *data
)
535 ClientRequestContext
*context
= (ClientRequestContext
*)data
;
536 ClientHttpRequest
*http
= context
->http
;
537 context
->acl_checklist
= NULL
;
539 if (answer
== ACCESS_ALLOWED
)
540 redirectStart(http
, clientRedirectDoneWrapper
, context
);
542 context
->clientRedirectDone(NULL
);
// Starts the redirector stage: runs url_rewrite_access first when
// configured, otherwise calls the redirector helper directly.
546 ClientRequestContext::clientRedirectStart()
548 debug(33, 5) ("clientRedirectStart: '%s'\n", http
->uri
);
550 if (Config
.accessList
.redirector
) {
551 acl_checklist
= clientAclChecklistCreate(Config
.accessList
.redirector
, http
);
552 acl_checklist
->nonBlockingCheck(clientRedirectAccessCheckDone
, this);
554 redirectStart(http
, clientRedirectDoneWrapper
, this);
// Decides whether a request may be forwarded through the cache hierarchy
// (peers/parents) rather than direct: rules out IMS-without-private-keys,
// authenticated requests, TRACE, non-GET, hierarchy_stoplist matches and
// loop-detected requests, then delegates to per-protocol cachability.
558 clientHierarchical(ClientHttpRequest
* http
)
560 const char *url
= http
->uri
;
561 HttpRequest
*request
= http
->request
;
562 method_t method
= request
->method
;
563 const wordlist
*p
= NULL
;
566 * IMS needs a private key, so we can use the hierarchy for IMS only if our
567 * neighbors support private keys
570 if (request
->flags
.ims
&& !neighbors_do_private_keys
)
574 * This is incorrect: authenticating requests can be sent via a hierarchy
575 * (they can even be cached if the correct headers are set on the reply)
577 if (request
->flags
.auth
)
580 if (method
== METHOD_TRACE
)
583 if (method
!= METHOD_GET
)
586 /* scan hierarchy_stoplist */
587 for (p
= Config
.hierarchy_stoplist
; p
; p
= p
->next
)
588 if (strstr(url
, p
->key
))
591 if (request
->flags
.loopdetect
)
594 if (request
->protocol
== PROTO_HTTP
)
595 return httpCachable(method
);
597 if (request
->protocol
== PROTO_GOPHER
)
598 return gopherCachable(request
);
600 if (request
->protocol
== PROTO_CACHEOBJ
)
// Translates raw request headers into request flags: If-Modified-Since,
// Pragma/Cache-Control no-cache, the IE-refresh/reload-into-ims hacks,
// Range (GET/HEAD only), Authorization, Via-based forwarding-loop
// detection, X-Forwarded-For accounting, TRACE Max-Forwards, cachability
// and hierarchy eligibility.
608 clientInterpretRequestHeaders(ClientHttpRequest
* http
)
610 HttpRequest
*request
= http
->request
;
611 HttpHeader
*req_hdr
= &request
->header
;
613 #if !(ESI) || defined(USE_USERAGENT_LOG) || defined(USE_REFERER_LOG)
618 request
->imslen
= -1;
619 request
->ims
= req_hdr
->getTime(HDR_IF_MODIFIED_SINCE
);
621 if (request
->ims
> 0)
622 request
->flags
.ims
= 1;
626 * We ignore Cache-Control as per the Edge Architecture Section 3. See
627 * www.esi.org for more information.
631 if (req_hdr
->has(HDR_PRAGMA
)) {
632 String s
= req_hdr
->getList(HDR_PRAGMA
);
634 if (strListIsMember(&s
, "no-cache", ','))
640 if (request
->cache_control
)
641 if (EBIT_TEST(request
->cache_control
->mask
, CC_NO_CACHE
))
645 * Work around for supporting the Reload button in IE browsers when Squid
646 * is used as an accelerator or transparent proxy, by turning accelerated
647 * IMS request to no-cache requests. Now knows about IE 5.5 fix (is
648 * actually only fixed in SP1, but we can't tell whether we are talking to
649 * SP1 or not so all 5.5 versions are treated 'normally').
651 if (Config
.onoff
.ie_refresh
) {
652 if (http
->flags
.accel
&& request
->flags
.ims
) {
653 if ((str
= req_hdr
->getStr(HDR_USER_AGENT
))) {
654 if (strstr(str
, "MSIE 5.01") != NULL
)
656 else if (strstr(str
, "MSIE 5.0") != NULL
)
658 else if (strstr(str
, "MSIE 4.") != NULL
)
660 else if (strstr(str
, "MSIE 3.") != NULL
)
670 if (Config
.onoff
.reload_into_ims
)
671 request
->flags
.nocache_hack
= 1;
672 else if (refresh_nocache_hack
)
673 request
->flags
.nocache_hack
= 1;
677 request
->flags
.nocache
= 1;
680 /* ignore range header in non-GETs or non-HEADs */
681 if (request
->method
== METHOD_GET
|| request
->method
== METHOD_HEAD
) {
682 request
->range
= req_hdr
->getRange();
684 if (request
->range
) {
685 request
->flags
.range
= 1;
686 clientStreamNode
*node
= (clientStreamNode
*)http
->client_stream
.tail
->data
;
687 /* XXX: This is suboptimal. We should give the stream the range set,
688 * and thereby let the top of the stream set the offset when the
689 * size becomes known. As it is, we will end up requesting from 0
690 * for evey -X range specification.
691 * RBC - this may be somewhat wrong. We should probably set the range
692 * iter up at this point.
694 node
->readBuffer
.offset
= request
->range
->lowestOffset(0);
695 http
->range_iter
.pos
= request
->range
->begin();
696 http
->range_iter
.valid
= true;
700 /* Only HEAD and GET requests permit a Range or Request-Range header.
701 * If these headers appear on any other type of request, delete them now.
704 req_hdr
->delById(HDR_RANGE
);
705 req_hdr
->delById(HDR_REQUEST_RANGE
);
706 request
->range
= NULL
;
709 if (req_hdr
->has(HDR_AUTHORIZATION
))
710 request
->flags
.auth
= 1;
712 if (request
->login
[0] != '\0')
713 request
->flags
.auth
= 1;
715 if (req_hdr
->has(HDR_VIA
)) {
716 String s
= req_hdr
->getList(HDR_VIA
);
718 * ThisCache cannot be a member of Via header, "1.0 ThisCache" can.
719 * Note ThisCache2 has a space prepended to the hostname so we don't
720 * accidentally match super-domains.
723 if (strListIsSubstr(&s
, ThisCache2
, ',')) {
724 debugObj(33, 1, "WARNING: Forwarding loop detected for:\n",
725 request
, (ObjPackMethod
) & httpRequestPack
);
726 request
->flags
.loopdetect
= 1;
730 fvdbCountVia(s
.buf());
737 #if USE_USERAGENT_LOG
738 if ((str
= req_hdr
->getStr(HDR_USER_AGENT
)))
739 logUserAgent(fqdnFromAddr(http
->getConn().getRaw() ? http
->getConn()->log_addr
: no_addr
), str
);
744 if ((str
= req_hdr
->getStr(HDR_REFERER
)))
745 logReferer(fqdnFromAddr(http
->getConn().getRaw() ? http
->getConn()->log_addr
: no_addr
), str
, http
->log_uri
);
750 if (req_hdr
->has(HDR_X_FORWARDED_FOR
)) {
751 String s
= req_hdr
->getList(HDR_X_FORWARDED_FOR
);
752 fvdbCountForw(s
.buf());
757 if (request
->method
== METHOD_TRACE
) {
758 request
->max_forwards
= req_hdr
->getInt(HDR_MAX_FORWARDS
);
761 request
->flags
.cachable
= http
->request
->cacheable();
763 if (clientHierarchical(http
))
764 request
->flags
.hierarchical
= 1;
766 debug(85, 5) ("clientInterpretRequestHeaders: REQ_NOCACHE = %s\n",
767 request
->flags
.nocache
? "SET" : "NOT SET");
769 debug(85, 5) ("clientInterpretRequestHeaders: REQ_CACHABLE = %s\n",
770 request
->flags
.cachable
? "SET" : "NOT SET");
772 debug(85, 5) ("clientInterpretRequestHeaders: REQ_HIERARCHICAL = %s\n",
773 request
->flags
.hierarchical
? "SET" : "NOT SET");
// C callback bridging the redirector helper's reply back into the
// ClientRequestContext, guarding against a freed request.
777 clientRedirectDoneWrapper(void *data
, char *result
)
779 ClientRequestContext
*calloutContext
= (ClientRequestContext
*)data
;
781 if (!calloutContext
->httpStateIsValid())
784 calloutContext
->clientRedirectDone(result
);
// Handles the redirector result. A "3xx:URL" reply becomes a client-visible
// HTTP redirect; any other changed URL replaces the request: headers,
// addresses, flags, auth state, body pipe and extacl credentials are
// carried over from the old request to the rewritten one.
788 ClientRequestContext::clientRedirectDone(char *result
)
790 HttpRequest
*new_request
= NULL
;
791 HttpRequest
*old_request
= http
->request
;
792 debug(85, 5) ("clientRedirectDone: '%s' result=%s\n", http
->uri
,
793 result
? result
: "NULL");
794 assert(redirect_state
== REDIRECT_PENDING
);
795 redirect_state
= REDIRECT_DONE
;
798 http_status status
= (http_status
) atoi(result
);
800 if (status
== HTTP_MOVED_PERMANENTLY
801 || status
== HTTP_MOVED_TEMPORARILY
802 || status
== HTTP_SEE_OTHER
803 || status
== HTTP_TEMPORARY_REDIRECT
) {
806 if ((t
= strchr(result
, ':')) != NULL
) {
807 http
->redirect
.status
= status
;
808 http
->redirect
.location
= xstrdup(t
+ 1);
810 debug(85, 1) ("clientRedirectDone: bad input: %s\n", result
);
812 } else if (strcmp(result
, http
->uri
))
813 new_request
= HttpRequest::CreateFromUrlAndMethod(result
, old_request
->method
);
817 safe_free(http
->uri
);
818 http
->uri
= xstrdup(urlCanonical(new_request
));
819 new_request
->http_ver
= old_request
->http_ver
;
820 new_request
->header
.append(&old_request
->header
);
821 new_request
->client_addr
= old_request
->client_addr
;
822 new_request
->client_port
= old_request
->client_port
;
823 new_request
->my_addr
= old_request
->my_addr
;
824 new_request
->my_port
= old_request
->my_port
;
825 new_request
->flags
= old_request
->flags
;
826 new_request
->flags
.redirected
= 1;
828 if (old_request
->auth_user_request
) {
829 new_request
->auth_user_request
= old_request
->auth_user_request
;
831 new_request
->auth_user_request
->lock()
// Hand the client's request body over to the rewritten request.
836 if (old_request
->body_pipe
!= NULL
) {
837 new_request
->body_pipe
= old_request
->body_pipe
;
838 old_request
->body_pipe
= NULL
;
839 debugs(0,0,HERE
<< "redirecting body_pipe " << new_request
->body_pipe
<< " from request " << old_request
<< " to " << new_request
);
842 new_request
->content_length
= old_request
->content_length
;
843 new_request
->extacl_user
= old_request
->extacl_user
;
844 new_request
->extacl_passwd
= old_request
->extacl_passwd
;
845 new_request
->flags
.proxy_keepalive
= old_request
->flags
.proxy_keepalive
;
846 HTTPMSGUNLOCK(old_request
);
847 http
->request
= HTTPMSGLOCK(new_request
);
850 /* FIXME PIPELINE: This is innacurate during pipelining */
852 if (http
->getConn() != NULL
)
853 fd_note(http
->getConn()->fd
, http
->uri
);
// Starts the non-blocking "no_cache" access-list check; result arrives via
// checkNoCacheDoneWrapper.
861 ClientRequestContext::checkNoCache()
863 acl_checklist
= clientAclChecklistCreate(Config
.accessList
.noCache
, http
);
864 acl_checklist
->nonBlockingCheck(checkNoCacheDoneWrapper
, this);
// C callback bridging the no_cache ACL result back into the
// ClientRequestContext, guarding against a freed request.
868 checkNoCacheDoneWrapper(int answer
, void *data
)
870 ClientRequestContext
*calloutContext
= (ClientRequestContext
*) data
;
872 if (!calloutContext
->httpStateIsValid())
875 calloutContext
->checkNoCacheDone(answer
);
// Records the no_cache verdict directly as the request's cachable flag.
879 ClientRequestContext::checkNoCacheDone(int answer
)
881 acl_checklist
= NULL
;
882 http
->request
->flags
.cachable
= answer
;
// Routes the request after callouts: an un-redirected CONNECT bypasses the
// store/client-stream path and goes straight to SSL tunnelling; everything
// else continues via httpStart (outside this view).
887 * Identify requests that do not go through the store and client side stream
888 * and forward them to the appropriate location. All other requests, request
892 ClientHttpRequest::processRequest()
894 debug(85, 4) ("clientProcessRequest: %s '%s'\n",
895 RequestMethodStr
[request
->method
], uri
);
897 if (request
->method
== METHOD_CONNECT
&& !redirect
.status
) {
898 logType
= LOG_TCP_MISS
;
899 sslStart(this, &out
.size
, &al
.http
.code
);
// Kicks off reply generation by issuing the first read on the client
// stream tail; asserts that no output has been produced yet.
907 ClientHttpRequest::httpStart()
909 PROF_start(httpStart
);
910 logType
= LOG_TAG_NONE
;
911 debug(85, 4) ("ClientHttpRequest::httpStart: %s for '%s'\n",
912 log_tags
[logType
], uri
);
913 /* no one should have touched this */
914 assert(out
.offset
== 0);
915 /* Use the Stream Luke */
916 clientStreamNode
*node
= (clientStreamNode
*)client_stream
.tail
->data
;
917 clientStreamRead(node
, this, node
->readBuffer
);
918 PROF_stop(httpStart
);
// Returns whether the bytes already sent (out.offset) cover the reply's
// declared body size.
922 ClientHttpRequest::gotEnough() const
924 /** TODO: should be querying the stream. */
926 memObject()->getReply()->bodySize(request
->method
);
927 assert(contentLength
>= 0);
929 if (out
.offset
< contentLength
)
// Setter for the reply-body size cap (0 is treated as "disabled" by
// isReplyBodyTooLarge below).
936 ClientHttpRequest::maxReplyBodySize(ssize_t clen
)
938 maxReplyBodySize_
= clen
;
// Getter for the reply-body size cap.
942 ClientHttpRequest::maxReplyBodySize() const
944 return maxReplyBodySize_
;
// True when a known reply length exceeds the configured cap; 0 cap means
// the check is disabled, and an unknown length is treated as not-too-large.
948 ClientHttpRequest::isReplyBodyTooLarge(ssize_t clen
) const
950 if (0 == maxReplyBodySize())
951 return 0; /* disabled */
954 return 0; /* unknown */
956 return clen
> maxReplyBodySize();
// Setter for the request's StoreEntry (body outside this extraction's view).
960 ClientHttpRequest::storeEntry(StoreEntry
*newEntry
)
// Swaps the entry retained for access logging: unlocks the previous one,
// stores and locks the new one.
966 ClientHttpRequest::loggingEntry(StoreEntry
*newEntry
)
969 loggingEntry_
->unlock();
971 loggingEntry_
= newEntry
;
974 loggingEntry_
->lock()
980 * doCallouts() - This function controls the order of "callout"
981 * executions, including non-blocking access control checks, the
982 * redirector, and ICAP. Previously, these callouts were chained
983 * together such that "clientAccessCheckDone()" would call
984 * "clientRedirectStart()" and so on.
986 * The ClientRequestContext (aka calloutContext) class holds certain
987 * state data for the callout/callback operations. Previously
988 * ClientHttpRequest would sort of hand off control to ClientRequestContext
989 * for a short time. ClientRequestContext would then delete itself
990 * and pass control back to ClientHttpRequest when all callouts
993 * This caused some problems for ICAP because we want to make the
994 * ICAP callout after checking ACLs, but before checking the no_cache
995 * list. We can't stuff the ICAP state into the ClientRequestContext
996 * class because we still need the ICAP state after ClientRequestContext
999 * Note that ClientRequestContext is created before the first call
1002 * If one of the callouts notices that ClientHttpRequest is no
1003 * longer valid, it should call cbdataReferenceDone() so that
1004 * ClientHttpRequest's reference count goes to zero and it will get
1005 * deleted. ClientHttpRequest will then delete ClientRequestContext.
1007 * Note that we set the _done flags here before actually starting
1008 * the callout. This is strictly for convenience.
// Drives the callout pipeline in order — http_access ACL, ICAP service
// selection (when ICAP is on), redirector, header interpretation, no_cache
// ACL — running each stage exactly once via the context's _done flags
// (set before the stage starts); when all are done the context is torn
// down and the request proceeds (headersLog).
1012 ClientHttpRequest::doCallouts()
1014 assert(calloutContext
);
1016 if (!calloutContext
->http_access_done
) {
1017 calloutContext
->http_access_done
= true;
1018 calloutContext
->clientAccessCheck();
1023 if (TheICAPConfig
.onoff
&& !calloutContext
->icap_acl_check_done
) {
1024 calloutContext
->icap_acl_check_done
= true;
1025 calloutContext
->icapAccessCheck();
1031 if (!calloutContext
->redirect_done
) {
1032 calloutContext
->redirect_done
= true;
1033 assert(calloutContext
->redirect_state
== REDIRECT_NONE
);
1035 if (Config
.Program
.redirect
) {
1036 calloutContext
->redirect_state
= REDIRECT_PENDING
;
1037 calloutContext
->clientRedirectStart();
1042 if (!calloutContext
->interpreted_req_hdrs
) {
1043 calloutContext
->interpreted_req_hdrs
= 1;
1044 clientInterpretRequestHeaders(this);
1047 if (!calloutContext
->no_cache_done
) {
1048 calloutContext
->no_cache_done
= true;
1050 if (Config
.accessList
.noCache
&& request
->flags
.cachable
) {
1051 calloutContext
->checkNoCache();
// All callouts finished: release the context's back-reference and free it.
1056 cbdataReferenceDone(calloutContext
->http
);
1057 delete calloutContext
;
1058 calloutContext
= NULL
;
1061 headersLog(0, 1, request
->method
, request
);
1067 #ifndef _USE_INLINE_
1068 #include "client_side_request.cci"
// Starts a REQMOD ICAP transaction for this request; fails (caller handles
// it) when no service was given or the service is broken.
1073 * Initiate an ICAP transaction. Return false on errors.
1074 * The caller must handle errors.
1077 ClientHttpRequest::startIcap(ICAPServiceRep::Pointer service
)
1079 debugs(85, 3, HERE
<< this << " ClientHttpRequest::startIcap() called");
1081 debug(85,3)("ClientHttpRequest::startIcap fails: lack of service\n");
1084 if (service
->broken()) {
1085 debug(85,3)("ClientHttpRequest::startIcap fails: broken service\n");
1089 assert(!icapHeadSource
);
1090 assert(!icapBodySource
);
1091 icapHeadSource
= new ICAPModXact(this, request
, NULL
, service
);
1092 ICAPModXact::AsyncStart(icapHeadSource
.getRaw());
// Handles adapted headers from the ICAP REQMOD transaction. An adapted
// HttpRequest replaces the original request (URI re-derived for logging);
// an HttpReply means ICAP satisfied the request itself, so a store entry
// is created, the reply installed, and body delivery (if any) subscribed
// to via icapBodySource.
1097 ClientHttpRequest::noteIcapHeadersAdapted()
1099 assert(cbdataReferenceValid(this)); // indicates bug
1101 HttpMsg
*msg
= icapHeadSource
->adapted
.header
;
1104 if (HttpRequest
*new_req
= dynamic_cast<HttpRequest
*>(msg
)) {
1106 * Replace the old request with the new request.
1108 HTTPMSGUNLOCK(request
);
1109 request
= HTTPMSGLOCK(new_req
);
1111 * Store the new URI for logging
1114 uri
= xstrdup(urlCanonical(request
));
1115 setLogUri(this, urlCanonicalClean(request
));
1116 assert(request
->method
);
1117 } else if (HttpReply
*new_rep
= dynamic_cast<HttpReply
*>(msg
)) {
1118 debugs(85,3,HERE
<< "REQMOD reply is HTTP reply");
1120 // subscribe to receive reply body
1121 if (new_rep
->body_pipe
!= NULL
) {
1122 icapBodySource
= new_rep
->body_pipe
;
1123 assert(icapBodySource
->setConsumerIfNotLate(this));
1126 clientStreamNode
*node
= (clientStreamNode
*)client_stream
.tail
->prev
->data
;
1127 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1128 repContext
->createStoreEntry(request
->method
, request
->flags
);
1130 EBIT_CLR(storeEntry()->flags
, ENTRY_FWD_HDR_WAIT
);
1131 request_satisfaction_mode
= true;
1132 request_satisfaction_offset
= 0;
1133 storeEntry()->replaceHttpReply(new_rep
);
1134 clientGetMoreData(node
, this);
1137 // we are done with getting headers (but may be receiving body)
1138 icapHeadSource
= NULL
;
1140 if (!request_satisfaction_mode
)
// ICAP aborted before delivering adapted headers: clear the head source
// (no body source may exist yet) and take the failure path.
1145 ClientHttpRequest::noteIcapHeadersAborted()
1147 icapHeadSource
= NULL
;
1148 assert(!icapBodySource
);
1149 handleIcapFailure();
// Request-satisfaction body pump: checks out the ICAP body pipe buffer,
// writes its entire content into the store entry at the running offset,
// consumes it, and ends satisfaction once the pipe is exhausted.
1153 ClientHttpRequest::noteMoreBodyDataAvailable(BodyPipe
&)
1155 assert(request_satisfaction_mode
);
1156 assert(icapBodySource
!= NULL
);
1158 if (const size_t contentSize
= icapBodySource
->buf().contentSize()) {
1159 BodyPipeCheckout
bpc(*icapBodySource
);
1160 const StoreIOBuffer
ioBuf(&bpc
.buf
, request_satisfaction_offset
);
1161 storeEntry()->write(ioBuf
);
1162 // assume can write everything
1163 request_satisfaction_offset
+= contentSize
;
1164 bpc
.buf
.consume(contentSize
);
1168 if (icapBodySource
->exhausted())
1169 endRequestSatisfaction();
1170 // else wait for more body data
// The ICAP body producer finished; since noteMoreBodyDataAvailable always
// consumes everything, the pipe must already be exhausted here.
1174 ClientHttpRequest::noteBodyProductionEnded(BodyPipe
&)
1176 assert(!icapHeadSource
);
1177 if (icapBodySource
!= NULL
) { // did not end request satisfaction yet
1178 // We do not expect more because noteMoreBodyDataAvailable always
1179 // consumes everything. We do not even have a mechanism to consume
1180 // leftovers after noteMoreBodyDataAvailable notifications seize.
1181 assert(icapBodySource
->exhausted());
1182 endRequestSatisfaction();
// Finishes ICAP request satisfaction: detaches from the body pipe and
// marks the store entry complete.
1187 ClientHttpRequest::endRequestSatisfaction() {
1188 debugs(85,4, HERE
<< this << " ends request satisfaction");
1189 assert(request_satisfaction_mode
);
1190 stopConsumingFrom(icapBodySource
);
1192 // TODO: anything else needed to end store entry formation correctly?
1193 storeEntry()->complete();
// The ICAP body producer aborted mid-stream: detach and fail the callout.
1197 ClientHttpRequest::noteBodyProducerAborted(BodyPipe
&)
1199 assert(!icapHeadSource
);
1200 stopConsumingFrom(icapBodySource
);
1201 handleIcapFailure();
// Recovers from a failed ICAP REQMOD callout: if nothing has been written
// to the store or consumed from the request body, proceed with the
// unadapted request; otherwise answer the client with an
// ERR_ICAP_FAILURE / 500 error through the client stream.
1205 ClientHttpRequest::handleIcapFailure()
1207 debugs(85,3, HERE
<< "handleIcapFailure");
1209 const bool usedStore
= storeEntry() && !storeEntry()->isEmpty();
1210 const bool usedPipe
= request
->body_pipe
!= NULL
&&
1211 request
->body_pipe
->consumedSize() > 0;
1213 // XXX: we must not try to recover if the ICAP service is not bypassable!
1215 if (!usedStore
&& !usedPipe
) {
1216 debug(85,2)("WARNING: ICAP REQMOD callout failed, proceeding with original request\n");
1222 debugs(85,3, HERE
<< "ICAP REQMOD callout failed, responding with error");
1224 clientStreamNode
*node
= (clientStreamNode
*)client_stream
.tail
->prev
->data
;
1225 clientReplyContext
*repContext
= dynamic_cast<clientReplyContext
*>(node
->data
.getRaw());
1228 // The original author of the code also wanted to pass an errno to
1229 // setReplyToError, but it seems unlikely that the errno reflects the
1230 // true cause of the error at this point, so I did not pass it.
1231 ConnStateData::Pointer c
= getConn();
1232 repContext
->setReplyToError(ERR_ICAP_FAILURE
, HTTP_INTERNAL_SERVER_ERROR
,
1233 request
->method
, NULL
,
1234 (c
!= NULL
? &c
->peer
.sin_addr
: &no_addr
), request
, NULL
,
1235 (c
!= NULL
&& c
->auth_user_request
?
1236 c
->auth_user_request
: request
->auth_user_request
));
1238 node
= (clientStreamNode
*)client_stream
.tail
->data
;
1239 clientStreamRead(node
, this, node
->readBuffer
);