// Because it failed verification, or someone bypassed the security tests
// we cannot cache the response for sharing between clients.
// TODO: update cache to store for particular clients only (going to same Host: and destination IP)
- if (!flags.hostVerified() && (flags.intercepted() || flags.spoofClientIp()))
+ if (!flags.hostVerified && (flags.intercepted || flags.spoof_client_ip))
return false;
if (protocol == AnyP::PROTO_HTTP)
bool
HttpRequest::conditional() const
{
- return flags.hasIMS() ||
+ return flags.ims ||
header.has(HDR_IF_MATCH) ||
header.has(HDR_IF_NONE_MATCH);
}
{
public:
RequestFlags():
- nocache(false), ims(false), auth_(false), cachable(false),
- hierarchical_(false), loopdetect(false), proxy_keepalive(false),
- proxying_(false), refresh_(false), redirected(false),
- need_validation(false), fail_on_validation_err(false),
- stale_if_hit(false), nocache_hack(false), accelerated_(false),
- ignore_cc(false), intercepted_(false), hostVerified_(false),
- spoof_client_ip(false), internal(false), internalclient(false),
- must_keepalive(false), connection_auth_wanted(false),
- connection_auth_disabled(false), connection_proxy_auth(false),
- pinned_(false), canRePin_(false), authSent_(false), noDirect_(false),
- chunkedReply_(false), streamError_(false), sslPeek_(false),
- doneFollowXForwardedFor(!FOLLOW_X_FORWARDED_FOR),
+ nocache(0), ims(0), auth(0), cachable(0),
+ hierarchical(0), loopdetect(0), proxy_keepalive(0), proxying(0),
+ refresh(0), redirected(0), need_validation(0),
+ fail_on_validation_err(0), stale_if_hit(0), accelerated(0),
+ ignore_cc(0), intercepted(0), hostVerified(0), spoof_client_ip(0),
+ internal(0), internalclient(0), must_keepalive(0), connection_auth(0),
+ connection_auth_disabled(0), connection_proxy_auth(0), pinned(0),
+ canRePin(0), auth_sent(0), no_direct(0), chunked_reply(0), stream_error(0), sslPeek(0),
+ done_follow_x_forwarded_for(!FOLLOW_X_FORWARDED_FOR),
sslBumped_(false), destinationIPLookedUp_(false), resetTCP_(false),
- isRanged_(false)
- {}
+ isRanged_(false) {
+#if USE_HTTP_VIOLATIONS
+ nocache_hack = 0;
+#endif
+ }
+
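+ /* Request state flags: single-bit fields that callers read and assign directly. */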
+ unsigned int nocache :1; ///< whether the response to this request may be READ from cache
+ unsigned int ims :1; ///< request carries an If-Modified-Since header
+ unsigned int auth :1; ///< request carries authentication credentials
+ unsigned int cachable :1; ///< whether the response to this request may be stored in the cache
+ unsigned int hierarchical :1; ///< request may be forwarded to cache peers/parents
+ unsigned int loopdetect :1; ///< a forwarding loop was detected for this request
+ unsigned int proxy_keepalive :1; ///< keep the client connection alive after replying
+ unsigned int proxying :1; /* this should be killed, also in httpstateflags */
+ unsigned int refresh :1;
+ unsigned int redirected :1; ///< request URL was re-written by a redirector
+ unsigned int need_validation :1;
+ unsigned int fail_on_validation_err :1; ///< whether we should fail if validation fails
+ unsigned int stale_if_hit :1; ///< reply is stale if it is a hit
+#if USE_HTTP_VIOLATIONS
+ /* for changing/ignoring no-cache requests */
+ /* TODO: remove the conditional definition, move ifdef to setter */
+ unsigned int nocache_hack :1;
+#endif
+ unsigned int accelerated :1; ///< request is accelerated
+ unsigned int ignore_cc :1; ///< ignore Cache-Control
+ unsigned int intercepted :1; ///< intercepted request
+ unsigned int hostVerified :1; ///< whether the Host: header passed verification
+ unsigned int spoof_client_ip :1; /**< spoof client ip if possible */
+ unsigned int internal :1;
+ unsigned int internalclient :1;
+ unsigned int must_keepalive :1;
+ unsigned int connection_auth :1; /**< Request wants connection oriented auth */
+ unsigned int connection_auth_disabled :1; /**< Connection oriented auth cannot be supported */
+ unsigned int connection_proxy_auth :1; /**< Request wants connection oriented proxy auth */
+ unsigned int pinned :1; /* Request sent on a pinned connection */
+ unsigned int canRePin :1; ///< OK to reopen a failed pinned connection
+ unsigned int auth_sent :1; /* Authentication forwarded */
+ unsigned int no_direct :1; /* Deny direct forwarding unless overridden by always_direct. Used in accelerator mode */
+ unsigned int chunked_reply :1; /**< Reply with chunked transfer encoding */
+ unsigned int stream_error :1; /**< Whether a stream error has occurred */
+ unsigned int sslPeek :1; ///< internal ssl-bump request to get server cert
+
+#if FOLLOW_X_FORWARDED_FOR
+ /* TODO: move from conditional definition to conditional setting */
+#endif /* FOLLOW_X_FORWARDED_FOR */
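+ /* done_follow_x_forwarded_for itself remains a private member below, accessed via the doneFollowXFF() helpers. */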
// When adding new flags, please update cloneAdaptationImmune() as needed.
bool resetTCP() const;
void setSslBumped(bool newValue=true) { sslBumped_=newValue; }
void clearSslBumpeD() { sslBumped_=false; }
- bool doneFollowXFF() const { return doneFollowXForwardedFor; }
+ bool doneFollowXFF() const { return done_follow_x_forwarded_for; }
void setDoneFollowXFF() {
- doneFollowXForwardedFor = true;
+ done_follow_x_forwarded_for = true;
}
void clearDoneFollowXFF() {
/* do not allow clearing if FOLLOW_X_FORWARDED_FOR is unset */
- doneFollowXForwardedFor = false || !FOLLOW_X_FORWARDED_FOR;
+ done_follow_x_forwarded_for = false || !FOLLOW_X_FORWARDED_FOR;
}
-
- bool sslPeek() const { return sslPeek_; }
- void setSslPeek() { sslPeek_=true; }
- void clearSslPeek() { sslPeek_=false; }
-
- bool hadStreamError() const { return streamError_; }
- void setStreamError() { streamError_ = true; }
- void clearStreamError() { streamError_ = false; }
-
- bool isReplyChunked() const { return chunkedReply_; }
- void markReplyChunked() { chunkedReply_ = true; }
-
- void setNoDirect() { noDirect_=true; }
- bool noDirect() const{ return noDirect_; }
-
- bool authSent() const { return authSent_; }
- void markAuthSent() { authSent_=true;}
-
- bool canRePin() const { return canRePin_; }
- void allowRepinning() { canRePin_=true; }
-
- void markPinned() { pinned_ = true; }
- void clearPinned() { pinned_ = false; }
- bool pinned() const { return pinned_; }
-
- //XXX: oddly this is set in client_side_request.cc, but never checked.
- bool wantConnectionProxyAuth() const { return connection_proxy_auth; }
- void requestConnectionProxyAuth() { connection_proxy_auth=true; }
-
- void disableConnectionAuth() { connection_auth_disabled=true; }
- bool connectionAuthDisabled() const { return connection_auth_disabled; }
-
- void wantConnectionAuth() { connection_auth_wanted=true; }
- bool connectionAuthWanted() const { return connection_auth_wanted; }
-
- void setMustKeepalive() { must_keepalive = true; }
- bool mustKeepalive() const { return must_keepalive; }
-
- //XXX: oddly this is set in client_side_request.cc but never checked.
- void setInternalClient() { internalclient=true;}
-
- void markInternal() { internal=true; }
- bool isInternal() const { return internal; }
-
- bool spoofClientIp() const { return spoof_client_ip; }
- void setSpoofClientIp() { spoof_client_ip = true; }
-
- bool hostVerified() const { return hostVerified_; }
- void markHostVerified() { hostVerified_=true; }
-
- bool intercepted() const { return intercepted_; }
- void markIntercepted() { intercepted_=true; }
-
- bool ignoringCacheControl() const { return ignore_cc; }
- void ignoreCacheControl() { ignore_cc=true; }
-
- bool accelerated() const { return accelerated_; }
- void markAccelerated() { accelerated_ = true; }
-
- /* nocache_hack is only enabled if USE_HTTP_VIOLATIONS is set at build-time.
- * Compilers will have an easy time optimizing to a NOP otherwise. */
- void hackNocache() { if (USE_HTTP_VIOLATIONS) nocache_hack=true; }
- bool noCacheHackEnabled() const { return USE_HTTP_VIOLATIONS && nocache_hack; }
-
- void setStaleIfHit() { stale_if_hit=true; }
- void clearStaleIfHit() { stale_if_hit=false; }
- bool staleIfHit() const { return stale_if_hit; }
-
- void setFailOnValidationError() { fail_on_validation_err=true; }
- bool failOnValidationError() const { return fail_on_validation_err; }
-
- bool validationNeeded() const { return need_validation; }
- void setNeedValidation() { need_validation=true; }
-
- bool isRedirected() const { return redirected; }
- void markRedirected() { redirected=true; }
-
- bool refresh() const { return refresh_; }
- void setRefresh() { refresh_ = true; }
-
- bool proxying() const { return proxying_; }
- void setProxying() { proxying_ = true; }
- void clearProxying() { proxying_ = false; }
-
- bool proxyKeepalive() const { return proxy_keepalive; }
- void setProxyKeepalive() { proxy_keepalive=true;}
- void clearProxyKeepalive() { proxy_keepalive=false; }
-
- bool loopDetect() const { return loopdetect; }
- void setLoopDetect() { loopdetect = 1; }
-
- bool hierarchical() const { return hierarchical_; }
- void setHierarchical() { hierarchical_=true; }
- void clearHierarchical() { hierarchical_=true; }
-
- bool isCachable() const { return cachable; }
- void setCachable(bool newValue=true) { cachable = newValue; }
- void setNotCachable() { cachable = false; }
-
- bool hasAuth() const { return auth_; }
- void markAuth() { auth_=true; }
-
- bool hasIMS() const { return ims; }
- void setIMS() { ims=true; }
- void clearIMS() { ims=false; }
-
- bool noCache() const { return nocache; }
- void setNocache() { nocache=true;}
private:
- bool nocache :1; ///< whether the response to this request may be READ from cache
- bool ims :1;
- bool auth_ :1;
- bool cachable :1; ///< whether the response to thie request may be stored in the cache
- bool hierarchical_ :1;
- bool loopdetect :1;
- bool proxy_keepalive :1;
- bool proxying_ :1; /* this should be killed, also in httpstateflags */
- bool refresh_ :1;
- bool redirected :1;
- bool need_validation :1;
- bool fail_on_validation_err :1; ///< whether we should fail if validation fails
- bool stale_if_hit :1; ///< reply is stale if it is a hit
- /* for changing/ignoring no-cache requests. Unused unless USE_HTTP_VIOLATIONS */
- bool nocache_hack :1;
- bool accelerated_ :1; ///<request is accelerated
- bool ignore_cc :1; ///< ignore Cache-Control
- bool intercepted_ :1; ///< intercepted request
- bool hostVerified_ :1; ///< whether the Host: header passed verification
- bool spoof_client_ip :1; ///< spoof client ip if possible
- bool internal :1;
- bool internalclient :1;
- bool must_keepalive :1;
- bool connection_auth_wanted :1; /** Request wants connection oriented auth */
- bool connection_auth_disabled :1; ///< Connection oriented auth can't be supported
- bool connection_proxy_auth :1; ///< Request wants connection oriented auth
- bool pinned_ :1; ///< Request sent on a pinned connection
- bool canRePin_ :1; ///< OK to reopen a failed pinned connection
- bool authSent_ :1; ///< Authentication was forwarded
- /** Deny direct forwarding unless overriden by always_direct.
- * Used in accelerator mode */
- bool noDirect_ :1;
- bool chunkedReply_ :1; ///< Reply with chunked transfer encoding
- bool streamError_ :1; ///< Whether stream error has occured
- bool sslPeek_ :1; ///< internal ssl-bump request to get server cert
- /* doneFollowXForwardedFor is set by default to the opposite of
+
+ /* done_follow_x_forwarded_for is set by default to the opposite of
* compilation option FOLLOW_X_FORWARDED_FOR (so that it returns
- * always "done" if the build option is disabled).
+ * always "done" if the build option is disabled.
*/
- bool doneFollowXForwardedFor :1;
+ bool done_follow_x_forwarded_for :1;
bool sslBumped_ :1; /**< ssl-bumped request*/
bool destinationIPLookedUp_:1;
bool resetTCP_:1; ///< request to reset the TCP stream
// To resolve this we will force DIRECT and only to the original client destination.
// In which case, we also need this ACL to accurately match the destination
if (Config.onoff.client_dst_passthru && checklist->request &&
- (checklist->request->flags.intercepted() || checklist->request->flags.spoofClientIp())) {
+ (checklist->request->flags.intercepted || checklist->request->flags.spoof_client_ip)) {
assert(checklist->conn() && checklist->conn()->clientConnection != NULL);
return ACLIP::match(checklist->conn()->clientConnection->local);
}
return ACCESS_ALLOWED;
else
return ACCESS_DENIED;
- } else if (request->flags.accelerated()) {
+ } else if (request->flags.accelerated) {
/* WWW authorization on accelerated requests */
headertype = HDR_AUTHORIZATION;
- } else if (request->flags.intercepted() || request->flags.spoofClientIp()) {
+ } else if (request->flags.intercepted || request->flags.spoof_client_ip) {
debugs(28, DBG_IMPORTANT, "NOTICE: Authentication not applicable on intercepted requests.");
return ACCESS_DENIED;
} else {
++arg;
}
safe_free(lm_request->server_blob);
- lm_request->request->flags.setMustKeepalive();
- if (lm_request->request->flags.proxyKeepalive()) {
+ lm_request->request->flags.must_keepalive = 1;
+ if (lm_request->request->flags.proxy_keepalive) {
lm_request->server_blob = xstrdup(blob);
auth_user_request->user()->credentials(Auth::Handshake);
auth_user_request->denyMessage("Authentication in progress");
return;
/* Need keep-alive */
- if (!request->flags.proxyKeepalive() && request->flags.mustKeepalive())
+ if (!request->flags.proxy_keepalive && request->flags.must_keepalive)
return;
/* New request, no user details */
if (!keep_alive) {
/* drop the connection */
rep->header.delByName("keep-alive");
- request->flags.clearProxyKeepalive();
+ request->flags.proxy_keepalive = 0;
}
} else {
Auth::Negotiate::UserRequest *negotiate_request = dynamic_cast<Auth::Negotiate::UserRequest *>(auth_user_request.getRaw());
/* here it makes sense to drop the connection, as auth is
* tied to it, even if MAYBE the client could handle it - Kinkie */
rep->header.delByName("keep-alive");
- request->flags.clearProxyKeepalive();
+ request->flags.proxy_keepalive = 0;
/* fall through */
case Auth::Ok:
if (strncasecmp(reply, "TT ", 3) == 0) {
/* we have been given a blob to send to the client */
safe_free(lm_request->server_blob);
- lm_request->request->flags.setMustKeepalive();
- if (lm_request->request->flags.proxyKeepalive()) {
+ lm_request->request->flags.must_keepalive = 1;
+ if (lm_request->request->flags.proxy_keepalive) {
lm_request->server_blob = xstrdup(blob);
auth_user_request->user()->credentials(Auth::Handshake);
auth_user_request->denyMessage("Authentication in progress");
return;
/* Need keep-alive */
- if (!request->flags.proxyKeepalive() && request->flags.mustKeepalive())
+ if (!request->flags.proxy_keepalive && request->flags.must_keepalive)
return;
/* New request, no user details */
if (!keep_alive) {
/* drop the connection */
- request->flags.clearProxyKeepalive();
+ request->flags.proxy_keepalive = 0;
}
} else {
Auth::Ntlm::UserRequest *ntlm_request = dynamic_cast<Auth::Ntlm::UserRequest *>(auth_user_request.getRaw());
case Auth::Failed:
/* here it makes sense to drop the connection, as auth is
* tied to it, even if MAYBE the client could handle it - Kinkie */
- request->flags.clearProxyKeepalive();
+ request->flags.proxy_keepalive = 0;
/* fall through */
case Auth::Ok:
RequestMethodStr(request->method));
// TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
- if (request->persistent())
- request->flags.setProxyKeepalive();
- else
- request->flags.clearProxyKeepalive();
+ request->flags.proxy_keepalive = request->persistent() ? 1 : 0;
}
static int
{
assert(rep == NULL);
- if (!multipartRangeRequest() && !http->request->flags.isReplyChunked()) {
+ if (!multipartRangeRequest() && !http->request->flags.chunked_reply) {
size_t length = lengthToSend(bodyData.range());
noteSentBodyBytes (length);
AsyncCall::Pointer call = commCbCall(33, 5, "clientWriteBodyComplete",
if (bodyData.data && bodyData.length) {
if (multipartRangeRequest())
packRange(bodyData, mb);
- else if (http->request->flags.isReplyChunked()) {
+ else if (http->request->flags.chunked_reply) {
packChunk(bodyData, *mb);
} else {
size_t length = lengthToSend(bodyData.range());
// After sending Transfer-Encoding: chunked (at least), always send
// the last-chunk if there was no error, ignoring responseFinishedOrFailed.
- const bool mustSendLastChunk = http->request->flags.isReplyChunked() &&
- !http->request->flags.hadStreamError() &&
- !context->startOfOutput();
+ const bool mustSendLastChunk = http->request->flags.chunked_reply &&
+ !http->request->flags.stream_error && !context->startOfOutput();
if (responseFinishedOrFailed(rep, receivedData) && !mustSendLastChunk) {
context->writeComplete(context->clientConnection, NULL, 0, COMM_OK);
PROF_stop(clientSocketRecipient);
debugs(33, 5, HERE << "Range request at end of returnable " <<
"range sequence on " << clientConnection);
- if (http->request->flags.proxyKeepalive())
+ if (http->request->flags.proxy_keepalive)
return STREAM_COMPLETE;
else
return STREAM_UNPLANNED_COMPLETE;
// did we get at least what we expected, based on range specs?
if (bytesSent == bytesExpected) { // got everything
- if (http->request->flags.proxyKeepalive())
+ if (http->request->flags.proxy_keepalive)
return STREAM_COMPLETE;
else
return STREAM_UNPLANNED_COMPLETE;
// expected why would persistency matter? Should not this
// always be an error?
if (bytesSent > bytesExpected) { // got extra
- if (http->request->flags.proxyKeepalive())
+ if (http->request->flags.proxy_keepalive)
return STREAM_COMPLETE;
else
return STREAM_UNPLANNED_COMPLETE;
// at the client-side, but many such errors do require closure and the
// client-side code is bad at handling errors so we play it safe.
if (request)
- request->flags.clearProxyKeepalive();
+ request->flags.proxy_keepalive = 0;
flags.readMore = false;
debugs(33,4, HERE << "Will close after error: " << clientConnection);
}
request->clientConnectionManager = conn;
- if (http->flags.accel)
- request->flags.markAccelerated();
+ request->flags.accelerated = http->flags.accel;
request->flags.setSslBumped(conn->switchedToHttps());
- if (request->flags.sslBumped() && conn->pinning.pinned)
- request->flags.allowRepinning();
- if (conn->port->ignore_cc)
- request->flags.ignoreCacheControl();
+ request->flags.canRePin = request->flags.sslBumped() && conn->pinning.pinned;
+ request->flags.ignore_cc = conn->port->ignore_cc;
// TODO: decouple http->flags.accel from request->flags.sslBumped
- if (request->flags.accelerated() && !request->flags.sslBumped())
- if (!conn->port->allow_direct)
- request->flags.setNoDirect();
+ request->flags.no_direct = (request->flags.accelerated && !request->flags.sslBumped()) ?
+ !conn->port->allow_direct : 0;
#if USE_AUTH
if (request->flags.sslBumped()) {
if (conn->auth_user_request != NULL)
* from the port settings to the request.
*/
if (http->clientConnection != NULL) {
- if ((http->clientConnection->flags & COMM_INTERCEPTION) != 0)
- request->flags.markIntercepted();
- if ((http->clientConnection->flags & COMM_TRANSPARENT) != 0 )
- request->flags.setSpoofClientIp();
+ request->flags.intercepted = ((http->clientConnection->flags & COMM_INTERCEPTION) != 0);
+ request->flags.spoof_client_ip = ((http->clientConnection->flags & COMM_TRANSPARENT) != 0);
}
if (internalCheck(request->urlpath.termedBuf())) {
request->login[0] = '\0';
}
- if (http->flags.internal)
- request->flags.markInternal();
+ request->flags.internal = http->flags.internal;
setLogUri (http, urlCanonicalClean(request));
request->client_addr = conn->clientConnection->remote; // XXX: remove reuest->client_addr member.
#if FOLLOW_X_FORWARDED_FOR
fakeRequest->indirect_client_addr = connState->clientConnection->remote;
#endif
fakeRequest->my_addr = connState->clientConnection->local;
- if ((connState->clientConnection->flags & COMM_TRANSPARENT) != 0)
- fakeRequest->flags.setSpoofClientIp();
- if ((connState->clientConnection->flags & COMM_INTERCEPTION) != 0)
- fakeRequest->flags.markIntercepted();
+ fakeRequest->flags.spoof_client_ip = ((connState->clientConnection->flags & COMM_TRANSPARENT) != 0);
+ fakeRequest->flags.intercepted = ((connState->clientConnection->flags & COMM_INTERCEPTION) != 0);
debugs(33, 4, HERE << details << " try to generate a Dynamic SSL CTX");
connState->switchToHttps(fakeRequest, bumpMode);
}
// and now want to switch to SSL to send the error to the client
// without even peeking at the origin server certificate.
if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
- request->flags.setSslPeek();
+ request->flags.sslPeek = 1;
sslServerBump = new Ssl::ServerBump(request);
// will call httpsPeeked() with certificate and connection, eventually
{
if (errstate->httpStatus == HTTP_NOT_IMPLEMENTED && http->request)
/* prevent confusion over whether we default to persistent or not */
- http->request->flags.clearProxyKeepalive();
+ http->request->flags.proxy_keepalive = 0;
http->al->http.code = errstate->httpStatus;
return;
}
- http->request->flags.setRefresh();
+ http->request->flags.refresh = 1;
#if STORE_CLIENT_LIST_DEBUG
/* Prevent a race with the store client memory free routines
*/
// origin replied 304
if (status == HTTP_NOT_MODIFIED) {
http->logType = LOG_TCP_REFRESH_UNMODIFIED;
- http->request->flags.clearStaleIfHit(); // old_entry is no longer stale
+ http->request->flags.stale_if_hit = 0; // old_entry is no longer stale
// update headers on existing entry
old_rep->updateOnNotModified(http->storeEntry()->getReply());
// if client sent IMS
- if (http->request->flags.hasIMS() && !old_entry->modifiedSince(http->request)) {
+ if (http->request->flags.ims && !old_entry->modifiedSince(http->request)) {
// forward the 304 from origin
debugs(88, 3, "handleIMSReply: origin replied 304, revalidating existing entry and forwarding 304 to client");
sendClientUpstreamResponse();
}
// origin replied with an error
- else if (http->request->flags.failOnValidationError()) {
+ else if (http->request->flags.fail_on_validation_err) {
http->logType = LOG_TCP_REFRESH_FAIL_ERR;
debugs(88, 3, "handleIMSReply: origin replied with error " << status <<
", forwarding to client due to fail_on_validation_err");
return;
}
- if (e->checkNegativeHit() && !r->flags.noCacheHackEnabled()
+ if (e->checkNegativeHit()
+#if USE_HTTP_VIOLATIONS
+ && !r->flags.nocache_hack
+#endif
) {
http->logType = LOG_TCP_NEGATIVE_HIT;
sendMoreData(result);
* request. Otherwise two siblings could generate a loop if
* both have a stale version of the object.
*/
- r->flags.setNeedValidation();
+ r->flags.need_validation = 1;
if (e->lastmod < 0) {
/*
*/
http->logType = LOG_TCP_MISS;
processMiss();
- } else if (r->flags.noCache()) {
+ } else if (r->flags.nocache) {
/*
* This did not match a refresh pattern that overrides no-cache
* we should honour the client no-cache header.
}
/// Deny loops
- if (r->flags.loopDetect()) {
+ if (r->flags.loopdetect) {
http->al->http.code = HTTP_FORBIDDEN;
err = clientBuildError(ERR_ACCESS_DENIED, HTTP_FORBIDDEN, NULL, http->getConn()->clientConnection->remote, http->request);
createStoreEntry(r->method, RequestFlags());
if (r.header.has(HDR_IF_NONE_MATCH)) {
if (!e->hasIfNoneMatchEtag(r)) {
// RFC 2616: ignore IMS if If-None-Match did not match
- r.flags.clearIMS();
+ r.flags.ims = 0;
r.ims = -1;
r.imslen = 0;
r.header.delById(HDR_IF_MODIFIED_SINCE);
return;
}
- if (!r.flags.hasIMS()) {
+ if (!r.flags.ims) {
// RFC 2616: if If-None-Match matched and there is no IMS,
// reply with 304 Not Modified or 412 Precondition Failed
sendNotModifiedOrPreconditionFailedError();
matchedIfNoneMatch = true;
}
- if (r.flags.hasIMS()) {
+ if (r.flags.ims) {
// handle If-Modified-Since requests from the client
if (e->modifiedSince(&r)) {
http->logType = LOG_TCP_IMS_HIT;
if (http->flags.done_copying)
return 1;
- if (http->request->flags.isReplyChunked() && !flags.complete) {
+ if (http->request->flags.chunked_reply && !flags.complete) {
// last-chunk was not sent
return 0;
}
const int64_t expectedBodySize =
http->storeEntry()->getReply()->bodySize(http->request->method);
- if (!http->request->flags.proxyKeepalive() && expectedBodySize < 0) {
+ if (!http->request->flags.proxy_keepalive && expectedBodySize < 0) {
debugs(88, 5, "clientReplyStatus: closing, content_length < 0");
return STREAM_FAILED;
}
return STREAM_UNPLANNED_COMPLETE;
}
- if (http->request->flags.proxyKeepalive()) {
+ if (http->request->flags.proxy_keepalive) {
debugs(88, 5, "clientReplyStatus: stream complete and can keepalive");
return STREAM_COMPLETE;
}
}
// add Warnings required by RFC 2616 if serving a stale hit
- if (http->request->flags.staleIfHit() && logTypeIsATcpHit(http->logType)) {
+ if (http->request->flags.stale_if_hit && logTypeIsATcpHit(http->logType)) {
hdr->putWarning(110, "Response is stale");
- if (http->request->flags.validationNeeded())
+ if (http->request->flags.need_validation)
hdr->putWarning(111, "Revalidation failed");
}
||
(strncasecmp(value, "Kerberos", 8) == 0 &&
(value[8] == '\0' || value[8] == ' '))) {
- if (request->flags.connectionAuthDisabled()) {
+ if (request->flags.connection_auth_disabled) {
hdr->delAt(pos, connection_auth_blocked);
continue;
}
- request->flags.setMustKeepalive();
- if (!request->flags.accelerated() && !request->flags.intercepted()) {
+ request->flags.must_keepalive = 1;
+ if (!request->flags.accelerated && !request->flags.intercepted) {
httpHeaderPutStrf(hdr, HDR_PROXY_SUPPORT, "Session-Based-Authentication");
/*
We send "[Proxy-]Connection: Proxy-Support" header to mark
#endif
- // XXX: chunking a Content-Range response may not violate specs, but our
- // ClientSocketContext::writeComplete() confuses the end of ClientStream
- // with the end of to-client writing and may quit before writing last-chunk
- const bool maySendChunkedReply = !reply->content_range &&
- !request->multipartRangeRequest() &&
+ const bool maySendChunkedReply = !request->multipartRangeRequest() &&
reply->sline.protocol == AnyP::PROTO_HTTP && // response is HTTP
(request->http_ver >= HttpVersion(1, 1));
/* Check whether we should send keep-alive */
- if (!Config.onoff.error_pconns && reply->sline.status >= 400 && !request->flags.mustKeepalive()) {
+ if (!Config.onoff.error_pconns && reply->sline.status >= 400 && !request->flags.must_keepalive) {
debugs(33, 3, "clientBuildReplyHeader: Error, don't keep-alive");
- request->flags.clearProxyKeepalive();
- } else if (!Config.onoff.client_pconns && !request->flags.mustKeepalive()) {
+ request->flags.proxy_keepalive = 0;
+ } else if (!Config.onoff.client_pconns && !request->flags.must_keepalive) {
debugs(33, 2, "clientBuildReplyHeader: Connection Keep-Alive not requested by admin or client");
- request->flags.clearProxyKeepalive();
- } else if (request->flags.proxyKeepalive() && shutting_down) {
+ request->flags.proxy_keepalive = 0;
+ } else if (request->flags.proxy_keepalive && shutting_down) {
debugs(88, 3, "clientBuildReplyHeader: Shutting down, don't keep-alive.");
- request->flags.clearProxyKeepalive();
- } else if (request->flags.connectionAuthWanted() && !reply->keep_alive) {
+ request->flags.proxy_keepalive = 0;
+ } else if (request->flags.connection_auth && !reply->keep_alive) {
debugs(33, 2, "clientBuildReplyHeader: Connection oriented auth but server side non-persistent");
- request->flags.clearProxyKeepalive();
+ request->flags.proxy_keepalive = 0;
} else if (reply->bodySize(request->method) < 0 && !maySendChunkedReply) {
debugs(88, 3, "clientBuildReplyHeader: can't keep-alive, unknown body size" );
- request->flags.clearProxyKeepalive();
- } else if (fdUsageHigh()&& !request->flags.mustKeepalive()) {
+ request->flags.proxy_keepalive = 0;
+ } else if (fdUsageHigh()&& !request->flags.must_keepalive) {
debugs(88, 3, "clientBuildReplyHeader: Not many unused FDs, can't keep-alive");
- request->flags.clearProxyKeepalive();
+ request->flags.proxy_keepalive = 0;
} else if (request->flags.sslBumped() && !reply->persistent()) {
// We do not really have to close, but we pretend we are a tunnel.
debugs(88, 3, "clientBuildReplyHeader: bumped reply forces close");
- request->flags.clearProxyKeepalive();
+ request->flags.proxy_keepalive = 0;
}
// Decide if we send chunked reply
if (maySendChunkedReply &&
- request->flags.proxyKeepalive() &&
+ request->flags.proxy_keepalive &&
reply->bodySize(request->method) < 0) {
debugs(88, 3, "clientBuildReplyHeader: chunked reply");
- request->flags.markReplyChunked();
+ request->flags.chunked_reply = 1;
hdr->putStr(HDR_TRANSFER_ENCODING, "chunked");
}
hdr->putStr(HDR_VIA, strVia.termedBuf());
}
/* Signal keep-alive or close explicitly */
- hdr->putStr(HDR_CONNECTION, request->flags.proxyKeepalive() ? "keep-alive" : "close");
+ hdr->putStr(HDR_CONNECTION, request->flags.proxy_keepalive ? "keep-alive" : "close");
#if ADD_X_REQUEST_URI
/*
{
HttpRequest *r = http->request;
- if (r->flags.isCachable() || r->flags.isInternal()) {
+ if (r->flags.cachable || r->flags.internal) {
lookingforstore = 5;
StoreEntry::getPublicByRequest (this, r);
} else {
/** \li If the request has no-cache flag set or some no_cache HACK in operation we
* 'invalidate' the cached IP entries for this request ???
*/
- if (r->flags.noCache()) {
+ if (r->flags.nocache) {
#if USE_DNSHELPER
ipcacheInvalidate(r->GetHost());
}
- else if (r->flags.noCacheHackEnabled()) {
+#if USE_HTTP_VIOLATIONS
+
+ else if (r->flags.nocache_hack) {
+
#if USE_DNSHELPER
ipcacheInvalidate(r->GetHost());
#else
ipcacheInvalidateNegative(r->GetHost());
#endif /* USE_DNSHELPER */
+
}
+
+#endif /* USE_HTTP_VIOLATIONS */
#if USE_CACHE_DIGESTS
lookup_type = http->storeEntry() ? "HIT" : "MISS";
return;
}
- if (r->flags.noCache()) {
+ if (r->flags.nocache) {
debugs(85, 3, "clientProcessRequest2: no-cache REFRESH MISS");
http->storeEntry(NULL);
http->logType = LOG_TCP_CLIENT_REFRESH_MISS;
debugs(88, 5, "clientReplyContext::sendStreamError: A stream error has occured, marking as complete and sending no data.");
StoreIOBuffer localTempBuffer;
flags.complete = 1;
- http->request->flags.setStreamError();
+ http->request->flags.stream_error = 1;
localTempBuffer.flags.error = result.flags.error;
clientStreamCallback((clientStreamNode*)http->client_stream.head->data, http, NULL,
localTempBuffer);
/*
* build new header list *? TODO
*/
- if (http->flags.accel)
- request->flags.markAccelerated();
+ request->flags.accelerated = http->flags.accel;
- request->flags.setInternalClient();
+ request->flags.internalclient = 1;
/* this is an internally created
* request, not subject to acceleration
for (int i = 0; i < ia->count; ++i) {
if (clientConn->local.matchIPAddr(ia->in_addrs[i]) == 0) {
debugs(85, 3, HERE << "validate IP " << clientConn->local << " possible from Host:");
- http->request->flags.markHostVerified();
+ http->request->flags.hostVerified = 1;
http->doCallouts();
return;
}
// NP: it is tempting to use 'flags.nocache' but that is all about READing cache data.
// The problems here are about WRITE for new cache content, which means flags.cachable
- http->request->flags.setNotCachable(); // MUST NOT cache (for now)
+ http->request->flags.cachable = 0; // MUST NOT cache (for now)
// XXX: when we have updated the cache key to base on raw-IP + URI this cacheable limit can go.
- http->request->flags.clearHierarchical(); // MUST NOT pass to peers (for now)
+ http->request->flags.hierarchical = 0; // MUST NOT pass to peers (for now)
// XXX: when we have sorted out the best way to relay requests properly to peers this hierarchical limit can go.
http->doCallouts();
return;
return;
}
- if (http->request->flags.isInternal()) {
+ if (http->request->flags.internal) {
// TODO: kill this when URL handling allows partial URLs out of accel mode
// and we no longer screw with the URL just to add our internal host there
debugs(85, 6, HERE << "validate skipped due to internal composite URL.");
}
debugs(85, 3, HERE << "validate host=" << host << ", port=" << port << ", portStr=" << (portStr?portStr:"NULL"));
- if (http->request->flags.intercepted() || http->request->flags.spoofClientIp()) {
+ if (http->request->flags.intercepted || http->request->flags.spoof_client_ip) {
// verify the Host: port (if any) matches the apparent destination
if (portStr && port != http->getConn()->clientConnection->local.GetPort()) {
debugs(85, 3, HERE << "FAIL on validate port " << http->getConn()->clientConnection->local.GetPort() <<
} else {
// Okay no problem.
debugs(85, 3, HERE << "validate passed.");
- http->request->flags.markHostVerified();
+ http->request->flags.hostVerified = 1;
http->doCallouts();
}
safe_free(hostB);
const wordlist *p = NULL;
// intercepted requests MUST NOT (yet) be sent to peers unless verified
- if (!request->flags.hostVerified() && (request->flags.intercepted() || request->flags.spoofClientIp()))
+ if (!request->flags.hostVerified && (request->flags.intercepted || request->flags.spoof_client_ip))
return 0;
/*
* neighbors support private keys
*/
- if (request->flags.hasIMS() && !neighbors_do_private_keys)
+ if (request->flags.ims && !neighbors_do_private_keys)
return 0;
/*
* This is incorrect: authenticating requests can be sent via a hierarchy
* (they can even be cached if the correct headers are set on the reply)
*/
- if (request->flags.hasAuth())
+ if (request->flags.auth)
return 0;
if (method == METHOD_TRACE)
if (strstr(url, p->key))
return 0;
- if (request->flags.loopDetect())
+ if (request->flags.loopdetect)
return 0;
if (request->protocol == AnyP::PROTO_HTTP)
if (!http_conn)
return;
- if (http_conn->port->connection_auth_disabled)
- request->flags.disableConnectionAuth();
- if (!request->flags.connectionAuthDisabled()) {
+ request->flags.connection_auth_disabled = http_conn->port->connection_auth_disabled;
+ if (!request->flags.connection_auth_disabled) {
if (Comm::IsConnOpen(http_conn->pinning.serverConnection)) {
if (http_conn->pinning.auth) {
- request->flags.wantConnectionAuth();
- request->flags.markAuth();
+ request->flags.connection_auth = 1;
+ request->flags.auth = 1;
} else {
- request->flags.requestConnectionProxyAuth();
+ request->flags.connection_proxy_auth = 1;
}
// These should already be linked correctly.
assert(request->clientConnectionManager == http_conn);
}
/* check if connection auth is used, and flag as candidate for pinning
- * in such case.;
+ * in such case.
* Note: we may need to set flags.connection_auth even if the connection
* is already pinned if it was pinned earlier due to proxy auth
*/
- if (!request->flags.connectionAuthWanted()) {
+ if (!request->flags.connection_auth) {
if (req_hdr->has(HDR_AUTHORIZATION) || req_hdr->has(HDR_PROXY_AUTHORIZATION)) {
HttpHeaderPos pos = HttpHeaderInitPos;
HttpHeaderEntry *e;
||
strncasecmp(value, "Kerberos ", 9) == 0) {
if (e->id == HDR_AUTHORIZATION) {
- request->flags.wantConnectionAuth();
+ request->flags.connection_auth = 1;
may_pin = 1;
} else {
- request->flags.requestConnectionProxyAuth();
+ request->flags.connection_proxy_auth = 1;
may_pin = 1;
}
}
request->ims = req_hdr->getTime(HDR_IF_MODIFIED_SINCE);
if (request->ims > 0)
- request->flags.setIMS();
+ request->flags.ims = 1;
- if (!request->flags.ignoringCacheControl()) {
+ if (!request->flags.ignore_cc) {
if (req_hdr->has(HDR_PRAGMA)) {
String s = req_hdr->getList(HDR_PRAGMA);
* SP1 or not so all 5.5 versions are treated 'normally').
*/
if (Config.onoff.ie_refresh) {
- if (http->flags.accel && request->flags.hasIMS()) {
+ if (http->flags.accel && request->flags.ims) {
if ((str = req_hdr->getStr(HDR_USER_AGENT))) {
if (strstr(str, "MSIE 5.01") != NULL)
no_cache=true;
#if USE_HTTP_VIOLATIONS
if (Config.onoff.reload_into_ims)
- request->flags.hackNocache();
+ request->flags.nocache_hack = 1;
else if (refresh_nocache_hack)
- request->flags.hackNocache();
+ request->flags.nocache_hack = 1;
else
#endif
- request->flags.setNocache();
+ request->flags.nocache = 1;
}
/* ignore range header in non-GETs or non-HEADs */
}
if (req_hdr->has(HDR_AUTHORIZATION))
- request->flags.markAuth();
+ request->flags.auth = 1;
clientCheckPinning(http);
if (request->login[0] != '\0')
- request->flags.markAuth();
+ request->flags.auth = 1;
if (req_hdr->has(HDR_VIA)) {
String s = req_hdr->getList(HDR_VIA);
if (strListIsSubstr(&s, ThisCache2, ',')) {
debugObj(33, 1, "WARNING: Forwarding loop detected for:\n",
request, (ObjPackMethod) & httpRequestPack);
- request->flags.setLoopDetect();
+ request->flags.loopdetect = 1;
}
#if USE_FORW_VIA_DB
#endif
- request->flags.setCachable(http->request->cacheable());
+ request->flags.cachable = http->request->cacheable();
if (clientHierarchical(http))
- request->flags.setHierarchical();
+ request->flags.hierarchical = 1;
debugs(85, 5, "clientInterpretRequestHeaders: REQ_NOCACHE = " <<
- (request->flags.noCache() ? "SET" : "NOT SET"));
+ (request->flags.nocache ? "SET" : "NOT SET"));
debugs(85, 5, "clientInterpretRequestHeaders: REQ_CACHABLE = " <<
- (request->flags.isCachable() ? "SET" : "NOT SET"));
+ (request->flags.cachable ? "SET" : "NOT SET"));
debugs(85, 5, "clientInterpretRequestHeaders: REQ_HIERARCHICAL = " <<
- (request->flags.hierarchical() ? "SET" : "NOT SET"));
+ (request->flags.hierarchical ? "SET" : "NOT SET"));
}
debugs(61,2, HERE << "URL-rewriter diverts URL from " << urlCanonical(old_request) << " to " << urlCanonical(new_request));
// update the new request to flag the re-writing was done on it
- new_request->flags.markRedirected();
+ new_request->flags.redirected = 1;
// unlink bodypipe from the old request. Not needed there any longer.
if (old_request->body_pipe != NULL) {
ClientRequestContext::checkNoCacheDone(const allow_t &answer)
{
acl_checklist = NULL;
- if (answer == ACCESS_ALLOWED)
- http->request->flags.setCachable();
+ http->request->flags.cachable = (answer == ACCESS_ALLOWED);
http->doCallouts();
}
if (!calloutContext->no_cache_done) {
calloutContext->no_cache_done = true;
- if (Config.accessList.noCache && request->flags.isCachable()) {
+ if (Config.accessList.noCache && request->flags.cachable) {
debugs(83, 3, HERE << "Doing calloutContext->checkNoCache()");
calloutContext->checkNoCache();
return;
case LFT_LOCAL_LISTENING_IP: {
// avoid logging a dash if we have reliable info
const bool interceptedAtKnownPort = al->request ?
- (al->request->flags.spoofClientIp() ||
- al->request->flags.intercepted()) && al->cache.port :
+ (al->request->flags.spoof_client_ip ||
+ al->request->flags.intercepted) && al->cache.port :
false;
if (interceptedAtKnownPort) {
const bool portAddressConfigured = !al->cache.port->s.IsAnyAddr();
fail(anErr);
} // else use actual error from last connection attempt
#if USE_SSL
- if (request->flags.sslPeek() && request->clientConnectionManager.valid()) {
+ if (request->flags.sslPeek && request->clientConnectionManager.valid()) {
errorAppendEntry(entry, err); // will free err
err = NULL;
CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
bool
FwdState::checkRetriable()
{
- // Optimize: A compliant proxy may retry PUTs, but Squid lacks the [rather
- // complicated] code required to protect the PUT request body from being
- // nibbled during the first try. Thus, Squid cannot retry some PUTs today.
- if (request->body_pipe != NULL)
- return false;
-
/* RFC2616 9.1 Safe and Idempotent Methods */
switch (request->method.id()) {
/* 9.1.1 Safe Methods */
// a user-entered address (a host name or a user-entered IP).
const bool isConnectRequest = !request->clientConnectionManager->port->spoof_client_ip &&
!request->clientConnectionManager->port->intercepted;
- if (request->flags.sslPeek() && !isConnectRequest) {
+ if (request->flags.sslPeek && !isConnectRequest) {
if (X509 *srvX509 = errDetails->peerCert()) {
if (const char *name = Ssl::CommonHostName(srvX509)) {
request->SetHost(name);
const bool hostnameIsIp = request->GetHostIsNumeric();
const bool isConnectRequest = !request->clientConnectionManager->port->spoof_client_ip &&
!request->clientConnectionManager->port->intercepted;
- if (!request->flags.sslPeek() || isConnectRequest)
+ if (!request->flags.sslPeek || isConnectRequest)
SSL_set_ex_data(ssl, ssl_ex_index_server, (void*)hostname);
// Use SNI TLS extension only when we connect directly
peerConnectSucceded(serverConnection()->getPeer());
// some requests benefit from pinning but do not require it and can "repin"
- const bool rePin = request->flags.canRePin() &&
+ const bool rePin = request->flags.canRePin &&
request->clientConnectionManager.valid();
if (rePin) {
debugs(17, 3, HERE << "repinning " << serverConn);
request->clientConnectionManager->pinConnection(serverConn,
- request, serverConn->getPeer(), request->flags.hasAuth());
- request->flags.markPinned();
+ request, serverConn->getPeer(), request->flags.auth);
+ request->flags.pinned = 1;
}
#if USE_SSL
- if (!request->flags.pinned() || rePin) {
+ if (!request->flags.pinned || rePin) {
if ((serverConnection()->getPeer() && serverConnection()->getPeer()->use_ssl) ||
(!serverConnection()->getPeer() && request->protocol == AnyP::PROTO_HTTPS) ||
- request->flags.sslPeek()) {
+ request->flags.sslPeek) {
initiateSSL();
return;
}
return;
}
- request->flags.clearPinned(); // XXX: what if the ConnStateData set this to flag existing credentials?
+ request->flags.pinned = 0; // XXX: what if the ConnStateData set this to flag existing credentials?
// XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
// XXX: also, logs will now lie if pinning is broken and leads to an error message.
if (serverDestinations[0]->peerType == PINNED) {
serverConn->peerType = HIER_DIRECT;
#endif
++n_tries;
- request->flags.markPinned();
+ request->flags.pinned = 1;
if (pinned_connection->pinnedAuth())
- request->flags.markAuth();
+ request->flags.auth = 1;
comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
// the server may close the pinned connection before this request
pconnRace = racePossible;
}
/* Failure. Fall back on next path unless we can re-pin */
debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
- if (pconnRace != raceHappened || !request->flags.canRePin()) {
+ if (pconnRace != raceHappened || !request->flags.canRePin) {
serverDestinations.shift();
pconnRace = raceImpossible;
startConnectionOrFail();
#endif
#if USE_SSL
- if (request->flags.sslPeek()) {
+ if (request->flags.sslPeek) {
CallJobHere1(17, 4, request->clientConnectionManager, ConnStateData,
ConnStateData::httpsPeeked, serverConnection());
unregister(serverConn); // async call owns it now
request->peer_domain = serverConnection()->getPeer()->domain;
httpStart(this);
} else {
- assert(!request->flags.sslPeek());
+ assert(!request->flags.sslPeek);
request->peer_login = NULL;
request->peer_domain = NULL;
ErrorState *
FwdState::makeConnectingError(const err_type type) const
{
- return new ErrorState(type, request->flags.validationNeeded() ?
+ return new ErrorState(type, request->flags.need_validation ?
HTTP_GATEWAY_TIMEOUT : HTTP_SERVICE_UNAVAILABLE, request);
}
conn->local.SetIPv4();
// maybe use TPROXY client address
- if (request && request->flags.spoofClientIp()) {
+ if (request && request->flags.spoof_client_ip) {
if (!conn->getPeer() || !conn->getPeer()->options.no_tproxy) {
#if FOLLOW_X_FORWARDED_FOR && LINUX_NETFILTER
if (Config.onoff.tproxy_uses_indirect_client)
_peer = cbdataReference(fwd->serverConnection()->getPeer()); /* might be NULL */
if (_peer) {
- request->flags.setProxying();
+ request->flags.proxying = 1;
/*
* This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
* We might end up getting the object from somewhere else if,
void
HttpStateData::processSurrogateControl(HttpReply *reply)
{
- if (request->flags.accelerated() && reply->surrogate_control) {
+ if (request->flags.accelerated && reply->surrogate_control) {
HttpHdrScTarget *sctusable = reply->surrogate_control->getMergedTarget(Config.Accel.surrogate_id);
if (sctusable) {
}
}
- if (request->flags.hasAuth() || request->flags.authSent()) {
+ if (request->flags.auth || request->flags.auth_sent) {
/*
* Responses to requests with authorization may be cached
* only if a Cache-Control: public reply header is present.
}
if (!peerSupportsConnectionPinning())
- request->flags.disableConnectionAuth();
+ request->flags.connection_auth_disabled = 1;
HttpReply *vrep = setVirginReply(newrep);
flags.headers_parsed = 1;
return true;
/*if the connections it is already pinned it is OK*/
- if (request->flags.pinned())
+ if (request->flags.pinned)
return true;
/*Allow pinned connections only if the Proxy-support header exists in
debugs(11, DBG_IMPORTANT, "WARNING: HTTP: Invalid Response: Headers did not parse at all for " << entry->url() << " AKA " << request->GetHost() << request->urlpath.termedBuf() );
} else {
error = ERR_ZERO_SIZE_OBJECT;
- debugs(11, (request->flags.accelerated()?DBG_IMPORTANT:2), "WARNING: HTTP: Invalid Response: No object data received for " <<
+ debugs(11, (request->flags.accelerated?DBG_IMPORTANT:2), "WARNING: HTTP: Invalid Response: No object data received for " <<
entry->url() << " AKA " << request->GetHost() << request->urlpath.termedBuf() );
}
}
closeHandler = NULL;
fwd->unregister(serverConnection);
- if (request->flags.spoofClientIp())
+ if (request->flags.spoof_client_ip)
client_addr = request->client_addr;
- if (request->flags.pinned()) {
+ if (request->flags.pinned) {
ispinned = true;
- } else if (request->flags.connectionAuthWanted() && request->flags.authSent()) {
+ } else if (request->flags.connection_auth && request->flags.auth_sent) {
ispinned = true;
}
if (request->pinnedConnection() && ispinned) {
request->pinnedConnection()->pinConnection(serverConnection, request, _peer,
- request->flags.connectionAuthWanted());
+ (request->flags.connection_auth != 0));
} else {
fwd->pconnPush(serverConnection, request->peer_host ? request->peer_host : request->GetHost());
}
http_hdr_type header = flags.originpeer ? HDR_AUTHORIZATION : HDR_PROXY_AUTHORIZATION;
/* Nothing to do unless we are forwarding to a peer */
- if (!request->flags.proxying())
+ if (!request->flags.proxying)
return;
/* Needs to be explicitly enabled */
*/
if (!we_do_ranges && request->multipartRangeRequest()) {
/* don't cache the result */
- request->flags.setNotCachable();
+ request->flags.cachable = 0;
/* pretend it's not a range request */
delete request->range;
request->range = NULL;
strVia.clean();
}
- if (request->flags.accelerated()) {
+ if (request->flags.accelerated) {
/* Append Surrogate-Capabilities */
String strSurrogate(hdr_in->getList(HDR_SURROGATE_CAPABILITY));
#if USE_SQUID_ESI
/* append Authorization if known in URL, not in header and going direct */
if (!hdr_out->has(HDR_AUTHORIZATION)) {
- if (!request->flags.proxying() && request->login && *request->login) {
+ if (!request->flags.proxying && request->login && *request->login) {
httpHeaderPutStrf(hdr_out, HDR_AUTHORIZATION, "Basic %s",
old_base64_encode(request->login));
}
*/
if (request->peer_domain)
hdr_out->putStr(HDR_HOST, request->peer_domain);
- else if (request->flags.isRedirected() && !Config.onoff.redir_rewrites_host)
+ else if (request->flags.redirected && !Config.onoff.redir_rewrites_host)
hdr_out->addEntry(e->clone());
else {
/* use port# only if not default */
int64_t roffLimit = request->getRangeOffsetLimit();
- if (NULL == request->range || !request->flags.isCachable()
- || request->range->offsetLimitExceeded(roffLimit) || request->flags.connectionAuthWanted())
+ if (NULL == request->range || !request->flags.cachable
+ || request->range->offsetLimitExceeded(roffLimit) || request->flags.connection_auth)
result = false;
debugs(11, 8, "decideIfWeDoRanges: range specs: " <<
request->range << ", cachable: " <<
- request->flags.isCachable() << "; we_do_ranges: " << result);
+ request->flags.cachable << "; we_do_ranges: " << result);
return result;
}
Packer p;
httpBuildRequestHeader(request, entry, fwd->al, &hdr, flags);
- if (request->flags.pinned() && request->flags.connectionAuthWanted())
- request->flags.markAuthSent();
+ if (request->flags.pinned && request->flags.connection_auth)
+ request->flags.auth_sent = 1;
else if (hdr.has(HDR_AUTHORIZATION))
- request->flags.markAuthSent();
+ request->flags.auth_sent = 1;
packerToMemInit(&p, mb);
hdr.packInto(&p);
/*
* Is keep-alive okay for all request methods?
*/
- if (request->flags.mustKeepalive())
+ if (request->flags.must_keepalive)
flags.keepalive = 1;
else if (!Config.onoff.server_pconns)
flags.keepalive = 0;
tempBuffer.data = ex->buf;
storeClientCopy(ex->sc, ex->e, tempBuffer,
netdbExchangeHandleReply, ex);
- ex->r->flags.setLoopDetect(); /* cheat! -- force direct */
+ ex->r->flags.loopdetect = 1; /* cheat! -- force direct */
if (p->login)
xstrncpy(ex->r->login, p->login, MAX_LOGIN_SZ);
return;
}
- flags.setCachable();
+ flags.cachable = 1;
StoreEntry *e = storeCreateEntry(url,
url,
flags,
if (neighborType(p, request) == PEER_SIBLING) {
#if PEER_MULTICAST_SIBLINGS
if (p->type == PEER_MULTICAST && p->options.mcast_siblings &&
- (request->flags.noCache() || request->flags.refresh() || request->flags.loopDetect() || request->flags.validationNeeded()))
+ (request->flags.nocache || request->flags.refresh || request->flags.loopdetect || request->flags.need_validation))
debugs(15, 2, "peerAllowedToUse(" << p->name << ", " << request->GetHost() << ") : multicast-siblings optimization match");
#endif
- if (request->flags.noCache())
+ if (request->flags.nocache)
return false;
- if (request->flags.refresh())
+ if (request->flags.refresh)
return false;
- if (request->flags.loopDetect())
+ if (request->flags.loopdetect)
return false;
- if (request->flags.validationNeeded())
+ if (request->flags.need_validation)
return false;
}
/* the case below seems strange, but can happen if the
* URL host is on the other side of a firewall */
if (p->type == PEER_SIBLING)
- if (!request->flags.hierarchical())
+ if (!request->flags.hierarchical)
return 0;
if (!peerAllowedToUse(p, request))
int p_rtt;
int i;
- if (!request->flags.hierarchical())
+ if (!request->flags.hierarchical)
return NULL;
storeKeyPublicByRequest(request);
pd_last_req_time = squid_curtime;
- req->flags.setCachable();
+ req->flags.cachable = 1;
/* the rest is based on clientProcessExpired() */
- req->flags.setRefresh();
+ req->flags.refresh = 1;
old_e = fetch->old_entry = Store::Root().get(key);
assert(direct != DIRECT_YES);
debugs(44, 3, "peerSelectIcpPing: " << entry->url() );
- if (!request->flags.hierarchical() && direct != DIRECT_NO)
+ if (!request->flags.hierarchical && direct != DIRECT_NO)
return 0;
if (EBIT_TEST(entry->flags, KEY_PRIVATE) && !neighbors_do_private_keys)
// To resolve this we must use only the original client destination when going DIRECT
// on intercepted traffic which failed Host verification
const HttpRequest *req = psstate->request;
- const bool isIntercepted = !req->flags.isRedirected() &&
- (req->flags.intercepted() || req->flags.spoofClientIp());
- const bool useOriginalDst = Config.onoff.client_dst_passthru || !req->flags.hostVerified();
+ const bool isIntercepted = !req->flags.redirected &&
+ (req->flags.intercepted || req->flags.spoof_client_ip);
+ const bool useOriginalDst = Config.onoff.client_dst_passthru || !req->flags.hostVerified;
const bool choseDirect = fs && fs->code == HIER_DIRECT;
if (isIntercepted && useOriginalDst && choseDirect) {
// construct a "result" adding the ORIGINAL_DST to the set instead of DIRECT
break;
// for TPROXY we must skip unusable addresses.
- if (psstate->request->flags.spoofClientIp() && !(fs->_peer && fs->_peer->options.no_tproxy) ) {
+ if (psstate->request->flags.spoof_client_ip && !(fs->_peer && fs->_peer->options.no_tproxy) ) {
if (ia->in_addrs[n].IsIPv4() != psstate->request->client_addr.IsIPv4()) {
// we CAN'T spoof the address on this link. find another.
continue;
HttpRequest *request = ps->request;
debugs(44, 3, "peerSelectFoo: '" << RequestMethodStr(request->method) << " " << request->GetHost() << "'");
- /* If we don't know whether DIRECT is permitted ... */
+ /** If we don't know whether DIRECT is permitted ... */
if (ps->direct == DIRECT_UNKNOWN) {
if (ps->always_direct == ACCESS_DUNNO) {
debugs(44, 3, "peerSelectFoo: direct = " << DirectStr[ps->direct] << " (always_direct to be checked)");
- /* check always_direct; */
+ /** check always_direct; */
ps->acl_checklist = new ACLFilledChecklist(Config.accessList.AlwaysDirect, request, NULL);
ps->acl_checklist->nonBlockingCheck(peerCheckAlwaysDirectDone, ps);
return;
} else if (ps->never_direct == ACCESS_DUNNO) {
debugs(44, 3, "peerSelectFoo: direct = " << DirectStr[ps->direct] << " (never_direct to be checked)");
- /* check never_direct; */
+ /** check never_direct; */
ps->acl_checklist = new ACLFilledChecklist(Config.accessList.NeverDirect, request, NULL);
ps->acl_checklist->nonBlockingCheck(peerCheckNeverDirectDone, ps);
return;
- } else if (request->flags.noDirect()) {
- /* if we are accelerating, direct is not an option. */
+ } else if (request->flags.no_direct) {
+ /** if we are accelerating, direct is not an option. */
ps->direct = DIRECT_NO;
debugs(44, 3, "peerSelectFoo: direct = " << DirectStr[ps->direct] << " (forced non-direct)");
- } else if (request->flags.loopDetect()) {
- /* if we are in a forwarding-loop, direct is not an option. */
+ } else if (request->flags.loopdetect) {
+ /** if we are in a forwarding-loop, direct is not an option. */
ps->direct = DIRECT_YES;
debugs(44, 3, "peerSelectFoo: direct = " << DirectStr[ps->direct] << " (forwarding loop detected)");
} else if (peerCheckNetdbDirect(ps)) {
if (Config.onoff.prefer_direct)
peerGetSomeDirect(ps);
- if (request->flags.hierarchical() || !Config.onoff.nonhierarchical_direct) {
+ if (request->flags.hierarchical || !Config.onoff.nonhierarchical_direct) {
peerGetSomeParent(ps);
peerGetAllParents(ps);
}
debugs(22, 3, "\tentry->timestamp:\t" << mkrfc1123(entry->timestamp));
- if (request && !request->flags.ignoringCacheControl()) {
+ if (request && !request->flags.ignore_cc) {
const HttpHdrCc *const cc = request->cache_control;
if (cc && cc->hasMinFresh()) {
const int32_t minFresh=cc->minFresh();
entry->mem_obj->getReply()->cache_control->staleIfError() < staleness) {
debugs(22, 3, "refreshCheck: stale-if-error period expired.");
- request->flags.setFailOnValidationError();
+ request->flags.fail_on_validation_err = 1;
}
if (EBIT_TEST(entry->flags, ENTRY_REVALIDATE) && staleness > -1
) {
debugs(22, 3, "refreshCheck: YES: Must revalidate stale response");
if (request)
- request->flags.setFailOnValidationError();
+ request->flags.fail_on_validation_err = 1;
return STALE_MUST_REVALIDATE;
}
/* request-specific checks */
- if (request && !request->flags.ignoringCacheControl()) {
+ if (request && !request->flags.ignore_cc) {
HttpHdrCc *cc = request->cache_control;
- if (request->flags.hasIMS() && (R->flags.refresh_ims || Config.onoff.refresh_all_ims)) {
+ if (request->flags.ims && (R->flags.refresh_ims || Config.onoff.refresh_all_ims)) {
/* The clients no-cache header is changed into a IMS query */
debugs(22, 3, "refreshCheck: YES: refresh-ims");
return STALE_FORCED_RELOAD;
#if USE_HTTP_VIOLATIONS
- if (!request->flags.noCacheHackEnabled()) {
+ if (!request->flags.nocache_hack) {
(void) 0;
} else if (R->flags.ignore_reload) {
/* The clients no-cache header is ignored */
} else {
/* The clients no-cache header is not overridden on this request */
debugs(22, 3, "refreshCheck: YES: client reload");
- request->flags.setNocache();
+ request->flags.nocache = 1;
return STALE_FORCED_RELOAD;
}
if ( max_stale >= 0 && staleness > max_stale) {
debugs(22, 3, "refreshCheck: YES: max-stale limit");
if (request)
- request->flags.setFailOnValidationError();
+ request->flags.fail_on_validation_err = 1;
return STALE_MAX_STALE;
}
int reason = refreshCheck(entry, request, 0);
++ refreshCounts[rcHTTP].total;
++ refreshCounts[rcHTTP].status[reason];
- if (refreshIsStaleIfHit(reason))
- request->flags.setStaleIfHit();
+ request->flags.stale_if_hit = refreshIsStaleIfHit(reason);
return (Config.onoff.offline || reason < 200) ? 0 : 1;
}
mem = e->mem_obj;
mem->method = method;
- if (neighbors_do_private_keys || !flags.hierarchical())
+ if (neighbors_do_private_keys || !flags.hierarchical)
e->setPrivateKey();
else
e->setPublicKey();
- if (flags.isCachable()) {
+ if (flags.cachable) {
EBIT_SET(e->flags, ENTRY_CACHABLE);
EBIT_CLR(e->flags, RELEASE_REQUEST);
} else {
assert(mem);
debugs(90, 3, "CheckQuickAbort2: entry=" << entry << ", mem=" << mem);
- if (mem->request && !mem->request->flags.isCachable()) {
+ if (mem->request && !mem->request->flags.cachable) {
debugs(90, 3, "CheckQuickAbort2: YES !mem->request->flags.cachable");
return 1;
}
debugs(71, 2, "storeDigestRewrite: start rewrite #" << sd_state.rewrite_count + 1);
/* make new store entry */
url = internalLocalUri("/squid-internal-periodic/", StoreDigestFileName);
- flags.setCachable();
+ flags.cachable = 1;
e = storeCreateEntry(url, url, flags, METHOD_GET);
assert(e);
sd_state.rewrite_lock = e;
{
/* Create "vary" base object */
RequestFlags flags;
- flags.setCachable();
+ flags.cachable = 1;
StoreEntry *pe = storeCreateEntry("dummy url", "dummy log url", flags, METHOD_GET);
HttpReply *rep = (HttpReply *) pe->getReply(); // bypass const
rep->setHeaders(HTTP_OK, "dummy test object", "x-squid-internal/test", -1, -1, squid_curtime + 100000);
{
/* Create "vary" base object */
RequestFlags flags;
- flags.setCachable();
+ flags.cachable = 1;
StoreEntry *pe = storeCreateEntry("dummy url", "dummy log url", flags, METHOD_GET);
/* We are allowed to do this typecast */
HttpReply *rep = (HttpReply *) pe->getReply(); // bypass const
testRock::createEntry(const int i)
{
RequestFlags flags;
- flags.setCachable();
+ flags.cachable = 1;
char url[64];
snprintf(url, sizeof(url), "dummy url %i", i);
url[sizeof(url) - 1] = '\0';
{
/* Create "vary" base object */
RequestFlags flags;
- flags.setCachable();
+ flags.cachable = 1;
StoreEntry *pe = storeCreateEntry("dummy url", "dummy log url", flags, METHOD_GET);
HttpReply *rep = (HttpReply *) pe->getReply(); // bypass const
rep->setHeaders(HTTP_OK, "dummy test object", "x-squid-internal/test", -1, -1, squid_curtime + 100000);
TunnelStateData *tunnelState = (TunnelStateData *)data;
debugs(26, 3, HERE << server << ", tunnelState=" << tunnelState);
- if (tunnelState->request && (tunnelState->request->flags.spoofClientIp() || tunnelState->request->flags.intercepted()))
+ if (tunnelState->request && (tunnelState->request->flags.spoof_client_ip || tunnelState->request->flags.intercepted))
tunnelStartShoveling(tunnelState); // ssl-bumped connection, be quiet
else {
AsyncCall::Pointer call = commCbCall(5,5, "tunnelConnectedWriteDone",
debugs(26, 4, HERE << "determine post-connect handling pathway.");
if (conn->getPeer()) {
tunnelState->request->peer_login = conn->getPeer()->login;
- if (conn->getPeer()->options.originserver)
- tunnelState->request->flags.setProxying();
- else
- tunnelState->request->flags.clearProxying();
+ tunnelState->request->flags.proxying = (conn->getPeer()->options.originserver ? 0 : 1);
} else {
tunnelState->request->peer_login = NULL;
- tunnelState->request->flags.clearProxying();
+ tunnelState->request->flags.proxying = 0;
}
- if (tunnelState->request->flags.proxying())
+ if (tunnelState->request->flags.proxying)
tunnelRelayConnectRequest(conn, tunnelState);
else {
tunnelConnected(conn, tunnelState);
http_state_flags flags;
debugs(26, 3, HERE << srv << ", tunnelState=" << tunnelState);
memset(&flags, '\0', sizeof(flags));
- flags.proxying = tunnelState->request->flags.proxying();
+ flags.proxying = tunnelState->request->flags.proxying;
MemBuf mb;
mb.init();
mb.Printf("CONNECT %s HTTP/1.1\r\n", tunnelState->url);