From: Francesco Chemolli
Date: Mon, 28 Jan 2013 16:56:05 +0000 (+0100)
Subject: Changed clients of RequestFlags to use bool constants; changed FwdState.flags to...
X-Git-Tag: SQUID_3_4_0_1~338
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=e857372a737ea966f5f4a7e65270ce20db2fb278;p=thirdparty%2Fsquid.git

Changed clients of RequestFlags to use bool constants; changed FwdState.flags to bool
---

diff --git a/src/acl/DestinationIp.cc b/src/acl/DestinationIp.cc
index 68ede133a6..86825b74f4 100644
--- a/src/acl/DestinationIp.cc
+++ b/src/acl/DestinationIp.cc
@@ -114,7 +114,7 @@ DestinationIPLookup::LookupDone(const ipcache_addrs *, const DnsLookupDetails &d
 {
     ACLFilledChecklist *checklist = Filled((ACLChecklist*)data);
     assert (checklist->asyncState() == DestinationIPLookup::Instance());
-    checklist->request->flags.destinationIpLookedUp=true;
+    checklist->request->flags.destinationIpLookedUp = true;
     checklist->request->recordLookup(details);
     checklist->asyncInProgress(false);
     checklist->changeState (ACLChecklist::NullState::Instance());
diff --git a/src/auth/negotiate/UserRequest.cc b/src/auth/negotiate/UserRequest.cc
index 0f4a956638..8e6e95322c 100644
--- a/src/auth/negotiate/UserRequest.cc
+++ b/src/auth/negotiate/UserRequest.cc
@@ -266,7 +266,7 @@ Auth::Negotiate::UserRequest::HandleReply(void *data, const HelperReply &reply)
     case HelperReply::TT:
         /* we have been given a blob to send to the client */
         safe_free(lm_request->server_blob);
-        lm_request->request->flags.mustKeepalive = 1;
+        lm_request->request->flags.mustKeepalive = true;
         if (lm_request->request->flags.proxyKeepalive) {
             Note::Pointer tokenNote = reply.notes.find("token");
             lm_request->server_blob = xstrdup(tokenNote->firstValue());
diff --git a/src/auth/negotiate/auth_negotiate.cc b/src/auth/negotiate/auth_negotiate.cc
index 9710f58c33..7a8cc41e26 100644
--- a/src/auth/negotiate/auth_negotiate.cc
+++ b/src/auth/negotiate/auth_negotiate.cc
@@ -226,7 +226,7 @@ Auth::Negotiate::Config::fixHeader(Auth::UserRequest::Pointer auth_user_request,
         if (!keep_alive) {
             /* drop the connection */
             rep->header.delByName("keep-alive");
-            request->flags.proxyKeepalive = 0;
+            request->flags.proxyKeepalive = false;
         }
     } else {
         Auth::Negotiate::UserRequest *negotiate_request = dynamic_cast<Auth::Negotiate::UserRequest *>(auth_user_request.getRaw());
@@ -238,7 +238,7 @@ Auth::Negotiate::Config::fixHeader(Auth::UserRequest::Pointer auth_user_request,
             /* here it makes sense to drop the connection, as auth is
              * tied to it, even if MAYBE the client could handle it - Kinkie */
             rep->header.delByName("keep-alive");
-            request->flags.proxyKeepalive = 0;
+            request->flags.proxyKeepalive = false;
             /* fall through */

         case Auth::Ok:
diff --git a/src/auth/ntlm/UserRequest.cc b/src/auth/ntlm/UserRequest.cc
index a11b9abd8c..eca993bf02 100644
--- a/src/auth/ntlm/UserRequest.cc
+++ b/src/auth/ntlm/UserRequest.cc
@@ -259,7 +259,7 @@ Auth::Ntlm::UserRequest::HandleReply(void *data, const HelperReply &reply)
     case HelperReply::TT:
         /* we have been given a blob to send to the client */
         safe_free(lm_request->server_blob);
-        lm_request->request->flags.mustKeepalive = 1;
+        lm_request->request->flags.mustKeepalive = true;
         if (lm_request->request->flags.proxyKeepalive) {
             Note::Pointer serverBlob = reply.notes.find("token");
             lm_request->server_blob = xstrdup(serverBlob->firstValue());
diff --git a/src/auth/ntlm/auth_ntlm.cc b/src/auth/ntlm/auth_ntlm.cc
index a25f726abf..bada7d0658 100644
--- a/src/auth/ntlm/auth_ntlm.cc
+++ b/src/auth/ntlm/auth_ntlm.cc
@@ -215,7 +215,7 @@ Auth::Ntlm::Config::fixHeader(Auth::UserRequest::Pointer auth_user_request, Http
         if (!keep_alive) {
             /* drop the connection */
-            request->flags.proxyKeepalive = 0;
+            request->flags.proxyKeepalive = false;
         }
     } else {
         Auth::Ntlm::UserRequest *ntlm_request = dynamic_cast<Auth::Ntlm::UserRequest *>(auth_user_request.getRaw());
@@ -226,7 +226,7 @@ Auth::Ntlm::Config::fixHeader(Auth::UserRequest::Pointer auth_user_request, Http
         case Auth::Failed:
             /* here it makes sense to drop the connection, as auth is
              * tied to it, even if MAYBE the client could handle it - Kinkie */
-            request->flags.proxyKeepalive = 0;
+            request->flags.proxyKeepalive = false;
             /* fall through */

         case Auth::Ok:
diff --git a/src/client_side.cc b/src/client_side.cc
index 919d243ede..9c316eb48d 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -863,7 +863,7 @@ clientSetKeepaliveFlag(ClientHttpRequest * http)
            RequestMethodStr(request->method));
     // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
-    request->flags.proxyKeepalive = request->persistent() ? 1 : 0;
+    request->flags.proxyKeepalive = request->persistent();
 }

 static int
@@ -2489,7 +2489,7 @@ ConnStateData::quitAfterError(HttpRequest *request)
     // at the client-side, but many such errors do require closure and the
     // client-side code is bad at handling errors so we play it safe.
     if (request)
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     flags.readMore = false;
     debugs(33,4, HERE << "Will close after error: " << clientConnection);
 }
@@ -3931,7 +3931,7 @@ ConnStateData::switchToHttps(HttpRequest *request, Ssl::BumpMode bumpServerMode)
     // and now want to switch to SSL to send the error to the client
     // without even peeking at the origin server certificate.
     if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
-        request->flags.sslPeek = 1;
+        request->flags.sslPeek = true;
         sslServerBump = new Ssl::ServerBump(request);

         // will call httpsPeeked() with certificate and connection, eventually
diff --git a/src/client_side_reply.cc b/src/client_side_reply.cc
index 17574dc76a..146f1ca317 100644
--- a/src/client_side_reply.cc
+++ b/src/client_side_reply.cc
@@ -129,7 +129,7 @@ void clientReplyContext::setReplyToError(const HttpRequestMethod& method, ErrorS
 {
     if (errstate->httpStatus == HTTP_NOT_IMPLEMENTED && http->request)
         /* prevent confusion over whether we default to persistent or not */
-        http->request->flags.proxyKeepalive = 0;
+        http->request->flags.proxyKeepalive = false;

     http->al->http.code = errstate->httpStatus;

@@ -273,7 +273,7 @@ clientReplyContext::processExpired()
         return;
     }

-    http->request->flags.refresh = 1;
+    http->request->flags.refresh = true;
 #if STORE_CLIENT_LIST_DEBUG
     /* Prevent a race with the store client memory free routines */
@@ -390,7 +390,7 @@ clientReplyContext::handleIMSReply(StoreIOBuffer result)
     // origin replied 304
     if (status == HTTP_NOT_MODIFIED) {
         http->logType = LOG_TCP_REFRESH_UNMODIFIED;
-        http->request->flags.staleIfHit = 0; // old_entry is no longer stale
+        http->request->flags.staleIfHit = false; // old_entry is no longer stale

         // update headers on existing entry
         old_rep->updateOnNotModified(http->storeEntry()->getReply());
@@ -558,7 +558,7 @@ clientReplyContext::cacheHit(StoreIOBuffer result)
          * request.  Otherwise two siblings could generate a loop if
          * both have a stale version of the object.
          */
-        r->flags.needValidation = 1;
+        r->flags.needValidation = true;

         if (e->lastmod < 0) {
             debugs(88, 3, "validate HIT object? NO. Missing Last-Modified header. Do MISS.");
@@ -734,7 +734,7 @@ clientReplyContext::processConditional(StoreIOBuffer &result)
     if (r.header.has(HDR_IF_NONE_MATCH)) {
         if (!e->hasIfNoneMatchEtag(r)) {
             // RFC 2616: ignore IMS if If-None-Match did not match
-            r.flags.ims = 0;
+            r.flags.ims = false;
             r.ims = -1;
             r.imslen = 0;
             r.header.delById(HDR_IF_MODIFIED_SINCE);
@@ -1407,7 +1407,7 @@ clientReplyContext::buildReplyHeader()
                 hdr->delAt(pos, connection_auth_blocked);
                 continue;
             }
-            request->flags.mustKeepalive = 1;
+            request->flags.mustKeepalive = true;
             if (!request->flags.accelerated && !request->flags.intercepted) {
                 httpHeaderPutStrf(hdr, HDR_PROXY_SUPPORT, "Session-Based-Authentication");
                 /*
@@ -1463,30 +1463,30 @@ clientReplyContext::buildReplyHeader()
     /* Check whether we should send keep-alive */
     if (!Config.onoff.error_pconns && reply->sline.status >= 400 && !request->flags.mustKeepalive) {
         debugs(33, 3, "clientBuildReplyHeader: Error, don't keep-alive");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (!Config.onoff.client_pconns && !request->flags.mustKeepalive) {
         debugs(33, 2, "clientBuildReplyHeader: Connection Keep-Alive not requested by admin or client");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (request->flags.proxyKeepalive && shutting_down) {
         debugs(88, 3, "clientBuildReplyHeader: Shutting down, don't keep-alive.");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (request->flags.connectionAuth && !reply->keep_alive) {
         debugs(33, 2, "clientBuildReplyHeader: Connection oriented auth but server side non-persistent");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (reply->bodySize(request->method) < 0 && !maySendChunkedReply) {
         debugs(88, 3, "clientBuildReplyHeader: can't keep-alive, unknown body size" );
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (fdUsageHigh()&& !request->flags.mustKeepalive) {
         debugs(88, 3, "clientBuildReplyHeader: Not many unused FDs, can't keep-alive");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (request->flags.sslBumped && !reply->persistent()) {
         // We do not really have to close, but we pretend we are a tunnel.
         debugs(88, 3, "clientBuildReplyHeader: bumped reply forces close");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (request->pinnedConnection() && !reply->persistent()) {
         // The peer wants to close the pinned connection
         debugs(88, 3, "pinned reply forces close");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     }

     // Decide if we send chunked reply
@@ -1494,7 +1494,7 @@ clientReplyContext::buildReplyHeader()
         request->flags.proxyKeepalive &&
         reply->bodySize(request->method) < 0) {
         debugs(88, 3, "clientBuildReplyHeader: chunked reply");
-        request->flags.chunkedReply = 1;
+        request->flags.chunkedReply = true;
         hdr->putStr(HDR_TRANSFER_ENCODING, "chunked");
     }

@@ -1824,7 +1824,7 @@ clientReplyContext::sendStreamError(StoreIOBuffer const &result)
     debugs(88, 5, "clientReplyContext::sendStreamError: A stream error has occured, marking as complete and sending no data.");
     StoreIOBuffer localTempBuffer;
     flags.complete = 1;
-    http->request->flags.streamError = 1;
+    http->request->flags.streamError = true;
     localTempBuffer.flags.error = result.flags.error;
     clientStreamCallback((clientStreamNode*)http->client_stream.head->data, http, NULL, localTempBuffer);
diff --git a/src/client_side_request.cc b/src/client_side_request.cc
index fb793f910e..c00bf07a62 100644
--- a/src/client_side_request.cc
+++ b/src/client_side_request.cc
@@ -392,7 +392,7 @@ clientBeginRequest(const HttpRequestMethod& method, char const *url, CSCB * stre
      */
     request->flags.accelerated = http->flags.accel;
-    request->flags.internalClient = 1;
+    request->flags.internalClient = true;

     /* this is an internally created
      * request, not subject to acceleration
@@ -537,7 +537,7 @@ clientFollowXForwardedForCheck(allow_t answer, void *data)
             conn->log_addr = request->indirect_client_addr;
     }
     request->x_forwarded_for_iterator.clean();
-    request->flags.done_follow_x_forwarded_for=true;
+    request->flags.done_follow_x_forwarded_for = true;

     if (answer != ACCESS_ALLOWED && answer != ACCESS_DENIED) {
         debugs(28, DBG_CRITICAL, "ERROR: Processing X-Forwarded-For. Stopping at IP address: " << request->indirect_client_addr );
@@ -568,7 +568,7 @@ ClientRequestContext::hostHeaderIpVerify(const ipcache_addrs* ia, const DnsLooku
         for (int i = 0; i < ia->count; ++i) {
             if (clientConn->local.matchIPAddr(ia->in_addrs[i]) == 0) {
                 debugs(85, 3, HERE << "validate IP " << clientConn->local << " possible from Host:");
-                http->request->flags.hostVerified = 1;
+                http->request->flags.hostVerified = true;
                 http->doCallouts();
                 return;
             }
@@ -590,9 +590,9 @@ ClientRequestContext::hostHeaderVerifyFailed(const char *A, const char *B)
         // NP: it is tempting to use 'flags.noCache' but that is all about READing cache data.
         // The problems here are about WRITE for new cache content, which means flags.cachable
-        http->request->flags.cachable = 0; // MUST NOT cache (for now)
+        http->request->flags.cachable = false; // MUST NOT cache (for now)
         // XXX: when we have updated the cache key to base on raw-IP + URI this cacheable limit can go.
-        http->request->flags.hierarchical = 0; // MUST NOT pass to peers (for now)
+        http->request->flags.hierarchical = false; // MUST NOT pass to peers (for now)
         // XXX: when we have sorted out the best way to relay requests properly to peers this hierarchical limit can go.
         http->doCallouts();
         return;
@@ -702,7 +702,7 @@ ClientRequestContext::hostHeaderVerify()
     } else {
         // Okay no problem.
         debugs(85, 3, HERE << "validate passed.");
-        http->request->flags.hostVerified = 1;
+        http->request->flags.hostVerified = true;
         http->doCallouts();
     }
     safe_free(hostB);
@@ -990,10 +990,10 @@ clientCheckPinning(ClientHttpRequest * http)
     if (!request->flags.connectionAuthDisabled) {
         if (Comm::IsConnOpen(http_conn->pinning.serverConnection)) {
             if (http_conn->pinning.auth) {
-                request->flags.connectionAuth = 1;
-                request->flags.auth = 1;
+                request->flags.connectionAuth = true;
+                request->flags.auth = true;
             } else {
-                request->flags.connectionProxyAuth = 1;
+                request->flags.connectionProxyAuth = true;
             }
             // These should already be linked correctly.
             assert(request->clientConnectionManager == http_conn);
@@ -1019,10 +1019,10 @@ clientCheckPinning(ClientHttpRequest * http)
                         || strncasecmp(value, "Kerberos ", 9) == 0) {
                     if (e->id == HDR_AUTHORIZATION) {
-                        request->flags.connectionAuth = 1;
+                        request->flags.connectionAuth = true;
                         may_pin = 1;
                     } else {
-                        request->flags.connectionProxyAuth = 1;
+                        request->flags.connectionProxyAuth = true;
                         may_pin = 1;
                     }
                 }
@@ -1048,7 +1048,7 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
     request->ims = req_hdr->getTime(HDR_IF_MODIFIED_SINCE);

     if (request->ims > 0)
-        request->flags.ims = 1;
+        request->flags.ims = true;

     if (!request->flags.ignoreCc) {
         if (request->cache_control) {
@@ -1090,13 +1090,13 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
 #if USE_HTTP_VIOLATIONS

         if (Config.onoff.reload_into_ims)
-            request->flags.nocacheHack = 1;
+            request->flags.nocacheHack = true;
         else if (refresh_nocache_hack)
-            request->flags.nocacheHack = 1;
+            request->flags.nocacheHack = true;
         else
 #endif
-            request->flags.noCache = 1;
+            request->flags.noCache = true;
     }

     /* ignore range header in non-GETs or non-HEADs */
@@ -1106,7 +1106,7 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
         request->range = req_hdr->getRange();

         if (request->range) {
-            request->flags.isRanged=true;
+            request->flags.isRanged = true;
             clientStreamNode *node = (clientStreamNode *)http->client_stream.tail->data;
             /* XXX: This is suboptimal. We should give the stream the range set,
              * and thereby let the top of the stream set the offset when the
@@ -1132,12 +1132,12 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
     }

     if (req_hdr->has(HDR_AUTHORIZATION))
-        request->flags.auth = 1;
+        request->flags.auth = true;

     clientCheckPinning(http);

     if (request->login[0] != '\0')
-        request->flags.auth = 1;
+        request->flags.auth = true;

     if (req_hdr->has(HDR_VIA)) {
         String s = req_hdr->getList(HDR_VIA);
@@ -1150,7 +1150,7 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
         if (strListIsSubstr(&s, ThisCache2, ',')) {
             debugObj(33, 1, "WARNING: Forwarding loop detected for:\n",
                      request, (ObjPackMethod) & httpRequestPack);
-            request->flags.loopDetected = 1;
+            request->flags.loopDetected = true;
         }

 #if USE_FORW_VIA_DB
@@ -1174,7 +1174,7 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
     request->flags.cachable = http->request->maybeCacheable();

     if (clientHierarchical(http))
-        request->flags.hierarchical = 1;
+        request->flags.hierarchical = true;

     debugs(85, 5, "clientInterpretRequestHeaders: REQ_NOCACHE = " << (request->flags.noCache ? "SET" : "NOT SET"));
@@ -1278,7 +1278,7 @@ ClientRequestContext::clientRedirectDone(const HelperReply &reply)
             debugs(61,2, HERE << "URL-rewriter diverts URL from " << urlCanonical(old_request) << " to " << urlCanonical(new_request));

             // update the new request to flag the re-writing was done on it
-            new_request->flags.redirected = 1;
+            new_request->flags.redirected = true;

             // unlink bodypipe from the old request. Not needed there any longer.
             if (old_request->body_pipe != NULL) {
diff --git a/src/errorpage.cc b/src/errorpage.cc
index 58837c358e..69c70489b5 100644
--- a/src/errorpage.cc
+++ b/src/errorpage.cc
@@ -631,7 +631,7 @@ errorAppendEntry(StoreEntry * entry, ErrorState * err)
     if (err->page_id == TCP_RESET) {
         if (err->request) {
             debugs(4, 2, "RSTing this reply");
-            err->request->flags.resetTcp=true;
+            err->request->flags.resetTcp = true;
         }
     }
diff --git a/src/forward.cc b/src/forward.cc
index 084855b568..0b11e0af47 100644
--- a/src/forward.cc
+++ b/src/forward.cc
@@ -204,12 +204,12 @@ FwdState::selectPeerForIntercepted()
 void
 FwdState::completed()
 {
-    if (flags.forward_completed == 1) {
+    if (flags.forward_completed) {
         debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
         return;
     }

-    flags.forward_completed = 1;
+    flags.forward_completed = true;

     if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
         debugs(17, 3, HERE << "entry aborted");
@@ -1107,7 +1107,7 @@ FwdState::connectStart()
         return;
     }

-    request->flags.pinned = 0; // XXX: what if the ConnStateData set this to flag existing credentials?
+    request->flags.pinned = false; // XXX: what if the ConnStateData set this to flag existing credentials?
     // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
     // XXX: also, logs will now lie if pinning is broken and leads to an error message.
     if (serverDestinations[0]->peerType == PINNED) {
@@ -1125,9 +1125,9 @@ FwdState::connectStart()
         serverConn->peerType = HIER_DIRECT;
 #endif
         ++n_tries;
-        request->flags.pinned = 1;
+        request->flags.pinned = true;
         if (pinned_connection->pinnedAuth())
-            request->flags.auth = 1;
+            request->flags.auth = true;
         comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
         // the server may close the pinned connection before this request
         pconnRace = racePossible;
@@ -1323,7 +1323,7 @@ FwdState::dispatch()
         ErrorState *anErr = new ErrorState(ERR_UNSUP_REQ, HTTP_BAD_REQUEST, request);
         fail(anErr);
         // Set the dont_retry flag because this is not a transient (network) error.
-        flags.dont_retry = 1;
+        flags.dont_retry = true;
         if (Comm::IsConnOpen(serverConn)) {
             serverConn->close();
         }
diff --git a/src/forward.h b/src/forward.h
index 73e359b210..aad16c21ac 100644
--- a/src/forward.h
+++ b/src/forward.h
@@ -128,9 +128,9 @@ private:
     } calls;

     struct {
-        unsigned int connected_okay:1; ///< TCP link ever opened properly. This affects retry of POST,PUT,CONNECT,etc
-        unsigned int dont_retry:1;
-        unsigned int forward_completed:1;
+        bool connected_okay; ///< TCP link ever opened properly. This affects retry of POST,PUT,CONNECT,etc
+        bool dont_retry;
+        bool forward_completed;
     } flags;

     /** connections to open, in order, until successful */
diff --git a/src/http.cc b/src/http.cc
index b6eb1908d8..d8a6050b3c 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -122,7 +122,7 @@ HttpStateData::HttpStateData(FwdState *theFwdState) : AsyncJob("HttpStateData"),
     _peer = cbdataReference(fwd->serverConnection()->getPeer());         /* might be NULL */

     if (_peer) {
-        request->flags.proxying = 1;
+        request->flags.proxying = true;
         /*
          * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
          * We might end up getting the object from somewhere else if,
@@ -752,7 +752,7 @@ HttpStateData::processReplyHeader()
     }

     if (!peerSupportsConnectionPinning())
-        request->flags.connectionAuthDisabled = 1;
+        request->flags.connectionAuthDisabled = true;

     HttpReply *vrep = setVirginReply(newrep);
     flags.headers_parsed = true;
@@ -1442,7 +1442,7 @@ HttpStateData::processReplyBody()
             if (ispinned && request->clientConnectionManager.valid()) {
                 request->clientConnectionManager->pinConnection(serverConnection, request, _peer,
-                        (request->flags.connectionAuth != 0));
+                        (request->flags.connectionAuth));
             } else {
                 fwd->pconnPush(serverConnection, request->peer_host ? request->peer_host : request->GetHost());
             }
@@ -1691,11 +1691,11 @@ HttpStateData::httpBuildRequestHeader(HttpRequest * request,
      */
     if (!we_do_ranges && request->multipartRangeRequest()) {
         /* don't cache the result */
-        request->flags.cachable = 0;
+        request->flags.cachable = false;
         /* pretend it's not a range request */
         delete request->range;
         request->range = NULL;
-        request->flags.isRanged=false;
+        request->flags.isRanged = false;
     }

     /* append Via */
@@ -2065,9 +2065,9 @@ HttpStateData::buildRequestPrefix(MemBuf * mb)
     httpBuildRequestHeader(request, entry, fwd->al, &hdr, flags);

     if (request->flags.pinned && request->flags.connectionAuth)
-        request->flags.authSent = 1;
+        request->flags.authSent = true;
     else if (hdr.has(HDR_AUTHORIZATION))
-        request->flags.authSent = 1;
+        request->flags.authSent = true;

     packerToMemInit(&p, mb);
     hdr.packInto(&p);
diff --git a/src/icmp/net_db.cc b/src/icmp/net_db.cc
index 4df0c0beaf..c289e0a828 100644
--- a/src/icmp/net_db.cc
+++ b/src/icmp/net_db.cc
@@ -1344,7 +1344,7 @@ netdbExchangeStart(void *data)
     tempBuffer.data = ex->buf;
     storeClientCopy(ex->sc, ex->e, tempBuffer, netdbExchangeHandleReply, ex);
-    ex->r->flags.loopDetected = 1;	/* cheat! -- force direct */
+    ex->r->flags.loopDetected = true;	/* cheat! -- force direct */

     if (p->login)
         xstrncpy(ex->r->login, p->login, MAX_LOGIN_SZ);
diff --git a/src/mime.cc b/src/mime.cc
index 6b3cb5a04d..b20d50b33c 100644
--- a/src/mime.cc
+++ b/src/mime.cc
@@ -455,7 +455,7 @@ MimeIcon::created (StoreEntry *newEntry)
         return;
     }

-    flags.cachable = 1;
+    flags.cachable = true;
     StoreEntry *e = storeCreateEntry(url,url,flags,Http::METHOD_GET);
     assert(e != NULL);
     EBIT_SET(e->flags, ENTRY_SPECIAL);
diff --git a/src/peer_digest.cc b/src/peer_digest.cc
index 5be917c9a0..3cfe453178 100644
--- a/src/peer_digest.cc
+++ b/src/peer_digest.cc
@@ -370,10 +370,10 @@ peerDigestRequest(PeerDigest * pd)
     pd_last_req_time = squid_curtime;

-    req->flags.cachable = 1;
+    req->flags.cachable = true;

     /* the rest is based on clientProcessExpired() */

-    req->flags.refresh = 1;
+    req->flags.refresh = true;

     old_e = fetch->old_entry = Store::Root().get(key);
diff --git a/src/refresh.cc b/src/refresh.cc
index fe92ca6705..cf5d9fd966 100644
--- a/src/refresh.cc
+++ b/src/refresh.cc
@@ -293,7 +293,7 @@ refreshCheck(const StoreEntry * entry, HttpRequest * request, time_t delta)
             entry->mem_obj->getReply()->cache_control->staleIfError() < staleness) {
             debugs(22, 3, "refreshCheck: stale-if-error period expired.");
-            request->flags.failOnValidationError = 1;
+            request->flags.failOnValidationError = true;
         }

         if (EBIT_TEST(entry->flags, ENTRY_REVALIDATE) && staleness > -1
@@ -303,7 +303,7 @@ refreshCheck(const StoreEntry * entry, HttpRequest * request, time_t delta)
            ) {
             debugs(22, 3, "refreshCheck: YES: Must revalidate stale response");
             if (request)
-                request->flags.failOnValidationError = 1;
+                request->flags.failOnValidationError = true;
             return STALE_MUST_REVALIDATE;
         }
@@ -331,7 +331,7 @@ refreshCheck(const StoreEntry * entry, HttpRequest * request, time_t delta)
             } else {
                 /* The clients no-cache header is not overridden on this request */
                 debugs(22, 3, "refreshCheck: YES: client reload");
-                request->flags.noCache = 1;
+                request->flags.noCache = true;
                 return STALE_FORCED_RELOAD;
             }
@@ -398,7 +398,7 @@ refreshCheck(const StoreEntry * entry, HttpRequest * request, time_t delta)
         if ( max_stale >= 0 && staleness > max_stale) {
             debugs(22, 3, "refreshCheck: YES: max-stale limit");
             if (request)
-                request->flags.failOnValidationError = 1;
+                request->flags.failOnValidationError = true;
             return STALE_MAX_STALE;
         }
diff --git a/src/store_digest.cc b/src/store_digest.cc
index 8f94ac0b26..74e902e9e0 100644
--- a/src/store_digest.cc
+++ b/src/store_digest.cc
@@ -392,7 +392,7 @@ storeDigestRewriteStart(void *datanotused)
     debugs(71, 2, "storeDigestRewrite: start rewrite #" << sd_state.rewrite_count + 1);
     /* make new store entry */
     url = internalLocalUri("/squid-internal-periodic/", StoreDigestFileName);
-    flags.cachable = 1;
+    flags.cachable = true;
     e = storeCreateEntry(url, url, flags, Http::METHOD_GET);
     assert(e);
     sd_state.rewrite_lock = e;
diff --git a/src/tests/testCoss.cc b/src/tests/testCoss.cc
index d2cabb711f..7fe22ed25f 100644
--- a/src/tests/testCoss.cc
+++ b/src/tests/testCoss.cc
@@ -190,7 +190,7 @@ testCoss::testCossSearch()
     {
         /* Create "vary" base object */
         RequestFlags flags;
-        flags.cachable = 1;
+        flags.cachable = true;
         StoreEntry *pe = storeCreateEntry("dummy url", "dummy log url", flags, METHOD_GET);
         HttpReply *rep = (HttpReply *) pe->getReply();	// bypass const
         rep->setHeaders(HTTP_OK, "dummy test object", "x-squid-internal/test", -1, -1, squid_curtime + 100000);
diff --git a/src/tests/testRock.cc b/src/tests/testRock.cc
index f935ae0d93..84481fe823 100644
--- a/src/tests/testRock.cc
+++ b/src/tests/testRock.cc
@@ -172,7 +172,7 @@ StoreEntry *
 testRock::createEntry(const int i)
 {
     RequestFlags flags;
-    flags.cachable = 1;
+    flags.cachable = true;
     char url[64];
     snprintf(url, sizeof(url), "dummy url %i", i);
     url[sizeof(url) - 1] = '\0';
diff --git a/src/tests/testUfs.cc b/src/tests/testUfs.cc
index 7c929d6782..22c8a50d6b 100644
--- a/src/tests/testUfs.cc
+++ b/src/tests/testUfs.cc
@@ -142,7 +142,7 @@ testUfs::testUfsSearch()
     {
         /* Create "vary" base object */
         RequestFlags flags;
-        flags.cachable = 1;
+        flags.cachable = true;
         StoreEntry *pe = storeCreateEntry("dummy url", "dummy log url", flags, Http::METHOD_GET);
         HttpReply *rep = (HttpReply *) pe->getReply();	// bypass const
         rep->setHeaders(HTTP_OK, "dummy test object", "x-squid-internal/test", -1, -1, squid_curtime + 100000);
diff --git a/src/tunnel.cc b/src/tunnel.cc
index 670194dfaf..7aacec1f8e 100644
--- a/src/tunnel.cc
+++ b/src/tunnel.cc
@@ -614,10 +614,10 @@ tunnelConnectDone(const Comm::ConnectionPointer &conn, comm_err_t status, int xe
     debugs(26, 4, HERE << "determine post-connect handling pathway.");
     if (conn->getPeer()) {
         tunnelState->request->peer_login = conn->getPeer()->login;
-        tunnelState->request->flags.proxying = (conn->getPeer()->options.originserver?0:1);
+        tunnelState->request->flags.proxying = !(conn->getPeer()->options.originserver);
     } else {
         tunnelState->request->peer_login = NULL;
-        tunnelState->request->flags.proxying = 0;
+        tunnelState->request->flags.proxying = false;
     }

     if (tunnelState->request->flags.proxying)
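
The sketch below illustrates the two patterns this commit applies: flag members declared as plain `bool` instead of one-bit bitfields (as in `FwdState::flags`), and assignments written with `true`/`false` instead of `1`/`0`. It is a minimal stand-alone example under assumed, simplified names; `RequestFlagsSketch` and `FwdFlagsSketch` are illustrative stand-ins, not the actual Squid `RequestFlags` or `FwdState` declarations.

```cpp
// Illustrative only: simplified stand-ins for the Squid flag structures.
#include <iostream>

struct RequestFlagsSketch {
    // After the change: plain bool members instead of int-valued flags.
    bool proxyKeepalive = false;
    bool mustKeepalive = false;
    bool cachable = false;
};

struct FwdFlagsSketch {
    // Before: unsigned int connected_okay:1, dont_retry:1, forward_completed:1;
    // After: plain bools, so reads and writes are boolean expressions.
    bool connected_okay = false;
    bool dont_retry = false;
    bool forward_completed = false;
};

int main() {
    RequestFlagsSketch flags;
    flags.proxyKeepalive = true;    // was: flags.proxyKeepalive = 1;
    flags.mustKeepalive = false;    // was: flags.mustKeepalive = 0;

    FwdFlagsSketch fwd;
    fwd.forward_completed = true;   // was: flags.forward_completed = 1;

    // Tests no longer compare against integer constants.
    if (fwd.forward_completed)      // was: if (flags.forward_completed == 1)
        std::cout << "already completed\n";
    return 0;
}
```

The same style change lets expressions that already yield a `bool` be assigned directly, which is why `request->flags.proxyKeepalive = request->persistent() ? 1 : 0;` collapses to `request->flags.proxyKeepalive = request->persistent();` in the client_side.cc hunk above.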