git.ipfire.org Git - thirdparty/squid.git/commitdiff
Changed clients of RequestFlags to use bool constants; changed FwdState.flags to...
author Francesco Chemolli <kinkie@squid-cache.org>
Mon, 28 Jan 2013 16:56:05 +0000 (17:56 +0100)
committer Francesco Chemolli <kinkie@squid-cache.org>
Mon, 28 Jan 2013 16:56:05 +0000 (17:56 +0100)
21 files changed:
src/acl/DestinationIp.cc
src/auth/negotiate/UserRequest.cc
src/auth/negotiate/auth_negotiate.cc
src/auth/ntlm/UserRequest.cc
src/auth/ntlm/auth_ntlm.cc
src/client_side.cc
src/client_side_reply.cc
src/client_side_request.cc
src/errorpage.cc
src/forward.cc
src/forward.h
src/http.cc
src/icmp/net_db.cc
src/mime.cc
src/peer_digest.cc
src/refresh.cc
src/store_digest.cc
src/tests/testCoss.cc
src/tests/testRock.cc
src/tests/testUfs.cc
src/tunnel.cc

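The pattern throughout the diff below is mechanical: call sites that assigned the integer constants 0/1 to RequestFlags members now assign false/true, and FwdState's one-bit bitfield flags (src/forward.h) become plain bool members. A minimal, self-contained sketch of the before/after pattern (the struct names here are illustrative, not Squid's actual definitions; the member names follow the forward.h hunk):

#include <iostream>

// Before: flags packed as one-bit unsigned bitfields, set with 0/1.
struct OldStyleFlags {
    unsigned int connected_okay:1;
    unsigned int dont_retry:1;
    unsigned int forward_completed:1;
};

// After: plain bool members, set with true/false at every call site.
struct NewStyleFlags {
    bool connected_okay;
    bool dont_retry;
    bool forward_completed;
};

int main()
{
    OldStyleFlags oldFlags = {};
    oldFlags.forward_completed = 1;        // old call-site style: integer constant

    NewStyleFlags newFlags = {};
    newFlags.forward_completed = true;     // new call-site style: bool constant

    std::cout << std::boolalpha
              << "old: " << static_cast<bool>(oldFlags.forward_completed)
              << ", new: " << newFlags.forward_completed << std::endl;
    return 0;
}
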
diff --git a/src/acl/DestinationIp.cc b/src/acl/DestinationIp.cc
index 68ede133a6018fe1e4cf9fa5609ffb51ca90f5c2..86825b74f4f1fe52fbaa19bbf87449e70821fa73 100644
@@ -114,7 +114,7 @@ DestinationIPLookup::LookupDone(const ipcache_addrs *, const DnsLookupDetails &d
 {
     ACLFilledChecklist *checklist = Filled((ACLChecklist*)data);
     assert (checklist->asyncState() == DestinationIPLookup::Instance());
-    checklist->request->flags.destinationIpLookedUp=true;
+    checklist->request->flags.destinationIpLookedUp = true;
     checklist->request->recordLookup(details);
     checklist->asyncInProgress(false);
     checklist->changeState (ACLChecklist::NullState::Instance());
diff --git a/src/auth/negotiate/UserRequest.cc b/src/auth/negotiate/UserRequest.cc
index 0f4a9566382e39549fcb19503a532f20c0c40b74..8e6e95322c980d28dcef79e714e034f23bce8e12 100644
@@ -266,7 +266,7 @@ Auth::Negotiate::UserRequest::HandleReply(void *data, const HelperReply &reply)
     case HelperReply::TT:
         /* we have been given a blob to send to the client */
         safe_free(lm_request->server_blob);
-        lm_request->request->flags.mustKeepalive = 1;
+        lm_request->request->flags.mustKeepalive = true;
         if (lm_request->request->flags.proxyKeepalive) {
             Note::Pointer tokenNote = reply.notes.find("token");
             lm_request->server_blob = xstrdup(tokenNote->firstValue());
diff --git a/src/auth/negotiate/auth_negotiate.cc b/src/auth/negotiate/auth_negotiate.cc
index 9710f58c33f0561bfe2070f30e4e86b72d6a9a41..7a8cc41e26dba0c0eca3432d319fce50b72e649f 100644
@@ -226,7 +226,7 @@ Auth::Negotiate::Config::fixHeader(Auth::UserRequest::Pointer auth_user_request,
         if (!keep_alive) {
             /* drop the connection */
             rep->header.delByName("keep-alive");
-            request->flags.proxyKeepalive = 0;
+            request->flags.proxyKeepalive = false;
         }
     } else {
         Auth::Negotiate::UserRequest *negotiate_request = dynamic_cast<Auth::Negotiate::UserRequest *>(auth_user_request.getRaw());
@@ -238,7 +238,7 @@ Auth::Negotiate::Config::fixHeader(Auth::UserRequest::Pointer auth_user_request,
             /* here it makes sense to drop the connection, as auth is
              * tied to it, even if MAYBE the client could handle it - Kinkie */
             rep->header.delByName("keep-alive");
-            request->flags.proxyKeepalive = 0;
+            request->flags.proxyKeepalive = false;
             /* fall through */
 
         case Auth::Ok:
diff --git a/src/auth/ntlm/UserRequest.cc b/src/auth/ntlm/UserRequest.cc
index a11b9abd8ce1742d2cda56c4e0afbd0ce728a4ce..eca993bf02a09b09177e82017d390ade15434223 100644
@@ -259,7 +259,7 @@ Auth::Ntlm::UserRequest::HandleReply(void *data, const HelperReply &reply)
     case HelperReply::TT:
         /* we have been given a blob to send to the client */
         safe_free(lm_request->server_blob);
-        lm_request->request->flags.mustKeepalive = 1;
+        lm_request->request->flags.mustKeepalive = true;
         if (lm_request->request->flags.proxyKeepalive) {
             Note::Pointer serverBlob = reply.notes.find("token");
             lm_request->server_blob = xstrdup(serverBlob->firstValue());
diff --git a/src/auth/ntlm/auth_ntlm.cc b/src/auth/ntlm/auth_ntlm.cc
index a25f726abf9b276ef834a046112d8cc9d9de9e4c..bada7d0658774496369add6bc1c5e059c805a606 100644
@@ -215,7 +215,7 @@ Auth::Ntlm::Config::fixHeader(Auth::UserRequest::Pointer auth_user_request, Http
 
         if (!keep_alive) {
             /* drop the connection */
-            request->flags.proxyKeepalive = 0;
+            request->flags.proxyKeepalive = false;
         }
     } else {
         Auth::Ntlm::UserRequest *ntlm_request = dynamic_cast<Auth::Ntlm::UserRequest *>(auth_user_request.getRaw());
@@ -226,7 +226,7 @@ Auth::Ntlm::Config::fixHeader(Auth::UserRequest::Pointer auth_user_request, Http
         case Auth::Failed:
             /* here it makes sense to drop the connection, as auth is
              * tied to it, even if MAYBE the client could handle it - Kinkie */
-            request->flags.proxyKeepalive = 0;
+            request->flags.proxyKeepalive = false;
             /* fall through */
 
         case Auth::Ok:
diff --git a/src/client_side.cc b/src/client_side.cc
index 919d243edea9178b7c7f57a6125f14b120f7a123..9c316eb48d9459ab8c565c2f0e7099753452c232 100644
@@ -863,7 +863,7 @@ clientSetKeepaliveFlag(ClientHttpRequest * http)
            RequestMethodStr(request->method));
 
     // TODO: move to HttpRequest::hdrCacheInit, just like HttpReply.
-    request->flags.proxyKeepalive = request->persistent() ? 1 : 0;
+    request->flags.proxyKeepalive = request->persistent();
 }
 
 static int
@@ -2489,7 +2489,7 @@ ConnStateData::quitAfterError(HttpRequest *request)
     // at the client-side, but many such errors do require closure and the
     // client-side code is bad at handling errors so we play it safe.
     if (request)
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     flags.readMore = false;
     debugs(33,4, HERE << "Will close after error: " << clientConnection);
 }
@@ -3931,7 +3931,7 @@ ConnStateData::switchToHttps(HttpRequest *request, Ssl::BumpMode bumpServerMode)
     // and now want to switch to SSL to send the error to the client
     // without even peeking at the origin server certificate.
     if (bumpServerMode == Ssl::bumpServerFirst && !sslServerBump) {
-        request->flags.sslPeek = 1;
+        request->flags.sslPeek = true;
         sslServerBump = new Ssl::ServerBump(request);
 
         // will call httpsPeeked() with certificate and connection, eventually
diff --git a/src/client_side_reply.cc b/src/client_side_reply.cc
index 17574dc76a874c1acaa7f7209ce81ea677ed70f3..146f1ca317f110b80bf481c1edadec2e1c4ff0d2 100644
@@ -129,7 +129,7 @@ void clientReplyContext::setReplyToError(const HttpRequestMethod& method, ErrorS
 {
     if (errstate->httpStatus == HTTP_NOT_IMPLEMENTED && http->request)
         /* prevent confusion over whether we default to persistent or not */
-        http->request->flags.proxyKeepalive = 0;
+        http->request->flags.proxyKeepalive = false;
 
     http->al->http.code = errstate->httpStatus;
 
@@ -273,7 +273,7 @@ clientReplyContext::processExpired()
         return;
     }
 
-    http->request->flags.refresh = 1;
+    http->request->flags.refresh = true;
 #if STORE_CLIENT_LIST_DEBUG
     /* Prevent a race with the store client memory free routines
      */
@@ -390,7 +390,7 @@ clientReplyContext::handleIMSReply(StoreIOBuffer result)
     // origin replied 304
     if (status == HTTP_NOT_MODIFIED) {
         http->logType = LOG_TCP_REFRESH_UNMODIFIED;
-        http->request->flags.staleIfHit = 0; // old_entry is no longer stale
+        http->request->flags.staleIfHit = false; // old_entry is no longer stale
 
         // update headers on existing entry
         old_rep->updateOnNotModified(http->storeEntry()->getReply());
@@ -558,7 +558,7 @@ clientReplyContext::cacheHit(StoreIOBuffer result)
          * request.  Otherwise two siblings could generate a loop if
          * both have a stale version of the object.
          */
-        r->flags.needValidation = 1;
+        r->flags.needValidation = true;
 
         if (e->lastmod < 0) {
             debugs(88, 3, "validate HIT object? NO. Missing Last-Modified header. Do MISS.");
@@ -734,7 +734,7 @@ clientReplyContext::processConditional(StoreIOBuffer &result)
     if (r.header.has(HDR_IF_NONE_MATCH)) {
         if (!e->hasIfNoneMatchEtag(r)) {
             // RFC 2616: ignore IMS if If-None-Match did not match
-            r.flags.ims = 0;
+            r.flags.ims = false;
             r.ims = -1;
             r.imslen = 0;
             r.header.delById(HDR_IF_MODIFIED_SINCE);
@@ -1407,7 +1407,7 @@ clientReplyContext::buildReplyHeader()
                         hdr->delAt(pos, connection_auth_blocked);
                         continue;
                     }
-                    request->flags.mustKeepalive = 1;
+                    request->flags.mustKeepalive = true;
                     if (!request->flags.accelerated && !request->flags.intercepted) {
                         httpHeaderPutStrf(hdr, HDR_PROXY_SUPPORT, "Session-Based-Authentication");
                         /*
@@ -1463,30 +1463,30 @@ clientReplyContext::buildReplyHeader()
     /* Check whether we should send keep-alive */
     if (!Config.onoff.error_pconns && reply->sline.status >= 400 && !request->flags.mustKeepalive) {
         debugs(33, 3, "clientBuildReplyHeader: Error, don't keep-alive");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (!Config.onoff.client_pconns && !request->flags.mustKeepalive) {
         debugs(33, 2, "clientBuildReplyHeader: Connection Keep-Alive not requested by admin or client");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (request->flags.proxyKeepalive && shutting_down) {
         debugs(88, 3, "clientBuildReplyHeader: Shutting down, don't keep-alive.");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (request->flags.connectionAuth && !reply->keep_alive) {
         debugs(33, 2, "clientBuildReplyHeader: Connection oriented auth but server side non-persistent");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (reply->bodySize(request->method) < 0 && !maySendChunkedReply) {
         debugs(88, 3, "clientBuildReplyHeader: can't keep-alive, unknown body size" );
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (fdUsageHigh()&& !request->flags.mustKeepalive) {
         debugs(88, 3, "clientBuildReplyHeader: Not many unused FDs, can't keep-alive");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (request->flags.sslBumped && !reply->persistent()) {
         // We do not really have to close, but we pretend we are a tunnel.
         debugs(88, 3, "clientBuildReplyHeader: bumped reply forces close");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     } else if (request->pinnedConnection() && !reply->persistent()) {
         // The peer wants to close the pinned connection
         debugs(88, 3, "pinned reply forces close");
-        request->flags.proxyKeepalive = 0;
+        request->flags.proxyKeepalive = false;
     }
 
     // Decide if we send chunked reply
@@ -1494,7 +1494,7 @@ clientReplyContext::buildReplyHeader()
             request->flags.proxyKeepalive &&
             reply->bodySize(request->method) < 0) {
         debugs(88, 3, "clientBuildReplyHeader: chunked reply");
-        request->flags.chunkedReply = 1;
+        request->flags.chunkedReply = true;
         hdr->putStr(HDR_TRANSFER_ENCODING, "chunked");
     }
 
@@ -1824,7 +1824,7 @@ clientReplyContext::sendStreamError(StoreIOBuffer const &result)
     debugs(88, 5, "clientReplyContext::sendStreamError: A stream error has occured, marking as complete and sending no data.");
     StoreIOBuffer localTempBuffer;
     flags.complete = 1;
-    http->request->flags.streamError = 1;
+    http->request->flags.streamError = true;
     localTempBuffer.flags.error = result.flags.error;
     clientStreamCallback((clientStreamNode*)http->client_stream.head->data, http, NULL,
                          localTempBuffer);
diff --git a/src/client_side_request.cc b/src/client_side_request.cc
index fb793f910ec833a69d2d767851679c43977f6741..c00bf07a625e27a9da427b1b6de88a3daec9727f 100644
@@ -392,7 +392,7 @@ clientBeginRequest(const HttpRequestMethod& method, char const *url, CSCB * stre
      */
     request->flags.accelerated = http->flags.accel;
 
-    request->flags.internalClient = 1;
+    request->flags.internalClient = true;
 
     /* this is an internally created
      * request, not subject to acceleration
@@ -537,7 +537,7 @@ clientFollowXForwardedForCheck(allow_t answer, void *data)
         conn->log_addr = request->indirect_client_addr;
     }
     request->x_forwarded_for_iterator.clean();
-    request->flags.done_follow_x_forwarded_for=true;
+    request->flags.done_follow_x_forwarded_for = true;
 
     if (answer != ACCESS_ALLOWED && answer != ACCESS_DENIED) {
         debugs(28, DBG_CRITICAL, "ERROR: Processing X-Forwarded-For. Stopping at IP address: " << request->indirect_client_addr );
@@ -568,7 +568,7 @@ ClientRequestContext::hostHeaderIpVerify(const ipcache_addrs* ia, const DnsLooku
         for (int i = 0; i < ia->count; ++i) {
             if (clientConn->local.matchIPAddr(ia->in_addrs[i]) == 0) {
                 debugs(85, 3, HERE << "validate IP " << clientConn->local << " possible from Host:");
-                http->request->flags.hostVerified = 1;
+                http->request->flags.hostVerified = true;
                 http->doCallouts();
                 return;
             }
@@ -590,9 +590,9 @@ ClientRequestContext::hostHeaderVerifyFailed(const char *A, const char *B)
 
         // NP: it is tempting to use 'flags.noCache' but that is all about READing cache data.
         // The problems here are about WRITE for new cache content, which means flags.cachable
-        http->request->flags.cachable = 0; // MUST NOT cache (for now)
+        http->request->flags.cachable = false; // MUST NOT cache (for now)
         // XXX: when we have updated the cache key to base on raw-IP + URI this cacheable limit can go.
-        http->request->flags.hierarchical = 0; // MUST NOT pass to peers (for now)
+        http->request->flags.hierarchical = false; // MUST NOT pass to peers (for now)
         // XXX: when we have sorted out the best way to relay requests properly to peers this hierarchical limit can go.
         http->doCallouts();
         return;
@@ -702,7 +702,7 @@ ClientRequestContext::hostHeaderVerify()
     } else {
         // Okay no problem.
         debugs(85, 3, HERE << "validate passed.");
-        http->request->flags.hostVerified = 1;
+        http->request->flags.hostVerified = true;
         http->doCallouts();
     }
     safe_free(hostB);
@@ -990,10 +990,10 @@ clientCheckPinning(ClientHttpRequest * http)
     if (!request->flags.connectionAuthDisabled) {
         if (Comm::IsConnOpen(http_conn->pinning.serverConnection)) {
             if (http_conn->pinning.auth) {
-                request->flags.connectionAuth = 1;
-                request->flags.auth = 1;
+                request->flags.connectionAuth = true;
+                request->flags.auth = true;
             } else {
-                request->flags.connectionProxyAuth = 1;
+                request->flags.connectionProxyAuth = true;
             }
             // These should already be linked correctly.
             assert(request->clientConnectionManager == http_conn);
@@ -1019,10 +1019,10 @@ clientCheckPinning(ClientHttpRequest * http)
                             ||
                             strncasecmp(value, "Kerberos ", 9) == 0) {
                         if (e->id == HDR_AUTHORIZATION) {
-                            request->flags.connectionAuth = 1;
+                            request->flags.connectionAuth = true;
                             may_pin = 1;
                         } else {
-                            request->flags.connectionProxyAuth = 1;
+                            request->flags.connectionProxyAuth = true;
                             may_pin = 1;
                         }
                     }
@@ -1048,7 +1048,7 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
     request->ims = req_hdr->getTime(HDR_IF_MODIFIED_SINCE);
 
     if (request->ims > 0)
-        request->flags.ims = 1;
+        request->flags.ims = true;
 
     if (!request->flags.ignoreCc) {
         if (request->cache_control) {
@@ -1090,13 +1090,13 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
 #if USE_HTTP_VIOLATIONS
 
         if (Config.onoff.reload_into_ims)
-            request->flags.nocacheHack = 1;
+            request->flags.nocacheHack = true;
         else if (refresh_nocache_hack)
-            request->flags.nocacheHack = 1;
+            request->flags.nocacheHack = true;
         else
 #endif
 
-            request->flags.noCache = 1;
+            request->flags.noCache = true;
     }
 
     /* ignore range header in non-GETs or non-HEADs */
@@ -1106,7 +1106,7 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
             request->range = req_hdr->getRange();
 
         if (request->range) {
-            request->flags.isRanged=true;
+            request->flags.isRanged = true;
             clientStreamNode *node = (clientStreamNode *)http->client_stream.tail->data;
             /* XXX: This is suboptimal. We should give the stream the range set,
              * and thereby let the top of the stream set the offset when the
@@ -1132,12 +1132,12 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
     }
 
     if (req_hdr->has(HDR_AUTHORIZATION))
-        request->flags.auth = 1;
+        request->flags.auth = true;
 
     clientCheckPinning(http);
 
     if (request->login[0] != '\0')
-        request->flags.auth = 1;
+        request->flags.auth = true;
 
     if (req_hdr->has(HDR_VIA)) {
         String s = req_hdr->getList(HDR_VIA);
@@ -1150,7 +1150,7 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
         if (strListIsSubstr(&s, ThisCache2, ',')) {
             debugObj(33, 1, "WARNING: Forwarding loop detected for:\n",
                      request, (ObjPackMethod) & httpRequestPack);
-            request->flags.loopDetected = 1;
+            request->flags.loopDetected = true;
         }
 
 #if USE_FORW_VIA_DB
@@ -1174,7 +1174,7 @@ clientInterpretRequestHeaders(ClientHttpRequest * http)
     request->flags.cachable = http->request->maybeCacheable();
 
     if (clientHierarchical(http))
-        request->flags.hierarchical = 1;
+        request->flags.hierarchical = true;
 
     debugs(85, 5, "clientInterpretRequestHeaders: REQ_NOCACHE = " <<
            (request->flags.noCache ? "SET" : "NOT SET"));
@@ -1278,7 +1278,7 @@ ClientRequestContext::clientRedirectDone(const HelperReply &reply)
                     debugs(61,2, HERE << "URL-rewriter diverts URL from " << urlCanonical(old_request) << " to " << urlCanonical(new_request));
 
                     // update the new request to flag the re-writing was done on it
-                    new_request->flags.redirected = 1;
+                    new_request->flags.redirected = true;
 
                     // unlink bodypipe from the old request. Not needed there any longer.
                     if (old_request->body_pipe != NULL) {
diff --git a/src/errorpage.cc b/src/errorpage.cc
index 58837c358efd8a8e3f92e785143a390208a73f3c..69c70489b58775271fa5d981e6b07036ef3ae9b7 100644
@@ -631,7 +631,7 @@ errorAppendEntry(StoreEntry * entry, ErrorState * err)
     if (err->page_id == TCP_RESET) {
         if (err->request) {
             debugs(4, 2, "RSTing this reply");
-            err->request->flags.resetTcp=true;
+            err->request->flags.resetTcp = true;
         }
     }
 
diff --git a/src/forward.cc b/src/forward.cc
index 084855b5682c16dae71f78acf9f0e5fc254f9e0c..0b11e0af477d3b5616ba09bd71bee9e646857877 100644
@@ -204,12 +204,12 @@ FwdState::selectPeerForIntercepted()
 void
 FwdState::completed()
 {
-    if (flags.forward_completed == 1) {
+    if (flags.forward_completed) {
         debugs(17, DBG_IMPORTANT, HERE << "FwdState::completed called on a completed request! Bad!");
         return;
     }
 
-    flags.forward_completed = 1;
+    flags.forward_completed = true;
 
     if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
         debugs(17, 3, HERE << "entry aborted");
@@ -1107,7 +1107,7 @@ FwdState::connectStart()
         return;
     }
 
-    request->flags.pinned = 0; // XXX: what if the ConnStateData set this to flag existing credentials?
+    request->flags.pinned = false; // XXX: what if the ConnStateData set this to flag existing credentials?
     // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
     // XXX: also, logs will now lie if pinning is broken and leads to an error message.
     if (serverDestinations[0]->peerType == PINNED) {
@@ -1125,9 +1125,9 @@ FwdState::connectStart()
                 serverConn->peerType = HIER_DIRECT;
 #endif
             ++n_tries;
-            request->flags.pinned = 1;
+            request->flags.pinned = true;
             if (pinned_connection->pinnedAuth())
-                request->flags.auth = 1;
+                request->flags.auth = true;
             comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
             // the server may close the pinned connection before this request
             pconnRace = racePossible;
@@ -1323,7 +1323,7 @@ FwdState::dispatch()
             ErrorState *anErr = new ErrorState(ERR_UNSUP_REQ, HTTP_BAD_REQUEST, request);
             fail(anErr);
             // Set the dont_retry flag because this is not a transient (network) error.
-            flags.dont_retry = 1;
+            flags.dont_retry = true;
             if (Comm::IsConnOpen(serverConn)) {
                 serverConn->close();
             }
diff --git a/src/forward.h b/src/forward.h
index 73e359b210524982d219fd2aa5f4ad0658e6c561..aad16c21ac8d2488cc545813e384dc6d589bfe39 100644
@@ -128,9 +128,9 @@ private:
     } calls;
 
     struct {
-        unsigned int connected_okay:1; ///< TCP link ever opened properly. This affects retry of POST,PUT,CONNECT,etc
-        unsigned int dont_retry:1;
-        unsigned int forward_completed:1;
+        bool connected_okay; ///< TCP link ever opened properly. This affects retry of POST,PUT,CONNECT,etc
+        bool dont_retry;
+        bool forward_completed;
     } flags;
 
     /** connections to open, in order, until successful */
diff --git a/src/http.cc b/src/http.cc
index b6eb1908d89634051cabf5df6e4d8342391ffcf8..d8a6050b3c8aabbea759feb07f8fe0b935eb0613 100644
@@ -122,7 +122,7 @@ HttpStateData::HttpStateData(FwdState *theFwdState) : AsyncJob("HttpStateData"),
         _peer = cbdataReference(fwd->serverConnection()->getPeer());         /* might be NULL */
 
     if (_peer) {
-        request->flags.proxying = 1;
+        request->flags.proxying = true;
         /*
          * This NEIGHBOR_PROXY_ONLY check probably shouldn't be here.
          * We might end up getting the object from somewhere else if,
@@ -752,7 +752,7 @@ HttpStateData::processReplyHeader()
     }
 
     if (!peerSupportsConnectionPinning())
-        request->flags.connectionAuthDisabled = 1;
+        request->flags.connectionAuthDisabled = true;
 
     HttpReply *vrep = setVirginReply(newrep);
     flags.headers_parsed = true;
@@ -1442,7 +1442,7 @@ HttpStateData::processReplyBody()
 
             if (ispinned && request->clientConnectionManager.valid()) {
                 request->clientConnectionManager->pinConnection(serverConnection, request, _peer,
-                        (request->flags.connectionAuth != 0));
+                        (request->flags.connectionAuth));
             } else {
                 fwd->pconnPush(serverConnection, request->peer_host ? request->peer_host : request->GetHost());
             }
@@ -1691,11 +1691,11 @@ HttpStateData::httpBuildRequestHeader(HttpRequest * request,
      */
     if (!we_do_ranges && request->multipartRangeRequest()) {
         /* don't cache the result */
-        request->flags.cachable = 0;
+        request->flags.cachable = false;
         /* pretend it's not a range request */
         delete request->range;
         request->range = NULL;
-        request->flags.isRanged=false;
+        request->flags.isRanged = false;
     }
 
     /* append Via */
@@ -2065,9 +2065,9 @@ HttpStateData::buildRequestPrefix(MemBuf * mb)
         httpBuildRequestHeader(request, entry, fwd->al, &hdr, flags);
 
         if (request->flags.pinned && request->flags.connectionAuth)
-            request->flags.authSent = 1;
+            request->flags.authSent = true;
         else if (hdr.has(HDR_AUTHORIZATION))
-            request->flags.authSent = 1;
+            request->flags.authSent = true;
 
         packerToMemInit(&p, mb);
         hdr.packInto(&p);
diff --git a/src/icmp/net_db.cc b/src/icmp/net_db.cc
index 4df0c0beaf167890a113a409a650976b90fb8f21..c289e0a8282b45a4abb2e4f3199ccd136272917d 100644
@@ -1344,7 +1344,7 @@ netdbExchangeStart(void *data)
     tempBuffer.data = ex->buf;
     storeClientCopy(ex->sc, ex->e, tempBuffer,
                     netdbExchangeHandleReply, ex);
-    ex->r->flags.loopDetected = 1;     /* cheat! -- force direct */
+    ex->r->flags.loopDetected = true;  /* cheat! -- force direct */
 
     if (p->login)
         xstrncpy(ex->r->login, p->login, MAX_LOGIN_SZ);
diff --git a/src/mime.cc b/src/mime.cc
index 6b3cb5a04d924a548eaa2b0775ba97fcc21e121a..b20d50b33cc44fd03b7822e82d3ee7efce1e255e 100644
@@ -455,7 +455,7 @@ MimeIcon::created (StoreEntry *newEntry)
         return;
     }
 
-    flags.cachable = 1;
+    flags.cachable = true;
     StoreEntry *e = storeCreateEntry(url,url,flags,Http::METHOD_GET);
     assert(e != NULL);
     EBIT_SET(e->flags, ENTRY_SPECIAL);
diff --git a/src/peer_digest.cc b/src/peer_digest.cc
index 5be917c9a0c1eb788e9ebb9bf04bfdb575883c6d..3cfe453178e7dd80caaac58773b98bbc27164d86 100644
@@ -370,10 +370,10 @@ peerDigestRequest(PeerDigest * pd)
 
     pd_last_req_time = squid_curtime;
 
-    req->flags.cachable = 1;
+    req->flags.cachable = true;
 
     /* the rest is based on clientProcessExpired() */
-    req->flags.refresh = 1;
+    req->flags.refresh = true;
 
     old_e = fetch->old_entry = Store::Root().get(key);
 
diff --git a/src/refresh.cc b/src/refresh.cc
index fe92ca6705d5abb7e3136570c66e0c2cc7d3d1eb..cf5d9fd9666d3ed55306e80293b04baa91894cc9 100644
@@ -293,7 +293,7 @@ refreshCheck(const StoreEntry * entry, HttpRequest * request, time_t delta)
             entry->mem_obj->getReply()->cache_control->staleIfError() < staleness) {
 
         debugs(22, 3, "refreshCheck: stale-if-error period expired.");
-        request->flags.failOnValidationError = 1;
+        request->flags.failOnValidationError = true;
     }
 
     if (EBIT_TEST(entry->flags, ENTRY_REVALIDATE) && staleness > -1
@@ -303,7 +303,7 @@ refreshCheck(const StoreEntry * entry, HttpRequest * request, time_t delta)
        ) {
         debugs(22, 3, "refreshCheck: YES: Must revalidate stale response");
         if (request)
-            request->flags.failOnValidationError = 1;
+            request->flags.failOnValidationError = true;
         return STALE_MUST_REVALIDATE;
     }
 
@@ -331,7 +331,7 @@ refreshCheck(const StoreEntry * entry, HttpRequest * request, time_t delta)
         } else {
             /* The clients no-cache header is not overridden on this request */
             debugs(22, 3, "refreshCheck: YES: client reload");
-            request->flags.noCache = 1;
+            request->flags.noCache = true;
             return STALE_FORCED_RELOAD;
         }
 
@@ -398,7 +398,7 @@ refreshCheck(const StoreEntry * entry, HttpRequest * request, time_t delta)
     if ( max_stale >= 0 && staleness > max_stale) {
         debugs(22, 3, "refreshCheck: YES: max-stale limit");
         if (request)
-            request->flags.failOnValidationError = 1;
+            request->flags.failOnValidationError = true;
         return STALE_MAX_STALE;
     }
 
diff --git a/src/store_digest.cc b/src/store_digest.cc
index 8f94ac0b265dade3669adb43efc60cea92734dd0..74e902e9e03dd194be175344d6bc26032bdacdf8 100644
@@ -392,7 +392,7 @@ storeDigestRewriteStart(void *datanotused)
     debugs(71, 2, "storeDigestRewrite: start rewrite #" << sd_state.rewrite_count + 1);
     /* make new store entry */
     url = internalLocalUri("/squid-internal-periodic/", StoreDigestFileName);
-    flags.cachable = 1;
+    flags.cachable = true;
     e = storeCreateEntry(url, url, flags, Http::METHOD_GET);
     assert(e);
     sd_state.rewrite_lock = e;
diff --git a/src/tests/testCoss.cc b/src/tests/testCoss.cc
index d2cabb711f34157c3e6619e175fd71eb3248823e..7fe22ed25fc8418385a756a2f593835a80e75e62 100644
@@ -190,7 +190,7 @@ testCoss::testCossSearch()
     {
         /* Create "vary" base object */
         RequestFlags flags;
-        flags.cachable = 1;
+        flags.cachable = true;
         StoreEntry *pe = storeCreateEntry("dummy url", "dummy log url", flags, METHOD_GET);
         HttpReply *rep = (HttpReply *) pe->getReply(); // bypass const
         rep->setHeaders(HTTP_OK, "dummy test object", "x-squid-internal/test", -1, -1, squid_curtime + 100000);
diff --git a/src/tests/testRock.cc b/src/tests/testRock.cc
index f935ae0d9382d6d3ec1497fc313cc029c61f19e2..84481fe823704c81642801a1400eaa4db2ad9e6c 100644
@@ -172,7 +172,7 @@ StoreEntry *
 testRock::createEntry(const int i)
 {
     RequestFlags flags;
-    flags.cachable = 1;
+    flags.cachable = true;
     char url[64];
     snprintf(url, sizeof(url), "dummy url %i", i);
     url[sizeof(url) - 1] = '\0';
diff --git a/src/tests/testUfs.cc b/src/tests/testUfs.cc
index 7c929d6782cc09146d78948f8fb6029b2a5c45ce..22c8a50d6b10544dada7fa609ead30fbeb8141c7 100644
@@ -142,7 +142,7 @@ testUfs::testUfsSearch()
     {
         /* Create "vary" base object */
         RequestFlags flags;
-        flags.cachable = 1;
+        flags.cachable = true;
         StoreEntry *pe = storeCreateEntry("dummy url", "dummy log url", flags, Http::METHOD_GET);
         HttpReply *rep = (HttpReply *) pe->getReply(); // bypass const
         rep->setHeaders(HTTP_OK, "dummy test object", "x-squid-internal/test", -1, -1, squid_curtime + 100000);
diff --git a/src/tunnel.cc b/src/tunnel.cc
index 670194dfaf2f80588a40008a54e3624a811567ac..7aacec1f8e71351f94795a0bf66fe86711136f64 100644
@@ -614,10 +614,10 @@ tunnelConnectDone(const Comm::ConnectionPointer &conn, comm_err_t status, int xe
     debugs(26, 4, HERE << "determine post-connect handling pathway.");
     if (conn->getPeer()) {
         tunnelState->request->peer_login = conn->getPeer()->login;
-        tunnelState->request->flags.proxying = (conn->getPeer()->options.originserver?0:1);
+        tunnelState->request->flags.proxying = !(conn->getPeer()->options.originserver);
     } else {
         tunnelState->request->peer_login = NULL;
-        tunnelState->request->flags.proxying = 0;
+        tunnelState->request->flags.proxying = false;
     }
 
     if (tunnelState->request->flags.proxying)