From: Francesco Chemolli
Date: Sun, 2 Feb 2014 08:57:20 +0000 (+0100)
Subject: Remove layering violations in Vector users
X-Git-Tag: SQUID_3_5_0_1~379^2~17
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=4c9eadc2b69a21500ae43e34e0f7e82285cb85ea;p=thirdparty%2Fsquid.git

Remove layering violations in Vector users
---

diff --git a/lib/MemPoolMalloc.cc b/lib/MemPoolMalloc.cc
index 06f18516e3..bd25ccfe30 100644
--- a/lib/MemPoolMalloc.cc
+++ b/lib/MemPoolMalloc.cc
@@ -127,7 +127,7 @@ MemPoolMalloc::~MemPoolMalloc()
 bool
 MemPoolMalloc::idleTrigger(int shift) const
 {
-    return freelist.count >> (shift ? 8 : 0);
+    return freelist.size() >> (shift ? 8 : 0);
 }
 
 void
diff --git a/src/HttpHdrRange.cc b/src/HttpHdrRange.cc
index bea0c7a42c..206c4ad320 100644
--- a/src/HttpHdrRange.cc
+++ b/src/HttpHdrRange.cc
@@ -404,14 +404,14 @@ int
 HttpHdrRange::canonize (int64_t newClen)
 {
     clen = newClen;
-    debugs(64, 3, "HttpHdrRange::canonize: started with " << specs.count <<
+    debugs(64, 3, "HttpHdrRange::canonize: started with " << specs.size() <<
            " specs, clen: " << clen);
     Vector<HttpHdrRangeSpec*> goods;
     getCanonizedSpecs(goods);
     merge (goods);
-    debugs(64, 3, "HttpHdrRange::canonize: finished with " << specs.count <<
+    debugs(64, 3, "HttpHdrRange::canonize: finished with " << specs.size() <<
            " specs");
-    return specs.count > 0;
+    return specs.size() > 0; // fixme, should return bool
 }
 
 /* hack: returns true if range specs are too "complex" for Squid to handle */
diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
index bd6c0eb08e..f54e9c228f 100644
--- a/src/HttpHeader.cc
+++ b/src/HttpHeader.cc
@@ -454,12 +454,12 @@ HttpHeader::clean()
          * has been used. As a hack, just never count zero-sized header
          * arrays.
          */
-        if (0 != entries.count)
-            HttpHeaderStats[owner].hdrUCountDistr.count(entries.count);
+        if (!entries.empty())
+            HttpHeaderStats[owner].hdrUCountDistr.count(entries.size());
 
         ++ HttpHeaderStats[owner].destroyedCount;
 
-        HttpHeaderStats[owner].busyDestroyedCount += entries.count > 0;
+        HttpHeaderStats[owner].busyDestroyedCount += entries.size() > 0;
     } // if (owner <= hoReply)
 
     while ((e = getEntry(&pos))) {
@@ -748,11 +748,11 @@ HttpHeaderEntry *
 HttpHeader::getEntry(HttpHeaderPos * pos) const
 {
     assert(pos);
-    assert(*pos >= HttpHeaderInitPos && *pos < (ssize_t)entries.count);
+    assert(*pos >= HttpHeaderInitPos && *pos < (ssize_t)entries.size());
 
-    for (++(*pos); *pos < (ssize_t)entries.count; ++(*pos)) {
-        if (entries.items[*pos])
-            return (HttpHeaderEntry*)entries.items[*pos];
+    for (++(*pos); *pos < (ssize_t)entries.size(); ++(*pos)) {
+        if (entries[*pos])
+            return (HttpHeaderEntry*)entries[*pos];
     }
 
     return NULL;
@@ -871,9 +871,9 @@ void
 HttpHeader::delAt(HttpHeaderPos pos, int &headers_deleted)
 {
     HttpHeaderEntry *e;
-    assert(pos >= HttpHeaderInitPos && pos < (ssize_t)entries.count);
-    e = (HttpHeaderEntry*)entries.items[pos];
-    entries.items[pos] = NULL;
+    assert(pos >= HttpHeaderInitPos && pos < (ssize_t)entries.size());
+    e = (HttpHeaderEntry*)entries[pos];
+    entries[pos] = NULL;
     /* decrement header length, allow for ": " and crlf */
     len -= e->name.size() + 2 + e->value.size() + 2;
     assert(len >= 0);
@@ -914,7 +914,7 @@ HttpHeader::addEntry(HttpHeaderEntry * e)
     assert_eid(e->id);
     assert(e->name.size());
 
-    debugs(55, 7, HERE << this << " adding entry: " << e->id << " at " << entries.count);
+    debugs(55, 7, this << " adding entry: " << e->id << " at " << entries.size());
 
     if (CBIT_TEST(mask, e->id))
         ++ Headers[e->id].stat.repCount;
diff --git a/src/HttpRequest.cc b/src/HttpRequest.cc
index dc18b33515..f825ff4ccc 100644
--- a/src/HttpRequest.cc
+++ b/src/HttpRequest.cc
@@ -479,7 +479,7 @@ HttpRequest::adaptHistoryImport(const HttpRequest &them)
 bool
 HttpRequest::multipartRangeRequest() const
 {
-    return (range && range->specs.count > 1);
+    return (range && !range->specs.empty());
 }
 
 bool
diff --git a/src/adaptation/AccessCheck.cc b/src/adaptation/AccessCheck.cc
index 3e50fc67c8..19ee86346b 100644
--- a/src/adaptation/AccessCheck.cc
+++ b/src/adaptation/AccessCheck.cc
@@ -102,7 +102,7 @@ Adaptation::AccessCheck::check()
         AccessRule *r = *i;
         if (isCandidate(*r)) {
             debugs(93, 5, HERE << "check: rule '" << r->id << "' is a candidate");
-            candidates += r->id;
+            candidates.push_back(r->id);
         }
     }
 
diff --git a/src/adaptation/icap/Options.cc b/src/adaptation/icap/Options.cc
index e219a3751c..9524a1ec49 100644
--- a/src/adaptation/icap/Options.cc
+++ b/src/adaptation/icap/Options.cc
@@ -125,7 +125,7 @@ void Adaptation::Icap::Options::configure(const HttpReply *reply)
 void Adaptation::Icap::Options::cfgMethod(ICAP::Method m)
 {
     Must(m != ICAP::methodNone);
-    methods += m;
+    methods.push_back(m);
 }
 
 // TODO: HttpHeader should provide a general method for this type of conversion
diff --git a/src/base/Vector.h b/src/base/Vector.h
index 5f430955b0..69b7669942 100644
--- a/src/base/Vector.h
+++ b/src/base/Vector.h
@@ -115,6 +115,8 @@ public:
     const_iterator begin () const;
     iterator end();
     const_iterator end () const;
+    E& at(unsigned i);
+    const E& at(unsigned i) const;
     E& operator [] (unsigned i);
     const E& operator [] (unsigned i) const;
 
@@ -367,7 +369,7 @@ Vector<E>::end() const
 
 template <class E>
 E &
-Vector<E>::operator [] (unsigned i)
+Vector<E>::at(unsigned i)
 {
     assert (size() > i);
     return items[i];
@@ -375,12 +377,26 @@ Vector<E>::operator [] (unsigned i)
 
 template <class E>
 const E &
-Vector<E>::operator [] (unsigned i) const
+Vector<E>::at(unsigned i) const
 {
     assert (size() > i);
     return items[i];
 }
 
+template <class E>
+E &
+Vector<E>::operator [] (unsigned i)
+{
+    return items[i];
+}
+
+template <class E>
+const E &
+Vector<E>::operator [] (unsigned i) const
+{
+    return items[i];
+}
+
 template <class C>
 VectorIteratorBase<C>::VectorIteratorBase() : pos(0), theVector(NULL) {}
diff --git a/src/cache_cf.cc b/src/cache_cf.cc
index 7b1f453a45..d1e8e3eead 100644
--- a/src/cache_cf.cc
+++ b/src/cache_cf.cc
@@ -1897,7 +1897,7 @@ static int
 find_fstype(char *type)
 {
     for (size_t i = 0; i < StoreFileSystem::FileSystems().size(); ++i)
-        if (strcasecmp(type, StoreFileSystem::FileSystems().items[i]->type()) == 0)
+        if (strcasecmp(type, StoreFileSystem::FileSystems().at(i)->type()) == 0)
             return (int)i;
 
     return (-1);
@@ -1940,7 +1940,7 @@ parse_cachedir(SquidConfig::_cacheSwap * swap)
 
             sd = dynamic_cast<SwapDir *>(swap->swapDirs[i].getRaw());
 
-            if (strcmp(sd->type(), StoreFileSystem::FileSystems().items[fs]->type()) != 0) {
+            if (strcmp(sd->type(), StoreFileSystem::FileSystems().at(fs)->type()) != 0) {
                 debugs(3, DBG_CRITICAL, "ERROR: Can't change type of existing cache_dir " <<
                        sd->type() << " " << sd->path << " to " << type_str << ". Restart required");
                 return;
@@ -1965,7 +1965,7 @@ parse_cachedir(SquidConfig::_cacheSwap * swap)
 
     allocate_new_swapdir(swap);
 
-    swap->swapDirs[swap->n_configured] = StoreFileSystem::FileSystems().items[fs]->createSwapDir();
+    swap->swapDirs[swap->n_configured] = StoreFileSystem::FileSystems().at(fs)->createSwapDir();
 
     sd = dynamic_cast<SwapDir *>(swap->swapDirs[swap->n_configured].getRaw());
 
diff --git a/src/client_side.cc b/src/client_side.cc
index b5a00a3527..f9fe7b1e96 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -1361,7 +1361,7 @@ ClientSocketContext::buildRangeHeader(HttpReply * rep)
     bool replyMatchRequest = rep->content_range != NULL ?
                              request->range->contains(rep->content_range->spec) :
                              true;
-    const int spec_count = http->request->range->specs.count;
+    const int spec_count = http->request->range->specs.size();
     int64_t actual_clen = -1;
 
     debugs(33, 3, "clientBuildRangeHeader: range spec count: " <<
diff --git a/src/errorpage.cc b/src/errorpage.cc
index 6c25de0ae2..d7d4c51a22 100644
--- a/src/errorpage.cc
+++ b/src/errorpage.cc
@@ -206,7 +206,7 @@ errorInitialize(void)
             /** \par
              * Index any unknown file names used by deny_info.
              */
-            ErrorDynamicPageInfo *info = ErrorDynamicPages.items[i - ERR_MAX];
+            ErrorDynamicPageInfo *info = ErrorDynamicPages[i - ERR_MAX];
             assert(info && info->id == i && info->page_name);
 
             const char *pg = info->page_name;
@@ -533,7 +533,7 @@ errorPageId(const char *page_name)
     }
 
     for (size_t j = 0; j < ErrorDynamicPages.size(); ++j) {
-        if (strcmp(ErrorDynamicPages.items[j]->page_name, page_name) == 0)
+        if (strcmp(ErrorDynamicPages[j]->page_name, page_name) == 0)
             return j + ERR_MAX;
     }
 
@@ -563,7 +563,7 @@ errorPageName(int pageId)
         return err_type_str[pageId];
 
     if (pageId >= ERR_MAX && pageId - ERR_MAX < (ssize_t)ErrorDynamicPages.size())
-        return ErrorDynamicPages.items[pageId - ERR_MAX]->page_name;
+        return ErrorDynamicPages[pageId - ERR_MAX]->page_name;
 
     return "ERR_UNKNOWN"; /* should not happen */
 }
@@ -595,8 +595,8 @@ ErrorState::ErrorState(err_type t, Http::StatusCode status, HttpRequest * req) :
 {
     memset(&ftp, 0, sizeof(ftp));
 
-    if (page_id >= ERR_MAX && ErrorDynamicPages.items[page_id - ERR_MAX]->page_redirect != Http::scNone)
-        httpStatus = ErrorDynamicPages.items[page_id - ERR_MAX]->page_redirect;
+    if (page_id >= ERR_MAX && ErrorDynamicPages[page_id - ERR_MAX]->page_redirect != Http::scNone)
+        httpStatus = ErrorDynamicPages[page_id - ERR_MAX]->page_redirect;
 
     if (req != NULL) {
         request = req;
diff --git a/src/store.cc b/src/store.cc
index 593db046d9..6da82d5048 100644
--- a/src/store.cc
+++ b/src/store.cc
@@ -1290,7 +1290,7 @@ storeLateRelease(void *unused)
     }
 
     for (i = 0; i < 10; ++i) {
-        e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
+        e = LateReleaseStack.empty() ? NULL : LateReleaseStack.pop();
 
         if (e == NULL) {
             /* done! */
diff --git a/src/tests/test_http_range.cc b/src/tests/test_http_range.cc
index bbf0544512..160a96beb2 100644
--- a/src/tests/test_http_range.cc
+++ b/src/tests/test_http_range.cc
@@ -86,7 +86,7 @@ testRangeParser(char const *rangestring)
 
     HttpHdrRange copy(*range);
 
-    assert (copy.specs.count == range->specs.count);
+    assert (copy.specs.size() == range->specs.size());
 
     HttpHdrRange::iterator pos = range->begin();
 
@@ -111,7 +111,7 @@ void
 testRangeIter ()
 {
     HttpHdrRange *range=rangeFromString("bytes=0-3, 1-, -2");
-    assert (range->specs.count == 3);
+    assert (range->specs.size() == 3);
     size_t counter = 0;
     HttpHdrRange::iterator i = range->begin();
 
@@ -132,7 +132,7 @@ void
 testRangeCanonization()
 {
     HttpHdrRange *range=rangeFromString("bytes=0-3, 1-, -2");
-    assert (range->specs.count == 3);
+    assert (range->specs.size() == 3);
 
     /* 0-3 needs a content length of 4 */
     /* This passes in the extant code - but should it? */
@@ -140,13 +140,13 @@ testRangeCanonization()
     if (!range->canonize(3))
        exit(1);
 
-    assert (range->specs.count == 3);
+    assert (range->specs.size() == 3);
 
     delete range;
 
     range=rangeFromString("bytes=0-3, 1-, -2");
 
-    assert (range->specs.count == 3);
+    assert (range->specs.size() == 3);
 
     /* 0-3 needs a content length of 4 */
     if (!range->canonize(4))
@@ -156,7 +156,7 @@ testRangeCanonization()
 
     range=rangeFromString("bytes=3-6");
 
-    assert (range->specs.count == 1);
+    assert (range->specs.size() == 1);
 
     /* 3-6 needs a content length of 4 or more */
     if (range->canonize(3))
@@ -166,7 +166,7 @@ testRangeCanonization()
 
     range=rangeFromString("bytes=3-6");
 
-    assert (range->specs.count == 1);
+    assert (range->specs.size() == 1);
 
     /* 3-6 needs a content length of 4 or more */
     if (!range->canonize(4))
@@ -176,12 +176,12 @@ testRangeCanonization()
 
     range=rangeFromString("bytes=1-1,2-3");
 
-    assert (range->specs.count == 2);
+    assert (range->specs.size() == 2);
 
     if (!range->canonize(4))
        exit(1);
 
-    assert (range->specs.count == 2);
+    assert (range->specs.size() == 2);
 
     delete range;
 }
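
The change is the same throughout: callers stop reaching into Vector's internal count and items members and go through the container interface instead (size(), empty(), at(), operator[], push_back()), while Vector.h gains a bounds-asserting at() and turns operator[] into an unchecked accessor. Below is a minimal standalone sketch of that accessor split; it is not code from the Squid tree, and TinyVector, its fixed capacity, and main() are invented purely for illustration.

    #include <cassert>
    #include <cstddef>

    // Illustrative container (not Squid's Vector) showing the same split:
    // at() keeps the bounds assertion, operator[] trusts the caller.
    template <class E>
    class TinyVector {
    public:
        void push_back(const E &v) { items[count_++] = v; } // no growth; sketch only
        size_t size() const { return count_; }
        bool empty() const { return count_ == 0; }

        E &at(size_t i) { assert(size() > i); return items[i]; } // checked access
        E &operator[](size_t i) { return items[i]; }             // unchecked access

    private:
        E items[64];        // fixed capacity, enough for the example
        size_t count_ = 0;
    };

    int main()
    {
        TinyVector<int> v;
        v.push_back(42);
        // Both accessors read the same element; only at() asserts on bad indexes.
        return (!v.empty() && v.size() == 1 && v.at(0) == v[0]) ? 0 : 1;
    }

This mirrors the usual container convention: at() is the checked accessor and operator[] assumes the caller already validated the index, which is why the Vector.h hunk above keeps the old assert in at() and drops it from operator[].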